debuggers.hg
annotate xen/include/asm-x86/domain.h @ 16712:ad0f20f5590a
Rename uintN_t guest handles to uintN, to avoid nameclash with uintN_t
macros during the handle definitions.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author:   Keir Fraser <keir.fraser@citrix.com>
date:     Fri Dec 28 15:44:51 2007 +0000 (2007-12-28)
parents:  69b56d3289f5
children: 03d13b696027
#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/config.h>
#include <xen/mm.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>
#include <asm/e820.h>

#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo)
#define is_pv_32bit_domain(d) ((d)->arch.is_32bit_pv)
#define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain))
#ifdef __x86_64__
#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
#else
#define is_pv_32on64_domain(d) (0)
#endif
#define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
#define IS_COMPAT(d) (is_pv_32on64_domain(d))

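/*
 * Editorial usage sketch (not part of the original header; struct domain
 * is only fully defined in xen/sched.h): callers use these predicates to
 * pick the guest ABI, e.g.
 *
 *     unsigned int gptr_bytes = is_pv_32on64_domain(d) ? 4 : 8;
 */
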
struct trap_bounce {
    uint32_t error_code;
    uint8_t flags; /* TBF_ */
    uint16_t cs;
    unsigned long eip;
};

#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
#define MAPHASHENT_NOTINUSE ((u16)~0U)
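/* Editorial example: MAPHASH_HASHFN(0x1234) == (0x1234 & 7) == 4. */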
struct mapcache_vcpu {
    /* Shadow of mapcache_domain.epoch. */
    unsigned int shadow_epoch;

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash_entry {
        unsigned long mfn;
        uint16_t idx;
        uint16_t refcnt;
    } hash[MAPHASH_ENTRIES];
};
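
/*
 * Editorial sketch (hypothetical helper, not in the original header;
 * assumes u16/NULL are visible via the includes above): probe the
 * per-VCPU hash. A slot whose idx is MAPHASHENT_NOTINUSE holds no
 * mapping.
 */
static inline struct vcpu_maphash_entry *
maphash_probe(struct mapcache_vcpu *cache, unsigned long mfn)
{
    struct vcpu_maphash_entry *ent = &cache->hash[MAPHASH_HASHFN(mfn)];
    return (ent->idx != MAPHASHENT_NOTINUSE && ent->mfn == mfn) ? ent : NULL;
}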

#define MAPCACHE_ORDER 10
#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
struct mapcache_domain {
    /* The PTEs that provide the mappings, and a cursor into the array. */
    l1_pgentry_t *l1tab;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch;
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
    unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
};
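
/*
 * Editorial lifecycle sketch (not in the original header):
 * map_domain_page() takes the lock and scans inuse[] from cursor for a
 * free PTE slot; unmapping marks the slot in garbage[]; bumping epoch
 * TLB-flushes the stale translations so garbage slots can be reused.
 */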

void mapcache_domain_init(struct domain *);
void mapcache_vcpu_init(struct vcpu *);

/* x86/64: toggle guest between kernel and user modes. */
void toggle_guest_mode(struct vcpu *);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
void hypercall_page_initialise(struct domain *d, void *);

/************************************************/
/* shadow paging extension */
/************************************************/
struct shadow_domain {
    spinlock_t lock; /* shadow domain lock */
    int locker; /* processor which holds the lock */
    const char *locker_function; /* Func that took it */
    unsigned int opt_flags; /* runtime tunable optimizations on/off */
    struct list_head pinned_shadows;

    /* Memory allocation */
    struct list_head freelists[SHADOW_MAX_ORDER + 1];
    struct list_head p2m_freelist;
    unsigned int total_pages; /* number of pages allocated */
    unsigned int free_pages; /* number of pages on freelists */
    unsigned int p2m_pages; /* number of pages allocated to p2m */

    /* 1-to-1 map for use when HVM vcpus have paging disabled */
    pagetable_t unpaged_pagetable;

    /* Shadow hashtable */
    struct shadow_page_info **hash_table;
    int hash_walking; /* Some function is walking the hash table */

    /* Fast MMIO path heuristic */
    int has_fast_mmio_entries;
};

struct shadow_vcpu {
#if CONFIG_PAGING_LEVELS >= 3
    /* PAE guests: per-vcpu shadow top-level table */
    l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
    /* PAE guests: per-vcpu cache of the top-level *guest* entries */
    l3_pgentry_t gl3e[4] __attribute__((__aligned__(32)));
#endif
    /* Non-PAE guests: pointer to guest top-level pagetable */
    void *guest_vtable;
    /* Last MFN that we emulated a write to. */
    unsigned long last_emulated_mfn;
    /* MFN of the last shadow that we shot a writeable mapping in */
    unsigned long last_writeable_pte_smfn;
};

/************************************************/
/* hardware assisted paging */
/************************************************/
struct hap_domain {
    spinlock_t lock;
    int locker;
    const char *locker_function;

    struct list_head freelist;
    unsigned int total_pages; /* number of pages allocated */
    unsigned int free_pages; /* number of pages on freelists */
    unsigned int p2m_pages; /* number of pages allocated to p2m */
};

/************************************************/
/* p2m handling */
/************************************************/
struct p2m_domain {
    /* Lock that protects updates to the p2m */
    spinlock_t lock;
    int locker; /* processor which holds the lock */
    const char *locker_function; /* Func that took it */

    /* Pages used to construct the p2m */
    struct list_head pages;

    /* Functions to call to get or free pages for the p2m */
    struct page_info *(*alloc_page)(struct domain *d);
    void (*free_page)(struct domain *d, struct page_info *pg);

    /* Highest guest frame that's ever been mapped in the p2m */
    unsigned long max_mapped_pfn;
};
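
/*
 * Editorial sketch (not in the original header): p2m construction pulls
 * its pagetable pages through the hooks above, e.g.
 *
 *     struct page_info *pg = d->arch.p2m.alloc_page(d);
 *     ...install it as a p2m pagetable page...
 *     list_add_tail(&pg->list, &d->arch.p2m.pages);
 */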

/************************************************/
/* common paging data structure */
/************************************************/
struct log_dirty_domain {
    /* log-dirty lock */
    spinlock_t lock;
    int locker; /* processor that holds the lock */
    const char *locker_function; /* func that took it */

    /* log-dirty radix tree to record dirty pages */
    mfn_t top;
    unsigned int allocs;
    unsigned int failed_allocs;

    /* log-dirty mode stats */
    unsigned int fault_count;
    unsigned int dirty_count;

    /* functions which are paging mode specific */
    int (*enable_log_dirty)(struct domain *d);
    int (*disable_log_dirty)(struct domain *d);
    void (*clean_dirty_bitmap)(struct domain *d);
};
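
/*
 * Editorial note: callers reach the active paging implementation through
 * these hooks, e.g. d->arch.paging.log_dirty.enable_log_dirty(d), rather
 * than calling shadow or HAP code directly.
 */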

struct paging_domain {
    /* flags to control paging operation */
    u32 mode;
    /* extension for shadow paging support */
    struct shadow_domain shadow;
    /* extension for hardware-assisted paging */
    struct hap_domain hap;
    /* log dirty support */
    struct log_dirty_domain log_dirty;
};

struct paging_vcpu {
    /* Pointers to mode-specific entry points. */
    struct paging_mode *mode;
    /* HVM guest: last emulate was to a pagetable */
    unsigned int last_write_was_pt:1;
    /* Translated guest: virtual TLB */
    struct shadow_vtlb *vtlb;
    spinlock_t vtlb_lock;

    /* paging support extension */
    struct shadow_vcpu shadow;
};

struct arch_domain
{
    l1_pgentry_t *mm_perdomain_pt;
#ifdef CONFIG_X86_64
    l2_pgentry_t *mm_perdomain_l2;
    l3_pgentry_t *mm_perdomain_l3;
#endif

#ifdef CONFIG_X86_32
    /* map_domain_page() mapping cache. */
    struct mapcache_domain mapcache;
#endif

#ifdef CONFIG_COMPAT
    unsigned int hv_compat_vstart;
    l3_pgentry_t *mm_arg_xlat_l3;
#endif

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;

    struct hvm_domain hvm_domain;

    struct paging_domain paging;
    struct p2m_domain p2m;

    /* Shadow translated domain: P2M mapping */
    pagetable_t phys_table;

    /* Pseudophysical e820 map (XENMEM_memory_map). */
    struct e820entry e820[3];
    unsigned int nr_e820;

    /* Maximum physical-address bitwidth supported by this guest. */
    unsigned int physaddr_bitsize;

    /* Is a 32-bit PV (non-HVM) guest? */
    bool_t is_32bit_pv;
    /* Is shared-info page in 32-bit format? */
    bool_t has_32bit_shinfo;

    /* Continuable domain_relinquish_resources(). */
    enum {
        RELMEM_not_started,
        RELMEM_xen_l4,
        RELMEM_dom_l4,
        RELMEM_xen_l3,
        RELMEM_dom_l3,
        RELMEM_xen_l2,
        RELMEM_dom_l2,
        RELMEM_done,
    } relmem;
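    /*
     * Editorial note: relmem advances from RELMEM_not_started to
     * RELMEM_done across preempted calls to
     * domain_relinquish_resources(), so teardown resumes where it
     * stopped.
     */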
    struct list_head relmem_list;
} __cacheline_aligned;

#ifdef CONFIG_X86_PAE
struct pae_l3_cache {
    /*
     * Two low-memory (<4GB) PAE L3 tables, used as fallback when the guest
     * supplies a >=4GB PAE L3 table. We need two because we cannot set up
     * an L3 table while we are currently running on it (without using
     * expensive atomic 64-bit operations).
     */
    l3_pgentry_t table[2][4] __attribute__((__aligned__(32)));
    unsigned long high_mfn; /* The >=4GB MFN being shadowed. */
    unsigned int inuse_idx; /* Which of the two cache slots is in use? */
    spinlock_t lock;
};
#define pae_l3_cache_init(c) spin_lock_init(&(c)->lock)
#else /* !CONFIG_X86_PAE */
struct pae_l3_cache { };
#define pae_l3_cache_init(c) ((void)0)
#endif
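
/*
 * Editorial sketch of the double-buffering (not in the original header):
 *
 *     unsigned int idx = cache->inuse_idx ^ 1;  // build in the idle slot
 *     ...fill cache->table[idx] from the guest's high-memory L3...
 *     cache->inuse_idx = idx;                   // publish the new slot
 */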

struct arch_vcpu
{
    /* Needs 16-byte alignment for FXSAVE/FXRSTOR. */
    struct vcpu_guest_context guest_context
        __attribute__((__aligned__(16)));

    struct pae_l3_cache pae_l3_cache;

    unsigned long flags; /* TF_ */

    void (*schedule_tail) (struct vcpu *);

    void (*ctxt_switch_from) (struct vcpu *);
    void (*ctxt_switch_to) (struct vcpu *);

    /* Record information required to continue execution after migration */
    void *continue_info;

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;

    /* I/O-port access bitmap. */
    XEN_GUEST_HANDLE(uint8) iobmp; /* Guest kernel vaddr of the bitmap. */
    int iobmp_limit; /* Number of ports represented in the bitmap. */
    int iopl; /* Current IOPL for this VCPU. */

#ifdef CONFIG_X86_32
    struct desc_struct int80_desc;
#endif
#ifdef CONFIG_X86_64
    struct trap_bounce int80_bounce;
    unsigned long syscall32_callback_eip;
    unsigned long sysenter_callback_eip;
    unsigned short syscall32_callback_cs;
    unsigned short sysenter_callback_cs;
    bool_t syscall32_disables_events;
    bool_t sysenter_disables_events;
#endif

    /* Virtual Machine Extensions */
    struct hvm_vcpu hvm_vcpu;

    /*
     * Every domain has a L1 pagetable of its own. Per-domain mappings
     * are put in this table (e.g. the current GDT is mapped here).
     */
    l1_pgentry_t *perdomain_ptes;

#ifdef CONFIG_X86_64
    pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */
#endif
    pagetable_t guest_table; /* (MFN) guest notion of cr3 */
    /* guest_table holds a ref to the page, and also a type-count unless
     * shadow refcounts are in use */
    pagetable_t shadow_table[4]; /* (MFN) shadow(s) of guest */
    pagetable_t monitor_table; /* (MFN) hypervisor PT (for HVM) */
    unsigned long cr3; /* (MA) value to install in HW CR3 */

    /* Current LDT details. */
    unsigned long shadow_ldt_mapcnt;

    struct paging_vcpu paging;

    /* Guest-specified relocation of vcpu_info. */
    unsigned long vcpu_info_mfn;

#ifdef CONFIG_X86_32
    /* map_domain_page() mapping cache. */
    struct mapcache_vcpu mapcache;
#endif

} __cacheline_aligned;

/* Shorthands to improve code legibility. */
#define hvm_vmx hvm_vcpu.u.vmx
#define hvm_svm hvm_vcpu.u.svm
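/* E.g. v->arch.hvm_vmx expands to v->arch.hvm_vcpu.u.vmx. */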

/* Continue the current hypercall via func(data) on specified cpu. */
int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);
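
/*
 * Editorial usage sketch (hypothetical continuation function):
 *
 *     static long my_cont(void *arg) { ...finish the work... return 0; }
 *     return continue_hypercall_on_cpu(0, my_cont, data);
 */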

/* Clean up CR4 bits that are not under guest control. */
unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4);

/* Convert between guest-visible and real CR4 values. */
#define pv_guest_cr4_to_real_cr4(c) \
    (((c) | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE))) & ~X86_CR4_DE)
#define real_cr4_to_pv_guest_cr4(c) \
    ((c) & ~(X86_CR4_PGE | X86_CR4_PSE))
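
/*
 * Editorial worked example: with PGE and PSE present in mmu_cr4_features,
 * real_cr4_to_pv_guest_cr4(pv_guest_cr4_to_real_cr4(c)) ==
 * (c & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_DE)), i.e. Xen owns PGE,
 * PSE and DE regardless of what a PV guest writes.
 */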

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */