debuggers.hg

view xen/include/asm-x86/domain.h @ 16466:69b56d3289f5

x86: emulate I/O port access breakpoints

Emulate the trapping on I/O port accesses when emulating IN/OUT.

Also allow 8-byte breakpoints on x86-64 (and on i686 if the hardware
supports them), and tighten the condition for loading debug registers
during context switch.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Nov 22 19:23:40 2007 +0000 (2007-11-22)
parents 68c911f7733a
children ad0f20f5590a
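
The description covers behaviour defined by the x86 debug architecture: a
debug register programmed as an I/O breakpoint (R/W field = 2, which is only
architecturally enabled when CR4.DE is set) must fire when a matching port is
accessed, so a hypervisor that emulates IN/OUT has to perform that match
itself. Below is a minimal illustrative sketch of the architectural check
only; the helper name and signature are invented for this example and are not
the code added by the changeset.

/* Hypothetical helper: return a DR6-style mask (bits 0-3) of the enabled
 * I/O breakpoints matching an emulated port access of 'len' bytes. */
static unsigned int io_breakpoint_matches(
    const unsigned long dr[4], unsigned long dr7,
    unsigned int port, unsigned int len)
{
    /* DR7 LEN field: 0 -> 1 byte, 1 -> 2 bytes, 2 -> 8 bytes, 3 -> 4 bytes. */
    static const unsigned int width[4] = { 1, 2, 8, 4 };
    unsigned int i, match = 0;

    for ( i = 0; i < 4; i++ )
    {
        unsigned int w, start;

        /* Breakpoint i must be locally or globally enabled (DR7 L0-L3/G0-G3). */
        if ( !(dr7 & (3u << (i * 2))) )
            continue;

        /* R/W field value 2 selects an I/O breakpoint. */
        if ( ((dr7 >> (16 + i * 4)) & 3) != 2 )
            continue;

        w = width[(dr7 >> (18 + i * 4)) & 3];
        start = (unsigned int)dr[i] & ~(w - 1);
        if ( (port < start + w) && (port + len > start) )
            match |= 1u << i;
    }

    return match;
}

On a match, the caller would merge the returned bits into the guest's virtual
%dr6 and raise a #DB debug trap, mirroring what the hardware does for a native
port access with CR4.DE set.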
line source

#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/config.h>
#include <xen/mm.h>
#include <asm/hvm/vcpu.h>
#include <asm/hvm/domain.h>
#include <asm/e820.h>

#define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
#define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
#define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
#ifdef __x86_64__
#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
#else
#define is_pv_32on64_domain(d) (0)
#endif
#define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))
#define IS_COMPAT(d)           (is_pv_32on64_domain(d))

struct trap_bounce {
    uint32_t      error_code;
    uint8_t       flags; /* TBF_ */
    uint16_t      cs;
    unsigned long eip;
};

#define MAPHASH_ENTRIES 8
#define MAPHASH_HASHFN(pfn) ((pfn) & (MAPHASH_ENTRIES-1))
#define MAPHASHENT_NOTINUSE ((u16)~0U)
struct mapcache_vcpu {
    /* Shadow of mapcache_domain.epoch. */
    unsigned int shadow_epoch;

    /* Lock-free per-VCPU hash of recently-used mappings. */
    struct vcpu_maphash_entry {
        unsigned long mfn;
        uint16_t      idx;
        uint16_t      refcnt;
    } hash[MAPHASH_ENTRIES];
};

#define MAPCACHE_ORDER   10
#define MAPCACHE_ENTRIES (1 << MAPCACHE_ORDER)
struct mapcache_domain {
    /* The PTEs that provide the mappings, and a cursor into the array. */
    l1_pgentry_t *l1tab;
    unsigned int cursor;

    /* Protects map_domain_page(). */
    spinlock_t lock;

    /* Garbage mappings are flushed from TLBs in batches called 'epochs'. */
    unsigned int epoch;
    u32 tlbflush_timestamp;

    /* Which mappings are in use, and which are garbage to reap next epoch? */
    unsigned long inuse[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
    unsigned long garbage[BITS_TO_LONGS(MAPCACHE_ENTRIES)];
};

void mapcache_domain_init(struct domain *);
void mapcache_vcpu_init(struct vcpu *);

/* x86/64: toggle guest between kernel and user modes. */
void toggle_guest_mode(struct vcpu *);

/*
 * Initialise a hypercall-transfer page. The given pointer must be mapped
 * in Xen virtual address space (accesses are not validated or checked).
 */
void hypercall_page_initialise(struct domain *d, void *);
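
/*
 * Usage sketch (hypothetical, for illustration): the caller maps the target
 * guest page into Xen's address space, e.g. with map_domain_page(), writes
 * the hypercall stubs into it, then unmaps it:
 *
 *     void *p = map_domain_page(gmfn);
 *     hypercall_page_initialise(d, p);
 *     unmap_domain_page(p);
 */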

/************************************************/
/*            shadow paging extension           */
/************************************************/
struct shadow_domain {
    spinlock_t   lock;   /* shadow domain lock */
    int          locker; /* processor which holds the lock */
    const char  *locker_function; /* Func that took it */
    unsigned int opt_flags; /* runtime tunable optimizations on/off */
    struct list_head pinned_shadows;

    /* Memory allocation */
    struct list_head freelists[SHADOW_MAX_ORDER + 1];
    struct list_head p2m_freelist;
    unsigned int     total_pages; /* number of pages allocated */
    unsigned int     free_pages;  /* number of pages on freelists */
    unsigned int     p2m_pages;   /* number of pages allocated to p2m */

    /* 1-to-1 map for use when HVM vcpus have paging disabled */
    pagetable_t unpaged_pagetable;

    /* Shadow hashtable */
    struct shadow_page_info **hash_table;
    int hash_walking; /* Some function is walking the hash table */

    /* Fast MMIO path heuristic */
    int has_fast_mmio_entries;
};

struct shadow_vcpu {
#if CONFIG_PAGING_LEVELS >= 3
    /* PAE guests: per-vcpu shadow top-level table */
    l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
    /* PAE guests: per-vcpu cache of the top-level *guest* entries */
    l3_pgentry_t gl3e[4] __attribute__((__aligned__(32)));
#endif
    /* Non-PAE guests: pointer to guest top-level pagetable */
    void *guest_vtable;
    /* Last MFN that we emulated a write to. */
    unsigned long last_emulated_mfn;
    /* MFN of the last shadow that we shot a writeable mapping in */
    unsigned long last_writeable_pte_smfn;
};

/************************************************/
/*            hardware assisted paging          */
/************************************************/
struct hap_domain {
    spinlock_t   lock;
    int          locker;
    const char  *locker_function;

    struct list_head freelist;
    unsigned int     total_pages; /* number of pages allocated */
    unsigned int     free_pages;  /* number of pages on freelists */
    unsigned int     p2m_pages;   /* number of pages allocated to p2m */
};

/************************************************/
/*                 p2m handling                 */
/************************************************/
struct p2m_domain {
    /* Lock that protects updates to the p2m */
    spinlock_t   lock;
    int          locker;   /* processor which holds the lock */
    const char  *locker_function; /* Func that took it */

    /* Pages used to construct the p2m */
    struct list_head pages;

    /* Functions to call to get or free pages for the p2m */
    struct page_info *(*alloc_page)(struct domain *d);
    void             (*free_page)(struct domain *d, struct page_info *pg);

    /* Highest guest frame that's ever been mapped in the p2m */
    unsigned long max_mapped_pfn;
};

/************************************************/
/*          common paging data structure        */
/************************************************/
struct log_dirty_domain {
    /* log-dirty lock */
    spinlock_t   lock;
    int          locker; /* processor that holds the lock */
    const char  *locker_function; /* func that took it */

    /* log-dirty radix tree to record dirty pages */
    mfn_t        top;
    unsigned int allocs;
    unsigned int failed_allocs;

    /* log-dirty mode stats */
    unsigned int fault_count;
    unsigned int dirty_count;

    /* functions which are paging mode specific */
    int  (*enable_log_dirty)(struct domain *d);
    int  (*disable_log_dirty)(struct domain *d);
    void (*clean_dirty_bitmap)(struct domain *d);
};

struct paging_domain {
    /* flags to control paging operation */
    u32                     mode;
    /* extension for shadow paging support */
    struct shadow_domain    shadow;
    /* extension for hardware-assisted paging */
    struct hap_domain       hap;
    /* log dirty support */
    struct log_dirty_domain log_dirty;
};

struct paging_vcpu {
    /* Pointers to mode-specific entry points. */
    struct paging_mode *mode;
    /* HVM guest: last emulate was to a pagetable */
    unsigned int last_write_was_pt:1;
    /* Translated guest: virtual TLB */
    struct shadow_vtlb *vtlb;
    spinlock_t          vtlb_lock;

    /* paging support extension */
    struct shadow_vcpu shadow;
};

struct arch_domain
{
    l1_pgentry_t *mm_perdomain_pt;
#ifdef CONFIG_X86_64
    l2_pgentry_t *mm_perdomain_l2;
    l3_pgentry_t *mm_perdomain_l3;
#endif

#ifdef CONFIG_X86_32
    /* map_domain_page() mapping cache. */
    struct mapcache_domain mapcache;
#endif

#ifdef CONFIG_COMPAT
    unsigned int hv_compat_vstart;
    l3_pgentry_t *mm_arg_xlat_l3;
#endif

    /* I/O-port admin-specified access capabilities. */
    struct rangeset *ioport_caps;

    struct hvm_domain hvm_domain;

    struct paging_domain paging;
    struct p2m_domain p2m;

    /* Shadow translated domain: P2M mapping */
    pagetable_t phys_table;

    /* Pseudophysical e820 map (XENMEM_memory_map). */
    struct e820entry e820[3];
    unsigned int nr_e820;

    /* Maximum physical-address bitwidth supported by this guest. */
    unsigned int physaddr_bitsize;

    /* Is a 32-bit PV (non-HVM) guest? */
    bool_t is_32bit_pv;
    /* Is shared-info page in 32-bit format? */
    bool_t has_32bit_shinfo;

    /* Continuable domain_relinquish_resources(). */
    enum {
        RELMEM_not_started,
        RELMEM_xen_l4,
        RELMEM_dom_l4,
        RELMEM_xen_l3,
        RELMEM_dom_l3,
        RELMEM_xen_l2,
        RELMEM_dom_l2,
        RELMEM_done,
    } relmem;
    struct list_head relmem_list;
} __cacheline_aligned;

#ifdef CONFIG_X86_PAE
struct pae_l3_cache {
    /*
     * Two low-memory (<4GB) PAE L3 tables, used as fallback when the guest
     * supplies a >=4GB PAE L3 table. We need two because we cannot set up
     * an L3 table while we are currently running on it (without using
     * expensive atomic 64-bit operations).
     */
    l3_pgentry_t  table[2][4] __attribute__((__aligned__(32)));
    unsigned long high_mfn;  /* The >=4GB MFN being shadowed. */
    unsigned int  inuse_idx; /* Which of the two cache slots is in use? */
    spinlock_t    lock;
};
#define pae_l3_cache_init(c) spin_lock_init(&(c)->lock)
#else /* !CONFIG_X86_PAE */
struct pae_l3_cache { };
#define pae_l3_cache_init(c) ((void)0)
#endif

struct arch_vcpu
{
    /* Needs 16-byte alignment for FXSAVE/FXRSTOR. */
    struct vcpu_guest_context guest_context
        __attribute__((__aligned__(16)));

    struct pae_l3_cache pae_l3_cache;

    unsigned long flags; /* TF_ */

    void (*schedule_tail) (struct vcpu *);

    void (*ctxt_switch_from) (struct vcpu *);
    void (*ctxt_switch_to) (struct vcpu *);

    /* Record information required to continue execution after migration */
    void *continue_info;

    /* Bounce information for propagating an exception to guest OS. */
    struct trap_bounce trap_bounce;

    /* I/O-port access bitmap. */
    XEN_GUEST_HANDLE(uint8_t) iobmp; /* Guest kernel vaddr of the bitmap. */
    int iobmp_limit;  /* Number of ports represented in the bitmap. */
    int iopl;         /* Current IOPL for this VCPU. */

#ifdef CONFIG_X86_32
    struct desc_struct int80_desc;
#endif
#ifdef CONFIG_X86_64
    struct trap_bounce int80_bounce;
    unsigned long syscall32_callback_eip;
    unsigned long sysenter_callback_eip;
    unsigned short syscall32_callback_cs;
    unsigned short sysenter_callback_cs;
    bool_t syscall32_disables_events;
    bool_t sysenter_disables_events;
#endif

    /* Virtual Machine Extensions */
    struct hvm_vcpu hvm_vcpu;

    /*
     * Every domain has a L1 pagetable of its own. Per-domain mappings
     * are put in this table (eg. the current GDT is mapped here).
     */
    l1_pgentry_t *perdomain_ptes;

#ifdef CONFIG_X86_64
    pagetable_t guest_table_user;       /* (MFN) x86/64 user-space pagetable */
#endif
    pagetable_t guest_table;            /* (MFN) guest notion of cr3 */
    /* guest_table holds a ref to the page, and also a type-count unless
     * shadow refcounts are in use */
    pagetable_t shadow_table[4];        /* (MFN) shadow(s) of guest */
    pagetable_t monitor_table;          /* (MFN) hypervisor PT (for HVM) */
    unsigned long cr3;                  /* (MA) value to install in HW CR3 */

    /* Current LDT details. */
    unsigned long shadow_ldt_mapcnt;

    struct paging_vcpu paging;

    /* Guest-specified relocation of vcpu_info. */
    unsigned long vcpu_info_mfn;

#ifdef CONFIG_X86_32
    /* map_domain_page() mapping cache. */
    struct mapcache_vcpu mapcache;
#endif

} __cacheline_aligned;

/* Shorthands to improve code legibility. */
#define hvm_vmx hvm_vcpu.u.vmx
#define hvm_svm hvm_vcpu.u.svm

/* Continue the current hypercall via func(data) on specified cpu. */
int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);
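
/*
 * Usage sketch (hypothetical, for illustration): a hypercall handler that
 * must run on a particular CPU hands the remainder of its work to a callback
 * and completes with that callback's return value:
 *
 *     static long do_work(void *arg) { ... }
 *     ...
 *     return continue_hypercall_on_cpu(0, do_work, arg);
 */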

/* Clean up CR4 bits that are not under guest control. */
unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4);

/* Convert between guest-visible and real CR4 values. */
#define pv_guest_cr4_to_real_cr4(c) \
    (((c) | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE))) & ~X86_CR4_DE)
#define real_cr4_to_pv_guest_cr4(c) \
    ((c) & ~(X86_CR4_PGE | X86_CR4_PSE))
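
/*
 * Illustrative note: pv_guest_cr4_to_real_cr4() keeps X86_CR4_DE clear in
 * the value actually loaded into hardware, so guest-programmed I/O
 * breakpoints never fire natively for PV guests; this matches the changeset
 * description above, which has Xen emulate such breakpoints when it
 * emulates IN/OUT.
 */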

#endif /* __ASM_DOMAIN_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */