debuggers.hg: view of xen/include/asm-x86/mm.h @ 3770:d21fbb46b9d8

BitKeeper revision 1.1159.253.1 (4208f8a54Zaz-XgC11YTHeLxPHPoZg)
Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk

author:   kaf24@scramble.cl.cam.ac.uk
date:     Tue Feb 08 17:36:37 2005 +0000 (2005-02-08)
parents:  f5f2757b3aa2 cb87fd290eb0
children: 12104922e743
#ifndef __ASM_X86_MM_H__
#define __ASM_X86_MM_H__

#include <xen/config.h>
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/desc.h>
#include <asm/flushtlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include <public/xen.h>

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct pfn_info' contains a 'struct list_head list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

struct pfn_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            u32 _domain; /* pickled format */
            /* Type reference count and various PGT_xxx flags and fields. */
            u32 type_info;
        } PACKED inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Mask of possibly-tainted TLBs. */
            u32 cpu_mask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } PACKED free;

    } PACKED u;

} PACKED;
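
/*
 * Usage sketch (added for illustration, not part of the original header):
 * the union above is discriminated by count_info. A page with a non-zero
 * reference count is in use and u.inuse is the live member; otherwise the
 * page is on a free list and u.free is live. The helper name below is
 * hypothetical; PGC_count_mask is defined further down in this header.
 */
#if 0 /* illustrative only */
static inline int pfn_is_in_use(const struct pfn_info *page)
{
    /* Non-zero reference count => u.inuse (owner, type_info) is meaningful. */
    return (page->count_info & PGC_count_mask) != 0;
}
#endif
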
/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
#define PGT_gdt_page        (5<<29) /* using this page in a GDT? */
#define PGT_ldt_page        (6<<29) /* using this page in an LDT? */
#define PGT_writable_page   (7<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7<<29) /* Bits 29-31. */
/* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1U<<_PGT_validated)
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         27
#define PGT_pinned          (1U<<_PGT_pinned)
/* The 10 most significant bits of virt address if this is a page table. */
#define PGT_va_shift        17
#define PGT_va_mask         (((1U<<10)-1)<<PGT_va_shift)
/* Is the back pointer still mutable (i.e. not fixed yet)? */
#define PGT_va_mutable      (((1U<<10)-1)<<PGT_va_shift)
/* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
#define PGT_va_unknown      (((1U<<10)-2)<<PGT_va_shift)
/* 17-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1U<<17)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1U<<_PGC_allocated)
/* 31-bit count of references to this frame. */
#define PGC_count_mask      ((1U<<31)-1)
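
/*
 * Usage sketch (illustrative, not in the original header): type_info packs
 * the mutually-exclusive type into bits 29-31, flag bits into 27-28, a
 * virtual-address hint into bits 17-26, and a 17-bit type reference count
 * into bits 0-16. A hypothetical check for "validated L1 page table":
 */
#if 0 /* illustrative only */
static inline int is_validated_l1_table(const struct pfn_info *page)
{
    u32 t = page->u.inuse.type_info;
    return ((t & PGT_type_mask) == PGT_l1_page_table) &&
           (t & PGT_validated) &&
           ((t & PGT_count_mask) != 0);
}
#endif
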
/* We trust the slab allocator in slab.c, and our use of it. */
#define PageSlab(page)      (1)
#define PageSetSlab(page)   ((void)0)
#define PageClearSlab(page) ((void)0)

#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < xenheap_phys_end)

#if defined(__i386__)
#define pickle_domptr(_d)   ((u32)(unsigned long)(_d))
#define unpickle_domptr(_d) ((struct domain *)(unsigned long)(_d))
#elif defined(__x86_64__)
static inline struct domain *unpickle_domptr(u32 _domain)
{ return (_domain == 0) ? NULL : __va(_domain); }
static inline u32 pickle_domptr(struct domain *domain)
{ return (domain == NULL) ? 0 : (u32)__pa(domain); }
#endif

#define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
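
/*
 * Usage sketch (illustrative, not in the original header): the owner pointer
 * is "pickled" into 32 bits so it fits the packed _domain field even on
 * x86_64, where it is recovered via the direct map. The snippet below shows
 * the intended round trip; 'page' and 'd' are hypothetical locals.
 */
#if 0 /* illustrative only */
    page_set_owner(page, d);              /* store the pickled owner       */
    ASSERT(page_get_owner(page) == d);    /* unpickling recovers the ptr   */
#endif
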
#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
    do {                                                                    \
        page_set_owner((_pfn), (_dom));                                     \
        /* The incremented type count is intended to pin to 'writable'. */  \
        (_pfn)->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;  \
        wmb(); /* install valid domain ptr before updating refcnt. */       \
        spin_lock(&(_dom)->page_alloc_lock);                                \
        /* _dom holds an allocation reference */                            \
        ASSERT((_pfn)->count_info == 0);                                    \
        (_pfn)->count_info |= PGC_allocated | 1;                            \
        if ( unlikely((_dom)->xenheap_pages++ == 0) )                       \
            get_knownalive_domain(_dom);                                    \
        list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list);                \
        spin_unlock(&(_dom)->page_alloc_lock);                              \
    } while ( 0 )
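
/*
 * Usage sketch (illustrative, not in the original header): a Xen-heap frame,
 * such as a domain's shared_info page, is handed to a guest by setting its
 * owner, pre-pinning it writable, and giving it one general and one type
 * reference, all done by the macro above. A hypothetical call site:
 */
#if 0 /* illustrative only */
    SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
#endif
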
#define INVALID_P2M_ENTRY (~0UL)

extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern unsigned long max_page;
void init_frametable(void);

int alloc_page_type(struct pfn_info *page, unsigned int type);
void free_page_type(struct pfn_info *page, unsigned int type);

static inline void put_page(struct pfn_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x  = y;
        nx = x - 1;
    }
    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );

    if ( unlikely((nx & PGC_count_mask) == 0) )
        free_domheap_page(page);
}


static inline int get_page(struct pfn_info *page,
                           struct domain *domain)
{
    u32 x, nx, y = page->count_info;
    u32 d, nd = page->u.inuse._domain;
    u32 _domain = pickle_domptr(domain);

    do {
        x  = y;
        nx = x + 1;
        d  = nd;
        if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
             unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
             unlikely(d != _domain) )                /* Wrong owner? */
        {
            DPRINTK("Error pfn %p: ed=%p, sd=%p, caf=%08x, taf=%08x\n",
                    page_to_pfn(page), domain, unpickle_domptr(d),
                    x, page->u.inuse.type_info);
            return 0;
        }
        __asm__ __volatile__(
            LOCK_PREFIX "cmpxchg8b %3"
            : "=d" (nd), "=a" (y), "=c" (d),
              "=m" (*(volatile u64 *)(&page->count_info))
            : "0" (d), "1" (x), "c" (d), "b" (nx) );
    }
    while ( unlikely(nd != d) || unlikely(y != x) );

    return 1;
}
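
/*
 * Note (added commentary, not in the original header): get_page() relies on
 * count_info and u.inuse._domain being adjacent 32-bit fields, so a single
 * LOCK cmpxchg8b can bump the reference count and verify the owner in one
 * atomic 64-bit operation. A typical (hypothetical) caller pairs it with
 * put_page():
 */
#if 0 /* illustrative only */
    if ( unlikely(!get_page(page, d)) )
        return -EINVAL;   /* page not allocated, or owned by another domain */
    /* ... safely use the frame while the reference is held ... */
    put_page(page);
#endif
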
void put_page_type(struct pfn_info *page);
int  get_page_type(struct pfn_info *page, u32 type);

static inline void put_page_and_type(struct pfn_info *page)
{
    put_page_type(page);
    put_page(page);
}


static inline int get_page_and_type(struct pfn_info *page,
                                    struct domain *domain,
                                    u32 type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
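
/*
 * Usage sketch (illustrative, not in the original header): a typed reference
 * takes both a general reference and a type reference, and is dropped with
 * put_page_and_type(). For example, a hypothetical caller holding a frame as
 * a validated L1 page table:
 */
#if 0 /* illustrative only */
    if ( unlikely(!get_page_and_type(page, d, PGT_l1_page_table)) )
        return 0;            /* wrong owner, or type validation failed */
    /* ... the frame is guaranteed to remain a validated L1 table ... */
    put_page_and_type(page);
#endif
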
#define ASSERT_PAGE_IS_TYPE(_p, _t)                            \
    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
#define ASSERT_PAGE_IS_DOMAIN(_p, _d)                          \
    ASSERT(((_p)->count_info & PGC_count_mask) != 0);          \
    ASSERT(page_get_owner(_p) == (_d))

int check_descriptor(struct desc_struct *d);

/*
 * Use currently-executing domain's pagetables on the specified CPUs.
 * i.e., stop borrowing someone else's tables if you are the idle domain.
 */
void synchronise_pagetables(unsigned long cpu_mask);

/*
 * The MPT (machine->physical mapping table) is an array of word-sized
 * values, indexed on machine frame number. It is expected that guest OSes
 * will use it to store a "physical" frame number to give the appearance of
 * contiguous (or near contiguous) physical memory.
 */
#undef machine_to_phys_mapping

/*
 * The phys_to_machine_mapping is the reversed mapping of MPT for full
 * virtualization.
 */
#undef phys_to_machine_mapping

#define machine_to_phys_mapping   ((unsigned long *)RDWR_MPT_VIRT_START)
#define __phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START)
/* Returns the machine frame number (MFN) for a given pseudo-physical frame. */
static inline unsigned long phys_to_machine_mapping(unsigned long pfn)
{
    unsigned long mfn;
    l1_pgentry_t pte;

    if ( __get_user(l1_pgentry_val(pte), (__phys_to_machine_mapping + pfn)) )
        return 0;

    mfn = l1_pgentry_to_phys(pte) >> PAGE_SHIFT;
    return mfn;
}
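
/*
 * Usage sketch (illustrative, not in the original header): translating a
 * guest's pseudo-physical frame number to a machine frame and back. The
 * consistency check shown is hypothetical.
 */
#if 0 /* illustrative only */
    unsigned long mfn = phys_to_machine_mapping(pfn);   /* pfn -> mfn */
    if ( mfn != 0 )
        ASSERT(machine_to_phys_mapping[mfn] == pfn);    /* mfn -> pfn */
#endif
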
#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)

#define DEFAULT_GDT_ENTRIES     (LAST_RESERVED_GDT_ENTRY+1)
#define DEFAULT_GDT_ADDRESS     ((unsigned long)gdt_table)

#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s)             (_s)
#define memguard_guard_stack(_p)      ((void)0)
#define memguard_guard_range(_p,_l)   ((void)0)
#define memguard_unguard_range(_p,_l) ((void)0)
#endif
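
/*
 * Usage sketch (illustrative, not in the original header): with MEMORY_GUARD
 * enabled the memguard_* calls make the given range inaccessible in Xen's
 * mappings so stray accesses fault; without it they compile away to no-ops,
 * so callers may use them unconditionally. Hypothetical example guarding a
 * debug buffer 'buf':
 */
#if 0 /* illustrative only */
    memguard_guard_range(buf, PAGE_SIZE);     /* trap any access to buf     */
    /* ... */
    memguard_unguard_range(buf, PAGE_SIZE);   /* make buf accessible again  */
#endif
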
typedef struct {
    void (*enable)(struct domain *);
    void (*disable)(struct domain *);
} vm_assist_info_t;
extern vm_assist_info_t vm_assist_info[];


/* Writable Pagetables */
typedef struct {
    /* Linear address where the guest is updating the p.t. page. */
    unsigned long l1va;
    /* Copy of the p.t. page, taken before guest is given write access. */
    l1_pgentry_t *page;
    /* A temporary Xen mapping of the actual p.t. page. */
    l1_pgentry_t *pl1e;
    /* Index in L2 page table where this L1 p.t. is always hooked. */
    unsigned int l2_idx; /* NB. Only used for PTWR_PT_ACTIVE. */
} ptwr_ptinfo_t;

typedef struct {
    ptwr_ptinfo_t ptinfo[2];
} __cacheline_aligned ptwr_info_t;

extern ptwr_info_t ptwr_info[];

#define PTWR_PT_ACTIVE   0
#define PTWR_PT_INACTIVE 1

#define PTWR_CLEANUP_ACTIVE   1
#define PTWR_CLEANUP_INACTIVE 2

void ptwr_flush(const int);
int ptwr_do_page_fault(unsigned long);

int new_guest_cr3(unsigned long pfn);

#define __cleanup_writable_pagetable(_what)                     \
    do {                                                        \
        int cpu = smp_processor_id();                           \
        if ((_what) & PTWR_CLEANUP_ACTIVE)                      \
            if (ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va)     \
                ptwr_flush(PTWR_PT_ACTIVE);                     \
        if ((_what) & PTWR_CLEANUP_INACTIVE)                    \
            if (ptwr_info[cpu].ptinfo[PTWR_PT_INACTIVE].l1va)   \
                ptwr_flush(PTWR_PT_INACTIVE);                   \
    } while ( 0 )

#define cleanup_writable_pagetable(_d)                                     \
    do {                                                                   \
        if ( unlikely(VM_ASSIST((_d), VMASST_TYPE_writable_pagetables)) )  \
            __cleanup_writable_pagetable(PTWR_CLEANUP_ACTIVE |             \
                                         PTWR_CLEANUP_INACTIVE);           \
    } while ( 0 )
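
/*
 * Usage sketch (illustrative, not in the original header): when a guest has
 * enabled the writable-pagetables assist, Xen should flush any page-table
 * page it has temporarily given the guest write access to before it walks or
 * inspects that guest's page tables. A hypothetical call site:
 */
#if 0 /* illustrative only */
    cleanup_writable_pagetable(d);  /* re-validate outstanding p.t. pages */
    /* ... now safe to read d's page tables ... */
#endif
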
#ifndef NDEBUG
void audit_domain(struct domain *d);
void audit_domains(void);
#else
#define audit_domain(_d) ((void)0)
#define audit_domains()  ((void)0)
#endif

void propagate_page_fault(unsigned long addr, u16 error_code);

#endif /* __ASM_X86_MM_H__ */