debuggers.hg

view xen/include/asm-x86/mm.h @ 3515:d331c6994d28

bitkeeper revision 1.1159.223.12 (41f14d3cE4GADmEAEr6XE9nXX4dyGw)

Common-code cleanups. Moved arch-specific code out into arch/x86
and asm-x86.
author kaf24@scramble.cl.cam.ac.uk
date Fri Jan 21 18:43:08 2005 +0000 (2005-01-21)
parents cfb5f80fb23e
children 46c14b1a4351 0dc3b8b8c298
line source
#ifndef __ASM_X86_MM_H__
#define __ASM_X86_MM_H__

#include <xen/config.h>
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/desc.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <public/xen.h>

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 *  1. 'struct pfn_info' contains a 'struct list_head list'.
 *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

struct pfn_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            struct domain *domain;
            /* Type reference count and various PGT_xxx flags and fields. */
            u32 type_info;
        } inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Mask of possibly-tainted TLBs. */
            unsigned long cpu_mask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;
};
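
/*
 * The two requirements above let a generic buddy-style allocator thread
 * free frames through 'list' and record chunk sizes via PFN_ORDER().  A
 * minimal sketch of that pattern follows; free_list[] and MAX_ORDER are
 * assumed stand-ins for the real allocator state, not declarations made by
 * this header.
 */
#if 0 /* illustrative sketch only */
static struct list_head free_list[MAX_ORDER + 1];

static void add_free_chunk(struct pfn_info *head, unsigned int order)
{
    /* The head frame of a 2^order chunk records the chunk's order... */
    PFN_ORDER(head) = order;
    /* ...and is threaded onto the matching per-order free list. */
    list_add_tail(&head->list, &free_list[order]);
}
#endif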
/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
#define PGT_gdt_page        (5<<29) /* using this page in a GDT? */
#define PGT_ldt_page        (6<<29) /* using this page in an LDT? */
#define PGT_writable_page   (7<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7<<29) /* Bits 29-31. */
/* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1U<<_PGT_validated)
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         27
#define PGT_pinned          (1U<<_PGT_pinned)
/* The 10 most significant bits of virt address if this is a page table. */
#define PGT_va_shift        17
#define PGT_va_mask         (((1U<<10)-1)<<PGT_va_shift)
/* Is the back pointer still mutable (i.e. not fixed yet)? */
#define PGT_va_mutable      (((1U<<10)-1)<<PGT_va_shift)
/* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
#define PGT_va_unknown      (((1U<<10)-2)<<PGT_va_shift)
/* 17-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1U<<17)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1U<<_PGC_allocated)
/* 31-bit count of references to this frame. */
#define PGC_count_mask      ((1U<<31)-1)
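
/*
 * Taken together, type_info packs four fields into one 32-bit word: the
 * mutually-exclusive type in bits 29-31, the 'validated' and 'pinned' flags
 * in bits 28 and 27, the 10-bit virtual-address back pointer in bits 17-26,
 * and a 17-bit type-use count in bits 0-16.  The predicate below is a
 * sketch of decoding that layout; it is an illustration, not a helper that
 * exists in the Xen source.
 */
#if 0 /* illustrative sketch only */
static int is_validated_pagetable(u32 type_info)
{
    u32 type = type_info & PGT_type_mask;
    return (type >= PGT_l1_page_table) && (type <= PGT_l4_page_table) &&
           (type_info & PGT_validated) &&          /* contents checked    */
           ((type_info & PGT_count_mask) != 0);    /* still in use as one */
}
#endif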
/* We trust the slab allocator in slab.c, and our use of it. */
#define PageSlab(page)      (1)
#define PageSetSlab(page)   ((void)0)
#define PageClearSlab(page) ((void)0)

#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < xenheap_phys_end)

#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
    do {                                                                    \
        (_pfn)->u.inuse.domain = (_dom);                                    \
        /* The incremented type count is intended to pin to 'writable'. */  \
        (_pfn)->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;  \
        wmb(); /* install valid domain ptr before updating refcnt. */       \
        spin_lock(&(_dom)->page_alloc_lock);                                \
        /* _dom holds an allocation reference */                            \
        ASSERT((_pfn)->count_info == 0);                                    \
        (_pfn)->count_info |= PGC_allocated | 1;                            \
        if ( unlikely((_dom)->xenheap_pages++ == 0) )                       \
            get_knownalive_domain(_dom);                                    \
        list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list);                \
        spin_unlock(&(_dom)->page_alloc_lock);                              \
    } while ( 0 )
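
/*
 * A typical use of SHARE_PFN_WITH_DOMAIN() is handing a Xen-heap frame,
 * such as a domain's shared_info page, over to that domain with a pinned
 * writable type and an allocation reference.  The sketch below assumes an
 * alloc_xenheap_page()-style allocator and the virt_to_page() translation
 * macro; treat it as an outline of the calling pattern, not a quote from
 * the domain-building code.
 */
#if 0 /* illustrative sketch only */
static int give_shared_info(struct domain *d)
{
    void *v = (void *)alloc_xenheap_page();

    if ( v == NULL )
        return -ENOMEM;

    memset(v, 0, PAGE_SIZE);
    d->shared_info = v;

    /* Make the backing frame owned by, and writable to, domain d. */
    SHARE_PFN_WITH_DOMAIN(virt_to_page(v), d);

    return 0;
}
#endif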
#define INVALID_P2M_ENTRY (~0UL)

extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern unsigned long max_page;
void init_frametable(void);

int alloc_page_type(struct pfn_info *page, unsigned int type);
void free_page_type(struct pfn_info *page, unsigned int type);

static inline void put_page(struct pfn_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x  = y;
        nx = x - 1;
    }
    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );

    if ( unlikely((nx & PGC_count_mask) == 0) )
        free_domheap_page(page);
}


static inline int get_page(struct pfn_info *page,
                           struct domain *domain)
{
    u32 x, nx, y = page->count_info;
    struct domain *d, *nd = page->u.inuse.domain;

    do {
        x  = y;
        nx = x + 1;
        d  = nd;
        if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
             unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
             unlikely(d != domain) )                 /* Wrong owner? */
        {
            DPRINTK("Error pfn %08lx: ed=%p, sd=%p, caf=%08x, taf=%08x\n",
                    page_to_pfn(page), domain, d,
                    x, page->u.inuse.type_info);
            return 0;
        }
        __asm__ __volatile__(
            LOCK_PREFIX "cmpxchg8b %3"
            : "=d" (nd), "=a" (y), "=c" (d),
              "=m" (*(volatile u64 *)(&page->count_info))
            : "0" (d), "1" (x), "c" (d), "b" (nx) );
    }
    while ( unlikely(nd != d) || unlikely(y != x) );

    return 1;
}
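
/*
 * put_page() is a classic compare-and-swap retry loop on count_info alone.
 * get_page() must additionally check the owner, and the owner and count
 * have to be sampled and updated as one atomic unit, so it applies
 * cmpxchg8b to the eight bytes spanning count_info and the adjacent
 * u.inuse.domain pointer.  The sketch below restates that loop with a
 * hypothetical cmpxchg64() primitive (assumed, not a Xen function) to make
 * the 32-bit inline assembly above easier to follow.
 */
#if 0 /* illustrative sketch only */
static int get_page_sketch(struct pfn_info *page, struct domain *domain)
{
    u64 old, seen;

    do {
        old = *(volatile u64 *)&page->count_info;  /* count_info | domain */
        if ( ((u32)old & PGC_count_mask) == 0 ||             /* not allocated  */
             (((u32)old + 1) & PGC_count_mask) == 0 ||       /* count overflow */
             (struct domain *)(u32)(old >> 32) != domain )   /* wrong owner    */
            return 0;
        /* Bump the count; the owner word (high half) is left unchanged. */
        seen = cmpxchg64((u64 *)&page->count_info, old, old + 1);
    } while ( seen != old );                                 /* raced: retry   */

    return 1;
}
#endif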
void put_page_type(struct pfn_info *page);
int  get_page_type(struct pfn_info *page, u32 type);


static inline void put_page_and_type(struct pfn_info *page)
{
    put_page_type(page);
    put_page(page);
}


static inline int get_page_and_type(struct pfn_info *page,
                                    struct domain *domain,
                                    u32 type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
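
/*
 * Callers normally bracket any use of a guest frame with this pair: take a
 * general reference plus a type reference on the way in, drop both on the
 * way out.  A sketch of the pattern, with PGT_writable_page standing in for
 * whatever type the caller actually needs:
 */
#if 0 /* illustrative sketch only */
static int touch_guest_frame(struct pfn_info *page, struct domain *d)
{
    if ( !get_page_and_type(page, d, PGT_writable_page) )
        return 0;              /* wrong owner, being freed, or type clash */

    /* ... the frame is pinned as writable for this region ... */

    put_page_and_type(page);   /* drop the type ref, then the general ref */
    return 1;
}
#endif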
#define ASSERT_PAGE_IS_TYPE(_p, _t)                            \
    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
#define ASSERT_PAGE_IS_DOMAIN(_p, _d)                          \
    ASSERT(((_p)->count_info & PGC_count_mask) != 0);          \
    ASSERT((_p)->u.inuse.domain == (_d))

int check_descriptor(unsigned long *d);

/*
 * Use currently-executing domain's pagetables on the specified CPUs.
 * i.e., stop borrowing someone else's tables if you are the idle domain.
 */
void synchronise_pagetables(unsigned long cpu_mask);

/*
 * The MPT (machine->physical mapping table) is an array of word-sized
 * values, indexed on machine frame number. It is expected that guest OSes
 * will use it to store a "physical" frame number to give the appearance of
 * contiguous (or near contiguous) physical memory.
 */
#undef  machine_to_phys_mapping
#ifdef __x86_64__
extern unsigned long *machine_to_phys_mapping;
#else
/* Don't call virt_to_phys on this: it isn't direct mapped.  Use
   m2p_start_mfn instead. */
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
extern unsigned long m2p_start_mfn;
#endif

#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
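
/*
 * In other words: Xen fills in the reverse mapping when it gives a machine
 * frame to a guest, and anything that needs a guest-visible frame number
 * just indexes the table by MFN.  The helper names below are assumptions
 * for illustration, not functions defined elsewhere in Xen.
 */
#if 0 /* illustrative sketch only */
static void record_assignment(unsigned long mfn, unsigned long guest_pfn)
{
    set_machinetophys(mfn, guest_pfn);  /* machine_to_phys_mapping[mfn] = guest_pfn */
}

static unsigned long mfn_to_guest_pfn(unsigned long mfn)
{
    return machine_to_phys_mapping[mfn];
}
#endif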
#define DEFAULT_GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY+1)
#define DEFAULT_GDT_ADDRESS ((unsigned long)gdt_table)

#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
int memguard_is_guarded(void *p);
#else
#define memguard_init(_s)              (_s)
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)
#define memguard_is_guarded(_p)        (0)
#endif
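
/*
 * With MEMORY_GUARD enabled, guarded ranges are genuinely fenced off so
 * that stray accesses fault; in other builds the same calls compile to
 * no-ops, so callers can use them unconditionally.  A sketch of the calling
 * pattern (fence_off() is a hypothetical name):
 */
#if 0 /* illustrative sketch only */
static void fence_off(void *p, unsigned long len)
{
    /* With MEMORY_GUARD, the range is guarded until unguarded again. */
    memguard_guard_range(p, len);

    /* ... any access to [p, p+len) should now fault ... */

    memguard_unguard_range(p, len);     /* make it usable again */
}
#endif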
typedef struct {
    void (*enable)(struct domain *);
    void (*disable)(struct domain *);
} vm_assist_info_t;
extern vm_assist_info_t vm_assist_info[];


/* Writable Pagetables */
typedef struct {
    /* Linear address where the guest is updating the p.t. page. */
    unsigned long l1va;
    /* Copy of the p.t. page, taken before guest is given write access. */
    l1_pgentry_t *page;
    /* A temporary Xen mapping of the actual p.t. page. */
    l1_pgentry_t *pl1e;
    /* Index in L2 page table where this L1 p.t. is always hooked. */
    unsigned int l2_idx; /* NB. Only used for PTWR_PT_ACTIVE. */
} ptwr_ptinfo_t;

typedef struct {
    ptwr_ptinfo_t ptinfo[2];
} __cacheline_aligned ptwr_info_t;

extern ptwr_info_t ptwr_info[];

#define PTWR_PT_ACTIVE 0
#define PTWR_PT_INACTIVE 1

#define PTWR_CLEANUP_ACTIVE 1
#define PTWR_CLEANUP_INACTIVE 2

void ptwr_flush(const int);
int ptwr_do_page_fault(unsigned long);

#define __cleanup_writable_pagetable(_what)                                 \
    do {                                                                    \
        int cpu = smp_processor_id();                                       \
        if ((_what) & PTWR_CLEANUP_ACTIVE)                                  \
            if (ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va)                 \
                ptwr_flush(PTWR_PT_ACTIVE);                                 \
        if ((_what) & PTWR_CLEANUP_INACTIVE)                                \
            if (ptwr_info[cpu].ptinfo[PTWR_PT_INACTIVE].l1va)               \
                ptwr_flush(PTWR_PT_INACTIVE);                               \
    } while ( 0 )

#define cleanup_writable_pagetable(_d)                                      \
    do {                                                                    \
        if ( unlikely(VM_ASSIST((_d), VMASST_TYPE_writable_pagetables)) )   \
            __cleanup_writable_pagetable(PTWR_CLEANUP_ACTIVE |              \
                                         PTWR_CLEANUP_INACTIVE);            \
    } while ( 0 )
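
/*
 * Any path that needs the guest's page tables to be consistent again (for
 * example before validating page-table updates or switching onto them)
 * flushes outstanding writable mappings first.  A sketch of the calling
 * pattern; the function name below is hypothetical:
 */
#if 0 /* illustrative sketch only */
static void before_walking_guest_pagetables(struct domain *d)
{
    /* No-op unless the guest enabled VMASST_TYPE_writable_pagetables. */
    cleanup_writable_pagetable(d);

    /* ... the real page tables now reflect all guest writes ... */
}
#endif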
#ifndef NDEBUG
void audit_domain(struct domain *d);
void audit_domains(void);
#else
#define audit_domain(_d)  ((void)0)
#define audit_domains()   ((void)0)
#endif

void propagate_page_fault(unsigned long addr, u16 error_code);

#endif /* __ASM_X86_MM_H__ */