
view xen/include/asm-x86/mm.h @ 2632:a4fbb98f00cb

bitkeeper revision 1.1159.1.202 (41616cc2-ciBh_VkJKwmQaCL6BEU6Q)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/anfs/scratch/labyrinth/iap10/xeno-clone/xeno.bk
author iap10@labyrinth.cl.cam.ac.uk
date Mon Oct 04 15:31:14 2004 +0000 (2004-10-04)
parents a8fef40fad11 a28d3cf3832c
children 0dfd459518e4
#ifndef __ASM_X86_MM_H__
#define __ASM_X86_MM_H__

#include <xen/config.h>
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/desc.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <hypervisor-ifs/hypervisor-if.h>

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 *  1. 'struct pfn_info' contains a 'struct list_head list'.
 *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

struct pfn_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            struct domain *domain;
            /* Type reference count and various PGT_xxx flags and fields. */
            u32 type_info;
        } inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Mask of possibly-tainted TLBs. */
            unsigned long cpu_mask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;
};
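
/*
 * Illustrative sketch, not part of the original header: walking a free list
 * of frames via the 'list' field and PFN_ORDER().  The function name and the
 * 'some_free_list' parameter are hypothetical; each entry is assumed to be
 * the head frame of a 2^order contiguous free chunk, as described above.
 */
static inline unsigned long count_free_pages_sketch(struct list_head *some_free_list)
{
    struct list_head *ent;
    unsigned long total = 0;
    list_for_each ( ent, some_free_list )
    {
        struct pfn_info *pg = list_entry(ent, struct pfn_info, list);
        total += 1UL << PFN_ORDER(pg);  /* pages in this free chunk */
    }
    return total;
}
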
 /* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
#define PGT_gdt_page        (5<<29) /* using this page in a GDT? */
#define PGT_ldt_page        (6<<29) /* using this page in an LDT? */
#define PGT_writable_page   (7<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7<<29) /* Bits 29-31. */
 /* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1<<_PGT_validated)
 /* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         27
#define PGT_pinned          (1<<_PGT_pinned)
 /* The 10 most significant bits of virt address if this is a page table. */
#define PGT_va_shift        17
#define PGT_va_mask         (((1<<10)-1)<<PGT_va_shift)
 /* Is the back pointer still mutable (i.e. not fixed yet)? */
#define PGT_va_mutable      (((1<<10)-1)<<PGT_va_shift)
 /* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
#define PGT_va_unknown      (((1<<10)-2)<<PGT_va_shift)
 /* 17-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1<<17)-1)
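
/*
 * Illustrative sketch, not part of the original header: unpacking the fields
 * that the PGT_* definitions above lay out inside type_info.  The function
 * name is hypothetical.
 */
static inline void dump_type_info_sketch(struct pfn_info *page)
{
    u32 t = page->u.inuse.type_info;
    DPRINTK("type=%08x count=%u validated=%d pinned=%d va=%08x\n",
            t & PGT_type_mask,   /* one of the mutually exclusive types  */
            t & PGT_count_mask,  /* 17-bit count of uses as this type    */
            !!(t & PGT_validated),
            !!(t & PGT_pinned),
            t & PGT_va_mask);    /* back-pointer bits for page tables    */
}
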
 /* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1<<_PGC_allocated)
 /* This bit is always set, guaranteeing that the count word is never zero. */
#define _PGC_always_set     30
#define PGC_always_set      (1<<_PGC_always_set)
 /* 30-bit count of references to this frame. */
#define PGC_count_mask      ((1<<30)-1)
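
/*
 * Illustrative sketch, not part of the original header: count_info decides
 * which arm of the union in struct pfn_info is meaningful.  The helper name
 * is hypothetical.
 */
static inline struct domain *page_owner_sketch(struct pfn_info *page)
{
    /* A non-zero general reference count means the frame is allocated... */
    if ( (page->count_info & PGC_count_mask) != 0 )
        return page->u.inuse.domain;  /* ...so the 'inuse' fields apply. */
    /* Otherwise the frame is on a free list and only 'free' is valid;
       PGC_always_set keeps the count word itself from ever reading zero. */
    return NULL;
}
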
/* We trust the slab allocator in slab.c, and our use of it. */
#define PageSlab(page)      (1)
#define PageSetSlab(page)   ((void)0)
#define PageClearSlab(page) ((void)0)

#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < xenheap_phys_end)

#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
    do {                                                                    \
        (_pfn)->u.inuse.domain = (_dom);                                    \
        /* The incremented type count is intended to pin to 'writable'. */  \
        (_pfn)->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;  \
        wmb(); /* install valid domain ptr before updating refcnt. */       \
        spin_lock(&(_dom)->page_alloc_lock);                                \
        /* _dom holds an allocation reference */                            \
        ASSERT((_pfn)->count_info == PGC_always_set);                       \
        (_pfn)->count_info |= PGC_allocated | 1;                            \
        if ( unlikely((_dom)->xenheap_pages++ == 0) )                       \
            get_knownalive_domain(_dom);                                    \
        list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list);                \
        spin_unlock(&(_dom)->page_alloc_lock);                              \
    } while ( 0 )
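
/*
 * Illustrative sketch, not part of the original header: handing an
 * already-allocated Xen-heap frame to a domain with the macro above.  The
 * wrapper name is hypothetical; the caller is assumed to have obtained
 * 'page' from the Xen heap.
 */
static inline void share_with_domain_sketch(struct pfn_info *page,
                                            struct domain *d)
{
    ASSERT(IS_XEN_HEAP_FRAME(page));
    /* Sets the owner, pins the type to writable, and takes a general
       reference, all under d->page_alloc_lock. */
    SHARE_PFN_WITH_DOMAIN(page, d);
}
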
#define INVALID_P2M_ENTRY (~0UL)

extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern unsigned long max_page;
void init_frametable(void *frametable_vstart, unsigned long nr_pages);

int alloc_page_type(struct pfn_info *page, unsigned int type);
void free_page_type(struct pfn_info *page, unsigned int type);

static inline void put_page(struct pfn_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x  = y;
        nx = x - 1;
    }
    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );

    if ( unlikely((nx & PGC_count_mask) == 0) )
        free_domheap_page(page);
}


static inline int get_page(struct pfn_info *page,
                           struct domain *domain)
{
    u32 x, nx, y = page->count_info;
    struct domain *d, *nd = page->u.inuse.domain;

    do {
        x  = y;
        nx = x + 1;
        d  = nd;
        if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
             unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
             unlikely(d != domain) )                 /* Wrong owner? */
        {
            DPRINTK("Error pfn %08lx: ed=%p, sd=%p, caf=%08x, taf=%08x\n",
                    page_to_pfn(page), domain, d,
                    x, page->u.inuse.type_info);
            return 0;
        }
        __asm__ __volatile__(
            LOCK_PREFIX "cmpxchg8b %3"
            : "=d" (nd), "=a" (y), "=c" (d),
              "=m" (*(volatile u64 *)(&page->count_info))
            : "0" (d), "1" (x), "c" (d), "b" (nx) );
    }
    while ( unlikely(nd != d) || unlikely(y != x) );

    return 1;
}
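
/*
 * Illustrative sketch, not part of the original header: the usual pattern for
 * holding a temporary general reference on a guest frame.  The function name
 * and the elided 'use the frame' step are placeholders.
 */
static inline int with_page_ref_sketch(struct pfn_info *page, struct domain *d)
{
    /* Fails if the frame is free, owned by a different domain, or the
       reference count would overflow. */
    if ( unlikely(!get_page(page, d)) )
        return 0;

    /* ... the frame cannot be freed or change owner here ... */

    put_page(page);  /* Drop the reference; may free the frame. */
    return 1;
}
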
void put_page_type(struct pfn_info *page);
int  get_page_type(struct pfn_info *page, u32 type);

static inline void put_page_and_type(struct pfn_info *page)
{
    put_page_type(page);
    put_page(page);
}


static inline int get_page_and_type(struct pfn_info *page,
                                    struct domain *domain,
                                    u32 type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
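
/*
 * Illustrative sketch, not part of the original header: taking both a general
 * and a type reference before treating a guest frame as an L1 page table.
 * The wrapper name is hypothetical, and real callers also encode the
 * PGT_va_* back-pointer bits in the requested type.
 */
static inline int with_l1_table_ref_sketch(struct pfn_info *page,
                                           struct domain *d)
{
    /* Fails on the get_page() checks, or if the frame cannot be validated
       as (or is already in use as a conflicting) type. */
    if ( unlikely(!get_page_and_type(page, d, PGT_l1_page_table)) )
        return 0;

    /* ... the frame is guaranteed to remain a validated L1 table here ... */

    put_page_and_type(page);  /* Drop the type ref, then the general ref. */
    return 1;
}
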
#define ASSERT_PAGE_IS_TYPE(_p, _t)                            \
    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
#define ASSERT_PAGE_IS_DOMAIN(_p, _d)                          \
    ASSERT(((_p)->count_info & PGC_count_mask) != 0);          \
    ASSERT((_p)->u.inuse.domain == (_d))

int check_descriptor(unsigned long *d);

/*
 * Use currently-executing domain's pagetables on the specified CPUs.
 * i.e., stop borrowing someone else's tables if you are the idle domain.
 */
void synchronise_pagetables(unsigned long cpu_mask);

/*
 * The MPT (machine->physical mapping table) is an array of word-sized
 * values, indexed on machine frame number. It is expected that guest OSes
 * will use it to store a "physical" frame number to give the appearance of
 * contiguous (or near contiguous) physical memory.
 */
#undef  machine_to_phys_mapping
#ifdef __x86_64__
extern unsigned long *machine_to_phys_mapping;
#else
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
#endif
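
/*
 * Illustrative sketch, not part of the original header: reading the MPT to
 * map a machine frame number to the guest-chosen "physical" frame number.
 * The function name is hypothetical; callers are assumed to have checked
 * mfn < max_page.
 */
static inline unsigned long mfn_to_guest_pfn_sketch(unsigned long mfn)
{
    /* The MPT is a simple word-sized array indexed by machine frame number. */
    return machine_to_phys_mapping[mfn];
}
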
/* Part of the domain API. */
int do_mmu_update(mmu_update_t *updates, int count, int *success_count);

#define DEFAULT_GDT_ENTRIES     (LAST_RESERVED_GDT_ENTRY+1)
#define DEFAULT_GDT_ADDRESS     ((unsigned long)gdt_table)

#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
int memguard_is_guarded(void *p);
#else
#define memguard_init(_s)              (_s)
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)
#define memguard_is_guarded(_p)        (0)
#endif
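
/*
 * Illustrative sketch, not part of the original header: bracketing a region
 * with the memguard calls above.  With MEMORY_GUARD defined, accesses to the
 * guarded range fault; otherwise the calls compile away to no-ops.  The
 * function and parameter names are hypothetical.
 */
static inline void guard_scratch_area_sketch(void *scratch, unsigned long len)
{
    memguard_guard_range(scratch, len);
    /* ... the range must not be touched while guarded ... */
    memguard_unguard_range(scratch, len);
}
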
typedef struct {
    void (*enable)(struct domain *);
    void (*disable)(struct domain *);
} vm_assist_info_t;
extern vm_assist_info_t vm_assist_info[];


/* Writable Pagetables */
typedef struct {
    unsigned long l1va;
    l1_pgentry_t *page;
    l1_pgentry_t *pl1e;
} ptwr_ptinfo_t;

typedef struct {
    ptwr_ptinfo_t ptinfo[2];
    long active_pteidx;
} __cacheline_aligned ptwr_info_t;

extern ptwr_info_t ptwr_info[];

#define PTWR_PT_ACTIVE 0
#define PTWR_PT_INACTIVE 1

#define PTWR_CLEANUP_ACTIVE 1
#define PTWR_CLEANUP_INACTIVE 2

void ptwr_flush(const int);
int ptwr_do_page_fault(unsigned long);

#define __cleanup_writable_pagetable(_what)                         \
    do {                                                            \
        int cpu = smp_processor_id();                               \
        if ((_what) & PTWR_CLEANUP_ACTIVE)                          \
            if (ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va)         \
                ptwr_flush(PTWR_PT_ACTIVE);                         \
        if ((_what) & PTWR_CLEANUP_INACTIVE)                        \
            if (ptwr_info[cpu].ptinfo[PTWR_PT_INACTIVE].l1va)       \
                ptwr_flush(PTWR_PT_INACTIVE);                       \
    } while ( 0 )

#define cleanup_writable_pagetable(_d, _w)                                  \
    do {                                                                    \
        if ( unlikely(VM_ASSIST((_d), VMASST_TYPE_writable_pagetables)) )   \
            __cleanup_writable_pagetable(_w);                               \
    } while ( 0 )
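
/*
 * Illustrative sketch, not part of the original header: flushing any pending
 * writable-pagetable state for a domain before inspecting or modifying its
 * page tables directly.  The function name is hypothetical.
 */
static inline void flush_ptwr_before_pt_walk_sketch(struct domain *d)
{
    /* No-op unless the guest enabled VMASST_TYPE_writable_pagetables. */
    cleanup_writable_pagetable(
        d, PTWR_CLEANUP_ACTIVE | PTWR_CLEANUP_INACTIVE);
}
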
#endif /* __ASM_X86_MM_H__ */