debuggers.hg: view of xen/include/asm-x86/page.h @ 22855:1d1eec7e1fb4

#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

/*
 * It is important that the masks are signed quantities. This ensures that
 * the compiler sign-extends a 32-bit mask to 64 bits if that is required.
 */
#ifndef __ASSEMBLY__
#define PAGE_SIZE (1L << PAGE_SHIFT)
#else
#define PAGE_SIZE (1 << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK (~0)
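
/*
 * Illustrative sketch (not part of the original header): PAGE_MASK being
 * signed is what makes it safe to apply to a wider type. On x86_32 PAE,
 * PAGE_MASK is a 32-bit quantity but paddr_t is 64 bits wide; the signed
 * mask sign-extends to 0xfffffffffffff000, so the high address bits
 * survive. The address below is an arbitrary example value.
 */
#if 0
static inline paddr_t example_page_base(void)
{
    paddr_t pa = 0x100012345678ULL;  /* a >4GiB physical address */
    return pa & PAGE_MASK;           /* 0x100012345000: bits 32+ preserved */
}
#endif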

#ifndef __ASSEMBLY__
# include <asm/types.h>
# include <xen/lib.h>
#endif

#if defined(__i386__)
# include <asm/x86_32/page.h>
#elif defined(__x86_64__)
# include <asm/x86_64/page.h>
#endif

/* Read a pte atomically from memory. */
#define l1e_read_atomic(l1ep) \
    l1e_from_intpte(pte_read_atomic(&l1e_get_intpte(*(l1ep))))
#define l2e_read_atomic(l2ep) \
    l2e_from_intpte(pte_read_atomic(&l2e_get_intpte(*(l2ep))))
#define l3e_read_atomic(l3ep) \
    l3e_from_intpte(pte_read_atomic(&l3e_get_intpte(*(l3ep))))
#define l4e_read_atomic(l4ep) \
    l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep))))

/* Write a pte atomically to memory. */
#define l1e_write_atomic(l1ep, l1e) \
    pte_write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write_atomic(l2ep, l2e) \
    pte_write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write_atomic(l3ep, l3e) \
    pte_write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write_atomic(l4ep, l4e) \
    pte_write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))

/*
 * Write a pte safely but non-atomically to memory.
 * The PTE may become temporarily not-present during the update.
 */
#define l1e_write(l1ep, l1e) \
    pte_write(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write(l2ep, l2e) \
    pte_write(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write(l3ep, l3e) \
    pte_write(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write(l4ep, l4e) \
    pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
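
/*
 * Illustrative sketch (not part of the original header): typical use of
 * the accessors above on a live L1 slot. l1e_read_atomic()/l1e_write_atomic()
 * perform single untearable accesses; plain l1e_write() may transiently
 * clear _PAGE_PRESENT while the two halves of a PAE entry are updated.
 * example_update_l1e() is a hypothetical helper, not a real Xen function.
 */
#if 0
static inline void example_update_l1e(l1_pgentry_t *pl1e)
{
    l1_pgentry_t old = l1e_read_atomic(pl1e);  /* consistent snapshot */
    l1e_write_atomic(pl1e, old);               /* single atomic store */
}
#endif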

/* Get direct integer representation of a pte's contents (intpte_t). */
#define l1e_get_intpte(x) ((x).l1)
#define l2e_get_intpte(x) ((x).l2)
#define l3e_get_intpte(x) ((x).l3)
#define l4e_get_intpte(x) ((x).l4)

/* Get pfn mapped by pte (unsigned long). */
#define l1e_get_pfn(x) \
    ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l2e_get_pfn(x) \
    ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l3e_get_pfn(x) \
    ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l4e_get_pfn(x) \
    ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))

/* Get physical address of page mapped by pte (paddr_t). */
#define l1e_get_paddr(x) \
    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
#define l2e_get_paddr(x) \
    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
#define l3e_get_paddr(x) \
    ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
#define l4e_get_paddr(x) \
    ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))

/* Get pointer to info structure of page mapped by pte (struct page_info *). */
#define l1e_get_page(x) (mfn_to_page(l1e_get_pfn(x)))
#define l2e_get_page(x) (mfn_to_page(l2e_get_pfn(x)))
#define l3e_get_page(x) (mfn_to_page(l3e_get_pfn(x)))
#define l4e_get_page(x) (mfn_to_page(l4e_get_pfn(x)))

/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x) (get_pte_flags((x).l1))
#define l2e_get_flags(x) (get_pte_flags((x).l2))
#define l3e_get_flags(x) (get_pte_flags((x).l3))
#define l4e_get_flags(x) (get_pte_flags((x).l4))

/* Construct an empty pte. */
#define l1e_empty() ((l1_pgentry_t) { 0 })
#define l2e_empty() ((l2_pgentry_t) { 0 })
#define l3e_empty() ((l3_pgentry_t) { 0 })
#define l4e_empty() ((l4_pgentry_t) { 0 })

/* Construct a pte from a pfn and access flags. */
#define l1e_from_pfn(pfn, flags) \
    ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l2e_from_pfn(pfn, flags) \
    ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l3e_from_pfn(pfn, flags) \
    ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l4e_from_pfn(pfn, flags) \
    ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
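
/*
 * Illustrative sketch (not part of the original header): composing an L1
 * entry and then reading its fields back. The pfn and the use of
 * __PAGE_HYPERVISOR (defined later in this header) are arbitrary example
 * choices.
 */
#if 0
static inline void example_pte_roundtrip(void)
{
    l1_pgentry_t l1e = l1e_from_pfn(0x1234, __PAGE_HYPERVISOR);
    ASSERT(l1e_get_pfn(l1e) == 0x1234);
    ASSERT(l1e_get_paddr(l1e) == ((paddr_t)0x1234 << PAGE_SHIFT));
    ASSERT(l1e_get_flags(l1e) & _PAGE_PRESENT);
}
#endif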

/* Construct a pte from a physical address and access flags. */
#ifndef __ASSEMBLY__
static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l1_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l2_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l3_pgentry_t) { pa | put_pte_flags(flags) };
}
#if CONFIG_PAGING_LEVELS >= 4
static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l4_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif
#endif /* !__ASSEMBLY__ */
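
/*
 * Illustrative sketch (not part of the original header): the *_from_paddr()
 * constructors insist on a page-aligned address; the ASSERT above catches
 * callers that pass flag bits mixed into the address. Values are arbitrary.
 */
#if 0
static inline void example_from_paddr(void)
{
    l2_pgentry_t ok = l2e_from_paddr(0x200000, __PAGE_HYPERVISOR | _PAGE_PSE);
    l2_pgentry_t bad = l2e_from_paddr(0x200001, __PAGE_HYPERVISOR); /* ASSERTs */
}
#endif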

/* Construct a pte from its direct integer representation. */
#define l1e_from_intpte(intpte) ((l1_pgentry_t) { (intpte_t)(intpte) })
#define l2e_from_intpte(intpte) ((l2_pgentry_t) { (intpte_t)(intpte) })
#define l3e_from_intpte(intpte) ((l3_pgentry_t) { (intpte_t)(intpte) })
#define l4e_from_intpte(intpte) ((l4_pgentry_t) { (intpte_t)(intpte) })

/* Construct a pte from a page pointer and access flags. */
#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_mfn(page),(flags)))
#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_mfn(page),(flags)))
#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_mfn(page),(flags)))
#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_mfn(page),(flags)))

/* Add extra flags to an existing pte. */
#define l1e_add_flags(x, flags) ((x).l1 |= put_pte_flags(flags))
#define l2e_add_flags(x, flags) ((x).l2 |= put_pte_flags(flags))
#define l3e_add_flags(x, flags) ((x).l3 |= put_pte_flags(flags))
#define l4e_add_flags(x, flags) ((x).l4 |= put_pte_flags(flags))

/* Remove flags from an existing pte. */
#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))
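
/*
 * Illustrative sketch (not part of the original header): the add/remove
 * helpers mutate the entry in place, so they take the entry itself rather
 * than a pointer.
 */
#if 0
static inline void example_flag_edit(void)
{
    l1_pgentry_t l1e = l1e_from_pfn(0x1234, __PAGE_HYPERVISOR);
    l1e_remove_flags(l1e, _PAGE_RW);     /* make the mapping read-only */
    l1e_add_flags(l1e, _PAGE_GLOBAL);    /* survive non-global TLB flushes */
}
#endif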

/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l2e_has_changed(x,y,flags) \
    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l3e_has_changed(x,y,flags) \
    ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l4e_has_changed(x,y,flags) \
    ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
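
/*
 * Illustrative sketch (not part of the original header): *_has_changed()
 * compares the frame number plus only the flags the caller names, so bits
 * it is not asked about (here _PAGE_ACCESSED) are ignored.
 */
#if 0
static inline void example_has_changed(void)
{
    l1_pgentry_t a = l1e_from_pfn(5, _PAGE_PRESENT);
    l1_pgentry_t b = l1e_from_pfn(5, _PAGE_PRESENT | _PAGE_ACCESSED);
    ASSERT(!l1e_has_changed(a, b, _PAGE_PRESENT)); /* only ACCESSED differs */
    ASSERT(l1e_has_changed(a, b, _PAGE_ACCESSED));
}
#endif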

/* Pagetable walking. */
#define l2e_to_l1e(x) ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
#define l3e_to_l2e(x) ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x) ((l3_pgentry_t *)__va(l4e_get_paddr(x)))

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a) \
    (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(a) \
    (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(a) \
    (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(a) \
    (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
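
/*
 * Illustrative sketch (not part of the original header): splitting a
 * 64-bit virtual address into its four table indices. With 4KiB pages and
 * four paging levels the shifts are 12/21/30/39 and each offset is a
 * 9-bit index (0..511).
 */
#if 0
static inline void example_va_decompose(unsigned long va)
{
    unsigned int i1 = l1_table_offset(va);  /* bits 20:12 */
    unsigned int i2 = l2_table_offset(va);  /* bits 29:21 */
    unsigned int i3 = l3_table_offset(va);  /* bits 38:30 */
    unsigned int i4 = l4_table_offset(va);  /* bits 47:39 */
}
#endif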

/* Convert a pointer to a page-table entry into pagetable slot index. */
#define pgentry_ptr_to_slot(_p) \
    (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))

#ifndef __ASSEMBLY__

/* Page-table type. */
#if CONFIG_PAGING_LEVELS == 3
/* x86_32 PAE */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 4
/* x86_64 */
typedef struct { u64 pfn; } pagetable_t;
#endif
#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT)
#define pagetable_get_page(x) mfn_to_page((x).pfn)
#define pagetable_get_pfn(x) ((x).pfn)
#define pagetable_get_mfn(x) _mfn(((x).pfn))
#define pagetable_is_null(x) ((x).pfn == 0)
#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
#define pagetable_from_page(pg) pagetable_from_pfn(page_to_mfn(pg))
#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
#define pagetable_null() pagetable_from_pfn(0)
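
/*
 * Illustrative sketch (not part of the original header): pagetable_t wraps
 * the root-table frame number in a struct so it cannot be confused with a
 * plain integer; the helpers convert between its representations.
 */
#if 0
static inline void example_pagetable(void)
{
    pagetable_t pt = pagetable_from_pfn(0x1000);
    ASSERT(pagetable_get_pfn(pt) == 0x1000);
    ASSERT(pagetable_get_paddr(pt) == ((paddr_t)0x1000 << PAGE_SHIFT));
    ASSERT(!pagetable_is_null(pt));
    ASSERT(pagetable_is_null(pagetable_null()));
}
#endif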

void clear_page_sse2(void *);
#define clear_page(_p) (cpu_has_xmm2 ? \
    clear_page_sse2((void *)(_p)) : \
    (void)memset((void *)(_p), 0, PAGE_SIZE))
void copy_page_sse2(void *, const void *);
#define copy_page(_t,_f) (cpu_has_xmm2 ? \
    copy_page_sse2(_t, _f) : \
    (void)memcpy(_t, _f, PAGE_SIZE))
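
/*
 * Illustrative sketch (not part of the original header): both wrappers
 * dispatch to the SSE2 routines when cpu_has_xmm2 and fall back to
 * memset()/memcpy() otherwise. Callers pass page-aligned Xen-heap pointers.
 */
#if 0
static inline void example_page_copy(void *dst, const void *src)
{
    clear_page(dst);       /* zero all PAGE_SIZE bytes of dst */
    copy_page(dst, src);   /* then overwrite dst with src's page */
}
#endif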

/* Convert between Xen-heap virtual addresses and machine addresses. */
#define __pa(x) (virt_to_maddr(x))
#define __va(x) (maddr_to_virt(x))

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))

/* Convert between machine frame numbers and page-info structures. */
#define __mfn_to_page(mfn) (frame_table + pfn_to_pdx(mfn))
#define __page_to_mfn(pg) pdx_to_pfn((unsigned long)((pg) - frame_table))

/* Convert between machine addresses and page-info structures. */
#define __maddr_to_page(ma) __mfn_to_page((ma) >> PAGE_SHIFT)
#define __page_to_maddr(pg) ((paddr_t)__page_to_mfn(pg) << PAGE_SHIFT)

/* Convert between frame number and address formats. */
#define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define __paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
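
/*
 * Illustrative sketch (not part of the original header): the conversions
 * above compose into round trips between frame numbers, addresses and
 * page_info structures. The mfn is an arbitrary example value.
 */
#if 0
static inline void example_conversions(void)
{
    unsigned long mfn = 0x1234;
    struct page_info *pg = __mfn_to_page(mfn);
    ASSERT(__page_to_mfn(pg) == mfn);
    ASSERT(__pfn_to_paddr(mfn) == ((paddr_t)mfn << PAGE_SHIFT));
    ASSERT(__paddr_to_pfn(__pfn_to_paddr(mfn)) == mfn);
}
#endif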

/* Convert between machine frame numbers and spage-info structures. */
#define __mfn_to_spage(mfn) (spage_table + pfn_to_sdx(mfn))
#define __spage_to_mfn(pg) sdx_to_pfn((unsigned long)((pg) - spage_table))

/* Convert between page-info structures and spage-info structures. */
#define page_to_spage(page) \
    (spage_table + (((page) - frame_table) >> (SUPERPAGE_SHIFT-PAGE_SHIFT)))
#define spage_to_page(spage) \
    (frame_table + (((spage) - spage_table) << (SUPERPAGE_SHIFT-PAGE_SHIFT)))

/*
 * We define non-underscored wrappers for the above conversion functions.
 * These are overridden in various source files while the underscored
 * versions remain intact.
 */
#define mfn_valid(mfn) __mfn_valid(mfn)
#define virt_to_mfn(va) __virt_to_mfn(va)
#define mfn_to_virt(mfn) __mfn_to_virt(mfn)
#define virt_to_maddr(va) __virt_to_maddr((unsigned long)(va))
#define maddr_to_virt(ma) __maddr_to_virt((unsigned long)(ma))
#define mfn_to_page(mfn) __mfn_to_page(mfn)
#define page_to_mfn(pg) __page_to_mfn(pg)
#define mfn_to_spage(mfn) __mfn_to_spage(mfn)
#define spage_to_mfn(pg) __spage_to_mfn(pg)
#define maddr_to_page(ma) __maddr_to_page(ma)
#define page_to_maddr(pg) __page_to_maddr(pg)
#define virt_to_page(va) __virt_to_page(va)
#define page_to_virt(pg) __page_to_virt(pg)
#define pfn_to_paddr(pfn) __pfn_to_paddr(pfn)
#define paddr_to_pfn(pa) __paddr_to_pfn(pa)
#define paddr_to_pdx(pa) pfn_to_pdx(paddr_to_pfn(pa))

#endif /* !defined(__ASSEMBLY__) */

/* High table entries are reserved by the hypervisor. */
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE 0
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE 0

#define DOMAIN_ENTRIES_PER_L4_PAGETABLE \
    (l4_table_offset(HYPERVISOR_VIRT_START))
#define GUEST_ENTRIES_PER_L4_PAGETABLE \
    (l4_table_offset(HYPERVISOR_VIRT_END))
#define HYPERVISOR_ENTRIES_PER_L4_PAGETABLE \
    (L4_PAGETABLE_ENTRIES - GUEST_ENTRIES_PER_L4_PAGETABLE \
     + DOMAIN_ENTRIES_PER_L4_PAGETABLE)

/* Where to find each level of the linear mapping */
#define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
#define __linear_l2_table \
    ((l2_pgentry_t *)(__linear_l1_table + l1_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l3_table \
    ((l3_pgentry_t *)(__linear_l2_table + l2_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l4_table \
    ((l4_pgentry_t *)(__linear_l3_table + l3_linear_offset(LINEAR_PT_VIRT_START)))

#ifndef __ASSEMBLY__
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
#if CONFIG_PAGING_LEVELS == 3
extern l2_pgentry_t idle_pg_table_l2[
    ROOT_PAGETABLE_ENTRIES * L2_PAGETABLE_ENTRIES];
#elif CONFIG_PAGING_LEVELS == 4
extern l2_pgentry_t *compat_idle_pg_table_l2;
extern unsigned int m2p_compat_vstart;
#endif
extern l2_pgentry_t l2_identmap[4*L2_PAGETABLE_ENTRIES];
void paging_init(void);
void setup_idle_pagetable(void);
#endif /* !defined(__ASSEMBLY__) */

#define _PAGE_PRESENT  0x001U
#define _PAGE_RW       0x002U
#define _PAGE_USER     0x004U
#define _PAGE_PWT      0x008U
#define _PAGE_PCD      0x010U
#define _PAGE_ACCESSED 0x020U
#define _PAGE_DIRTY    0x040U
#define _PAGE_PAT      0x080U
#define _PAGE_PSE      0x080U
#define _PAGE_GLOBAL   0x100U
#define _PAGE_AVAIL0   0x200U
#define _PAGE_AVAIL1   0x400U
#define _PAGE_AVAIL2   0x800U
#define _PAGE_AVAIL    0xE00U
#define _PAGE_PSE_PAT  0x1000U
#define _PAGE_PAGED    0x2000U
#define _PAGE_SHARED   0x4000U

/*
 * Debug option: Ensure that granted mappings are not implicitly unmapped.
 * WARNING: This will need to be disabled to run OSes that use the spare PTE
 * bits themselves (e.g., *BSD).
 */
#ifdef NDEBUG
#undef _PAGE_GNTTAB
#endif
#ifndef _PAGE_GNTTAB
#define _PAGE_GNTTAB 0
#endif

#define __PAGE_HYPERVISOR \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#define GRANT_PTE_FLAGS \
    (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX | _PAGE_GNTTAB)

#ifndef __ASSEMBLY__

static inline int get_order_from_bytes(paddr_t size)
{
    int order;
    size = (size - 1) >> PAGE_SHIFT;
    for ( order = 0; size; order++ )
        size >>= 1;
    return order;
}

static inline int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}
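
/*
 * Illustrative worked example (not part of the original header): both
 * helpers return the smallest order such that (1 << order) pages covers
 * the request. Three pages round up to four (order 2); five pages round
 * up to eight (order 3).
 */
#if 0
static inline void example_orders(void)
{
    ASSERT(get_order_from_bytes(3 * PAGE_SIZE) == 2);
    ASSERT(get_order_from_pages(1) == 0);
    ASSERT(get_order_from_pages(5) == 3);
}
#endif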

/* Allocator functions for Xen pagetables. */
void *alloc_xen_pagetable(void);
void free_xen_pagetable(void *v);
l2_pgentry_t *virt_to_xen_l2e(unsigned long v);
#ifdef __x86_64__
l3_pgentry_t *virt_to_xen_l3e(unsigned long v);
#endif

extern void set_pdx_range(unsigned long smfn, unsigned long emfn);

/* Map machine page range in Xen virtual address space. */
#define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpages for the mapping */
int map_pages_to_xen(
    unsigned long virt,
    unsigned long mfn,
    unsigned long nr_mfns,
    unsigned int flags);
void destroy_xen_mappings(unsigned long v, unsigned long e);

/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
{
    return ((flags >> 5) & 4) | ((flags >> 3) & 3);
}
static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
{
    return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
}
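
/*
 * Illustrative worked example (not part of the original header): the 3-bit
 * cacheattr is {PAT, PCD, PWT}, i.e. PTE flag bit 7 becomes bit 2 and flag
 * bits 4:3 become bits 1:0, and the two conversions are inverses.
 */
#if 0
static inline void example_cacheattr(void)
{
    ASSERT(pte_flags_to_cacheattr(_PAGE_PCD | _PAGE_PWT) == 3);
    ASSERT(pte_flags_to_cacheattr(_PAGE_PAT) == 4);
    ASSERT(cacheattr_to_pte_flags(3) == (_PAGE_PCD | _PAGE_PWT));
}
#endif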

#endif /* !__ASSEMBLY__ */

#define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)
#define PFN_UP(x)     (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
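
/*
 * Illustrative worked example (not part of the original header), assuming
 * PAGE_SHIFT == 12: PFN_DOWN truncates to the containing frame, while
 * PFN_UP and PAGE_ALIGN round up to the next frame boundary.
 */
#if 0
static inline void example_rounding(void)
{
    ASSERT(PFN_DOWN(0x1fff) == 1);         /* still inside frame 1 */
    ASSERT(PFN_UP(0x1001) == 2);           /* needs two frames */
    ASSERT(PAGE_ALIGN(0x1001) == 0x2000);  /* rounds up */
}
#endif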

#endif /* __X86_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */