debuggers.hg

view xen/include/asm-x86/mm.h @ 22906:700ac6445812

Now add KDB to the non-kdb tree
author Mukesh Rathor
date Thu Feb 03 15:42:41 2011 -0800 (2011-02-03)
parents e8acb9753ff1
#ifndef __ASM_X86_MM_H__
#define __ASM_X86_MM_H__

#include <xen/config.h>
#include <xen/list.h>
#include <asm/io.h>
#include <asm/uaccess.h>

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct page_list_entry list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)

/*
 * This definition is solely for the use in struct page_info (and
 * struct page_list_head), intended to allow easy adjustment once x86-64
 * wants to support more than 16TB.
 * 'unsigned long' should be used for MFNs everywhere else.
 */
#define __pdx_t unsigned int

#undef page_list_entry
struct page_list_entry
{
    __pdx_t next, prev;
};
struct page_info
{
    union {
        /* Each frame can be threaded onto a doubly-linked list.
         *
         * For unused shadow pages, a list of free shadow pages;
         * for multi-page shadows, links to the other pages in this shadow;
         * for pinnable shadows, if pinned, a list of all pinned shadows
         * (see sh_type_is_pinnable() for the definition of "pinnable"
         * shadow types). N.B. a shadow may be both pinnable and multi-page.
         * In that case the pages are inserted in order in the list of
         * pinned shadows and walkers of that list must be prepared
         * to keep them all together during updates.
         */
        struct page_list_entry list;
        /* For non-pinnable single-page shadows, a higher entry that points
         * at us. */
        paddr_t up;
        /* For shared/sharable pages the sharing handle */
        uint64_t shr_handle;
    };

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } inuse;

        /* Page is in use as a shadow: count_info == 0. */
        struct {
            unsigned long type:5;   /* What kind of shadow is this? */
            unsigned long pinned:1; /* Is the shadow pinned? */
            unsigned long head:1;   /* Is this the first page of the shadow? */
            unsigned long count:25; /* Reference count */
        } sh;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Do TLBs need flushing for safety before next page use? */
            bool_t need_tlbflush;
        } free;

    } u;

    union {

        /* Page is in use, but not as a shadow. */
        struct {
            /* Owner of this page (zero if page is anonymous). */
            __pdx_t _domain;
        } inuse;

        /* Page is in use as a shadow. */
        struct {
            /* GMFN of guest page we're a shadow of. */
            __pdx_t back;
        } sh;

        /* Page is on a free list. */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            unsigned int order;
        } free;

    } v;
    union {
        /*
         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
         * Only valid for: a) free pages, and b) pages with zero type count
         * (except page table pages when the guest is in shadow mode).
         */
        u32 tlbflush_timestamp;

        /*
         * When PGT_partial is true then this field is valid and indicates
         * that PTEs in the range [0, @nr_validated_ptes) have been validated.
         * An extra page reference must be acquired (or not dropped) whenever
         * PGT_partial gets set, and it must be dropped when the flag gets
         * cleared. This is so that a get() leaving a page in partially
         * validated state (where the caller would drop the reference acquired
         * due to the getting of the type [apparently] failing [-EAGAIN])
         * would not accidentally result in a page left with zero general
         * reference count, but non-zero type reference count (possible when
         * the partial get() is followed immediately by domain destruction).
         * Likewise, the ownership of the single type reference for partially
         * (in-)validated pages is tied to this flag, i.e. the instance
         * setting the flag must not drop that reference, whereas the instance
         * clearing it will have to.
         *
         * If @partial_pte is positive then PTE at @nr_validated_ptes+1 has
         * been partially validated. This implies that the general reference
         * to the page (acquired from get_page_from_lNe()) would be dropped
         * (again due to the apparent failure) and hence must be re-acquired
         * when resuming the validation, but must not be dropped when picking
         * up the page for invalidation.
         *
         * If @partial_pte is negative then PTE at @nr_validated_ptes+1 has
         * been partially invalidated. This is basically the opposite case of
         * above, i.e. the general reference to the page was not dropped in
         * put_page_from_lNe() (due to the apparent failure), and hence it
         * must be dropped when the put operation is resumed (and completes),
         * but it must not be acquired if picking up the page for validation.
         */
        struct {
            u16 nr_validated_ptes;
            s8 partial_pte;
        };

        /*
         * Guest pages with a shadow. This does not conflict with
         * tlbflush_timestamp since page table pages are explicitly not
         * tracked for TLB-flush avoidance when a guest runs in shadow mode.
         */
        u32 shadow_flags;

        /* When in use as a shadow, next shadow in this hash chain. */
        __pdx_t next_shadow;
    };
};
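
/*
 * Editor's illustration (not part of the upstream header): the overlaid
 * unions above are only meaningful for the state named in their comments.
 * The hypothetical helper below shows the intended access pattern for a
 * page that is the head of a free chunk, where 'v.free.order' (wrapped by
 * PFN_ORDER() above) holds the chunk's order.
 */
static inline unsigned long example_free_chunk_pages(const struct page_info *pg)
{
    /* Valid only while the page sits on a free list as a chunk head. */
    return 1UL << PFN_ORDER(pg);
}
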
#undef __pdx_t

#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))

/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none          PG_mask(0, 4)  /* no special uses of this page   */
#define PGT_l1_page_table PG_mask(1, 4)  /* using as an L1 page table?     */
#define PGT_l2_page_table PG_mask(2, 4)  /* using as an L2 page table?     */
#define PGT_l3_page_table PG_mask(3, 4)  /* using as an L3 page table?     */
#define PGT_l4_page_table PG_mask(4, 4)  /* using as an L4 page table?     */
#define PGT_seg_desc_page PG_mask(5, 4)  /* using this page in a GDT/LDT?  */
#define PGT_writable_page PG_mask(7, 4)  /* has writable mappings?         */
#define PGT_shared_page   PG_mask(8, 4)  /* CoW sharable page              */
#define PGT_type_mask     PG_mask(15, 4) /* Bits 28-31 or 60-63.           */

/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned       PG_shift(5)
#define PGT_pinned        PG_mask(1, 5)
/* Has this page been validated for use as its current type? */
#define _PGT_validated    PG_shift(6)
#define PGT_validated     PG_mask(1, 6)
/* PAE only: is this an L2 page directory containing Xen-private mappings? */
#define _PGT_pae_xen_l2   PG_shift(7)
#define PGT_pae_xen_l2    PG_mask(1, 7)
/* Has this page been *partially* validated for use as its current type? */
#define _PGT_partial      PG_shift(8)
#define PGT_partial       PG_mask(1, 8)
/* Page is locked? */
#define _PGT_locked       PG_shift(9)
#define PGT_locked        PG_mask(1, 9)

/* Count of uses of this frame as its current type. */
#define PGT_count_width   PG_shift(9)
#define PGT_count_mask    ((1UL<<PGT_count_width)-1)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated    PG_shift(1)
#define PGC_allocated     PG_mask(1, 1)
/* Page is Xen heap? */
#define _PGC_xen_heap     PG_shift(2)
#define PGC_xen_heap      PG_mask(1, 2)
/* Set when the page is in use as a page table. */
#define _PGC_page_table   PG_shift(3)
#define PGC_page_table    PG_mask(1, 3)
/* 3-bit PAT/PCD/PWT cache-attribute hint. */
#define PGC_cacheattr_base PG_shift(6)
#define PGC_cacheattr_mask PG_mask(7, 6)
/* Page is broken? */
#define _PGC_broken       PG_shift(7)
#define PGC_broken        PG_mask(1, 7)
/* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
#define PGC_state           PG_mask(3, 9)
#define PGC_state_inuse     PG_mask(0, 9)
#define PGC_state_offlining PG_mask(1, 9)
#define PGC_state_offlined  PG_mask(2, 9)
#define PGC_state_free      PG_mask(3, 9)
#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)

/* Count of references to this frame. */
#define PGC_count_width   PG_shift(9)
#define PGC_count_mask    ((1UL<<PGC_count_width)-1)
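
/*
 * Worked example (editor's addition): with BITS_PER_LONG == 64,
 * PG_shift(4) == 60, so PGT_type_mask == PG_mask(15, 4) covers bits 60-63
 * (bits 28-31 on 32-bit builds), matching the comment above.  A type test
 * therefore masks type_info before comparing; the helper name below is
 * hypothetical.
 */
static inline int example_page_is_writable_type(const struct page_info *pg)
{
    /* Non-zero iff the page currently holds the writable page type. */
    return (pg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page;
}
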
#ifdef __x86_64__
struct spage_info
{
    unsigned long type_info;
};

/* The following page types are MUTUALLY EXCLUSIVE. */
#define SGT_none          PG_mask(0, 2)  /* superpage not in use */
#define SGT_mark          PG_mask(1, 2)  /* Marked as a superpage */
#define SGT_dynamic       PG_mask(2, 2)  /* has been dynamically mapped as a superpage */
#define SGT_type_mask     PG_mask(3, 2)  /* Bits 30-31 or 62-63. */

/* Count of uses of this superpage as its current type. */
#define SGT_count_width   PG_shift(3)
#define SGT_count_mask    ((1UL<<SGT_count_width)-1)
#endif

#if defined(__i386__)
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({                          \
    unsigned long _mfn = (mfn);                          \
    (_mfn < paddr_to_pfn(xenheap_phys_end));             \
})
#define is_xen_fixed_mfn(mfn) is_xen_heap_mfn(mfn)
#else
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
    (__mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn)))
#define is_xen_fixed_mfn(mfn)                            \
    ((((mfn) << PAGE_SHIFT) >= __pa(&_start)) &&         \
     (((mfn) << PAGE_SHIFT) <= __pa(&_end)))
#endif

#if defined(__i386__)
#define PRtype_info "08lx"  /* should only be used for printk's */
#elif defined(__x86_64__)
#define PRtype_info "016lx" /* should only be used for printk's */
#endif

/* The number of out-of-sync shadows we allow per vcpu (prime, please) */
#define SHADOW_OOS_PAGES 3

/* OOS fixup entries */
#define SHADOW_OOS_FIXUPS 2
#define page_get_owner(_p)                                              \
    ((struct domain *)((_p)->v.inuse._domain ?                          \
                       pdx_to_virt((_p)->v.inuse._domain) : NULL))
#define page_set_owner(_p,_d)                                           \
    ((_p)->v.inuse._domain = (_d) ? virt_to_pdx(_d) : 0)

#define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))
#define vaddr_get_owner(va)   (page_get_owner(virt_to_page((va))))
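
/*
 * Illustrative sketch (editor's addition): the owner is stored as a
 * compressed pdx rather than a pointer, so it must always be read and
 * written through the accessors above.  The helper name is hypothetical.
 */
static inline int example_page_is_anonymous(const struct page_info *pg)
{
    /* page_get_owner() yields NULL for anonymous pages (zero _domain). */
    return page_get_owner(pg) == NULL;
}
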
#define XENSHARE_writable 0
#define XENSHARE_readonly 1
extern void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly);
extern void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly);

#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
#ifdef __x86_64__
#define spage_table ((struct spage_info *)SPAGETABLE_VIRT_START)
int get_superpage(unsigned long mfn, struct domain *d);
#endif
extern unsigned long max_page;
extern unsigned long total_pages;
void init_frametable(void);

#define PDX_GROUP_COUNT ((1 << L2_PAGETABLE_SHIFT) / \
                         (sizeof(*frame_table) & -sizeof(*frame_table)))
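
/*
 * Editor's note: (sizeof(*frame_table) & -sizeof(*frame_table)) isolates
 * the lowest set bit of sizeof(struct page_info), i.e. its largest
 * power-of-two divisor, so the group count above stays an exact power of
 * two even if sizeof(struct page_info) is not.  __page_to_virt() below
 * relies on the same trick to scale by that divisor instead of doing a
 * full division.
 */
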
extern unsigned long pdx_group_valid[];

/* Convert between Xen-heap virtual addresses and page-info structures. */
static inline struct page_info *__virt_to_page(const void *v)
{
    unsigned long va = (unsigned long)v;

#ifdef __x86_64__
    ASSERT(va >= XEN_VIRT_START);
    ASSERT(va < DIRECTMAP_VIRT_END);
    if ( va < XEN_VIRT_END )
        va += DIRECTMAP_VIRT_START - XEN_VIRT_START + xen_phys_start;
    else
        ASSERT(va >= DIRECTMAP_VIRT_START);
#else
    ASSERT(va - DIRECTMAP_VIRT_START < DIRECTMAP_VIRT_END);
#endif
    return frame_table + ((va - DIRECTMAP_VIRT_START) >> PAGE_SHIFT);
}

static inline void *__page_to_virt(const struct page_info *pg)
{
    ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_VIRT_END);
    return (void *)(DIRECTMAP_VIRT_START +
                    ((unsigned long)pg - FRAMETABLE_VIRT_START) /
                    (sizeof(*pg) / (sizeof(*pg) & -sizeof(*pg))) *
                    (PAGE_SIZE / (sizeof(*pg) & -sizeof(*pg))));
}

int free_page_type(struct page_info *page, unsigned long type,
                   int preemptible);

int _shadow_mode_refcounts(struct domain *d);

void cleanup_page_cacheattr(struct page_info *page);

int is_iomem_page(unsigned long mfn);

void clear_superpage_mark(struct page_info *page);

struct domain *page_get_owner_and_reference(struct page_info *page);
void put_page(struct page_info *page);
int get_page(struct page_info *page, struct domain *domain);
void put_page_type(struct page_info *page);
int get_page_type(struct page_info *page, unsigned long type);
int put_page_type_preemptible(struct page_info *page);
int get_page_type_preemptible(struct page_info *page, unsigned long type);
int get_page_from_l1e(
    l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

static inline int put_page_and_type_preemptible(struct page_info *page,
                                                int preemptible)
{
    int rc = 0;

    if ( preemptible )
        rc = put_page_type_preemptible(page);
    else
        put_page_type(page);
    if ( likely(rc == 0) )
        put_page(page);
    return rc;
}

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
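
/*
 * Illustrative sketch (editor's addition): the usual pairing of the helpers
 * above -- take a general reference plus a type reference together, and
 * release them together once the mapping or operation is torn down.  The
 * helper below is hypothetical and ignores the preemptible variants.
 */
static inline int example_use_as_writable(struct page_info *page,
                                          struct domain *d)
{
    if ( !get_page_and_type(page, d, PGT_writable_page) )
        return 0;                /* no references held on failure */

    /* ... access the page on behalf of @d here ... */

    put_page_and_type(page);     /* drop type ref, then general ref */
    return 1;
}
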
#define ASSERT_PAGE_IS_TYPE(_p, _t)                            \
    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
#define ASSERT_PAGE_IS_DOMAIN(_p, _d)                          \
    ASSERT(((_p)->count_info & PGC_count_mask) != 0);          \
    ASSERT(page_get_owner(_p) == (_d))

// Quick test for whether a given page can be represented directly in CR3.
//
#if CONFIG_PAGING_LEVELS == 3
#define MFN_FITS_IN_CR3(_MFN) !(mfn_x(_MFN) >> 20)

/* returns a lowmem machine address of the copied L3 root table */
unsigned long
pae_copy_root(struct vcpu *v, l3_pgentry_t *l3tab);
#endif /* CONFIG_PAGING_LEVELS == 3 */

int check_descriptor(const struct domain *, struct desc_struct *d);

extern bool_t opt_allow_superpage;
extern bool_t mem_hotplug;
/******************************************************************************
 * With shadow pagetables, the different kinds of address start
 * to get confusing.
 *
 * Virtual addresses are what they usually are: the addresses that are used
 * to access memory while the guest is running. The MMU translates from
 * virtual addresses to machine addresses.
 *
 * (Pseudo-)physical addresses are the abstraction of physical memory the
 * guest uses for allocation and so forth. For the purposes of this code,
 * we can largely ignore them.
 *
 * Guest frame numbers (gfns) are the entries that the guest puts in its
 * pagetables. For normal paravirtual guests, they are actual frame numbers,
 * with the translation done by the guest.
 *
 * Machine frame numbers (mfns) are the entries that the hypervisor puts
 * in the shadow page tables.
 *
 * Elsewhere in the xen code base, the name "gmfn" is generally used to refer
 * to a "machine frame number, from the guest's perspective", or in other
 * words, pseudo-physical frame numbers. However, in the shadow code, the
 * term "gmfn" means "the mfn of a guest page"; this combines naturally with
 * other terms such as "smfn" (the mfn of a shadow page), gl2mfn (the mfn of a
 * guest L2 page), etc...
 */
/* With this defined, we do some ugly things to force the compiler to
 * give us type safety between mfns and gfns and other integers.
 * TYPE_SAFE(int foo) defines a foo_t, and _foo() and foo_x() functions
 * that translate between int and foo_t.
 *
 * It does have some performance cost because the types now have
 * a different storage attribute, so we may not want it on all the time. */

#ifndef NDEBUG
#define TYPE_SAFETY 1
#endif

#ifdef TYPE_SAFETY
#define TYPE_SAFE(_type,_name)                                           \
typedef struct { _type _name; } _name##_t;                               \
static inline _name##_t _##_name(_type n) { return (_name##_t) { n }; }  \
static inline _type _name##_x(_name##_t n) { return n._name; }
#else
#define TYPE_SAFE(_type,_name)                                           \
typedef _type _name##_t;                                                 \
static inline _name##_t _##_name(_type n) { return n; }                  \
static inline _type _name##_x(_name##_t n) { return n; }
#endif
TYPE_SAFE(unsigned long,mfn);

/* Macro for printk formats: use as printk("%"PRI_mfn"\n", mfn_x(foo)); */
#define PRI_mfn "05lx"
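
/*
 * Editor's illustration: in debug builds TYPE_SAFE() makes mfn_t a distinct
 * one-member struct, so raw longs and mfns cannot be mixed silently; _mfn()
 * and mfn_x() convert explicitly and are intended to compile away in either
 * build mode.  The round-trip helper below is hypothetical.
 */
static inline unsigned long example_mfn_roundtrip(unsigned long raw)
{
    mfn_t mfn = _mfn(raw);      /* wrap the raw frame number */
    return mfn_x(mfn);          /* unwrap: always returns @raw */
}
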
/*
 * The MPT (machine->physical mapping table) is an array of word-sized
 * values, indexed on machine frame number. It is expected that guest OSes
 * will use it to store a "physical" frame number to give the appearance of
 * contiguous (or near contiguous) physical memory.
 */
#undef  machine_to_phys_mapping
#define machine_to_phys_mapping  ((unsigned long *)RDWR_MPT_VIRT_START)
#define INVALID_M2P_ENTRY        (~0UL)
#define VALID_M2P(_e)            (!((_e) & (1UL<<(BITS_PER_LONG-1))))
#define SHARED_M2P_ENTRY         (~0UL - 1UL)
#define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)
#ifdef CONFIG_COMPAT
#define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
#define set_gpfn_from_mfn(mfn, pfn) ({                          \
    struct domain *d = page_get_owner(__mfn_to_page(mfn));      \
    unsigned long entry = (d && (d == dom_cow)) ?               \
        SHARED_M2P_ENTRY : (pfn);                               \
    ((void)((mfn) >= (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 || \
            (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(entry))), \
     machine_to_phys_mapping[(mfn)] = (entry));                 \
})
#else
#define set_gpfn_from_mfn(mfn, pfn) ({                          \
    struct domain *d = page_get_owner(__mfn_to_page(mfn));      \
    if ( d && (d == dom_cow) )                                  \
        machine_to_phys_mapping[(mfn)] = SHARED_M2P_ENTRY;      \
    else                                                        \
        machine_to_phys_mapping[(mfn)] = (pfn);                 \
})
#endif
#define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
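
/*
 * Illustrative sketch (editor's addition): M2P entries are raw words, so a
 * reader has to reject the INVALID/SHARED sentinels before trusting them.
 * VALID_M2P() already fails for SHARED_M2P_ENTRY (its top bit is set); the
 * helper name below is hypothetical.
 */
static inline int example_mfn_to_gpfn(unsigned long mfn, unsigned long *gpfn)
{
    unsigned long entry = get_gpfn_from_mfn(mfn);

    if ( !VALID_M2P(entry) )
        return 0;               /* invalid or shared: no usable gpfn */
    *gpfn = entry;
    return 1;
}
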
#define mfn_to_gmfn(_d, mfn)                    \
    ( (paging_mode_translate(_d))               \
      ? get_gpfn_from_mfn(mfn)                  \
      : (mfn) )

#define INVALID_MFN             (~0UL)

#define compat_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
#define compat_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
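
/*
 * Editor's note: the two macros above are 32-bit rotations by 12 bits in
 * opposite directions, so compat_cr3_to_pfn(compat_pfn_to_cr3(pfn)) == pfn
 * for any 32-bit value; a frame number wider than 20 bits therefore still
 * round-trips through the 32-bit compat CR3 field.
 */
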
#ifdef MEMORY_GUARD
void memguard_init(void);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init()                ((void)0)
#define memguard_guard_range(_p,_l)    ((void)0)
#define memguard_unguard_range(_p,_l)  ((void)0)
#endif

void memguard_guard_stack(void *p);
void memguard_unguard_stack(void *p);

int ptwr_do_page_fault(struct vcpu *, unsigned long,
                       struct cpu_user_regs *);

int audit_adjust_pgtables(struct domain *d, int dir, int noisy);

#ifdef CONFIG_X86_64
extern int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs);
extern int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs);
#else
static inline int pagefault_by_memadd(unsigned long addr,
                                      struct cpu_user_regs *regs)
{
    return 0;
}

static inline int handle_memadd_fault(unsigned long addr,
                                      struct cpu_user_regs *regs)
{
    return 0;
}
#endif
#ifndef NDEBUG

#define AUDIT_SHADOW_ALREADY_LOCKED ( 1u << 0 )
#define AUDIT_ERRORS_OK             ( 1u << 1 )
#define AUDIT_QUIET                 ( 1u << 2 )

void _audit_domain(struct domain *d, int flags);
#define audit_domain(_d) _audit_domain((_d), AUDIT_ERRORS_OK)
void audit_domains(void);

#else

#define _audit_domain(_d, _f) ((void)0)
#define audit_domain(_d)      ((void)0)
#define audit_domains()       ((void)0)

#endif

int new_guest_cr3(unsigned long pfn);
void make_cr3(struct vcpu *v, unsigned long mfn);
void update_cr3(struct vcpu *v);
void propagate_page_fault(unsigned long addr, u16 error_code);
void *do_page_walk(struct vcpu *v, unsigned long addr);

int __sync_local_execstate(void);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
long subarch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void));
int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE(void));

int steal_page(
    struct domain *d, struct page_info *page, unsigned int memflags);
int donate_page(
    struct domain *d, struct page_info *page, unsigned int memflags);
int page_make_sharable(struct domain *d,
                       struct page_info *page,
                       int expected_refcnt);
int page_make_private(struct domain *d, struct page_info *page);

int map_ldt_shadow_page(unsigned int);

#ifdef CONFIG_X86_64
extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);
#else
static inline int memory_add(uint64_t spfn, uint64_t epfn, uint32_t pxm)
{
    return -ENOSYS;
}
#endif

#ifdef CONFIG_COMPAT
void domain_set_alloc_bitsize(struct domain *d);
unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);
#else
# define domain_set_alloc_bitsize(d) ((void)0)
# define domain_clamp_alloc_bitsize(d, b) (b)
#endif

unsigned long domain_get_maximum_gpfn(struct domain *d);

extern struct domain *dom_xen, *dom_io, *dom_cow; /* for vmcoreinfo */

#endif /* __ASM_X86_MM_H__ */