debuggers.hg

view xen/include/asm-ia64/mm.h @ 16369:ff2edb1fd9f2

x86: Change cache attributes of Xen 1:1 page mappings in response to
guest mapping requests.
Based on a patch by Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>

author    Keir Fraser <keir@xensource.com>
date      Wed Nov 07 11:44:05 2007 +0000 (2007-11-07)
parents   a18dbd4a96e6
children  5b8730c78454

/*
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *                    dom0 vp model support
 */
#ifndef __ASM_IA64_MM_H__
#define __ASM_IA64_MM_H__

#include <xen/config.h>
#ifdef LINUX_2_6
#include <linux/gfp.h>
#endif
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <public/xen.h>

/*
 * The following is for page_alloc.c.
 */

typedef unsigned long page_flags_t;

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct list_head list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->u.free.order)

#define PRtype_info "016lx"

struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            u32 _domain; /* pickled format */
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } __attribute__ ((packed)) inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            u32 order;
            /* Mask of possibly-tainted TLBs. */
            cpumask_t cpumask;
        } __attribute__ ((packed)) free;

    } u;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

#if 0
// following added for Linux compiling
    page_flags_t flags;
    atomic_t _count;
    struct list_head lru; // is this the same as above "list"?
#endif
};
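
/*
 * Illustrative sketch (not part of the original header): distinguishing
 * the two halves of the union above. A frame with a non-zero PGC
 * reference count is in use and only u.inuse is meaningful; a free frame
 * exposes its buddy order via PFN_ORDER(). Uses PGC_count_mask, which is
 * defined further down in this header; 'example_free_chunk_order' is a
 * hypothetical helper added purely for illustration.
 */
#if 0
static inline int example_free_chunk_order(struct page_info *pg)
{
    if ( (pg->count_info & PGC_count_mask) != 0 )
        return -1;              /* in use: u.free is not valid */
    return PFN_ORDER(pg);       /* head of a free chunk of 2^order pages */
}
#endif
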
#define set_page_count(p,v)     atomic_set(&(p)->_count, v - 1)

/*
 * Only a small set of flags is defined so far on IA-64.
 * IA-64 should eventually adopt the same flag definitions as x86_64.
 */
/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0UL<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1UL<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2UL<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3UL<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4UL<<29) /* using this page as an L4 page table? */
/* Value 5 reserved. See asm-x86/mm.h */
/* Value 6 reserved. See asm-x86/mm.h */
#define PGT_writable_page   (7UL<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7UL<<29) /* Bits 29-31. */

/* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1UL<<_PGT_validated)
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned         27
#define PGT_pinned          (1UL<<_PGT_pinned)

/* 16-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1UL<<16)-1)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1UL<<_PGC_allocated)
/* Bit 30 reserved. See asm-x86/mm.h */
/* Bit 29 reserved. See asm-x86/mm.h */
/* 29-bit count of references to this frame. */
#define PGC_count_mask      ((1UL<<29)-1)
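
/*
 * Illustrative sketch (not part of the original header): decoding the
 * bit layout above. For an in-use page, type_info packs the mutually
 * exclusive type into bits 29-31 and a 16-bit type-use count into the
 * low bits, while count_info keeps the general reference count in its
 * low 29 bits. 'example_is_writable_page' is hypothetical, added purely
 * for illustration.
 */
#if 0
static inline int example_is_writable_page(struct page_info *pg)
{
    unsigned long type = pg->u.inuse.type_info & PGT_type_mask;
    return (type == PGT_writable_page) &&
           ((pg->u.inuse.type_info & PGT_count_mask) != 0);
}
#endif
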
#define is_xen_heap_frame(pfn)  ((page_to_maddr(pfn) < xenheap_phys_end) \
                                 && (page_to_maddr(pfn) >= xen_pstart))

extern void* xen_pickle_offset;
#define __pickle(a)     ((unsigned long)a - (unsigned long)xen_pickle_offset)
#define __unpickle(a)   (void *)(a + xen_pickle_offset)

static inline struct domain *unpickle_domptr(u64 _d)
{ return (_d == 0) ? NULL : __unpickle(_d); }
static inline u32 pickle_domptr(struct domain *_d)
{ return (_d == NULL) ? 0 : (u32)__pickle(_d); }

#define page_get_owner(_p)      (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p, _d)  ((_p)->u.inuse._domain = pickle_domptr(_d))
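
/*
 * Illustrative sketch (not part of the original header): the pickled
 * owner field. Domain pointers are stored as 32-bit offsets from
 * xen_pickle_offset so they fit in the u32 _domain slot; setting and
 * then getting the owner round-trips through pickle/unpickle.
 * 'example_claim_page' is hypothetical, added purely for illustration.
 */
#if 0
static inline void example_claim_page(struct page_info *pg, struct domain *d)
{
    page_set_owner(pg, d);           /* stores pickle_domptr(d) */
    ASSERT(page_get_owner(pg) == d); /* unpickles back to the pointer */
}
#endif
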
#define XENSHARE_writable 0
#define XENSHARE_readonly 1
void share_xen_page_with_guest(struct page_info *page,
                               struct domain *d, int readonly);
void share_xen_page_with_privileged_guests(struct page_info *page,
                                           int readonly);

extern struct page_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;

extern void __init init_frametable(void);
void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);

static inline void put_page(struct page_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x = y;
        nx = x - 1;
    }
    while (unlikely((y = cmpxchg_rel(&page->count_info, x, nx)) != x));

    if (unlikely((nx & PGC_count_mask) == 0))
        free_domheap_page(page);
}

/* count_info and ownership are checked atomically. */
static inline int get_page(struct page_info *page,
                           struct domain *domain)
{
    u64 x, nx, y = *((u64*)&page->count_info);
    u32 _domain = pickle_domptr(domain);

    do {
        x = y;
        nx = x + 1;
        if (unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
            unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
            unlikely((x >> 32) != _domain)) {       /* Wrong owner? */

            gdprintk(XENLOG_INFO, "Error pfn %lx: rd=%p, od=%p, caf=%016lx, taf=%"
                     PRtype_info "\n", page_to_mfn(page), domain,
                     unpickle_domptr(x >> 32), x, page->u.inuse.type_info);
            return 0;
        }
    }
    while (unlikely((y = cmpxchg_acq((u64*)&page->count_info, x, nx)) != x));

    return 1;
}

int is_iomem_page(unsigned long mfn);

extern void put_page_type(struct page_info *page);
extern int get_page_type(struct page_info *page, u32 type);

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}


static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    u32 type)
{
    int rc = get_page(page, domain);

    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
    {
        put_page(page);
        rc = 0;
    }

    return rc;
}
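
/*
 * Illustrative sketch (not part of the original header): the usual
 * take/use/drop pattern. get_page_and_type() takes both a general and a
 * type reference, checked atomically against ownership, and every
 * successful call must be balanced by put_page_and_type().
 * 'example_with_writable_page' is hypothetical, added for illustration.
 */
#if 0
static inline int example_with_writable_page(struct page_info *pg,
                                             struct domain *d)
{
    if ( !get_page_and_type(pg, d, PGT_writable_page) )
        return 0;               /* wrong owner, count overflow, or bad type */
    /* ... safely access the page on behalf of d here ... */
    put_page_and_type(pg);
    return 1;
}
#endif
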
#define set_machinetophys(_mfn, _pfn) do { } while(0)

#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s)               (_s)
#define memguard_guard_stack(_p)        ((void)0)
#define memguard_guard_range(_p,_l)     ((void)0)
#define memguard_unguard_range(_p,_l)   ((void)0)
#endif

// prototypes of misc memory stuff
//unsigned long __get_free_pages(unsigned int mask, unsigned int order);
//void __free_pages(struct page_info *page, unsigned int order);
void *pgtable_quicklist_alloc(void);
void pgtable_quicklist_free(void *pgtable_entry);

// FOLLOWING FROM linux-2.6.7/include/mm.h

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area, etc.).
 */
struct vm_area_struct {
    struct mm_struct * vm_mm;   /* The address space we belong to. */
    unsigned long vm_start;     /* Our start address within vm_mm. */
    unsigned long vm_end;       /* The first byte after our end address
                                   within vm_mm. */

    /* linked list of VM areas per task, sorted by address */
    struct vm_area_struct *vm_next;

    pgprot_t vm_page_prot;      /* Access permissions of this VMA. */
    unsigned long vm_flags;     /* Flags, listed below. */

#ifndef XEN
    struct rb_node vm_rb;

// XEN doesn't need all the backing store stuff
    /*
     * For areas with an address space and backing store,
     * linkage into the address_space->i_mmap prio tree, or
     * linkage to the list of like vmas hanging off its node, or
     * linkage of vma in the address_space->i_mmap_nonlinear list.
     */
    union {
        struct {
            struct list_head list;
            void *parent;   /* aligns with prio_tree_node parent */
            struct vm_area_struct *head;
        } vm_set;

        struct prio_tree_node prio_tree_node;
    } shared;

    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages. A MAP_SHARED vma
     * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
    struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

    /* Function pointers to deal with this struct. */
    struct vm_operations_struct * vm_ops;

    /* Information about our backing store: */
    unsigned long vm_pgoff;     /* Offset (within vm_file) in PAGE_SIZE
                                   units, *not* PAGE_CACHE_SIZE */
    struct file * vm_file;      /* File we map to (can be NULL). */
    void * vm_private_data;     /* was vm_pte (shared mem) */

#ifdef CONFIG_NUMA
    struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
#endif
};
/*
 * vm_flags..
 */
#define VM_READ         0x00000001  /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

#define VM_MAYREAD      0x00000010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100  /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_SHM          0x00000400  /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x00000800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000  /* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000  /* App will access data sequentially */
#define VM_RAND_READ    0x00010000  /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000  /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000  /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000  /* Don't unmap it from swap_out */
#define VM_ACCOUNT      0x00100000  /* Is a VM accounted object */
#define VM_HUGETLB      0x00400000  /* Huge TLB Page VM */
#define VM_NONLINEAR    0x00800000  /* Is non-linear (remap_file_pages) */

#ifndef VM_STACK_DEFAULT_FLAGS      /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#if 0 /* removed when rebasing to 2.6.13 */
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
#define NODEZONE(node, zone)    ((node << ZONES_SHIFT) | zone)

static inline unsigned long page_zonenum(struct page_info *page)
{
    return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
}
static inline unsigned long page_to_nid(struct page_info *page)
{
    return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page_info *page)
{
    return zone_table[page->flags >> NODEZONE_SHIFT];
}

static inline void set_page_zone(struct page_info *page, unsigned long nodezone_num)
{
    page->flags &= ~(~0UL << NODEZONE_SHIFT);
    page->flags |= nodezone_num << NODEZONE_SHIFT;
}
#endif

#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

static inline void *lowmem_page_address(struct page_info *page)
{
    return __va(page_to_mfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page)      ((page)->virtual)
#define set_page_address(page, address)         \
    do {                                        \
        (page)->virtual = (address);            \
    } while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page_info *page);
void set_page_address(struct page_info *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif
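
/*
 * Illustrative sketch (not part of the original header): on this
 * configuration (no HIGHMEM, no WANT_PAGE_VIRTUAL), page_address()
 * reduces to lowmem_page_address(), i.e. the identity-mapped virtual
 * address of the frame: __va(mfn << PAGE_SHIFT). 'example_zero_frame'
 * is hypothetical, for illustration; memset comes from xen/string.h.
 */
#if 0
static inline void example_zero_frame(struct page_info *pg)
{
    void *va = page_address(pg);    /* __va(page_to_mfn(pg) << PAGE_SHIFT) */
    memset(va, 0, PAGE_SIZE);
}
#endif
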
#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page_info *page, int numpages, int enable)
{
}
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern int nr_swap_pages;

extern void alloc_dom_xen_and_dom_io(void);
extern int mm_teardown(struct domain* d);
extern void mm_final_teardown(struct domain* d);
extern struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
extern void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
extern int __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);
extern void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
extern void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
struct p2m_entry;
extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr, struct p2m_entry* entry);
extern void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr);
extern volatile pte_t *lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr);
extern unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr, unsigned long phys_addr, unsigned long size, unsigned long flags);
extern unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size, unsigned long flags);
int domain_page_mapped(struct domain *d, unsigned long mpaddr);
int efi_mmio(unsigned long physaddr, unsigned long size);
extern unsigned long ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
extern unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3);
extern unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order);
extern unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned long flags, domid_t domid);
extern unsigned long dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn, unsigned long gmfn, unsigned long flags, domid_t domid);
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
extern void expose_p2m_init(void);
extern unsigned long dom0vp_expose_p2m(struct domain* d, unsigned long conv_start_gpfn, unsigned long assign_start_gpfn, unsigned long expose_size, unsigned long granule_pfn);
extern void foreign_p2m_init(struct domain* d);
extern void foreign_p2m_destroy(struct domain* d);
extern unsigned long dom0vp_expose_foreign_p2m(struct domain* dest_dom, unsigned long dest_gpfn, domid_t domid, XEN_GUEST_HANDLE(char) buffer, unsigned long flags);
extern unsigned long dom0vp_unexpose_foreign_p2m(struct domain* dest_dom, unsigned long dest_gpfn, domid_t domid);
#else
#define expose_p2m_init()       do { } while (0)
#define dom0vp_expose_p2m(d, conv_start_gpfn, assign_start_gpfn, expose_size, granule_pfn) (-ENOSYS)
#define foreign_p2m_init(d)     do { } while (0)
#define foreign_p2m_destroy(d)  do { } while (0)
#define dom0vp_expose_foreign_p2m(dest_dom, dest_gpfn, domid, buffer, flags) (-ENOSYS)
#define dom0vp_unexpose_foreign_p2m(dest_dom, dest_gpfn, domid) (-ENOSYS)
#endif

extern volatile unsigned long *mpt_table;
extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__,
                                u64* itir, struct p2m_entry* entry);
#define machine_to_phys_mapping mpt_table

#define INVALID_M2P_ENTRY       (~0UL)
#define VALID_M2P(_e)           (!((_e) & (1UL<<63)))

#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
#define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
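
/*
 * Illustrative sketch (not part of the original header): the
 * machine-to-physical table is a flat array indexed by mfn. An entry
 * with bit 63 set (e.g. INVALID_M2P_ENTRY) marks an mfn with no guest
 * frame behind it, which VALID_M2P() filters out.
 * 'example_mfn_to_gpfn' is hypothetical, added purely for illustration.
 */
#if 0
static inline unsigned long example_mfn_to_gpfn(unsigned long mfn)
{
    unsigned long gpfn = get_gpfn_from_mfn(mfn);
    return VALID_M2P(gpfn) ? gpfn : INVALID_M2P_ENTRY;
}
#endif
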
/* If the pmt table is provided by the control panel later, we need
 * __get_user here. However, if it's allocated by the HV, we can access
 * it directly.
 */

#define mfn_to_gmfn(_d, mfn) \
    get_gpfn_from_mfn(mfn)

#define gmfn_to_mfn(_d, gpfn) \
    gmfn_to_mfn_foreign((_d), (gpfn))

#define __gpfn_invalid(_d, gpfn) \
    (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL) & GPFN_INV_MASK)

#define __gmfn_valid(_d, gpfn)  (!__gpfn_invalid(_d, gpfn))

/* Return the I/O type if true */
#define __gpfn_is_io(_d, gpfn)                                      \
({                                                                  \
    u64 pte, ret = 0;                                               \
    pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL);      \
    if (!(pte & GPFN_INV_MASK))                                     \
        ret = pte & GPFN_IO_MASK;                                   \
    ret;                                                            \
})

#define __gpfn_is_mem(_d, gpfn)                                     \
({                                                                  \
    u64 pte, ret = 0;                                               \
    pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL);      \
    if (!(pte & GPFN_INV_MASK) && ((pte & GPFN_IO_MASK) == GPFN_MEM)) \
        ret = 1;                                                    \
    ret;                                                            \
})
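
/*
 * Illustrative sketch (not part of the original header): how a caller
 * might branch on a guest frame's classification. Both macros above
 * consult the p2m via lookup_domain_mpa(); an invalid entry makes both
 * yield 0. 'example_gpfn_is_io_space' is hypothetical, for illustration.
 */
#if 0
static inline int example_gpfn_is_io_space(struct domain *d,
                                           unsigned long gpfn)
{
    if ( !__gmfn_valid(d, gpfn) )
        return 0;                    /* not mapped at all */
    return !__gpfn_is_mem(d, gpfn);  /* valid but not ordinary RAM */
}
#endif
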
#define __gpa_to_mpa(_d, gpa) \
    ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))

#define __mpa_to_gpa(madr) \
    ((get_gpfn_from_mfn((madr) >> PAGE_SHIFT) << PAGE_SHIFT) | \
     ((madr) & ~PAGE_MASK))
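
/*
 * Illustrative sketch (not part of the original header): both macros
 * translate the frame number and carry the page offset across unchanged.
 * E.g. with 16KB pages (PAGE_SHIFT == 14), if gpfn 0x10 maps to mfn
 * 0x2a0, then __gpa_to_mpa(d, 0x40123) == (0x2a0 << PAGE_SHIFT) | 0x123.
 * 'example_round_trip' is hypothetical, added purely for illustration.
 */
#if 0
static inline void example_round_trip(struct domain *d, unsigned long gpa)
{
    unsigned long mpa = __gpa_to_mpa(d, gpa);
    /* Round-trips as long as the M2P and P2M agree for this frame. */
    ASSERT(__mpa_to_gpa(mpa) == gpa);
}
#endif
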
/* Internal use only: returns 0 in case of bad address. */
extern unsigned long paddr_to_maddr(unsigned long paddr);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);

int steal_page(
    struct domain *d, struct page_info *page, unsigned int memflags);

#define domain_clamp_alloc_bitsize(d, b) (b)

unsigned long domain_get_maximum_gpfn(struct domain *d);

#endif /* __ASM_IA64_MM_H__ */