debuggers.hg

view xen/include/asm-ia64/mm.h @ 4615:58efb3448933

bitkeeper revision 1.1327.1.1 (426536d2PUqtjTi2v06bzD10RFwarg)

Merge bk://xen.bkbits.net/xeno-unstable.bk
into bkbits.net:/repos/x/xen-ia64/xeno-unstable-ia64.bk
author xen-ia64.adm@bkbits.net
date Tue Apr 19 16:50:26 2005 +0000 (2005-04-19)
parents 445b12a7221a f1c946e1226a
children 5b9e241131fb
line source
#ifndef __ASM_IA64_MM_H__
#define __ASM_IA64_MM_H__

#include <xen/config.h>
#ifdef LINUX_2_6
#include <xen/gfp.h>
#endif
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/perfc.h>
#include <xen/sched.h>

#include <linux/rbtree.h>

#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/flushtlb.h>
#include <asm/io.h>

#include <public/xen.h>

/*
 * The following is for page_alloc.c.
 */

typedef unsigned long page_flags_t;

/*
 * Per-page-frame information.
 */
//FIXME: This can go away when common/dom0_ops.c is fully arch-independent
#if 0
struct pfn_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use by a domain. */
        struct {
            /* Owner of this page. */
            struct domain *domain;
            /* Reference count and various PGC_xxx flags and fields. */
            u32 count_info;
            /* Type reference count and various PGT_xxx flags and fields. */
            u32 type_info;
        } inuse;

        /* Page is on a free list. */
        struct {
            /* Mask of possibly-tainted TLBs. */
            unsigned long cpu_mask;
            /* Must be at same offset as 'u.inuse.count_info'. */
            u32 __unavailable;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;
};
#endif
struct page
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct list_head list;

    /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    u32 tlbflush_timestamp;

    /* Reference count and various PGC_xxx flags and fields. */
    u32 count_info;

    /* Context-dependent fields follow... */
    union {

        /* Page is in use by a domain. */
        struct {
            /* Owner of this page. */
            u64 _domain;
            /* Type reference count and various PGT_xxx flags and fields. */
            u32 type_info;
        } inuse;

        /* Page is on a free list. */
        struct {
            /* Mask of possibly-tainted TLBs. */
            u64 cpu_mask;
            /* Order-size of the free chunk this page is the head of. */
            u8 order;
        } free;

    } u;

    // following added for Linux compiling
    page_flags_t flags;
    atomic_t _count;
    struct list_head lru; // is this the same as above "list"?
};
#define set_page_count(p,v) atomic_set(&(p)->_count, (v) - 1)
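/*
 * NB (sketch of the convention, not in the original header): the (v) - 1
 * bias matches the Linux 2.6 refcount scheme in which page_count() reads
 * _count + 1, so a page with exactly one user keeps _count == 0.  For
 * example, set_page_count(pg, 1) leaves atomic_read(&pg->_count) == 0.
 */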
//FIXME: These can go away when common/dom0_ops.c is fully arch-independent
/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none            (0<<29) /* no special uses of this page */
#define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
#define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
#define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
#define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
#define PGT_gdt_page        (5<<29) /* using this page in a GDT? */
#define PGT_ldt_page        (6<<29) /* using this page in an LDT? */
#define PGT_writeable_page  (7<<29) /* has writable mappings of this page? */
#define PGT_type_mask       (7<<29) /* Bits 29-31. */

/* Has this page been validated for use as its current type? */
#define _PGT_validated      28
#define PGT_validated       (1<<_PGT_validated)

/* 28-bit count of uses of this frame as its current type. */
#define PGT_count_mask      ((1<<28)-1)
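/*
 * Illustrative sketch (not part of the original header): how type_info
 * decomposes under the masks above.  is_validated_l1() is a hypothetical
 * helper, shown only to demonstrate the bit layout.
 */
static inline int is_validated_l1(struct page *pg)
{
    u32 x = pg->u.inuse.type_info;
    return ((x & PGT_type_mask) == PGT_l1_page_table) && /* type, bits 29-31 */
           ((x & PGT_validated) != 0)                 && /* bit 28 */
           ((x & PGT_count_mask) > 0);                   /* low 28 bits: uses */
}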
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated      31
#define PGC_allocated       (1U<<_PGC_allocated)

#define PFN_ORDER(_pfn)     ((_pfn)->u.free.order)

#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_phys(_pfn) < xenheap_phys_end) \
                                 && (page_to_phys(_pfn) >= xen_pstart))

#define pickle_domptr(_d)   ((u64)(_d))
#define unpickle_domptr(_d) ((struct domain*)(_d))

#define page_get_owner(_p)     (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p, _d) ((_p)->u.inuse._domain = pickle_domptr(_d))
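/*
 * Illustrative sketch (not part of the original header): on ia64 a
 * struct domain pointer round-trips through the 64-bit _domain field
 * unchanged, so the following hypothetical helper always returns 1 for
 * an in-use page.
 */
static inline int example_owner_roundtrip(struct page *pg, struct domain *d)
{
    page_set_owner(pg, d);           /* stores (u64)d in pg->u.inuse._domain */
    return page_get_owner(pg) == d;  /* unpickle_domptr recovers the pointer */
}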
extern struct pfn_info *frame_table;
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern spinlock_t free_list_lock;
extern unsigned int free_pfns;
extern unsigned long max_page;

#ifdef CONFIG_VIRTUAL_MEM_MAP
void __init init_frametable(void *frametable_vstart, unsigned long nr_pages);
#else
extern void __init init_frametable(void);
#endif

void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
static inline void put_page(struct pfn_info *page)
{
    dummy();
}

static inline int get_page(struct pfn_info *page,
                           struct domain *domain)
{
    dummy();
    return 1;  /* stub: claim success until this is really implemented */
}
// see alloc_new_dom_mem() in common/domain.c
#define set_machinetophys(_mfn, _pfn) do { } while(0)
#ifdef MEMORY_GUARD
void *memguard_init(void *heap_start);
void memguard_guard_stack(void *p);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);
#else
#define memguard_init(_s)             (_s)
#define memguard_guard_stack(_p)      ((void)0)
#define memguard_guard_range(_p,_l)   ((void)0)
#define memguard_unguard_range(_p,_l) ((void)0)
#endif
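/*
 * Illustrative sketch (not part of the original header): typical memguard
 * usage, guarding a buffer so stray writes fault while it must stay
 * untouched.  buf and len are hypothetical; without MEMORY_GUARD the
 * calls compile away to ((void)0).
 */
static inline void example_guard_window(void *buf, unsigned long len)
{
    memguard_guard_range(buf, len);    /* writes to buf now trap */
    /* ... window during which buf must not be modified ... */
    memguard_unguard_range(buf, len);  /* restore normal access */
}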
// FOLLOWING FROM linux-2.6.7/include/mm.h

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area, etc.).
 */
struct vm_area_struct {
    struct mm_struct * vm_mm;   /* The address space we belong to. */
    unsigned long vm_start;     /* Our start address within vm_mm. */
    unsigned long vm_end;       /* The first byte after our end address
                                   within vm_mm. */

    /* linked list of VM areas per task, sorted by address */
    struct vm_area_struct *vm_next;

    pgprot_t vm_page_prot;      /* Access permissions of this VMA. */
    unsigned long vm_flags;     /* Flags, listed below. */

#ifndef XEN
    struct rb_node vm_rb;

// XEN doesn't need all the backing store stuff
    /*
     * For areas with an address space and backing store,
     * linkage into the address_space->i_mmap prio tree, or
     * linkage to the list of like vmas hanging off its node, or
     * linkage of vma in the address_space->i_mmap_nonlinear list.
     */
    union {
        struct {
            struct list_head list;
            void *parent;    /* aligns with prio_tree_node parent */
            struct vm_area_struct *head;
        } vm_set;

        struct prio_tree_node prio_tree_node;
    } shared;

    /*
     * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     * list, after a COW of one of the file pages. A MAP_SHARED vma
     * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
     * or brk vma (with NULL file) can only be in an anon_vma list.
     */
    struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
    struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

    /* Function pointers to deal with this struct. */
    struct vm_operations_struct * vm_ops;

    /* Information about our backing store: */
    unsigned long vm_pgoff;     /* Offset (within vm_file) in PAGE_SIZE
                                   units, *not* PAGE_CACHE_SIZE */
    struct file * vm_file;      /* File we map to (can be NULL). */
    void * vm_private_data;     /* was vm_pte (shared mem) */

#ifdef CONFIG_NUMA
    struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
#endif
};
/*
 * vm_flags..
 */
#define VM_READ         0x00000001  /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

#define VM_MAYREAD      0x00000010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100  /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_SHM          0x00000400  /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x00000800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000  /* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000  /* App will access data sequentially */
#define VM_RAND_READ    0x00010000  /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000  /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000  /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000  /* Don't unmap it from swap_out */
#define VM_ACCOUNT      0x00100000  /* Is a VM accounted object */
#define VM_HUGETLB      0x00400000  /* Huge TLB Page VM */
#define VM_NONLINEAR    0x00800000  /* Is non-linear (remap_file_pages) */

#ifndef VM_STACK_DEFAULT_FLAGS  /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif
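/*
 * Illustrative sketch (not part of the original header): vm_flags is a
 * plain bitmask, so permission queries are simple mask tests.  The helper
 * name is hypothetical.
 */
static inline int vma_is_writable(struct vm_area_struct *vma)
{
    /* VM_WRITE: the mapping is currently writable; VM_MAYWRITE would
       instead ask whether mprotect() could make it so. */
    return (vma->vm_flags & VM_WRITE) != 0;
}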
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
#define NODEZONE(node, zone) (((node) << ZONES_SHIFT) | (zone))
static inline unsigned long page_zonenum(struct page *page)
{
    return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
}

static inline unsigned long page_to_nid(struct page *page)
{
    return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page *page)
{
    return zone_table[page->flags >> NODEZONE_SHIFT];
}

static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
{
    page->flags &= ~(~0UL << NODEZONE_SHIFT);
    page->flags |= nodezone_num << NODEZONE_SHIFT;
}
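/*
 * Worked example (not part of the original header), assuming a 64-bit
 * page_flags_t with MAX_NODES_SHIFT == 6 and MAX_ZONES_SHIFT == ZONES_SHIFT
 * == 2, so NODEZONE_SHIFT == 64 - 6 - 2 == 56.  For node 3, zone 1:
 *
 *   NODEZONE(3, 1) == (3 << 2) | 1 == 13
 *   set_page_zone(pg, 13)  stores 13 in the top field (bits 56 and up)
 *   page_zonenum(pg) == 13 & 3  == 1   (low ZONES_SHIFT bits of the field)
 *   page_to_nid(pg)  == 13 >> 2 == 3   (remaining high bits)
 */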
#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

static inline void *lowmem_page_address(struct page *page)
{
    return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)     \
    do {                                    \
        (page)->virtual = (address);        \
    } while(0)
#define page_address_init() do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init() do { } while(0)
#endif
#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
}
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern int nr_swap_pages;

#endif /* __ASM_IA64_MM_H__ */