debuggers.hg

view xen/include/asm-x86/page.h @ 3336:2711f7eb364c

bitkeeper revision 1.1159.1.490 (41c1bb05aOZv3pnPk-NIbxvGZzv5BQ)

page.h, mm.c:
More cleaning.
author kaf24@pb001.cl.cam.ac.uk
date Thu Dec 16 16:42:45 2004 +0000 (2004-12-16)
parents 8a95c46fe237
children d1e0d9a8fde0
line source
1 /******************************************************************************
2 * asm-x86/page.h
3 *
4 * Definitions relating to page tables.
5 */
7 #ifndef __X86_PAGE_H__
8 #define __X86_PAGE_H__
/*
 * Per-architecture pagetable geometry: LN_PAGETABLE_SHIFT is the log2 of
 * the virtual-address span covered by one level-N entry, and
 * ENTRIES_PER_LN_PAGETABLE is the number of entries in a level-N table.
 */
10 #if defined(__x86_64__)
12 #define L1_PAGETABLE_SHIFT 12
13 #define L2_PAGETABLE_SHIFT 21
14 #define L3_PAGETABLE_SHIFT 30
15 #define L4_PAGETABLE_SHIFT 39
17 #define ENTRIES_PER_L1_PAGETABLE 512
18 #define ENTRIES_PER_L2_PAGETABLE 512
19 #define ENTRIES_PER_L3_PAGETABLE 512
20 #define ENTRIES_PER_L4_PAGETABLE 512
/* Base virtual address added/subtracted by __va()/__pa() further below. */
22 #define __PAGE_OFFSET (0xFFFF830000000000)
24 #elif defined(__i386__)
26 #define L1_PAGETABLE_SHIFT 12
27 #define L2_PAGETABLE_SHIFT 22
29 #define ENTRIES_PER_L1_PAGETABLE 1024
30 #define ENTRIES_PER_L2_PAGETABLE 1024
32 #define __PAGE_OFFSET (0xFC400000)
34 #endif
/* 4kB pages on both architectures (PAGE_SHIFT == L1_PAGETABLE_SHIFT == 12). */
36 #define PAGE_SHIFT L1_PAGETABLE_SHIFT
37 #define PAGE_SIZE (1UL << PAGE_SHIFT)
38 #define PAGE_MASK (~(PAGE_SIZE-1))
/* Zero / copy one whole page; memset/memcpy are declared elsewhere. */
40 #define clear_page(_p) memset((void *)(_p), 0, PAGE_SIZE)
41 #define copy_page(_t,_f) memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)
43 #ifndef __ASSEMBLY__
44 #include <xen/config.h>
/*
 * Pagetable entries are wrapped in single-member structs so that each
 * level has a distinct C type: accidentally mixing levels fails to
 * compile instead of silently corrupting a mapping.
 */
45 typedef struct { unsigned long l1_lo; } l1_pgentry_t;
46 typedef struct { unsigned long l2_lo; } l2_pgentry_t;
47 typedef struct { unsigned long l3_lo; } l3_pgentry_t;
48 typedef struct { unsigned long l4_lo; } l4_pgentry_t;
49 typedef struct { unsigned long pt_lo; } pagetable_t;
50 #endif /* !__ASSEMBLY__ */
52 /* Strip type from a table entry. */
53 #define l1_pgentry_val(_x) ((_x).l1_lo)
54 #define l2_pgentry_val(_x) ((_x).l2_lo)
55 #define l3_pgentry_val(_x) ((_x).l3_lo)
56 #define l4_pgentry_val(_x) ((_x).l4_lo)
57 #define pagetable_val(_x) ((_x).pt_lo)
59 /* Add type to a table entry. */
60 #define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } )
61 #define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } )
62 #define mk_l3_pgentry(_x) ( (l3_pgentry_t) { (_x) } )
63 #define mk_l4_pgentry(_x) ( (l4_pgentry_t) { (_x) } )
64 #define mk_pagetable(_x) ( (pagetable_t) { (_x) } )
66 /* Turn a typed table entry into a page index. */
/* (i.e. the frame number: entry value shifted down by PAGE_SHIFT). */
67 #define l1_pgentry_to_pagenr(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)
68 #define l2_pgentry_to_pagenr(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
69 #define l3_pgentry_to_pagenr(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT)
70 #define l4_pgentry_to_pagenr(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT)
72 /* Turn a typed table entry into a physical address. */
/* (PAGE_MASK clears the flag bits in the low 12 bits of the entry). */
73 #define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK)
74 #define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK)
75 #define l3_pgentry_to_phys(_x) (l3_pgentry_val(_x) & PAGE_MASK)
76 #define l4_pgentry_to_phys(_x) (l4_pgentry_val(_x) & PAGE_MASK)
78 /* Pagetable walking. */
/* Map a level-(N+1) entry to a virtual pointer at the level-N table it
 * references, via the direct mapping (__va, defined below). */
79 #define l2_pgentry_to_l1(_x) \
80 ((l1_pgentry_t *)__va(l2_pgentry_val(_x) & PAGE_MASK))
81 #define l3_pgentry_to_l2(_x) \
82 ((l2_pgentry_t *)__va(l3_pgentry_val(_x) & PAGE_MASK))
83 #define l4_pgentry_to_l3(_x) \
84 ((l3_pgentry_t *)__va(l4_pgentry_val(_x) & PAGE_MASK))
86 /* Given a virtual address, get an entry offset into a page table. */
87 #define l1_table_offset(_a) \
88 (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
/* NOTE(review): the top-level offset macros (l2 on i386, l4 on x86_64)
 * apply no entry mask; callers presumably always pass in-range virtual
 * addresses -- confirm before reusing with unchecked values. */
89 #if defined(__i386__)
90 #define l2_table_offset(_a) \
91 ((_a) >> L2_PAGETABLE_SHIFT)
92 #elif defined(__x86_64__)
93 #define l2_table_offset(_a) \
94 (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE -1))
95 #define l3_table_offset(_a) \
96 (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE -1))
97 #define l4_table_offset(_a) \
98 ((_a) >> L4_PAGETABLE_SHIFT)
99 #endif
/*
 * Conversions between hypervisor virtual addresses, physical addresses,
 * and frame_table entries. frame_table, max_mapnr and max_page are
 * declared elsewhere.
 */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
/* Virtual address of the page described by frame_table entry _p. */
#define page_address(_p) (__va(((_p) - frame_table) << PAGE_SHIFT))
/* frame_table entry for a physical / virtual address, respectively. */
#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
/*
 * Fix: parenthesise the macro argument so that any argument expression
 * expands safely (previously 'page' appeared bare in the expansion).
 */
#define VALID_PAGE(page) (((page) - frame_table) < max_mapnr)

/*
 * NB. We don't currently track I/O holes in the physical RAM space.
 * For now we guess that I/O devices will be mapped in the first 1MB
 * (e.g., VGA buffers) or beyond the end of physical RAM.
 */
#define pfn_is_ram(_pfn) (((_pfn) > 0x100) && ((_pfn) < max_page))
116 /* High table entries are reserved by the hypervisor. */
/* L2 slots below HYPERVISOR_VIRT_START (defined elsewhere) belong to the
 * domain; the remainder up to ENTRIES_PER_L2_PAGETABLE are Xen's. */
117 #define DOMAIN_ENTRIES_PER_L2_PAGETABLE \
118 (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
119 #define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
120 (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
122 #ifndef __ASSEMBLY__
123 #include <asm/processor.h>
124 #include <asm/fixmap.h>
125 #include <asm/bitops.h>
126 #include <asm/flushtlb.h>
128 #define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START)
129 #define linear_l2_table ((l2_pgentry_t *)(LINEAR_PT_VIRT_START+(LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT))))
131 #define va_to_l1mfn(_va) (l2_pgentry_val(linear_l2_table[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
133 #ifdef __i386__
/* i386: the idle pagetable root is a single L2 table (page directory). */
134 extern l2_pgentry_t idle_pg_table[ENTRIES_PER_L2_PAGETABLE];
135 #else
/* x86_64: the idle pagetable is rooted at an L4 table. */
136 extern l4_pgentry_t idle_pg_table[ENTRIES_PER_L4_PAGETABLE];
137 #endif
/* Defined elsewhere; sets up the boot-time pagetables. */
139 extern void paging_init(void);
/* Flush global pages as well. */

/*
 * Clear CR4.PGE: reloading CR4 with PGE clear invalidates all TLB
 * entries, including global ones. The "memory" clobber stops the
 * compiler moving memory accesses across the TLB-affecting CR4 write.
 */
#define __pge_off()                                                     \
    do {                                                                \
        __asm__ __volatile__(                                           \
            "mov %0, %%cr4;  # turn off PGE "                           \
            :: "r" (mmu_cr4_features & ~X86_CR4_PGE) : "memory" );      \
    } while (0)

/*
 * Restore the full CR4 value (re-enables PGE if set in
 * mmu_cr4_features). Fix: the asm comment previously said "turn off
 * PGE", copy-pasted from __pge_off(); also add the "memory" clobber.
 */
#define __pge_on()                                                      \
    do {                                                                \
        __asm__ __volatile__(                                           \
            "mov %0, %%cr4;  # turn on PGE "                            \
            :: "r" (mmu_cr4_features) : "memory" );                     \
    } while (0)
/* Full TLB flush including global entries: toggle CR4.PGE (via the
 * __pge_off/__pge_on macros above) around an ordinary __flush_tlb()
 * (declared elsewhere). */
158 #define __flush_tlb_pge() \
159 do { \
160 __pge_off(); \
161 __flush_tlb(); \
162 __pge_on(); \
163 } while (0)
/* Invalidate the single TLB entry mapping virtual address __addr. */
165 #define __flush_tlb_one(__addr) \
166 __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
168 #endif /* !__ASSEMBLY__ */
/* Pagetable entry flag bits (occupy the low bits of an entry). */
171 #define _PAGE_PRESENT 0x001
172 #define _PAGE_RW 0x002
173 #define _PAGE_USER 0x004
174 #define _PAGE_PWT 0x008
175 #define _PAGE_PCD 0x010
176 #define _PAGE_ACCESSED 0x020
177 #define _PAGE_DIRTY 0x040
/* NB: PAT and PSE deliberately share bit 7 -- which meaning applies
 * depends on the pagetable level the entry sits at. */
178 #define _PAGE_PAT 0x080
179 #define _PAGE_PSE 0x080
180 #define _PAGE_GLOBAL 0x100
/* Baseline attributes for Xen's own mappings (cached / uncached). */
182 #define __PAGE_HYPERVISOR \
183 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
184 #define __PAGE_HYPERVISOR_NOCACHE \
185 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
187 #define MAKE_GLOBAL(_x) ((_x) | _PAGE_GLOBAL)
/* Public variants: hypervisor mappings are marked global. */
189 #define PAGE_HYPERVISOR MAKE_GLOBAL(__PAGE_HYPERVISOR)
190 #define PAGE_HYPERVISOR_NOCACHE MAKE_GLOBAL(__PAGE_HYPERVISOR_NOCACHE)
192 #ifndef __ASSEMBLY__
193 static __inline__ int get_order(unsigned long size)
194 {
195 int order;
197 size = (size-1) >> (PAGE_SHIFT-1);
198 order = -1;
199 do {
200 size >>= 1;
201 order++;
202 } while (size);
203 return order;
204 }
206 extern void zap_low_mappings(void);
207 #endif
209 #endif /* __X86_PAGE_H__ */