debuggers.hg

view xen/include/asm-x86/page.h @ 3758:9f7935ea4606

bitkeeper revision 1.1159.212.128 (4208d72fZEHIE9NOZZbr91V7R-3gUg)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author kaf24@scramble.cl.cam.ac.uk
date Tue Feb 08 15:13:51 2005 +0000 (2005-02-08)
parents 253e8e10e986 f504382b179f
children 4dfebfdc7933 89e86842952a
line source
1 /******************************************************************************
2 * asm-x86/page.h
3 *
4 * Definitions relating to page tables.
5 */
#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

#if defined(__x86_64__)

/*
 * x86/64: four-level paging. Each level decodes 9 virtual-address bits,
 * on top of the 12-bit byte offset within a 4kB page.
 */
#define L1_PAGETABLE_SHIFT 12
#define L2_PAGETABLE_SHIFT 21
#define L3_PAGETABLE_SHIFT 30
#define L4_PAGETABLE_SHIFT 39

/* One 4kB page of 8-byte entries at every level. */
#define ENTRIES_PER_L1_PAGETABLE 512
#define ENTRIES_PER_L2_PAGETABLE 512
#define ENTRIES_PER_L3_PAGETABLE 512
#define ENTRIES_PER_L4_PAGETABLE 512

/* Base of Xen's virtual mappings (see __pa()/__va() below). */
#define __PAGE_OFFSET (0xFFFF830000000000)

#elif defined(__i386__)

/*
 * x86/32: two-level paging. Each level decodes 10 virtual-address bits,
 * on top of the 12-bit byte offset within a 4kB page.
 */
#define L1_PAGETABLE_SHIFT 12
#define L2_PAGETABLE_SHIFT 22

/* One 4kB page of 4-byte entries at each level. */
#define ENTRIES_PER_L1_PAGETABLE 1024
#define ENTRIES_PER_L2_PAGETABLE 1024

/* Base of Xen's virtual mappings (see __pa()/__va() below). */
#define __PAGE_OFFSET (0xFC400000)

#endif
/* The smallest (L1) page is the unit of allocation and mapping. */
#define PAGE_SHIFT L1_PAGETABLE_SHIFT
#ifndef __ASSEMBLY__
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#else
/* Assemblers do not understand the 'UL' suffix. */
#define PAGE_SIZE (1 << PAGE_SHIFT)
#endif
/* Clears the low (offset/flag) bits; keeps the frame-address bits. */
#define PAGE_MASK (~(PAGE_SIZE-1))

/* Zero-fill / copy one whole page given virtual addresses. */
#define clear_page(_p) memset((void *)(_p), 0, PAGE_SIZE)
#define copy_page(_t,_f) memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)
#ifndef __ASSEMBLY__
#include <xen/config.h>
/*
 * Wrap the raw entry word of each pagetable level in a distinct
 * single-member struct, so entries of different levels cannot be
 * mixed up without a compile error.
 */
typedef struct { unsigned long l1_lo; } l1_pgentry_t;
typedef struct { unsigned long l2_lo; } l2_pgentry_t;
typedef struct { unsigned long l3_lo; } l3_pgentry_t;
typedef struct { unsigned long l4_lo; } l4_pgentry_t;
#endif /* !__ASSEMBLY__ */
/* Strip type from a table entry: recover the raw unsigned long. */
#define l1_pgentry_val(_x) ((_x).l1_lo)
#define l2_pgentry_val(_x) ((_x).l2_lo)
#define l3_pgentry_val(_x) ((_x).l3_lo)
#define l4_pgentry_val(_x) ((_x).l4_lo)

/* Add type to a table entry: wrap a raw value in the typed struct. */
#define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } )
#define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } )
#define mk_l3_pgentry(_x) ( (l3_pgentry_t) { (_x) } )
#define mk_l4_pgentry(_x) ( (l4_pgentry_t) { (_x) } )
/*
 * Turn a typed table entry into a page index (frame number).
 * NB. the shift also discards the low flag bits.
 */
#define l1_pgentry_to_pagenr(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)
#define l2_pgentry_to_pagenr(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
#define l3_pgentry_to_pagenr(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT)
#define l4_pgentry_to_pagenr(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT)

/* Turn a typed table entry into a physical address (flags masked off). */
#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK)
#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK)
#define l3_pgentry_to_phys(_x) (l3_pgentry_val(_x) & PAGE_MASK)
#define l4_pgentry_to_phys(_x) (l4_pgentry_val(_x) & PAGE_MASK)

/*
 * Pagetable walking: map an entry at level N to a virtual pointer to the
 * level-(N-1) table it references, via the direct mapping (__va).
 */
#define l2_pgentry_to_l1(_x) \
    ((l1_pgentry_t *)__va(l2_pgentry_val(_x) & PAGE_MASK))
#define l3_pgentry_to_l2(_x) \
    ((l2_pgentry_t *)__va(l3_pgentry_val(_x) & PAGE_MASK))
#define l4_pgentry_to_l3(_x) \
    ((l3_pgentry_t *)__va(l4_pgentry_val(_x) & PAGE_MASK))
/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(_a) \
    (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
#if defined(__i386__)
/* i386: a 32-bit address >> 22 is already < 1024, so no mask is needed. */
#define l2_table_offset(_a) \
    ((_a) >> L2_PAGETABLE_SHIFT)
#elif defined(__x86_64__)
/* x86/64: every level indexes 512 entries, so mask to 9 bits. */
#define l2_table_offset(_a) \
    (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE - 1))
#define l3_table_offset(_a) \
    (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE - 1))
#define l4_table_offset(_a) \
    (((_a) >> L4_PAGETABLE_SHIFT) & (ENTRIES_PER_L4_PAGETABLE - 1))
#endif
/* Given a virtual address, get an entry offset into a linear page table. */
#if defined(__i386__)
#define l1_linear_offset(_a) ((_a) >> PAGE_SHIFT)
#elif defined(__x86_64__)
/* Keep only the 48 translated address bits before indexing. */
#define l1_linear_offset(_a) (((_a) & ((1UL << 48) - 1)) >> PAGE_SHIFT)
#endif

/*
 * Architecture-neutral aliases for the top-level pagetable:
 * L2 on two-level i386, L4 on four-level x86/64.
 */
#if defined(__i386__)
#define pagetable_t l2_pgentry_t
#define pagetable_val(_x) ((_x).l2_lo)
#define mk_pagetable(_x) ( (l2_pgentry_t) { (_x) } )
#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L2_PAGETABLE
#elif defined(__x86_64__)
#define pagetable_t l4_pgentry_t
#define pagetable_val(_x) ((_x).l4_lo)
#define mk_pagetable(_x) ( (l4_pgentry_t) { (_x) } )
#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L4_PAGETABLE
#endif
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
/* Convert between Xen-mapped virtual addresses and physical addresses. */
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
/* frame_table is an array of per-frame structs, indexed by frame number. */
#define page_address(_p) (__va(((_p) - frame_table) << PAGE_SHIFT))
#define pfn_to_page(_pfn) (frame_table + (_pfn))
#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
/*
 * NOTE(review): (page - frame_table) is a signed ptrdiff_t; the result for
 * a pointer below frame_table depends on the type of max_mapnr (signed vs
 * unsigned comparison) -- confirm callers never pass such a pointer.
 */
#define VALID_PAGE(page) ((page - frame_table) < max_mapnr)
/*
 * NB. We don't currently track I/O holes in the physical RAM space.
 * For now we guess that I/O devices will be mapped in the first 1MB
 * (e.g., VGA buffers) or beyond the end of physical RAM.
 */
/*
 * NOTE(review): '> 0x100' also excludes pfn 0x100 itself (the page at
 * exactly 1MB); '>= 0x100' would match the 1MB heuristic above. Left
 * as-is since the current test errs on the conservative side -- confirm.
 */
#define pfn_is_ram(_pfn) (((_pfn) > 0x100) && ((_pfn) < max_page))

/* High table entries are reserved by the hypervisor. */
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE \
    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
    (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/flushtlb.h>

/*
 * The pagetables are mapped into the virtual address space at
 * LINEAR_PT_VIRT_START, so the L1 entry for a virtual address can be
 * read directly as linear_pg_table[l1_linear_offset(va)], and the
 * corresponding L2 entry via linear_l2_table.
 */
#define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START)
#define linear_l2_table ((l2_pgentry_t *)(LINEAR_PT_VIRT_START+(LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT))))

/* Frame number of the L1 table that maps virtual address _va. */
#define va_to_l1mfn(_va) (l2_pgentry_val(linear_l2_table[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)

/* Xen's own ("idle") top-level pagetable, defined in arch setup code. */
extern pagetable_t idle_pg_table[ENTRIES_PER_PAGETABLE];

extern void paging_init(void);
/* Flush global pages as well. */

/* Clear CR4.PGE: writing CR4 with PGE clear drops global TLB entries. */
#define __pge_off()                                                 \
    do {                                                            \
        __asm__ __volatile__(                                       \
            "mov %0, %%cr4;  # turn off PGE "                       \
            :: "r" (mmu_cr4_features & ~X86_CR4_PGE));              \
    } while (0)
/*
 * Restore CR4 to its full feature set, re-enabling CR4.PGE.
 * (The asm comment previously said "turn off PGE" -- it was copied from
 * __pge_off() above; this macro writes the unmasked mmu_cr4_features.)
 */
#define __pge_on()                                                  \
    do {                                                            \
        __asm__ __volatile__(                                       \
            "mov %0, %%cr4;  # turn on PGE "                        \
            :: "r" (mmu_cr4_features));                             \
    } while (0)
/* Flush the entire TLB, including global-page entries, by toggling PGE. */
#define __flush_tlb_pge()                                           \
    do {                                                            \
        __pge_off();                                                \
        __flush_tlb();                                              \
        __pge_on();                                                 \
    } while (0)

/* Invalidate the single TLB entry that maps __addr. */
#define __flush_tlb_one(__addr) \
    __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
185 #endif /* !__ASSEMBLY__ */
/* Pagetable entry flag bits (the low bits below the frame address). */
#define _PAGE_PRESENT 0x001  /* entry is valid */
#define _PAGE_RW 0x002       /* writable */
#define _PAGE_USER 0x004     /* user-mode accessible */
#define _PAGE_PWT 0x008      /* page-level write-through */
#define _PAGE_PCD 0x010      /* page-level cache disable */
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
/* NB. bit 7 is PAT in an L1 entry but PSE (superpage) in higher levels. */
#define _PAGE_PAT 0x080
#define _PAGE_PSE 0x080
#define _PAGE_GLOBAL 0x100   /* survives TLB flushes that preserve PGE */

/* Default attribute sets for Xen's own mappings. */
#define __PAGE_HYPERVISOR \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#define MAKE_GLOBAL(_x) ((_x) | _PAGE_GLOBAL)

#define PAGE_HYPERVISOR MAKE_GLOBAL(__PAGE_HYPERVISOR)
#define PAGE_HYPERVISOR_NOCACHE MAKE_GLOBAL(__PAGE_HYPERVISOR_NOCACHE)
209 #ifndef __ASSEMBLY__
211 static __inline__ int get_order(unsigned long size)
212 {
213 int order;
215 size = (size-1) >> (PAGE_SHIFT-1);
216 order = -1;
217 do {
218 size >>= 1;
219 order++;
220 } while (size);
221 return order;
222 }
/* Presumably tears down the boot-time low identity mappings -- confirm. */
extern void zap_low_mappings(void);

/*
 * Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt.
 * @flags supplies the _PAGE_* attribute bits; returns an int status
 * (presumably 0 on success -- confirm against the definition).
 */
extern int
map_pages(
    pagetable_t *pt,
    unsigned long v,
    unsigned long p,
    unsigned long s,
    unsigned long flags);
235 #endif /* !__ASSEMBLY__ */
#endif /* __X86_PAGE_H__ */