debuggers.hg: view xen/include/asm-x86/page.h @ 3724:253e8e10e986

bitkeeper revision 1.1159.212.105 (420666bemy1hHhMRPUknF0p3-jxn_w)

x86/64 debug builds use guard pages in unallocated heap space and for
stack-limit enforcement.

Signed-off-by: keir.fraser@cl.cam.ac.uk
author:   kaf24@viper.(none)
date:     Sun Feb 06 18:49:34 2005 +0000
parents:  fec8b1778268
children: 88957a238191 f504382b179f 9f7935ea4606
/******************************************************************************
 * asm-x86/page.h
 *
 * Definitions relating to page tables.
 */

#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

#if defined(__x86_64__)

#define L1_PAGETABLE_SHIFT       12
#define L2_PAGETABLE_SHIFT       21
#define L3_PAGETABLE_SHIFT       30
#define L4_PAGETABLE_SHIFT       39

#define ENTRIES_PER_L1_PAGETABLE 512
#define ENTRIES_PER_L2_PAGETABLE 512
#define ENTRIES_PER_L3_PAGETABLE 512
#define ENTRIES_PER_L4_PAGETABLE 512

#define __PAGE_OFFSET            (0xFFFF830000000000)

#elif defined(__i386__)

#define L1_PAGETABLE_SHIFT       12
#define L2_PAGETABLE_SHIFT       22

#define ENTRIES_PER_L1_PAGETABLE 1024
#define ENTRIES_PER_L2_PAGETABLE 1024

#define __PAGE_OFFSET            (0xFC400000)

#endif

#define PAGE_SHIFT               L1_PAGETABLE_SHIFT
#ifndef __ASSEMBLY__
#define PAGE_SIZE                (1UL << PAGE_SHIFT)
#else
#define PAGE_SIZE                (1 << PAGE_SHIFT)
#endif
#define PAGE_MASK                (~(PAGE_SIZE-1))
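/*
 * Worked example (illustrative, not in the original header): with
 * PAGE_SHIFT == 12, PAGE_SIZE is 4096 (0x1000) and PAGE_MASK clears the
 * low 12 bits, so (addr & PAGE_MASK) rounds an address down to its page
 * base and (addr & ~PAGE_MASK) is the offset within the page.
 */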
#define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)
#define copy_page(_t,_f)    memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)

#ifndef __ASSEMBLY__
#include <xen/config.h>
typedef struct { unsigned long l1_lo; } l1_pgentry_t;
typedef struct { unsigned long l2_lo; } l2_pgentry_t;
typedef struct { unsigned long l3_lo; } l3_pgentry_t;
typedef struct { unsigned long l4_lo; } l4_pgentry_t;
#endif /* !__ASSEMBLY__ */

/* Strip type from a table entry. */
#define l1_pgentry_val(_x)  ((_x).l1_lo)
#define l2_pgentry_val(_x)  ((_x).l2_lo)
#define l3_pgentry_val(_x)  ((_x).l3_lo)
#define l4_pgentry_val(_x)  ((_x).l4_lo)

/* Add type to a table entry. */
#define mk_l1_pgentry(_x)   ( (l1_pgentry_t) { (_x) } )
#define mk_l2_pgentry(_x)   ( (l2_pgentry_t) { (_x) } )
#define mk_l3_pgentry(_x)   ( (l3_pgentry_t) { (_x) } )
#define mk_l4_pgentry(_x)   ( (l4_pgentry_t) { (_x) } )
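/*
 * Usage sketch (illustrative, not in the original header): the typed
 * wrappers keep raw longs and table entries from being mixed up. 'pfn'
 * and 'flags' below are hypothetical.
 */
#if 0
l1_pgentry_t pte  = mk_l1_pgentry((pfn << PAGE_SHIFT) | flags);
unsigned long raw = l1_pgentry_val(pte);    /* back to the raw bits */
#endif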
/* Turn a typed table entry into a page index. */
#define l1_pgentry_to_pagenr(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)
#define l2_pgentry_to_pagenr(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
#define l3_pgentry_to_pagenr(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT)
#define l4_pgentry_to_pagenr(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT)

/* Turn a typed table entry into a physical address. */
#define l1_pgentry_to_phys(_x)   (l1_pgentry_val(_x) & PAGE_MASK)
#define l2_pgentry_to_phys(_x)   (l2_pgentry_val(_x) & PAGE_MASK)
#define l3_pgentry_to_phys(_x)   (l3_pgentry_val(_x) & PAGE_MASK)
#define l4_pgentry_to_phys(_x)   (l4_pgentry_val(_x) & PAGE_MASK)

/* Pagetable walking. */
#define l2_pgentry_to_l1(_x) \
    ((l1_pgentry_t *)__va(l2_pgentry_val(_x) & PAGE_MASK))
#define l3_pgentry_to_l2(_x) \
    ((l2_pgentry_t *)__va(l3_pgentry_val(_x) & PAGE_MASK))
#define l4_pgentry_to_l3(_x) \
    ((l3_pgentry_t *)__va(l4_pgentry_val(_x) & PAGE_MASK))

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(_a) \
    (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
#if defined(__i386__)
#define l2_table_offset(_a) \
    ((_a) >> L2_PAGETABLE_SHIFT)
#elif defined(__x86_64__)
#define l2_table_offset(_a) \
    (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE - 1))
#define l3_table_offset(_a) \
    (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE - 1))
#define l4_table_offset(_a) \
    (((_a) >> L4_PAGETABLE_SHIFT) & (ENTRIES_PER_L4_PAGETABLE - 1))
#endif
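/*
 * Worked example (illustrative, not in the original header): on x86/64
 * each offset macro extracts a 9-bit index from the address. For
 * __PAGE_OFFSET (0xFFFF830000000000), l4_table_offset() gives
 * (0xFFFF830000000000 >> 39) & 511 == 262, i.e. the hypervisor mapping
 * begins at slot 262 of the top-level table.
 */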
#if defined(__i386__)
#define pagetable_t l2_pgentry_t
#define pagetable_val(_x)   ((_x).l2_lo)
#define mk_pagetable(_x)    ( (l2_pgentry_t) { (_x) } )
#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L2_PAGETABLE
#elif defined(__x86_64__)
#define pagetable_t l4_pgentry_t
#define pagetable_val(_x)   ((_x).l4_lo)
#define mk_pagetable(_x)    ( (l4_pgentry_t) { (_x) } )
#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L4_PAGETABLE
#endif

#define PAGE_OFFSET         ((unsigned long)__PAGE_OFFSET)
#define __pa(x)             ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)             ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define page_address(_p)    (__va(((_p) - frame_table) << PAGE_SHIFT))
#define pfn_to_page(_pfn)   (frame_table + (_pfn))
#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page)    ((page - frame_table) < max_mapnr)
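/*
 * Worked example (illustrative, not in the original header): __va() and
 * __pa() translate by a constant offset. On i386, __va(0x100000) is
 * (void *)0xFC500000 given the PAGE_OFFSET of 0xFC400000 above, and
 * __pa(__va(p)) == p for any physical address p in the mapped range.
 */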
/*
 * NB. We don't currently track I/O holes in the physical RAM space.
 * For now we guess that I/O devices will be mapped in the first 1MB
 * (e.g., VGA buffers) or beyond the end of physical RAM.
 */
#define pfn_is_ram(_pfn)    (((_pfn) > 0x100) && ((_pfn) < max_page))

/* High table entries are reserved by the hypervisor. */
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE     \
    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
    (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE)

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/flushtlb.h>

#define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START)
#define linear_l2_table                                                 \
    ((l2_pgentry_t *)(LINEAR_PT_VIRT_START +                            \
     (LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))

#define va_to_l1mfn(_va) \
    (l2_pgentry_val(linear_l2_table[(_va) >> L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
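/*
 * Usage sketch (illustrative, not in the original header): the linear
 * mapping lets a PTE be read without walking the tables by hand. 'va'
 * below is hypothetical.
 */
#if 0
l1_pgentry_t pte  = linear_pg_table[va >> PAGE_SHIFT];
unsigned long mfn = 0;
if ( l1_pgentry_val(pte) & _PAGE_PRESENT )
    mfn = l1_pgentry_to_pagenr(pte);    /* machine frame backing 'va' */
#endif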
extern pagetable_t idle_pg_table[ENTRIES_PER_PAGETABLE];

extern void paging_init(void);

/* Flush global pages as well. */

#define __pge_off()                                             \
    do {                                                        \
        __asm__ __volatile__(                                   \
            "mov %0, %%cr4;  # turn off PGE     "               \
            :: "r" (mmu_cr4_features & ~X86_CR4_PGE));          \
    } while (0)

#define __pge_on()                                              \
    do {                                                        \
        __asm__ __volatile__(                                   \
            "mov %0, %%cr4;  # turn on PGE      "               \
            :: "r" (mmu_cr4_features));                         \
    } while (0)

#define __flush_tlb_pge()                                       \
    do {                                                        \
        __pge_off();                                            \
        __flush_tlb();                                          \
        __pge_on();                                             \
    } while (0)
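/*
 * Note (editorial, consistent with documented x86 behaviour): reloading
 * CR3 alone does not evict _PAGE_GLOBAL mappings from the TLB; toggling
 * CR4.PGE off and back on around the flush invalidates the global
 * entries as well.
 */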
#define __flush_tlb_one(__addr) \
    __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
#endif /* !__ASSEMBLY__ */

#define _PAGE_PRESENT   0x001
#define _PAGE_RW        0x002
#define _PAGE_USER      0x004
#define _PAGE_PWT       0x008
#define _PAGE_PCD       0x010
#define _PAGE_ACCESSED  0x020
#define _PAGE_DIRTY     0x040
#define _PAGE_PAT       0x080
#define _PAGE_PSE       0x080
#define _PAGE_GLOBAL    0x100

#define __PAGE_HYPERVISOR \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#define MAKE_GLOBAL(_x) ((_x) | _PAGE_GLOBAL)

#define PAGE_HYPERVISOR MAKE_GLOBAL(__PAGE_HYPERVISOR)
#define PAGE_HYPERVISOR_NOCACHE MAKE_GLOBAL(__PAGE_HYPERVISOR_NOCACHE)
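/*
 * Worked example (illustrative, not in the original header):
 * PAGE_HYPERVISOR expands to 0x001 | 0x002 | 0x040 | 0x020 | 0x100
 * == 0x163, i.e. a present, writable, accessed, dirty, global mapping.
 * (_PAGE_PAT and _PAGE_PSE share bit 7: that bit means PAT in an L1
 * entry but PSE in an L2 entry.)
 */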
#ifndef __ASSEMBLY__

static __inline__ int get_order(unsigned long size)
{
    int order;

    size = (size-1) >> (PAGE_SHIFT-1);
    order = -1;
    do {
        size >>= 1;
        order++;
    } while (size);
    return order;
}
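/*
 * Worked example (illustrative, not in the original header): get_order()
 * returns the smallest n such that (PAGE_SIZE << n) >= size. With 4KB
 * pages, get_order(4096) == 0, get_order(4097) == 1, and
 * get_order(16384) == 2.
 */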
extern void zap_low_mappings(void);

/* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
extern int
map_pages(
    pagetable_t *pt,
    unsigned long v,
    unsigned long p,
    unsigned long s,
    unsigned long flags);
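/*
 * Usage sketch (illustrative, not in the original header): mapping one
 * page of device memory uncached into the idle page table. 'rc', 'vaddr'
 * and 'paddr' are hypothetical.
 */
#if 0
rc = map_pages(idle_pg_table, vaddr, paddr, PAGE_SIZE,
               PAGE_HYPERVISOR_NOCACHE);
#endif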
#endif /* !__ASSEMBLY__ */

#endif /* __X86_PAGE_H__ */