
view xen/include/asm-x86/page.h @ 3632:fec8b1778268
bitkeeper revision 1.1159.212.60 (41febc4bKKSkh9u-Zes9v2CmBuLZxA)

More bootstrap fixes for x86/64. Next thing to do is sort out the IDT and
get traps.c working; then we can get rid of a bunch of dummy labels from
the end of boot/x86_64.S. We're also going to need some kind of entry.S
before we can safely enable interrupts. Also bear in mind that not all of
physical RAM may be mapped (only the first 1GB), and no m2p table is yet
allocated or mapped. Plenty to be done!

author   kaf24@viper.(none)
date     Mon Jan 31 23:16:27 2005 +0000
parents  d1e0d9a8fde0
children bbe8541361dd 253e8e10e986
/******************************************************************************
 * asm-x86/page.h
 *
 * Definitions relating to page tables.
 */

#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

#if defined(__x86_64__)

#define L1_PAGETABLE_SHIFT       12
#define L2_PAGETABLE_SHIFT       21
#define L3_PAGETABLE_SHIFT       30
#define L4_PAGETABLE_SHIFT       39

#define ENTRIES_PER_L1_PAGETABLE 512
#define ENTRIES_PER_L2_PAGETABLE 512
#define ENTRIES_PER_L3_PAGETABLE 512
#define ENTRIES_PER_L4_PAGETABLE 512

#define __PAGE_OFFSET            (0xFFFF830000000000)

#elif defined(__i386__)

#define L1_PAGETABLE_SHIFT       12
#define L2_PAGETABLE_SHIFT       22

#define ENTRIES_PER_L1_PAGETABLE 1024
#define ENTRIES_PER_L2_PAGETABLE 1024

#define __PAGE_OFFSET            (0xFC400000)

#endif

#define PAGE_SHIFT L1_PAGETABLE_SHIFT
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE-1))

#define clear_page(_p)   memset((void *)(_p), 0, PAGE_SIZE)
#define copy_page(_t,_f) memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)

#ifndef __ASSEMBLY__
#include <xen/config.h>
typedef struct { unsigned long l1_lo; } l1_pgentry_t;
typedef struct { unsigned long l2_lo; } l2_pgentry_t;
typedef struct { unsigned long l3_lo; } l3_pgentry_t;
typedef struct { unsigned long l4_lo; } l4_pgentry_t;
#endif /* !__ASSEMBLY__ */

/* Strip type from a table entry. */
#define l1_pgentry_val(_x) ((_x).l1_lo)
#define l2_pgentry_val(_x) ((_x).l2_lo)
#define l3_pgentry_val(_x) ((_x).l3_lo)
#define l4_pgentry_val(_x) ((_x).l4_lo)

/* Add type to a table entry. */
#define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } )
#define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } )
#define mk_l3_pgentry(_x) ( (l3_pgentry_t) { (_x) } )
#define mk_l4_pgentry(_x) ( (l4_pgentry_t) { (_x) } )
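
/*
 * Example (illustrative): the wrappers are round-trip identities, so for
 * a frame number `pfn' and flag bits `flags':
 *
 *   l1_pgentry_t pte = mk_l1_pgentry((pfn << PAGE_SHIFT) | flags);
 *   l1_pgentry_val(pte) == ((pfn << PAGE_SHIFT) | flags)
 *
 * The one-word struct exists purely so the compiler rejects accidental
 * mixing of entries from different levels; it adds no runtime cost.
 */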

/* Turn a typed table entry into a page index. */
#define l1_pgentry_to_pagenr(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)
#define l2_pgentry_to_pagenr(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
#define l3_pgentry_to_pagenr(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT)
#define l4_pgentry_to_pagenr(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT)

/* Turn a typed table entry into a physical address. */
#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK)
#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK)
#define l3_pgentry_to_phys(_x) (l3_pgentry_val(_x) & PAGE_MASK)
#define l4_pgentry_to_phys(_x) (l4_pgentry_val(_x) & PAGE_MASK)

/* Pagetable walking. */
#define l2_pgentry_to_l1(_x) \
    ((l1_pgentry_t *)__va(l2_pgentry_val(_x) & PAGE_MASK))
#define l3_pgentry_to_l2(_x) \
    ((l2_pgentry_t *)__va(l3_pgentry_val(_x) & PAGE_MASK))
#define l4_pgentry_to_l3(_x) \
    ((l3_pgentry_t *)__va(l4_pgentry_val(_x) & PAGE_MASK))

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(_a) \
    (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
#if defined(__i386__)
#define l2_table_offset(_a) \
    ((_a) >> L2_PAGETABLE_SHIFT)
#elif defined(__x86_64__)
#define l2_table_offset(_a) \
    (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE - 1))
#define l3_table_offset(_a) \
    (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE - 1))
#define l4_table_offset(_a) \
    (((_a) >> L4_PAGETABLE_SHIFT) & (ENTRIES_PER_L4_PAGETABLE - 1))
#endif
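
/*
 * Worked example (illustrative): on x86/64, decomposing the virtual
 * address va = 0xFFFF830000000000 (__PAGE_OFFSET above):
 *
 *   l4_table_offset(va) = (va >> 39) & 511 = 262
 *   l3_table_offset(va) = (va >> 30) & 511 = 0
 *   l2_table_offset(va) = (va >> 21) & 511 = 0
 *   l1_table_offset(va) = (va >> 12) & 511 = 0
 *
 * Each level consumes 9 bits of the address; the remaining low 12 bits
 * select a byte within the 4kB page.
 */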

#if defined(__i386__)
#define pagetable_t           l2_pgentry_t
#define pagetable_val(_x)     ((_x).l2_lo)
#define mk_pagetable(_x)      ( (l2_pgentry_t) { (_x) } )
#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L2_PAGETABLE
#elif defined(__x86_64__)
#define pagetable_t           l4_pgentry_t
#define pagetable_val(_x)     ((_x).l4_lo)
#define mk_pagetable(_x)      ( (l4_pgentry_t) { (_x) } )
#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L4_PAGETABLE
#endif

#define PAGE_OFFSET         ((unsigned long)__PAGE_OFFSET)
#define __pa(x)             ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)             ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define page_address(_p)    (__va(((_p) - frame_table) << PAGE_SHIFT))
#define pfn_to_page(_pfn)   (frame_table + (_pfn))
#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page)    ((page - frame_table) < max_mapnr)
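
/*
 * Example (illustrative): with the x86/64 __PAGE_OFFSET above,
 *
 *   __va(0x100000)                   == (void *)0xFFFF830000100000
 *   __pa((void *)0xFFFF830000100000) == 0x100000
 *
 * virt_to_page() then just scales __pa() down by PAGE_SIZE to index
 * frame_table.
 */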

/*
 * NB. We don't currently track I/O holes in the physical RAM space.
 * For now we guess that I/O devices will be mapped in the first 1MB
 * (e.g., VGA buffers) or beyond the end of physical RAM.
 */
#define pfn_is_ram(_pfn) (((_pfn) > 0x100) && ((_pfn) < max_page))

/* High table entries are reserved by the hypervisor. */
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE \
    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
    (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
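
/*
 * Worked example (illustrative; HYPERVISOR_VIRT_START's real value comes
 * from the config headers, not this file): if HYPERVISOR_VIRT_START were
 * 0xFC400000 on i386, then 0xFC400000 >> 22 = 1009, so a domain would own
 * L2 slots 0-1008 and the hypervisor the remaining 15 slots.
 */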

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/flushtlb.h>

#define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START)
#define linear_l2_table \
    ((l2_pgentry_t *)(LINEAR_PT_VIRT_START + \
        (LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))

#define va_to_l1mfn(_va) \
    (l2_pgentry_val(linear_l2_table[(_va) >> L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
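
/*
 * These rely on a recursive ("linear") mapping: one slot of the top-level
 * page table points back at the page table itself, so the MMU walks the
 * current tables on our behalf. E.g. the l1 entry mapping virtual address
 * `va' is just linear_pg_table[va >> PAGE_SHIFT], and linear_l2_table
 * applies the same trick one level up. (LINEAR_PT_VIRT_START itself is
 * defined in the config headers, not here.)
 */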

extern pagetable_t idle_pg_table[ENTRIES_PER_PAGETABLE];

extern void paging_init(void);

/* Flush global pages as well. */

#define __pge_off()                                             \
    do {                                                        \
        __asm__ __volatile__(                                   \
            "mov %0, %%cr4;  # turn off PGE "                   \
            : : "r" (mmu_cr4_features & ~X86_CR4_PGE));         \
    } while (0)

#define __pge_on()                                              \
    do {                                                        \
        __asm__ __volatile__(                                   \
            "mov %0, %%cr4;  # turn on PGE "                    \
            : : "r" (mmu_cr4_features));                        \
    } while (0)

#define __flush_tlb_pge()                                       \
    do {                                                        \
        __pge_off();                                            \
        __flush_tlb();                                          \
        __pge_on();                                             \
    } while (0)
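
/*
 * Note: an ordinary CR3 reload (__flush_tlb()) leaves global TLB entries
 * in place. Writing CR4 with PGE clear invalidates those too, hence the
 * off/flush/on dance above.
 */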

#define __flush_tlb_one(__addr) \
    __asm__ __volatile__("invlpg %0" : : "m" (*(char *)(__addr)))

#endif /* !__ASSEMBLY__ */

#define _PAGE_PRESENT  0x001
#define _PAGE_RW       0x002
#define _PAGE_USER     0x004
#define _PAGE_PWT      0x008
#define _PAGE_PCD      0x010
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY    0x040
#define _PAGE_PAT      0x080 /* bit 7 means PAT in an L1 entry... */
#define _PAGE_PSE      0x080 /* ...but PSE (superpage) in an L2+ entry */
#define _PAGE_GLOBAL   0x100

#define __PAGE_HYPERVISOR \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#define MAKE_GLOBAL(_x) ((_x) | _PAGE_GLOBAL)

#define PAGE_HYPERVISOR         MAKE_GLOBAL(__PAGE_HYPERVISOR)
#define PAGE_HYPERVISOR_NOCACHE MAKE_GLOBAL(__PAGE_HYPERVISOR_NOCACHE)

#ifndef __ASSEMBLY__

static __inline__ int get_order(unsigned long size)
{
    int order;

    size = (size-1) >> (PAGE_SHIFT-1);
    order = -1;
    do {
        size >>= 1;
        order++;
    } while (size);
    return order;
}
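
/*
 * Example (illustrative): get_order() returns the smallest order such
 * that (PAGE_SIZE << order) >= size:
 *
 *   get_order(1)             == 0
 *   get_order(PAGE_SIZE)     == 0
 *   get_order(PAGE_SIZE + 1) == 1
 *   get_order(4 * PAGE_SIZE) == 2
 */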

extern void zap_low_mappings(void);

/* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
extern int
map_pages(
    pagetable_t *pt,
    unsigned long v,
    unsigned long p,
    unsigned long s,
    unsigned long flags);
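
/*
 * Example (illustrative): mapping 1MB of uncached MMIO space at physical
 * address 0xFEE00000 to virtual address `v' in the idle page table might
 * look like:
 *
 *   map_pages(idle_pg_table, v, 0xFEE00000, 0x100000,
 *             PAGE_HYPERVISOR_NOCACHE);
 */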

#endif /* !__ASSEMBLY__ */

#endif /* __X86_PAGE_H__ */