debuggers.hg
annotate xen/include/asm-x86/page.h @ 3632:fec8b1778268
bitkeeper revision 1.1159.212.60 (41febc4bKKSkh9u-Zes9v2CmBuLZxA)
More bootstrap fixes for x86/64. Next thing to do is sort out the IDT and
get traps.c working; then we can get rid of a bunch of dummy labels from
end of boot/x86_64.S. We're also going to need some kind of entry.S before
we can safely enable interrupts. Also bear in mind that not all of physical
RAM may be mapped (only first 1GB) and no m2p table is yet allocated or
mapped. Plenty to be done!
author | kaf24@viper.(none) |
---|---|
date | Mon Jan 31 23:16:27 2005 +0000 (2005-01-31) |
parents | d1e0d9a8fde0 |
children | bbe8541361dd 253e8e10e986 |
rev | line source |
---|---|
kaf24@1830 | 1 /****************************************************************************** |
kaf24@1830 | 2 * asm-x86/page.h |
kaf24@1830 | 3 * |
kaf24@1830 | 4 * Definitions relating to page tables. |
kaf24@1830 | 5 */ |
kaf24@1830 | 6 |
kaf24@1830 | 7 #ifndef __X86_PAGE_H__ |
kaf24@1830 | 8 #define __X86_PAGE_H__ |
kaf24@1830 | 9 |
kaf24@1830 | 10 #if defined(__x86_64__) |
kaf24@1490 | 11 |
kaf24@1830 | 12 #define L1_PAGETABLE_SHIFT 12 |
kaf24@1830 | 13 #define L2_PAGETABLE_SHIFT 21 |
kaf24@1830 | 14 #define L3_PAGETABLE_SHIFT 30 |
kaf24@1830 | 15 #define L4_PAGETABLE_SHIFT 39 |
kaf24@1830 | 16 |
kaf24@1830 | 17 #define ENTRIES_PER_L1_PAGETABLE 512 |
kaf24@1830 | 18 #define ENTRIES_PER_L2_PAGETABLE 512 |
kaf24@1830 | 19 #define ENTRIES_PER_L3_PAGETABLE 512 |
kaf24@1830 | 20 #define ENTRIES_PER_L4_PAGETABLE 512 |
kaf24@1830 | 21 |
kaf24@1830 | 22 #define __PAGE_OFFSET (0xFFFF830000000000) |
kaf24@1830 | 23 |
kaf24@1830 | 24 #elif defined(__i386__) |
kaf24@1490 | 25 |
kaf24@1490 | 26 #define L1_PAGETABLE_SHIFT 12 |
kaf24@1490 | 27 #define L2_PAGETABLE_SHIFT 22 |
kaf24@1490 | 28 |
kaf24@1490 | 29 #define ENTRIES_PER_L1_PAGETABLE 1024 |
kaf24@1490 | 30 #define ENTRIES_PER_L2_PAGETABLE 1024 |
kaf24@1490 | 31 |
kaf24@1830 | 32 #define __PAGE_OFFSET (0xFC400000) |
kaf24@1830 | 33 |
kaf24@1830 | 34 #endif |
kaf24@1830 | 35 |
kaf24@1490 | 36 #define PAGE_SHIFT L1_PAGETABLE_SHIFT |
kaf24@1490 | 37 #define PAGE_SIZE (1UL << PAGE_SHIFT) |
kaf24@1490 | 38 #define PAGE_MASK (~(PAGE_SIZE-1)) |
kaf24@1490 | 39 |
kaf24@1490 | 40 #define clear_page(_p) memset((void *)(_p), 0, PAGE_SIZE) |
kaf24@1490 | 41 #define copy_page(_t,_f) memcpy((void *)(_t), (void *)(_f), PAGE_SIZE) |
kaf24@1490 | 42 |
kaf24@1490 | 43 #ifndef __ASSEMBLY__ |
kaf24@1490 | 44 #include <xen/config.h> |
kaf24@1490 | 45 typedef struct { unsigned long l1_lo; } l1_pgentry_t; |
kaf24@1490 | 46 typedef struct { unsigned long l2_lo; } l2_pgentry_t; |
kaf24@1830 | 47 typedef struct { unsigned long l3_lo; } l3_pgentry_t; |
kaf24@1830 | 48 typedef struct { unsigned long l4_lo; } l4_pgentry_t; |
kaf24@1490 | 49 #endif /* !__ASSEMBLY__ */ |
kaf24@1490 | 50 |
kaf24@1490 | 51 /* Strip type from a table entry. */ |
kaf24@1490 | 52 #define l1_pgentry_val(_x) ((_x).l1_lo) |
kaf24@1490 | 53 #define l2_pgentry_val(_x) ((_x).l2_lo) |
kaf24@1830 | 54 #define l3_pgentry_val(_x) ((_x).l3_lo) |
kaf24@1830 | 55 #define l4_pgentry_val(_x) ((_x).l4_lo) |
kaf24@1490 | 56 |
kaf24@1490 | 57 /* Add type to a table entry. */ |
kaf24@1490 | 58 #define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } ) |
kaf24@1490 | 59 #define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } ) |
kaf24@1830 | 60 #define mk_l3_pgentry(_x) ( (l3_pgentry_t) { (_x) } ) |
kaf24@1830 | 61 #define mk_l4_pgentry(_x) ( (l4_pgentry_t) { (_x) } ) |
kaf24@1490 | 62 |
kaf24@1490 | 63 /* Turn a typed table entry into a page index. */ |
kaf24@1490 | 64 #define l1_pgentry_to_pagenr(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT) |
kaf24@1490 | 65 #define l2_pgentry_to_pagenr(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT) |
kaf24@1830 | 66 #define l3_pgentry_to_pagenr(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT) |
kaf24@1830 | 67 #define l4_pgentry_to_pagenr(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT) |
kaf24@1490 | 68 |
kaf24@1490 | 69 /* Turn a typed table entry into a physical address. */ |
kaf24@1490 | 70 #define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK) |
kaf24@1490 | 71 #define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK) |
kaf24@1830 | 72 #define l3_pgentry_to_phys(_x) (l3_pgentry_val(_x) & PAGE_MASK) |
kaf24@1830 | 73 #define l4_pgentry_to_phys(_x) (l4_pgentry_val(_x) & PAGE_MASK) |
kaf24@1490 | 74 |
kaf24@1830 | 75 /* Pagetable walking. */ |
kaf24@1830 | 76 #define l2_pgentry_to_l1(_x) \ |
kaf24@1490 | 77 ((l1_pgentry_t *)__va(l2_pgentry_val(_x) & PAGE_MASK)) |
kaf24@1830 | 78 #define l3_pgentry_to_l2(_x) \ |
kaf24@1830 | 79 ((l2_pgentry_t *)__va(l3_pgentry_val(_x) & PAGE_MASK)) |
kaf24@1830 | 80 #define l4_pgentry_to_l3(_x) \ |
kaf24@1830 | 81 ((l3_pgentry_t *)__va(l4_pgentry_val(_x) & PAGE_MASK)) |
kaf24@1490 | 82 |
kaf24@1490 | 83 /* Given a virtual address, get an entry offset into a page table. */ |
kaf24@1490 | 84 #define l1_table_offset(_a) \ |
kaf24@1490 | 85 (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1)) |
kaf24@1830 | 86 #if defined(__i386__) |
kaf24@1490 | 87 #define l2_table_offset(_a) \ |
kaf24@1490 | 88 ((_a) >> L2_PAGETABLE_SHIFT) |
kaf24@1830 | 89 #elif defined(__x86_64__) |
kaf24@1830 | 90 #define l2_table_offset(_a) \ |
kaf24@3632 | 91 (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE - 1)) |
kaf24@1830 | 92 #define l3_table_offset(_a) \ |
kaf24@3632 | 93 (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE - 1)) |
kaf24@1830 | 94 #define l4_table_offset(_a) \ |
kaf24@3632 | 95 (((_a) >> L4_PAGETABLE_SHIFT) & (ENTRIES_PER_L4_PAGETABLE - 1)) |
kaf24@3632 | 96 #endif |
kaf24@3632 | 97 |
kaf24@3632 | 98 #if defined(__i386__) |
kaf24@3632 | 99 #define pagetable_t l2_pgentry_t |
kaf24@3632 | 100 #define pagetable_val(_x) ((_x).l2_lo) |
kaf24@3632 | 101 #define mk_pagetable(_x) ( (l2_pgentry_t) { (_x) } ) |
kaf24@3632 | 102 #define ENTRIES_PER_PAGETABLE ENTRIES_PER_L2_PAGETABLE |
kaf24@3632 | 103 #elif defined(__x86_64__) |
kaf24@3632 | 104 #define pagetable_t l4_pgentry_t |
kaf24@3632 | 105 #define pagetable_val(_x) ((_x).l4_lo) |
kaf24@3632 | 106 #define mk_pagetable(_x) ( (l4_pgentry_t) { (_x) } ) |
kaf24@3632 | 107 #define ENTRIES_PER_PAGETABLE ENTRIES_PER_L4_PAGETABLE |
kaf24@1830 | 108 #endif |
kaf24@1490 | 109 |
kaf24@1490 | 110 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) |
kaf24@1490 | 111 #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) |
kaf24@1490 | 112 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
kaf24@1490 | 113 #define page_address(_p) (__va(((_p) - frame_table) << PAGE_SHIFT)) |
kaf24@3392 | 114 #define pfn_to_page(_pfn) (frame_table + (_pfn)) |
kaf24@1974 | 115 #define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT)) |
kaf24@1490 | 116 #define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT)) |
kaf24@1490 | 117 #define VALID_PAGE(page) ((page - frame_table) < max_mapnr) |
kaf24@1490 | 118 |
kaf24@1490 | 119 /* |
kaf24@1490 | 120 * NB. We don't currently track I/O holes in the physical RAM space. |
kaf24@1490 | 121 * For now we guess that I/O devices will be mapped in the first 1MB |
kaf24@1490 | 122 * (e.g., VGA buffers) or beyond the end of physical RAM. |
kaf24@1490 | 123 */ |
kaf24@1490 | 124 #define pfn_is_ram(_pfn) (((_pfn) > 0x100) && ((_pfn) < max_page)) |
kaf24@1490 | 125 |
kaf24@1490 | 126 /* High table entries are reserved by the hypervisor. */ |
kaf24@1490 | 127 #define DOMAIN_ENTRIES_PER_L2_PAGETABLE \ |
kaf24@1490 | 128 (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT) |
kaf24@1490 | 129 #define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \ |
kaf24@1490 | 130 (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE) |
kaf24@1490 | 131 |
kaf24@1490 | 132 #ifndef __ASSEMBLY__ |
kaf24@1490 | 133 #include <asm/processor.h> |
kaf24@1490 | 134 #include <asm/fixmap.h> |
kaf24@1490 | 135 #include <asm/bitops.h> |
kaf24@1490 | 136 #include <asm/flushtlb.h> |
kaf24@1490 | 137 |
kaf24@1490 | 138 #define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START) |
kaf24@1490 | 139 #define linear_l2_table ((l2_pgentry_t *)(LINEAR_PT_VIRT_START+(LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT)))) |
kaf24@1490 | 140 |
kaf24@1490 | 141 #define va_to_l1mfn(_va) (l2_pgentry_val(linear_l2_table[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT) |
kaf24@1490 | 142 |
kaf24@3632 | 143 extern pagetable_t idle_pg_table[ENTRIES_PER_PAGETABLE]; |
kaf24@3336 | 144 |
kaf24@1490 | 145 extern void paging_init(void); |
kaf24@1490 | 146 |
kaf24@1490 | 147 /* Flush global pages as well. */ |
kaf24@1490 | 148 |
/*
 * Disable global pages: write CR4 with the PGE bit cleared. On PGE-capable
 * CPUs this flushes global TLB entries, which a plain CR3 reload would not.
 */
#define __pge_off() \
    do { \
        __asm__ __volatile__( \
            "mov %0, %%cr4; # turn off PGE " \
            :: "r" (mmu_cr4_features & ~X86_CR4_PGE)); \
    } while (0)

/*
 * Re-enable global pages: restore the full CR4 feature mask.
 * NB. the '#' introduces an assembler comment; the original text said
 * "turn off PGE" here, a copy-paste error from __pge_off() — fixed.
 */
#define __pge_on() \
    do { \
        __asm__ __volatile__( \
            "mov %0, %%cr4; # turn on PGE " \
            :: "r" (mmu_cr4_features)); \
    } while (0)
kaf24@1490 | 162 |
kaf24@1490 | 163 |
kaf24@1490 | 164 #define __flush_tlb_pge() \ |
kaf24@1490 | 165 do { \ |
kaf24@1490 | 166 __pge_off(); \ |
kaf24@1490 | 167 __flush_tlb(); \ |
kaf24@1490 | 168 __pge_on(); \ |
kaf24@1490 | 169 } while (0) |
kaf24@1490 | 170 |
kaf24@1490 | 171 #define __flush_tlb_one(__addr) \ |
kaf24@1490 | 172 __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr))) |
kaf24@1490 | 173 |
kaf24@1490 | 174 #endif /* !__ASSEMBLY__ */ |
kaf24@1490 | 175 |
kaf24@1490 | 176 |
kaf24@1490 | 177 #define _PAGE_PRESENT 0x001 |
kaf24@1490 | 178 #define _PAGE_RW 0x002 |
kaf24@1490 | 179 #define _PAGE_USER 0x004 |
kaf24@1490 | 180 #define _PAGE_PWT 0x008 |
kaf24@1490 | 181 #define _PAGE_PCD 0x010 |
kaf24@1490 | 182 #define _PAGE_ACCESSED 0x020 |
kaf24@1490 | 183 #define _PAGE_DIRTY 0x040 |
kaf24@1490 | 184 #define _PAGE_PAT 0x080 |
kaf24@1490 | 185 #define _PAGE_PSE 0x080 |
kaf24@1490 | 186 #define _PAGE_GLOBAL 0x100 |
kaf24@1490 | 187 |
kaf24@1490 | 188 #define __PAGE_HYPERVISOR \ |
kaf24@1490 | 189 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) |
kaf24@1490 | 190 #define __PAGE_HYPERVISOR_NOCACHE \ |
kaf24@1490 | 191 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED) |
kaf24@1490 | 192 |
kaf24@1490 | 193 #define MAKE_GLOBAL(_x) ((_x) | _PAGE_GLOBAL) |
kaf24@1490 | 194 |
kaf24@1490 | 195 #define PAGE_HYPERVISOR MAKE_GLOBAL(__PAGE_HYPERVISOR) |
kaf24@1490 | 196 #define PAGE_HYPERVISOR_NOCACHE MAKE_GLOBAL(__PAGE_HYPERVISOR_NOCACHE) |
kaf24@1490 | 197 |
kaf24@1490 | 198 #ifndef __ASSEMBLY__ |
kaf24@3632 | 199 |
kaf24@1490 | 200 static __inline__ int get_order(unsigned long size) |
kaf24@1490 | 201 { |
kaf24@1490 | 202 int order; |
kaf24@1490 | 203 |
kaf24@1490 | 204 size = (size-1) >> (PAGE_SHIFT-1); |
kaf24@1490 | 205 order = -1; |
kaf24@1490 | 206 do { |
kaf24@1490 | 207 size >>= 1; |
kaf24@1490 | 208 order++; |
kaf24@1490 | 209 } while (size); |
kaf24@1490 | 210 return order; |
kaf24@1490 | 211 } |
kaf24@1696 | 212 |
kaf24@1696 | 213 extern void zap_low_mappings(void); |
kaf24@3632 | 214 |
kaf24@3632 | 215 /* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */ |
kaf24@3632 | 216 extern int |
kaf24@3632 | 217 map_pages( |
kaf24@3632 | 218 pagetable_t *pt, |
kaf24@3632 | 219 unsigned long v, |
kaf24@3632 | 220 unsigned long p, |
kaf24@3632 | 221 unsigned long s, |
kaf24@3632 | 222 unsigned long flags); |
kaf24@3632 | 223 |
kaf24@3632 | 224 #endif /* !__ASSEMBLY__ */ |
kaf24@1490 | 225 |
kaf24@1830 | 226 #endif /* __X86_PAGE_H__ */ |