debuggers.hg

view xen/include/asm-x86/page.h @ 3393:d1e0d9a8fde0

bitkeeper revision 1.1159.1.518 (41d448acXfjJK8iSoExMLCtViOvsoA)

Merge scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-2.0-testing.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author kaf24@scramble.cl.cam.ac.uk
date Thu Dec 30 18:27:56 2004 +0000 (2004-12-30)
parents 2711f7eb364c 7f2bf9fecd7e
children fec8b1778268 bbe8541361dd
/******************************************************************************
 * asm-x86/page.h
 *
 * Definitions relating to page tables.
 */

#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

#if defined(__x86_64__)

#define L1_PAGETABLE_SHIFT       12
#define L2_PAGETABLE_SHIFT       21
#define L3_PAGETABLE_SHIFT       30
#define L4_PAGETABLE_SHIFT       39

#define ENTRIES_PER_L1_PAGETABLE 512
#define ENTRIES_PER_L2_PAGETABLE 512
#define ENTRIES_PER_L3_PAGETABLE 512
#define ENTRIES_PER_L4_PAGETABLE 512

#define __PAGE_OFFSET            (0xFFFF830000000000)

#elif defined(__i386__)

#define L1_PAGETABLE_SHIFT       12
#define L2_PAGETABLE_SHIFT       22

#define ENTRIES_PER_L1_PAGETABLE 1024
#define ENTRIES_PER_L2_PAGETABLE 1024

#define __PAGE_OFFSET            (0xFC400000)

#endif

#define PAGE_SHIFT               L1_PAGETABLE_SHIFT
#define PAGE_SIZE                (1UL << PAGE_SHIFT)
#define PAGE_MASK                (~(PAGE_SIZE-1))

#define clear_page(_p)           memset((void *)(_p), 0, PAGE_SIZE)
#define copy_page(_t,_f)         memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)
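/*
 * Example (illustrative): PAGE_MASK rounds an address down to its page
 * boundary, and its complement extracts the byte offset within the page.
 * The address below is a made-up value:
 *
 *   unsigned long addr = 0x12345678UL;
 *   addr & PAGE_MASK    == 0x12345000    (page base)
 *   addr & ~PAGE_MASK   == 0x00000678    (offset into page)
 */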
#ifndef __ASSEMBLY__
#include <xen/config.h>
typedef struct { unsigned long l1_lo; } l1_pgentry_t;
typedef struct { unsigned long l2_lo; } l2_pgentry_t;
typedef struct { unsigned long l3_lo; } l3_pgentry_t;
typedef struct { unsigned long l4_lo; } l4_pgentry_t;
typedef struct { unsigned long pt_lo; } pagetable_t;
#endif /* !__ASSEMBLY__ */
/* Strip type from a table entry. */
#define l1_pgentry_val(_x) ((_x).l1_lo)
#define l2_pgentry_val(_x) ((_x).l2_lo)
#define l3_pgentry_val(_x) ((_x).l3_lo)
#define l4_pgentry_val(_x) ((_x).l4_lo)
#define pagetable_val(_x)  ((_x).pt_lo)

/* Add type to a table entry. */
#define mk_l1_pgentry(_x)  ( (l1_pgentry_t) { (_x) } )
#define mk_l2_pgentry(_x)  ( (l2_pgentry_t) { (_x) } )
#define mk_l3_pgentry(_x)  ( (l3_pgentry_t) { (_x) } )
#define mk_l4_pgentry(_x)  ( (l4_pgentry_t) { (_x) } )
#define mk_pagetable(_x)   ( (pagetable_t) { (_x) } )

/* Turn a typed table entry into a page index. */
#define l1_pgentry_to_pagenr(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)
#define l2_pgentry_to_pagenr(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
#define l3_pgentry_to_pagenr(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT)
#define l4_pgentry_to_pagenr(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT)

/* Turn a typed table entry into a physical address. */
#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK)
#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK)
#define l3_pgentry_to_phys(_x) (l3_pgentry_val(_x) & PAGE_MASK)
#define l4_pgentry_to_phys(_x) (l4_pgentry_val(_x) & PAGE_MASK)
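/*
 * Example (illustrative): an entry is the mapped frame's physical address
 * ORed with flag bits, so the accessors above recover either half.  The
 * physical address is a made-up value; 0x003 is present+writable:
 *
 *   l1_pgentry_t e = mk_l1_pgentry(0x00ABC000UL | 0x003);
 *   l1_pgentry_to_phys(e)   == 0x00ABC000   (PAGE_MASK strips the flags)
 *   l1_pgentry_to_pagenr(e) == 0x00000ABC   (shifted by PAGE_SHIFT)
 */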
/* Pagetable walking. */
#define l2_pgentry_to_l1(_x) \
  ((l1_pgentry_t *)__va(l2_pgentry_val(_x) & PAGE_MASK))
#define l3_pgentry_to_l2(_x) \
  ((l2_pgentry_t *)__va(l3_pgentry_val(_x) & PAGE_MASK))
#define l4_pgentry_to_l3(_x) \
  ((l3_pgentry_t *)__va(l4_pgentry_val(_x) & PAGE_MASK))
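/*
 * Example (illustrative): descending one level, from an L2 entry to the
 * L1 table it references and then to a leaf entry; the index comes from
 * the offset macros below.  Here l2e and va stand for an entry and a
 * virtual address obtained elsewhere:
 *
 *   l1_pgentry_t *l1  = l2_pgentry_to_l1(l2e);
 *   l1_pgentry_t  l1e = l1[l1_table_offset(va)];
 */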
/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(_a) \
  (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
#if defined(__i386__)
#define l2_table_offset(_a) \
  ((_a) >> L2_PAGETABLE_SHIFT)
#elif defined(__x86_64__)
#define l2_table_offset(_a) \
  (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE - 1))
#define l3_table_offset(_a) \
  (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE - 1))
#define l4_table_offset(_a) \
  ((_a) >> L4_PAGETABLE_SHIFT)
#endif
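/*
 * Example (illustrative, i386): splitting a made-up virtual address into
 * its table indices.  With a 22-bit L2 shift each L2 slot covers 4MB:
 *
 *   unsigned long va = 0xC0101234UL;
 *   l2_table_offset(va) ==  0xC0101234 >> 22          == 0x300
 *   l1_table_offset(va) == (0xC0101234 >> 12) & 0x3FF == 0x101
 */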
#define PAGE_OFFSET         ((unsigned long)__PAGE_OFFSET)
#define __pa(x)             ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)             ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define page_address(_p)    (__va(((_p) - frame_table) << PAGE_SHIFT))
#define pfn_to_page(_pfn)   (frame_table + (_pfn))
#define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page)    ((page - frame_table) < max_mapnr)
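/*
 * Example (illustrative): __pa/__va are a constant-offset translation,
 * valid only within the direct-mapped region; the frame number then
 * indexes frame_table.  The physical address is a made-up value:
 *
 *   void *v = __va(0x00100000UL);            (PAGE_OFFSET + 1MB)
 *   __pa(v)         == 0x00100000
 *   virt_to_page(v) == frame_table + 0x100   (pfn = 0x00100000 >> 12)
 */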
/*
 * NB. We don't currently track I/O holes in the physical RAM space.
 * For now we guess that I/O devices will be mapped in the first 1MB
 * (e.g., VGA buffers) or beyond the end of physical RAM.
 */
#define pfn_is_ram(_pfn)    (((_pfn) > 0x100) && ((_pfn) < max_page))
/* High table entries are reserved by the hypervisor. */
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE \
  (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
  (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
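/*
 * Example (illustrative, i386): if HYPERVISOR_VIRT_START were 0xFC400000
 * (a made-up value for this example, not necessarily the real constant),
 * the split would be:
 *
 *   DOMAIN_ENTRIES_PER_L2_PAGETABLE     == 0xFC400000 >> 22 == 1009
 *   HYPERVISOR_ENTRIES_PER_L2_PAGETABLE == 1024 - 1009      == 15
 */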
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/flushtlb.h>

#define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START)
#define linear_l2_table \
  ((l2_pgentry_t *)(LINEAR_PT_VIRT_START + \
   (LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))

#define va_to_l1mfn(_va) \
  (l2_pgentry_val(linear_l2_table[(_va) >> L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
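/*
 * Example (illustrative): with the page tables recursively mapped at
 * LINEAR_PT_VIRT_START, the leaf entry for any virtual address can be
 * read without an explicit walk:
 *
 *   l1_pgentry_t l1e = linear_pg_table[va >> PAGE_SHIFT];
 */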
#ifdef __i386__
extern l2_pgentry_t idle_pg_table[ENTRIES_PER_L2_PAGETABLE];
#else
extern l4_pgentry_t idle_pg_table[ENTRIES_PER_L4_PAGETABLE];
#endif

extern void paging_init(void);
/* Flush global pages as well. */

#define __pge_off() \
  do { \
    __asm__ __volatile__( \
      "mov %0, %%cr4;  # turn off PGE " \
      :: "r" (mmu_cr4_features & ~X86_CR4_PGE)); \
  } while (0)

#define __pge_on() \
  do { \
    __asm__ __volatile__( \
      "mov %0, %%cr4;  # turn on PGE " \
      :: "r" (mmu_cr4_features)); \
  } while (0)

#define __flush_tlb_pge() \
  do { \
    __pge_off(); \
    __flush_tlb(); \
    __pge_on(); \
  } while (0)
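/*
 * Example (illustrative): toggling CR4.PGE flushes even _PAGE_GLOBAL
 * mappings, which an ordinary CR3 reload would leave cached; a single
 * mapping is flushed more cheaply with INVLPG:
 *
 *   __flush_tlb_one(va);   (one entry, global or not)
 *   __flush_tlb_pge();     (everything, including global entries)
 */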
#define __flush_tlb_one(__addr) \
  __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))

#endif /* !__ASSEMBLY__ */
#define _PAGE_PRESENT  0x001
#define _PAGE_RW       0x002
#define _PAGE_USER     0x004
#define _PAGE_PWT      0x008
#define _PAGE_PCD      0x010
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY    0x040
/* Bit 7 means PAT in L1 entries, PSE in L2 entries. */
#define _PAGE_PAT      0x080
#define _PAGE_PSE      0x080
#define _PAGE_GLOBAL   0x100

#define __PAGE_HYPERVISOR \
  (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR_NOCACHE \
  (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)

#define MAKE_GLOBAL(_x) ((_x) | _PAGE_GLOBAL)

#define PAGE_HYPERVISOR MAKE_GLOBAL(__PAGE_HYPERVISOR)
#define PAGE_HYPERVISOR_NOCACHE MAKE_GLOBAL(__PAGE_HYPERVISOR_NOCACHE)
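/*
 * Example (illustrative): a hypervisor mapping of a made-up frame, with
 * the global bit set so it survives ordinary TLB flushes:
 *
 *   l1_pgentry_t e = mk_l1_pgentry(0x00ABC000UL | PAGE_HYPERVISOR);
 *   (0x00ABC000 | present|rw|dirty|accessed|global == 0x00ABC163)
 */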
#ifndef __ASSEMBLY__
/* Return the smallest 'order' such that (PAGE_SIZE << order) >= size. */
static __inline__ int get_order(unsigned long size)
{
    int order;

    size = (size-1) >> (PAGE_SHIFT-1);
    order = -1;
    do {
        size >>= 1;
        order++;
    } while (size);
    return order;
}
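/*
 * Example (illustrative):
 *
 *   get_order(1)             == 0
 *   get_order(PAGE_SIZE)     == 0
 *   get_order(PAGE_SIZE + 1) == 1
 *   get_order(8 * PAGE_SIZE) == 3
 */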
extern void zap_low_mappings(void);
#endif

#endif /* __X86_PAGE_H__ */