debuggers.hg
changeset 3765:4dfebfdc7933
bitkeeper revision 1.1159.252.1 (4208e2a42Fwe83QQfJdFQI8V302tYg)
Reorganise mm.h to split out 32-bit and 64-bit definitions. Fix x86_64
definitions to mask out the bits that we don't care about.
Signed-off-by: keir.fraser@cl.cam.ac.uk
| author   | kaf24@scramble.cl.cam.ac.uk |
| -------- | --------------------------- |
| date     | Tue Feb 08 16:02:44 2005 +0000 (2005-02-08) |
| parents  | 9db7fbdf56b6 |
| children | 89e86842952a |
| files    | .rootkeys xen/arch/x86/dom0_ops.c xen/arch/x86/mm.c xen/arch/x86/x86_32/domain_build.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/domain_build.c xen/arch/x86/x86_64/mm.c xen/include/asm-x86/page.h xen/include/asm-x86/x86_32/page.h xen/include/asm-x86/x86_64/page.h |
line diff
--- a/.rootkeys Tue Feb 08 15:42:33 2005 +0000
+++ b/.rootkeys Tue Feb 08 16:02:44 2005 +0000
@@ -1038,6 +1038,7 @@ 41c0c412lQ0NVVN9PsOSznQ-qhOiPA xen/inclu
 418fbcfe_WliJPToeVM-9VStvym-hw xen/include/asm-x86/x86_32/asm_defns.h
 3ddb79c2ADvRmdexd9y3AYK9_NTx-Q xen/include/asm-x86/x86_32/current.h
 3e20b82fl1jmQiKdLy7fxMcutfpjWA xen/include/asm-x86/x86_32/domain_page.h
+4208e2a3ZNFroNXbX9OYaOB-xtUyDQ xen/include/asm-x86/x86_32/page.h
 3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/x86_32/regs.h
 3e7f358aG11EvMI9VJ4_9hD4LUO7rQ xen/include/asm-x86/x86_32/string.h
 3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/x86_32/uaccess.h
@@ -1045,6 +1046,7 @@ 41bf1717bML6GxpclTWJabiaO5W5vg xen/inclu
 404f1b9ceJeGVaPNIENm2FkK0AgEOQ xen/include/asm-x86/x86_64/current.h
 41febc4b1aCGLsm0Y0b_82h7lFtrEA xen/include/asm-x86/x86_64/domain_page.h
 404f1badfXZJZ2sU8sh9PS2EZvd19Q xen/include/asm-x86/x86_64/ldt.h
+4208e2a3Fktw4ZttKdDxbhvTQ6brfQ xen/include/asm-x86/x86_64/page.h
 404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/regs.h
 40e1966azOJZfNI6Ilthe6Q-T3Hewg xen/include/asm-x86/x86_64/string.h
 404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h
--- a/xen/arch/x86/dom0_ops.c Tue Feb 08 15:42:33 2005 +0000
+++ b/xen/arch/x86/dom0_ops.c Tue Feb 08 16:02:44 2005 +0000
@@ -376,7 +376,7 @@ void arch_getdomaininfo_ctxt(
     {
         for ( i = 0; i < 16; i++ )
             c->gdt_frames[i] =
-                l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[i]);
+                l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i]);
         c->gdt_ents = GET_GDT_ENTRIES(ed);
     }
     c->guestos_ss = ed->arch.guestos_ss;
--- a/xen/arch/x86/mm.c Tue Feb 08 15:42:33 2005 +0000
+++ b/xen/arch/x86/mm.c Tue Feb 08 16:02:44 2005 +0000
@@ -226,7 +226,7 @@ static void __invalidate_shadow_ldt(stru

     for ( i = 16; i < 32; i++ )
     {
-        pfn = l1_pgentry_to_pagenr(d->arch.perdomain_ptes[i]);
+        pfn = l1_pgentry_to_pfn(d->arch.perdomain_ptes[i]);
         if ( pfn == 0 ) continue;
         d->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
         page = &frame_table[pfn];
@@ -364,14 +364,14 @@ get_linear_pagetable(
     if ( (l2_pgentry_val(l2e) >> PAGE_SHIFT) != pfn )
     {
         /* Make sure the mapped frame belongs to the correct domain. */
-        if ( unlikely(!get_page_from_pagenr(l2_pgentry_to_pagenr(l2e), d)) )
+        if ( unlikely(!get_page_from_pagenr(l2_pgentry_to_pfn(l2e), d)) )
             return 0;

         /*
          * Make sure that the mapped frame is an already-validated L2 table.
          * If so, atomically increment the count (checking for overflow).
          */
-        page = &frame_table[l2_pgentry_to_pagenr(l2e)];
+        page = &frame_table[l2_pgentry_to_pfn(l2e)];
         y = page->u.inuse.type_info;
         do {
             x = y;
@@ -395,7 +395,7 @@ get_page_from_l1e(
     l1_pgentry_t l1e, struct domain *d)
 {
     unsigned long l1v = l1_pgentry_val(l1e);
-    unsigned long pfn = l1_pgentry_to_pagenr(l1e);
+    unsigned long pfn = l1_pgentry_to_pfn(l1e);
     struct pfn_info *page = &frame_table[pfn];
     extern int domain_iomem_in_pfn(struct domain *d, unsigned long pfn);

@@ -449,7 +449,7 @@ get_page_from_l2e(
     }

     rc = get_page_and_type_from_pagenr(
-        l2_pgentry_to_pagenr(l2e),
+        l2_pgentry_to_pfn(l2e),
         PGT_l1_page_table | (va_idx<<PGT_va_shift), d);

     if ( unlikely(!rc) )
@@ -462,7 +462,7 @@ get_page_from_l2e(
 static void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
 {
     unsigned long l1v = l1_pgentry_val(l1e);
-    unsigned long pfn = l1_pgentry_to_pagenr(l1e);
+    unsigned long pfn = l1_pgentry_to_pfn(l1e);
     struct pfn_info *page = &frame_table[pfn];
     struct domain *e;

@@ -512,7 +512,7 @@ static void put_page_from_l2e(l2_pgentry
 {
     if ( (l2_pgentry_val(l2e) & _PAGE_PRESENT) &&
          ((l2_pgentry_val(l2e) >> PAGE_SHIFT) != pfn) )
-        put_page_and_type(&frame_table[l2_pgentry_to_pagenr(l2e)]);
+        put_page_and_type(&frame_table[l2_pgentry_to_pfn(l2e)]);
 }


@@ -1670,7 +1670,7 @@ void destroy_gdt(struct exec_domain *ed)

     for ( i = 0; i < 16; i++ )
     {
-        if ( (pfn = l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[i])) != 0 )
+        if ( (pfn = l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i])) != 0 )
             put_page_and_type(&frame_table[pfn]);
         ed->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
     }
@@ -1798,7 +1798,7 @@ long do_update_descriptor(
     case PGT_gdt_page:
         /* Disallow updates of Xen-reserved descriptors in the current GDT. */
         for_each_exec_domain(current->domain, ed) {
-            if ( (l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[0]) == pfn) &&
+            if ( (l1_pgentry_to_pfn(ed->arch.perdomain_ptes[0]) == pfn) &&
                  (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
                  (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
                 goto out;
@@ -1939,7 +1939,7 @@ void ptwr_flush(const int which)
                 l1pte_propagate_from_guest(
                     d, &l1_pgentry_val(nl1e),
                     &l1_pgentry_val(sl1e[i]));
-                put_page_type(&frame_table[l1_pgentry_to_pagenr(nl1e)]);
+                put_page_type(&frame_table[l1_pgentry_to_pfn(nl1e)]);
             }
             continue;
         }
--- a/xen/arch/x86/x86_32/domain_build.c Tue Feb 08 15:42:33 2005 +0000
+++ b/xen/arch/x86/x86_32/domain_build.c Tue Feb 08 16:02:44 2005 +0000
@@ -262,7 +262,7 @@ int construct_dom0(struct domain *d,
     for ( count = 0; count < nr_pt_pages; count++ )
     {
         *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
-        page = &frame_table[l1_pgentry_to_pagenr(*l1tab)];
+        page = &frame_table[l1_pgentry_to_pfn(*l1tab)];
         if ( count == 0 )
         {
             page->u.inuse.type_info &= ~PGT_type_mask;
--- a/xen/arch/x86/x86_32/mm.c Tue Feb 08 15:42:33 2005 +0000
+++ b/xen/arch/x86/x86_32/mm.c Tue Feb 08 16:02:44 2005 +0000
@@ -164,7 +164,7 @@ void subarch_init_memory(struct domain *
     }

     /* M2P table is mappable read-only by privileged domains. */
-    m2p_start_mfn = l2_pgentry_to_pagenr(
+    m2p_start_mfn = l2_pgentry_to_pfn(
         idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]);
     for ( i = 0; i < 1024; i++ )
     {
--- a/xen/arch/x86/x86_64/domain_build.c Tue Feb 08 15:42:33 2005 +0000
+++ b/xen/arch/x86/x86_64/domain_build.c Tue Feb 08 16:02:44 2005 +0000
@@ -294,7 +294,7 @@ int construct_dom0(struct domain *d,
     for ( count = 0; count < nr_pt_pages; count++ )
     {
         *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
-        page = &frame_table[l1_pgentry_to_pagenr(*l1tab)];
+        page = &frame_table[l1_pgentry_to_pfn(*l1tab)];

         /* Read-only mapping + PGC_allocated + page-table page. */
         page->count_info = PGC_allocated | 3;
--- a/xen/arch/x86/x86_64/mm.c Tue Feb 08 15:42:33 2005 +0000
+++ b/xen/arch/x86/x86_64/mm.c Tue Feb 08 16:02:44 2005 +0000
@@ -199,7 +199,7 @@ void subarch_init_memory(struct domain *
         l2e = l3_pgentry_to_l2(l3e)[l2_table_offset(v)];
         if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
             continue;
-        m2p_start_mfn = l2_pgentry_to_pagenr(l2e);
+        m2p_start_mfn = l2_pgentry_to_pfn(l2e);

         for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
         {
--- a/xen/include/asm-x86/page.h Tue Feb 08 15:42:33 2005 +0000
+++ b/xen/include/asm-x86/page.h Tue Feb 08 16:02:44 2005 +0000
@@ -1,39 +1,14 @@
-/******************************************************************************
- * asm-x86/page.h
- *
- * Definitions relating to page tables.
- */
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */

 #ifndef __X86_PAGE_H__
 #define __X86_PAGE_H__

-#if defined(__x86_64__)
-
-#define L1_PAGETABLE_SHIFT 12
-#define L2_PAGETABLE_SHIFT 21
-#define L3_PAGETABLE_SHIFT 30
-#define L4_PAGETABLE_SHIFT 39
-
-#define ENTRIES_PER_L1_PAGETABLE 512
-#define ENTRIES_PER_L2_PAGETABLE 512
-#define ENTRIES_PER_L3_PAGETABLE 512
-#define ENTRIES_PER_L4_PAGETABLE 512
-
-#define __PAGE_OFFSET (0xFFFF830000000000)
-
-#elif defined(__i386__)
-
-#define L1_PAGETABLE_SHIFT 12
-#define L2_PAGETABLE_SHIFT 22
-
-#define ENTRIES_PER_L1_PAGETABLE 1024
-#define ENTRIES_PER_L2_PAGETABLE 1024
-
-#define __PAGE_OFFSET (0xFC400000)
-
+#if defined(__i386__)
+#include <asm/x86_32/page.h>
+#elif defined(__x86_64__)
+#include <asm/x86_64/page.h>
 #endif

-#define PAGE_SHIFT L1_PAGETABLE_SHIFT
 #ifndef __ASSEMBLY__
 #define PAGE_SIZE (1UL << PAGE_SHIFT)
 #else
@@ -44,84 +19,9 @@
 #define clear_page(_p) memset((void *)(_p), 0, PAGE_SIZE)
 #define copy_page(_t,_f) memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)

-#ifndef __ASSEMBLY__
-#include <xen/config.h>
-typedef struct { unsigned long l1_lo; } l1_pgentry_t;
-typedef struct { unsigned long l2_lo; } l2_pgentry_t;
-typedef struct { unsigned long l3_lo; } l3_pgentry_t;
-typedef struct { unsigned long l4_lo; } l4_pgentry_t;
-#endif /* !__ASSEMBLY__ */
-
-/* Strip type from a table entry. */
-#define l1_pgentry_val(_x) ((_x).l1_lo)
-#define l2_pgentry_val(_x) ((_x).l2_lo)
-#define l3_pgentry_val(_x) ((_x).l3_lo)
-#define l4_pgentry_val(_x) ((_x).l4_lo)
-
-/* Add type to a table entry. */
-#define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } )
-#define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } )
-#define mk_l3_pgentry(_x) ( (l3_pgentry_t) { (_x) } )
-#define mk_l4_pgentry(_x) ( (l4_pgentry_t) { (_x) } )
-
-/* Turn a typed table entry into a page index. */
-#define l1_pgentry_to_pagenr(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)
-#define l2_pgentry_to_pagenr(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
-#define l3_pgentry_to_pagenr(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT)
-#define l4_pgentry_to_pagenr(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT)
-
-/* Turn a typed table entry into a physical address. */
-#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK)
-#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK)
-#define l3_pgentry_to_phys(_x) (l3_pgentry_val(_x) & PAGE_MASK)
-#define l4_pgentry_to_phys(_x) (l4_pgentry_val(_x) & PAGE_MASK)
-
-/* Pagetable walking. */
-#define l2_pgentry_to_l1(_x) \
-  ((l1_pgentry_t *)__va(l2_pgentry_val(_x) & PAGE_MASK))
-#define l3_pgentry_to_l2(_x) \
-  ((l2_pgentry_t *)__va(l3_pgentry_val(_x) & PAGE_MASK))
-#define l4_pgentry_to_l3(_x) \
-  ((l3_pgentry_t *)__va(l4_pgentry_val(_x) & PAGE_MASK))
-
-/* Given a virtual address, get an entry offset into a page table. */
-#define l1_table_offset(_a) \
-  (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
-#if defined(__i386__)
-#define l2_table_offset(_a) \
-  ((_a) >> L2_PAGETABLE_SHIFT)
-#elif defined(__x86_64__)
-#define l2_table_offset(_a) \
-  (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE - 1))
-#define l3_table_offset(_a) \
-  (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE - 1))
-#define l4_table_offset(_a) \
-  (((_a) >> L4_PAGETABLE_SHIFT) & (ENTRIES_PER_L4_PAGETABLE - 1))
-#endif
-
-/* Given a virtual address, get an entry offset into a linear page table. */
-#if defined(__i386__)
-#define l1_linear_offset(_a) ((_a) >> PAGE_SHIFT)
-#elif defined(__x86_64__)
-#define l1_linear_offset(_a) (((_a) & ((1UL << 48) - 1)) >> PAGE_SHIFT)
-#endif
-
-#if defined(__i386__)
-#define pagetable_t l2_pgentry_t
-#define pagetable_val(_x) ((_x).l2_lo)
-#define mk_pagetable(_x) ( (l2_pgentry_t) { (_x) } )
-#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L2_PAGETABLE
-#elif defined(__x86_64__)
-#define pagetable_t l4_pgentry_t
-#define pagetable_val(_x) ((_x).l4_lo)
-#define mk_pagetable(_x) ( (l4_pgentry_t) { (_x) } )
-#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L4_PAGETABLE
-#endif
-
 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
 #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define page_address(_p) (__va(((_p) - frame_table) << PAGE_SHIFT))
 #define pfn_to_page(_pfn) (frame_table + (_pfn))
 #define phys_to_page(kaddr) (frame_table + ((kaddr) >> PAGE_SHIFT))
 #define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/x86_32/page.h Tue Feb 08 16:02:44 2005 +0000
@@ -0,0 +1,56 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+
+#ifndef __X86_32_PAGE_H__
+#define __X86_32_PAGE_H__
+
+#define L1_PAGETABLE_SHIFT 12
+#define L2_PAGETABLE_SHIFT 22
+#define PAGE_SHIFT L1_PAGETABLE_SHIFT
+
+#define ENTRIES_PER_L1_PAGETABLE 1024
+#define ENTRIES_PER_L2_PAGETABLE 1024
+
+#define __PAGE_OFFSET (0xFC400000)
+
+#ifndef __ASSEMBLY__
+#include <xen/config.h>
+typedef struct { unsigned long l1_lo; } l1_pgentry_t;
+typedef struct { unsigned long l2_lo; } l2_pgentry_t;
+#endif /* !__ASSEMBLY__ */
+
+/* Strip type from a table entry. */
+#define l1_pgentry_val(_x) ((_x).l1_lo)
+#define l2_pgentry_val(_x) ((_x).l2_lo)
+
+/* Add type to a table entry. */
+#define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } )
+#define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } )
+
+/* Turn a typed table entry into a physical address. */
+#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK)
+#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK)
+
+/* Turn a typed table entry into a page index. */
+#define l1_pgentry_to_pfn(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)
+#define l2_pgentry_to_pfn(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
+
+/* Pagetable walking. */
+#define l2_pgentry_to_l1(_x) \
+  ((l1_pgentry_t *)__va(l2_pgentry_to_phys(_x)))
+
+/* Given a virtual address, get an entry offset into a page table. */
+#define l1_table_offset(_a) \
+  (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
+#define l2_table_offset(_a) \
+  ((_a) >> L2_PAGETABLE_SHIFT)
+
+/* Given a virtual address, get an entry offset into a linear page table. */
+#define l1_linear_offset(_a) ((_a) >> PAGE_SHIFT)
+
+/* Root page-table definitions. */
+#define pagetable_t l2_pgentry_t
+#define pagetable_val(_x) ((_x).l2_lo)
+#define mk_pagetable(_x) ( (l2_pgentry_t) { (_x) } )
+#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L2_PAGETABLE
+
+#endif /* __X86_32_PAGE_H__ */
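As a rough illustration of how the x86_32 definitions above compose (not part of the changeset), the sketch below walks the two-level idle page table for a virtual address and returns its frame number. The helper name va_to_pfn is hypothetical; idle_pg_table, __va() and _PAGE_PRESENT are assumed to be available from the surrounding Xen headers, as in xen/arch/x86/x86_32/mm.c.

```c
/* Illustrative sketch only: compose the x86_32 page.h macros to walk
 * the idle page table.  Returns 0 if no frame is mapped at va. */
static unsigned long va_to_pfn(unsigned long va)
{
    l2_pgentry_t l2e = idle_pg_table[l2_table_offset(va)];
    l1_pgentry_t *l1t, l1e;

    if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
        return 0;                     /* no L1 table mapped here */

    l1t = l2_pgentry_to_l1(l2e);      /* __va() of the L1 table frame */
    l1e = l1t[l1_table_offset(va)];

    if ( !(l1_pgentry_val(l1e) & _PAGE_PRESENT) )
        return 0;

    return l1_pgentry_to_pfn(l1e);    /* frame number, flag bits shifted out */
}
```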
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/x86_64/page.h Tue Feb 08 16:02:44 2005 +0000
@@ -0,0 +1,84 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+
+#ifndef __X86_64_PAGE_H__
+#define __X86_64_PAGE_H__
+
+#define L1_PAGETABLE_SHIFT 12
+#define L2_PAGETABLE_SHIFT 21
+#define L3_PAGETABLE_SHIFT 30
+#define L4_PAGETABLE_SHIFT 39
+#define PAGE_SHIFT L1_PAGETABLE_SHIFT
+
+#define ENTRIES_PER_L1_PAGETABLE 512
+#define ENTRIES_PER_L2_PAGETABLE 512
+#define ENTRIES_PER_L3_PAGETABLE 512
+#define ENTRIES_PER_L4_PAGETABLE 512
+
+#define __PAGE_OFFSET (0xFFFF830000000000)
+
+/* These may increase in future (phys. bits in particular). */
+#define PADDR_BITS 40
+#define VADDR_BITS 48
+#define PADDR_MASK ((1UL << PADDR_BITS)-1)
+#define VADDR_MASK ((1UL << VADDR_BITS)-1)
+
+#ifndef __ASSEMBLY__
+#include <xen/config.h>
+typedef struct { unsigned long l1_lo; } l1_pgentry_t;
+typedef struct { unsigned long l2_lo; } l2_pgentry_t;
+typedef struct { unsigned long l3_lo; } l3_pgentry_t;
+typedef struct { unsigned long l4_lo; } l4_pgentry_t;
+#endif /* !__ASSEMBLY__ */
+
+/* Strip type from a table entry. */
+#define l1_pgentry_val(_x) ((_x).l1_lo)
+#define l2_pgentry_val(_x) ((_x).l2_lo)
+#define l3_pgentry_val(_x) ((_x).l3_lo)
+#define l4_pgentry_val(_x) ((_x).l4_lo)
+
+/* Add type to a table entry. */
+#define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } )
+#define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } )
+#define mk_l3_pgentry(_x) ( (l3_pgentry_t) { (_x) } )
+#define mk_l4_pgentry(_x) ( (l4_pgentry_t) { (_x) } )
+
+/* Turn a typed table entry into a physical address. */
+#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & (PADDR_MASK & PAGE_MASK))
+#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & (PADDR_MASK & PAGE_MASK))
+#define l3_pgentry_to_phys(_x) (l3_pgentry_val(_x) & (PADDR_MASK & PAGE_MASK))
+#define l4_pgentry_to_phys(_x) (l4_pgentry_val(_x) & (PADDR_MASK & PAGE_MASK))
+
+/* Turn a typed table entry into a page index. */
+#define l1_pgentry_to_pfn(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)
+#define l2_pgentry_to_pfn(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
+#define l3_pgentry_to_pfn(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT)
+#define l4_pgentry_to_pfn(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT)
+
+/* Pagetable walking. */
+#define l2_pgentry_to_l1(_x) \
+  ((l1_pgentry_t *)__va(l2_pgentry_to_phys(_x)))
+#define l3_pgentry_to_l2(_x) \
+  ((l2_pgentry_t *)__va(l3_pgentry_to_phys(_x)))
+#define l4_pgentry_to_l3(_x) \
+  ((l3_pgentry_t *)__va(l4_pgentry_to_phys(_x)))
+
+/* Given a virtual address, get an entry offset into a page table. */
+#define l1_table_offset(_a) \
+  (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
+#define l2_table_offset(_a) \
+  (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE - 1))
+#define l3_table_offset(_a) \
+  (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE - 1))
+#define l4_table_offset(_a) \
+  (((_a) >> L4_PAGETABLE_SHIFT) & (ENTRIES_PER_L4_PAGETABLE - 1))
+
+/* Given a virtual address, get an entry offset into a linear page table. */
+#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> PAGE_SHIFT)
+
+/* Root page-table definitions. */
+#define pagetable_t l4_pgentry_t
+#define pagetable_val(_x) ((_x).l4_lo)
+#define mk_pagetable(_x) ( (l4_pgentry_t) { (_x) } )
+#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L4_PAGETABLE
+
+#endif /* __X86_64_PAGE_H__ */
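The commit message's "mask out the bits that we don't care about" refers to these PADDR_MASK/VADDR_MASK definitions: unlike the old common header, the x86_64 *_pgentry_to_phys macros no longer let high PTE bits, such as the NX bit (bit 63), leak into the computed physical address. Below is a minimal stand-alone sketch of the difference (not part of the changeset); the example entry value is made up, and the constants simply mirror the header.

```c
/* Illustrative sketch only: why x86_64 masks PTEs with
 * (PADDR_MASK & PAGE_MASK) rather than PAGE_MASK alone. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))
#define PADDR_BITS 40
#define PADDR_MASK ((1UL << PADDR_BITS) - 1)

int main(void)
{
    /* A hypothetical L1 entry: frame 0x12345, present + writable, NX set. */
    unsigned long l1e = (0x12345UL << PAGE_SHIFT) | 0x3 | (1UL << 63);

    unsigned long phys_old = l1e & PAGE_MASK;                /* old common header */
    unsigned long phys_new = l1e & (PADDR_MASK & PAGE_MASK); /* new x86_64 header  */

    printf("PAGE_MASK only:          %#lx\n", phys_old); /* NX bit leaks into the address */
    printf("PADDR_MASK & PAGE_MASK:  %#lx\n", phys_new); /* 0x12345000 as intended */
    return 0;
}
```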