xen-vtx-unstable
changeset 6481:551870a55f24
Unify access to mpt using macros.
Also some code cleanup to x86_64 fault.c
Signed-off-by: Xin Li <xin.b.li@intel.com>
| author | kaf24@firebug.cl.cam.ac.uk |
|---|---|
| date | Tue Aug 30 17:53:49 2005 +0000 (2005-08-30) |
| parents | 1b9f23175fa8 |
| children | 1fc6473ecc01 287d36b46fa3 69d21d9d6b57 |
| files | linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/mm.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/vmx.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/common/grant_table.c xen/include/asm-ia64/mm.h xen/include/asm-x86/mm.h xen/include/asm-x86/shadow.h xen/include/asm-x86/shadow_64.h xen/include/asm-x86/vmx_platform.h |
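Every hunk in the diff below applies the same mechanical rewrite: open-coded indexing of the machine-to-physical table (the "mpt" of the summary line, M2P) and of its reverse map (P2M) is replaced by a named accessor macro. The following minimal sketch illustrates the naming scheme; the heap-backed arrays are hypothetical stand-ins for the tables Xen maps at fixed virtual addresses (RDWR_MPT_VIRT_START and RO_MPT_VIRT_START), and only the macro names and argument order are taken from the xen/include/asm-x86/mm.h hunk:

```c
/* Sketch only: plain arrays stand in for Xen's fixed-address tables. */
#include <stdint.h>
#include <stdio.h>

#define INVALID_M2P_ENTRY (~0U)

static uint32_t machine_to_phys[64];      /* M2P: indexed by mfn, yields pfn */
static unsigned long phys_to_machine[64]; /* P2M: indexed by pfn, yields mfn */

#define set_pfn_from_mfn(mfn, pfn) (machine_to_phys[(mfn)] = (pfn))
#define get_pfn_from_mfn(mfn)      (machine_to_phys[(mfn)])
#define set_mfn_from_pfn(pfn, mfn) (phys_to_machine[(pfn)] = (mfn))
#define get_mfn_from_pfn(pfn)      (phys_to_machine[(pfn)])

int main(void)
{
    /* Old style, as removed below: machine_to_phys_mapping[5] = 2;
     * (or the ad-hoc set_machinetophys(5, 2)). New style: */
    set_pfn_from_mfn(5, 2);
    printf("mfn 5 -> pfn %u\n", get_pfn_from_mfn(5));

    /* Frames owned by Xen itself are marked invisible to guests. */
    set_pfn_from_mfn(7, INVALID_M2P_ENTRY);

    /* Translated guests also maintain the reverse map. */
    set_mfn_from_pfn(2, 5);
    return (get_mfn_from_pfn(2) == 5) ? 0 : 1;
}
```

Note that on x86 the real read side of the P2M is not a plain array load; see the asm-x86/mm.h hunk below and the sketch that follows it.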
line diff
```diff
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c	Tue Aug 30 17:09:43 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c	Tue Aug 30 17:53:49 2005 +0000
@@ -149,7 +149,7 @@ void dump_pagetable(unsigned long addres
 	pmd_t *pmd;
 	pte_t *pte;
 
-	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
+	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
 	pgd += pgd_index(address);
 
 	printk("PGD %lx ", pgd_val(*pgd));
@@ -296,9 +296,9 @@ int exception_trace = 1;
 #define MEM_VERBOSE 1
 
 #ifdef MEM_VERBOSE
-#define MEM_LOG(_f, _a...) \
-	printk("fault.c:[%d]-> " _f "\n", \
-	       __LINE__ , ## _a )
+#define MEM_LOG(_f, _a...) \
+	printk("fault.c:[%d]-> " _f "\n", \
+	       __LINE__ , ## _a )
 #else
 #define MEM_LOG(_f, _a...) ((void)0)
 #endif
@@ -325,7 +325,7 @@ asmlinkage void do_page_fault(struct pt_
 	siginfo_t info;
 
 	if (!user_mode(regs))
-		error_code &= ~4; /* means kernel */
+		error_code &= ~4; /* means kernel */
 
 #ifdef CONFIG_CHECKING
 	{
```
```diff
--- a/xen/arch/x86/domain.c	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/arch/x86/domain.c	Tue Aug 30 17:53:49 2005 +0000
@@ -255,13 +255,13 @@ void arch_do_createdomain(struct vcpu *v
     v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
     v->cpumap = CPUMAP_RUNANYWHERE;
     SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
-    machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
-                            PAGE_SHIFT] = INVALID_M2P_ENTRY;
+    set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
+                     INVALID_M2P_ENTRY);
 
     d->arch.mm_perdomain_pt = alloc_xenheap_page();
     memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
-    machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >>
-                            PAGE_SHIFT] = INVALID_M2P_ENTRY;
+    set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT,
+                     INVALID_M2P_ENTRY);
     v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
     v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
         l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
```
```diff
--- a/xen/arch/x86/domain_build.c	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/arch/x86/domain_build.c	Tue Aug 30 17:53:49 2005 +0000
@@ -592,8 +592,7 @@ int construct_dom0(struct domain *d,
     if ( opt_dom0_translate )
     {
         si->shared_info = d->next_io_page << PAGE_SHIFT;
-        set_machinetophys(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
-                          d->next_io_page);
+        set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT, d->next_io_page);
         d->next_io_page++;
     }
     else
@@ -614,7 +613,7 @@ int construct_dom0(struct domain *d,
         mfn = alloc_epfn - (pfn - REVERSE_START);
 #endif
         ((u32 *)vphysmap_start)[pfn] = mfn;
-        machine_to_phys_mapping[mfn] = pfn;
+        set_pfn_from_mfn(mfn, pfn);
     }
     while ( pfn < nr_pages )
     {
@@ -627,7 +626,7 @@ int construct_dom0(struct domain *d,
 #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
 #endif
         ((u32 *)vphysmap_start)[pfn] = mfn;
-        machine_to_phys_mapping[mfn] = pfn;
+        set_pfn_from_mfn(mfn, pfn);
 #undef pfn
         page++; pfn++;
     }
```
```diff
--- a/xen/arch/x86/mm.c	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/arch/x86/mm.c	Tue Aug 30 17:53:49 2005 +0000
@@ -1452,7 +1452,7 @@ int get_page_type(struct pfn_info *page,
                         "!= exp %" PRtype_info ") "
                         "for mfn %lx (pfn %x)",
                         x, type, page_to_pfn(page),
-                        machine_to_phys_mapping[page_to_pfn(page)]);
+                        get_pfn_from_mfn(page_to_pfn(page)));
                 return 0;
             }
             else if ( (x & PGT_va_mask) == PGT_va_mutable )
@@ -2206,7 +2206,7 @@ int do_mmu_update(
                 printk("privileged guest dom%d requests pfn=%lx to "
                        "map mfn=%lx for dom%d\n",
                        d->domain_id, gpfn, mfn, FOREIGNDOM->domain_id);
-                set_machinetophys(mfn, gpfn);
+                set_pfn_from_mfn(mfn, gpfn);
                 set_p2m_entry(FOREIGNDOM, gpfn, mfn, &sh_mapcache, &mapcache);
                 okay = 1;
                 shadow_unlock(FOREIGNDOM);
@@ -2225,7 +2225,7 @@ int do_mmu_update(
                 break;
             }
 
-            set_machinetophys(mfn, gpfn);
+            set_pfn_from_mfn(mfn, gpfn);
             okay = 1;
 
             /*
```
```diff
--- a/xen/arch/x86/shadow32.c	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/arch/x86/shadow32.c	Tue Aug 30 17:53:49 2005 +0000
@@ -827,7 +827,7 @@ alloc_p2m_table(struct domain *d)
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
         ASSERT(pfn != INVALID_M2P_ENTRY);
         ASSERT(pfn < (1u<<20));
 
@@ -841,7 +841,7 @@ alloc_p2m_table(struct domain *d)
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
         if ( (pfn != INVALID_M2P_ENTRY) &&
              (pfn < (1u<<20)) )
         {
```
```diff
--- a/xen/arch/x86/shadow_public.c	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/arch/x86/shadow_public.c	Tue Aug 30 17:53:49 2005 +0000
@@ -1311,7 +1311,7 @@ alloc_p2m_table(struct domain *d)
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
         ASSERT(pfn != INVALID_M2P_ENTRY);
         ASSERT(pfn < (1u<<20));
 
@@ -1325,7 +1325,7 @@ alloc_p2m_table(struct domain *d)
     {
         page = list_entry(list_ent, struct pfn_info, list);
         mfn = page_to_pfn(page);
-        pfn = machine_to_phys_mapping[mfn];
+        pfn = get_pfn_from_mfn(mfn);
         if ( (pfn != INVALID_M2P_ENTRY) &&
              (pfn < (1u<<20)) )
         {
```
```diff
--- a/xen/arch/x86/vmx.c	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/arch/x86/vmx.c	Tue Aug 30 17:53:49 2005 +0000
@@ -694,7 +694,7 @@ vmx_copy(void *buf, unsigned long laddr,
         return 0;
     }
 
-    mfn = phys_to_machine_mapping(laddr >> PAGE_SHIFT);
+    mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT);
     addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK);
 
     if (dir == COPY_IN)
@@ -795,7 +795,7 @@ vmx_world_restore(struct vcpu *d, struct
          * removed some translation or changed page attributes.
          * We simply invalidate the shadow.
          */
-        mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
         if (mfn != pagetable_get_pfn(d->arch.guest_table)) {
             printk("Invalid CR3 value=%x", c->cr3);
             domain_crash_synchronous();
@@ -813,7 +813,7 @@ vmx_world_restore(struct vcpu *d, struct
             domain_crash_synchronous();
             return 0;
         }
-        mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT);
         d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
         update_pagetables(d);
         /*
@@ -968,7 +968,7 @@ static int vmx_set_cr0(unsigned long val
         /*
          * The guest CR3 must be pointing to the guest physical.
          */
-        if ( !VALID_MFN(mfn = phys_to_machine_mapping(
+        if ( !VALID_MFN(mfn = get_mfn_from_pfn(
                  d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
              !get_page(pfn_to_page(mfn), d->domain) )
         {
@@ -1164,7 +1164,7 @@ static int mov_to_cr(int gp, int cr, str
              * removed some translation or changed page attributes.
              * We simply invalidate the shadow.
              */
-            mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
+            mfn = get_mfn_from_pfn(value >> PAGE_SHIFT);
             if (mfn != pagetable_get_pfn(d->arch.guest_table))
                 __vmx_bug(regs);
             shadow_sync_all(d->domain);
@@ -1175,7 +1175,7 @@ static int mov_to_cr(int gp, int cr, str
              */
             VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
             if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
-                 !VALID_MFN(mfn = phys_to_machine_mapping(value >> PAGE_SHIFT)) ||
+                 !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) ||
                  !get_page(pfn_to_page(mfn), d->domain) )
             {
                 printk("Invalid CR3 value=%lx", value);
```
```diff
--- a/xen/arch/x86/vmx_platform.c	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/arch/x86/vmx_platform.c	Tue Aug 30 17:53:49 2005 +0000
@@ -521,7 +521,7 @@ int inst_copy_from_guest(unsigned char *
     if ( vmx_paging_enabled(current) )
     {
         gpa = gva_to_gpa(guest_eip);
-        mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
 
         /* Does this cross a page boundary ? */
         if ( (guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK) )
@@ -532,7 +532,7 @@ int inst_copy_from_guest(unsigned char *
     }
     else
     {
-        mfn = phys_to_machine_mapping(guest_eip >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(guest_eip >> PAGE_SHIFT);
     }
 
     inst_start = map_domain_page(mfn);
@@ -542,7 +542,7 @@ int inst_copy_from_guest(unsigned char *
     if ( remaining )
     {
         gpa = gva_to_gpa(guest_eip+inst_len+remaining);
-        mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT);
+        mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);
 
         inst_start = map_domain_page(mfn);
         memcpy((char *)buf+inst_len, inst_start, remaining);
```
```diff
--- a/xen/arch/x86/vmx_vmcs.c	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/arch/x86/vmx_vmcs.c	Tue Aug 30 17:53:49 2005 +0000
@@ -148,7 +148,7 @@ int vmx_setup_platform(struct vcpu *d, s
     offset = (addr & ~PAGE_MASK);
     addr = round_pgdown(addr);
 
-    mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
+    mpfn = get_mfn_from_pfn(addr >> PAGE_SHIFT);
     p = map_domain_page(mpfn);
 
     e820p = (struct e820entry *) ((unsigned long) p + offset);
@@ -175,7 +175,7 @@ int vmx_setup_platform(struct vcpu *d, s
     unmap_domain_page(p);
 
     /* Initialise shared page */
-    mpfn = phys_to_machine_mapping(gpfn);
+    mpfn = get_mfn_from_pfn(gpfn);
     p = map_domain_page(mpfn);
     d->domain->arch.vmx_platform.shared_page_va = (unsigned long)p;
 
```
```diff
--- a/xen/common/grant_table.c	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/common/grant_table.c	Tue Aug 30 17:53:49 2005 +0000
@@ -1211,13 +1211,13 @@ gnttab_notify_transfer(
             DPRINTK("Bad pfn (%lx)\n", pfn);
         else
         {
-            machine_to_phys_mapping[frame] = pfn;
+            set_pfn_from_mfn(frame, pfn);
 
             if ( unlikely(shadow_mode_log_dirty(ld)))
                 mark_dirty(ld, frame);
 
             if (shadow_mode_translate(ld))
-                __phys_to_machine_mapping[pfn] = frame;
+                set_mfn_from_pfn(pfn, frame);
         }
         sha->frame = __mfn_to_gpfn(rd, frame);
         sha->domid = rd->domain_id;
@@ -1268,8 +1268,7 @@ grant_table_create(
     {
         SHARE_PFN_WITH_DOMAIN(
             virt_to_page((char *)(t->shared)+(i*PAGE_SIZE)), d);
-        machine_to_phys_mapping[(virt_to_phys(t->shared) >> PAGE_SHIFT) + i] =
-            INVALID_M2P_ENTRY;
+        set_pfn_from_mfn((virt_to_phys(t->shared) >> PAGE_SHIFT) + i, INVALID_M2P_ENTRY);
     }
 
     /* Okay, install the structure. */
```
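The gnttab_notify_transfer() hunk above is the one call site that touches both directions: the M2P entry is rewritten for every transferred frame, while the P2M entry is rewritten only when the receiving domain runs in translate mode. A hedged sketch of that invariant, reusing the hypothetical stand-in tables and macros from the sketch before the diff:

```c
/* Hypothetical helper mirroring the gnttab_notify_transfer() hunk:
 * record that machine frame 'frame' now backs guest pfn 'pfn'. */
static void note_transfer(int translate_mode, unsigned long frame,
                          unsigned long pfn)
{
    set_pfn_from_mfn(frame, pfn);       /* always: update the M2P */
    if (translate_mode)
        set_mfn_from_pfn(pfn, frame);   /* translate mode only: P2M too */
}
```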
```diff
--- a/xen/include/asm-ia64/mm.h	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/include/asm-ia64/mm.h	Tue Aug 30 17:53:49 2005 +0000
@@ -405,7 +405,7 @@ extern unsigned long *mpt_table;
 /* If pmt table is provided by control pannel later, we need __get_user
  * here. However if it's allocated by HV, we should access it directly
  */
-#define phys_to_machine_mapping(d, gpfn) \
+#define get_mfn_from_pfn(d, gpfn) \
     ((d) == dom0 ? gpfn : \
      (gpfn <= d->arch.max_pfn ? (d)->arch.pmt[(gpfn)] : \
       INVALID_MFN))
@@ -414,7 +414,7 @@ extern unsigned long *mpt_table;
     machine_to_phys_mapping[(mfn)]
 
 #define __gpfn_to_mfn(_d, gpfn) \
-    phys_to_machine_mapping((_d), (gpfn))
+    get_mfn_from_pfn((_d), (gpfn))
 
 #define __gpfn_invalid(_d, gpfn) \
     (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)
```
```diff
--- a/xen/include/asm-x86/mm.h	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/include/asm-x86/mm.h	Tue Aug 30 17:53:49 2005 +0000
@@ -255,28 +255,31 @@ int check_descriptor(struct desc_struct
  * contiguous (or near contiguous) physical memory.
  */
 #undef machine_to_phys_mapping
-#define machine_to_phys_mapping ((u32 *)RDWR_MPT_VIRT_START)
+#define machine_to_phys_mapping  ((u32 *)RDWR_MPT_VIRT_START)
 #define INVALID_M2P_ENTRY        (~0U)
 #define VALID_M2P(_e)            (!((_e) & (1U<<31)))
 #define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e))
 
+#define set_pfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
+#define get_pfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
+
 /*
  * The phys_to_machine_mapping is the reversed mapping of MPT for full
  * virtualization. It is only used by shadow_mode_translate()==true
  * guests, so we steal the address space that would have normally
  * been used by the read-only MPT map.
  */
-#define __phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START)
-#define INVALID_MFN               (~0UL)
-#define VALID_MFN(_mfn)           (!((_mfn) & (1U<<31)))
+#define phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START)
+#define INVALID_MFN             (~0UL)
+#define VALID_MFN(_mfn)         (!((_mfn) & (1U<<31)))
 
-/* Returns the machine physical */
-static inline unsigned long phys_to_machine_mapping(unsigned long pfn)
+#define set_mfn_from_pfn(pfn, mfn) (phys_to_machine_mapping[(pfn)] = (mfn))
+static inline unsigned long get_mfn_from_pfn(unsigned long pfn)
 {
     unsigned long mfn;
     l1_pgentry_t pte;
 
-    if ( (__copy_from_user(&pte, &__phys_to_machine_mapping[pfn],
+    if ( (__copy_from_user(&pte, &phys_to_machine_mapping[pfn],
                            sizeof(pte)) == 0) &&
          (l1e_get_flags(pte) & _PAGE_PRESENT) )
         mfn = l1e_get_pfn(pte);
@@ -285,7 +288,6 @@ static inline unsigned long phys_to_mach
 
     return mfn;
 }
-#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
 
 #ifdef MEMORY_GUARD
 void memguard_init(void);
```
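One asymmetry in the hunk above is worth noting: set_mfn_from_pfn() writes phys_to_machine_mapping as a flat array of unsigned longs, while get_mfn_from_pfn() reads the same slot as an l1 page-table entry through __copy_from_user(), so a non-present or unmapped entry fails the callers' VALID_MFN() checks instead of faulting inside Xen. A self-contained sketch of that guarded-read shape, with hypothetical stand-in types and a stand-in copy helper in place of the real l1_pgentry_t accessors:

```c
#include <string.h>

#define INVALID_MFN   (~0UL)
#define _PAGE_PRESENT 0x001UL
#define PAGE_SHIFT    12

typedef struct { unsigned long l1; } l1_pgentry_t;

static l1_pgentry_t p2m[64];  /* stand-in for the RO_MPT_VIRT_START map */

/* Stand-in for __copy_from_user(): returns 0 on success, nonzero if the
 * source would fault (the real one recovers instead of crashing Xen). */
static int copy_entry(l1_pgentry_t *dst, const l1_pgentry_t *src)
{
    memcpy(dst, src, sizeof *dst);
    return 0;
}

static unsigned long get_mfn_from_pfn_sketch(unsigned long pfn)
{
    l1_pgentry_t pte;

    if ( (copy_entry(&pte, &p2m[pfn]) == 0) &&
         (pte.l1 & _PAGE_PRESENT) )
        return pte.l1 >> PAGE_SHIFT;  /* cf. l1e_get_pfn() */

    return INVALID_MFN;               /* rejected by VALID_MFN() callers */
}

int main(void)
{
    /* Entry 3 was never written, so it is not present. */
    return (get_mfn_from_pfn_sketch(3) == INVALID_MFN) ? 0 : 1;
}
```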
```diff
--- a/xen/include/asm-x86/shadow.h	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/include/asm-x86/shadow.h	Tue Aug 30 17:53:49 2005 +0000
@@ -269,14 +269,14 @@ static inline void shadow_mode_disable(s
 
 #define __mfn_to_gpfn(_d, mfn)                  \
     ( (shadow_mode_translate(_d))               \
-      ? machine_to_phys_mapping[(mfn)]          \
+      ? get_pfn_from_mfn(mfn)                   \
       : (mfn) )
 
 #define __gpfn_to_mfn(_d, gpfn)                 \
     ({                                          \
         ASSERT(current->domain == (_d));        \
         (shadow_mode_translate(_d))             \
-          ? phys_to_machine_mapping(gpfn)       \
+          ? get_mfn_from_pfn(gpfn)              \
           : (gpfn);                             \
     })
 
@@ -461,7 +461,7 @@ static inline int __mark_dirty(struct do
     // This wants the nice compact set of PFNs from 0..domain's max,
     // which __mfn_to_gpfn() only returns for translated domains.
     //
-    pfn = machine_to_phys_mapping[mfn];
+    pfn = get_pfn_from_mfn(mfn);
 
     /*
      * Values with the MSB set denote MFNs that aren't really part of the
@@ -562,7 +562,7 @@ update_hl2e(struct vcpu *v, unsigned lon
     old_hl2e = v->arch.hl2_vtable[index];
 
     if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) &&
-         VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e))) )
+         VALID_MFN(mfn = get_mfn_from_pfn(l2e_get_pfn(gl2e))) )
         new_hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
     else
         new_hl2e = l1e_empty();
```
```diff
--- a/xen/include/asm-x86/shadow_64.h	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/include/asm-x86/shadow_64.h	Tue Aug 30 17:53:49 2005 +0000
@@ -138,7 +138,7 @@ static inline pgentry_64_t *__entry(
             return NULL;
         mfn = entry_get_value(*le_e) >> PAGE_SHIFT;
         if ((flag & GUEST_ENTRY) && shadow_mode_translate(d))
-            mfn = phys_to_machine_mapping(mfn);
+            mfn = get_mfn_from_pfn(mfn);
         le_p = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT);
         index = table_offset_64(va, (level + i - 1));
         le_e = &le_p[index];
@@ -257,7 +257,7 @@ static inline void * __guest_set_l1e(
                 if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                     return NULL;
 
-                l1mfn = phys_to_machine_mapping(
+                l1mfn = get_mfn_from_pfn(
                     l2e_get_pfn(gl2e));
 
                 l1va = (l1_pgentry_32_t *)
@@ -299,7 +299,7 @@ static inline void * __guest_get_l1e(
                     return NULL;
 
 
-                l1mfn = phys_to_machine_mapping(
+                l1mfn = get_mfn_from_pfn(
                     l2e_get_pfn(gl2e));
                 l1va = (l1_pgentry_32_t *) phys_to_virt(
                     l1mfn << L1_PAGETABLE_SHIFT);
```
```diff
--- a/xen/include/asm-x86/vmx_platform.h	Tue Aug 30 17:09:43 2005 +0000
+++ b/xen/include/asm-x86/vmx_platform.h	Tue Aug 30 17:53:49 2005 +0000
@@ -91,6 +91,6 @@ extern int vmx_setup_platform(struct vcp
 extern void vmx_io_assist(struct vcpu *v);
 
 // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.
-#define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
+#define mmio_space(gpa) (!VALID_MFN(get_mfn_from_pfn((gpa) >> PAGE_SHIFT)))
 
 #endif
```