changeset 19670:b0966b6f5180
x86-64: also handle virtual aliases of Xen image pages
With the unification of the heaps, pages freed from the Xen boot
image can now also end up being allocated to a domain, and hence the
respective virtual aliases need handling when such pages have their
cacheability attributes changed.

Rather than establishing multiple mappings with non-WB attributes
(which can still cause transient aliasing issues), simply unmap
those pages from the Xen virtual address space, and re-map them (to
allow any large-page mappings to be re-established) when their
cacheability attribute is restored to normal (WB).
Signed-off-by: Jan Beulich <jbeulich@novell.com>
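The core of the change is the alias test: an MFN aliases the Xen image iff it lies in the page-frame range covered by [xen_phys_start, xen_phys_start + image size). The following standalone sketch models that check and the alias-VA computation from the patch below; the macro definitions mirror Xen's PFN_DOWN/PFN_UP, but the concrete load address, XEN_VIRT_START value, and image size are illustrative stand-ins, not the real values.

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT  12
#define PFN_DOWN(x) ((uint64_t)(x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((uint64_t)(x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

/* Hypothetical placement: image loaded at 16MiB physical, 12MiB in size. */
static const uint64_t xen_phys_start = 0x1000000;
static const uint64_t xen_virt_start = 0xffff828c80000000ULL; /* stand-in for XEN_VIRT_START */
static const uint64_t xen_image_size = 0xc00000;              /* stand-in for _end - XEN_VIRT_START */

/* Does this MFN back part of the Xen image (i.e. have a second, aliasing VA)? */
static int aliases_xen_image(uint64_t mfn)
{
    return mfn >= PFN_DOWN(xen_phys_start) &&
           mfn <  PFN_UP(xen_phys_start + xen_image_size);
}

/* The image-alias virtual address of an aliasing MFN. */
static uint64_t alias_va(uint64_t mfn)
{
    return xen_virt_start + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
}

int main(void)
{
    /* Probe the boundary around the first image page frame (0x1000 here). */
    for ( uint64_t mfn = 0x0fff; mfn <= 0x1001; mfn++ )
        printf("mfn %#llx: alias=%d va=%#llx\n",
               (unsigned long long)mfn, aliases_xen_image(mfn),
               aliases_xen_image(mfn) ? (unsigned long long)alias_va(mfn) : 0ULL);
    return 0;
}
```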
author:   Keir Fraser <keir.fraser@citrix.com>
date:     Wed May 20 15:38:34 2009 +0100 (2009-05-20)
parents:  cafab2084410
children: 89e50c449307
files:    xen/arch/x86/mm.c
line diff
```diff
--- a/xen/arch/x86/mm.c	Wed May 20 15:35:32 2009 +0100
+++ b/xen/arch/x86/mm.c	Wed May 20 15:38:34 2009 +0100
@@ -709,6 +709,23 @@ int is_iomem_page(unsigned long mfn)
     return (page_get_owner(page) == dom_io);
 }
 
+static void update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
+{
+#ifdef __x86_64__
+    bool_t alias = mfn >= PFN_DOWN(xen_phys_start) &&
+        mfn < PFN_UP(xen_phys_start + (unsigned long)_end - XEN_VIRT_START);
+    unsigned long xen_va =
+        XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
+
+    if ( unlikely(alias) && cacheattr )
+        map_pages_to_xen(xen_va, mfn, 1, 0);
+    map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+                     PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
+    if ( unlikely(alias) && !cacheattr )
+        map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
+#endif
+}
+
 
 int
 get_page_from_l1e(
@@ -796,10 +813,7 @@ get_page_from_l1e(
             y = cmpxchg(&page->count_info, x, nx);
         }
 
-#ifdef __x86_64__
-        map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
-                         PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
-#endif
+        update_xen_mappings(mfn, cacheattr);
     }
 
     return 1;
@@ -857,12 +871,6 @@ get_page_from_l2e(
                 return -EINVAL;
             }
         } while ( m++ < (mfn + (L1_PAGETABLE_ENTRIES-1)) );
-
-#ifdef __x86_64__
-        map_pages_to_xen(
-            (unsigned long)mfn_to_virt(mfn), mfn, L1_PAGETABLE_ENTRIES,
-            PAGE_HYPERVISOR | l2e_get_flags(l2e));
-#endif
     }
 
     return rc;
@@ -2406,10 +2414,7 @@ void cleanup_page_cacheattr(struct page_
 
     BUG_ON(is_xen_heap_page(page));
 
-#ifdef __x86_64__
-    map_pages_to_xen((unsigned long)page_to_virt(page), page_to_mfn(page),
-                     1, PAGE_HYPERVISOR);
-#endif
+    update_xen_mappings(page_to_mfn(page), 0);
 }
 
```
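What matters in update_xen_mappings() is the ordering of the three map_pages_to_xen() calls: when switching to a non-WB attribute, the image alias is torn down before the direct mapping's attribute changes, so no stale WB alias coexists with the new attribute; when restoring WB, the direct map is fixed up first and only then is the alias re-established. The harness below is a minimal model of that ordering, not the real code: map_pages_to_xen() is stubbed to log its arguments, the alias flag and both virtual addresses are passed in explicitly (the real function derives them from the MFN), and PAGE_HYPERVISOR and cacheattr_to_pte_flags() are simplified stand-ins for Xen's definitions.

```c
#include <stdio.h>

#define PAGE_HYPERVISOR 0x63UL   /* stand-in flag value, not Xen's */

/* Stub: log the action instead of touching page tables. */
static void map_pages_to_xen(unsigned long va, unsigned long mfn,
                             unsigned long nr, unsigned long flags)
{
    if ( flags == 0 )
        printf("  unmap va=%#lx (mfn %#lx, %lu page(s))\n", va, mfn, nr);
    else
        printf("  map   va=%#lx (mfn %#lx, %lu page(s)) flags=%#lx\n",
               va, mfn, nr, flags);
}

/* Simplified: the real helper encodes the 3-bit attribute into PAT/PCD/PWT. */
static unsigned long cacheattr_to_pte_flags(unsigned long cacheattr)
{
    return cacheattr << 3;
}

static void update_xen_mappings(unsigned long mfn, unsigned long cacheattr,
                                int alias, unsigned long xen_va,
                                unsigned long direct_va)
{
    /* Going non-WB: drop the image alias first. */
    if ( alias && cacheattr )
        map_pages_to_xen(xen_va, mfn, 1, 0);
    map_pages_to_xen(direct_va, mfn, 1,
                     PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
    /* Back to WB: only now re-establish the alias. */
    if ( alias && !cacheattr )
        map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
}

int main(void)
{
    /* Hypothetical addresses for one aliased MFN. */
    unsigned long mfn = 0x1000;
    unsigned long xen_va = 0xffff828c80000000UL;
    unsigned long direct_va = 0xffff830001000000UL;

    printf("set UC (cacheattr != 0):\n");
    update_xen_mappings(mfn, 1, 1, xen_va, direct_va);
    printf("restore WB (cacheattr == 0):\n");
    update_xen_mappings(mfn, 0, 1, xen_va, direct_va);
    return 0;
}
```

Note how this also explains why cleanup_page_cacheattr() can simply call update_xen_mappings(mfn, 0): restoring WB both fixes the direct mapping and re-maps the alias, which in turn lets large-page mappings of the image region be re-established.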