debuggers.hg
changeset 18991:629f028d22f9
PoD memory 2/9: calls to gfn_to_mfn_query()
Make shadow code, and other important places, call gfn_to_mfn_query().
In particular, any code path that holds the shadow lock must use a
query call, so that the lookup cannot try to populate a
populate-on-demand entry and deadlock on the lock it already holds.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
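The rule this patch enforces is worth spelling out: a p2m lookup made while the shadow lock is held must not allocate, because populating a populate-on-demand (PoD) entry can re-enter code that takes the shadow lock. Below is a minimal sketch of the caller-side pattern, mirroring the emulate_gva_to_mfn() hunk further down; the helper name lookup_for_shadow() is hypothetical, while shadow_locked_by_me() and the gfn_to_mfn*() calls are the ones used in this diff.

/* Hypothetical helper, not part of the patch: pick the lookup type
 * based on whether the shadow lock is currently held. */
static mfn_t lookup_for_shadow(struct vcpu *v, gfn_t gfn, p2m_type_t *t)
{
    if ( shadow_locked_by_me(v->domain) )
        /* Query-type lookup: never populates a PoD entry, so it is
         * safe while the shadow lock is held. */
        return gfn_to_mfn_query(v->domain, gfn, t);
    else
        /* Full lookup: may allocate to populate a PoD entry. */
        return gfn_to_mfn(v->domain, gfn, t);
}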
author    Keir Fraser <keir.fraser@citrix.com>
date      Mon Jan 05 10:42:39 2009 +0000 (2009-01-05)
parents   0cd1ba8bd7cd
children  f2ba08549466
files     xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm/p2m.c xen/arch/x86/mm/shadow/multi.c xen/arch/x86/mm/shadow/types.h
line diff
--- a/xen/arch/x86/hvm/svm/svm.c	Mon Jan 05 10:41:48 2009 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c	Mon Jan 05 10:42:39 2009 +0000
@@ -888,7 +888,7 @@ static void svm_do_nested_pgfault(paddr_
      * If this GFN is emulated MMIO or marked as read-only, pass the fault
      * to the mmio handler.
      */
-    mfn = gfn_to_mfn_current(gfn, &p2mt);
+    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
     if ( (p2mt == p2m_mmio_dm) || (p2mt == p2m_ram_ro) )
     {
         if ( !handle_mmio() )
--- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Jan 05 10:41:48 2009 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Jan 05 10:42:39 2009 +0000
@@ -2124,9 +2124,9 @@ static void ept_handle_violation(unsigne
     mfn_t mfn;
     p2m_type_t t;
 
-    mfn = gfn_to_mfn(d, gfn, &t);
+    mfn = gfn_to_mfn_guest(d, gfn, &t);
 
-    /* There are two legitimate reasons for taking an EPT violation.
+    /* There are three legitimate reasons for taking an EPT violation.
      * One is a guest access to MMIO space. */
     if ( gla_validity == EPT_GLA_VALIDITY_MATCH && p2m_is_mmio(t) )
     {
@@ -2134,15 +2134,18 @@ static void ept_handle_violation(unsigne
         return;
     }
 
-    /* The other is log-dirty mode, writing to a read-only page */
-    if ( paging_mode_log_dirty(d)
-         && (gla_validity == EPT_GLA_VALIDITY_MATCH
-             || gla_validity == EPT_GLA_VALIDITY_GPT_WALK)
+    /* The second is log-dirty mode, writing to a read-only page;
+     * The third is populating a populate-on-demand page. */
+    if ( (gla_validity == EPT_GLA_VALIDITY_MATCH
+          || gla_validity == EPT_GLA_VALIDITY_GPT_WALK)
          && p2m_is_ram(t) && (t != p2m_ram_ro) )
     {
-        paging_mark_dirty(d, mfn_x(mfn));
-        p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        if ( paging_mode_log_dirty(d) )
+        {
+            paging_mark_dirty(d, mfn_x(mfn));
+            p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
+            flush_tlb_mask(d->domain_dirty_cpumask);
+        }
         return;
     }
 
--- a/xen/arch/x86/mm/p2m.c	Mon Jan 05 10:41:48 2009 +0000
+++ b/xen/arch/x86/mm/p2m.c	Mon Jan 05 10:42:39 2009 +0000
@@ -732,7 +732,7 @@ static void audit_p2m(struct domain *d)
             continue;
         }
 
-        p2mfn = gfn_to_mfn_foreign(d, gfn, &type);
+        p2mfn = gfn_to_mfn_type_foreign(d, gfn, &type, p2m_query);
         if ( mfn_x(p2mfn) != mfn )
         {
             mpbad++;
@@ -750,7 +750,7 @@ static void audit_p2m(struct domain *d)
 
         if ( test_linear && (gfn <= d->arch.p2m->max_mapped_pfn) )
         {
-            lp2mfn = mfn_x(gfn_to_mfn(d, gfn, &type));
+            lp2mfn = mfn_x(gfn_to_mfn_query(d, gfn, &type));
             if ( lp2mfn != mfn_x(p2mfn) )
             {
                 P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
@@ -960,7 +960,7 @@ guest_physmap_add_entry(struct domain *d
     /* First, remove m->p mappings for existing p->m mappings */
     for ( i = 0; i < (1UL << page_order); i++ )
     {
-        omfn = gfn_to_mfn(d, gfn + i, &ot);
+        omfn = gfn_to_mfn_query(d, gfn + i, &ot);
         if ( p2m_is_ram(ot) )
         {
             ASSERT(mfn_valid(omfn));
@@ -985,7 +985,7 @@ guest_physmap_add_entry(struct domain *d
              * address */
            P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                      mfn + i, ogfn, gfn + i);
-           omfn = gfn_to_mfn(d, ogfn, &ot);
+           omfn = gfn_to_mfn_query(d, ogfn, &ot);
            if ( p2m_is_ram(ot) )
            {
                ASSERT(mfn_valid(omfn));
@@ -1154,7 +1154,7 @@ set_mmio_p2m_entry(struct domain *d, uns
     if ( !paging_mode_translate(d) )
         return 0;
 
-    omfn = gfn_to_mfn(d, gfn, &ot);
+    omfn = gfn_to_mfn_query(d, gfn, &ot);
     if ( p2m_is_ram(ot) )
     {
         ASSERT(mfn_valid(omfn));
--- a/xen/arch/x86/mm/shadow/multi.c	Mon Jan 05 10:41:48 2009 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c	Mon Jan 05 10:42:39 2009 +0000
@@ -2170,7 +2170,7 @@ static int validate_gl4e(struct vcpu *v,
     if ( guest_l4e_get_flags(new_gl4e) & _PAGE_PRESENT )
     {
         gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e);
-        mfn_t gl3mfn = gfn_to_mfn(d, gl3gfn, &p2mt);
+        mfn_t gl3mfn = gfn_to_mfn_query(d, gl3gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow);
         else
@@ -2227,7 +2227,7 @@ static int validate_gl3e(struct vcpu *v,
     if ( guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT )
     {
         gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
-        mfn_t gl2mfn = gfn_to_mfn(v->domain, gl2gfn, &p2mt);
+        mfn_t gl2mfn = gfn_to_mfn_query(v->domain, gl2gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
         else
@@ -2276,7 +2276,7 @@ static int validate_gl2e(struct vcpu *v,
     }
     else
     {
-        mfn_t gl1mfn = gfn_to_mfn(v->domain, gl1gfn, &p2mt);
+        mfn_t gl1mfn = gfn_to_mfn_query(v->domain, gl1gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
         else
@@ -2346,7 +2346,7 @@ static int validate_gl1e(struct vcpu *v,
     perfc_incr(shadow_validate_gl1e_calls);
 
     gfn = guest_l1e_get_gfn(new_gl1e);
-    gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+    gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
 
     l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
     result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
@@ -2406,7 +2406,7 @@ void sh_resync_l1(struct vcpu *v, mfn_t 
         shadow_l1e_t nsl1e;
 
         gfn = guest_l1e_get_gfn(gl1e);
-        gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+        gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
         l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
         rc |= shadow_set_l1e(v, sl1p, nsl1e, sl1mfn);
 
@@ -2723,7 +2723,7 @@ static void sh_prefetch(struct vcpu *v, 
 
         /* Look at the gfn that the l1e is pointing at */
         gfn = guest_l1e_get_gfn(gl1e);
-        gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+        gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
 
         /* Propagate the entry.  */
         l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
@@ -3079,7 +3079,7 @@ static int sh_page_fault(struct vcpu *v,
 
     /* What mfn is the guest trying to access? */
     gfn = guest_l1e_get_gfn(gw.l1e);
-    gmfn = gfn_to_mfn(d, gfn, &p2mt);
+    gmfn = gfn_to_mfn_guest(d, gfn, &p2mt);
 
     if ( shadow_mode_refcounts(d) &&
          (!p2m_is_valid(p2mt) || (!p2m_is_mmio(p2mt) && !mfn_valid(gmfn))) )
@@ -4119,7 +4119,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                gl2mfn = gfn_to_mfn(d, gl2gfn, &p2mt);
+                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
                 if ( p2m_is_ram(p2mt) )
                     flush |= sh_remove_write_access(v, gl2mfn, 2, 0);
             }
@@ -4132,7 +4132,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                gl2mfn = gfn_to_mfn(d, gl2gfn, &p2mt);
+                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
                 if ( p2m_is_ram(p2mt) )
                     sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
                                            ? SH_type_l2h_shadow
@@ -4518,7 +4518,12 @@ static mfn_t emulate_gva_to_mfn(struct v
     }
 
     /* Translate the GFN to an MFN */
-    mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
+    /* PoD: query only if shadow lock is held (to avoid deadlock) */
+    if ( shadow_locked_by_me(v->domain) )
+        mfn = gfn_to_mfn_query(v->domain, _gfn(gfn), &p2mt);
+    else
+        mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
+
     if ( p2mt == p2m_ram_ro )
         return _mfn(READONLY_GFN);
     if ( !p2m_is_ram(p2mt) )
@@ -4922,7 +4927,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
        {
            gfn = guest_l1e_get_gfn(*gl1e);
            mfn = shadow_l1e_get_mfn(*sl1e);
-           gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+           gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
            if ( mfn_x(gmfn) != mfn_x(mfn) )
                AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn
                           " --> %" PRI_mfn " != mfn %" PRI_mfn,
@@ -4989,7 +4994,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
            mfn = shadow_l2e_get_mfn(*sl2e);
            gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)
                ? get_fl1_shadow_status(v, gfn)
-               : get_shadow_status(v, gfn_to_mfn(v->domain, gfn, &p2mt),
+               : get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
                                    SH_type_l1_shadow);
            if ( mfn_x(gmfn) != mfn_x(mfn) )
                AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
@@ -4997,7 +5002,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
                           " --> %" PRI_mfn " != mfn %" PRI_mfn,
                           gfn_x(gfn),
                           (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
-                          : mfn_x(gfn_to_mfn(v->domain, gfn, &p2mt)),
+                          : mfn_x(gfn_to_mfn_query(v->domain, gfn, &p2mt)),
                           mfn_x(gmfn), mfn_x(mfn));
        }
    });
@@ -5036,7 +5041,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
        {
            gfn = guest_l3e_get_gfn(*gl3e);
            mfn = shadow_l3e_get_mfn(*sl3e);
-           gmfn = get_shadow_status(v, gfn_to_mfn(v->domain, gfn, &p2mt),
+           gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
                                     ((GUEST_PAGING_LEVELS == 3 ||
                                       is_pv_32on64_vcpu(v))
                                      && !shadow_mode_external(v->domain)
@@ -5083,7 +5088,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
        {
            gfn = guest_l4e_get_gfn(*gl4e);
            mfn = shadow_l4e_get_mfn(*sl4e);
-           gmfn = get_shadow_status(v, gfn_to_mfn(v->domain, gfn, &p2mt),
+           gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt),
                                     SH_type_l3_shadow);
            if ( mfn_x(gmfn) != mfn_x(mfn) )
                AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
--- a/xen/arch/x86/mm/shadow/types.h	Mon Jan 05 10:41:48 2009 +0000
+++ b/xen/arch/x86/mm/shadow/types.h	Mon Jan 05 10:42:39 2009 +0000
@@ -191,6 +191,12 @@ static inline shadow_l4e_t shadow_l4e_fr
 })
 #endif
 
+/* Override gfn_to_mfn to work with gfn_t */
+#undef gfn_to_mfn_query
+#define gfn_to_mfn_query(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_query)
+#undef gfn_to_mfn_guest
+#define gfn_to_mfn_guest(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_guest)
+
 /* The shadow types needed for the various levels. */
 
 #if GUEST_PAGING_LEVELS == 2
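The #undef/#define pair above exists because shadow code passes typed gfn_t values, while the underlying _gfn_to_mfn_type() takes a raw frame number; the override unwraps the gfn with gfn_x() at expansion time. An illustrative (assumed) call site, mirroring the multi.c hunks above:

    gfn_t gfn = guest_l1e_get_gfn(gl1e);
    p2m_type_t p2mt;
    /* Expands to _gfn_to_mfn_type(v->domain, gfn_x(gfn), &p2mt, p2m_query). */
    mfn_t gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);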