changeset 17928:f178082cce0a
Out-of-sync L1 shadows: Fixup Tables
This patch implements a very simple, non-complete reverse map of writable
mappings for OOS pages, to avoid a brute-force search of the shadows on
resyncs.
Signed-off-by: Gianluca Guida <gianluca.guida@eu.citrix.com>
author    Keir Fraser <keir.fraser@citrix.com>
date      Fri Jun 20 18:40:32 2008 +0100 (2008-06-20)
parents   597058a3b619
children  7b66b4e9f743
files     xen/arch/x86/mm/shadow/common.c
          xen/arch/x86/mm/shadow/multi.c
          xen/arch/x86/mm/shadow/multi.h
          xen/arch/x86/mm/shadow/private.h
          xen/arch/x86/mm/shadow/types.h
          xen/include/asm-x86/domain.h
          xen/include/asm-x86/mm.h
          xen/include/asm-x86/perfc_defn.h
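
Before the diff, it may help to see the shape of the mechanism in isolation.
The sketch below is a minimal, self-contained model of the fixup table: a
small per-vcpu hash table of (gmfn, smfn, offset) triples recording where
writable shadow-L1 entries to an out-of-sync guest frame live, so that a
resync can revoke write access directly instead of walking every shadow. All
names and sizes here (FT_HASH, FT_ENTRIES, struct fixup, the printf standing
in for PTE surgery) are hypothetical simplifications, not Xen's; the real
definitions, bookkeeping and locking are in the patch itself.

/* fixup_sketch.c: illustrative model only, not the patch's code. */
#include <stdio.h>

#define FT_HASH     13          /* hash buckets, as SHADOW_OOS_FT_HASH */
#define FT_ENTRIES   4          /* slots per bucket (illustrative value) */
#define INVALID_MFN (~0UL)

struct fixup {
    unsigned long gmfn;         /* guest frame that is out of sync */
    unsigned long smfn;         /* shadow L1 with a writable mapping of it */
    unsigned long off;          /* PTE slot inside that shadow L1 */
};

static struct fixup table[FT_HASH][FT_ENTRIES];

/* Record one writable mapping of an OOS page; drop duplicates, reuse a
 * free slot, and simply give up when the bucket is full -- this is why
 * the reverse map is "non-complete". */
static void fixup_add(unsigned long gmfn, unsigned long smfn,
                      unsigned long off)
{
    struct fixup *b = table[gmfn % FT_HASH];
    int i, free_slot = -1;

    for ( i = 0; i < FT_ENTRIES; i++ )
    {
        if ( b[i].gmfn == INVALID_MFN )
            free_slot = i;
        else if ( b[i].gmfn == gmfn && b[i].smfn == smfn && b[i].off == off )
            return;             /* already recorded */
    }
    if ( free_slot >= 0 )
        b[free_slot] = (struct fixup){ gmfn, smfn, off };
    /* else: bucket overflow; a resync must use the brute-force path */
}

/* On resync, revoke write access via the recorded (smfn, off) pairs
 * instead of searching every shadow for mappings of gmfn. */
static void fixup_flush_gmfn(unsigned long gmfn)
{
    struct fixup *b = table[gmfn % FT_HASH];
    int i;

    for ( i = 0; i < FT_ENTRIES; i++ )
        if ( b[i].gmfn == gmfn )
        {
            printf("clear _PAGE_RW in shadow %#lx, slot %lu\n",
                   b[i].smfn, b[i].off);
            b[i].gmfn = INVALID_MFN;
        }
}

int main(void)
{
    int i, j;

    for ( i = 0; i < FT_HASH; i++ )
        for ( j = 0; j < FT_ENTRIES; j++ )
            table[i][j].gmfn = INVALID_MFN;

    fixup_add(0x1234, 0xbeef, 42);   /* guest's L1 mapped writably */
    fixup_add(0x1234, 0xbeef, 42);   /* duplicate: ignored */
    fixup_flush_gmfn(0x1234);        /* resync: one targeted revoke */
    return 0;
}

Because a bucket can fill up, an add can fail and the resync path then falls
back to the brute-force hash walk; the patch counts that fallback separately
with the new shadow_writeable_bf_1 perf counter.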
--- a/xen/arch/x86/mm/shadow/common.c	Fri Jun 20 18:39:45 2008 +0100
+++ b/xen/arch/x86/mm/shadow/common.c	Fri Jun 20 18:40:32 2008 +0100
@@ -580,6 +580,153 @@ static inline void _sh_resync_l1(struct
 #endif
 }
 
+#define _FIXUP_IDX(_b, _i) ((_b) * SHADOW_OOS_FT_HASH + (_i))
+
+void oos_fixup_add(struct vcpu *v, mfn_t gmfn,
+                   mfn_t smfn, unsigned long off)
+{
+    int idx, i, free = 0, free_slot = 0;
+    struct oos_fixup *fixups = v->arch.paging.shadow.oos_fixups;
+
+    idx = mfn_x(gmfn) % SHADOW_OOS_FT_HASH;
+    for ( i = 0; i < SHADOW_OOS_FT_ENTRIES; i++ )
+    {
+        if ( !mfn_valid(fixups[_FIXUP_IDX(idx, i)].gmfn)
+             || !mfn_is_out_of_sync(fixups[_FIXUP_IDX(idx, i)].gmfn) )
+        {
+            free = 1;
+            free_slot = _FIXUP_IDX(idx, i);
+        }
+        else if ( (mfn_x(fixups[_FIXUP_IDX(idx, i)].gmfn) == mfn_x(gmfn))
+                  && (mfn_x(fixups[_FIXUP_IDX(idx, i)].smfn) == mfn_x(smfn))
+                  && (fixups[_FIXUP_IDX(idx, i)].off == off) )
+        {
+            perfc_incr(shadow_oos_fixup_no_add);
+            return;
+        }
+    }
+
+    if ( free )
+    {
+        if ( !v->arch.paging.shadow.oos_fixup_used )
+            v->arch.paging.shadow.oos_fixup_used = 1;
+        fixups[free_slot].gmfn = gmfn;
+        fixups[free_slot].smfn = smfn;
+        fixups[free_slot].off = off;
+        perfc_incr(shadow_oos_fixup_add_ok);
+        return;
+    }
+
+
+    perfc_incr(shadow_oos_fixup_add_fail);
+}
+
+void oos_fixup_remove(struct vcpu *v, mfn_t gmfn)
+{
+    int idx, i;
+    struct domain *d = v->domain;
+
+    perfc_incr(shadow_oos_fixup_remove);
+
+    idx = mfn_x(gmfn) % SHADOW_OOS_FT_HASH;
+    for_each_vcpu(d, v)
+    {
+        struct oos_fixup *fixups = v->arch.paging.shadow.oos_fixups;
+        for ( i = 0; i < SHADOW_OOS_FT_ENTRIES; i++ )
+            if ( mfn_x(fixups[_FIXUP_IDX(idx, i)].gmfn) == mfn_x(gmfn) )
+                fixups[_FIXUP_IDX(idx, i)].gmfn = _mfn(INVALID_MFN);
+    }
+}
+
+int oos_fixup_flush(struct vcpu *v)
+{
+    int i, rc = 0;
+    struct oos_fixup *fixups = v->arch.paging.shadow.oos_fixups;
+
+    perfc_incr(shadow_oos_fixup_flush);
+
+    if ( !v->arch.paging.shadow.oos_fixup_used )
+        return 0;
+
+    for ( i = 0; i < SHADOW_OOS_FT_HASH * SHADOW_OOS_FT_ENTRIES; i++ )
+    {
+        if ( mfn_valid(fixups[i].gmfn) )
+        {
+            if ( mfn_is_out_of_sync(fixups[i].gmfn) )
+                rc |= sh_remove_write_access_from_sl1p(v, fixups[i].gmfn,
+                                                       fixups[i].smfn,
+                                                       fixups[i].off);
+            fixups[i].gmfn = _mfn(INVALID_MFN);
+        }
+    }
+
+    v->arch.paging.shadow.oos_fixup_used = 0;
+
+    return rc;
+}
+
+int oos_fixup_flush_gmfn(struct vcpu *v, mfn_t gmfn)
+{
+    int idx, i, rc = 0;
+    struct domain *d = v->domain;
+
+    perfc_incr(shadow_oos_fixup_flush_gmfn);
+
+    idx = mfn_x(gmfn) % SHADOW_OOS_FT_HASH;
+    for_each_vcpu(d, v)
+    {
+        struct oos_fixup *fixups = v->arch.paging.shadow.oos_fixups;
+
+        for ( i = 0; i < SHADOW_OOS_FT_ENTRIES; i++ )
+        {
+            if ( mfn_x(fixups[_FIXUP_IDX(idx, i)].gmfn) != mfn_x(gmfn) )
+                continue;
+
+            rc |= sh_remove_write_access_from_sl1p(v,
+                                                   fixups[_FIXUP_IDX(idx,i)].gmfn,
+                                                   fixups[_FIXUP_IDX(idx,i)].smfn,
+                                                   fixups[_FIXUP_IDX(idx,i)].off);
+
+            fixups[_FIXUP_IDX(idx,i)].gmfn = _mfn(INVALID_MFN);
+        }
+    }
+
+    return rc;
+}
+
+static int oos_remove_write_access(struct vcpu *v, mfn_t gmfn, unsigned long va)
+{
+    int ftlb = 0;
+
+    ftlb |= oos_fixup_flush_gmfn(v, gmfn);
+
+    switch ( sh_remove_write_access(v, gmfn, 0, va) )
+    {
+    default:
+    case 0:
+        break;
+
+    case 1:
+        ftlb |= 1;
+        break;
+
+    case -1:
+        /* An unfindable writeable typecount has appeared, probably via a
+         * grant table entry: can't shoot the mapping, so try to unshadow
+         * the page. If that doesn't work either, the guest is granting
+         * his pagetables and must be killed after all.
+         * This will flush the tlb, so we can return with no worries. */
+        sh_remove_shadows(v, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
+        return 1;
+    }
+
+    if ( ftlb )
+        flush_tlb_mask(v->domain->domain_dirty_cpumask);
+
+    return 0;
+}
+
+
 /* Pull all the entries on an out-of-sync page back into sync. */
 static void _sh_resync(struct vcpu *v, mfn_t gmfn, unsigned long va)
 {
@@ -595,26 +742,10 @@ static void _sh_resync(struct vcpu *v, m
     SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx, va=%lx\n",
                   v->domain->domain_id, v->vcpu_id, mfn_x(gmfn), va);
 
-    /* Need to pull write access so the page *stays* in sync.
-     * This might be rather slow but we hope that in the common case
-     * we're handling this pagetable after a guest walk has pulled
-     * write access the fast way. */
-    switch ( sh_remove_write_access(v, gmfn, 0, va) )
+    /* Need to pull write access so the page *stays* in sync. */
+    if ( oos_remove_write_access(v, gmfn, va) )
     {
-    default:
-    case 0:
-        break;
-
-    case 1:
-        flush_tlb_mask(v->domain->domain_dirty_cpumask);
-        break;
-
-    case -1:
-        /* An unfindable writeable typecount has appeared, probably via a
-         * grant table entry: can't shoot the mapping, so try to unshadow
-         * the page. If that doesn't work either, the guest is granting
-         * his pagetables and must be killed after all. */
-        sh_remove_shadows(v, gmfn, 0 /* Be thorough */, 1 /* Must succeed */);
+        /* Page has been unshadowed. */
         return;
     }
 
@@ -753,6 +884,9 @@ void sh_resync_all(struct vcpu *v, int s
     if ( do_locking )
         shadow_lock(v->domain);
 
+    if ( oos_fixup_flush(v) )
+        flush_tlb_mask(v->domain->domain_dirty_cpumask);
+
     /* First: resync all of this vcpu's oos pages */
     for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
         if ( mfn_valid(oos[idx]) )
@@ -882,7 +1016,10 @@ void shadow_demote(struct vcpu *v, mfn_t
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
     /* Was the page out of sync? */
     if ( page_is_out_of_sync(page) )
+    {
         oos_hash_remove(v, gmfn);
+        oos_fixup_remove(v, gmfn);
+    }
 #endif
     clear_bit(_PGC_page_table, &page->count_info);
 }
@@ -2224,7 +2361,10 @@ int sh_remove_write_access(struct vcpu *
 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */
 
     /* Brute-force search of all the shadows, by walking the hash */
-    perfc_incr(shadow_writeable_bf);
+    if ( level == 0 )
+        perfc_incr(shadow_writeable_bf_1);
+    else
+        perfc_incr(shadow_writeable_bf);
     hash_foreach(v, callback_mask, callbacks, gmfn);
 
     /* If that didn't catch the mapping, then there's some non-pagetable
@@ -2244,7 +2384,34 @@ int sh_remove_write_access(struct vcpu *
     return 1;
 }
 
-
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
+                                     mfn_t smfn, unsigned long off)
+{
+    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
+
+    ASSERT(mfn_valid(smfn));
+    ASSERT(mfn_valid(gmfn));
+
+    if ( sp->type == SH_type_l1_32_shadow )
+    {
+        return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,2)
+            (v, gmfn, smfn, off);
+    }
+#if CONFIG_PAGING_LEVELS >= 3
+    else if ( sp->type == SH_type_l1_pae_shadow )
+        return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,3)
+            (v, gmfn, smfn, off);
+#if CONFIG_PAGING_LEVELS >= 4
+    else if ( sp->type == SH_type_l1_64_shadow )
+        return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,4)
+            (v, gmfn, smfn, off);
+#endif
+#endif
+
+    return 0;
+}
+#endif
 
 /**************************************************************************/
 /* Remove all mappings of a guest frame from the shadow tables.
@@ -2581,6 +2748,25 @@ static void sh_update_paging_modes(struc
     }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+    if ( v->arch.paging.shadow.oos_fixups == NULL )
+    {
+        int i;
+        v->arch.paging.shadow.oos_fixups =
+            alloc_xenheap_pages(SHADOW_OOS_FT_ORDER);
+        if ( v->arch.paging.shadow.oos_fixups == NULL )
+        {
+            SHADOW_ERROR("Could not allocate OOS fixup table"
+                         " for dom %u vcpu %u\n",
+                         v->domain->domain_id, v->vcpu_id);
+            domain_crash(v->domain);
+            return;
+        }
+        for ( i = 0; i < SHADOW_OOS_FT_HASH * SHADOW_OOS_FT_ENTRIES; i++ )
+            v->arch.paging.shadow.oos_fixups[i].gmfn = _mfn(INVALID_MFN);
+    }
+#endif /* OOS */
+
     // Valid transitions handled by this function:
     // - For PV guests:
     //     - after a shadow mode has been changed
@@ -2908,17 +3094,27 @@ void shadow_teardown(struct domain *d)
         }
     }
 
-#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
+#if (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC))
     /* Free the virtual-TLB array attached to each vcpu */
     for_each_vcpu(d, v)
     {
+#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
         if ( v->arch.paging.vtlb )
         {
             xfree(v->arch.paging.vtlb);
             v->arch.paging.vtlb = NULL;
         }
+#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
+
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+        if ( v->arch.paging.shadow.oos_fixups )
+        {
+            free_xenheap_pages(v->arch.paging.shadow.oos_fixups,
+                               SHADOW_OOS_FT_ORDER);
+        }
+#endif /* OOS */
     }
-#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
+#endif /* (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC)) */
 
     list_for_each_safe(entry, n, &d->arch.paging.shadow.p2m_freelist)
     {
--- a/xen/arch/x86/mm/shadow/multi.c	Fri Jun 20 18:39:45 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Fri Jun 20 18:40:32 2008 +0100
@@ -1409,6 +1409,9 @@ static int shadow_set_l1e(struct vcpu *v
     int flags = 0;
     struct domain *d = v->domain;
     shadow_l1e_t old_sl1e;
+#if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
+    mfn_t new_gmfn = shadow_l1e_get_mfn(new_sl1e);
+#endif
     ASSERT(sl1e != NULL);
 
     old_sl1e = *sl1e;
@@ -1425,8 +1428,18 @@ static int shadow_set_l1e(struct vcpu *v
             /* Doesn't look like a pagetable. */
             flags |= SHADOW_SET_ERROR;
             new_sl1e = shadow_l1e_empty();
-        } else {
+        }
+        else
+        {
             shadow_vram_get_l1e(new_sl1e, sl1e, sl1mfn, d);
+#if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
+            if ( mfn_valid(new_gmfn) && mfn_oos_may_write(new_gmfn)
+                 && (shadow_l1e_get_flags(new_sl1e) & _PAGE_RW) )
+            {
+                oos_fixup_add(v, new_gmfn, sl1mfn, pgentry_ptr_to_slot(sl1e));
+            }
+#endif
+
         }
     }
 }
@@ -4238,6 +4251,56 @@ sh_update_cr3(struct vcpu *v, int do_loc
 /**************************************************************************/
 /* Functions to revoke guest rights */
 
+#if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
+int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
+                                 mfn_t smfn, unsigned long off)
+{
+    int r;
+    shadow_l1e_t *sl1p, sl1e;
+    struct shadow_page_info *sp;
+
+    ASSERT(mfn_valid(gmfn));
+    ASSERT(mfn_valid(smfn));
+
+    sp = mfn_to_shadow_page(smfn);
+
+    if ( sp->mbz != 0 ||
+#if GUEST_PAGING_LEVELS == 4
+         (sp->type != SH_type_l1_64_shadow)
+#elif GUEST_PAGING_LEVELS == 3
+         (sp->type != SH_type_l1_pae_shadow)
+#elif GUEST_PAGING_LEVELS == 2
+         (sp->type != SH_type_l1_32_shadow)
+#endif
+       )
+        goto fail;
+
+    sl1p = sh_map_domain_page(smfn);
+    sl1p += off;
+    sl1e = *sl1p;
+    if ( ((shadow_l1e_get_flags(sl1e) & (_PAGE_PRESENT|_PAGE_RW))
+          != (_PAGE_PRESENT|_PAGE_RW))
+         || (mfn_x(shadow_l1e_get_mfn(sl1e)) != mfn_x(gmfn)) )
+    {
+        sh_unmap_domain_page(sl1p);
+        goto fail;
+    }
+
+    /* Found it!  Need to remove its write permissions. */
+    sl1e = shadow_l1e_remove_flags(sl1e, _PAGE_RW);
+    r = shadow_set_l1e(v, sl1p, sl1e, smfn);
+    ASSERT( !(r & SHADOW_SET_ERROR) );
+
+    sh_unmap_domain_page(sl1p);
+    perfc_incr(shadow_writeable_h_7);
+    return 1;
+
+ fail:
+    perfc_incr(shadow_writeable_h_8);
+    return 0;
+}
+#endif /* OOS */
+
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
 static int sh_guess_wrmap(struct vcpu *v, unsigned long vaddr, mfn_t gmfn)
 /* Look up this vaddr in the current shadow and see if it's a writeable
--- a/xen/arch/x86/mm/shadow/multi.h	Fri Jun 20 18:39:45 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.h	Fri Jun 20 18:40:32 2008 +0100
@@ -124,4 +124,8 @@ SHADOW_INTERNAL_NAME(sh_resync_l1, GUEST
 extern int
 SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, GUEST_LEVELS)
     (struct vcpu*v, mfn_t gmfn);
+
+extern int
+SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p, GUEST_LEVELS)
+    (struct vcpu *v, mfn_t gmfn, mfn_t smfn, unsigned long off);
 #endif
--- a/xen/arch/x86/mm/shadow/private.h	Fri Jun 20 18:39:45 2008 +0100
+++ b/xen/arch/x86/mm/shadow/private.h	Fri Jun 20 18:40:32 2008 +0100
@@ -321,6 +321,16 @@ static inline int sh_type_is_pinnable(st
  */
 #define SHF_out_of_sync (1u<<30)
 #define SHF_oos_may_write (1u<<29)
+
+/* Fixup tables are a non-complete writable-mappings reverse map for
+   OOS pages. This let us quickly resync pages (avoiding brute-force
+   search of the shadows) when the va hint is not sufficient (i.e.,
+   the pagetable is mapped in multiple places and in multiple
+   shadows.) */
+#define SHADOW_OOS_FT_ENTRIES                           \
+    ((PAGE_SIZE << SHADOW_OOS_FT_ORDER)                 \
+     / (SHADOW_OOS_FT_HASH * sizeof(struct oos_fixup)))
+
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */
 
 static inline int sh_page_has_multiple_shadows(struct page_info *pg)
@@ -415,6 +425,11 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn
 /* Pull an out-of-sync page back into sync. */
 void sh_resync(struct vcpu *v, mfn_t gmfn);
 
+void oos_fixup_add(struct vcpu *v, mfn_t gmfn, mfn_t smfn, unsigned long off);
+
+int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
+                                     mfn_t smfn, unsigned long offset);
+
 /* Pull all out-of-sync shadows back into sync.  If skip != 0, we try
  * to avoid resyncing where we think we can get away with it. */
 
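For concreteness, the SHADOW_OOS_FT_ENTRIES arithmetic works out as follows
on a hypothetical 64-bit build (mfn_t and unsigned long both 8 bytes, so
sizeof(struct oos_fixup) == 24, with PAGE_SIZE == 4096):

    SHADOW_OOS_FT_ENTRIES = (PAGE_SIZE << SHADOW_OOS_FT_ORDER)
                            / (SHADOW_OOS_FT_HASH * sizeof(struct oos_fixup))
                          = (4096 << 1) / (13 * 24)
                          = 8192 / 312
                          = 26 slots per hash bucket (integer division)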
--- a/xen/arch/x86/mm/shadow/types.h	Fri Jun 20 18:39:45 2008 +0100
+++ b/xen/arch/x86/mm/shadow/types.h	Fri Jun 20 18:40:32 2008 +0100
@@ -441,6 +441,7 @@ struct shadow_walk_t
 #if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
 #define sh_resync_l1 INTERNAL_NAME(sh_resync_l1)
 #define sh_safe_not_to_sync INTERNAL_NAME(sh_safe_not_to_sync)
+#define sh_rm_write_access_from_sl1p INTERNAL_NAME(sh_rm_write_access_from_sl1p)
 #endif
 
 /* The sh_guest_(map|get)_* functions depends on Xen's paging levels */
--- a/xen/include/asm-x86/domain.h	Fri Jun 20 18:39:45 2008 +0100
+++ b/xen/include/asm-x86/domain.h	Fri Jun 20 18:40:32 2008 +0100
@@ -129,6 +129,12 @@ struct shadow_vcpu {
     /* Shadow out-of-sync: pages that this vcpu has let go out of sync */
     mfn_t oos[SHADOW_OOS_PAGES];
     unsigned long oos_va[SHADOW_OOS_PAGES];
+    struct oos_fixup {
+        mfn_t gmfn;
+        mfn_t smfn;
+        unsigned long off;
+    } *oos_fixups;
+    int oos_fixup_used;
 };
 
 /************************************************/
--- a/xen/include/asm-x86/mm.h	Fri Jun 20 18:39:45 2008 +0100
+++ b/xen/include/asm-x86/mm.h	Fri Jun 20 18:40:32 2008 +0100
@@ -131,7 +131,12 @@ static inline u32 pickle_domptr(struct d
 #define SHADOW_MAX_ORDER 2 /* Need up to 16k allocs for 32-bit on PAE/64 */
 
 /* The number of out-of-sync shadows we allow per vcpu (prime, please) */
-#define SHADOW_OOS_PAGES 7
+#define SHADOW_OOS_PAGES 3
+
+/* The order OOS fixup tables per vcpu */
+#define SHADOW_OOS_FT_ORDER 1
+/* OOS fixup tables hash entries */
+#define SHADOW_OOS_FT_HASH 13
 
 #define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
 #define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
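To put the new constants in perspective (again assuming 4KB pages):
SHADOW_OOS_FT_ORDER of 1 makes each vcpu's fixup table a two-page, order-1
xenheap allocation,

    PAGE_SIZE << SHADOW_OOS_FT_ORDER = 4096 << 1 = 8192 bytes per vcpu

split into SHADOW_OOS_FT_HASH = 13 hash buckets. The same hunk also lowers
SHADOW_OOS_PAGES from 7 to 3, shrinking the set of pages each vcpu may hold
out of sync at any one time.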
--- a/xen/include/asm-x86/perfc_defn.h	Fri Jun 20 18:39:45 2008 +0100
+++ b/xen/include/asm-x86/perfc_defn.h	Fri Jun 20 18:40:32 2008 +0100
@@ -81,7 +81,10 @@ PERFCOUNTER(shadow_writeable_h_3,  "shad
 PERFCOUNTER(shadow_writeable_h_4,  "shadow writeable: linux low/solaris")
 PERFCOUNTER(shadow_writeable_h_5,  "shadow writeable: linux high")
 PERFCOUNTER(shadow_writeable_h_6,  "shadow writeable: unsync va")
+PERFCOUNTER(shadow_writeable_h_7,  "shadow writeable: sl1p")
+PERFCOUNTER(shadow_writeable_h_8,  "shadow writeable: sl1p failed")
 PERFCOUNTER(shadow_writeable_bf,   "shadow writeable brute-force")
+PERFCOUNTER(shadow_writeable_bf_1, "shadow writeable resync bf")
 PERFCOUNTER(shadow_mappings,       "shadow removes all mappings")
 PERFCOUNTER(shadow_mappings_bf,    "shadow rm-mappings brute-force")
 PERFCOUNTER(shadow_early_unshadow, "shadow unshadows for fork/exit")
@@ -102,6 +105,13 @@ PERFCOUNTER(shadow_em_ex_pt,       "shad
 PERFCOUNTER(shadow_em_ex_non_pt,   "shadow extra non-pt-write op")
 PERFCOUNTER(shadow_em_ex_fail,     "shadow extra emulation failed")
 
+PERFCOUNTER(shadow_oos_fixup_add_ok,    "shadow OOS fixups adds")
+PERFCOUNTER(shadow_oos_fixup_no_add,    "shadow OOS fixups no adds")
+PERFCOUNTER(shadow_oos_fixup_add_fail,  "shadow OOS fixups adds failed")
+PERFCOUNTER(shadow_oos_fixup_remove,    "shadow OOS fixups removes")
+PERFCOUNTER(shadow_oos_fixup_flush,     "shadow OOS fixups flushes")
+PERFCOUNTER(shadow_oos_fixup_flush_gmfn,"shadow OOS fixups gmfn flushes")
+
 PERFCOUNTER(shadow_unsync,         "shadow OOS unsyncs")
 PERFCOUNTER(shadow_unsync_evict,   "shadow OOS evictions")
 PERFCOUNTER(shadow_resync,         "shadow OOS resyncs")