changeset 17929:7b66b4e9f743
Out-of-sync L1 shadows: OOS snapshot.
Make snapshots of guest pages on unsync to allow faster revalidation
of OOS pages.
Signed-off-by: Gianluca Guida <gianluca.guida@eu.citrix.com>
author    Keir Fraser <keir.fraser@citrix.com>
date      Fri Jun 20 18:41:09 2008 +0100 (2008-06-20)
parents   f178082cce0a
children  6563ea38e002
files     xen/arch/x86/mm/shadow/common.c
          xen/arch/x86/mm/shadow/multi.c
          xen/arch/x86/mm/shadow/multi.h
          xen/arch/x86/mm/shadow/private.h
          xen/include/asm-x86/domain.h
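
The idea, restated: when a guest L1 pagetable page is allowed to go out of sync, Xen now takes a byte-for-byte snapshot of it; at resync time only the entries that differ from the snapshot have to be re-propagated into the shadow, instead of revalidating every entry on the page. Below is a minimal standalone sketch of that pattern in C; the types and the revalidate callback are hypothetical stand-ins for the real shadow machinery, not the Xen code itself.

    #include <string.h>

    #define ENTRIES_PER_PAGE 512      /* one 4K page of 8-byte entries */

    typedef unsigned long pte_t;      /* stand-in for guest_l1e_t */

    /* Taken when the guest page goes out of sync. */
    static void snapshot_on_unsync(const pte_t *guest_page, pte_t *snapshot)
    {
        memcpy(snapshot, guest_page, ENTRIES_PER_PAGE * sizeof(pte_t));
    }

    /* At resync: revalidate only the entries the guest actually changed. */
    static int resync_against_snapshot(const pte_t *guest_page, pte_t *snapshot,
                                       void (*revalidate)(int idx, pte_t e))
    {
        int i, changed = 0;

        for ( i = 0; i < ENTRIES_PER_PAGE; i++ )
            if ( guest_page[i] != snapshot[i] )
            {
                revalidate(i, guest_page[i]); /* refresh the shadow entry */
                snapshot[i] = guest_page[i];  /* keep the snapshot current */
                changed++;
            }
        return changed;
    }

In the patch itself the snapshots are shadow-pool pages of the new SH_type_oos_snapshot type: they are filled by a memcpy() in oos_hash_add() and compared entry by entry in sh_resync_l1().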
line diff
--- a/xen/arch/x86/mm/shadow/common.c	Fri Jun 20 18:40:32 2008 +0100
+++ b/xen/arch/x86/mm/shadow/common.c	Fri Jun 20 18:41:09 2008 +0100
@@ -72,7 +72,10 @@ void shadow_vcpu_init(struct vcpu *v)
     int i;
 
     for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
+    {
         v->arch.paging.shadow.oos[i] = _mfn(INVALID_MFN);
+        v->arch.paging.shadow.oos_snapshot[i] = _mfn(INVALID_MFN);
+    }
 #endif
 
     v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
@@ -562,7 +565,7 @@ void oos_audit_hash_is_present(struct do
 #endif
 
 /* Update the shadow, but keep the page out of sync. */
-static inline void _sh_resync_l1(struct vcpu *v, mfn_t gmfn)
+static inline void _sh_resync_l1(struct vcpu *v, mfn_t gmfn, mfn_t snpmfn)
 {
     struct page_info *pg = mfn_to_page(gmfn);
 
@@ -571,12 +574,12 @@ static inline void _sh_resync_l1(struct
 
     /* Call out to the appropriate per-mode resyncing function */
     if ( pg->shadow_flags & SHF_L1_32 )
-        SHADOW_INTERNAL_NAME(sh_resync_l1, 2)(v, gmfn);
+        SHADOW_INTERNAL_NAME(sh_resync_l1, 2)(v, gmfn, snpmfn);
     else if ( pg->shadow_flags & SHF_L1_PAE )
-        SHADOW_INTERNAL_NAME(sh_resync_l1, 3)(v, gmfn);
+        SHADOW_INTERNAL_NAME(sh_resync_l1, 3)(v, gmfn, snpmfn);
 #if CONFIG_PAGING_LEVELS >= 4
     else if ( pg->shadow_flags & SHF_L1_64 )
-        SHADOW_INTERNAL_NAME(sh_resync_l1, 4)(v, gmfn);
+        SHADOW_INTERNAL_NAME(sh_resync_l1, 4)(v, gmfn, snpmfn);
 #endif
 }
 
@@ -728,7 +731,7 @@ static int oos_remove_write_access(struc
 
 
 /* Pull all the entries on an out-of-sync page back into sync. */
-static void _sh_resync(struct vcpu *v, mfn_t gmfn, unsigned long va)
+static void _sh_resync(struct vcpu *v, mfn_t gmfn, unsigned long va, mfn_t snp)
 {
     struct page_info *pg = mfn_to_page(gmfn);
 
@@ -753,7 +756,7 @@ static void _sh_resync(struct vcpu *v, m
     pg->shadow_flags &= ~SHF_oos_may_write;
 
     /* Update the shadows with current guest entries. */
-    _sh_resync_l1(v, gmfn);
+    _sh_resync_l1(v, gmfn, snp);
 
     /* Now we know all the entries are synced, and will stay that way */
     pg->shadow_flags &= ~SHF_out_of_sync;
@@ -764,27 +767,41 @@ static void _sh_resync(struct vcpu *v, m
 /* Add an MFN to the list of out-of-sync guest pagetables */
 static void oos_hash_add(struct vcpu *v, mfn_t gmfn, unsigned long va)
 {
-    int idx;
+    int idx, oidx, swap = 0;
+    void *gptr, *gsnpptr;
     mfn_t *oos = v->arch.paging.shadow.oos;
     unsigned long *oos_va = v->arch.paging.shadow.oos_va;
+    mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
 
     idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
+    oidx = idx;
+
     if ( mfn_valid(oos[idx])
          && (mfn_x(oos[idx]) % SHADOW_OOS_PAGES) == idx )
     {
         /* Punt the current occupant into the next slot */
         SWAP(oos[idx], gmfn);
         SWAP(oos_va[idx], va);
+        swap = 1;
         idx = (idx + 1) % SHADOW_OOS_PAGES;
     }
     if ( mfn_valid(oos[idx]) )
     {
         /* Crush the current occupant. */
-        _sh_resync(v, oos[idx], oos_va[idx]);
+        _sh_resync(v, oos[idx], oos_va[idx], oos_snapshot[idx]);
         perfc_incr(shadow_unsync_evict);
     }
     oos[idx] = gmfn;
     oos_va[idx] = va;
+
+    if ( swap )
+        SWAP(oos_snapshot[idx], oos_snapshot[oidx]);
+
+    gptr = sh_map_domain_page(oos[oidx]);
+    gsnpptr = sh_map_domain_page(oos_snapshot[oidx]);
+    memcpy(gsnpptr, gptr, PAGE_SIZE);
+    sh_unmap_domain_page(gptr);
+    sh_unmap_domain_page(gsnpptr);
 }
 
 /* Remove an MFN from the list of out-of-sync guest pagetables */
@@ -814,25 +831,52 @@ static void oos_hash_remove(struct vcpu
     BUG();
 }
 
+mfn_t oos_snapshot_lookup(struct vcpu *v, mfn_t gmfn)
+{
+    int idx;
+    mfn_t *oos;
+    mfn_t *oos_snapshot;
+    struct domain *d = v->domain;
+
+    for_each_vcpu(d, v)
+    {
+        oos = v->arch.paging.shadow.oos;
+        oos_snapshot = v->arch.paging.shadow.oos_snapshot;
+        idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
+        if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
+            idx = (idx + 1) % SHADOW_OOS_PAGES;
+        if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
+        {
+            return oos_snapshot[idx];
+        }
+    }
+
+    SHADOW_ERROR("gmfn %lx was OOS but not in hash table\n", mfn_x(gmfn));
+    BUG();
+    return _mfn(INVALID_MFN);
+}
+
 /* Pull a single guest page back into sync */
 void sh_resync(struct vcpu *v, mfn_t gmfn)
 {
     int idx;
     mfn_t *oos;
     unsigned long *oos_va;
+    mfn_t *oos_snapshot;
     struct domain *d = v->domain;
 
     for_each_vcpu(d, v)
     {
         oos = v->arch.paging.shadow.oos;
         oos_va = v->arch.paging.shadow.oos_va;
+        oos_snapshot = v->arch.paging.shadow.oos_snapshot;
         idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
         if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
             idx = (idx + 1) % SHADOW_OOS_PAGES;
 
         if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
         {
-            _sh_resync(v, gmfn, oos_va[idx]);
+            _sh_resync(v, gmfn, oos_va[idx], oos_snapshot[idx]);
             oos[idx] = _mfn(INVALID_MFN);
             return;
         }
@@ -873,6 +917,7 @@ void sh_resync_all(struct vcpu *v, int s
     struct vcpu *other;
     mfn_t *oos = v->arch.paging.shadow.oos;
     unsigned long *oos_va = v->arch.paging.shadow.oos_va;
+    mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
 
     SHADOW_PRINTK("d=%d, v=%d\n", v->domain->domain_id, v->vcpu_id);
 
@@ -892,7 +937,7 @@ void sh_resync_all(struct vcpu *v, int s
         if ( mfn_valid(oos[idx]) )
         {
             /* Write-protect and sync contents */
-            _sh_resync(v, oos[idx], oos_va[idx]);
+            _sh_resync(v, oos[idx], oos_va[idx], oos_snapshot[idx]);
             oos[idx] = _mfn(INVALID_MFN);
         }
 
@@ -914,7 +959,7 @@ void sh_resync_all(struct vcpu *v, int s
 
         oos = other->arch.paging.shadow.oos;
         oos_va = other->arch.paging.shadow.oos_va;
-
+        oos_snapshot = other->arch.paging.shadow.oos_snapshot;
         for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
         {
             if ( !mfn_valid(oos[idx]) )
@@ -925,12 +970,12 @@ void sh_resync_all(struct vcpu *v, int s
                 /* Update the shadows and leave the page OOS. */
                 if ( sh_skip_sync(v, oos[idx]) )
                     continue;
-                _sh_resync_l1(other, oos[idx]);
+                _sh_resync_l1(other, oos[idx], oos_snapshot[idx]);
             }
             else
             {
                 /* Write-protect and sync contents */
-                _sh_resync(other, oos[idx], oos_va[idx]);
+                _sh_resync(other, oos[idx], oos_va[idx], oos_snapshot[idx]);
                 oos[idx] = _mfn(INVALID_MFN);
             }
         }
@@ -1233,7 +1278,8 @@ shadow_order(unsigned int shadow_type)
         0, /* SH_type_l3_64_shadow  */
         0, /* SH_type_l4_64_shadow  */
         2, /* SH_type_p2m_table     */
-        0  /* SH_type_monitor_table */
+        0, /* SH_type_monitor_table */
+        0  /* SH_type_oos_snapshot  */
         };
     ASSERT(shadow_type < SH_type_unused);
     return type_to_order[shadow_type];
@@ -2765,6 +2811,17 @@ static void sh_update_paging_modes(struc
         for ( i = 0; i < SHADOW_OOS_FT_HASH * SHADOW_OOS_FT_ENTRIES; i++ )
             v->arch.paging.shadow.oos_fixups[i].gmfn = _mfn(INVALID_MFN);
     }
+
+    if ( mfn_x(v->arch.paging.shadow.oos_snapshot[0]) == INVALID_MFN )
+    {
+        int i;
+        for(i = 0; i < SHADOW_OOS_PAGES; i++)
+        {
+            shadow_prealloc(d, SH_type_oos_snapshot, 1);
+            v->arch.paging.shadow.oos_snapshot[i] =
+                shadow_alloc(d, SH_type_oos_snapshot, 0);
+        }
+    }
 #endif /* OOS */
 
     // Valid transitions handled by this function:
@@ -3112,6 +3169,14 @@ void shadow_teardown(struct domain *d)
             free_xenheap_pages(v->arch.paging.shadow.oos_fixups,
                                SHADOW_OOS_FT_ORDER);
         }
+
+        {
+            int i;
+            mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
+            for(i = 0; i < SHADOW_OOS_PAGES; i++)
+                if ( mfn_valid(oos_snapshot[i]) )
+                    shadow_free(d, oos_snapshot[i]);
+        }
 #endif /* OOS */
     }
 #endif /* (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC)) */
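
A note on the lookup added above: the per-vcpu OOS table is a tiny direct-mapped hash in which a tracked page sits either in its home slot (its mfn modulo SHADOW_OOS_PAGES) or, if oos_hash_add() punted it on a collision, in the slot immediately after. The probe in oos_snapshot_lookup() and sh_resync() therefore checks at most two slots. Restated as a standalone sketch (illustrative names; the table size here is arbitrary):

    #define OOS_SLOTS 3   /* illustrative; Xen uses SHADOW_OOS_PAGES */

    /* Return the slot holding gmfn, or -1 if the page is not tracked. */
    static int oos_probe(const unsigned long *oos, unsigned long gmfn)
    {
        int idx = gmfn % OOS_SLOTS;        /* home slot */

        if ( oos[idx] != gmfn )
            idx = (idx + 1) % OOS_SLOTS;   /* a collision may have punted it */

        return ( oos[idx] == gmfn ) ? idx : -1;
    }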
--- a/xen/arch/x86/mm/shadow/multi.c	Fri Jun 20 18:40:32 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c	Fri Jun 20 18:41:09 2008 +0100
@@ -2607,6 +2607,9 @@ static int validate_gl1e(struct vcpu *v,
     mfn_t gmfn;
     p2m_type_t p2mt;
     int result = 0;
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+    mfn_t gl1mfn;
+#endif /* OOS */
 
     perfc_incr(shadow_validate_gl1e_calls);
 
@@ -2614,8 +2617,25 @@ static int validate_gl1e(struct vcpu *v,
     gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
 
     l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
-
     result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
+
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+    gl1mfn = _mfn(mfn_to_shadow_page(sl1mfn)->backpointer);
+    if ( mfn_valid(gl1mfn)
+         && mfn_is_out_of_sync(gl1mfn) )
+    {
+        /* Update the OOS snapshot. */
+        mfn_t snpmfn = oos_snapshot_lookup(v, gl1mfn);
+        guest_l1e_t *snp;
+
+        ASSERT(mfn_valid(snpmfn));
+
+        snp = sh_map_domain_page(snpmfn);
+        snp[guest_index(new_ge)] = new_gl1e;
+        sh_unmap_domain_page(snp);
+    }
+#endif /* OOS */
+
     return result;
 }
 
@@ -2626,24 +2646,44 @@ static int validate_gl1e(struct vcpu *v,
  * revalidates the guest entry that corresponds to it.
  * N.B. This function is called with the vcpu that unsynced the page,
  * *not* the one that is causing it to be resynced. */
-void sh_resync_l1(struct vcpu *v, mfn_t gmfn)
+void sh_resync_l1(struct vcpu *v, mfn_t gl1mfn, mfn_t snpmfn)
 {
     mfn_t sl1mfn;
     shadow_l1e_t *sl1p;
-    guest_l1e_t *gl1p, *gp;
+    guest_l1e_t *gl1p, *gp, *snp;
     int rc = 0;
 
-    sl1mfn = get_shadow_status(v, gmfn, SH_type_l1_shadow);
+    ASSERT(mfn_valid(snpmfn));
+
+    sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
     ASSERT(mfn_valid(sl1mfn)); /* Otherwise we would not have been called */
 
-    gp = sh_map_domain_page(gmfn);
+    snp = sh_map_domain_page(snpmfn);
+    gp = sh_map_domain_page(gl1mfn);
     gl1p = gp;
 
-    SHADOW_FOREACH_L1E(sl1mfn, sl1p, &gl1p, 0, {
-        rc |= validate_gl1e(v, gl1p, sl1mfn, sl1p);
+    SHADOW_FOREACH_L1E(sl1mfn, sl1p, &gl1p, 0, {
+        guest_l1e_t gl1e = *gl1p;
+        guest_l1e_t *snpl1p = (guest_l1e_t *)snp + guest_index(gl1p);
+
+        if ( memcmp(snpl1p, &gl1e, sizeof(gl1e)) )
+        {
+            gfn_t gfn;
+            mfn_t gmfn;
+            p2m_type_t p2mt;
+            shadow_l1e_t nsl1e;
+
+            gfn = guest_l1e_get_gfn(gl1e);
+            gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+            l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
+            rc |= shadow_set_l1e(v, sl1p, nsl1e, sl1mfn);
+
+            *snpl1p = gl1e;
+        }
     });
 
     sh_unmap_domain_page(gp);
+    sh_unmap_domain_page(snp);
 
     /* Setting shadow L1 entries should never need us to flush the TLB */
     ASSERT(!(rc & SHADOW_SET_FLUSH));
@@ -2891,6 +2931,10 @@ static void sh_prefetch(struct vcpu *v,
     shadow_l1e_t sl1e;
     u32 gflags;
     p2m_type_t p2mt;
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+    guest_l1e_t *snpl1p = NULL;
+#endif /* OOS */
+
 
     /* Prefetch no further than the end of the _shadow_ l1 MFN */
     dist = (PAGE_SIZE - ((unsigned long)ptr_sl1e & ~PAGE_MASK)) / sizeof sl1e;
@@ -2903,6 +2947,17 @@ static void sh_prefetch(struct vcpu *v,
         /* Normal guest page; grab the next guest entry */
         gl1p = sh_map_domain_page(gw->l1mfn);
         gl1p += guest_l1_table_offset(gw->va);
+
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+        if ( mfn_is_out_of_sync(gw->l1mfn) )
+        {
+            mfn_t snpmfn = oos_snapshot_lookup(v, gw->l1mfn);
+
+            ASSERT(mfn_valid(snpmfn));
+            snpl1p = sh_map_domain_page(snpmfn);
+            snpl1p += guest_l1_table_offset(gw->va);
+        }
+#endif /* OOS */
     }
 
     for ( i = 1; i < dist ; i++ )
@@ -2940,9 +2995,18 @@ static void sh_prefetch(struct vcpu *v,
         /* Propagate the entry. */
         l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
         (void) shadow_set_l1e(v, ptr_sl1e + i, sl1e, sl1mfn);
+
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+        if ( snpl1p != NULL )
+            snpl1p[i] = gl1e;
+#endif /* OOS */
     }
     if ( gl1p != NULL )
         sh_unmap_domain_page(gl1p);
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+    if ( snpl1p != NULL )
+        sh_unmap_domain_page(snpl1p);
+#endif /* OOS */
 }
 
 #endif /* SHADOW_OPTIMIZATIONS & SHOPT_PREFETCH */
@@ -3228,6 +3292,22 @@ static int sh_page_fault(struct vcpu *v,
     l1e_propagate_from_guest(v, gw.l1e, gmfn, &sl1e, ft, p2mt);
     r = shadow_set_l1e(v, ptr_sl1e, sl1e, sl1mfn);
 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+    if ( mfn_valid(gw.l1mfn)
+         && mfn_is_out_of_sync(gw.l1mfn) )
+    {
+        /* Update the OOS snapshot. */
+        mfn_t snpmfn = oos_snapshot_lookup(v, gw.l1mfn);
+        guest_l1e_t *snp;
+
+        ASSERT(mfn_valid(snpmfn));
+
+        snp = sh_map_domain_page(snpmfn);
+        snp[guest_l1_table_offset(va)] = gw.l1e;
+        sh_unmap_domain_page(snp);
+    }
+#endif /* OOS */
+
 #if SHADOW_OPTIMIZATIONS & SHOPT_PREFETCH
     /* Prefetch some more shadow entries */
     sh_prefetch(v, &gw, ptr_sl1e, sl1mfn);
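
The common thread in the multi.c hunks: every path that rewrites a shadow L1 entry for an out-of-sync guest page (validate_gl1e(), sh_prefetch() and sh_page_fault()) now refreshes the matching snapshot entry as well, so the memcmp() in sh_resync_l1() only flags entries the guest itself modified later. A sketch of that invariant, with simplified types and a hypothetical propagate() standing in for l1e_propagate_from_guest():

    typedef unsigned long pte_t;

    /* Hypothetical stand-in for the real propagation logic. */
    static pte_t propagate(pte_t guest_entry) { return guest_entry; }

    /* Whenever a shadow entry is rewritten, mirror the guest entry into
     * the snapshot, so snapshot[idx] always records the guest value the
     * shadow was last derived from. */
    static void set_shadow_and_mirror(pte_t *shadow_l1, pte_t *snapshot,
                                      int idx, pte_t guest_entry)
    {
        shadow_l1[idx] = propagate(guest_entry);
        snapshot[idx]  = guest_entry;
    }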
--- a/xen/arch/x86/mm/shadow/multi.h	Fri Jun 20 18:40:32 2008 +0100
+++ b/xen/arch/x86/mm/shadow/multi.h	Fri Jun 20 18:41:09 2008 +0100
@@ -119,7 +119,7 @@ SHADOW_INTERNAL_NAME(sh_paging_mode, GUE
 #if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
 extern void
 SHADOW_INTERNAL_NAME(sh_resync_l1, GUEST_LEVELS)
-     (struct vcpu *v, mfn_t gmfn);
+     (struct vcpu *v, mfn_t gmfn, mfn_t snpmfn);
 
 extern int
 SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, GUEST_LEVELS)
--- a/xen/arch/x86/mm/shadow/private.h	Fri Jun 20 18:40:32 2008 +0100
+++ b/xen/arch/x86/mm/shadow/private.h	Fri Jun 20 18:41:09 2008 +0100
@@ -196,9 +196,9 @@ struct shadow_page_info
             u32 tlbflush_timestamp;
         };
         struct {
-            unsigned int type:4;      /* What kind of shadow is this? */
+            unsigned int type:5;      /* What kind of shadow is this? */
             unsigned int pinned:1;    /* Is the shadow pinned? */
-            unsigned int count:27;    /* Reference count */
+            unsigned int count:26;    /* Reference count */
             u32 mbz;                  /* Must be zero: this is where the owner
                                        * field lives in a non-shadow page */
         } __attribute__((packed));
@@ -243,7 +243,8 @@ static inline void shadow_check_page_str
 #define SH_type_max_shadow    (13U)
 #define SH_type_p2m_table     (14U) /* in use as the p2m table */
 #define SH_type_monitor_table (15U) /* in use as a monitor table */
-#define SH_type_unused        (16U)
+#define SH_type_oos_snapshot  (16U) /* in use as OOS snapshot */
+#define SH_type_unused        (17U)
 
 /*
  * What counts as a pinnable shadow?
@@ -466,6 +467,8 @@ shadow_sync_other_vcpus(struct vcpu *v,
 }
 
 void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn);
+mfn_t oos_snapshot_lookup(struct vcpu *v, mfn_t gmfn);
+
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */
 
 /******************************************************************************
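
Why the bitfield layout in private.h changed: the new SH_type_oos_snapshot takes the value 16, which does not fit in the old 4-bit type field (maximum 15), so type grows to 5 bits and the reference count shrinks from 27 to 26 bits, keeping the fields packed in the same 32-bit word. A self-contained check of that arithmetic (a stand-in struct assuming a C11 compiler, not the real shadow_page_info):

    #include <assert.h>
    #include <stdint.h>

    struct sh_bits {
        unsigned int type:5;    /* must now hold values up to 16 */
        unsigned int pinned:1;
        unsigned int count:26;  /* 5 + 1 + 26 = 32, same size as before */
    };

    static_assert(sizeof(struct sh_bits) == sizeof(uint32_t),
                  "the three fields still pack into one 32-bit word");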
--- a/xen/include/asm-x86/domain.h	Fri Jun 20 18:40:32 2008 +0100
+++ b/xen/include/asm-x86/domain.h	Fri Jun 20 18:41:09 2008 +0100
@@ -129,6 +129,7 @@ struct shadow_vcpu {
     /* Shadow out-of-sync: pages that this vcpu has let go out of sync */
     mfn_t oos[SHADOW_OOS_PAGES];
     unsigned long oos_va[SHADOW_OOS_PAGES];
+    mfn_t oos_snapshot[SHADOW_OOS_PAGES];
     struct oos_fixup {
         mfn_t gmfn;
         mfn_t smfn;