debuggers.hg
changeset 4662:9a768d11cc7b
bitkeeper revision 1.1358 (4267e561Ml7gO0DQYGp9EYRUYPBDHA)
Merge burn.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into burn.cl.cam.ac.uk:/local/scratch-1/maf46/xen-unstable.bk
Signed-off-by: michael.fetterman@cl.cam.ac.uk
author    maf46@burn.cl.cam.ac.uk
date      Thu Apr 21 17:39:45 2005 +0000 (2005-04-21)
parents   caaf9d543bc5 acc0c98dfe3f
children  43d58d3eeaa5
files     xen/arch/x86/domain.c xen/arch/x86/shadow.c xen/arch/x86/vmx.c xen/arch/x86/vmx_io.c xen/common/page_alloc.c xen/include/asm-x86/shadow.h
line diff
--- a/xen/arch/x86/domain.c Thu Apr 21 15:39:08 2005 +0000
+++ b/xen/arch/x86/domain.c Thu Apr 21 17:39:45 2005 +0000
@@ -986,29 +986,42 @@ void domain_relinquish_resources(struct
     /* Release device mappings of other domains */
     gnttab_release_dev_mappings(d->grant_table);
 
-    /* Exit shadow mode before deconstructing final guest page table. */
-    shadow_mode_disable(d);
-
     /* Drop the in-use references to page-table bases. */
     for_each_exec_domain ( d, ed )
     {
         if ( pagetable_val(ed->arch.guest_table) != 0 )
         {
-            put_page_and_type(&frame_table[
-                pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT]);
+            struct pfn_info *page =
+                &frame_table[pagetable_val(ed->arch.guest_table)>>PAGE_SHIFT];
+
+            if ( shadow_mode_enabled(d) )
+                put_page(page);
+            else
+                put_page_and_type(page);
+
             ed->arch.guest_table = mk_pagetable(0);
         }
 
         if ( pagetable_val(ed->arch.guest_table_user) != 0 )
         {
-            put_page_and_type(&frame_table[
-                pagetable_val(ed->arch.guest_table_user) >> PAGE_SHIFT]);
+            struct pfn_info *page =
+                &frame_table[pagetable_val(ed->arch.guest_table_user)
+                             >> PAGE_SHIFT];
+
+            if ( shadow_mode_enabled(d) )
+                put_page(page);
+            else
+                put_page_and_type(page);
+
             ed->arch.guest_table_user = mk_pagetable(0);
         }
 
         vmx_relinquish_resources(ed);
     }
 
+    /* Exit shadow mode before deconstructing final guest page table. */
+    shadow_mode_destroy(d);
+
     /*
      * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
      * it automatically gets squashed when the guest's mappings go away.
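A note on the reference-count change above: with shadow mode active, the shadow code (not the installed base table) owns the type reference, so teardown drops only the general reference with put_page(); without shadow mode it drops both with put_page_and_type(). shadow_mode_destroy() then runs after the loop, once no exec_domain still points at a guest table. Below is a minimal sketch of that two-counter model, with plain integers standing in for Xen's frame_table accounting; all names (fake_page, take_base_ref, drop_base_ref) are hypothetical, not Xen APIs.

/* Illustrative only: models put_page() vs put_page_and_type() balancing. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page { int count; int type_count; };

static void take_base_ref(struct fake_page *pg, bool shadowed)
{
    pg->count++;                 /* general reference, always taken */
    if ( !shadowed )
        pg->type_count++;        /* type (page-table) reference */
}

static void drop_base_ref(struct fake_page *pg, bool shadowed)
{
    if ( shadowed )
        pg->count--;             /* analogue of put_page() */
    else
    {
        pg->type_count--;        /* analogue of put_page_and_type() */
        pg->count--;
    }
}

int main(void)
{
    struct fake_page pg = { 0, 0 };
    take_base_ref(&pg, true);    /* install base under shadow mode */
    drop_base_ref(&pg, true);    /* teardown: both counters return to 0 */
    assert(pg.count == 0 && pg.type_count == 0);
    printf("refs balanced\n");
    return 0;
}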
--- a/xen/arch/x86/shadow.c Thu Apr 21 15:39:08 2005 +0000
+++ b/xen/arch/x86/shadow.c Thu Apr 21 17:39:45 2005 +0000
@@ -33,6 +33,7 @@
 static void shadow_free_snapshot(struct domain *d,
                                  struct out_of_sync_entry *entry);
 static void remove_out_of_sync_entries(struct domain *d, unsigned long smfn);
+static void free_writable_pte_predictions(struct domain *d);
 
 /********
 
@@ -554,18 +555,22 @@ static void free_shadow_pages(struct dom
     for_each_exec_domain(d, ed)
     {
         l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
-        l2_pgentry_t hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
-        l2_pgentry_t smfn = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
-
-        if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
+
+        if ( mpl2e )
         {
-            put_shadow_ref(l2e_get_pfn(hl2e));
-            mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
-        }
-        if ( l2e_get_flags(smfn) & _PAGE_PRESENT )
-        {
-            put_shadow_ref(l2e_get_pfn(smfn));
-            mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
+            l2_pgentry_t hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
+            l2_pgentry_t smfn = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
+
+            if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
+            {
+                put_shadow_ref(l2e_get_pfn(hl2e));
+                mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
+            }
+            if ( l2e_get_flags(smfn) & _PAGE_PRESENT )
+            {
+                put_shadow_ref(l2e_get_pfn(smfn));
+                mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
+            }
         }
     }
 }
@@ -586,12 +591,11 @@ static void free_shadow_pages(struct dom
         unsigned long *mfn_list;
 
         /* Skip empty buckets. */
-        x = &d->arch.shadow_ht[i];
-        if ( x->gpfn_and_flags == 0 )
+        if ( d->arch.shadow_ht[i].gpfn_and_flags == 0 )
             continue;
 
         count = 0;
-        for ( ; x != NULL; x = x->next )
+        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
             if ( PINNED(x->smfn) )
                 count++;
         if ( !count )
@@ -675,14 +679,13 @@ void free_monitor_pagetable(struct exec_
     unsigned long mfn;
 
     ASSERT( pagetable_val(ed->arch.monitor_table) );
-    ASSERT( shadow_mode_external(ed->domain) );
 
     mpl2e = ed->arch.monitor_vtable;
 
     /*
      * First get the mfn for hl2_table by looking at monitor_table
      */
-    hl2e = mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT];
+    hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
     if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
     {
         mfn = l2e_get_pfn(hl2e);
@@ -690,7 +693,7 @@ void free_monitor_pagetable(struct exec_
         put_shadow_ref(mfn);
     }
 
-    sl2e = mpl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT];
+    sl2e = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
     if ( l2e_get_flags(sl2e) & _PAGE_PRESENT )
     {
         mfn = l2e_get_pfn(sl2e);
@@ -1108,6 +1111,34 @@ static void free_out_of_sync_entries(str
                    d->arch.out_of_sync_extras_count);
 }
 
+void shadow_mode_destroy(struct domain *d)
+{
+    shadow_lock(d);
+
+    free_shadow_pages(d);
+    free_writable_pte_predictions(d);
+
+#ifndef NDEBUG
+    int i;
+    for ( i = 0; i < shadow_ht_buckets; i++ )
+    {
+        if ( d->arch.shadow_ht[i].gpfn_and_flags != 0 )
+        {
+            printk("%s: d->arch.shadow_ht[%x].gpfn_and_flags=%p\n",
+                   i, d->arch.shadow_ht[i].gpfn_and_flags);
+            BUG();
+        }
+    }
+#endif
+
+    d->arch.shadow_mode = 0;
+
+    free_shadow_ht_entries(d);
+    free_out_of_sync_entries(d);
+
+    shadow_unlock(d);
+}
+
 void __shadow_mode_disable(struct domain *d)
 {
     // This needs rethinking for the full shadow mode stuff.
@@ -1914,6 +1945,42 @@ decrease_writable_pte_prediction(struct
     }
 }
 
+static void
+free_writable_pte_predictions(struct domain *d)
+{
+    int i;
+    struct shadow_status *x;
+
+    for ( i = 0; i < shadow_ht_buckets; i++ )
+    {
+        u32 count;
+        unsigned long *gpfn_list;
+
+        /* Skip empty buckets. */
+        if ( d->arch.shadow_ht[i].gpfn_and_flags == 0 )
+            continue;
+
+        count = 0;
+        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
+            if ( (x->gpfn_and_flags & PGT_type_mask) == PGT_writable_pred )
+                count++;
+
+        gpfn_list = xmalloc_array(unsigned long, count);
+        count = 0;
+        for ( x = &d->arch.shadow_ht[i]; x != NULL; x = x->next )
+            if ( (x->gpfn_and_flags & PGT_type_mask) == PGT_writable_pred )
+                gpfn_list[count++] = x->gpfn_and_flags & PGT_mfn_mask;
+
+        while ( count )
+        {
+            count--;
+            delete_shadow_status(d, gpfn_list[count], 0, PGT_writable_pred);
+        }
+
+        xfree(gpfn_list);
+    }
+}
+
 static u32 remove_all_write_access_in_ptpage(
     struct domain *d, unsigned long pt_pfn, unsigned long pt_mfn,
     unsigned long readonly_gpfn, unsigned long readonly_gmfn,
@@ -2606,30 +2673,39 @@ static int sh_l1_present;
 char * sh_check_name;
 int shadow_status_noswap;
 
-#define v2m(adr) ({                                                       \
-    unsigned long _a = (unsigned long)(adr);                              \
-    l1_pgentry_t _pte = shadow_linear_pg_table[_a >> PAGE_SHIFT];         \
-    unsigned long _pa = l1e_get_phys(_pte);                               \
-    _pa | (_a & ~PAGE_MASK);                                              \
+#define v2m(_ed, _adr) ({                                                 \
+    unsigned long _a = (unsigned long)(_adr);                             \
+    l2_pgentry_t _pde = shadow_linear_l2_table(_ed)[l2_table_offset(_a)]; \
+    unsigned long _pa = -1;                                               \
+    if ( l2e_get_flags(_pde) & _PAGE_PRESENT )                            \
+    {                                                                     \
+        l1_pgentry_t _pte;                                                \
+        _pte = shadow_linear_pg_table[l1_linear_offset(_a)];              \
+        if ( l1e_get_flags(_pte) & _PAGE_PRESENT )                        \
+            _pa = l1e_get_phys(_pte);                                     \
+    }                                                                     \
+    _pa | (_a & ~PAGE_MASK);                                              \
 })
 
 #define FAIL(_f, _a...)                                                   \
     do {                                                                  \
-        printk("XXX %s-FAIL (%d,%d,%d)" _f "\n"                           \
-               "g=%08lx s=%08lx &g=%08lx &s=%08lx"                        \
+        printk("XXX %s-FAIL (%d,%d,%d)" _f " at %s(%d)\n",                \
+               sh_check_name, level, l2_idx, l1_idx, ## _a,               \
+               __FILE__, __LINE__);                                       \
+        printk("g=%08lx s=%08lx &g=%08lx &s=%08lx"                        \
                " v2m(&g)=%08lx v2m(&s)=%08lx ea=%08lx\n",                 \
-               sh_check_name, level, l2_idx, l1_idx, ## _a ,              \
                gpte, spte, pgpte, pspte,                                  \
-               v2m(pgpte), v2m(pspte),                                    \
+               v2m(ed, pgpte), v2m(ed, pspte),                            \
               (l2_idx << L2_PAGETABLE_SHIFT) |                            \
               (l1_idx << L1_PAGETABLE_SHIFT));                            \
        errors++;                                                          \
    } while ( 0 )
 
 static int check_pte(
-    struct domain *d, l1_pgentry_t *pgpte, l1_pgentry_t *pspte,
+    struct exec_domain *ed, l1_pgentry_t *pgpte, l1_pgentry_t *pspte,
     int level, int l2_idx, int l1_idx, int oos_ptes)
 {
+    struct domain *d = ed->domain;
     l1_pgentry_t gpte = *pgpte;
     l1_pgentry_t spte = *pspte;
     unsigned long mask, gpfn, smfn, gmfn;
@@ -2650,20 +2726,23 @@ static int check_pte(
     if ( !(l1e_get_flags(gpte) & _PAGE_PRESENT) )
         FAIL("Guest not present yet shadow is");
 
-    mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW);
-
-    if ( l1e_has_changed(&spte, &gpte, mask) )
+    mask = ~(_PAGE_GLOBAL|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|PAGE_MASK);
+
+    if ( (l1e_get_value(spte) & mask) != (l1e_get_value(gpte) & mask) )
         FAIL("Corrupt?");
 
     if ( (level == 1) &&
-         (l1e_get_flags(spte) & _PAGE_DIRTY ) &&
+         (l1e_get_flags(spte) & _PAGE_DIRTY) &&
          !(l1e_get_flags(gpte) & _PAGE_DIRTY) && !oos_ptes )
         FAIL("Dirty coherence");
 
-    if ( (l1e_get_flags(spte) & _PAGE_ACCESSED ) &&
+    if ( (l1e_get_flags(spte) & _PAGE_ACCESSED) &&
          !(l1e_get_flags(gpte) & _PAGE_ACCESSED) && !oos_ptes )
         FAIL("Accessed coherence");
 
+    if ( l1e_get_flags(spte) & _PAGE_GLOBAL )
+        FAIL("global bit set in shadow");
+
     smfn = l1e_get_pfn(spte);
     gpfn = l1e_get_pfn(gpte);
     gmfn = __gpfn_to_mfn(d, gpfn);
@@ -2721,11 +2800,14 @@ static int check_pte(
 
     return errors;
 }
+#undef FAIL
+#undef v2m
 
 static int check_l1_table(
-    struct domain *d, unsigned long gpfn,
+    struct exec_domain *ed, unsigned long gpfn,
     unsigned long gmfn, unsigned long smfn, unsigned l2_idx)
 {
+    struct domain *d = ed->domain;
     int i;
     l1_pgentry_t *gpl1e, *spl1e;
     int errors = 0, oos_ptes = 0;
@@ -2741,7 +2823,7 @@ static int check_l1_table(
     spl1e = map_domain_mem(smfn << PAGE_SHIFT);
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
-        errors += check_pte(d, &gpl1e[i], &spl1e[i], 1, l2_idx, i, oos_ptes);
+        errors += check_pte(ed, &gpl1e[i], &spl1e[i], 1, l2_idx, i, oos_ptes);
 
     unmap_domain_mem(spl1e);
     unmap_domain_mem(gpl1e);
@@ -2756,8 +2838,9 @@ static int check_l1_table(
     } while ( 0 )
 
 int check_l2_table(
-    struct domain *d, unsigned long gmfn, unsigned long smfn, int oos_pdes)
+    struct exec_domain *ed, unsigned long gmfn, unsigned long smfn, int oos_pdes)
 {
+    struct domain *d = ed->domain;
     l2_pgentry_t *gpl2e = (l2_pgentry_t *)map_domain_mem(gmfn << PAGE_SHIFT);
     l2_pgentry_t *spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
     l2_pgentry_t match;
@@ -2825,7 +2908,7 @@ int check_l2_table(
 
     /* Check the whole L2. */
     for ( i = 0; i < limit; i++ )
-        errors += check_pte(d,
+        errors += check_pte(ed,
                             (l1_pgentry_t*)(&gpl2e[i]), /* Hmm, dirty ... */
                             (l1_pgentry_t*)(&spl2e[i]),
                             2, i, 0, 0);
@@ -2840,6 +2923,7 @@ int check_l2_table(
 
     return errors;
 }
+#undef FAILPT
 
 int _check_pagetable(struct exec_domain *ed, char *s)
 {
@@ -2875,7 +2959,7 @@ int _check_pagetable(struct exec_domain
         ASSERT(ptbase_mfn);
     }
 
-    errors += check_l2_table(d, ptbase_mfn, smfn, oos_pdes);
+    errors += check_l2_table(ed, ptbase_mfn, smfn, oos_pdes);
 
     gpl2e = (l2_pgentry_t *) map_domain_mem( ptbase_mfn << PAGE_SHIFT );
     spl2e = (l2_pgentry_t *) map_domain_mem( smfn << PAGE_SHIFT );
@@ -2898,7 +2982,7 @@ int _check_pagetable(struct exec_domain
 
         if ( l2e_get_value(spl2e[i]) != 0 )  /* FIXME: check flags? */
         {
-            errors += check_l1_table(d, gl1pfn, gl1mfn, sl1mfn, i);
+            errors += check_l1_table(ed, gl1pfn, gl1mfn, sl1mfn, i);
         }
     }
 
@@ -2944,11 +3028,11 @@ int _check_all_pagetables(struct exec_do
         switch ( a->gpfn_and_flags & PGT_type_mask )
         {
         case PGT_l1_shadow:
-            errors += check_l1_table(d, a->gpfn_and_flags & PGT_mfn_mask,
+            errors += check_l1_table(ed, a->gpfn_and_flags & PGT_mfn_mask,
                                      gmfn, a->smfn, 0);
            break;
         case PGT_l2_shadow:
-            errors += check_l2_table(d, gmfn, a->smfn,
+            errors += check_l2_table(ed, gmfn, a->smfn,
                                      page_out_of_sync(pfn_to_page(gmfn)));
            break;
         case PGT_l3_shadow:
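One pattern worth noting in the new free_writable_pte_predictions(): each hash bucket is walked twice, first to count and collect the matching gpfns into a scratch array, and only then are the entries deleted, so delete_shadow_status() never rewrites a chain that is still being traversed. A self-contained sketch of that collect-then-delete idiom follows; it uses a generic linked list with hypothetical names (node, collect_then_delete), and malloc/free stand in for xmalloc_array/xfree.

/* Collect-then-delete over a singly-linked chain, mirroring the shape of
 * free_writable_pte_predictions(). Illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct node { unsigned long key; int flagged; struct node *next; };

static void delete_key(struct node **head, unsigned long key)
{
    for ( struct node **pp = head; *pp; pp = &(*pp)->next )
        if ( (*pp)->key == key )
        {
            struct node *dead = *pp;
            *pp = dead->next;
            free(dead);
            return;
        }
}

static void collect_then_delete(struct node **head)
{
    unsigned count = 0;
    for ( struct node *x = *head; x; x = x->next )
        if ( x->flagged )
            count++;

    unsigned long *keys = malloc(count * sizeof *keys);
    count = 0;
    for ( struct node *x = *head; x; x = x->next )
        if ( x->flagged )
            keys[count++] = x->key;

    while ( count )                      /* chain mutated only after the walk */
        delete_key(head, keys[--count]);

    free(keys);
}

int main(void)
{
    struct node *head = NULL;
    for ( unsigned long k = 0; k < 5; k++ )
    {
        struct node *n = malloc(sizeof *n);
        n->key = k;
        n->flagged = (k & 1);            /* mark odd keys for deletion */
        n->next = head;
        head = n;
    }
    collect_then_delete(&head);
    for ( struct node *x = head; x; x = x->next )
        printf("%lu ", x->key);          /* prints only the even keys */
    printf("\n");
    return 0;
}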
--- a/xen/arch/x86/vmx.c Thu Apr 21 15:39:08 2005 +0000
+++ b/xen/arch/x86/vmx.c Thu Apr 21 17:39:45 2005 +0000
@@ -676,7 +676,7 @@ static int mov_to_cr(int gp, int cr, str
     switch(cr) {
     case 0:
     {
-        unsigned long old_base_mfn = 0, mfn;
+        unsigned long old_base_mfn, mfn;
 
         /*
          * CR0:
@@ -694,14 +694,17 @@ static int mov_to_cr(int gp, int cr, str
             /*
              * The guest CR3 must be pointing to the guest physical.
              */
-            if (!VALID_MFN(mfn = phys_to_machine_mapping(
-                d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)))
+            if ( !VALID_MFN(mfn = phys_to_machine_mapping(
+                     d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
+                 !get_page(pfn_to_page(mfn), d->domain) )
             {
-                VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx",
-                        d->arch.arch_vmx.cpu_cr3);
+                VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx",
+                            d->arch.arch_vmx.cpu_cr3);
                 domain_crash_synchronous(); /* need to take a clean path */
             }
             old_base_mfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
+            if ( old_base_mfn )
+                put_page(pfn_to_page(old_base_mfn));
 
             /*
              * Now arch.guest_table points to machine physical.
@@ -718,8 +721,6 @@ static int mov_to_cr(int gp, int cr, str
              */
             VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
                         d->arch.arch_vmx.cpu_cr3, mfn);
-            /* undo the get_page done in the para virt case */
-            put_page_and_type(&frame_table[old_base_mfn]);
         } else {
             if ((value & X86_CR0_PE) == 0) {
                 __vmread(GUEST_EIP, &eip);
@@ -752,7 +753,7 @@ static int mov_to_cr(int gp, int cr, str
     }
     case 3:
     {
-        unsigned long mfn;
+        unsigned long old_base_mfn, mfn;
 
         /*
          * If paging is not enabled yet, simply copy the value to CR3.
@@ -781,14 +782,18 @@ static int mov_to_cr(int gp, int cr, str
          * first.
          */
         VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-        if ((value >> PAGE_SHIFT) > d->domain->max_pages)
+        if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) ||
+             !VALID_MFN(mfn = phys_to_machine_mapping(value >> PAGE_SHIFT)) ||
+             !get_page(pfn_to_page(mfn), d->domain) )
         {
             VMX_DBG_LOG(DBG_LEVEL_VMMU,
                         "Invalid CR3 value=%lx", value);
             domain_crash_synchronous(); /* need to take a clean path */
         }
-        mfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
+        old_base_mfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
         d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
+        if ( old_base_mfn )
+            put_page(pfn_to_page(old_base_mfn));
         update_pagetables(d);
         /*
          * arch.shadow_table should now hold the next CR3 for shadow
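Both the CR0 and CR3 paths above now follow the same shape: validate the new base frame and take a reference on it with get_page() before installing it, and drop the reference on the old base only afterwards, so a rejected value crashes the domain cleanly without leaking a reference or dropping one twice. A minimal sketch of that acquire-new-then-release-old ordering, with a plain counter standing in for get_page()/put_page(); all names (frame, switch_base) are hypothetical, not Xen APIs.

/* Acquire-new-before-release-old, as in the mov_to_cr() CR3 handling. */
#include <assert.h>
#include <stdbool.h>

struct frame { int refcnt; };

static bool get_frame(struct frame *f)
{
    if ( !f )
        return false;   /* analogue of a failed get_page() */
    f->refcnt++;
    return true;
}

static void put_frame(struct frame *f)
{
    assert(f->refcnt > 0);
    f->refcnt--;
}

static bool switch_base(struct frame **current, struct frame *next)
{
    if ( !get_frame(next) )    /* validate + pin the new base first */
        return false;          /* caller would domain_crash_synchronous() */
    struct frame *old = *current;
    *current = next;           /* install the new base */
    if ( old )
        put_frame(old);        /* only now release the old one */
    return true;
}

int main(void)
{
    struct frame a = { 1 }, b = { 0 };  /* 'a' holds the in-use reference */
    struct frame *base = &a;
    assert(switch_base(&base, &b));
    assert(b.refcnt == 1 && a.refcnt == 0 && base == &b);
    return 0;
}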
--- a/xen/arch/x86/vmx_io.c Thu Apr 21 15:39:08 2005 +0000
+++ b/xen/arch/x86/vmx_io.c Thu Apr 21 17:39:45 2005 +0000
@@ -282,7 +282,7 @@ void vmx_io_assist(struct exec_domain *e
     }
 }
 
-#ifdef __i386__
+#if defined(__i386__) || defined(__x86_64__)
 static inline int __fls(u32 word)
 {
     int bit;
@@ -296,53 +296,52 @@ static inline int __fls(u32 word)
 #define __fls(x) generic_fls(x)
 static __inline__ int generic_fls(u32 x)
 {
-    int r = 32;
+    int r = 31;
 
-        if (!x)
-                return 0;
-        if (!(x & 0xffff0000u)) {
-                x <<= 16;
-                r -= 16;
-        }
-        if (!(x & 0xff000000u)) {
-                x <<= 8;
-                r -= 8;
-        }
-        if (!(x & 0xf0000000u)) {
-                x <<= 4;
-                r -= 4;
-        }
-        if (!(x & 0xc0000000u)) {
-                x <<= 2;
-                r -= 2;
-        }
-        if (!(x & 0x80000000u)) {
-                x <<= 1;
-                r -= 1;
-        }
-        return r;
+    if (!x)
+        return 0;
+    if (!(x & 0xffff0000u)) {
+        x <<= 16;
+        r -= 16;
+    }
+    if (!(x & 0xff000000u)) {
+        x <<= 8;
+        r -= 8;
+    }
+    if (!(x & 0xf0000000u)) {
+        x <<= 4;
+        r -= 4;
+    }
+    if (!(x & 0xc0000000u)) {
+        x <<= 2;
+        r -= 2;
+    }
+    if (!(x & 0x80000000u)) {
+        x <<= 1;
+        r -= 1;
+    }
+    return r;
 }
 #endif
 
-
 /* Simple minded Local APIC priority implementation. Fix later */
 static __inline__ int find_highest_irq(u32 *pintr)
 {
     if (pintr[7])
-        return __fls(pintr[7]) + (255-32*1);
+        return __fls(pintr[7]) + (256-32*1);
     if (pintr[6])
-        return __fls(pintr[6]) + (255-32*2);
+        return __fls(pintr[6]) + (256-32*2);
     if (pintr[5])
-        return __fls(pintr[5]) + (255-32*3);
+        return __fls(pintr[5]) + (256-32*3);
     if (pintr[4])
-        return __fls(pintr[4]) + (255-32*4);
+        return __fls(pintr[4]) + (256-32*4);
     if (pintr[3])
-        return __fls(pintr[3]) + (255-32*5);
+        return __fls(pintr[3]) + (256-32*5);
     if (pintr[2])
-        return __fls(pintr[2]) + (255-32*6);
+        return __fls(pintr[2]) + (256-32*6);
     if (pintr[1])
-        return __fls(pintr[1]) + (255-32*7);
-    return (__fls(pintr[0])-1);
+        return __fls(pintr[1]) + (256-32*7);
+    return __fls(pintr[0]);
 }
 
 /*
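The find_highest_irq() change above fixes an off-by-one: with __fls() returning the bit index (0..31) of the most significant set bit, the vector pending in word w is 32*w + __fls(pintr[w]), and since 256 - 32*(8 - w) == 32*w, the old 255-based constants were one too low. A small self-contained check of that arithmetic; fls_index() is a hypothetical stand-in for __fls(), not the kernel helper.

/* Verifies the 256-32*n constants: for vector v, word = v/32, bit = v%32,
 * so the recovered vector is bit + 32*word == bit + (256 - 32*(8 - word)). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int fls_index(uint32_t x)   /* index of highest set bit; x != 0 */
{
    int r = 31;
    while ( !(x & 0x80000000u) )
    {
        x <<= 1;
        r--;
    }
    return r;
}

int main(void)
{
    uint32_t pintr[8] = { 0 };
    int v = 236;                        /* arbitrary pending vector */
    pintr[v / 32] |= 1u << (v % 32);    /* lands in word 7, bit 12 */

    assert(fls_index(pintr[7]) + (256 - 32*1) == v);      /* new formula */
    assert(fls_index(pintr[7]) + (255 - 32*1) == v - 1);  /* old: one low */
    printf("vector %d recovered correctly\n", v);
    return 0;
}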
--- a/xen/common/page_alloc.c Thu Apr 21 15:39:08 2005 +0000
+++ b/xen/common/page_alloc.c Thu Apr 21 17:39:45 2005 +0000
@@ -562,7 +562,6 @@ void free_domheap_pages(struct pfn_info
     for ( i = 0; i < (1 << order); i++ )
     {
         shadow_drop_references(d, &pg[i]);
-        ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
         pg[i].tlbflush_timestamp = tlbflush_current_time();
         pg[i].u.free.cpu_mask = d->cpuset;
         list_del(&pg[i].list);
--- a/xen/include/asm-x86/shadow.h Thu Apr 21 15:39:08 2005 +0000
+++ b/xen/include/asm-x86/shadow.h Thu Apr 21 17:39:45 2005 +0000
@@ -177,6 +177,8 @@ static inline void shadow_mode_disable(s
     __shadow_mode_disable(d);
 }
 
+extern void shadow_mode_destroy(struct domain *d);
+
 /************************************************************************/
 
 #define __mfn_to_gpfn(_d, mfn)                          \
@@ -588,7 +590,7 @@ static inline int l1pte_write_fault(
 
     ASSERT(l1e_get_flags(gpte) & _PAGE_RW);
     l1e_add_flags(&gpte, _PAGE_DIRTY | _PAGE_ACCESSED);
-    spte = l1e_create_pfn(gmfn, l1e_get_flags(gpte));
+    spte = l1e_create_pfn(gmfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
 
     SH_VVLOG("l1pte_write_fault: updating spte=0x%p gpte=0x%p",
              l1e_get_value(spte), l1e_get_value(gpte));
@@ -621,7 +623,7 @@ static inline int l1pte_read_fault(
     }
 
     l1e_add_flags(&gpte, _PAGE_ACCESSED);
-    spte = l1e_create_pfn(mfn, l1e_get_flags(gpte));
+    spte = l1e_create_pfn(mfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
 
     if ( shadow_mode_log_dirty(d) || !(l1e_get_flags(gpte) & _PAGE_DIRTY) ||
          mfn_is_page_table(mfn) )
@@ -649,7 +651,7 @@ static inline void l1pte_propagate_from_
          (_PAGE_PRESENT|_PAGE_ACCESSED)) &&
         VALID_MFN(mfn = __gpfn_to_mfn(d, l1e_get_pfn(gpte))) )
     {
-        spte = l1e_create_pfn(mfn, l1e_get_flags(gpte));
+        spte = l1e_create_pfn(mfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
 
         if ( shadow_mode_log_dirty(d) ||
              !(l1e_get_flags(gpte) & _PAGE_DIRTY) ||
@@ -660,8 +662,9 @@ static inline void l1pte_propagate_from_
     }
 
 #if 0
-    if ( spte || gpte )
-        SH_VVLOG("%s: gpte=%p, new spte=%p", __func__, gpte, spte);
+    if ( l1e_get_value(spte) || l1e_get_value(gpte) )
+        SH_VVLOG("%s: gpte=%p, new spte=%p",
+                 __func__, l1e_get_value(gpte), l1e_get_value(spte));
 #endif
 
     *spte_p = spte;
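The recurring `& ~_PAGE_GLOBAL` above strips the global bit whenever guest PTE flags are propagated into a shadow PTE: global mappings survive CR3 reloads, which would defeat the TLB flushes the shadow code relies on, and the new check_pte() test ("global bit set in shadow") enforces the same invariant. A tiny sketch of the flag propagation; the flag values are the standard x86 ones, but shadow_flags() is a hypothetical stand-in for l1e_create_pfn()'s flag handling.

/* Propagate guest PTE flags into a shadow PTE, stripping _PAGE_GLOBAL. */
#include <assert.h>
#include <stdint.h>

#define _PAGE_PRESENT  0x001u
#define _PAGE_RW       0x002u
#define _PAGE_ACCESSED 0x020u
#define _PAGE_DIRTY    0x040u
#define _PAGE_GLOBAL   0x100u   /* must never leak into a shadow entry */

static uint32_t shadow_flags(uint32_t guest_flags)
{
    return guest_flags & ~_PAGE_GLOBAL;
}

int main(void)
{
    uint32_t g = _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_GLOBAL;
    uint32_t s = shadow_flags(g);
    assert(!(s & _PAGE_GLOBAL));            /* invariant check_pte() enforces */
    assert(s & (_PAGE_PRESENT | _PAGE_RW)); /* other flags pass through */
    return 0;
}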