debuggers.hg
changeset 3755:ea98f0bb6510
bitkeeper revision 1.1159.212.127 (4208b02bTdSR4AVYRg8diDkKZmIVUg)
General shadow code cleanup.
Fixed compilation problems when SHADOW_DEBUG is enabled.
Fixed compilation problems when CONFIG_VMX is undefined.
Simplified l1pte_write_fault and l1pte_read_fault.
Name change: spfn => smfn (shadow machine frame numbers).
In general, the terms pfn and gpfn now refer to pages in the
guest's idea of physical frames (which differs from machine frames
for full shadow guests). mfn always refers to a machine frame number.
One bug fix for check_pagetable():
If we're using writable page tables
along with shadow mode, don't check the currently writable page table
page -- check its snapshot instead.
Signed-off-by: michael.fetterman@cl.cam.ac.uk
author:   mafetter@fleming.research
date:     Tue Feb 08 12:27:23 2005 +0000 (2005-02-08)
parents:  23e7cf28ddb3
children: 9f7935ea4606 4d39c79968fa
files:    xen/arch/x86/memory.c xen/arch/x86/shadow.c xen/arch/x86/traps.c xen/arch/x86/vmx.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/domain_build.c xen/arch/x86/x86_32/traps.c xen/include/asm-x86/shadow.h
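[Editorial note: the gpfn/mfn naming convention described in the commit
message maps onto two translation helpers this changeset introduces in
xen/include/asm-x86/shadow.h (see the shadow.h hunk below). For guests
that are not in full shadow mode the two frame-number spaces coincide,
so the translation degenerates to the identity. Copied from the diff:]

    /* Translate between a guest's physical frame numbers (gpfn) and
     * machine frame numbers (mfn). Only full-shadow (SHM_full_32)
     * guests have a physical address space distinct from the machine
     * address space; for all other guests the mapping is the identity. */
    #define __mfn_to_gpfn(_d, mfn)                  \
        ( (shadow_mode(_d) == SHM_full_32)          \
          ? machine_to_phys_mapping[(mfn)]          \
          : (mfn) )

    #define __gpfn_to_mfn(_d, gpfn)                 \
        ( (shadow_mode(_d) == SHM_full_32)          \
          ? phys_to_machine_mapping(gpfn)           \
          : (gpfn) )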
line diff
1.1 --- a/xen/arch/x86/memory.c Tue Feb 08 11:07:10 2005 +0000 1.2 +++ b/xen/arch/x86/memory.c Tue Feb 08 12:27:23 2005 +0000 1.3 @@ -200,14 +200,14 @@ void write_ptbase(struct exec_domain *ed 1.4 unsigned long pa; 1.5 1.6 #ifdef CONFIG_VMX 1.7 - if ( unlikely(d->arch.shadow_mode) ) 1.8 - pa = ((d->arch.shadow_mode == SHM_full_32) ? 1.9 + if ( unlikely(shadow_mode(d)) ) 1.10 + pa = ((shadow_mode(d) == SHM_full_32) ? 1.11 pagetable_val(ed->arch.monitor_table) : 1.12 pagetable_val(ed->arch.shadow_table)); 1.13 else 1.14 pa = pagetable_val(ed->arch.pagetable); 1.15 #else 1.16 - if ( unlikely(d->arch.shadow_mode) ) 1.17 + if ( unlikely(shadow_mode(d)) ) 1.18 pa = pagetable_val(ed->arch.shadow_table); 1.19 else 1.20 pa = pagetable_val(ed->arch.pagetable); 1.21 @@ -707,7 +707,7 @@ static int mod_l1_entry(l1_pgentry_t *pl 1.22 1.23 if ( l1_pgentry_val(nl1e) & _PAGE_PRESENT ) 1.24 { 1.25 - /* Differ in mapping (bits 12-31), r/w (bit 1), or presence (bit 0)? */ 1.26 + /* Same mapping (bits 12-31), r/w (bit 1), and presence (bit 0)? */ 1.27 if ( ((l1_pgentry_val(ol1e) ^ l1_pgentry_val(nl1e)) & ~0xffc) == 0 ) 1.28 return update_l1e(pl1e, ol1e, nl1e); 1.29 1.30 @@ -772,7 +772,7 @@ void free_page_type(struct pfn_info *pag 1.31 BUG(); 1.32 } 1.33 1.34 - if ( unlikely(d->arch.shadow_mode) && 1.35 + if ( unlikely(shadow_mode(d)) && 1.36 (get_shadow_status(d, page_to_pfn(page)) & PSH_shadowed) ) 1.37 { 1.38 unshadow_table(page_to_pfn(page), type); 1.39 @@ -1329,7 +1329,7 @@ int do_mmu_update( 1.40 struct pfn_info *page; 1.41 int rc = 0, okay = 1, i = 0, cpu = smp_processor_id(); 1.42 unsigned int cmd, done = 0; 1.43 - unsigned long prev_spfn = 0; 1.44 + unsigned long prev_smfn = 0; 1.45 l1_pgentry_t *prev_spl1e = 0; 1.46 struct exec_domain *ed = current; 1.47 struct domain *d = ed->domain; 1.48 @@ -1340,6 +1340,9 @@ int do_mmu_update( 1.49 1.50 cleanup_writable_pagetable(d); 1.51 1.52 + if ( unlikely(shadow_mode(d)) ) 1.53 + check_pagetable(d, ed->arch.pagetable, "pre-mmu"); /* debug */ 1.54 + 1.55 /* 1.56 * If we are resuming after preemption, read how much work we have already 1.57 * done. This allows us to set the @done output parameter correctly. 1.58 @@ -1434,12 +1437,12 @@ int do_mmu_update( 1.59 okay = mod_l1_entry((l1_pgentry_t *)va, 1.60 mk_l1_pgentry(req.val)); 1.61 1.62 - if ( unlikely(d->arch.shadow_mode) && okay && 1.63 + if ( unlikely(shadow_mode(d)) && okay && 1.64 (get_shadow_status(d, page-frame_table) & 1.65 PSH_shadowed) ) 1.66 { 1.67 shadow_l1_normal_pt_update( 1.68 - req.ptr, req.val, &prev_spfn, &prev_spl1e); 1.69 + req.ptr, req.val, &prev_smfn, &prev_spl1e); 1.70 put_shadow_status(d); 1.71 } 1.72 1.73 @@ -1453,7 +1456,7 @@ int do_mmu_update( 1.74 mk_l2_pgentry(req.val), 1.75 pfn); 1.76 1.77 - if ( unlikely(d->arch.shadow_mode) && okay && 1.78 + if ( unlikely(shadow_mode(d)) && okay && 1.79 (get_shadow_status(d, page-frame_table) & 1.80 PSH_shadowed) ) 1.81 { 1.82 @@ -1491,7 +1494,7 @@ int do_mmu_update( 1.83 * If in log-dirty mode, mark the corresponding pseudo-physical 1.84 * page as dirty. 
1.85 */ 1.86 - if ( unlikely(d->arch.shadow_mode == SHM_logdirty) && 1.87 + if ( unlikely(shadow_mode(d) == SHM_logdirty) && 1.88 mark_dirty(d, pfn) ) 1.89 d->arch.shadow_dirty_block_count++; 1.90 1.91 @@ -1547,6 +1550,9 @@ int do_mmu_update( 1.92 if ( unlikely(pdone != NULL) ) 1.93 __put_user(done + i, pdone); 1.94 1.95 + if ( unlikely(shadow_mode(d)) ) 1.96 + check_pagetable(d, ed->arch.pagetable, "post-mmu"); /* debug */ 1.97 + 1.98 UNLOCK_BIGLOCK(d); 1.99 return rc; 1.100 } 1.101 @@ -1580,9 +1586,9 @@ int do_update_va_mapping(unsigned long p 1.102 mk_l1_pgentry(val))) ) 1.103 err = -EINVAL; 1.104 1.105 - if ( unlikely(d->arch.shadow_mode) ) 1.106 + if ( unlikely(shadow_mode(d)) ) 1.107 { 1.108 - unsigned long sval; 1.109 + unsigned long sval = 0; 1.110 1.111 l1pte_propagate_from_guest(d, &val, &sval); 1.112 1.113 @@ -1601,7 +1607,7 @@ int do_update_va_mapping(unsigned long p 1.114 * the PTE in the PT-holding page. We need the machine frame number 1.115 * for this. 1.116 */ 1.117 - if ( d->arch.shadow_mode == SHM_logdirty ) 1.118 + if ( shadow_mode(d) == SHM_logdirty ) 1.119 mark_dirty(d, va_to_l1mfn(page_nr << PAGE_SHIFT)); 1.120 1.121 check_pagetable(d, ed->arch.pagetable, "va"); /* debug */ 1.122 @@ -1874,7 +1880,7 @@ void ptwr_flush(const int which) 1.123 PTWR_PRINT_WHICH, ptep, pte); 1.124 pte &= ~_PAGE_RW; 1.125 1.126 - if ( unlikely(d->arch.shadow_mode) ) 1.127 + if ( unlikely(shadow_mode(d)) ) 1.128 { 1.129 /* Write-protect the p.t. page in the shadow page table. */ 1.130 l1pte_propagate_from_guest(d, &pte, &spte); 1.131 @@ -1966,7 +1972,7 @@ void ptwr_flush(const int which) 1.132 * STEP 3. Reattach the L1 p.t. page into the current address space. 1.133 */ 1.134 1.135 - if ( (which == PTWR_PT_ACTIVE) && likely(!d->arch.shadow_mode) ) 1.136 + if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode(d)) ) 1.137 { 1.138 pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx]; 1.139 *pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT); 1.140 @@ -2070,7 +2076,7 @@ int ptwr_do_page_fault(unsigned long add 1.141 1.142 /* For safety, disconnect the L1 p.t. page from current space. */ 1.143 if ( (which == PTWR_PT_ACTIVE) && 1.144 - likely(!current->domain->arch.shadow_mode) ) 1.145 + likely(!shadow_mode(current->domain)) ) 1.146 { 1.147 *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT); 1.148 #if 1
2.1 --- a/xen/arch/x86/shadow.c Tue Feb 08 11:07:10 2005 +0000 2.2 +++ b/xen/arch/x86/shadow.c Tue Feb 08 12:27:23 2005 +0000 2.3 @@ -73,11 +73,11 @@ static void free_shadow_state(struct dom 2.4 2.5 /* Free the head page. */ 2.6 free_shadow_page( 2.7 - d, &frame_table[x->spfn_and_flags & PSH_pfn_mask]); 2.8 + d, &frame_table[x->smfn_and_flags & PSH_pfn_mask]); 2.9 2.10 /* Reinitialise the head node. */ 2.11 x->pfn = 0; 2.12 - x->spfn_and_flags = 0; 2.13 + x->smfn_and_flags = 0; 2.14 n = x->next; 2.15 x->next = NULL; 2.16 2.17 @@ -88,11 +88,11 @@ static void free_shadow_state(struct dom 2.18 { 2.19 /* Free the shadow page. */ 2.20 free_shadow_page( 2.21 - d, &frame_table[x->spfn_and_flags & PSH_pfn_mask]); 2.22 + d, &frame_table[x->smfn_and_flags & PSH_pfn_mask]); 2.23 2.24 /* Re-initialise the chain node. */ 2.25 x->pfn = 0; 2.26 - x->spfn_and_flags = 0; 2.27 + x->smfn_and_flags = 0; 2.28 2.29 /* Add to the free list. */ 2.30 n = x->next; 2.31 @@ -113,14 +113,14 @@ static inline int clear_shadow_page( 2.32 { 2.33 unsigned long *p; 2.34 int restart = 0; 2.35 - struct pfn_info *spage = &frame_table[x->spfn_and_flags & PSH_pfn_mask]; 2.36 + struct pfn_info *spage = &frame_table[x->smfn_and_flags & PSH_pfn_mask]; 2.37 2.38 switch ( spage->u.inuse.type_info & PGT_type_mask ) 2.39 { 2.40 /* We clear L2 pages by zeroing the guest entries. */ 2.41 case PGT_l2_page_table: 2.42 p = map_domain_mem((spage - frame_table) << PAGE_SHIFT); 2.43 - if (d->arch.shadow_mode == SHM_full_32) 2.44 + if ( shadow_mode(d) == SHM_full_32 ) 2.45 memset(p, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(*p)); 2.46 else 2.47 memset(p, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*p)); 2.48 @@ -419,7 +419,7 @@ static inline struct pfn_info *alloc_sha 2.49 2.50 void unshadow_table(unsigned long gpfn, unsigned int type) 2.51 { 2.52 - unsigned long spfn; 2.53 + unsigned long smfn; 2.54 struct domain *d = page_get_owner(&frame_table[gpfn]); 2.55 2.56 SH_VLOG("unshadow_table type=%08x gpfn=%08lx", type, gpfn); 2.57 @@ -431,15 +431,15 @@ void unshadow_table(unsigned long gpfn, 2.58 * guests there won't be a race here as this CPU was the one that 2.59 * cmpxchg'ed the page to invalid. 2.60 */ 2.61 - spfn = __shadow_status(d, gpfn) & PSH_pfn_mask; 2.62 + smfn = __shadow_status(d, gpfn) & PSH_pfn_mask; 2.63 delete_shadow_status(d, gpfn); 2.64 - free_shadow_page(d, &frame_table[spfn]); 2.65 + free_shadow_page(d, &frame_table[smfn]); 2.66 } 2.67 2.68 #ifdef CONFIG_VMX 2.69 void vmx_shadow_clear_state(struct domain *d) 2.70 { 2.71 - SH_VVLOG("vmx_clear_shadow_state: \n"); 2.72 + SH_VVLOG("vmx_clear_shadow_state:"); 2.73 clear_shadow_state(d); 2.74 } 2.75 #endif 2.76 @@ -453,7 +453,7 @@ unsigned long shadow_l2_table( 2.77 l2_pgentry_t *spl2e = 0; 2.78 unsigned long guest_gpfn; 2.79 2.80 - __get_machine_to_phys(d, guest_gpfn, gpfn); 2.81 + guest_gpfn = __mfn_to_gpfn(d, gpfn); 2.82 2.83 SH_VVLOG("shadow_l2_table( %08lx )", gpfn); 2.84 2.85 @@ -471,9 +471,13 @@ unsigned long shadow_l2_table( 2.86 2.87 #ifdef __i386__ 2.88 /* Install hypervisor and 2x linear p.t. mapings. 
*/ 2.89 - if ( d->arch.shadow_mode == SHM_full_32 ) 2.90 + if ( shadow_mode(d) == SHM_full_32 ) 2.91 { 2.92 +#ifdef CONFIG_VMX 2.93 vmx_update_shadow_state(d->exec_domain[0], gpfn, spfn); 2.94 +#else 2.95 + panic("Shadow Full 32 not yet implemented without VMX\n"); 2.96 +#endif 2.97 } 2.98 else 2.99 { 2.100 @@ -499,7 +503,7 @@ unsigned long shadow_l2_table( 2.101 } 2.102 #endif 2.103 2.104 - if ( d->arch.shadow_mode != SHM_full_32 ) 2.105 + if ( shadow_mode(d) != SHM_full_32 ) 2.106 unmap_domain_mem(spl2e); 2.107 2.108 SH_VLOG("shadow_l2_table( %08lx -> %08lx)", gpfn, spfn); 2.109 @@ -510,13 +514,13 @@ static void shadow_map_l1_into_current_l 2.110 { 2.111 struct exec_domain *ed = current; 2.112 struct domain *d = ed->domain; 2.113 - unsigned long *gpl1e, *spl1e, gpl2e, spl2e, gl1pfn, sl1pfn=0, sl1ss; 2.114 + unsigned long *gpl1e, *spl1e, gl2e, sl2e, gl1pfn, sl1pfn=0, sl1ss; 2.115 struct pfn_info *sl1pfn_info; 2.116 int i; 2.117 2.118 - __guest_get_pl2e(ed, va, &gpl2e); 2.119 + __guest_get_l2e(ed, va, &gl2e); 2.120 2.121 - gl1pfn = gpl2e >> PAGE_SHIFT; 2.122 + gl1pfn = gl2e >> PAGE_SHIFT; 2.123 2.124 sl1ss = __shadow_status(d, gl1pfn); 2.125 if ( !(sl1ss & PSH_shadowed) ) 2.126 @@ -534,10 +538,10 @@ static void shadow_map_l1_into_current_l 2.127 2.128 set_shadow_status(d, gl1pfn, PSH_shadowed | sl1pfn); 2.129 2.130 - l2pde_general(d, &gpl2e, &spl2e, sl1pfn); 2.131 + l2pde_general(d, &gl2e, &sl2e, sl1pfn); 2.132 2.133 - __guest_set_pl2e(ed, va, gpl2e); 2.134 - __shadow_set_pl2e(ed, va, spl2e); 2.135 + __guest_set_l2e(ed, va, gl2e); 2.136 + __shadow_set_l2e(ed, va, sl2e); 2.137 2.138 gpl1e = (unsigned long *) &(linear_pg_table[ 2.139 (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]); 2.140 @@ -554,9 +558,9 @@ static void shadow_map_l1_into_current_l 2.141 SH_VVLOG("4b: was shadowed, l2 missing ( %08lx )", sl1pfn); 2.142 2.143 sl1pfn = sl1ss & PSH_pfn_mask; 2.144 - l2pde_general(d, &gpl2e, &spl2e, sl1pfn); 2.145 - __guest_set_pl2e(ed, va, gpl2e); 2.146 - __shadow_set_pl2e(ed, va, spl2e); 2.147 + l2pde_general(d, &gl2e, &sl2e, sl1pfn); 2.148 + __guest_set_l2e(ed, va, gl2e); 2.149 + __shadow_set_l2e(ed, va, sl2e); 2.150 } 2.151 } 2.152 2.153 @@ -588,7 +592,7 @@ void vmx_shadow_invlpg(struct domain *d, 2.154 2.155 int shadow_fault(unsigned long va, long error_code) 2.156 { 2.157 - unsigned long gpte, spte; 2.158 + unsigned long gpte, spte = 0; 2.159 struct exec_domain *ed = current; 2.160 struct domain *d = ed->domain; 2.161 2.162 @@ -628,14 +632,14 @@ int shadow_fault(unsigned long va, long 2.163 if ( unlikely(__get_user(gpte, (unsigned long *) 2.164 &linear_pg_table[va >> PAGE_SHIFT])) ) 2.165 { 2.166 - SH_VVLOG("shadow_fault - EXIT: read gpte faulted" ); 2.167 + SH_VVLOG("shadow_fault - EXIT: read gpte faulted2" ); 2.168 shadow_unlock(d); 2.169 return 0; 2.170 } 2.171 2.172 if ( unlikely(!(gpte & _PAGE_PRESENT)) ) 2.173 { 2.174 - SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte ); 2.175 + SH_VVLOG("shadow_fault - EXIT: gpte not present2 (%lx)",gpte ); 2.176 shadow_unlock(d); 2.177 return 0; 2.178 } 2.179 @@ -691,20 +695,20 @@ int shadow_fault(unsigned long va, long 2.180 2.181 void shadow_l1_normal_pt_update( 2.182 unsigned long pa, unsigned long gpte, 2.183 - unsigned long *prev_spfn_ptr, 2.184 + unsigned long *prev_smfn_ptr, 2.185 l1_pgentry_t **prev_spl1e_ptr) 2.186 { 2.187 - unsigned long spfn, spte, prev_spfn = *prev_spfn_ptr; 2.188 + unsigned long smfn, spte, prev_smfn = *prev_smfn_ptr; 2.189 l1_pgentry_t *spl1e, *prev_spl1e = *prev_spl1e_ptr; 2.190 2.191 /* N.B. 
To get here, we know the l1 page *must* be shadowed. */ 2.192 SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, " 2.193 - "prev_spfn=%08lx, prev_spl1e=%p\n", 2.194 - pa, gpte, prev_spfn, prev_spl1e); 2.195 + "prev_smfn=%08lx, prev_spl1e=%p", 2.196 + pa, gpte, prev_smfn, prev_spl1e); 2.197 2.198 - spfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask; 2.199 + smfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask; 2.200 2.201 - if ( spfn == prev_spfn ) 2.202 + if ( smfn == prev_smfn ) 2.203 { 2.204 spl1e = prev_spl1e; 2.205 } 2.206 @@ -712,8 +716,8 @@ void shadow_l1_normal_pt_update( 2.207 { 2.208 if ( prev_spl1e != NULL ) 2.209 unmap_domain_mem( prev_spl1e ); 2.210 - spl1e = (l1_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT); 2.211 - *prev_spfn_ptr = spfn; 2.212 + spl1e = (l1_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT); 2.213 + *prev_smfn_ptr = smfn; 2.214 *prev_spl1e_ptr = spl1e; 2.215 } 2.216 2.217 @@ -721,24 +725,24 @@ void shadow_l1_normal_pt_update( 2.218 spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = mk_l1_pgentry(spte); 2.219 } 2.220 2.221 -void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte) 2.222 +void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde) 2.223 { 2.224 - unsigned long spfn, spte; 2.225 + unsigned long sl2mfn, spde; 2.226 l2_pgentry_t *spl2e; 2.227 - unsigned long s_sh; 2.228 + unsigned long sl1mfn; 2.229 2.230 /* N.B. To get here, we know the l2 page *must* be shadowed. */ 2.231 - SH_VVLOG("shadow_l2_normal_pt_update pa=%08lx, gpte=%08lx",pa,gpte); 2.232 + SH_VVLOG("shadow_l2_normal_pt_update pa=%08lx, gpde=%08lx",pa,gpde); 2.233 2.234 - spfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask; 2.235 + sl2mfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask; 2.236 2.237 - s_sh = (gpte & _PAGE_PRESENT) ? 2.238 - __shadow_status(current->domain, gpte >> PAGE_SHIFT) : 0; 2.239 + sl1mfn = (gpde & _PAGE_PRESENT) ? 2.240 + __shadow_status(current->domain, gpde >> PAGE_SHIFT) : 0; 2.241 2.242 /* XXXX Should mark guest pte as DIRTY and ACCESSED too! */ 2.243 - l2pde_general(current->domain, &gpte, &spte, s_sh); 2.244 - spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT); 2.245 - spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry(spte); 2.246 + l2pde_general(current->domain, &gpde, &spde, sl1mfn); 2.247 + spl2e = (l2_pgentry_t *)map_domain_mem(sl2mfn << PAGE_SHIFT); 2.248 + spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry(spde); 2.249 unmap_domain_mem(spl2e); 2.250 } 2.251 2.252 @@ -751,23 +755,36 @@ void shadow_l2_normal_pt_update(unsigned 2.253 2.254 #if SHADOW_DEBUG 2.255 2.256 +// BUG: these are not SMP safe... 2.257 static int sh_l2_present; 2.258 static int sh_l1_present; 2.259 +static int errors; 2.260 char * sh_check_name; 2.261 2.262 -#define FAIL(_f, _a...) \ 2.263 - do { \ 2.264 - printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx\n", \ 2.265 - sh_check_name, level, i, ## _a , gpte, spte); \ 2.266 - BUG(); \ 2.267 +#define virt_to_phys2(adr) ({ \ 2.268 + unsigned long _a = (unsigned long)(adr); \ 2.269 + unsigned long _pte = l1_pgentry_val( \ 2.270 + shadow_linear_pg_table[_a >> PAGE_SHIFT]); \ 2.271 + unsigned long _pa = _pte & PAGE_MASK; \ 2.272 + _pa | (_a & ~PAGE_MASK); \ 2.273 +}) 2.274 + 2.275 +#define FAIL(_f, _a...) 
\ 2.276 + do { \ 2.277 + printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx &g=%08lx &s=%08lx" \ 2.278 + " pa(&g)=%08lx pa(&s)=%08lx\n", \ 2.279 + sh_check_name, level, i, ## _a , gpte, spte, pgpte, pspte, \ 2.280 + virt_to_phys2(pgpte), virt_to_phys2(pspte)); \ 2.281 + errors++; \ 2.282 } while ( 0 ) 2.283 2.284 static int check_pte( 2.285 - struct domain *d, unsigned long gpte, unsigned long spte, 2.286 + struct domain *d, unsigned long *pgpte, unsigned long *pspte, 2.287 int level, int i) 2.288 { 2.289 - unsigned long mask, gpfn, spfn; 2.290 - unsigned long guest_gpfn; 2.291 + unsigned gpte = *pgpte; 2.292 + unsigned spte = *pspte; 2.293 + unsigned long mask, gpfn, smfn; 2.294 2.295 if ( (spte == 0) || (spte == 0xdeadface) || (spte == 0x00000E00) ) 2.296 return 1; /* always safe */ 2.297 @@ -781,7 +798,7 @@ static int check_pte( 2.298 if ( !(gpte & _PAGE_PRESENT) ) 2.299 FAIL("Guest not present yet shadow is"); 2.300 2.301 - mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|0xFFFFF000); 2.302 + mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|PAGE_MASK); 2.303 2.304 if ( (spte & mask) != (gpte & mask) ) 2.305 FAIL("Corrupt?"); 2.306 @@ -798,10 +815,10 @@ static int check_pte( 2.307 if ( (spte & _PAGE_RW ) && !((gpte & _PAGE_RW) && (gpte & _PAGE_DIRTY)) ) 2.308 FAIL("RW2 coherence"); 2.309 2.310 - spfn = spte >> PAGE_SHIFT; 2.311 + smfn = spte >> PAGE_SHIFT; 2.312 gpfn = gpte >> PAGE_SHIFT; 2.313 2.314 - if ( gpfn == spfn ) 2.315 + if ( gpfn == smfn ) 2.316 { 2.317 if ( level > 1 ) 2.318 FAIL("Linear map ???"); /* XXX this will fail on BSD */ 2.319 @@ -811,20 +828,9 @@ static int check_pte( 2.320 if ( level < 2 ) 2.321 FAIL("Shadow in L1 entry?"); 2.322 2.323 - if (d->arch.shadow_mode == SHM_full_32) { 2.324 - 2.325 - guest_gpfn = phys_to_machine_mapping(gpfn); 2.326 - 2.327 - if ( __shadow_status(d, guest_gpfn) != (PSH_shadowed | spfn) ) 2.328 - FAIL("spfn problem g.sf=%08lx", 2.329 - __shadow_status(d, guest_gpfn) ); 2.330 - 2.331 - } else { 2.332 - if ( __shadow_status(d, gpfn) != (PSH_shadowed | spfn) ) 2.333 - FAIL("spfn problem g.sf=%08lx", 2.334 - __shadow_status(d, gpfn) ); 2.335 - } 2.336 - 2.337 + if ( __shadow_status(d, gpfn) != (PSH_shadowed | smfn) ) 2.338 + FAIL("smfn problem g.sf=%08lx", 2.339 + __shadow_status(d, gpfn) ); 2.340 } 2.341 2.342 return 1; 2.343 @@ -832,17 +838,17 @@ static int check_pte( 2.344 2.345 2.346 static int check_l1_table( 2.347 - struct domain *d, unsigned long va, 2.348 - unsigned long g2, unsigned long s2) 2.349 + struct domain *d, 2.350 + unsigned long g2mfn, unsigned long s2mfn) 2.351 { 2.352 int i; 2.353 unsigned long *gpl1e, *spl1e; 2.354 2.355 - gpl1e = map_domain_mem(g2 << PAGE_SHIFT); 2.356 - spl1e = map_domain_mem(s2 << PAGE_SHIFT); 2.357 + gpl1e = map_domain_mem(g2mfn << PAGE_SHIFT); 2.358 + spl1e = map_domain_mem(s2mfn << PAGE_SHIFT); 2.359 2.360 for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ ) 2.361 - check_pte(d, gpl1e[i], spl1e[i], 1, i); 2.362 + check_pte(d, &gpl1e[i], &spl1e[i], 1, i); 2.363 2.364 unmap_domain_mem(spl1e); 2.365 unmap_domain_mem(gpl1e); 2.366 @@ -853,49 +859,46 @@ static int check_l1_table( 2.367 #define FAILPT(_f, _a...) 
\ 2.368 do { \ 2.369 printk("XXX FAIL %s-PT" _f "\n", s, ## _a ); \ 2.370 - BUG(); \ 2.371 + errors++; \ 2.372 } while ( 0 ) 2.373 2.374 -int check_pagetable(struct domain *d, pagetable_t pt, char *s) 2.375 +void check_pagetable(struct domain *d, pagetable_t pt, char *s) 2.376 { 2.377 unsigned long gptbase = pagetable_val(pt); 2.378 - unsigned long gpfn, spfn; 2.379 + unsigned long ptbase_pfn, smfn, ss; 2.380 unsigned long i; 2.381 l2_pgentry_t *gpl2e, *spl2e; 2.382 - unsigned long host_gpfn = 0; 2.383 + unsigned long ptbase_mfn = 0; 2.384 + int cpu = current->processor; 2.385 2.386 + errors = 0; 2.387 sh_check_name = s; 2.388 2.389 SH_VVLOG("%s-PT Audit", s); 2.390 2.391 sh_l2_present = sh_l1_present = 0; 2.392 2.393 - gpfn = gptbase >> PAGE_SHIFT; 2.394 + ptbase_pfn = gptbase >> PAGE_SHIFT; 2.395 + ptbase_mfn = __gpfn_to_mfn(d, ptbase_pfn); 2.396 2.397 - __get_phys_to_machine(d, host_gpfn, gpfn); 2.398 + ss = __shadow_status(d, ptbase_pfn); 2.399 2.400 - if ( ! (__shadow_status(d, gpfn) & PSH_shadowed) ) 2.401 + if ( ! (ss & PSH_shadowed) ) 2.402 { 2.403 printk("%s-PT %08lx not shadowed\n", s, gptbase); 2.404 2.405 - if( __shadow_status(d, gpfn) != 0 ) BUG(); 2.406 - return 0; 2.407 + if ( ss != 0 ) 2.408 + BUG(); 2.409 + return; 2.410 } 2.411 2.412 - spfn = __shadow_status(d, gpfn) & PSH_pfn_mask; 2.413 - 2.414 - if ( ! __shadow_status(d, gpfn) == (PSH_shadowed | spfn) ) 2.415 - FAILPT("ptbase shadow inconsistent1"); 2.416 + smfn = ss & PSH_pfn_mask; 2.417 2.418 - if (d->arch.shadow_mode == SHM_full_32) 2.419 - { 2.420 - host_gpfn = phys_to_machine_mapping(gpfn); 2.421 - gpl2e = (l2_pgentry_t *) map_domain_mem( host_gpfn << PAGE_SHIFT ); 2.422 + if ( ss != (PSH_shadowed | smfn) ) 2.423 + FAILPT("ptbase shadow inconsistent1"); 2.424 2.425 - } else 2.426 - gpl2e = (l2_pgentry_t *) map_domain_mem( gpfn << PAGE_SHIFT ); 2.427 - 2.428 - spl2e = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT ); 2.429 + gpl2e = (l2_pgentry_t *) map_domain_mem( ptbase_mfn << PAGE_SHIFT ); 2.430 + spl2e = (l2_pgentry_t *) map_domain_mem( smfn << PAGE_SHIFT ); 2.431 2.432 if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 2.433 &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 2.434 @@ -916,40 +919,60 @@ int check_pagetable(struct domain *d, pa 2.435 2.436 if ( (l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >> 2.437 L2_PAGETABLE_SHIFT]) != 2.438 - ((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR)) ) 2.439 + ((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR)) ) 2.440 FAILPT("hypervisor shadow linear map inconsistent %08lx %08lx", 2.441 l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >> 2.442 L2_PAGETABLE_SHIFT]), 2.443 - (spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); 2.444 + (smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); 2.445 2.446 - if (d->arch.shadow_mode != SHM_full_32) { 2.447 + if ( shadow_mode(d) != SHM_full_32 ) { 2.448 + // BUG: this shouldn't be using exec_domain[0] here... 2.449 if ( (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) != 2.450 - ((__pa(page_get_owner(&frame_table[gpfn])->arch.mm_perdomain_pt) | 2.451 + ((__pa(page_get_owner(&frame_table[ptbase_pfn])->arch.mm_perdomain_pt) | 2.452 __PAGE_HYPERVISOR))) ) 2.453 FAILPT("hypervisor per-domain map inconsistent"); 2.454 } 2.455 2.456 /* Check the whole L2. */ 2.457 for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ ) 2.458 - check_pte(d, l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]), 2, i); 2.459 + check_pte(d, &l2_pgentry_val(gpl2e[i]), &l2_pgentry_val(spl2e[i]), 2, i); 2.460 2.461 /* Go back and recurse. 
*/ 2.462 for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ ) 2.463 { 2.464 + unsigned long gl1pfn = l2_pgentry_val(gpl2e[i]) >> PAGE_SHIFT; 2.465 + unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn); 2.466 + unsigned long sl1mfn = l2_pgentry_val(spl2e[i]) >> PAGE_SHIFT; 2.467 + 2.468 if ( l2_pgentry_val(spl2e[i]) != 0 ) 2.469 - check_l1_table( 2.470 - d, i << L2_PAGETABLE_SHIFT, 2.471 - l2_pgentry_val(gpl2e[i]) >> PAGE_SHIFT, 2.472 - l2_pgentry_val(spl2e[i]) >> PAGE_SHIFT); 2.473 + { 2.474 + // First check to see if this guest page is currently the active 2.475 + // PTWR page. If so, then we compare the (old) cached copy of the 2.476 + // guest page to the shadow, and not the currently writable (and 2.477 + // thus potentially out-of-sync) guest page. 2.478 + // 2.479 + if ( ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va && 2.480 + (i == ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx) && 2.481 + likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) ) 2.482 + { 2.483 + gl1mfn = (__pa(ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].page) >> 2.484 + PAGE_SHIFT); 2.485 + } 2.486 + 2.487 + check_l1_table(d, gl1mfn, sl1mfn); 2.488 + } 2.489 } 2.490 2.491 unmap_domain_mem(spl2e); 2.492 unmap_domain_mem(gpl2e); 2.493 2.494 - SH_VVLOG("PT verified : l2_present = %d, l1_present = %d\n", 2.495 + SH_VVLOG("PT verified : l2_present = %d, l1_present = %d", 2.496 sh_l2_present, sh_l1_present); 2.497 2.498 - return 1; 2.499 + if ( errors ) 2.500 + BUG(); 2.501 + 2.502 + return; 2.503 } 2.504 2.505 -#endif 2.506 +#endif // SHADOW_DEBUG
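[Editorial note: the check_pagetable() bug fix called out in the commit
message is the hunk just above (2.464-2.488). While a page-table page is
attached writably via the PTWR machinery its live contents may be out of
sync with the shadow, so the audit compares the shadow against the PTWR
snapshot instead of the live page. Condensed from the diff:]

    unsigned long gl1pfn = l2_pgentry_val(gpl2e[i]) >> PAGE_SHIFT;
    unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn);
    unsigned long sl1mfn = l2_pgentry_val(spl2e[i]) >> PAGE_SHIFT;

    if ( l2_pgentry_val(spl2e[i]) != 0 )
    {
        /* If this guest page is the currently active PTWR page, audit
         * the (old) cached snapshot rather than the live -- and thus
         * potentially out-of-sync -- guest page. */
        if ( ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va &&
             (i == ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx) &&
             likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
            gl1mfn = __pa(ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].page) >>
                PAGE_SHIFT;

        check_l1_table(d, gl1mfn, sl1mfn);
    }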
3.1 --- a/xen/arch/x86/traps.c Tue Feb 08 11:07:10 2005 +0000 3.2 +++ b/xen/arch/x86/traps.c Tue Feb 08 12:27:23 2005 +0000 3.3 @@ -272,6 +272,12 @@ asmlinkage int do_page_fault(struct xen_ 3.4 3.5 perfc_incrc(page_faults); 3.6 3.7 +#if 0 3.8 + printk("do_page_fault(addr=0x%08lx, error_code=%d)\n", 3.9 + addr, regs->error_code); 3.10 + show_registers(regs); 3.11 +#endif 3.12 + 3.13 if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) ) 3.14 { 3.15 LOCK_BIGLOCK(d);
4.1 --- a/xen/arch/x86/vmx.c Tue Feb 08 11:07:10 2005 +0000 4.2 +++ b/xen/arch/x86/vmx.c Tue Feb 08 12:27:23 2005 +0000 4.3 @@ -36,6 +36,8 @@ 4.4 #include <asm/vmx_vmcs.h> 4.5 #include <public/io/ioreq.h> 4.6 4.7 +#ifdef CONFIG_VMX 4.8 + 4.9 int vmcs_size; 4.10 unsigned int opt_vmx_debug_level; 4.11 4.12 @@ -123,7 +125,7 @@ static int vmx_do_page_fault(unsigned lo 4.13 /* 4.14 * Set up guest page directory cache to make linear_pt_table[] work. 4.15 */ 4.16 - __guest_get_pl2e(ed, va, &gpde); 4.17 + __guest_get_l2e(ed, va, &gpde); 4.18 if (!(gpde & _PAGE_PRESENT)) 4.19 return 0; 4.20 4.21 @@ -301,7 +303,7 @@ inline unsigned long gva_to_gpa(unsigned 4.22 unsigned long gpde, gpte, pfn, index; 4.23 struct exec_domain *ed = current; 4.24 4.25 - __guest_get_pl2e(ed, gva, &gpde); 4.26 + __guest_get_l2e(ed, gva, &gpde); 4.27 index = (gva >> L2_PAGETABLE_SHIFT); 4.28 4.29 pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT); 4.30 @@ -939,3 +941,5 @@ asmlinkage void load_cr2(void) 4.31 local_irq_disable(); 4.32 asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2)); 4.33 } 4.34 + 4.35 +#endif /* CONFIG_VMX */
5.1 --- a/xen/arch/x86/vmx_io.c Tue Feb 08 11:07:10 2005 +0000 5.2 +++ b/xen/arch/x86/vmx_io.c Tue Feb 08 12:27:23 2005 +0000 5.3 @@ -32,6 +32,8 @@ 5.4 #include <public/io/ioreq.h> 5.5 #include <asm/vmx_platform.h> 5.6 5.7 +#ifdef CONFIG_VMX 5.8 + 5.9 extern long do_block(); 5.10 5.11 #if defined (__i386__) 5.12 @@ -386,3 +388,5 @@ void vmx_do_resume(struct exec_domain *d 5.13 if (!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) 5.14 vmx_intr_assist(d); 5.15 } 5.16 + 5.17 +#endif /* CONFIG_VMX */
6.1 --- a/xen/arch/x86/vmx_platform.c Tue Feb 08 11:07:10 2005 +0000 6.2 +++ b/xen/arch/x86/vmx_platform.c Tue Feb 08 12:27:23 2005 +0000 6.3 @@ -34,6 +34,8 @@ 6.4 #include <xen/sched.h> 6.5 #include <asm/current.h> 6.6 6.7 +#ifdef CONFIG_VMX 6.8 + 6.9 #define DECODE_success 1 6.10 #define DECODE_failure 0 6.11 6.12 @@ -553,3 +555,4 @@ void handle_mmio(unsigned long va, unsig 6.13 domain_crash(); 6.14 } 6.15 6.16 +#endif /* CONFIG_VMX */
7.1 --- a/xen/arch/x86/vmx_vmcs.c Tue Feb 08 11:07:10 2005 +0000 7.2 +++ b/xen/arch/x86/vmx_vmcs.c Tue Feb 08 12:27:23 2005 +0000 7.3 @@ -33,6 +33,8 @@ 7.4 #include <public/io/ioreq.h> 7.5 #include <asm/domain_page.h> 7.6 7.7 +#ifdef CONFIG_VMX 7.8 + 7.9 struct vmcs_struct *alloc_vmcs(void) 7.10 { 7.11 struct vmcs_struct *vmcs; 7.12 @@ -469,3 +471,4 @@ void vm_resume_fail(unsigned long eflags 7.13 BUG(); 7.14 } 7.15 7.16 +#endif /* CONFIG_VMX */
8.1 --- a/xen/arch/x86/x86_32/domain_build.c Tue Feb 08 11:07:10 2005 +0000 8.2 +++ b/xen/arch/x86/x86_32/domain_build.c Tue Feb 08 12:27:23 2005 +0000 8.3 @@ -20,6 +20,7 @@ 8.4 #include <xen/event.h> 8.5 #include <xen/elf.h> 8.6 #include <xen/kernel.h> 8.7 +#include <asm/shadow.h> 8.8 8.9 /* No ring-3 access in initial page tables. */ 8.10 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED) 8.11 @@ -377,10 +378,13 @@ int construct_dom0(struct domain *d, 8.12 8.13 new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start); 8.14 8.15 -#if 0 /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */ 8.16 - shadow_lock(&d->mm); 8.17 - shadow_mode_enable(d, SHM_test); 8.18 - shadow_unlock(&d->mm); 8.19 +#ifndef NDEBUG 8.20 + if (0) /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */ 8.21 + { 8.22 + shadow_lock(d); 8.23 + shadow_mode_enable(d, SHM_test); 8.24 + shadow_unlock(d); 8.25 + } 8.26 #endif 8.27 8.28 return 0;
9.1 --- a/xen/arch/x86/x86_32/traps.c Tue Feb 08 11:07:10 2005 +0000 9.2 +++ b/xen/arch/x86/x86_32/traps.c Tue Feb 08 12:27:23 2005 +0000 9.3 @@ -7,6 +7,7 @@ 9.4 #include <xen/console.h> 9.5 #include <xen/mm.h> 9.6 #include <xen/irq.h> 9.7 +#include <asm/flushtlb.h> 9.8 9.9 static int kstack_depth_to_print = 8*20; 9.10 9.11 @@ -114,6 +115,7 @@ void show_registers(struct xen_regs *reg 9.12 regs->esi, regs->edi, regs->ebp, esp); 9.13 printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n", 9.14 ds, es, fs, gs, ss); 9.15 + printk("cr3: %08lx\n", read_cr3()); 9.16 9.17 show_stack((unsigned long *)®s->esp); 9.18 }
10.1 --- a/xen/include/asm-x86/shadow.h Tue Feb 08 11:07:10 2005 +0000 10.2 +++ b/xen/include/asm-x86/shadow.h Tue Feb 08 12:27:23 2005 +0000 10.3 @@ -8,6 +8,10 @@ 10.4 #include <xen/perfc.h> 10.5 #include <asm/processor.h> 10.6 10.7 +#ifdef CONFIG_VMX 10.8 +#include <asm/domain_page.h> 10.9 +#endif 10.10 + 10.11 /* Shadow PT flag bits in pfn_info */ 10.12 #define PSH_shadowed (1<<31) /* page has a shadow. PFN points to shadow */ 10.13 #define PSH_pfn_mask ((1<<21)-1) 10.14 @@ -34,7 +38,7 @@ extern int shadow_fault(unsigned long va 10.15 extern void shadow_l1_normal_pt_update( 10.16 unsigned long pa, unsigned long gpte, 10.17 unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr); 10.18 -extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte); 10.19 +extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde); 10.20 extern void unshadow_table(unsigned long gpfn, unsigned int type); 10.21 extern int shadow_mode_enable(struct domain *p, unsigned int mode); 10.22 10.23 @@ -43,17 +47,15 @@ extern void vmx_shadow_clear_state(struc 10.24 extern void vmx_shadow_invlpg(struct domain *, unsigned long); 10.25 #endif 10.26 10.27 -#define __get_machine_to_phys(_d, guest_gpfn, gpfn) \ 10.28 - if ((_d)->arch.shadow_mode == SHM_full_32) \ 10.29 - (guest_gpfn) = machine_to_phys_mapping[(gpfn)]; \ 10.30 - else \ 10.31 - (guest_gpfn) = (gpfn); 10.32 +#define __mfn_to_gpfn(_d, mfn) \ 10.33 + ( (shadow_mode(_d) == SHM_full_32) \ 10.34 + ? machine_to_phys_mapping[(mfn)] \ 10.35 + : (mfn) ) 10.36 10.37 -#define __get_phys_to_machine(_d, host_gpfn, gpfn) \ 10.38 - if ((_d)->arch.shadow_mode == SHM_full_32) \ 10.39 - (host_gpfn) = phys_to_machine_mapping(gpfn); \ 10.40 - else \ 10.41 - (host_gpfn) = (gpfn); 10.42 +#define __gpfn_to_mfn(_d, gpfn) \ 10.43 + ( (shadow_mode(_d) == SHM_full_32) \ 10.44 + ? phys_to_machine_mapping(gpfn) \ 10.45 + : (gpfn) ) 10.46 10.47 extern void __shadow_mode_disable(struct domain *d); 10.48 static inline void shadow_mode_disable(struct domain *d) 10.49 @@ -66,17 +68,18 @@ extern unsigned long shadow_l2_table( 10.50 struct domain *d, unsigned long gpfn); 10.51 10.52 static inline void shadow_invalidate(struct exec_domain *ed) { 10.53 - if ( ed->domain->arch.shadow_mode != SHM_full_32 ) 10.54 + if ( shadow_mode(ed->domain) != SHM_full_32 ) 10.55 BUG(); 10.56 memset(ed->arch.shadow_vtable, 0, PAGE_SIZE); 10.57 } 10.58 10.59 #define SHADOW_DEBUG 1 10.60 +#define SHADOW_VERBOSE_DEBUG 0 10.61 #define SHADOW_HASH_DEBUG 1 10.62 10.63 struct shadow_status { 10.64 unsigned long pfn; /* Guest pfn. */ 10.65 - unsigned long spfn_and_flags; /* Shadow pfn plus flags. */ 10.66 + unsigned long smfn_and_flags; /* Shadow mfn plus flags. */ 10.67 struct shadow_status *next; /* Pull-to-front list. */ 10.68 }; 10.69 10.70 @@ -84,58 +87,68 @@ struct shadow_status { 10.71 #define shadow_ht_buckets 256 10.72 10.73 #ifdef VERBOSE 10.74 -#define SH_LOG(_f, _a...) \ 10.75 -printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \ 10.76 - current->domain->id , __LINE__ , ## _a ) 10.77 +#define SH_LOG(_f, _a...) \ 10.78 +printk("DOM%uP%u: (file=shadow.c, line=%d) " _f "\n", \ 10.79 + current->domain->id , current->processor, __LINE__ , ## _a ) 10.80 #else 10.81 #define SH_LOG(_f, _a...) 10.82 #endif 10.83 10.84 #if SHADOW_DEBUG 10.85 -#define SH_VLOG(_f, _a...) \ 10.86 - printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \ 10.87 - current->domain->id , __LINE__ , ## _a ) 10.88 +#define SH_VLOG(_f, _a...) 
\ 10.89 + printk("DOM%uP%u: (file=shadow.c, line=%d) " _f "\n", \ 10.90 + current->domain->id, current->processor, __LINE__ , ## _a ) 10.91 #else 10.92 #define SH_VLOG(_f, _a...) 10.93 #endif 10.94 10.95 -#if 0 10.96 -#define SH_VVLOG(_f, _a...) \ 10.97 - printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \ 10.98 - current->domain->id , __LINE__ , ## _a ) 10.99 +#if SHADOW_VERBOSE_DEBUG 10.100 +#define SH_VVLOG(_f, _a...) \ 10.101 + printk("DOM%uP%u: (file=shadow.c, line=%d) " _f "\n", \ 10.102 + current->domain->id, current->processor, __LINE__ , ## _a ) 10.103 #else 10.104 #define SH_VVLOG(_f, _a...) 10.105 #endif 10.106 10.107 -static inline void __shadow_get_pl2e( 10.108 +// BUG: mafetter: this assumes ed == current, so why pass ed? 10.109 +static inline void __shadow_get_l2e( 10.110 struct exec_domain *ed, unsigned long va, unsigned long *sl2e) 10.111 { 10.112 - *sl2e = (ed->domain->arch.shadow_mode == SHM_full_32) ? 10.113 - l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]) : 10.114 - l2_pgentry_val(linear_l2_table[l2_table_offset(va)]); 10.115 + if ( shadow_mode(ed->domain) == SHM_full_32 ) { 10.116 + *sl2e = l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]); 10.117 + } 10.118 + else if ( shadow_mode(ed->domain) ) { 10.119 + *sl2e = l2_pgentry_val(shadow_linear_l2_table[l2_table_offset(va)]); 10.120 + } 10.121 + else 10.122 + *sl2e = l2_pgentry_val(linear_l2_table[l2_table_offset(va)]); 10.123 } 10.124 10.125 -static inline void __shadow_set_pl2e( 10.126 +static inline void __shadow_set_l2e( 10.127 struct exec_domain *ed, unsigned long va, unsigned long value) 10.128 { 10.129 - if ( ed->domain->arch.shadow_mode == SHM_full_32 ) 10.130 + if ( shadow_mode(ed->domain) == SHM_full_32 ) { 10.131 ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value); 10.132 + } 10.133 + else if ( shadow_mode(ed->domain) ) { 10.134 + shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value); 10.135 + } 10.136 else 10.137 linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value); 10.138 } 10.139 10.140 -static inline void __guest_get_pl2e( 10.141 +static inline void __guest_get_l2e( 10.142 struct exec_domain *ed, unsigned long va, unsigned long *l2e) 10.143 { 10.144 - *l2e = (ed->domain->arch.shadow_mode == SHM_full_32) ? 10.145 + *l2e = ( shadow_mode(ed->domain) == SHM_full_32) ? 
10.146 l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) : 10.147 l2_pgentry_val(linear_l2_table[l2_table_offset(va)]); 10.148 } 10.149 10.150 -static inline void __guest_set_pl2e( 10.151 +static inline void __guest_set_l2e( 10.152 struct exec_domain *ed, unsigned long va, unsigned long value) 10.153 { 10.154 - if ( ed->domain->arch.shadow_mode == SHM_full_32 ) 10.155 + if ( shadow_mode(ed->domain) == SHM_full_32 ) 10.156 { 10.157 unsigned long pfn; 10.158 10.159 @@ -213,32 +226,18 @@ static inline void l1pte_write_fault( 10.160 { 10.161 unsigned long gpte = *gpte_p; 10.162 unsigned long spte = *spte_p; 10.163 + unsigned long pfn = gpte >> PAGE_SHIFT; 10.164 + unsigned long mfn = __gpfn_to_mfn(d, pfn); 10.165 10.166 ASSERT(gpte & _PAGE_RW); 10.167 gpte |= _PAGE_DIRTY | _PAGE_ACCESSED; 10.168 10.169 - switch ( d->arch.shadow_mode ) 10.170 - { 10.171 - case SHM_test: 10.172 - spte = gpte | _PAGE_RW; 10.173 - break; 10.174 - 10.175 - case SHM_logdirty: 10.176 - spte = gpte | _PAGE_RW; 10.177 - __mark_dirty(d, gpte >> PAGE_SHIFT); 10.178 + if ( shadow_mode(d) == SHM_logdirty ) 10.179 + __mark_dirty(d, pfn); 10.180 10.181 - case SHM_full_32: 10.182 - { 10.183 - unsigned long host_pfn, host_gpte; 10.184 - 10.185 - host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT); 10.186 - host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK); 10.187 - spte = host_gpte | _PAGE_RW; 10.188 - } 10.189 - break; 10.190 - } 10.191 + spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK); 10.192 10.193 - SH_VVLOG("updating spte=%lx gpte=%lx", spte, gpte); 10.194 + SH_VVLOG("l1pte_write_fault: updating spte=0x%08lx gpte=0x%08lx", spte, gpte); 10.195 *gpte_p = gpte; 10.196 *spte_p = spte; 10.197 } 10.198 @@ -248,31 +247,16 @@ static inline void l1pte_read_fault( 10.199 { 10.200 unsigned long gpte = *gpte_p; 10.201 unsigned long spte = *spte_p; 10.202 + unsigned long pfn = gpte >> PAGE_SHIFT; 10.203 + unsigned long mfn = __gpfn_to_mfn(d, pfn); 10.204 10.205 gpte |= _PAGE_ACCESSED; 10.206 - 10.207 - switch ( d->arch.shadow_mode ) 10.208 - { 10.209 - case SHM_test: 10.210 - spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW); 10.211 - break; 10.212 - 10.213 - case SHM_logdirty: 10.214 - spte = gpte & ~_PAGE_RW; 10.215 - break; 10.216 + spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK); 10.217 10.218 - case SHM_full_32: 10.219 - { 10.220 - unsigned long host_pfn, host_gpte; 10.221 - 10.222 - host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT); 10.223 - host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK); 10.224 - spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW); 10.225 - } 10.226 - break; 10.227 + if ( (shadow_mode(d) == SHM_logdirty) || ! 
(gpte & _PAGE_DIRTY) ) 10.228 + spte &= ~_PAGE_RW; 10.229 10.230 - } 10.231 - 10.232 + SH_VVLOG("l1pte_read_fault: updating spte=0x%08lx gpte=0x%08lx", spte, gpte); 10.233 *gpte_p = gpte; 10.234 *spte_p = spte; 10.235 } 10.236 @@ -283,8 +267,11 @@ static inline void l1pte_propagate_from_ 10.237 unsigned long gpte = *gpte_p; 10.238 unsigned long spte = *spte_p; 10.239 unsigned long host_pfn, host_gpte; 10.240 +#if SHADOW_VERBOSE_DEBUG 10.241 + unsigned long old_spte = spte; 10.242 +#endif 10.243 10.244 - switch ( d->arch.shadow_mode ) 10.245 + switch ( shadow_mode(d) ) 10.246 { 10.247 case SHM_test: 10.248 spte = 0; 10.249 @@ -320,6 +307,11 @@ static inline void l1pte_propagate_from_ 10.250 break; 10.251 } 10.252 10.253 +#if SHADOW_VERBOSE_DEBUG 10.254 + if ( old_spte || spte || gpte ) 10.255 + SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%08lx, old spte=0x%08lx, new spte=0x%08lx ", gpte, old_spte, spte); 10.256 +#endif 10.257 + 10.258 *gpte_p = gpte; 10.259 *spte_p = spte; 10.260 } 10.261 @@ -328,24 +320,24 @@ static inline void l2pde_general( 10.262 struct domain *d, 10.263 unsigned long *gpde_p, 10.264 unsigned long *spde_p, 10.265 - unsigned long sl1pfn) 10.266 + unsigned long sl1mfn) 10.267 { 10.268 unsigned long gpde = *gpde_p; 10.269 unsigned long spde = *spde_p; 10.270 10.271 spde = 0; 10.272 10.273 - if ( sl1pfn != 0 ) 10.274 + if ( sl1mfn != 0 ) 10.275 { 10.276 - spde = (gpde & ~PAGE_MASK) | (sl1pfn << PAGE_SHIFT) | 10.277 + spde = (gpde & ~PAGE_MASK) | (sl1mfn << PAGE_SHIFT) | 10.278 _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY; 10.279 gpde |= _PAGE_ACCESSED | _PAGE_DIRTY; 10.280 10.281 /* Detect linear p.t. mappings and write-protect them. */ 10.282 - if ( (frame_table[sl1pfn].u.inuse.type_info & PGT_type_mask) == 10.283 + if ( (frame_table[sl1mfn].u.inuse.type_info & PGT_type_mask) == 10.284 PGT_l2_page_table ) 10.285 { 10.286 - if ( d->arch.shadow_mode != SHM_full_32 ) 10.287 + if ( shadow_mode(d) != SHM_full_32 ) 10.288 spde = gpde & ~_PAGE_RW; 10.289 10.290 } 10.291 @@ -366,20 +358,20 @@ static void shadow_audit(struct domain * 10.292 for ( j = 0; j < shadow_ht_buckets; j++ ) 10.293 { 10.294 a = &d->arch.shadow_ht[j]; 10.295 - if ( a->pfn ) { live++; ASSERT(a->spfn_and_flags & PSH_pfn_mask); } 10.296 + if ( a->pfn ) { live++; ASSERT(a->smfn_and_flags & PSH_pfn_mask); } 10.297 ASSERT(a->pfn < 0x00100000UL); 10.298 a = a->next; 10.299 while ( a && (live < 9999) ) 10.300 { 10.301 live++; 10.302 - if ( (a->pfn == 0) || (a->spfn_and_flags == 0) ) 10.303 + if ( (a->pfn == 0) || (a->smfn_and_flags == 0) ) 10.304 { 10.305 printk("XXX live=%d pfn=%08lx sp=%08lx next=%p\n", 10.306 - live, a->pfn, a->spfn_and_flags, a->next); 10.307 + live, a->pfn, a->smfn_and_flags, a->next); 10.308 BUG(); 10.309 } 10.310 ASSERT(a->pfn < 0x00100000UL); 10.311 - ASSERT(a->spfn_and_flags & PSH_pfn_mask); 10.312 + ASSERT(a->smfn_and_flags & PSH_pfn_mask); 10.313 a = a->next; 10.314 } 10.315 ASSERT(live < 9999); 10.316 @@ -411,6 +403,12 @@ static inline struct shadow_status *hash 10.317 } 10.318 10.319 10.320 +/* 10.321 + * N.B. This takes a guest pfn (i.e. a pfn in the guest's namespace, 10.322 + * which, depending on full shadow mode, may or may not equal 10.323 + * its mfn). 10.324 + * The shadow status it returns is a mfn. 
10.325 + */ 10.326 static inline unsigned long __shadow_status( 10.327 struct domain *d, unsigned int gpfn) 10.328 { 10.329 @@ -419,7 +417,7 @@ static inline unsigned long __shadow_sta 10.330 x = head = hash_bucket(d, gpfn); 10.331 p = NULL; 10.332 10.333 - SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x); 10.334 + //SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x); 10.335 shadow_audit(d, 0); 10.336 10.337 do 10.338 @@ -438,10 +436,12 @@ static inline unsigned long __shadow_sta 10.339 10.340 /* Swap 'x' contents with head contents. */ 10.341 SWAP(head->pfn, x->pfn); 10.342 - SWAP(head->spfn_and_flags, x->spfn_and_flags); 10.343 + SWAP(head->smfn_and_flags, x->smfn_and_flags); 10.344 } 10.345 10.346 - return head->spfn_and_flags; 10.347 + SH_VVLOG("lookup gpfn=%08lx => status=%08lx", 10.348 + gpfn, head->smfn_and_flags); 10.349 + return head->smfn_and_flags; 10.350 } 10.351 10.352 p = x; 10.353 @@ -449,6 +449,7 @@ static inline unsigned long __shadow_sta 10.354 } 10.355 while ( x != NULL ); 10.356 10.357 + SH_VVLOG("lookup gpfn=%08lx => status=0", gpfn); 10.358 return 0; 10.359 } 10.360 10.361 @@ -462,7 +463,7 @@ static inline unsigned long get_shadow_s 10.362 { 10.363 unsigned long res; 10.364 10.365 - ASSERT(d->arch.shadow_mode); 10.366 + ASSERT(shadow_mode(d)); 10.367 10.368 /* 10.369 * If we get here we know that some sort of update has happened to the 10.370 @@ -474,7 +475,7 @@ static inline unsigned long get_shadow_s 10.371 10.372 shadow_lock(d); 10.373 10.374 - if ( d->arch.shadow_mode == SHM_logdirty ) 10.375 + if ( shadow_mode(d) == SHM_logdirty ) 10.376 __mark_dirty(d, gpfn); 10.377 10.378 if ( !(res = __shadow_status(d, gpfn)) ) 10.379 @@ -511,14 +512,14 @@ static inline void delete_shadow_status( 10.380 { 10.381 /* Overwrite head with contents of following node. */ 10.382 head->pfn = n->pfn; 10.383 - head->spfn_and_flags = n->spfn_and_flags; 10.384 + head->smfn_and_flags = n->smfn_and_flags; 10.385 10.386 /* Delete following node. */ 10.387 head->next = n->next; 10.388 10.389 /* Add deleted node to the free list. */ 10.390 n->pfn = 0; 10.391 - n->spfn_and_flags = 0; 10.392 + n->smfn_and_flags = 0; 10.393 n->next = d->arch.shadow_ht_free; 10.394 d->arch.shadow_ht_free = n; 10.395 } 10.396 @@ -526,7 +527,7 @@ static inline void delete_shadow_status( 10.397 { 10.398 /* This bucket is now empty. Initialise the head node. */ 10.399 head->pfn = 0; 10.400 - head->spfn_and_flags = 0; 10.401 + head->smfn_and_flags = 0; 10.402 } 10.403 10.404 goto found; 10.405 @@ -544,7 +545,7 @@ static inline void delete_shadow_status( 10.406 10.407 /* Add deleted node to the free list. */ 10.408 x->pfn = 0; 10.409 - x->spfn_and_flags = 0; 10.410 + x->smfn_and_flags = 0; 10.411 x->next = d->arch.shadow_ht_free; 10.412 d->arch.shadow_ht_free = x; 10.413 10.414 @@ -587,7 +588,7 @@ static inline void set_shadow_status( 10.415 { 10.416 if ( x->pfn == gpfn ) 10.417 { 10.418 - x->spfn_and_flags = s; 10.419 + x->smfn_and_flags = s; 10.420 goto done; 10.421 } 10.422 10.423 @@ -603,7 +604,7 @@ static inline void set_shadow_status( 10.424 if ( head->pfn == 0 ) 10.425 { 10.426 head->pfn = gpfn; 10.427 - head->spfn_and_flags = s; 10.428 + head->smfn_and_flags = s; 10.429 ASSERT(head->next == NULL); 10.430 goto done; 10.431 } 10.432 @@ -643,7 +644,7 @@ static inline void set_shadow_status( 10.433 10.434 /* Initialise the new node and insert directly after the head item. 
*/ 10.435 x->pfn = gpfn; 10.436 - x->spfn_and_flags = s; 10.437 + x->smfn_and_flags = s; 10.438 x->next = head->next; 10.439 head->next = x; 10.440 10.441 @@ -652,10 +653,9 @@ static inline void set_shadow_status( 10.442 } 10.443 10.444 #ifdef CONFIG_VMX 10.445 -#include <asm/domain_page.h> 10.446 10.447 static inline void vmx_update_shadow_state( 10.448 - struct exec_domain *ed, unsigned long gpfn, unsigned long spfn) 10.449 + struct exec_domain *ed, unsigned long gpfn, unsigned long smfn) 10.450 { 10.451 10.452 l2_pgentry_t *mpl2e = 0; 10.453 @@ -672,70 +672,46 @@ static inline void vmx_update_shadow_sta 10.454 map_domain_mem(pagetable_val(ed->arch.monitor_table)); 10.455 10.456 mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = 10.457 - mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); 10.458 + mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); 10.459 __flush_tlb_one(SH_LINEAR_PT_VIRT_START); 10.460 10.461 - spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT); 10.462 + spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT); 10.463 gpl2e = (l2_pgentry_t *)map_domain_mem(gpfn << PAGE_SHIFT); 10.464 memset(spl2e, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t)); 10.465 10.466 - ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT); 10.467 ed->arch.shadow_vtable = spl2e; 10.468 ed->arch.vpagetable = gpl2e; /* expect the guest did clean this up */ 10.469 unmap_domain_mem(mpl2e); 10.470 } 10.471 10.472 +#endif /* CONFIG_VMX */ 10.473 + 10.474 static inline void __shadow_mk_pagetable(struct exec_domain *ed) 10.475 { 10.476 struct domain *d = ed->domain; 10.477 unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT; 10.478 - unsigned long spfn; 10.479 - SH_VLOG("0: __shadow_mk_pagetable(gpfn=%08lx\n", gpfn); 10.480 + unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask; 10.481 10.482 - if (d->arch.shadow_mode == SHM_full_32) 10.483 - { 10.484 - unsigned long guest_gpfn; 10.485 - guest_gpfn = machine_to_phys_mapping[gpfn]; 10.486 - 10.487 - SH_VVLOG("__shadow_mk_pagetable(guest_gpfn=%08lx, gpfn=%08lx\n", 10.488 - guest_gpfn, gpfn); 10.489 + SH_VVLOG("0: __shadow_mk_pagetable(gpfn=%08lx, smfn=%08lx)", gpfn, smfn); 10.490 10.491 - spfn = __shadow_status(d, guest_gpfn) & PSH_pfn_mask; 10.492 - if ( unlikely(spfn == 0) ) { 10.493 - spfn = shadow_l2_table(d, gpfn); 10.494 - ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT); 10.495 - } else { 10.496 - vmx_update_shadow_state(ed, gpfn, spfn); 10.497 - } 10.498 - } else { 10.499 - spfn = __shadow_status(d, gpfn) & PSH_pfn_mask; 10.500 + if ( unlikely(smfn == 0) ) 10.501 + smfn = shadow_l2_table(d, gpfn); 10.502 +#ifdef CONFIG_VMX 10.503 + else 10.504 + if (d->arch.shadow_mode == SHM_full_32) 10.505 + vmx_update_shadow_state(ed, gpfn, smfn); 10.506 +#endif 10.507 10.508 - if ( unlikely(spfn == 0) ) { 10.509 - spfn = shadow_l2_table(d, gpfn); 10.510 - } 10.511 - ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT); 10.512 - } 10.513 + ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT); 10.514 } 10.515 -#else 10.516 -static inline void __shadow_mk_pagetable(struct exec_domain *ed) 10.517 -{ 10.518 - unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT; 10.519 - unsigned long spfn = __shadow_status(ed->domain, gpfn); 10.520 - 10.521 - if ( unlikely(spfn == 0) ) 10.522 - spfn = shadow_l2_table(ed->domain, gpfn); 10.523 - 10.524 - ed->arch.shadow_table = mk_pagetable(spfn << PAGE_SHIFT); 10.525 -} 10.526 -#endif /* CONFIG_VMX */ 10.527 10.528 static inline void 
shadow_mk_pagetable(struct exec_domain *ed) 10.529 { 10.530 - if ( unlikely(ed->domain->arch.shadow_mode) ) 10.531 + if ( unlikely(shadow_mode(ed->domain)) ) 10.532 { 10.533 SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )", 10.534 pagetable_val(ed->arch.pagetable), 10.535 - ed->domain->arch.shadow_mode); 10.536 + shadow_mode(ed->domain)); 10.537 10.538 shadow_lock(ed->domain); 10.539 __shadow_mk_pagetable(ed); 10.540 @@ -744,13 +720,13 @@ static inline void shadow_mk_pagetable(s 10.541 SH_VVLOG("leaving shadow_mk_pagetable:\n" 10.542 "( gptbase=%08lx, mode=%d ) sh=%08lx", 10.543 pagetable_val(ed->arch.pagetable), 10.544 - ed->domain->arch.shadow_mode, 10.545 + shadow_mode(ed->domain), 10.546 pagetable_val(ed->arch.shadow_table) ); 10.547 } 10.548 } 10.549 10.550 #if SHADOW_DEBUG 10.551 -extern int check_pagetable(struct domain *d, pagetable_t pt, char *s); 10.552 +extern void check_pagetable(struct domain *d, pagetable_t pt, char *s); 10.553 #else 10.554 #define check_pagetable(d, pt, s) ((void)0) 10.555 #endif
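[Editorial note: for readability, here is the unified __shadow_mk_pagetable()
that replaces the two separate CONFIG_VMX/non-VMX variants, reassembled from
the shadow.h hunk above (10.474-10.514). The lookup and table assignment are
now shared; only the VMX shadow-state refresh remains conditional:]

    static inline void __shadow_mk_pagetable(struct exec_domain *ed)
    {
        struct domain *d = ed->domain;
        unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
        unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;

        SH_VVLOG("0: __shadow_mk_pagetable(gpfn=%08lx, smfn=%08lx)", gpfn, smfn);

        /* No shadow yet? Build one. Otherwise, full-shadow VMX guests
         * must refresh their cached shadow/guest table mappings. */
        if ( unlikely(smfn == 0) )
            smfn = shadow_l2_table(d, gpfn);
    #ifdef CONFIG_VMX
        else if ( d->arch.shadow_mode == SHM_full_32 )
            vmx_update_shadow_state(ed, gpfn, smfn);
    #endif

        ed->arch.shadow_table = mk_pagetable(smfn << PAGE_SHIFT);
    }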