debuggers.hg
changeset 4671:18a8f5216548
bitkeeper revision 1.1366 (4268c126o36cKcnzrSkVxkbrPsoz1g)
Clean up shadow destruction and fix domain destroy when shadow mode
is disabled.
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kaf24@firebug.cl.cam.ac.uk |
---|---|
date | Fri Apr 22 09:17:26 2005 +0000 (2005-04-22) |
parents | 1caa83c98dee |
children | 10b57175d4e2 |
files | xen/arch/x86/domain.c xen/arch/x86/shadow.c xen/arch/x86/vmx_vmcs.c xen/common/page_alloc.c xen/include/asm-x86/domain.h xen/include/asm-x86/shadow.h xen/include/xen/shadow.h |
line diff
1.1 --- a/xen/arch/x86/domain.c Thu Apr 21 22:33:54 2005 +0000 1.2 +++ b/xen/arch/x86/domain.c Fri Apr 22 09:17:26 2005 +0000 1.3 @@ -991,36 +991,24 @@ void domain_relinquish_resources(struct 1.4 { 1.5 if ( pagetable_val(ed->arch.guest_table) != 0 ) 1.6 { 1.7 - struct pfn_info *page = 1.8 - &frame_table[pagetable_val(ed->arch.guest_table)>>PAGE_SHIFT]; 1.9 - 1.10 - if ( shadow_mode_enabled(d) ) 1.11 - put_page(page); 1.12 - else 1.13 - put_page_and_type(page); 1.14 - 1.15 + (shadow_mode_enabled(d) ? put_page : put_page_and_type) 1.16 + (&frame_table[pagetable_val( 1.17 + ed->arch.guest_table) >> PAGE_SHIFT]); 1.18 ed->arch.guest_table = mk_pagetable(0); 1.19 } 1.20 1.21 if ( pagetable_val(ed->arch.guest_table_user) != 0 ) 1.22 { 1.23 - struct pfn_info *page = 1.24 - &frame_table[pagetable_val(ed->arch.guest_table_user) 1.25 - >> PAGE_SHIFT]; 1.26 - 1.27 - if ( shadow_mode_enabled(d) ) 1.28 - put_page(page); 1.29 - else 1.30 - put_page_and_type(page); 1.31 - 1.32 + (shadow_mode_enabled(d) ? put_page : put_page_and_type) 1.33 + (&frame_table[pagetable_val( 1.34 + ed->arch.guest_table_user) >> PAGE_SHIFT]); 1.35 ed->arch.guest_table_user = mk_pagetable(0); 1.36 } 1.37 1.38 vmx_relinquish_resources(ed); 1.39 } 1.40 1.41 - /* Exit shadow mode before deconstructing final guest page table. */ 1.42 - shadow_mode_destroy(d); 1.43 + shadow_mode_disable(d); 1.44 1.45 /* 1.46 * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
2.1 --- a/xen/arch/x86/shadow.c Thu Apr 21 22:33:54 2005 +0000 2.2 +++ b/xen/arch/x86/shadow.c Fri Apr 22 09:17:26 2005 +0000 2.3 @@ -1111,9 +1111,17 @@ static void free_out_of_sync_entries(str 2.4 d->arch.out_of_sync_extras_count); 2.5 } 2.6 2.7 -void shadow_mode_destroy(struct domain *d) 2.8 +void __shadow_mode_disable(struct domain *d) 2.9 { 2.10 - shadow_lock(d); 2.11 + if ( unlikely(!shadow_mode_enabled(d)) ) 2.12 + return; 2.13 + 2.14 + /* 2.15 + * Currently this does not fix up page ref counts, so it is valid to call 2.16 + * only when a domain is being destroyed. 2.17 + */ 2.18 + BUG_ON(!test_bit(DF_DYING, &d->d_flags)); 2.19 + d->arch.shadow_tainted_refcnts = 1; 2.20 2.21 free_shadow_pages(d); 2.22 free_writable_pte_predictions(d); 2.23 @@ -1135,26 +1143,6 @@ void shadow_mode_destroy(struct domain * 2.24 2.25 free_shadow_ht_entries(d); 2.26 free_out_of_sync_entries(d); 2.27 - 2.28 - shadow_unlock(d); 2.29 -} 2.30 - 2.31 -void __shadow_mode_disable(struct domain *d) 2.32 -{ 2.33 - // This needs rethinking for the full shadow mode stuff. 2.34 - // 2.35 - // Among other things, ref counts need to be restored to a sensible 2.36 - // state for a non-shadow-mode guest... 2.37 - // This is probably easiest to do by stealing code from audit_domain(). 
2.38 - // 2.39 - BUG(); 2.40 - 2.41 - free_shadow_pages(d); 2.42 - 2.43 - d->arch.shadow_mode = 0; 2.44 - 2.45 - free_shadow_ht_entries(d); 2.46 - free_out_of_sync_entries(d); 2.47 } 2.48 2.49 static int shadow_mode_table_op( 2.50 @@ -1293,7 +1281,7 @@ int shadow_mode_control(struct domain *d 2.51 switch ( op ) 2.52 { 2.53 case DOM0_SHADOW_CONTROL_OP_OFF: 2.54 - shadow_mode_disable(d); 2.55 + __shadow_mode_disable(d); 2.56 break; 2.57 2.58 case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST: 2.59 @@ -1303,12 +1291,14 @@ int shadow_mode_control(struct domain *d 2.60 2.61 case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY: 2.62 free_shadow_pages(d); 2.63 - rc = __shadow_mode_enable(d, d->arch.shadow_mode|SHM_enable|SHM_log_dirty); 2.64 + rc = __shadow_mode_enable( 2.65 + d, d->arch.shadow_mode|SHM_enable|SHM_log_dirty); 2.66 break; 2.67 2.68 case DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE: 2.69 free_shadow_pages(d); 2.70 - rc = __shadow_mode_enable(d, d->arch.shadow_mode|SHM_enable|SHM_translate); 2.71 + rc = __shadow_mode_enable( 2.72 + d, d->arch.shadow_mode|SHM_enable|SHM_translate); 2.73 break; 2.74 2.75 default: 2.76 @@ -2166,6 +2156,9 @@ u32 shadow_remove_all_access(struct doma 2.77 struct shadow_status *a; 2.78 u32 count = 0; 2.79 2.80 + if ( unlikely(!shadow_mode_enabled(d)) ) 2.81 + return 0; 2.82 + 2.83 ASSERT(spin_is_locked(&d->arch.shadow_lock)); 2.84 perfc_incrc(remove_all_access); 2.85
3.1 --- a/xen/arch/x86/vmx_vmcs.c Thu Apr 21 22:33:54 2005 +0000 3.2 +++ b/xen/arch/x86/vmx_vmcs.c Fri Apr 22 09:17:26 2005 +0000 3.3 @@ -160,27 +160,11 @@ void vmx_do_launch(struct exec_domain *e 3.4 unsigned int tr, cpu, error = 0; 3.5 struct host_execution_env host_env; 3.6 struct Xgt_desc_struct desc; 3.7 - struct list_head *list_ent; 3.8 - unsigned long i, pfn = 0; 3.9 + unsigned long pfn = 0; 3.10 struct pfn_info *page; 3.11 execution_context_t *ec = get_execution_context(); 3.12 - struct domain *d = ed->domain; 3.13 3.14 - cpu = smp_processor_id(); 3.15 - d->arch.min_pfn = d->arch.max_pfn = 0; 3.16 - 3.17 - spin_lock(&d->page_alloc_lock); 3.18 - list_ent = d->page_list.next; 3.19 - 3.20 - for ( i = 0; list_ent != &d->page_list; i++ ) 3.21 - { 3.22 - pfn = list_entry(list_ent, struct pfn_info, list) - frame_table; 3.23 - d->arch.min_pfn = min(d->arch.min_pfn, pfn); 3.24 - d->arch.max_pfn = max(d->arch.max_pfn, pfn); 3.25 - list_ent = frame_table[pfn].list.next; 3.26 - } 3.27 - 3.28 - spin_unlock(&d->page_alloc_lock); 3.29 + cpu = smp_processor_id(); 3.30 3.31 page = (struct pfn_info *) alloc_domheap_page(NULL); 3.32 pfn = (unsigned long) (page - frame_table);
4.1 --- a/xen/common/page_alloc.c Thu Apr 21 22:33:54 2005 +0000 4.2 +++ b/xen/common/page_alloc.c Fri Apr 22 09:17:26 2005 +0000 4.3 @@ -562,6 +562,8 @@ void free_domheap_pages(struct pfn_info 4.4 for ( i = 0; i < (1 << order); i++ ) 4.5 { 4.6 shadow_drop_references(d, &pg[i]); 4.7 + ASSERT(((pg[i].u.inuse.type_info & PGT_count_mask) == 0) || 4.8 + shadow_tainted_refcnts(d)); 4.9 pg[i].tlbflush_timestamp = tlbflush_current_time(); 4.10 pg[i].u.free.cpu_mask = d->cpuset; 4.11 list_del(&pg[i].list);
5.1 --- a/xen/include/asm-x86/domain.h Thu Apr 21 22:33:54 2005 +0000 5.2 +++ b/xen/include/asm-x86/domain.h Fri Apr 22 09:17:26 2005 +0000 5.3 @@ -26,11 +26,11 @@ struct arch_domain 5.4 /* I/O-port access bitmap mask. */ 5.5 u8 *iobmp_mask; /* Address of IO bitmap mask, or NULL. */ 5.6 5.7 - /* shadow mode status and controls */ 5.8 + /* Shadow mode status and controls. */ 5.9 unsigned int shadow_mode; /* flags to control shadow table operation */ 5.10 spinlock_t shadow_lock; 5.11 - unsigned long min_pfn; /* min host physical */ 5.12 - unsigned long max_pfn; /* max host physical */ 5.13 + /* Shadow mode has tainted page reference counts? */ 5.14 + unsigned int shadow_tainted_refcnts; 5.15 5.16 /* shadow hashtable */ 5.17 struct shadow_status *shadow_ht;
6.1 --- a/xen/include/asm-x86/shadow.h Thu Apr 21 22:33:54 2005 +0000 6.2 +++ b/xen/include/asm-x86/shadow.h Fri Apr 22 09:17:26 2005 +0000 6.3 @@ -42,6 +42,8 @@ 6.4 #define shadow_mode_translate(_d) ((_d)->arch.shadow_mode & SHM_translate) 6.5 #define shadow_mode_external(_d) ((_d)->arch.shadow_mode & SHM_external) 6.6 6.7 +#define shadow_tainted_refcnts(_d) ((_d)->arch.shadow_tainted_refcnts) 6.8 + 6.9 #define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START) 6.10 #define __shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \ 6.11 (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)))) 6.12 @@ -173,12 +175,14 @@ shadow_sync_va(struct exec_domain *ed, u 6.13 extern void __shadow_mode_disable(struct domain *d); 6.14 static inline void shadow_mode_disable(struct domain *d) 6.15 { 6.16 - if ( shadow_mode_enabled(d) ) 6.17 + if ( unlikely(shadow_mode_enabled(d)) ) 6.18 + { 6.19 + shadow_lock(d); 6.20 __shadow_mode_disable(d); 6.21 + shadow_unlock(d); 6.22 + } 6.23 } 6.24 6.25 -extern void shadow_mode_destroy(struct domain *d); 6.26 - 6.27 /************************************************************************/ 6.28 6.29 #define __mfn_to_gpfn(_d, mfn) \
7.1 --- a/xen/include/xen/shadow.h Thu Apr 21 22:33:54 2005 +0000 7.2 +++ b/xen/include/xen/shadow.h Fri Apr 22 09:17:26 2005 +0000 7.3 @@ -12,6 +12,7 @@ 7.4 7.5 #define shadow_drop_references(_d, _p) ((void)0) 7.6 #define shadow_sync_and_drop_references(_d, _p) ((void)0) 7.7 +#define shadow_tainted_refcnts(_d) (0) 7.8 7.9 #endif 7.10