debuggers.hg
changeset 17078:ec1fa84147ad
vmx realmode: __hvm_copy() should not hvm_get_segment_register() when
we are emulating. Firstly it is bogus, since VMCS segment state is
stale in this context. Secondly, real mode and real->protected
contexts are rather unlikely to happen with SS.DPL == 3.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
we are emulating. Firstly it is bogus, since VMCS segment state is
stale in this context. Secondly, real mode and real->protected
contexts are rather unlikely to happen with SS.DPL == 3.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Wed Feb 13 16:35:51 2008 +0000 (2008-02-13) |
parents | 4c64376d439d |
children | e7085b40dc08 |
files | xen/arch/x86/hvm/hvm.c |
line diff
1.1 --- a/xen/arch/x86/hvm/hvm.c Wed Feb 13 16:28:38 2008 +0000 1.2 +++ b/xen/arch/x86/hvm/hvm.c Wed Feb 13 16:35:51 2008 +0000 1.3 @@ -1386,6 +1386,7 @@ void hvm_task_switch( 1.4 static enum hvm_copy_result __hvm_copy( 1.5 void *buf, paddr_t addr, int size, int dir, int virt, int fetch) 1.6 { 1.7 + struct vcpu *curr = current; 1.8 unsigned long gfn, mfn; 1.9 p2m_type_t p2mt; 1.10 char *p; 1.11 @@ -1394,12 +1395,22 @@ static enum hvm_copy_result __hvm_copy( 1.12 1.13 if ( virt ) 1.14 { 1.15 - struct segment_register sreg; 1.16 - hvm_get_segment_register(current, x86_seg_ss, &sreg); 1.17 - if ( sreg.attr.fields.dpl == 3 ) 1.18 - pfec |= PFEC_user_mode; 1.19 + /* 1.20 + * We cannot use hvm_get_segment_register() while executing in 1.21 + * vmx_realmode() as segment register state is cached. Furthermore, 1.22 + * VMREADs on every data access hurts emulation performance. 1.23 + */ 1.24 + if ( !curr->arch.hvm_vmx.vmxemul ) 1.25 + { 1.26 + struct segment_register sreg; 1.27 + hvm_get_segment_register(curr, x86_seg_ss, &sreg); 1.28 + if ( sreg.attr.fields.dpl == 3 ) 1.29 + pfec |= PFEC_user_mode; 1.30 + } 1.31 + 1.32 if ( dir ) 1.33 pfec |= PFEC_write_access; 1.34 + 1.35 if ( fetch ) 1.36 pfec |= PFEC_insn_fetch; 1.37 } 1.38 @@ -1411,7 +1422,7 @@ static enum hvm_copy_result __hvm_copy( 1.39 1.40 if ( virt ) 1.41 { 1.42 - gfn = paging_gva_to_gfn(current, addr, &pfec); 1.43 + gfn = paging_gva_to_gfn(curr, addr, &pfec); 1.44 if ( gfn == INVALID_GFN ) 1.45 { 1.46 if ( virt == 2 ) /* 2 means generate a fault */ 1.47 @@ -1435,7 +1446,7 @@ static enum hvm_copy_result __hvm_copy( 1.48 if ( dir ) 1.49 { 1.50 memcpy(p, buf, count); /* dir == TRUE: *to* guest */ 1.51 - paging_mark_dirty(current->domain, mfn); 1.52 + paging_mark_dirty(curr->domain, mfn); 1.53 } 1.54 else 1.55 memcpy(buf, p, count); /* dir == FALSE: *from guest */