debuggers.hg
changeset 17612:e6f20d5ed5fe
x86 hvm: Simplify and consolidate logic for HLT emulation.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Tue May 06 11:05:00 2008 +0100 (2008-05-06) |
parents | 777f294e3be8 |
children | 01aa7c088e98 |
files | xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c |
line diff
1.1 --- a/xen/arch/x86/hvm/hvm.c Tue May 06 10:25:34 2008 +0100 1.2 +++ b/xen/arch/x86/hvm/hvm.c Tue May 06 11:05:00 2008 +0100 1.3 @@ -46,6 +46,7 @@ 1.4 #include <asm/hvm/vpt.h> 1.5 #include <asm/hvm/support.h> 1.6 #include <asm/hvm/cacheattr.h> 1.7 +#include <asm/hvm/trace.h> 1.8 #include <public/sched.h> 1.9 #include <public/hvm/ioreq.h> 1.10 #include <public/version.h> 1.11 @@ -739,15 +740,22 @@ void hvm_send_assist_req(struct vcpu *v) 1.12 1.13 void hvm_hlt(unsigned long rflags) 1.14 { 1.15 + struct vcpu *curr = current; 1.16 + 1.17 + if ( hvm_event_pending(curr) ) 1.18 + return; 1.19 + 1.20 /* 1.21 * If we halt with interrupts disabled, that's a pretty sure sign that we 1.22 * want to shut down. In a real processor, NMIs are the only way to break 1.23 * out of this. 1.24 */ 1.25 if ( unlikely(!(rflags & X86_EFLAGS_IF)) ) 1.26 - return hvm_vcpu_down(current); 1.27 + return hvm_vcpu_down(curr); 1.28 1.29 do_sched_op_compat(SCHEDOP_block, 0); 1.30 + 1.31 + HVMTRACE_1D(HLT, curr, /* pending = */ vcpu_runnable(curr)); 1.32 } 1.33 1.34 void hvm_triple_fault(void)
2.1 --- a/xen/arch/x86/hvm/svm/svm.c Tue May 06 10:25:34 2008 +0100 2.2 +++ b/xen/arch/x86/hvm/svm/svm.c Tue May 06 11:05:00 2008 +0100 2.3 @@ -1099,25 +1099,13 @@ static void svm_do_msr_access(struct cpu 2.4 static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb, 2.5 struct cpu_user_regs *regs) 2.6 { 2.7 - struct vcpu *curr = current; 2.8 - struct hvm_intack intack = hvm_vcpu_has_pending_irq(curr); 2.9 unsigned int inst_len; 2.10 2.11 - inst_len = __get_instruction_length(curr, INSTR_HLT, NULL); 2.12 + inst_len = __get_instruction_length(current, INSTR_HLT, NULL); 2.13 if ( inst_len == 0 ) 2.14 return; 2.15 __update_guest_eip(regs, inst_len); 2.16 2.17 - /* Check for pending exception or new interrupt. */ 2.18 - if ( vmcb->eventinj.fields.v || 2.19 - ((intack.source != hvm_intsrc_none) && 2.20 - !hvm_interrupt_blocked(current, intack)) ) 2.21 - { 2.22 - HVMTRACE_1D(HLT, curr, /*int pending=*/ 1); 2.23 - return; 2.24 - } 2.25 - 2.26 - HVMTRACE_1D(HLT, curr, /*int pending=*/ 0); 2.27 hvm_hlt(regs->eflags); 2.28 } 2.29
3.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Tue May 06 10:25:34 2008 +0100 3.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Tue May 06 11:05:00 2008 +0100 3.3 @@ -1857,22 +1857,6 @@ gp_fault: 3.4 return X86EMUL_EXCEPTION; 3.5 } 3.6 3.7 -static void vmx_do_hlt(struct cpu_user_regs *regs) 3.8 -{ 3.9 - unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO); 3.10 - struct vcpu *curr = current; 3.11 - 3.12 - /* Check for pending exception. */ 3.13 - if ( intr_info & INTR_INFO_VALID_MASK ) 3.14 - { 3.15 - HVMTRACE_1D(HLT, curr, /*int pending=*/ 1); 3.16 - return; 3.17 - } 3.18 - 3.19 - HVMTRACE_1D(HLT, curr, /*int pending=*/ 0); 3.20 - hvm_hlt(regs->eflags); 3.21 -} 3.22 - 3.23 static void vmx_do_extint(struct cpu_user_regs *regs) 3.24 { 3.25 unsigned int vector; 3.26 @@ -2187,7 +2171,7 @@ asmlinkage void vmx_vmexit_handler(struc 3.27 case EXIT_REASON_HLT: 3.28 inst_len = __get_instruction_length(); /* Safe: HLT */ 3.29 __update_guest_eip(inst_len); 3.30 - vmx_do_hlt(regs); 3.31 + hvm_hlt(regs->eflags); 3.32 break; 3.33 case EXIT_REASON_INVLPG: 3.34 {