debuggers.hg
changeset 16506:9f61a0add5b6
x86_emulate: Emulate CPUID and HLT.
vmx realmode: Fix decode & emulate loop, add hooks for CPUID, HLT and
WBINVD. Also do not hook realmode entry off of vmentry failure any
more.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Mon Nov 26 15:32:54 2007 +0000 (2007-11-26) |
parents | dc3a566f9e44 |
children | c5332fa8b68d 4ac315e33f88 |
files | xen/arch/x86/hvm/vmx/realmode.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vmx/x86_32/exits.S xen/arch/x86/hvm/vmx/x86_64/exits.S xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_64/asm-offsets.c xen/arch/x86/x86_emulate.c xen/include/asm-x86/hvm/vmx/vmx.h xen/include/asm-x86/x86_emulate.h |
line diff
1.1 --- a/xen/arch/x86/hvm/vmx/realmode.c Mon Nov 26 13:54:45 2007 +0000 1.2 +++ b/xen/arch/x86/hvm/vmx/realmode.c Mon Nov 26 15:32:54 2007 +0000 1.3 @@ -29,6 +29,16 @@ struct realmode_emulate_ctxt { 1.4 unsigned long insn_buf_eip; 1.5 1.6 struct segment_register seg_reg[10]; 1.7 + 1.8 + union { 1.9 + struct { 1.10 + unsigned int hlt:1; 1.11 + unsigned int mov_ss:1; 1.12 + unsigned int sti:1; 1.13 + unsigned int exn_raised:1; 1.14 + } flags; 1.15 + unsigned int flag_word; 1.16 + }; 1.17 }; 1.18 1.19 static void realmode_deliver_exception( 1.20 @@ -251,14 +261,8 @@ realmode_write_segment( 1.21 struct realmode_emulate_ctxt *rm_ctxt = 1.22 container_of(ctxt, struct realmode_emulate_ctxt, ctxt); 1.23 memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register)); 1.24 - 1.25 if ( seg == x86_seg_ss ) 1.26 - { 1.27 - u32 intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO); 1.28 - intr_shadow ^= VMX_INTR_SHADOW_MOV_SS; 1.29 - __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow); 1.30 - } 1.31 - 1.32 + rm_ctxt->flags.mov_ss = 1; 1.33 return X86EMUL_OKAY; 1.34 } 1.35 1.36 @@ -337,13 +341,37 @@ static int realmode_write_rflags( 1.37 unsigned long val, 1.38 struct x86_emulate_ctxt *ctxt) 1.39 { 1.40 + struct realmode_emulate_ctxt *rm_ctxt = 1.41 + container_of(ctxt, struct realmode_emulate_ctxt, ctxt); 1.42 if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) ) 1.43 - { 1.44 - u32 intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO); 1.45 - intr_shadow ^= VMX_INTR_SHADOW_STI; 1.46 - __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow); 1.47 - } 1.48 + rm_ctxt->flags.sti = 1; 1.49 + return X86EMUL_OKAY; 1.50 +} 1.51 + 1.52 +static int realmode_wbinvd( 1.53 + struct x86_emulate_ctxt *ctxt) 1.54 +{ 1.55 + vmx_wbinvd_intercept(); 1.56 + return X86EMUL_OKAY; 1.57 +} 1.58 1.59 +static int realmode_cpuid( 1.60 + unsigned int *eax, 1.61 + unsigned int *ebx, 1.62 + unsigned int *ecx, 1.63 + unsigned int *edx, 1.64 + struct x86_emulate_ctxt *ctxt) 1.65 +{ 1.66 + 
vmx_cpuid_intercept(eax, ebx, ecx, edx); 1.67 + return X86EMUL_OKAY; 1.68 +} 1.69 + 1.70 +static int realmode_hlt( 1.71 + struct x86_emulate_ctxt *ctxt) 1.72 +{ 1.73 + struct realmode_emulate_ctxt *rm_ctxt = 1.74 + container_of(ctxt, struct realmode_emulate_ctxt, ctxt); 1.75 + rm_ctxt->flags.hlt = 1; 1.76 return X86EMUL_OKAY; 1.77 } 1.78 1.79 @@ -354,6 +382,7 @@ static int realmode_inject_hw_exception( 1.80 struct realmode_emulate_ctxt *rm_ctxt = 1.81 container_of(ctxt, struct realmode_emulate_ctxt, ctxt); 1.82 1.83 + rm_ctxt->flags.exn_raised = 1; 1.84 realmode_deliver_exception(vector, 0, rm_ctxt); 1.85 1.86 return X86EMUL_OKAY; 1.87 @@ -383,6 +412,9 @@ static struct x86_emulate_ops realmode_e 1.88 .write_io = realmode_write_io, 1.89 .read_cr = realmode_read_cr, 1.90 .write_rflags = realmode_write_rflags, 1.91 + .wbinvd = realmode_wbinvd, 1.92 + .cpuid = realmode_cpuid, 1.93 + .hlt = realmode_hlt, 1.94 .inject_hw_exception = realmode_inject_hw_exception, 1.95 .inject_sw_interrupt = realmode_inject_sw_interrupt 1.96 }; 1.97 @@ -393,6 +425,7 @@ int vmx_realmode(struct cpu_user_regs *r 1.98 struct realmode_emulate_ctxt rm_ctxt; 1.99 unsigned long intr_info; 1.100 int i, rc = 0; 1.101 + u32 intr_shadow, new_intr_shadow; 1.102 1.103 rm_ctxt.ctxt.regs = regs; 1.104 1.105 @@ -411,6 +444,9 @@ int vmx_realmode(struct cpu_user_regs *r 1.106 realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt); 1.107 } 1.108 1.109 + intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO); 1.110 + new_intr_shadow = intr_shadow; 1.111 + 1.112 while ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) && 1.113 !softirq_pending(smp_processor_id()) && 1.114 !hvm_local_events_need_delivery(curr) ) 1.115 @@ -421,8 +457,35 @@ int vmx_realmode(struct cpu_user_regs *r 1.116 (uint32_t)(rm_ctxt.seg_reg[x86_seg_cs].base + regs->eip), 1.117 sizeof(rm_ctxt.insn_buf)); 1.118 1.119 + rm_ctxt.flag_word = 0; 1.120 + 1.121 rc = x86_emulate(&rm_ctxt.ctxt, &realmode_emulator_ops); 1.122 1.123 + /* MOV-SS 
instruction toggles MOV-SS shadow, else we just clear it. */ 1.124 + if ( rm_ctxt.flags.mov_ss ) 1.125 + new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS; 1.126 + else 1.127 + new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS; 1.128 + 1.129 + /* STI instruction toggles STI shadow, else we just clear it. */ 1.130 + if ( rm_ctxt.flags.sti ) 1.131 + new_intr_shadow ^= VMX_INTR_SHADOW_STI; 1.132 + else 1.133 + new_intr_shadow &= ~VMX_INTR_SHADOW_STI; 1.134 + 1.135 + /* Update interrupt shadow information in VMCS only if it changes. */ 1.136 + if ( intr_shadow != new_intr_shadow ) 1.137 + { 1.138 + intr_shadow = new_intr_shadow; 1.139 + __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow); 1.140 + } 1.141 + 1.142 + /* HLT happens after instruction retire, if no interrupt/exception. */ 1.143 + if ( unlikely(rm_ctxt.flags.hlt) && 1.144 + !rm_ctxt.flags.exn_raised && 1.145 + !hvm_local_events_need_delivery(curr) ) 1.146 + hvm_hlt(regs->eflags); 1.147 + 1.148 if ( curr->arch.hvm_vmx.real_mode_io_in_progress ) 1.149 { 1.150 rc = 0;
2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Mon Nov 26 13:54:45 2007 +0000 2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Nov 26 15:32:54 2007 +0000 2.3 @@ -1055,10 +1055,7 @@ static void vmx_update_guest_cr(struct v 2.4 2.5 v->arch.hvm_vcpu.hw_cr[0] = 2.6 v->arch.hvm_vcpu.guest_cr[0] | 2.7 - X86_CR0_NE | X86_CR0_PG | X86_CR0_WP; 2.8 -#ifdef VMXASSIST 2.9 - v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_PE; 2.10 -#endif 2.11 + X86_CR0_NE | X86_CR0_PG | X86_CR0_WP | X86_CR0_PE; 2.12 __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]); 2.13 __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]); 2.14 break; 2.15 @@ -1254,10 +1251,11 @@ static void vmx_do_no_device_fault(void) 2.16 } 2.17 2.18 #define bitmaskof(idx) (1U << ((idx) & 31)) 2.19 -static void vmx_do_cpuid(struct cpu_user_regs *regs) 2.20 +void vmx_cpuid_intercept( 2.21 + unsigned int *eax, unsigned int *ebx, 2.22 + unsigned int *ecx, unsigned int *edx) 2.23 { 2.24 - unsigned int input = regs->eax; 2.25 - unsigned int eax, ebx, ecx, edx; 2.26 + unsigned int input = *eax; 2.27 2.28 #ifdef VMXASSIST 2.29 if ( input == 0x40000003 ) 2.30 @@ -1266,7 +1264,7 @@ static void vmx_do_cpuid(struct cpu_user 2.31 * NB. Unsupported interface for private use of VMXASSIST only. 2.32 * Note that this leaf lives at <max-hypervisor-leaf> + 1. 
2.33 */ 2.34 - u64 value = ((u64)regs->edx << 32) | (u32)regs->ecx; 2.35 + u64 value = ((u64)*edx << 32) | (u32)*ecx; 2.36 p2m_type_t p2mt; 2.37 unsigned long mfn; 2.38 struct vcpu *v = current; 2.39 @@ -1290,58 +1288,70 @@ static void vmx_do_cpuid(struct cpu_user 2.40 unmap_domain_page(p); 2.41 2.42 gdprintk(XENLOG_INFO, "Output value is 0x%"PRIx64".\n", value); 2.43 - regs->ecx = (u32)value; 2.44 - regs->edx = (u32)(value >> 32); 2.45 + *ecx = (u32)value; 2.46 + *edx = (u32)(value >> 32); 2.47 return; 2.48 } 2.49 #endif 2.50 2.51 - hvm_cpuid(input, &eax, &ebx, &ecx, &edx); 2.52 + hvm_cpuid(input, eax, ebx, ecx, edx); 2.53 2.54 switch ( input ) 2.55 { 2.56 case 0x00000001: 2.57 - ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED; 2.58 - ebx &= NUM_THREADS_RESET_MASK; 2.59 - ecx &= ~(bitmaskof(X86_FEATURE_VMXE) | 2.60 - bitmaskof(X86_FEATURE_EST) | 2.61 - bitmaskof(X86_FEATURE_TM2) | 2.62 - bitmaskof(X86_FEATURE_CID) | 2.63 - bitmaskof(X86_FEATURE_PDCM) | 2.64 - bitmaskof(X86_FEATURE_DSCPL)); 2.65 - edx &= ~(bitmaskof(X86_FEATURE_HT) | 2.66 - bitmaskof(X86_FEATURE_ACPI) | 2.67 - bitmaskof(X86_FEATURE_ACC) | 2.68 - bitmaskof(X86_FEATURE_DS)); 2.69 + *ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED; 2.70 + *ebx &= NUM_THREADS_RESET_MASK; 2.71 + *ecx &= ~(bitmaskof(X86_FEATURE_VMXE) | 2.72 + bitmaskof(X86_FEATURE_EST) | 2.73 + bitmaskof(X86_FEATURE_TM2) | 2.74 + bitmaskof(X86_FEATURE_CID) | 2.75 + bitmaskof(X86_FEATURE_PDCM) | 2.76 + bitmaskof(X86_FEATURE_DSCPL)); 2.77 + *edx &= ~(bitmaskof(X86_FEATURE_HT) | 2.78 + bitmaskof(X86_FEATURE_ACPI) | 2.79 + bitmaskof(X86_FEATURE_ACC) | 2.80 + bitmaskof(X86_FEATURE_DS)); 2.81 break; 2.82 2.83 case 0x00000004: 2.84 - cpuid_count(input, regs->ecx, &eax, &ebx, &ecx, &edx); 2.85 - eax &= NUM_CORES_RESET_MASK; 2.86 + cpuid_count(input, *ecx, eax, ebx, ecx, edx); 2.87 + *eax &= NUM_CORES_RESET_MASK; 2.88 break; 2.89 2.90 case 0x00000006: 2.91 case 0x00000009: 2.92 case 0x0000000A: 2.93 - eax = ebx = ecx = edx = 0; 2.94 + *eax = *ebx = *ecx = *edx = 
0; 2.95 break; 2.96 2.97 case 0x80000001: 2.98 /* Only a few features are advertised in Intel's 0x80000001. */ 2.99 - ecx &= (bitmaskof(X86_FEATURE_LAHF_LM)); 2.100 - edx &= (bitmaskof(X86_FEATURE_NX) | 2.101 - bitmaskof(X86_FEATURE_LM) | 2.102 - bitmaskof(X86_FEATURE_SYSCALL)); 2.103 + *ecx &= (bitmaskof(X86_FEATURE_LAHF_LM)); 2.104 + *edx &= (bitmaskof(X86_FEATURE_NX) | 2.105 + bitmaskof(X86_FEATURE_LM) | 2.106 + bitmaskof(X86_FEATURE_SYSCALL)); 2.107 break; 2.108 } 2.109 2.110 + HVMTRACE_3D(CPUID, current, input, 2.111 + ((uint64_t)*eax << 32) | *ebx, ((uint64_t)*ecx << 32) | *edx); 2.112 +} 2.113 + 2.114 +static void vmx_do_cpuid(struct cpu_user_regs *regs) 2.115 +{ 2.116 + unsigned int eax, ebx, ecx, edx; 2.117 + 2.118 + eax = regs->eax; 2.119 + ebx = regs->ebx; 2.120 + ecx = regs->ecx; 2.121 + edx = regs->edx; 2.122 + 2.123 + vmx_cpuid_intercept(&eax, &ebx, &ecx, &edx); 2.124 + 2.125 regs->eax = eax; 2.126 regs->ebx = ebx; 2.127 regs->ecx = ecx; 2.128 regs->edx = edx; 2.129 - 2.130 - HVMTRACE_3D(CPUID, current, input, 2.131 - ((uint64_t)eax << 32) | ebx, ((uint64_t)ecx << 32) | edx); 2.132 } 2.133 2.134 #define CASE_GET_REG_P(REG, reg) \ 2.135 @@ -2696,20 +2706,23 @@ static void wbinvd_ipi(void *info) 2.136 wbinvd(); 2.137 } 2.138 2.139 +void vmx_wbinvd_intercept(void) 2.140 +{ 2.141 + if ( list_empty(&(domain_hvm_iommu(current->domain)->pdev_list)) ) 2.142 + return; 2.143 + 2.144 + if ( cpu_has_wbinvd_exiting ) 2.145 + on_each_cpu(wbinvd_ipi, NULL, 1, 1); 2.146 + else 2.147 + wbinvd(); 2.148 +} 2.149 + 2.150 static void vmx_failed_vmentry(unsigned int exit_reason, 2.151 struct cpu_user_regs *regs) 2.152 { 2.153 unsigned int failed_vmentry_reason = (uint16_t)exit_reason; 2.154 unsigned long exit_qualification = __vmread(EXIT_QUALIFICATION); 2.155 2.156 -#ifndef VMXASSIST 2.157 - if ( (failed_vmentry_reason == EXIT_REASON_INVALID_GUEST_STATE) && 2.158 - (exit_qualification == 0) && 2.159 - !(current->arch.hvm_vcpu.hw_cr[0] & X86_CR0_PE) && 2.160 - 
(vmx_realmode(regs) == 0) ) 2.161 - return; 2.162 -#endif 2.163 - 2.164 printk("Failed vm entry (exit reason 0x%x) ", exit_reason); 2.165 switch ( failed_vmentry_reason ) 2.166 { 2.167 @@ -2976,24 +2989,7 @@ asmlinkage void vmx_vmexit_handler(struc 2.168 { 2.169 inst_len = __get_instruction_length(); /* Safe: INVD, WBINVD */ 2.170 __update_guest_eip(inst_len); 2.171 - if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) ) 2.172 - { 2.173 - if ( cpu_has_wbinvd_exiting ) 2.174 - { 2.175 - on_each_cpu(wbinvd_ipi, NULL, 1, 1); 2.176 - } 2.177 - else 2.178 - { 2.179 - wbinvd(); 2.180 - /* Disable further WBINVD intercepts. */ 2.181 - if ( (exit_reason == EXIT_REASON_WBINVD) && 2.182 - (vmx_cpu_based_exec_control & 2.183 - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ) 2.184 - __vmwrite(SECONDARY_VM_EXEC_CONTROL, 2.185 - vmx_secondary_exec_control & 2.186 - ~SECONDARY_EXEC_WBINVD_EXITING); 2.187 - } 2.188 - } 2.189 + vmx_wbinvd_intercept(); 2.190 break; 2.191 } 2.192
3.1 --- a/xen/arch/x86/hvm/vmx/x86_32/exits.S Mon Nov 26 13:54:45 2007 +0000 3.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S Mon Nov 26 15:32:54 2007 +0000 3.3 @@ -103,7 +103,12 @@ ENTRY(vmx_asm_do_vmentry) 3.4 movl $GUEST_RFLAGS,%eax 3.5 VMWRITE(UREGS_eflags) 3.6 3.7 - cmpl $0,VCPU_vmx_launched(%ebx) 3.8 +#ifndef VMXASSIST 3.9 + testb $X86_CR0_PE,VCPU_hvm_guest_cr0(%ebx) 3.10 + jz vmx_goto_realmode 3.11 +#endif 3.12 + 3.13 + cmpb $0,VCPU_vmx_launched(%ebx) 3.14 je vmx_launch 3.15 3.16 /*vmx_resume:*/ 3.17 @@ -114,9 +119,19 @@ ENTRY(vmx_asm_do_vmentry) 3.18 ud2 3.19 3.20 vmx_launch: 3.21 - movl $1,VCPU_vmx_launched(%ebx) 3.22 + movb $1,VCPU_vmx_launched(%ebx) 3.23 HVM_RESTORE_ALL_NOSEGREGS 3.24 VMLAUNCH 3.25 pushf 3.26 call vm_launch_fail 3.27 ud2 3.28 + 3.29 +#ifndef VMXASSIST 3.30 +vmx_goto_realmode: 3.31 + sti 3.32 + movl %esp,%eax 3.33 + push %eax 3.34 + call vmx_realmode 3.35 + addl $4,%esp 3.36 + jmp vmx_asm_do_vmentry 3.37 +#endif
4.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S Mon Nov 26 13:54:45 2007 +0000 4.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S Mon Nov 26 15:32:54 2007 +0000 4.3 @@ -121,7 +121,12 @@ ENTRY(vmx_asm_do_vmentry) 4.4 movl $GUEST_RFLAGS,%eax 4.5 VMWRITE(UREGS_eflags) 4.6 4.7 - cmpl $0,VCPU_vmx_launched(%rbx) 4.8 +#ifndef VMXASSIST 4.9 + testb $X86_CR0_PE,VCPU_hvm_guest_cr0(%rbx) 4.10 + jz vmx_goto_realmode 4.11 +#endif 4.12 + 4.13 + cmpb $0,VCPU_vmx_launched(%rbx) 4.14 je vmx_launch 4.15 4.16 /*vmx_resume:*/ 4.17 @@ -132,9 +137,17 @@ ENTRY(vmx_asm_do_vmentry) 4.18 ud2 4.19 4.20 vmx_launch: 4.21 - movl $1,VCPU_vmx_launched(%rbx) 4.22 + movb $1,VCPU_vmx_launched(%rbx) 4.23 HVM_RESTORE_ALL_NOSEGREGS 4.24 VMLAUNCH 4.25 pushfq 4.26 call vm_launch_fail 4.27 ud2 4.28 + 4.29 +#ifndef VMXASSIST 4.30 +vmx_goto_realmode: 4.31 + sti 4.32 + movq %rsp,%rdi 4.33 + call vmx_realmode 4.34 + jmp vmx_asm_do_vmentry 4.35 +#endif
5.1 --- a/xen/arch/x86/x86_32/asm-offsets.c Mon Nov 26 13:54:45 2007 +0000 5.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c Mon Nov 26 15:32:54 2007 +0000 5.3 @@ -83,6 +83,7 @@ void __dummy__(void) 5.4 BLANK(); 5.5 5.6 OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched); 5.7 + OFFSET(VCPU_hvm_guest_cr0, struct vcpu, arch.hvm_vcpu.guest_cr[0]); 5.8 OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]); 5.9 BLANK(); 5.10
6.1 --- a/xen/arch/x86/x86_64/asm-offsets.c Mon Nov 26 13:54:45 2007 +0000 6.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c Mon Nov 26 15:32:54 2007 +0000 6.3 @@ -98,6 +98,7 @@ void __dummy__(void) 6.4 BLANK(); 6.5 6.6 OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched); 6.7 + OFFSET(VCPU_hvm_guest_cr0, struct vcpu, arch.hvm_vcpu.guest_cr[0]); 6.8 OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]); 6.9 BLANK(); 6.10
7.1 --- a/xen/arch/x86/x86_emulate.c Mon Nov 26 13:54:45 2007 +0000 7.2 +++ b/xen/arch/x86/x86_emulate.c Mon Nov 26 15:32:54 2007 +0000 7.3 @@ -167,7 +167,8 @@ static uint8_t opcode_table[256] = { 7.4 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 7.5 /* 0xF0 - 0xF7 */ 7.6 0, ImplicitOps, 0, 0, 7.7 - 0, ImplicitOps, ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM, 7.8 + ImplicitOps, ImplicitOps, 7.9 + ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM, 7.10 /* 0xF8 - 0xFF */ 7.11 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps, 7.12 ImplicitOps, ImplicitOps, ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM 7.13 @@ -225,7 +226,8 @@ static uint8_t twobyte_table[256] = { 7.14 ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 7.15 ByteOp|DstMem|SrcNone|ModRM|Mov, ByteOp|DstMem|SrcNone|ModRM|Mov, 7.16 /* 0xA0 - 0xA7 */ 7.17 - ImplicitOps, ImplicitOps, 0, DstBitBase|SrcReg|ModRM, 0, 0, 0, 0, 7.18 + ImplicitOps, ImplicitOps, ImplicitOps, DstBitBase|SrcReg|ModRM, 7.19 + 0, 0, 0, 0, 7.20 /* 0xA8 - 0xAF */ 7.21 ImplicitOps, ImplicitOps, 0, DstBitBase|SrcReg|ModRM, 7.22 0, 0, 0, DstReg|SrcMem|ModRM, 7.23 @@ -2450,6 +2452,12 @@ x86_emulate( 7.24 src.val = EXC_DB; 7.25 goto swint; 7.26 7.27 + case 0xf4: /* hlt */ 7.28 + fail_if(ops->hlt == NULL); 7.29 + if ( (rc = ops->hlt(ctxt)) != 0 ) 7.30 + goto done; 7.31 + break; 7.32 + 7.33 case 0xf5: /* cmc */ 7.34 _regs.eflags ^= EFLG_CF; 7.35 break; 7.36 @@ -2783,6 +2791,17 @@ x86_emulate( 7.37 src.val = x86_seg_fs; 7.38 goto pop_seg; 7.39 7.40 + case 0xa2: /* cpuid */ { 7.41 + unsigned int eax = _regs.eax, ebx = _regs.ebx; 7.42 + unsigned int ecx = _regs.ecx, edx = _regs.edx; 7.43 + fail_if(ops->cpuid == NULL); 7.44 + if ( (rc = ops->cpuid(&eax, &ebx, &ecx, &edx, ctxt)) != 0 ) 7.45 + goto done; 7.46 + _regs.eax = eax; _regs.ebx = ebx; 7.47 + _regs.ecx = ecx; _regs.edx = edx; 7.48 + break; 7.49 + } 7.50 + 7.51 case 0xa8: /* push %%gs */ 7.52 src.val = x86_seg_gs; 7.53 goto push_seg;
8.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h Mon Nov 26 13:54:45 2007 +0000 8.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h Mon Nov 26 15:32:54 2007 +0000 8.3 @@ -33,6 +33,10 @@ void vmx_intr_assist(void); 8.4 void vmx_do_resume(struct vcpu *); 8.5 void set_guest_time(struct vcpu *v, u64 gtime); 8.6 void vmx_vlapic_msr_changed(struct vcpu *v); 8.7 +void vmx_cpuid_intercept( 8.8 + unsigned int *eax, unsigned int *ebx, 8.9 + unsigned int *ecx, unsigned int *edx); 8.10 +void vmx_wbinvd_intercept(void); 8.11 int vmx_realmode(struct cpu_user_regs *regs); 8.12 int vmx_realmode_io_complete(void); 8.13
9.1 --- a/xen/include/asm-x86/x86_emulate.h Mon Nov 26 13:54:45 2007 +0000 9.2 +++ b/xen/include/asm-x86/x86_emulate.h Mon Nov 26 15:32:54 2007 +0000 9.3 @@ -275,6 +275,18 @@ struct x86_emulate_ops 9.4 int (*wbinvd)( 9.5 struct x86_emulate_ctxt *ctxt); 9.6 9.7 + /* cpuid: Emulate CPUID via given set of EAX-EDX inputs/outputs. */ 9.8 + int (*cpuid)( 9.9 + unsigned int *eax, 9.10 + unsigned int *ebx, 9.11 + unsigned int *ecx, 9.12 + unsigned int *edx, 9.13 + struct x86_emulate_ctxt *ctxt); 9.14 + 9.15 + /* hlt: Emulate HLT. */ 9.16 + int (*hlt)( 9.17 + struct x86_emulate_ctxt *ctxt); 9.18 + 9.19 /* inject_hw_exception */ 9.20 int (*inject_hw_exception)( 9.21 uint8_t vector,