debuggers.hg
changeset 14670:5c52e5ca8459
hvm: Clean up handling of exception intercepts.
Only intercept #DB/#BP if a debugger is attached.
Signed-off-by: Keir Fraser <keir@xensource.com>
author | Keir Fraser <keir@xensource.com> |
---|---|
date | Wed Mar 28 18:47:17 2007 +0100 (2007-03-28) |
parents | ffb9dda42946 |
children | eddce921d414 |
files | xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/common/domain.c xen/common/domctl.c xen/include/asm-x86/debugger.h xen/include/asm-x86/hvm/support.h xen/include/asm-x86/hvm/vcpu.h xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/hvm/vmx/vmx.h xen/include/xen/sched.h xen/include/xen/types.h |
line diff
1.1 --- a/xen/arch/x86/hvm/svm/svm.c Wed Mar 28 16:52:05 2007 +0100 1.2 +++ b/xen/arch/x86/hvm/svm/svm.c Wed Mar 28 18:47:17 2007 +0100 1.3 @@ -733,7 +733,7 @@ static void svm_stts(struct vcpu *v) 1.4 */ 1.5 if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) ) 1.6 { 1.7 - v->arch.hvm_svm.vmcb->exception_intercepts |= EXCEPTION_BITMAP_NM; 1.8 + v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device; 1.9 vmcb->cr0 |= X86_CR0_TS; 1.10 } 1.11 } 1.12 @@ -869,8 +869,6 @@ static void save_svm_cpu_user_regs(struc 1.13 { 1.14 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.15 1.16 - ASSERT(vmcb); 1.17 - 1.18 ctxt->eax = vmcb->rax; 1.19 ctxt->ss = vmcb->ss.sel; 1.20 ctxt->esp = vmcb->rsp; 1.21 @@ -884,42 +882,16 @@ static void save_svm_cpu_user_regs(struc 1.22 ctxt->ds = vmcb->ds.sel; 1.23 } 1.24 1.25 -static void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v) 1.26 +static void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs) 1.27 { 1.28 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.29 - 1.30 - regs->eip = vmcb->rip; 1.31 - regs->esp = vmcb->rsp; 1.32 - regs->eflags = vmcb->rflags; 1.33 - regs->cs = vmcb->cs.sel; 1.34 - regs->ds = vmcb->ds.sel; 1.35 - regs->es = vmcb->es.sel; 1.36 - regs->ss = vmcb->ss.sel; 1.37 -} 1.38 - 1.39 -/* XXX Use svm_load_cpu_guest_regs instead */ 1.40 -static void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs) 1.41 -{ 1.42 - struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.43 - u32 *intercepts = &v->arch.hvm_svm.vmcb->exception_intercepts; 1.44 1.45 - /* Write the guest register value into VMCB */ 1.46 vmcb->rax = regs->eax; 1.47 vmcb->ss.sel = regs->ss; 1.48 vmcb->rsp = regs->esp; 1.49 vmcb->rflags = regs->eflags | 2UL; 1.50 vmcb->cs.sel = regs->cs; 1.51 vmcb->rip = regs->eip; 1.52 - if (regs->eflags & EF_TF) 1.53 - *intercepts |= EXCEPTION_BITMAP_DB; 1.54 - else 1.55 - *intercepts &= ~EXCEPTION_BITMAP_DB; 1.56 -} 1.57 - 1.58 -static void svm_load_cpu_guest_regs( 
1.59 - struct vcpu *v, struct cpu_user_regs *regs) 1.60 -{ 1.61 - svm_load_cpu_user_regs(v, regs); 1.62 } 1.63 1.64 static void svm_ctxt_switch_from(struct vcpu *v) 1.65 @@ -943,8 +915,20 @@ static void svm_ctxt_switch_to(struct vc 1.66 svm_restore_dr(v); 1.67 } 1.68 1.69 -static void arch_svm_do_resume(struct vcpu *v) 1.70 +static void svm_do_resume(struct vcpu *v) 1.71 { 1.72 + bool_t debug_state = v->domain->debugger_attached; 1.73 + 1.74 + if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) ) 1.75 + { 1.76 + uint32_t mask = (1U << TRAP_debug) | (1U << TRAP_int3); 1.77 + v->arch.hvm_vcpu.debug_state_latch = debug_state; 1.78 + if ( debug_state ) 1.79 + v->arch.hvm_svm.vmcb->exception_intercepts |= mask; 1.80 + else 1.81 + v->arch.hvm_svm.vmcb->exception_intercepts &= ~mask; 1.82 + } 1.83 + 1.84 if ( v->arch.hvm_svm.launch_core != smp_processor_id() ) 1.85 { 1.86 v->arch.hvm_svm.launch_core = smp_processor_id(); 1.87 @@ -959,7 +943,7 @@ static int svm_vcpu_initialise(struct vc 1.88 { 1.89 int rc; 1.90 1.91 - v->arch.schedule_tail = arch_svm_do_resume; 1.92 + v->arch.schedule_tail = svm_do_resume; 1.93 v->arch.ctxt_switch_from = svm_ctxt_switch_from; 1.94 v->arch.ctxt_switch_to = svm_ctxt_switch_to; 1.95 1.96 @@ -1118,47 +1102,12 @@ static void svm_do_no_device_fault(struc 1.97 struct vcpu *v = current; 1.98 1.99 setup_fpu(v); 1.100 - vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM; 1.101 + vmcb->exception_intercepts &= ~(1U << TRAP_no_device); 1.102 1.103 if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) ) 1.104 vmcb->cr0 &= ~X86_CR0_TS; 1.105 } 1.106 1.107 - 1.108 -static void svm_do_general_protection_fault(struct vcpu *v, 1.109 - struct cpu_user_regs *regs) 1.110 -{ 1.111 - struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.112 - unsigned long eip, error_code; 1.113 - 1.114 - ASSERT(vmcb); 1.115 - 1.116 - eip = vmcb->rip; 1.117 - error_code = vmcb->exitinfo1; 1.118 - 1.119 - if (vmcb->idtr.limit == 0) { 1.120 - printk("Huh? 
We got a GP Fault with an invalid IDTR!\n"); 1.121 - svm_dump_vmcb(__func__, vmcb); 1.122 - svm_dump_regs(__func__, regs); 1.123 - svm_dump_inst(svm_rip2pointer(v)); 1.124 - domain_crash(v->domain); 1.125 - return; 1.126 - } 1.127 - 1.128 - HVM_DBG_LOG(DBG_LEVEL_1, 1.129 - "svm_general_protection_fault: eip = %lx, erro_code = %lx", 1.130 - eip, error_code); 1.131 - 1.132 - HVM_DBG_LOG(DBG_LEVEL_1, 1.133 - "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx", 1.134 - (unsigned long)regs->eax, (unsigned long)regs->ebx, 1.135 - (unsigned long)regs->ecx, (unsigned long)regs->edx, 1.136 - (unsigned long)regs->esi, (unsigned long)regs->edi); 1.137 - 1.138 - /* Reflect it back into the guest */ 1.139 - svm_inject_exception(v, TRAP_gp_fault, 1, error_code); 1.140 -} 1.141 - 1.142 /* Reserved bits ECX: [31:14], [12:4], [2:1]*/ 1.143 #define SVM_VCPU_CPUID_L1_ECX_RESERVED 0xffffdff6 1.144 /* Reserved bits EDX: [31:29], [27], [22:20], [18], [10] */ 1.145 @@ -1767,7 +1716,7 @@ static int npt_set_cr0(unsigned long val 1.146 /* TS cleared? Then initialise FPU now. */ 1.147 if ( !(value & X86_CR0_TS) ) { 1.148 setup_fpu(v); 1.149 - vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM; 1.150 + vmcb->exception_intercepts &= ~(1U << TRAP_no_device); 1.151 } 1.152 1.153 paging_update_paging_modes(v); 1.154 @@ -1795,7 +1744,7 @@ static int svm_set_cr0(unsigned long val 1.155 if ( !(value & X86_CR0_TS) ) 1.156 { 1.157 setup_fpu(v); 1.158 - vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM; 1.159 + vmcb->exception_intercepts &= ~(1U << TRAP_no_device); 1.160 } 1.161 1.162 HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value); 1.163 @@ -2214,7 +2163,7 @@ static int svm_cr_access(struct vcpu *v, 1.164 case INSTR_CLTS: 1.165 /* TS being cleared means that it's time to restore fpu state. 
*/ 1.166 setup_fpu(current); 1.167 - vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM; 1.168 + vmcb->exception_intercepts &= ~(1U << TRAP_no_device); 1.169 vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */ 1.170 v->arch.hvm_svm.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */ 1.171 break; 1.172 @@ -2357,7 +2306,6 @@ static inline void svm_do_msr_access( 1.173 __update_guest_eip(vmcb, inst_len); 1.174 } 1.175 1.176 - 1.177 static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb) 1.178 { 1.179 __update_guest_eip(vmcb, 1); 1.180 @@ -2373,7 +2321,6 @@ static inline void svm_vmexit_do_hlt(str 1.181 hvm_hlt(vmcb->rflags); 1.182 } 1.183 1.184 - 1.185 static void svm_vmexit_do_invd(struct vcpu *v) 1.186 { 1.187 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.188 @@ -2393,42 +2340,6 @@ static void svm_vmexit_do_invd(struct vc 1.189 __update_guest_eip(vmcb, inst_len); 1.190 } 1.191 1.192 - 1.193 - 1.194 - 1.195 -#ifdef XEN_DEBUGGER 1.196 -static void svm_debug_save_cpu_user_regs(struct vmcb_struct *vmcb, 1.197 - struct cpu_user_regs *regs) 1.198 -{ 1.199 - regs->eip = vmcb->rip; 1.200 - regs->esp = vmcb->rsp; 1.201 - regs->eflags = vmcb->rflags; 1.202 - 1.203 - regs->xcs = vmcb->cs.sel; 1.204 - regs->xds = vmcb->ds.sel; 1.205 - regs->xes = vmcb->es.sel; 1.206 - regs->xfs = vmcb->fs.sel; 1.207 - regs->xgs = vmcb->gs.sel; 1.208 - regs->xss = vmcb->ss.sel; 1.209 -} 1.210 - 1.211 - 1.212 -static void svm_debug_restore_cpu_user_regs(struct cpu_user_regs *regs) 1.213 -{ 1.214 - vmcb->ss.sel = regs->xss; 1.215 - vmcb->rsp = regs->esp; 1.216 - vmcb->rflags = regs->eflags; 1.217 - vmcb->cs.sel = regs->xcs; 1.218 - vmcb->rip = regs->eip; 1.219 - 1.220 - vmcb->gs.sel = regs->xgs; 1.221 - vmcb->fs.sel = regs->xfs; 1.222 - vmcb->es.sel = regs->xes; 1.223 - vmcb->ds.sel = regs->xds; 1.224 -} 1.225 -#endif 1.226 - 1.227 - 1.228 void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs) 1.229 { 1.230 struct vcpu *v = current; 1.231 @@ -2976,60 +2887,40 @@ asmlinkage void 
svm_vmexit_handler(struc 1.232 1.233 switch (exit_reason) 1.234 { 1.235 - case VMEXIT_EXCEPTION_DB: 1.236 - { 1.237 -#ifdef XEN_DEBUGGER 1.238 - svm_debug_save_cpu_user_regs(regs); 1.239 - pdb_handle_exception(1, regs, 1); 1.240 - svm_debug_restore_cpu_user_regs(regs); 1.241 -#else 1.242 - svm_store_cpu_user_regs(regs, v); 1.243 - domain_pause_for_debugger(); 1.244 -#endif 1.245 - } 1.246 - break; 1.247 - 1.248 case VMEXIT_INTR: 1.249 /* Asynchronous event, handled when we STGI'd after the VMEXIT. */ 1.250 HVMTRACE_0D(INTR, v); 1.251 break; 1.252 + 1.253 case VMEXIT_NMI: 1.254 /* Asynchronous event, handled when we STGI'd after the VMEXIT. */ 1.255 HVMTRACE_0D(NMI, v); 1.256 break; 1.257 + 1.258 case VMEXIT_SMI: 1.259 /* Asynchronous event, handled when we STGI'd after the VMEXIT. */ 1.260 HVMTRACE_0D(SMI, v); 1.261 break; 1.262 1.263 - case VMEXIT_INIT: 1.264 - BUG(); /* unreachable */ 1.265 + case VMEXIT_EXCEPTION_DB: 1.266 + if ( v->domain->debugger_attached ) 1.267 + domain_pause_for_debugger(); 1.268 + else 1.269 + svm_inject_exception(v, TRAP_debug, 0, 0); 1.270 + break; 1.271 1.272 case VMEXIT_EXCEPTION_BP: 1.273 -#ifdef XEN_DEBUGGER 1.274 - svm_debug_save_cpu_user_regs(regs); 1.275 - pdb_handle_exception(3, regs, 1); 1.276 - svm_debug_restore_cpu_user_regs(regs); 1.277 -#else 1.278 - if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) ) 1.279 + if ( v->domain->debugger_attached ) 1.280 domain_pause_for_debugger(); 1.281 else 1.282 svm_inject_exception(v, TRAP_int3, 0, 0); 1.283 -#endif 1.284 break; 1.285 1.286 case VMEXIT_EXCEPTION_NM: 1.287 svm_do_no_device_fault(vmcb); 1.288 break; 1.289 1.290 - case VMEXIT_EXCEPTION_GP: 1.291 - /* This should probably not be trapped in the future */ 1.292 - regs->error_code = vmcb->exitinfo1; 1.293 - svm_do_general_protection_fault(v, regs); 1.294 - break; 1.295 - 1.296 - case VMEXIT_EXCEPTION_PF: 1.297 - { 1.298 + case VMEXIT_EXCEPTION_PF: { 1.299 unsigned long va; 1.300 va = vmcb->exitinfo2; 1.301 regs->error_code 
= vmcb->exitinfo1; 1.302 @@ -3050,14 +2941,6 @@ asmlinkage void svm_vmexit_handler(struc 1.303 break; 1.304 } 1.305 1.306 - case VMEXIT_EXCEPTION_DF: 1.307 - /* Debug info to hopefully help debug WHY the guest double-faulted. */ 1.308 - svm_dump_vmcb(__func__, vmcb); 1.309 - svm_dump_regs(__func__, regs); 1.310 - svm_dump_inst(svm_rip2pointer(v)); 1.311 - svm_inject_exception(v, TRAP_double_fault, 1, 0); 1.312 - break; 1.313 - 1.314 case VMEXIT_VINTR: 1.315 vmcb->vintr.fields.irq = 0; 1.316 vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
2.1 --- a/xen/arch/x86/hvm/svm/vmcb.c Wed Mar 28 16:52:05 2007 +0100 2.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c Wed Mar 28 18:47:17 2007 +0100 2.3 @@ -194,17 +194,18 @@ static int construct_vmcb(struct vcpu *v 2.4 paging_update_paging_modes(v); 2.5 vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 2.6 2.7 - arch_svm->vmcb->exception_intercepts = MONITOR_DEFAULT_EXCEPTION_BITMAP; 2.8 - 2.9 if ( paging_mode_hap(v->domain) ) 2.10 { 2.11 vmcb->cr0 = arch_svm->cpu_shadow_cr0; 2.12 vmcb->np_enable = 1; /* enable nested paging */ 2.13 vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */ 2.14 - vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_PG; 2.15 vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table); 2.16 vmcb->cr4 = arch_svm->cpu_shadow_cr4 = 0; 2.17 } 2.18 + else 2.19 + { 2.20 + vmcb->exception_intercepts = 1U << TRAP_page_fault; 2.21 + } 2.22 2.23 return 0; 2.24 }
3.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c Wed Mar 28 16:52:05 2007 +0100 3.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed Mar 28 18:47:17 2007 +0100 3.3 @@ -210,7 +210,7 @@ void vmx_vmcs_exit(struct vcpu *v) 3.4 if ( v == current ) 3.5 return; 3.6 3.7 - /* Don't confuse arch_vmx_do_resume (for @v or @current!) */ 3.8 + /* Don't confuse vmx_do_resume (for @v or @current!) */ 3.9 vmx_clear_vmcs(v); 3.10 if ( is_hvm_vcpu(current) ) 3.11 vmx_load_vmcs(current); 3.12 @@ -412,7 +412,7 @@ static void construct_vmcs(struct vcpu * 3.13 __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL); 3.14 #endif 3.15 3.16 - __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP); 3.17 + __vmwrite(EXCEPTION_BITMAP, 1U << TRAP_page_fault); 3.18 3.19 /* Guest CR0. */ 3.20 cr0 = read_cr0(); 3.21 @@ -493,8 +493,10 @@ void vm_resume_fail(unsigned long eflags 3.22 domain_crash_synchronous(); 3.23 } 3.24 3.25 -void arch_vmx_do_resume(struct vcpu *v) 3.26 +void vmx_do_resume(struct vcpu *v) 3.27 { 3.28 + bool_t debug_state; 3.29 + 3.30 if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() ) 3.31 { 3.32 vmx_load_vmcs(v); 3.33 @@ -507,6 +509,19 @@ void arch_vmx_do_resume(struct vcpu *v) 3.34 vmx_set_host_env(v); 3.35 } 3.36 3.37 + debug_state = v->domain->debugger_attached; 3.38 + if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) ) 3.39 + { 3.40 + unsigned long intercepts = __vmread(EXCEPTION_BITMAP); 3.41 + unsigned long mask = (1U << TRAP_debug) | (1U << TRAP_int3); 3.42 + v->arch.hvm_vcpu.debug_state_latch = debug_state; 3.43 + if ( debug_state ) 3.44 + intercepts |= mask; 3.45 + else 3.46 + intercepts &= ~mask; 3.47 + __vmwrite(EXCEPTION_BITMAP, intercepts); 3.48 + } 3.49 + 3.50 hvm_do_resume(v); 3.51 reset_stack_and_jump(vmx_asm_do_vmentry); 3.52 }
4.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Wed Mar 28 16:52:05 2007 +0100 4.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Mar 28 18:47:17 2007 +0100 4.3 @@ -60,7 +60,7 @@ static int vmx_vcpu_initialise(struct vc 4.4 4.5 spin_lock_init(&v->arch.hvm_vmx.vmcs_lock); 4.6 4.7 - v->arch.schedule_tail = arch_vmx_do_resume; 4.8 + v->arch.schedule_tail = vmx_do_resume; 4.9 v->arch.ctxt_switch_from = vmx_ctxt_switch_from; 4.10 v->arch.ctxt_switch_to = vmx_ctxt_switch_to; 4.11 4.12 @@ -716,11 +716,6 @@ static void vmx_load_cpu_guest_regs(stru 4.13 /* NB. Bit 1 of RFLAGS must be set for VMENTRY to succeed. */ 4.14 __vmwrite(GUEST_RFLAGS, regs->eflags | 2UL); 4.15 4.16 - if ( regs->eflags & EF_TF ) 4.17 - __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); 4.18 - else 4.19 - __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); 4.20 - 4.21 if ( regs->eflags & EF_VM ) 4.22 { 4.23 /* 4.24 @@ -880,7 +875,7 @@ static void vmx_stts(struct vcpu *v) 4.25 { 4.26 v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS; 4.27 __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0); 4.28 - __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM); 4.29 + __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device); 4.30 } 4.31 } 4.32 4.33 @@ -1144,7 +1139,7 @@ static void vmx_do_no_device_fault(void) 4.34 struct vcpu *v = current; 4.35 4.36 setup_fpu(current); 4.37 - __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM); 4.38 + __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device); 4.39 4.40 /* Disable TS in guest CR0 unless the guest wants the exception too. 
*/ 4.41 if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) ) 4.42 @@ -1885,7 +1880,7 @@ static int vmx_set_cr0(unsigned long val 4.43 if ( !(value & X86_CR0_TS) ) 4.44 { 4.45 setup_fpu(v); 4.46 - __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM); 4.47 + __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device); 4.48 } 4.49 4.50 v->arch.hvm_vmx.cpu_cr0 = (value | X86_CR0_PE | X86_CR0_PG 4.51 @@ -2259,11 +2254,9 @@ static int vmx_cr_access(unsigned long e 4.52 mov_from_cr(cr, gp, regs); 4.53 break; 4.54 case TYPE_CLTS: 4.55 -// TRACE_VMEXIT(1, TYPE_CLTS); 4.56 - 4.57 /* We initialise the FPU now, to avoid needing another vmexit. */ 4.58 setup_fpu(v); 4.59 - __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM); 4.60 + __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device); 4.61 4.62 v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */ 4.63 __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0); 4.64 @@ -2275,10 +2268,7 @@ static int vmx_cr_access(unsigned long e 4.65 value = v->arch.hvm_vmx.cpu_shadow_cr0; 4.66 value = (value & ~0xF) | 4.67 (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF); 4.68 -// TRACE_VMEXIT(1, TYPE_LMSW); 4.69 -// TRACE_VMEXIT(2, value); 4.70 return vmx_set_cr0(value); 4.71 - break; 4.72 default: 4.73 BUG(); 4.74 } 4.75 @@ -2435,60 +2425,6 @@ static inline void vmx_do_extint(struct 4.76 } 4.77 } 4.78 4.79 -#if defined (__x86_64__) 4.80 -void store_cpu_user_regs(struct cpu_user_regs *regs) 4.81 -{ 4.82 - regs->ss = __vmread(GUEST_SS_SELECTOR); 4.83 - regs->rsp = __vmread(GUEST_RSP); 4.84 - regs->rflags = __vmread(GUEST_RFLAGS); 4.85 - regs->cs = __vmread(GUEST_CS_SELECTOR); 4.86 - regs->ds = __vmread(GUEST_DS_SELECTOR); 4.87 - regs->es = __vmread(GUEST_ES_SELECTOR); 4.88 - regs->rip = __vmread(GUEST_RIP); 4.89 -} 4.90 -#elif defined (__i386__) 4.91 -void store_cpu_user_regs(struct cpu_user_regs *regs) 4.92 -{ 4.93 - regs->ss = __vmread(GUEST_SS_SELECTOR); 4.94 - regs->esp = __vmread(GUEST_RSP); 4.95 - regs->eflags = __vmread(GUEST_RFLAGS); 4.96 - regs->cs = 
__vmread(GUEST_CS_SELECTOR); 4.97 - regs->ds = __vmread(GUEST_DS_SELECTOR); 4.98 - regs->es = __vmread(GUEST_ES_SELECTOR); 4.99 - regs->eip = __vmread(GUEST_RIP); 4.100 -} 4.101 -#endif 4.102 - 4.103 -#ifdef XEN_DEBUGGER 4.104 -void save_cpu_user_regs(struct cpu_user_regs *regs) 4.105 -{ 4.106 - regs->xss = __vmread(GUEST_SS_SELECTOR); 4.107 - regs->esp = __vmread(GUEST_RSP); 4.108 - regs->eflags = __vmread(GUEST_RFLAGS); 4.109 - regs->xcs = __vmread(GUEST_CS_SELECTOR); 4.110 - regs->eip = __vmread(GUEST_RIP); 4.111 - 4.112 - regs->xgs = __vmread(GUEST_GS_SELECTOR); 4.113 - regs->xfs = __vmread(GUEST_FS_SELECTOR); 4.114 - regs->xes = __vmread(GUEST_ES_SELECTOR); 4.115 - regs->xds = __vmread(GUEST_DS_SELECTOR); 4.116 -} 4.117 - 4.118 -void restore_cpu_user_regs(struct cpu_user_regs *regs) 4.119 -{ 4.120 - __vmwrite(GUEST_SS_SELECTOR, regs->xss); 4.121 - __vmwrite(GUEST_RSP, regs->esp); 4.122 - __vmwrite(GUEST_RFLAGS, regs->eflags); 4.123 - __vmwrite(GUEST_CS_SELECTOR, regs->xcs); 4.124 - __vmwrite(GUEST_RIP, regs->eip); 4.125 - 4.126 - __vmwrite(GUEST_GS_SELECTOR, regs->xgs); 4.127 - __vmwrite(GUEST_FS_SELECTOR, regs->xfs); 4.128 - __vmwrite(GUEST_ES_SELECTOR, regs->xes); 4.129 - __vmwrite(GUEST_DS_SELECTOR, regs->xds); 4.130 -} 4.131 -#endif 4.132 - 4.133 static void vmx_reflect_exception(struct vcpu *v) 4.134 { 4.135 int error_code, intr_info, vector; 4.136 @@ -2598,56 +2534,22 @@ asmlinkage void vmx_vmexit_handler(struc 4.137 4.138 switch ( vector ) 4.139 { 4.140 -#ifdef XEN_DEBUGGER 4.141 case TRAP_debug: 4.142 - { 4.143 - save_cpu_user_regs(regs); 4.144 - pdb_handle_exception(1, regs, 1); 4.145 - restore_cpu_user_regs(regs); 4.146 - break; 4.147 - } 4.148 - case TRAP_int3: 4.149 - { 4.150 - save_cpu_user_regs(regs); 4.151 - pdb_handle_exception(3, regs, 1); 4.152 - restore_cpu_user_regs(regs); 4.153 - break; 4.154 - } 4.155 -#else 4.156 - case TRAP_debug: 4.157 - { 4.158 - if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) ) 4.159 - { 4.160 - 
store_cpu_user_regs(regs); 4.161 - domain_pause_for_debugger(); 4.162 - __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, 4.163 - PENDING_DEBUG_EXC_BS); 4.164 - } 4.165 - else 4.166 - { 4.167 - vmx_reflect_exception(v); 4.168 - __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS, 4.169 - PENDING_DEBUG_EXC_BS); 4.170 - } 4.171 - 4.172 - break; 4.173 - } 4.174 - case TRAP_int3: 4.175 - { 4.176 - if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) ) 4.177 + if ( v->domain->debugger_attached ) 4.178 domain_pause_for_debugger(); 4.179 else 4.180 vmx_reflect_exception(v); 4.181 break; 4.182 - } 4.183 -#endif 4.184 + case TRAP_int3: 4.185 + if ( v->domain->debugger_attached ) 4.186 + domain_pause_for_debugger(); 4.187 + else 4.188 + vmx_reflect_exception(v); 4.189 + break; 4.190 case TRAP_no_device: 4.191 - { 4.192 vmx_do_no_device_fault(); 4.193 break; 4.194 - } 4.195 case TRAP_page_fault: 4.196 - { 4.197 exit_qualification = __vmread(EXIT_QUALIFICATION); 4.198 regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE); 4.199 4.200 @@ -2666,7 +2568,6 @@ asmlinkage void vmx_vmexit_handler(struc 4.201 v->arch.hvm_vmx.cpu_cr2 = exit_qualification; 4.202 vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code); 4.203 break; 4.204 - } 4.205 case TRAP_nmi: 4.206 HVMTRACE_0D(NMI, v); 4.207 if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI )
5.1 --- a/xen/common/domain.c Wed Mar 28 16:52:05 2007 +0100 5.2 +++ b/xen/common/domain.c Wed Mar 28 18:47:17 2007 +0100 5.3 @@ -336,7 +336,6 @@ void domain_shutdown(struct domain *d, u 5.4 send_guest_global_virq(dom0, VIRQ_DOM_EXC); 5.5 } 5.6 5.7 - 5.8 void domain_pause_for_debugger(void) 5.9 { 5.10 struct domain *d = current->domain; 5.11 @@ -350,6 +349,8 @@ void domain_pause_for_debugger(void) 5.12 send_guest_global_virq(dom0, VIRQ_DEBUGGER); 5.13 } 5.14 5.15 +__attribute__ ((weak)) void domain_debug_state_changed(struct domain *d) { } 5.16 + 5.17 /* Complete domain destroy after RCU readers are not holding old references. */ 5.18 static void complete_domain_destroy(struct rcu_head *head) 5.19 {
6.1 --- a/xen/common/domctl.c Wed Mar 28 16:52:05 2007 +0100 6.2 +++ b/xen/common/domctl.c Wed Mar 28 18:47:17 2007 +0100 6.3 @@ -639,10 +639,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc 6.4 d = rcu_lock_domain_by_id(op->domain); 6.5 if ( d != NULL ) 6.6 { 6.7 - if ( op->u.setdebugging.enable ) 6.8 - set_bit(_DOMF_debugging, &d->domain_flags); 6.9 - else 6.10 - clear_bit(_DOMF_debugging, &d->domain_flags); 6.11 + d->debugger_attached = !!op->u.setdebugging.enable; 6.12 rcu_unlock_domain(d); 6.13 ret = 0; 6.14 }
7.1 --- a/xen/include/asm-x86/debugger.h Wed Mar 28 16:52:05 2007 +0100 7.2 +++ b/xen/include/asm-x86/debugger.h Wed Mar 28 18:47:17 2007 +0100 7.3 @@ -65,8 +65,7 @@ static inline int debugger_trap_entry( 7.4 { 7.5 struct vcpu *v = current; 7.6 7.7 - if ( guest_kernel_mode(v, regs) && 7.8 - test_bit(_DOMF_debugging, &v->domain->domain_flags) && 7.9 + if ( guest_kernel_mode(v, regs) && v->domain->debugger_attached && 7.10 ((vector == TRAP_int3) || (vector == TRAP_debug)) ) 7.11 { 7.12 domain_pause_for_debugger();
8.1 --- a/xen/include/asm-x86/hvm/support.h Wed Mar 28 16:52:05 2007 +0100 8.2 +++ b/xen/include/asm-x86/hvm/support.h Wed Mar 28 18:47:17 2007 +0100 8.3 @@ -50,45 +50,6 @@ static inline vcpu_iodata_t *get_vio(str 8.4 #define TYPE_CLTS (2 << 4) 8.5 #define TYPE_LMSW (3 << 4) 8.6 8.7 -enum hval_bitmaps { 8.8 - EXCEPTION_BITMAP_TABLE=0, 8.9 -}; 8.10 - 8.11 -#define EXCEPTION_BITMAP_DE (1 << 0) /* Divide Error */ 8.12 -#define EXCEPTION_BITMAP_DB (1 << 1) /* Debug */ 8.13 -#define EXCEPTION_BITMAP_NMI (1 << 2) /* NMI */ 8.14 -#define EXCEPTION_BITMAP_BP (1 << 3) /* Breakpoint */ 8.15 -#define EXCEPTION_BITMAP_OF (1 << 4) /* Overflow */ 8.16 -#define EXCEPTION_BITMAP_BR (1 << 5) /* BOUND Range Exceeded */ 8.17 -#define EXCEPTION_BITMAP_UD (1 << 6) /* Invalid Opcode */ 8.18 -#define EXCEPTION_BITMAP_NM (1 << 7) /* Device Not Available */ 8.19 -#define EXCEPTION_BITMAP_DF (1 << 8) /* Double Fault */ 8.20 -/* reserved */ 8.21 -#define EXCEPTION_BITMAP_TS (1 << 10) /* Invalid TSS */ 8.22 -#define EXCEPTION_BITMAP_NP (1 << 11) /* Segment Not Present */ 8.23 -#define EXCEPTION_BITMAP_SS (1 << 12) /* Stack-Segment Fault */ 8.24 -#define EXCEPTION_BITMAP_GP (1 << 13) /* General Protection */ 8.25 -#define EXCEPTION_BITMAP_PG (1 << 14) /* Page Fault */ 8.26 -#define EXCEPTION_BITMAP_MF (1 << 16) /* x87 FPU Floating-Point Error (Math Fault) */ 8.27 -#define EXCEPTION_BITMAP_AC (1 << 17) /* Alignment Check */ 8.28 -#define EXCEPTION_BITMAP_MC (1 << 18) /* Machine Check */ 8.29 -#define EXCEPTION_BITMAP_XF (1 << 19) /* SIMD Floating-Point Exception */ 8.30 - 8.31 -/* Pending Debug exceptions */ 8.32 -#define PENDING_DEBUG_EXC_BP (1 << 12) /* break point */ 8.33 -#define PENDING_DEBUG_EXC_BS (1 << 14) /* Single step */ 8.34 - 8.35 -#ifdef XEN_DEBUGGER 8.36 -#define MONITOR_DEFAULT_EXCEPTION_BITMAP \ 8.37 - ( EXCEPTION_BITMAP_PG | \ 8.38 - EXCEPTION_BITMAP_DB | \ 8.39 - EXCEPTION_BITMAP_BP | \ 8.40 - EXCEPTION_BITMAP_GP ) 8.41 -#else 8.42 -#define MONITOR_DEFAULT_EXCEPTION_BITMAP \ 
8.43 - ( EXCEPTION_BITMAP_PG ) 8.44 -#endif 8.45 - 8.46 #define VMX_DELIVER_NO_ERROR_CODE -1 8.47 8.48 #if HVM_DEBUG
9.1 --- a/xen/include/asm-x86/hvm/vcpu.h Wed Mar 28 16:52:05 2007 +0100 9.2 +++ b/xen/include/asm-x86/hvm/vcpu.h Wed Mar 28 18:47:17 2007 +0100 9.3 @@ -42,8 +42,8 @@ struct hvm_vcpu { 9.4 9.5 int xen_port; 9.6 9.7 - /* Flags */ 9.8 - int flag_dr_dirty; 9.9 + bool_t flag_dr_dirty; 9.10 + bool_t debug_state_latch; 9.11 9.12 union { 9.13 struct arch_vmx_struct vmx;
10.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Mar 28 16:52:05 2007 +0100 10.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Mar 28 18:47:17 2007 +0100 10.3 @@ -83,8 +83,6 @@ struct arch_vmx_struct { 10.4 #define vmx_schedule_tail(next) \ 10.5 (next)->thread.arch_vmx.arch_vmx_schedule_tail((next)) 10.6 10.7 -void vmx_do_resume(struct vcpu *); 10.8 - 10.9 struct vmcs_struct *vmx_alloc_host_vmcs(void); 10.10 void vmx_free_host_vmcs(struct vmcs_struct *vmcs); 10.11
11.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h Wed Mar 28 16:52:05 2007 +0100 11.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h Wed Mar 28 18:47:17 2007 +0100 11.3 @@ -27,11 +27,11 @@ 11.4 #include <asm/i387.h> 11.5 #include <asm/hvm/trace.h> 11.6 11.7 -extern void vmx_asm_vmexit_handler(struct cpu_user_regs); 11.8 -extern void vmx_asm_do_vmentry(void); 11.9 -extern void vmx_intr_assist(void); 11.10 -extern void arch_vmx_do_resume(struct vcpu *); 11.11 -extern void set_guest_time(struct vcpu *v, u64 gtime); 11.12 +void vmx_asm_vmexit_handler(struct cpu_user_regs); 11.13 +void vmx_asm_do_vmentry(void); 11.14 +void vmx_intr_assist(void); 11.15 +void vmx_do_resume(struct vcpu *); 11.16 +void set_guest_time(struct vcpu *v, u64 gtime); 11.17 11.18 extern unsigned int cpu_rev; 11.19 11.20 @@ -224,14 +224,14 @@ static inline unsigned long __vmread_saf 11.21 return ecx; 11.22 } 11.23 11.24 -static inline void __vm_set_bit(unsigned long field, unsigned long mask) 11.25 +static inline void __vm_set_bit(unsigned long field, unsigned int bit) 11.26 { 11.27 - __vmwrite(field, __vmread(field) | mask); 11.28 + __vmwrite(field, __vmread(field) | (1UL << bit)); 11.29 } 11.30 11.31 -static inline void __vm_clear_bit(unsigned long field, unsigned long mask) 11.32 +static inline void __vm_clear_bit(unsigned long field, unsigned int bit) 11.33 { 11.34 - __vmwrite(field, __vmread(field) & ~mask); 11.35 + __vmwrite(field, __vmread(field) & ~(1UL << bit)); 11.36 } 11.37 11.38 static inline void __vmxoff (void)
12.1 --- a/xen/include/xen/sched.h Wed Mar 28 16:52:05 2007 +0100 12.2 +++ b/xen/include/xen/sched.h Wed Mar 28 18:47:17 2007 +0100 12.3 @@ -167,11 +167,12 @@ struct domain 12.4 12.5 unsigned long domain_flags; 12.6 12.7 - /* Boolean: Is this an HVM guest? */ 12.8 - char is_hvm; 12.9 - 12.10 - /* Boolean: Is this guest fully privileged (aka dom0)? */ 12.11 - char is_privileged; 12.12 + /* Is this an HVM guest? */ 12.13 + bool_t is_hvm; 12.14 + /* Is this guest fully privileged (aka dom0)? */ 12.15 + bool_t is_privileged; 12.16 + /* Is this guest being debugged by dom0? */ 12.17 + bool_t debugger_attached; 12.18 12.19 spinlock_t pause_lock; 12.20 unsigned int pause_count; 12.21 @@ -310,6 +311,7 @@ void domain_destroy(struct domain *d); 12.22 void domain_kill(struct domain *d); 12.23 void domain_shutdown(struct domain *d, u8 reason); 12.24 void domain_pause_for_debugger(void); 12.25 +void domain_debug_state_changed(struct domain *d); 12.26 12.27 /* 12.28 * Mark specified domain as crashed. This function always returns, even if the 12.29 @@ -462,17 +464,14 @@ extern struct domain *domain_list; 12.30 /* Domain is paused by controller software. */ 12.31 #define _DOMF_ctrl_pause 2 12.32 #define DOMF_ctrl_pause (1UL<<_DOMF_ctrl_pause) 12.33 - /* Domain is being debugged by controller software. */ 12.34 -#define _DOMF_debugging 3 12.35 -#define DOMF_debugging (1UL<<_DOMF_debugging) 12.36 /* Are any VCPUs polling event channels (SCHEDOP_poll)? */ 12.37 -#define _DOMF_polling 4 12.38 +#define _DOMF_polling 3 12.39 #define DOMF_polling (1UL<<_DOMF_polling) 12.40 /* Domain is paused by the hypervisor? */ 12.41 -#define _DOMF_paused 5 12.42 +#define _DOMF_paused 4 12.43 #define DOMF_paused (1UL<<_DOMF_paused) 12.44 /* Domain is a compatibility one? */ 12.45 -#define _DOMF_compat 6 12.46 +#define _DOMF_compat 5 12.47 #define DOMF_compat (1UL<<_DOMF_compat) 12.48 12.49 static inline int vcpu_runnable(struct vcpu *v)
13.1 --- a/xen/include/xen/types.h Wed Mar 28 16:52:05 2007 +0100 13.2 +++ b/xen/include/xen/types.h Wed Mar 28 18:47:17 2007 +0100 13.3 @@ -20,6 +20,8 @@ 13.4 #define LONG_MIN (-LONG_MAX - 1) 13.5 #define ULONG_MAX (~0UL) 13.6 13.7 +typedef char bool_t; 13.8 + 13.9 /* bsd */ 13.10 typedef unsigned char u_char; 13.11 typedef unsigned short u_short;