debuggers.hg
changeset 14674:98b049ed2540
hvm: Remove extremely verbose debugging from SVM code.
It would be too verbose to actually turn on to find any non-subtle
bugs, and severely bloats the vmexit handler (making it hard to find
the real code amongst the debugging code).
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kfraser@localhost.localdomain |
---|---|
date | Thu Mar 29 11:01:41 2007 +0100 (2007-03-29) |
parents | ea55ead0fd47 |
children | a545ac9028d2 |
files | xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c |
line diff
1.1 --- a/xen/arch/x86/hvm/svm/svm.c Thu Mar 29 10:41:37 2007 +0100 1.2 +++ b/xen/arch/x86/hvm/svm/svm.c Thu Mar 29 11:01:41 2007 +0100 1.3 @@ -15,7 +15,6 @@ 1.4 * You should have received a copy of the GNU General Public License along with 1.5 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 1.6 * Place - Suite 330, Boston, MA 02111-1307 USA. 1.7 - * 1.8 */ 1.9 1.10 #include <xen/config.h> 1.11 @@ -50,19 +49,12 @@ 1.12 #include <asm/hvm/trace.h> 1.13 #include <asm/hap.h> 1.14 1.15 -#define SVM_EXTRA_DEBUG 1.16 - 1.17 #define set_segment_register(name, value) \ 1.18 - __asm__ __volatile__ ( "movw %%ax ,%%" STR(name) "" : : "a" (value) ) 1.19 - 1.20 -/* External functions. We should move these to some suitable header file(s) */ 1.21 + asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) ) 1.22 1.23 -extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, 1.24 - int inst_len); 1.25 -extern asmlinkage void do_IRQ(struct cpu_user_regs *); 1.26 -extern void svm_dump_inst(unsigned long eip); 1.27 -extern int svm_dbg_on; 1.28 -void svm_dump_regs(const char *from, struct cpu_user_regs *regs); 1.29 +int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, 1.30 + int inst_len); 1.31 +asmlinkage void do_IRQ(struct cpu_user_regs *); 1.32 1.33 static int svm_reset_to_realmode(struct vcpu *v, 1.34 struct cpu_user_regs *regs); 1.35 @@ -324,9 +316,9 @@ static inline int long_mode_do_msr_write 1.36 1.37 1.38 #define loaddebug(_v,_reg) \ 1.39 - __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg])) 1.40 + asm volatile ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg])) 1.41 #define savedebug(_v,_reg) \ 1.42 - __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg])) 1.43 + asm volatile ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg])) 1.44 1.45 static inline void svm_save_dr(struct vcpu *v) 1.46 { 1.47 @@ -802,69 +794,6 @@ static void svm_init_hypercall_page(stru 1.48 
*(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */ 1.49 } 1.50 1.51 - 1.52 -int svm_dbg_on = 0; 1.53 - 1.54 -static inline int svm_do_debugout(unsigned long exit_code) 1.55 -{ 1.56 - int i; 1.57 - 1.58 - static unsigned long counter = 0; 1.59 - static unsigned long works[] = 1.60 - { 1.61 - VMEXIT_IOIO, 1.62 - VMEXIT_HLT, 1.63 - VMEXIT_CPUID, 1.64 - VMEXIT_DR0_READ, 1.65 - VMEXIT_DR1_READ, 1.66 - VMEXIT_DR2_READ, 1.67 - VMEXIT_DR3_READ, 1.68 - VMEXIT_DR6_READ, 1.69 - VMEXIT_DR7_READ, 1.70 - VMEXIT_DR0_WRITE, 1.71 - VMEXIT_DR1_WRITE, 1.72 - VMEXIT_DR2_WRITE, 1.73 - VMEXIT_DR3_WRITE, 1.74 - VMEXIT_CR0_READ, 1.75 - VMEXIT_CR0_WRITE, 1.76 - VMEXIT_CR3_READ, 1.77 - VMEXIT_CR4_READ, 1.78 - VMEXIT_MSR, 1.79 - VMEXIT_CR0_WRITE, 1.80 - VMEXIT_CR3_WRITE, 1.81 - VMEXIT_CR4_WRITE, 1.82 - VMEXIT_EXCEPTION_PF, 1.83 - VMEXIT_INTR, 1.84 - VMEXIT_INVLPG, 1.85 - VMEXIT_EXCEPTION_NM 1.86 - }; 1.87 - 1.88 - 1.89 -#if 0 1.90 - if (svm_dbg_on && exit_code != 0x7B) 1.91 - return 1; 1.92 -#endif 1.93 - 1.94 - counter++; 1.95 - 1.96 -#if 0 1.97 - if ((exit_code == 0x4E 1.98 - || exit_code == VMEXIT_CR0_READ 1.99 - || exit_code == VMEXIT_CR0_WRITE) 1.100 - && counter < 200000) 1.101 - return 0; 1.102 - 1.103 - if ((exit_code == 0x4E) && counter < 500000) 1.104 - return 0; 1.105 -#endif 1.106 - 1.107 - for (i = 0; i < sizeof(works) / sizeof(works[0]); i++) 1.108 - if (exit_code == works[i]) 1.109 - return 0; 1.110 - 1.111 - return 1; 1.112 -} 1.113 - 1.114 static void save_svm_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *ctxt) 1.115 { 1.116 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.117 @@ -1086,17 +1015,6 @@ static int svm_do_nested_pgfault(paddr_t 1.118 return 0; 1.119 } 1.120 1.121 - 1.122 -static int svm_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 1.123 -{ 1.124 - HVM_DBG_LOG(DBG_LEVEL_VMMU, 1.125 - "svm_do_page_fault = 0x%lx, eip = %lx, error_code = %lx", 1.126 - va, (unsigned long)current->arch.hvm_svm.vmcb->rip, 1.127 - (unsigned 
long)regs->error_code); 1.128 - return paging_fault(va, regs); 1.129 -} 1.130 - 1.131 - 1.132 static void svm_do_no_device_fault(struct vmcb_struct *vmcb) 1.133 { 1.134 struct vcpu *v = current; 1.135 @@ -1121,8 +1039,6 @@ static void svm_vmexit_do_cpuid(struct v 1.136 struct vcpu *v = current; 1.137 int inst_len; 1.138 1.139 - ASSERT(vmcb); 1.140 - 1.141 hvm_cpuid(input, &eax, &ebx, &ecx, &edx); 1.142 1.143 if ( input == 0x00000001 ) 1.144 @@ -1254,8 +1170,8 @@ static inline unsigned long *get_reg_p( 1.145 } 1.146 1.147 1.148 -static inline unsigned long get_reg(unsigned int gpreg, 1.149 - struct cpu_user_regs *regs, struct vmcb_struct *vmcb) 1.150 +static inline unsigned long get_reg( 1.151 + unsigned int gpreg, struct cpu_user_regs *regs, struct vmcb_struct *vmcb) 1.152 { 1.153 unsigned long *gp; 1.154 gp = get_reg_p(gpreg, regs, vmcb); 1.155 @@ -1263,8 +1179,9 @@ static inline unsigned long get_reg(unsi 1.156 } 1.157 1.158 1.159 -static inline void set_reg(unsigned int gpreg, unsigned long value, 1.160 - struct cpu_user_regs *regs, struct vmcb_struct *vmcb) 1.161 +static inline void set_reg( 1.162 + unsigned int gpreg, unsigned long value, 1.163 + struct cpu_user_regs *regs, struct vmcb_struct *vmcb) 1.164 { 1.165 unsigned long *gp; 1.166 gp = get_reg_p(gpreg, regs, vmcb); 1.167 @@ -1534,7 +1451,6 @@ static void svm_io_instruction(struct vc 1.168 ioio_info_t info; 1.169 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.170 1.171 - ASSERT(vmcb); 1.172 pio_opp = ¤t->arch.hvm_vcpu.io_op; 1.173 pio_opp->instr = INSTR_PIO; 1.174 pio_opp->flags = 0; 1.175 @@ -1683,8 +1599,6 @@ static int npt_set_cr0(unsigned long val 1.176 struct vcpu *v = current; 1.177 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.178 1.179 - ASSERT(vmcb); 1.180 - 1.181 /* ET is reserved and should be always be 1*/ 1.182 value |= X86_CR0_ET; 1.183 1.184 @@ -1732,8 +1646,6 @@ static int svm_set_cr0(unsigned long val 1.185 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.186 unsigned long 
old_base_mfn; 1.187 1.188 - ASSERT(vmcb); 1.189 - 1.190 /* We don't want to lose PG. ET is reserved and should be always be 1*/ 1.191 paging_enabled = svm_paging_enabled(v); 1.192 value |= X86_CR0_ET; 1.193 @@ -1821,10 +1733,6 @@ static int svm_set_cr0(unsigned long val 1.194 return 1; 1.195 } 1.196 1.197 -// 1.198 -// nested paging functions 1.199 -// 1.200 - 1.201 static int npt_mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs) 1.202 { 1.203 unsigned long value; 1.204 @@ -1832,11 +1740,10 @@ static int npt_mov_to_cr(int gpreg, int 1.205 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.206 struct vlapic *vlapic = vcpu_vlapic(v); 1.207 1.208 - ASSERT(vmcb); 1.209 - 1.210 value = get_reg(gpreg, regs, vmcb); 1.211 1.212 - switch (cr) { 1.213 + switch ( cr ) 1.214 + { 1.215 case 0: 1.216 return npt_set_cr0(value); 1.217 1.218 @@ -1869,13 +1776,11 @@ static void npt_mov_from_cr(int cr, int 1.219 { 1.220 unsigned long value = 0; 1.221 struct vcpu *v = current; 1.222 - struct vmcb_struct *vmcb; 1.223 + struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.224 struct vlapic *vlapic = vcpu_vlapic(v); 1.225 1.226 - vmcb = v->arch.hvm_svm.vmcb; 1.227 - ASSERT(vmcb); 1.228 - 1.229 - switch(cr) { 1.230 + switch ( cr ) 1.231 + { 1.232 case 0: 1.233 value = (unsigned long) v->arch.hvm_svm.cpu_shadow_cr0; 1.234 break; 1.235 @@ -1908,30 +1813,21 @@ static void mov_from_cr(int cr, int gp, 1.236 unsigned long value = 0; 1.237 struct vcpu *v = current; 1.238 struct vlapic *vlapic = vcpu_vlapic(v); 1.239 - struct vmcb_struct *vmcb; 1.240 - 1.241 - vmcb = v->arch.hvm_svm.vmcb; 1.242 - ASSERT(vmcb); 1.243 + struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.244 1.245 switch ( cr ) 1.246 { 1.247 case 0: 1.248 value = v->arch.hvm_svm.cpu_shadow_cr0; 1.249 - if (svm_dbg_on) 1.250 - printk("CR0 read =%lx \n", value ); 1.251 break; 1.252 case 2: 1.253 value = vmcb->cr2; 1.254 break; 1.255 case 3: 1.256 - value = (unsigned long) v->arch.hvm_svm.cpu_cr3; 1.257 - if (svm_dbg_on) 1.258 - 
printk("CR3 read =%lx \n", value ); 1.259 + value = (unsigned long)v->arch.hvm_svm.cpu_cr3; 1.260 break; 1.261 case 4: 1.262 - value = (unsigned long) v->arch.hvm_svm.cpu_shadow_cr4; 1.263 - if (svm_dbg_on) 1.264 - printk("CR4 read=%lx\n", value); 1.265 + value = (unsigned long)v->arch.hvm_svm.cpu_shadow_cr4; 1.266 break; 1.267 case 8: 1.268 value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI); 1.269 @@ -1971,21 +1867,18 @@ static int mov_to_cr(int gpreg, int cr, 1.270 switch (cr) 1.271 { 1.272 case 0: 1.273 - if (svm_dbg_on) 1.274 - printk("CR0 write =%lx \n", value ); 1.275 return svm_set_cr0(value); 1.276 1.277 case 3: 1.278 - if (svm_dbg_on) 1.279 - printk("CR3 write =%lx \n", value ); 1.280 /* If paging is not enabled yet, simply copy the value to CR3. */ 1.281 - if (!svm_paging_enabled(v)) { 1.282 + if ( !svm_paging_enabled(v) ) 1.283 + { 1.284 v->arch.hvm_svm.cpu_cr3 = value; 1.285 break; 1.286 } 1.287 1.288 /* We make a new one if the shadow does not exist. */ 1.289 - if (value == v->arch.hvm_svm.cpu_cr3) 1.290 + if ( value == v->arch.hvm_svm.cpu_cr3 ) 1.291 { 1.292 /* 1.293 * This is simple TLB flush, implying the guest has 1.294 @@ -1993,7 +1886,7 @@ static int mov_to_cr(int gpreg, int cr, 1.295 * We simply invalidate the shadow. 
1.296 */ 1.297 mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT); 1.298 - if (mfn != pagetable_get_pfn(v->arch.guest_table)) 1.299 + if ( mfn != pagetable_get_pfn(v->arch.guest_table) ) 1.300 goto bad_cr3; 1.301 paging_update_cr3(v); 1.302 } 1.303 @@ -2005,13 +1898,13 @@ static int mov_to_cr(int gpreg, int cr, 1.304 */ 1.305 HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value); 1.306 mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT); 1.307 - if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain)) 1.308 + if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) ) 1.309 goto bad_cr3; 1.310 1.311 old_base_mfn = pagetable_get_pfn(v->arch.guest_table); 1.312 v->arch.guest_table = pagetable_from_pfn(mfn); 1.313 1.314 - if (old_base_mfn) 1.315 + if ( old_base_mfn ) 1.316 put_page(mfn_to_page(old_base_mfn)); 1.317 1.318 v->arch.hvm_svm.cpu_cr3 = value; 1.319 @@ -2021,9 +1914,6 @@ static int mov_to_cr(int gpreg, int cr, 1.320 break; 1.321 1.322 case 4: /* CR4 */ 1.323 - if (svm_dbg_on) 1.324 - printk( "write cr4=%lx, cr0=%lx\n", 1.325 - value, v->arch.hvm_svm.cpu_shadow_cr0 ); 1.326 old_cr = v->arch.hvm_svm.cpu_shadow_cr4; 1.327 if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) ) 1.328 { 1.329 @@ -2113,8 +2003,6 @@ static int svm_cr_access(struct vcpu *v, 1.330 enum instruction_index list_b[] = {INSTR_MOVCR2, INSTR_SMSW}; 1.331 enum instruction_index match; 1.332 1.333 - ASSERT(vmcb); 1.334 - 1.335 inst_copy_from_guest(buffer, svm_rip2pointer(v), sizeof(buffer)); 1.336 1.337 /* get index to first actual instruction byte - as we will need to know 1.338 @@ -2169,21 +2057,11 @@ static int svm_cr_access(struct vcpu *v, 1.339 break; 1.340 1.341 case INSTR_LMSW: 1.342 - if (svm_dbg_on) 1.343 - svm_dump_inst(svm_rip2pointer(v)); 1.344 - 1.345 gpreg = decode_src_reg(prefix, buffer[index+2]); 1.346 value = get_reg(gpreg, regs, vmcb) & 0xF; 1.347 1.348 - if (svm_dbg_on) 1.349 - printk("CR0-LMSW value=%lx, reg=%d, inst_len=%d\n", value, gpreg, 1.350 - inst_len); 1.351 - 1.352 
value = (v->arch.hvm_svm.cpu_shadow_cr0 & ~0xF) | value; 1.353 1.354 - if (svm_dbg_on) 1.355 - printk("CR0-LMSW CR0 - New value=%lx\n", value); 1.356 - 1.357 if ( paging_mode_hap(v->domain) ) 1.358 result = npt_set_cr0(value); 1.359 else 1.360 @@ -2191,15 +2069,9 @@ static int svm_cr_access(struct vcpu *v, 1.361 break; 1.362 1.363 case INSTR_SMSW: 1.364 - if (svm_dbg_on) 1.365 - svm_dump_inst(svm_rip2pointer(v)); 1.366 value = v->arch.hvm_svm.cpu_shadow_cr0; 1.367 gpreg = decode_src_reg(prefix, buffer[index+2]); 1.368 set_reg(gpreg, value, regs, vmcb); 1.369 - 1.370 - if (svm_dbg_on) 1.371 - printk("CR0-SMSW value=%lx, reg=%d, inst_len=%d\n", value, gpreg, 1.372 - inst_len); 1.373 break; 1.374 1.375 default: 1.376 @@ -2221,8 +2093,6 @@ static inline void svm_do_msr_access( 1.377 u64 msr_content=0; 1.378 u32 ecx = regs->ecx, eax, edx; 1.379 1.380 - ASSERT(vmcb); 1.381 - 1.382 HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x, exitinfo = %lx", 1.383 ecx, (u32)regs->eax, (u32)regs->edx, 1.384 (unsigned long)vmcb->exitinfo1); 1.385 @@ -2334,7 +2204,7 @@ static void svm_vmexit_do_invd(struct vc 1.386 /* Tell the user that we did this - just in case someone runs some really 1.387 * weird operating system and wants to know why it's not working... 
1.388 */ 1.389 - printk("INVD instruction intercepted - ignored\n"); 1.390 + gdprintk(XENLOG_WARNING, "INVD instruction intercepted - ignored\n"); 1.391 1.392 inst_len = __get_instruction_length(v, INSTR_INVD, NULL); 1.393 __update_guest_eip(vmcb, inst_len); 1.394 @@ -2408,15 +2278,8 @@ void svm_handle_invlpg(const short invlp 1.395 static int svm_reset_to_realmode(struct vcpu *v, 1.396 struct cpu_user_regs *regs) 1.397 { 1.398 - struct vmcb_struct *vmcb; 1.399 - 1.400 - ASSERT(v); 1.401 - ASSERT(regs); 1.402 + struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.403 1.404 - vmcb = v->arch.hvm_svm.vmcb; 1.405 - 1.406 - ASSERT(vmcb); 1.407 - 1.408 /* clear the vmcb and user regs */ 1.409 memset(regs, 0, sizeof(struct cpu_user_regs)); 1.410 1.411 @@ -2498,394 +2361,28 @@ static int svm_reset_to_realmode(struct 1.412 return 0; 1.413 } 1.414 1.415 - 1.416 -void svm_dump_inst(unsigned long eip) 1.417 -{ 1.418 - u8 opcode[256]; 1.419 - unsigned long ptr; 1.420 - int len; 1.421 - int i; 1.422 - 1.423 - ptr = eip & ~0xff; 1.424 - len = 0; 1.425 - 1.426 - if (hvm_copy_from_guest_virt(opcode, ptr, sizeof(opcode)) == 0) 1.427 - len = sizeof(opcode); 1.428 - 1.429 - printk("Code bytes around(len=%d) %lx:", len, eip); 1.430 - for (i = 0; i < len; i++) 1.431 - { 1.432 - if ((i & 0x0f) == 0) 1.433 - printk("\n%08lx:", ptr+i); 1.434 - 1.435 - printk("%02x ", opcode[i]); 1.436 - } 1.437 - 1.438 - printk("\n"); 1.439 -} 1.440 - 1.441 - 1.442 -void svm_dump_regs(const char *from, struct cpu_user_regs *regs) 1.443 -{ 1.444 - struct vcpu *v = current; 1.445 - struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.446 - unsigned long pt = v->arch.hvm_vcpu.hw_cr3; 1.447 - 1.448 - printk("%s: guest registers from %s:\n", __func__, from); 1.449 -#if defined (__x86_64__) 1.450 - printk("rax: %016lx rbx: %016lx rcx: %016lx\n", 1.451 - regs->rax, regs->rbx, regs->rcx); 1.452 - printk("rdx: %016lx rsi: %016lx rdi: %016lx\n", 1.453 - regs->rdx, regs->rsi, regs->rdi); 1.454 - printk("rbp: %016lx rsp: 
%016lx r8: %016lx\n", 1.455 - regs->rbp, regs->rsp, regs->r8); 1.456 - printk("r9: %016lx r10: %016lx r11: %016lx\n", 1.457 - regs->r9, regs->r10, regs->r11); 1.458 - printk("r12: %016lx r13: %016lx r14: %016lx\n", 1.459 - regs->r12, regs->r13, regs->r14); 1.460 - printk("r15: %016lx cr0: %016lx cr3: %016lx\n", 1.461 - regs->r15, v->arch.hvm_svm.cpu_shadow_cr0, vmcb->cr3); 1.462 -#else 1.463 - printk("eax: %08x, ebx: %08x, ecx: %08x, edx: %08x\n", 1.464 - regs->eax, regs->ebx, regs->ecx, regs->edx); 1.465 - printk("edi: %08x, esi: %08x, ebp: %08x, esp: %08x\n", 1.466 - regs->edi, regs->esi, regs->ebp, regs->esp); 1.467 - printk("%s: guest cr0: %lx\n", __func__, 1.468 - v->arch.hvm_svm.cpu_shadow_cr0); 1.469 - printk("guest CR3 = %llx\n", vmcb->cr3); 1.470 -#endif 1.471 - printk("%s: pt = %lx\n", __func__, pt); 1.472 -} 1.473 - 1.474 - 1.475 -void svm_dump_host_regs(const char *from) 1.476 -{ 1.477 - struct vcpu *v = current; 1.478 - unsigned long pt = pt = pagetable_get_paddr(v->arch.monitor_table); 1.479 - unsigned long cr3, cr0; 1.480 - printk("Host registers at %s\n", from); 1.481 - 1.482 - __asm__ __volatile__ ("\tmov %%cr0,%0\n" 1.483 - "\tmov %%cr3,%1\n" 1.484 - : "=r" (cr0), "=r"(cr3)); 1.485 - printk("%s: pt = %lx, cr3 = %lx, cr0 = %lx\n", __func__, pt, cr3, cr0); 1.486 -} 1.487 - 1.488 -#ifdef SVM_EXTRA_DEBUG 1.489 -static char *exit_reasons[] = { 1.490 - [VMEXIT_CR0_READ] = "CR0_READ", 1.491 - [VMEXIT_CR1_READ] = "CR1_READ", 1.492 - [VMEXIT_CR2_READ] = "CR2_READ", 1.493 - [VMEXIT_CR3_READ] = "CR3_READ", 1.494 - [VMEXIT_CR4_READ] = "CR4_READ", 1.495 - [VMEXIT_CR5_READ] = "CR5_READ", 1.496 - [VMEXIT_CR6_READ] = "CR6_READ", 1.497 - [VMEXIT_CR7_READ] = "CR7_READ", 1.498 - [VMEXIT_CR8_READ] = "CR8_READ", 1.499 - [VMEXIT_CR9_READ] = "CR9_READ", 1.500 - [VMEXIT_CR10_READ] = "CR10_READ", 1.501 - [VMEXIT_CR11_READ] = "CR11_READ", 1.502 - [VMEXIT_CR12_READ] = "CR12_READ", 1.503 - [VMEXIT_CR13_READ] = "CR13_READ", 1.504 - [VMEXIT_CR14_READ] = "CR14_READ", 1.505 - 
[VMEXIT_CR15_READ] = "CR15_READ", 1.506 - [VMEXIT_CR0_WRITE] = "CR0_WRITE", 1.507 - [VMEXIT_CR1_WRITE] = "CR1_WRITE", 1.508 - [VMEXIT_CR2_WRITE] = "CR2_WRITE", 1.509 - [VMEXIT_CR3_WRITE] = "CR3_WRITE", 1.510 - [VMEXIT_CR4_WRITE] = "CR4_WRITE", 1.511 - [VMEXIT_CR5_WRITE] = "CR5_WRITE", 1.512 - [VMEXIT_CR6_WRITE] = "CR6_WRITE", 1.513 - [VMEXIT_CR7_WRITE] = "CR7_WRITE", 1.514 - [VMEXIT_CR8_WRITE] = "CR8_WRITE", 1.515 - [VMEXIT_CR9_WRITE] = "CR9_WRITE", 1.516 - [VMEXIT_CR10_WRITE] = "CR10_WRITE", 1.517 - [VMEXIT_CR11_WRITE] = "CR11_WRITE", 1.518 - [VMEXIT_CR12_WRITE] = "CR12_WRITE", 1.519 - [VMEXIT_CR13_WRITE] = "CR13_WRITE", 1.520 - [VMEXIT_CR14_WRITE] = "CR14_WRITE", 1.521 - [VMEXIT_CR15_WRITE] = "CR15_WRITE", 1.522 - [VMEXIT_DR0_READ] = "DR0_READ", 1.523 - [VMEXIT_DR1_READ] = "DR1_READ", 1.524 - [VMEXIT_DR2_READ] = "DR2_READ", 1.525 - [VMEXIT_DR3_READ] = "DR3_READ", 1.526 - [VMEXIT_DR4_READ] = "DR4_READ", 1.527 - [VMEXIT_DR5_READ] = "DR5_READ", 1.528 - [VMEXIT_DR6_READ] = "DR6_READ", 1.529 - [VMEXIT_DR7_READ] = "DR7_READ", 1.530 - [VMEXIT_DR8_READ] = "DR8_READ", 1.531 - [VMEXIT_DR9_READ] = "DR9_READ", 1.532 - [VMEXIT_DR10_READ] = "DR10_READ", 1.533 - [VMEXIT_DR11_READ] = "DR11_READ", 1.534 - [VMEXIT_DR12_READ] = "DR12_READ", 1.535 - [VMEXIT_DR13_READ] = "DR13_READ", 1.536 - [VMEXIT_DR14_READ] = "DR14_READ", 1.537 - [VMEXIT_DR15_READ] = "DR15_READ", 1.538 - [VMEXIT_DR0_WRITE] = "DR0_WRITE", 1.539 - [VMEXIT_DR1_WRITE] = "DR1_WRITE", 1.540 - [VMEXIT_DR2_WRITE] = "DR2_WRITE", 1.541 - [VMEXIT_DR3_WRITE] = "DR3_WRITE", 1.542 - [VMEXIT_DR4_WRITE] = "DR4_WRITE", 1.543 - [VMEXIT_DR5_WRITE] = "DR5_WRITE", 1.544 - [VMEXIT_DR6_WRITE] = "DR6_WRITE", 1.545 - [VMEXIT_DR7_WRITE] = "DR7_WRITE", 1.546 - [VMEXIT_DR8_WRITE] = "DR8_WRITE", 1.547 - [VMEXIT_DR9_WRITE] = "DR9_WRITE", 1.548 - [VMEXIT_DR10_WRITE] = "DR10_WRITE", 1.549 - [VMEXIT_DR11_WRITE] = "DR11_WRITE", 1.550 - [VMEXIT_DR12_WRITE] = "DR12_WRITE", 1.551 - [VMEXIT_DR13_WRITE] = "DR13_WRITE", 1.552 - [VMEXIT_DR14_WRITE] = 
"DR14_WRITE", 1.553 - [VMEXIT_DR15_WRITE] = "DR15_WRITE", 1.554 - [VMEXIT_EXCEPTION_DE] = "EXCEPTION_DE", 1.555 - [VMEXIT_EXCEPTION_DB] = "EXCEPTION_DB", 1.556 - [VMEXIT_EXCEPTION_NMI] = "EXCEPTION_NMI", 1.557 - [VMEXIT_EXCEPTION_BP] = "EXCEPTION_BP", 1.558 - [VMEXIT_EXCEPTION_OF] = "EXCEPTION_OF", 1.559 - [VMEXIT_EXCEPTION_BR] = "EXCEPTION_BR", 1.560 - [VMEXIT_EXCEPTION_UD] = "EXCEPTION_UD", 1.561 - [VMEXIT_EXCEPTION_NM] = "EXCEPTION_NM", 1.562 - [VMEXIT_EXCEPTION_DF] = "EXCEPTION_DF", 1.563 - [VMEXIT_EXCEPTION_09] = "EXCEPTION_09", 1.564 - [VMEXIT_EXCEPTION_TS] = "EXCEPTION_TS", 1.565 - [VMEXIT_EXCEPTION_NP] = "EXCEPTION_NP", 1.566 - [VMEXIT_EXCEPTION_SS] = "EXCEPTION_SS", 1.567 - [VMEXIT_EXCEPTION_GP] = "EXCEPTION_GP", 1.568 - [VMEXIT_EXCEPTION_PF] = "EXCEPTION_PF", 1.569 - [VMEXIT_EXCEPTION_15] = "EXCEPTION_15", 1.570 - [VMEXIT_EXCEPTION_MF] = "EXCEPTION_MF", 1.571 - [VMEXIT_EXCEPTION_AC] = "EXCEPTION_AC", 1.572 - [VMEXIT_EXCEPTION_MC] = "EXCEPTION_MC", 1.573 - [VMEXIT_EXCEPTION_XF] = "EXCEPTION_XF", 1.574 - [VMEXIT_INTR] = "INTR", 1.575 - [VMEXIT_NMI] = "NMI", 1.576 - [VMEXIT_SMI] = "SMI", 1.577 - [VMEXIT_INIT] = "INIT", 1.578 - [VMEXIT_VINTR] = "VINTR", 1.579 - [VMEXIT_CR0_SEL_WRITE] = "CR0_SEL_WRITE", 1.580 - [VMEXIT_IDTR_READ] = "IDTR_READ", 1.581 - [VMEXIT_GDTR_READ] = "GDTR_READ", 1.582 - [VMEXIT_LDTR_READ] = "LDTR_READ", 1.583 - [VMEXIT_TR_READ] = "TR_READ", 1.584 - [VMEXIT_IDTR_WRITE] = "IDTR_WRITE", 1.585 - [VMEXIT_GDTR_WRITE] = "GDTR_WRITE", 1.586 - [VMEXIT_LDTR_WRITE] = "LDTR_WRITE", 1.587 - [VMEXIT_TR_WRITE] = "TR_WRITE", 1.588 - [VMEXIT_RDTSC] = "RDTSC", 1.589 - [VMEXIT_RDPMC] = "RDPMC", 1.590 - [VMEXIT_PUSHF] = "PUSHF", 1.591 - [VMEXIT_POPF] = "POPF", 1.592 - [VMEXIT_CPUID] = "CPUID", 1.593 - [VMEXIT_RSM] = "RSM", 1.594 - [VMEXIT_IRET] = "IRET", 1.595 - [VMEXIT_SWINT] = "SWINT", 1.596 - [VMEXIT_INVD] = "INVD", 1.597 - [VMEXIT_PAUSE] = "PAUSE", 1.598 - [VMEXIT_HLT] = "HLT", 1.599 - [VMEXIT_INVLPG] = "INVLPG", 1.600 - [VMEXIT_INVLPGA] = "INVLPGA", 
1.601 - [VMEXIT_IOIO] = "IOIO", 1.602 - [VMEXIT_MSR] = "MSR", 1.603 - [VMEXIT_TASK_SWITCH] = "TASK_SWITCH", 1.604 - [VMEXIT_FERR_FREEZE] = "FERR_FREEZE", 1.605 - [VMEXIT_SHUTDOWN] = "SHUTDOWN", 1.606 - [VMEXIT_VMRUN] = "VMRUN", 1.607 - [VMEXIT_VMMCALL] = "VMMCALL", 1.608 - [VMEXIT_VMLOAD] = "VMLOAD", 1.609 - [VMEXIT_VMSAVE] = "VMSAVE", 1.610 - [VMEXIT_STGI] = "STGI", 1.611 - [VMEXIT_CLGI] = "CLGI", 1.612 - [VMEXIT_SKINIT] = "SKINIT", 1.613 - [VMEXIT_RDTSCP] = "RDTSCP", 1.614 - [VMEXIT_ICEBP] = "ICEBP", 1.615 - [VMEXIT_NPF] = "NPF" 1.616 -}; 1.617 -#endif /* SVM_EXTRA_DEBUG */ 1.618 - 1.619 -#ifdef SVM_WALK_GUEST_PAGES 1.620 -void walk_shadow_and_guest_pt(unsigned long gva) 1.621 -{ 1.622 - l2_pgentry_t gpde; 1.623 - l2_pgentry_t spde; 1.624 - l1_pgentry_t gpte; 1.625 - l1_pgentry_t spte; 1.626 - struct vcpu *v = current; 1.627 - struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.628 - paddr_t gpa; 1.629 - 1.630 - gpa = paging_gva_to_gpa(current, gva); 1.631 - printk("gva = %lx, gpa=%"PRIpaddr", gCR3=%x\n", gva, gpa, (u32)vmcb->cr3); 1.632 - if( !svm_paging_enabled(v) || mmio_space(gpa) ) 1.633 - return; 1.634 - 1.635 - /* let's dump the guest and shadow page info */ 1.636 - 1.637 - __guest_get_l2e(v, gva, &gpde); 1.638 - printk( "G-PDE = %x, flags=%x\n", gpde.l2, l2e_get_flags(gpde) ); 1.639 - __shadow_get_l2e( v, gva, &spde ); 1.640 - printk( "S-PDE = %x, flags=%x\n", spde.l2, l2e_get_flags(spde) ); 1.641 - 1.642 - if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) ) 1.643 - return; 1.644 - 1.645 - spte = l1e_empty(); 1.646 - 1.647 - /* This is actually overkill - we only need to ensure the hl2 is in-sync.*/ 1.648 - shadow_sync_va(v, gva); 1.649 - 1.650 - gpte.l1 = 0; 1.651 - __copy_from_user(&gpte, &__linear_l1_table[ l1_linear_offset(gva) ], 1.652 - sizeof(gpte) ); 1.653 - printk( "G-PTE = %x, flags=%x\n", gpte.l1, l1e_get_flags(gpte) ); 1.654 - 1.655 - BUG(); // need to think about this, and convert usage of 1.656 - // phys_to_machine_mapping to use 
pagetable format... 1.657 - __copy_from_user( &spte, &phys_to_machine_mapping[ l1e_get_pfn( gpte ) ], 1.658 - sizeof(spte) ); 1.659 - 1.660 - printk( "S-PTE = %x, flags=%x\n", spte.l1, l1e_get_flags(spte)); 1.661 -} 1.662 -#endif /* SVM_WALK_GUEST_PAGES */ 1.663 - 1.664 - 1.665 asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs) 1.666 { 1.667 unsigned int exit_reason; 1.668 unsigned long eip; 1.669 struct vcpu *v = current; 1.670 - int do_debug = 0; 1.671 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.672 1.673 - ASSERT(vmcb); 1.674 - 1.675 exit_reason = vmcb->exitcode; 1.676 save_svm_cpu_user_regs(v, regs); 1.677 1.678 HVMTRACE_2D(VMEXIT, v, vmcb->rip, exit_reason); 1.679 1.680 - if (exit_reason == VMEXIT_INVALID) 1.681 + if ( unlikely(exit_reason == VMEXIT_INVALID) ) 1.682 { 1.683 svm_dump_vmcb(__func__, vmcb); 1.684 goto exit_and_crash; 1.685 } 1.686 1.687 -#ifdef SVM_EXTRA_DEBUG 1.688 - { 1.689 -#if defined(__i386__) 1.690 -#define rip eip 1.691 -#endif 1.692 - 1.693 - static unsigned long intercepts_counter = 0; 1.694 - 1.695 - if (svm_dbg_on && exit_reason == VMEXIT_EXCEPTION_PF) 1.696 - { 1.697 - if (svm_paging_enabled(v) && 1.698 - !mmio_space( 1.699 - paging_gva_to_gfn(current, vmcb->exitinfo2) << PAGE_SHIFT)) 1.700 - { 1.701 - printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64"," 1.702 - "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64", " 1.703 - "gpa=%"PRIx64"\n", intercepts_counter, 1.704 - exit_reasons[exit_reason], exit_reason, regs->cs, 1.705 - (u64)regs->rip, 1.706 - (u64)vmcb->exitinfo1, 1.707 - (u64)vmcb->exitinfo2, 1.708 - (u64)vmcb->exitintinfo.bytes, 1.709 - (((u64)paging_gva_to_gfn(current, vmcb->exitinfo2) 1.710 - << PAGE_SHIFT) | (vmcb->exitinfo2 & ~PAGE_MASK))); 1.711 - } 1.712 - else 1.713 - { 1.714 - printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64"," 1.715 - "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64"\n", 1.716 - intercepts_counter, 1.717 - exit_reasons[exit_reason], exit_reason, regs->cs, 1.718 - (u64)regs->rip, 1.719 - (u64)vmcb->exitinfo1, 1.720 - 
(u64)vmcb->exitinfo2, 1.721 - (u64)vmcb->exitintinfo.bytes ); 1.722 - } 1.723 - } 1.724 - else if ( svm_dbg_on 1.725 - && exit_reason != VMEXIT_IOIO 1.726 - && exit_reason != VMEXIT_INTR) 1.727 - { 1.728 - 1.729 - if (exit_reasons[exit_reason]) 1.730 - { 1.731 - printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64"," 1.732 - "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64"\n", 1.733 - intercepts_counter, 1.734 - exit_reasons[exit_reason], exit_reason, regs->cs, 1.735 - (u64)regs->rip, 1.736 - (u64)vmcb->exitinfo1, 1.737 - (u64)vmcb->exitinfo2, 1.738 - (u64)vmcb->exitintinfo.bytes); 1.739 - } 1.740 - else 1.741 - { 1.742 - printk("I%08ld,ExC=%d(0x%x),IP=%x:%"PRIx64"," 1.743 - "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64"\n", 1.744 - intercepts_counter, exit_reason, exit_reason, regs->cs, 1.745 - (u64)regs->rip, 1.746 - (u64)vmcb->exitinfo1, 1.747 - (u64)vmcb->exitinfo2, 1.748 - (u64)vmcb->exitintinfo.bytes); 1.749 - } 1.750 - } 1.751 - 1.752 -#ifdef SVM_WALK_GUEST_PAGES 1.753 - if( exit_reason == VMEXIT_EXCEPTION_PF 1.754 - && ( ( vmcb->exitinfo2 == vmcb->rip ) 1.755 - || vmcb->exitintinfo.bytes) ) 1.756 - { 1.757 - if ( svm_paging_enabled(v) && 1.758 - !mmio_space(gva_to_gpa(vmcb->exitinfo2)) ) 1.759 - walk_shadow_and_guest_pt(vmcb->exitinfo2); 1.760 - } 1.761 -#endif 1.762 - 1.763 - intercepts_counter++; 1.764 - 1.765 -#if 0 1.766 - if (svm_dbg_on) 1.767 - do_debug = svm_do_debugout(exit_reason); 1.768 -#endif 1.769 - 1.770 - if (do_debug) 1.771 - { 1.772 - printk("%s:+ guest_table = 0x%08x, monitor_table = 0x%08x, " 1.773 - "hw_cr3 = 0x%16lx\n", 1.774 - __func__, 1.775 - (int) v->arch.guest_table.pfn, 1.776 - (int) v->arch.monitor_table.pfn, 1.777 - (long unsigned int) v->arch.hvm_vcpu.hw_cr3); 1.778 - 1.779 - svm_dump_vmcb(__func__, vmcb); 1.780 - svm_dump_regs(__func__, regs); 1.781 - svm_dump_inst(svm_rip2pointer(v)); 1.782 - } 1.783 - 1.784 -#if defined(__i386__) 1.785 -#undef rip 1.786 -#endif 1.787 - 1.788 - } 1.789 -#endif /* SVM_EXTRA_DEBUG */ 1.790 - 1.791 - 1.792 
perfc_incra(svmexits, exit_reason); 1.793 eip = vmcb->rip; 1.794 1.795 -#ifdef SVM_EXTRA_DEBUG 1.796 - if (do_debug) 1.797 - { 1.798 - printk("eip = %lx, exit_reason = %d (0x%x)\n", 1.799 - eip, exit_reason, exit_reason); 1.800 - } 1.801 -#endif /* SVM_EXTRA_DEBUG */ 1.802 - 1.803 - switch (exit_reason) 1.804 + switch ( exit_reason ) 1.805 { 1.806 case VMEXIT_INTR: 1.807 /* Asynchronous event, handled when we STGI'd after the VMEXIT. */ 1.808 @@ -2930,7 +2427,7 @@ asmlinkage void svm_vmexit_handler(struc 1.809 (unsigned long)regs->ecx, (unsigned long)regs->edx, 1.810 (unsigned long)regs->esi, (unsigned long)regs->edi); 1.811 1.812 - if ( svm_do_page_fault(va, regs) ) 1.813 + if ( paging_fault(va, regs) ) 1.814 { 1.815 HVMTRACE_2D(PF_XEN, v, va, regs->error_code); 1.816 break; 1.817 @@ -3051,9 +2548,8 @@ asmlinkage void svm_vmexit_handler(struc 1.818 1.819 case VMEXIT_NPF: 1.820 regs->error_code = vmcb->exitinfo1; 1.821 - if ( !svm_do_nested_pgfault(vmcb->exitinfo2, regs) ) { 1.822 + if ( !svm_do_nested_pgfault(vmcb->exitinfo2, regs) ) 1.823 domain_crash(v->domain); 1.824 - } 1.825 break; 1.826 1.827 default: 1.828 @@ -3065,35 +2561,16 @@ asmlinkage void svm_vmexit_handler(struc 1.829 domain_crash(v->domain); 1.830 break; 1.831 } 1.832 - 1.833 -#ifdef SVM_EXTRA_DEBUG 1.834 - if (do_debug) 1.835 - { 1.836 - printk("%s: Done switch on vmexit_code\n", __func__); 1.837 - svm_dump_regs(__func__, regs); 1.838 - } 1.839 - 1.840 - if (do_debug) 1.841 - { 1.842 - printk("vmexit_handler():- guest_table = 0x%08x, " 1.843 - "monitor_table = 0x%08x, hw_cr3 = 0x%16x\n", 1.844 - (int)v->arch.guest_table.pfn, 1.845 - (int)v->arch.monitor_table.pfn, 1.846 - (int)v->arch.hvm_vcpu.hw_cr3); 1.847 - printk("svm_vmexit_handler: Returning\n"); 1.848 - } 1.849 -#endif 1.850 } 1.851 1.852 asmlinkage void svm_load_cr2(void) 1.853 { 1.854 struct vcpu *v = current; 1.855 1.856 - // this is the last C code before the VMRUN instruction 1.857 + /* This is the last C code before the VMRUN 
instruction. */ 1.858 HVMTRACE_0D(VMENTRY, v); 1.859 1.860 - local_irq_disable(); 1.861 - asm volatile("mov %0,%%cr2": :"r" (v->arch.hvm_svm.cpu_cr2)); 1.862 + asm volatile ( "mov %0,%%cr2" : : "r" (v->arch.hvm_svm.cpu_cr2) ); 1.863 } 1.864 1.865 /*
2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Thu Mar 29 10:41:37 2007 +0100 2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Mar 29 11:01:41 2007 +0100 2.3 @@ -1104,36 +1104,6 @@ static void inline __update_guest_eip(un 2.4 __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0); 2.5 } 2.6 2.7 -static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 2.8 -{ 2.9 - int result; 2.10 - 2.11 -#if 0 /* keep for debugging */ 2.12 - { 2.13 - unsigned long eip, cs; 2.14 - 2.15 - cs = __vmread(GUEST_CS_BASE); 2.16 - eip = __vmread(GUEST_RIP); 2.17 - HVM_DBG_LOG(DBG_LEVEL_VMMU, 2.18 - "vmx_do_page_fault = 0x%lx, cs_base=%lx, " 2.19 - "eip = %lx, error_code = %lx\n", 2.20 - va, cs, eip, (unsigned long)regs->error_code); 2.21 - } 2.22 -#endif 2.23 - 2.24 - result = paging_fault(va, regs); 2.25 - 2.26 -#if 0 2.27 - if ( !result ) 2.28 - { 2.29 - eip = __vmread(GUEST_RIP); 2.30 - printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip); 2.31 - } 2.32 -#endif 2.33 - 2.34 - return result; 2.35 -} 2.36 - 2.37 static void vmx_do_no_device_fault(void) 2.38 { 2.39 struct vcpu *v = current; 2.40 @@ -2559,7 +2529,7 @@ asmlinkage void vmx_vmexit_handler(struc 2.41 (unsigned long)regs->ecx, (unsigned long)regs->edx, 2.42 (unsigned long)regs->esi, (unsigned long)regs->edi); 2.43 2.44 - if ( vmx_do_page_fault(exit_qualification, regs) ) 2.45 + if ( paging_fault(exit_qualification, regs) ) 2.46 { 2.47 HVMTRACE_2D(PF_XEN, v, exit_qualification, regs->error_code); 2.48 break;