debuggers.hg
changeset 11077:bfc69471550e
[IA64] fix a fetch code bug
Fetching code may fail if there is no corresponding TLB entry
in the THASH-VTLB. This patch adds a "retry mechanism" to resolve
this issue.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author: awilliam@xenbuild.aw
date: Wed Aug 09 08:01:52 2006 -0600 (2006-08-09)
parents: 15498beef5d8
children: 54550e85f25a
files: xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_virt.c xen/arch/ia64/xen/faults.c xen/include/asm-ia64/bundle.h xen/include/asm-ia64/vmmu.h
line diff
1.1 --- a/xen/arch/ia64/vmx/mmio.c Tue Aug 08 14:42:34 2006 -0600 1.2 +++ b/xen/arch/ia64/vmx/mmio.c Wed Aug 09 08:01:52 2006 -0600 1.3 @@ -433,7 +433,10 @@ void emulate_io_inst(VCPU *vcpu, u64 pad 1.4 u64 data, value,post_update, slot1a, slot1b, temp; 1.5 INST64 inst; 1.6 regs=vcpu_regs(vcpu); 1.7 - bundle = __vmx_get_domain_bundle(regs->cr_iip); 1.8 + if (IA64_RETRY == __vmx_get_domain_bundle(regs->cr_iip, &bundle)) { 1.9 + /* if fetch code fail, return and try again */ 1.10 + return; 1.11 + } 1.12 slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri; 1.13 if (!slot) inst.inst = bundle.slot0; 1.14 else if (slot == 1){
2.1 --- a/xen/arch/ia64/vmx/vmmu.c Tue Aug 08 14:42:34 2006 -0600 2.2 +++ b/xen/arch/ia64/vmx/vmmu.c Wed Aug 09 08:01:52 2006 -0600 2.3 @@ -305,13 +305,13 @@ int unimplemented_gva(VCPU *vcpu,u64 vad 2.4 2.5 2.6 /* 2.7 - * Prefetch guest bundle code. 2.8 + * Fetch guest bundle code. 2.9 * INPUT: 2.10 - * code: buffer pointer to hold the read data. 2.11 - * num: number of dword (8byts) to read. 2.12 + * gip: guest ip 2.13 + * pbundle: used to return fetched bundle. 2.14 */ 2.15 -int 2.16 -fetch_code(VCPU *vcpu, u64 gip, u64 *code1, u64 *code2) 2.17 +unsigned long 2.18 +fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle) 2.19 { 2.20 u64 gpip=0; // guest physical IP 2.21 u64 *vpa; 2.22 @@ -336,8 +336,10 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod 2.23 maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1)); 2.24 }else{ 2.25 tlb = vhpt_lookup(gip); 2.26 - if( tlb == NULL) 2.27 - panic_domain(vcpu_regs(vcpu),"No entry found in ITLB and DTLB\n"); 2.28 + if (tlb == NULL) { 2.29 + ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2); 2.30 + return IA64_RETRY; 2.31 + } 2.32 mfn = tlb->ppn >> (PAGE_SHIFT - ARCH_PAGE_SHIFT); 2.33 maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) | 2.34 (gip & (PSIZE(tlb->ps) - 1)); 2.35 @@ -354,10 +356,10 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod 2.36 } 2.37 vpa = (u64 *)__va(maddr); 2.38 2.39 - *code1 = *vpa++; 2.40 - *code2 = *vpa; 2.41 + pbundle->i64[0] = *vpa++; 2.42 + pbundle->i64[1] = *vpa; 2.43 put_page(page); 2.44 - return 1; 2.45 + return IA64_NO_FAULT; 2.46 } 2.47 2.48 IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
3.1 --- a/xen/arch/ia64/vmx/vmx_virt.c Tue Aug 08 14:42:34 2006 -0600 3.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c Wed Aug 09 08:01:52 2006 -0600 3.3 @@ -1334,11 +1334,10 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp 3.4 3.5 //#define BYPASS_VMAL_OPCODE 3.6 extern IA64_SLOT_TYPE slot_types[0x20][3]; 3.7 -IA64_BUNDLE __vmx_get_domain_bundle(u64 iip) 3.8 +unsigned long 3.9 +__vmx_get_domain_bundle(u64 iip, IA64_BUNDLE *pbundle) 3.10 { 3.11 - IA64_BUNDLE bundle; 3.12 - fetch_code( current, iip, &bundle.i64[0], &bundle.i64[1]); 3.13 - return bundle; 3.14 + return fetch_code(current, iip, pbundle); 3.15 } 3.16 3.17 /** Emulate a privileged operation.
4.1 --- a/xen/arch/ia64/xen/faults.c Tue Aug 08 14:42:34 2006 -0600 4.2 +++ b/xen/arch/ia64/xen/faults.c Wed Aug 09 08:01:52 2006 -0600 4.3 @@ -323,8 +323,10 @@ handle_fpu_swa (int fp_fault, struct pt_ 4.4 if (!fp_fault && (ia64_psr(regs)->ri == 0)) 4.5 fault_ip -= 16; 4.6 4.7 - if (VMX_DOMAIN(current)) 4.8 - bundle = __vmx_get_domain_bundle(fault_ip); 4.9 + if (VMX_DOMAIN(current)) { 4.10 + if (IA64_RETRY == __vmx_get_domain_bundle(fault_ip, &bundle)) 4.11 + return IA64_RETRY; 4.12 + } 4.13 else 4.14 bundle = __get_domain_bundle(fault_ip); 4.15 4.16 @@ -555,6 +557,7 @@ ia64_handle_reflection (unsigned long if 4.17 struct vcpu *v = current; 4.18 unsigned long check_lazy_cover = 0; 4.19 unsigned long psr = regs->cr_ipsr; 4.20 + unsigned long status; 4.21 4.22 /* Following faults shouldn'g be seen from Xen itself */ 4.23 BUG_ON (!(psr & IA64_PSR_CPL)); 4.24 @@ -615,14 +618,23 @@ ia64_handle_reflection (unsigned long if 4.25 // FIXME: Should we handle unaligned refs in Xen?? 4.26 vector = IA64_UNALIGNED_REF_VECTOR; break; 4.27 case 32: 4.28 - if (!(handle_fpu_swa(1, regs, isr))) { 4.29 + status = handle_fpu_swa(1, regs, isr); 4.30 + if (!status) { 4.31 vcpu_increment_iip(v); 4.32 return; 4.33 } 4.34 + // fetch code fail 4.35 + if (IA64_RETRY == status) 4.36 + return; 4.37 printf("ia64_handle_reflection: handling FP fault\n"); 4.38 vector = IA64_FP_FAULT_VECTOR; break; 4.39 case 33: 4.40 - if (!(handle_fpu_swa(0, regs, isr))) return; 4.41 + status = handle_fpu_swa(0, regs, isr); 4.42 + if (!status) 4.43 + return; 4.44 + // fetch code fail 4.45 + if (IA64_RETRY == status) 4.46 + return; 4.47 printf("ia64_handle_reflection: handling FP trap\n"); 4.48 vector = IA64_FP_TRAP_VECTOR; break; 4.49 case 34:
5.1 --- a/xen/include/asm-ia64/bundle.h Tue Aug 08 14:42:34 2006 -0600 5.2 +++ b/xen/include/asm-ia64/bundle.h Wed Aug 09 08:01:52 2006 -0600 5.3 @@ -223,7 +223,7 @@ typedef union U_INST64 { 5.4 INST64_M47 M47; // purge translation entry 5.5 } INST64; 5.6 5.7 -extern IA64_BUNDLE __vmx_get_domain_bundle(unsigned long iip); 5.8 +extern unsigned long __vmx_get_domain_bundle(unsigned long iip, IA64_BUNDLE *pbundle); 5.9 extern IA64_BUNDLE __get_domain_bundle(unsigned long iip); 5.10 5.11 #define MASK_41 ((unsigned long)0x1ffffffffff)
6.1 --- a/xen/include/asm-ia64/vmmu.h Tue Aug 08 14:42:34 2006 -0600 6.2 +++ b/xen/include/asm-ia64/vmmu.h Wed Aug 09 08:01:52 2006 -0600 6.3 @@ -40,6 +40,7 @@ 6.4 #include <asm/tlb.h> 6.5 #include <asm/regionreg.h> 6.6 #include <asm/vmx_mm_def.h> 6.7 +#include <asm/bundle.h> 6.8 //#define THASH_TLB_TR 0 6.9 //#define THASH_TLB_TC 1 6.10 6.11 @@ -299,7 +300,7 @@ extern void free_domain_tlb(struct vcpu 6.12 extern thash_data_t * vsa_thash(PTA vpta, u64 va, u64 vrr, u64 *tag); 6.13 extern thash_data_t * vhpt_lookup(u64 va); 6.14 extern void machine_tlb_purge(u64 va, u64 ps); 6.15 -extern int fetch_code(struct vcpu *vcpu, u64 gip, u64 *code1, u64 *code2); 6.16 +extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle); 6.17 extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma); 6.18 extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref); 6.19 extern void vtlb_insert(struct vcpu *vcpu, u64 pte, u64 itir, u64 va);