debuggers.hg
changeset 16700:e818c24cec03
hvm: For functions which translate virtual addresses to machine
addresses, page faults should only be raised when the gva->gfn
translation fails. These should be distinguished from gfn->mfn
translation failures.
The main effect of this is to change the behaviour of functions
derived from __hvm_copy(), which now returns a three-way enumeration,
and also can automatically inject #PF when the gva->gfn translation
fails.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
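For orientation, here is a minimal sketch of how a caller's error handling changes under the new interface. The helper emulate_write_demo() and its surrounding logic are hypothetical and not part of this changeset; only the hvm_copy_to_guest_virt() call and the HVMCOPY_* values come from it.

```c
/*
 * Hypothetical illustration only: emulate_write_demo() is not part of this
 * changeset; the hvm_copy_to_guest_virt() call and HVMCOPY_* values are.
 */
static int emulate_write_demo(unsigned long vaddr, void *buf, int size)
{
    /*
     * Old convention: the return value was the number of bytes left
     * uncopied, and the caller injected the #PF itself, e.g.:
     *
     *   int left = hvm_copy_to_guest_virt(vaddr, buf, size);
     *   if ( left != 0 )
     *       hvm_inject_exception(TRAP_page_fault, PFEC_write_access,
     *                            vaddr + size - left);
     */

    /* New convention: three-way result; the #PF is already injected when
     * the gva->gfn translation fails. */
    switch ( hvm_copy_to_guest_virt(vaddr, buf, size) )
    {
    case HVMCOPY_okay:
        return 1;
    case HVMCOPY_bad_gva_to_gfn:
        return 0; /* page fault already queued for the guest */
    case HVMCOPY_bad_gfn_to_mfn:
    default:
        return 0; /* destination is not ordinary RAM */
    }
}
```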
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Thu Dec 27 12:00:30 2007 +0000 (2007-12-27) |
parents | 1e3e30670ce4 |
children | d5f0afb58589 |
files | xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/io.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/realmode.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/multi.c xen/include/asm-x86/hvm/support.h |
line diff
--- a/xen/arch/x86/hvm/hvm.c  Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c  Thu Dec 27 12:00:30 2007 +0000
@@ -1251,7 +1251,7 @@ void hvm_task_switch(
         if ( hvm_virtual_to_linear_addr(x86_seg_ss, &reg, regs->esp,
                                         4, hvm_access_write, 32,
                                         &linear_addr) )
-            hvm_copy_to_guest_virt(linear_addr, &errcode, 4);
+            hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4);
     }
 
  out:
@@ -1269,24 +1269,26 @@ void hvm_task_switch(
  * @fetch = copy is an instruction fetch?
  * Returns number of bytes failed to copy (0 == complete success).
  */
-static int __hvm_copy(void *buf, paddr_t addr, int size, int dir,
-                      int virt, int fetch)
+static enum hvm_copy_result __hvm_copy(
+    void *buf, paddr_t addr, int size, int dir, int virt, int fetch)
 {
-    struct segment_register sreg;
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     char *p;
     int count, todo;
     uint32_t pfec = PFEC_page_present;
 
-    hvm_get_segment_register(current, x86_seg_ss, &sreg);
-
-    if ( dir )
-        pfec |= PFEC_write_access;
-    if ( sreg.attr.fields.dpl == 3 )
-        pfec |= PFEC_user_mode;
-    if ( fetch )
-        pfec |= PFEC_insn_fetch;
+    if ( virt )
+    {
+        struct segment_register sreg;
+        hvm_get_segment_register(current, x86_seg_ss, &sreg);
+        if ( sreg.attr.fields.dpl == 3 )
+            pfec |= PFEC_user_mode;
+        if ( dir )
+            pfec |= PFEC_write_access;
+        if ( fetch )
+            pfec |= PFEC_insn_fetch;
+    }
 
     todo = size;
     while ( todo > 0 )
@@ -1294,14 +1296,24 @@ static int __hvm_copy(void *buf, paddr_t
         count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
 
         if ( virt )
+        {
             gfn = paging_gva_to_gfn(current, addr, &pfec);
+            if ( gfn == INVALID_GFN )
+            {
+                if ( virt == 2 ) /* 2 means generate a fault */
+                    hvm_inject_exception(TRAP_page_fault, pfec, addr);
+                return HVMCOPY_bad_gva_to_gfn;
+            }
+        }
         else
+        {
             gfn = addr >> PAGE_SHIFT;
-
+        }
+
         mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
 
         if ( !p2m_is_ram(p2mt) )
-            return todo;
+            return HVMCOPY_bad_gfn_to_mfn;
         ASSERT(mfn_valid(mfn));
 
         p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
@@ -1321,30 +1333,53 @@ static int __hvm_copy(void *buf, paddr_t
         todo -= count;
     }
 
-    return 0;
+    return HVMCOPY_okay;
 }
 
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
+enum hvm_copy_result hvm_copy_to_guest_phys(
+    paddr_t paddr, void *buf, int size)
 {
     return __hvm_copy(buf, paddr, size, 1, 0, 0);
 }
 
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
+enum hvm_copy_result hvm_copy_from_guest_phys(
+    void *buf, paddr_t paddr, int size)
 {
     return __hvm_copy(buf, paddr, size, 0, 0, 0);
 }
 
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
+enum hvm_copy_result hvm_copy_to_guest_virt(
+    unsigned long vaddr, void *buf, int size)
+{
+    return __hvm_copy(buf, vaddr, size, 1, 2, 0);
+}
+
+enum hvm_copy_result hvm_copy_from_guest_virt(
+    void *buf, unsigned long vaddr, int size)
+{
+    return __hvm_copy(buf, vaddr, size, 0, 2, 0);
+}
+
+enum hvm_copy_result hvm_fetch_from_guest_virt(
+    void *buf, unsigned long vaddr, int size)
+{
+    return __hvm_copy(buf, vaddr, size, 0, 2, hvm_nx_enabled(current));
+}
+
+enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
+    unsigned long vaddr, void *buf, int size)
 {
     return __hvm_copy(buf, vaddr, size, 1, 1, 0);
 }
 
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
+enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
+    void *buf, unsigned long vaddr, int size)
 {
     return __hvm_copy(buf, vaddr, size, 0, 1, 0);
 }
 
-int hvm_fetch_from_guest_virt(void *buf, unsigned long vaddr, int size)
+enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
+    void *buf, unsigned long vaddr, int size)
 {
     return __hvm_copy(buf, vaddr, size, 0, 1, hvm_nx_enabled(current));
 }
--- a/xen/arch/x86/hvm/io.c  Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/io.c  Thu Dec 27 12:00:30 2007 +0000
@@ -435,17 +435,8 @@ static void hvm_pio_assist(struct cpu_us
         if ( hvm_paging_enabled(current) )
         {
             int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
-            if ( rv != 0 )
-            {
-                /* Failed on the page-spanning copy. Inject PF into
-                 * the guest for the address where we failed. */
-                addr += p->size - rv;
-                gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
-                         "of a page-spanning PIO: va=%#lx\n", addr);
-                hvm_inject_exception(TRAP_page_fault,
-                                     PFEC_write_access, addr);
-                return;
-            }
+            if ( rv == HVMCOPY_bad_gva_to_gfn )
+                return; /* exception already injected */
         }
         else
             (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
@@ -569,17 +560,8 @@ static void hvm_mmio_assist(struct cpu_u
         if (hvm_paging_enabled(current))
         {
             int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
-            if ( rv != 0 )
-            {
-                /* Failed on the page-spanning copy. Inject PF into
-                 * the guest for the address where we failed. */
-                addr += p->size - rv;
-                gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
-                         "a page-spanning MMIO: va=%#lx\n", addr);
-                hvm_inject_exception(TRAP_page_fault,
-                                     PFEC_write_access, addr);
-                return;
-            }
+            if ( rv == HVMCOPY_bad_gva_to_gfn )
+                return; /* exception already injected */
         }
         else
             (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
@@ -812,14 +794,8 @@ static void hvm_mmio_assist(struct cpu_u
     {
         unsigned long addr = mmio_opp->addr;
         int rv = hvm_copy_to_guest_virt(addr, &p->data, size);
-        if ( rv != 0 )
-        {
-            addr += p->size - rv;
-            gdprintk(XENLOG_DEBUG, "Pagefault emulating PUSH from MMIO:"
-                     " va=%#lx\n", addr);
-            hvm_inject_exception(TRAP_page_fault, PFEC_write_access, addr);
-            return;
-        }
+        if ( rv == HVMCOPY_bad_gva_to_gfn )
+            return; /* exception already injected */
     }
     break;
 }
--- a/xen/arch/x86/hvm/platform.c  Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/platform.c  Thu Dec 27 12:00:30 2007 +0000
@@ -829,11 +829,12 @@ static int mmio_decode(int address_bytes
     }
 }
 
-int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
+int inst_copy_from_guest(
+    unsigned char *buf, unsigned long guest_eip, int inst_len)
 {
     if ( inst_len > MAX_INST_LEN || inst_len <= 0 )
         return 0;
-    if ( hvm_fetch_from_guest_virt(buf, guest_eip, inst_len) )
+    if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len) )
         return 0;
     return inst_len;
 }
@@ -1150,21 +1151,11 @@ void handle_mmio(paddr_t gpa)
             if ( hvm_paging_enabled(v) )
             {
                 int rv = hvm_copy_from_guest_virt(&value, addr, size);
-                if ( rv != 0 )
-                {
-                    /* Failed on the page-spanning copy. Inject PF into
-                     * the guest for the address where we failed */
-                    regs->eip -= inst_len; /* do not advance %eip */
-                    /* Must set CR2 at the failing address */
-                    addr += size - rv;
-                    gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
-                             "page-spanning MMIO: va=%#lx\n", addr);
-                    hvm_inject_exception(TRAP_page_fault, 0, addr);
-                    return;
-                }
+                if ( rv == HVMCOPY_bad_gva_to_gfn )
+                    return; /* exception already injected */
             }
             else
-                (void) hvm_copy_from_guest_phys(&value, addr, size);
+                (void)hvm_copy_from_guest_phys(&value, addr, size);
         } else /* dir != IOREQ_WRITE */
             /* Remember where to write the result, as a *VA*.
              * Must be a VA so we can handle the page overlap
@@ -1325,7 +1316,8 @@ unsigned long copy_to_user_hvm(void *to,
         return 0;
     }
 
-    return hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len);
+    return hvm_copy_to_guest_virt_nofault(
+        (unsigned long)to, (void *)from, len);
 }
 
 unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
@@ -1336,7 +1328,8 @@ unsigned long copy_from_user_hvm(void *t
         return 0;
     }
 
-    return hvm_copy_from_guest_virt(to, (unsigned long)from, len);
+    return hvm_copy_from_guest_virt_nofault(
+        to, (unsigned long)from, len);
 }
 
 /*
--- a/xen/arch/x86/hvm/svm/svm.c  Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c  Thu Dec 27 12:00:30 2007 +0000
@@ -1468,20 +1468,13 @@ static void svm_io_instruction(struct vc
         if ( hvm_paging_enabled(current) )
         {
             int rv = hvm_copy_from_guest_virt(&value, addr, size);
-            if ( rv != 0 )
-            {
-                /* Failed on the page-spanning copy. Inject PF into
-                 * the guest for the address where we failed. */
-                addr += size - rv;
-                gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
-                         "of a page-spanning PIO: va=%#lx\n", addr);
-                svm_inject_exception(TRAP_page_fault, 0, addr);
-                return;
-            }
+            if ( rv == HVMCOPY_bad_gva_to_gfn )
+                return; /* exception already injected */
         }
         else
-            (void) hvm_copy_from_guest_phys(&value, addr, size);
-    } else /* dir != IOREQ_WRITE */
+            (void)hvm_copy_from_guest_phys(&value, addr, size);
+    }
+    else /* dir != IOREQ_WRITE */
         /* Remember where to write the result, as a *VA*.
          * Must be a VA so we can handle the page overlap
          * correctly in hvm_pio_assist() */
@@ -1705,7 +1698,8 @@ static void svm_cr_access(
             offset = ( addr_size == 4 ) ? offset : ( offset & 0xFFFF );
             addr = hvm_get_segment_base(v, seg);
             addr += offset;
-            hvm_copy_to_guest_virt(addr,&value,2);
+            result = (hvm_copy_to_guest_virt(addr, &value, 2)
+                      != HVMCOPY_bad_gva_to_gfn);
         }
         else
         {
--- a/xen/arch/x86/hvm/vmx/realmode.c  Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/realmode.c  Thu Dec 27 12:00:30 2007 +0000
@@ -119,22 +119,13 @@ realmode_read(
     struct realmode_emulate_ctxt *rm_ctxt)
 {
     uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
-    int todo;
 
     *val = 0;
-    todo = hvm_copy_from_guest_phys(val, addr, bytes);
 
-    if ( todo )
+    if ( hvm_copy_from_guest_phys(val, addr, bytes) )
     {
         struct vcpu *curr = current;
 
-        if ( todo != bytes )
-        {
-            gdprintk(XENLOG_WARNING, "RM: Partial read at %08x (%d/%d)\n",
-                     addr, todo, bytes);
-            return X86EMUL_UNHANDLEABLE;
-        }
-
         if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
             return X86EMUL_UNHANDLEABLE;
 
@@ -203,21 +194,11 @@ realmode_emulate_write(
     struct realmode_emulate_ctxt *rm_ctxt =
         container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
     uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
-    int todo;
 
-    todo = hvm_copy_to_guest_phys(addr, &val, bytes);
-
-    if ( todo )
+    if ( hvm_copy_to_guest_phys(addr, &val, bytes) )
     {
         struct vcpu *curr = current;
 
-        if ( todo != bytes )
-        {
-            gdprintk(XENLOG_WARNING, "RM: Partial write at %08x (%d/%d)\n",
-                     addr, todo, bytes);
-            return X86EMUL_UNHANDLEABLE;
-        }
-
         if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
             return X86EMUL_UNHANDLEABLE;
 
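The simplified realmode checks above treat the copy result as a boolean. That works because HVMCOPY_okay is defined as 0 (see the support.h hunk at the end), so any failure value tests as nonzero, just as the old bytes-remaining return did. A minimal sketch; the helper name phys_read_ok() is hypothetical:

```c
/* Hypothetical helper: any result other than HVMCOPY_okay reads as failure. */
static int phys_read_ok(void *val, paddr_t addr, int bytes)
{
    /* HVMCOPY_okay == 0, so the enum can still be tested the way the old
     * "bytes remaining" return value was: nonzero means the copy failed. */
    return hvm_copy_from_guest_phys(val, addr, bytes) == HVMCOPY_okay;
}
```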
--- a/xen/arch/x86/hvm/vmx/vmx.c  Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c  Thu Dec 27 12:00:30 2007 +0000
@@ -1629,20 +1629,13 @@ static void vmx_send_str_pio(struct cpu_
         if ( hvm_paging_enabled(current) )
         {
             int rv = hvm_copy_from_guest_virt(&value, addr, size);
-            if ( rv != 0 )
-            {
-                /* Failed on the page-spanning copy. Inject PF into
-                 * the guest for the address where we failed. */
-                addr += size - rv;
-                gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
-                         "of a page-spanning PIO: va=%#lx\n", addr);
-                vmx_inject_exception(TRAP_page_fault, 0, addr);
-                return;
-            }
+            if ( rv == HVMCOPY_bad_gva_to_gfn )
+                return; /* exception already injected */
         }
         else
-            (void) hvm_copy_from_guest_phys(&value, addr, size);
-    } else /* dir != IOREQ_WRITE */
+            (void)hvm_copy_from_guest_phys(&value, addr, size);
+    }
+    else /* dir != IOREQ_WRITE */
         /* Remember where to write the result, as a *VA*.
          * Must be a VA so we can handle the page overlap
         * correctly in hvm_pio_assist() */
--- a/xen/arch/x86/mm/shadow/common.c  Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/mm/shadow/common.c  Thu Dec 27 12:00:30 2007 +0000
@@ -141,9 +141,8 @@ hvm_read(enum x86_segment seg,
          enum hvm_access_type access_type,
          struct sh_emulate_ctxt *sh_ctxt)
 {
-    struct segment_register *sreg;
     unsigned long addr;
-    int rc, errcode;
+    int rc;
 
     rc = hvm_translate_linear_addr(
         seg, offset, bytes, access_type, sh_ctxt, &addr);
@@ -157,19 +156,17 @@ hvm_read(enum x86_segment seg,
     else
         rc = hvm_copy_from_guest_virt(val, addr, bytes);
 
-    if ( rc == 0 )
+    switch ( rc )
+    {
+    case HVMCOPY_okay:
         return X86EMUL_OKAY;
-
-    /* If we got here, there was nothing mapped here, or a bad GFN
-     * was mapped here. This should never happen: we're here because
-     * of a write fault at the end of the instruction we're emulating. */
-    SHADOW_PRINTK("read failed to va %#lx\n", addr);
-    sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
-    errcode = (sreg->attr.fields.dpl == 3) ? PFEC_user_mode : 0;
-    if ( access_type == hvm_access_insn_fetch )
-        errcode |= PFEC_insn_fetch;
-    hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - rc);
-    return X86EMUL_EXCEPTION;
+    case HVMCOPY_bad_gva_to_gfn:
+        return X86EMUL_EXCEPTION;
+    default:
+        break;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
 }
 
 static int
@@ -399,7 +396,7 @@ struct x86_emulate_ops *shadow_init_emul
         (!hvm_translate_linear_addr(
             x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
            hvm_access_insn_fetch, sh_ctxt, &addr) &&
-         !hvm_fetch_from_guest_virt(
+         !hvm_fetch_from_guest_virt_nofault(
             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
@@ -427,7 +424,7 @@ void shadow_continue_emulation(struct sh
             (!hvm_translate_linear_addr(
                 x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
                 hvm_access_insn_fetch, sh_ctxt, &addr) &&
-             !hvm_fetch_from_guest_virt(
+             !hvm_fetch_from_guest_virt_nofault(
                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
             ? sizeof(sh_ctxt->insn_buf) : 0;
         sh_ctxt->insn_buf_eip = regs->eip;
--- a/xen/arch/x86/mm/shadow/multi.c  Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c  Thu Dec 27 12:00:30 2007 +0000
@@ -3984,6 +3984,8 @@ int sh_remove_l3_shadow(struct vcpu *v,
 /* Handling HVM guest writes to pagetables */
 
 /* Translate a VA to an MFN, injecting a page-fault if we fail */
+#define BAD_GVA_TO_GFN (~0UL)
+#define BAD_GFN_TO_MFN (~1UL)
 static mfn_t emulate_gva_to_mfn(struct vcpu *v,
                                 unsigned long vaddr,
                                 struct sh_emulate_ctxt *sh_ctxt)
@@ -4001,7 +4003,7 @@ static mfn_t emulate_gva_to_mfn(struct v
             hvm_inject_exception(TRAP_page_fault, pfec, vaddr);
         else
             propagate_page_fault(vaddr, pfec);
-        return _mfn(INVALID_MFN);
+        return _mfn(BAD_GVA_TO_GFN);
     }
 
     /* Translate the GFN to an MFN */
@@ -4013,11 +4015,14 @@ static mfn_t emulate_gva_to_mfn(struct v
         return mfn;
     }
 
-    return _mfn(INVALID_MFN);
+    return _mfn(BAD_GFN_TO_MFN);
 }
 
 /* Check that the user is allowed to perform this write.
  * Returns a mapped pointer to write to, or NULL for error. */
+#define MAPPING_UNHANDLEABLE ((void *)0)
+#define MAPPING_EXCEPTION    ((void *)1)
+#define emulate_map_dest_failed(rc) ((unsigned long)(rc) <= 1)
 static void *emulate_map_dest(struct vcpu *v,
                               unsigned long vaddr,
                               u32 bytes,
@@ -4030,11 +4035,12 @@ static void *emulate_map_dest(struct vcp
     /* We don't emulate user-mode writes to page tables */
     sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
     if ( sreg->attr.fields.dpl == 3 )
-        return NULL;
+        return MAPPING_UNHANDLEABLE;
 
     sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
     if ( !mfn_valid(sh_ctxt->mfn1) )
-        return NULL;
+        return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ?
+                MAPPING_EXCEPTION : MAPPING_UNHANDLEABLE);
 
     /* Unaligned writes mean probably this isn't a pagetable */
     if ( vaddr & (bytes - 1) )
@@ -4051,13 +4057,14 @@ static void *emulate_map_dest(struct vcp
         /* Cross-page emulated writes are only supported for HVM guests;
          * PV guests ought to know better */
         if ( !is_hvm_vcpu(v) )
-            return NULL;
+            return MAPPING_UNHANDLEABLE;
 
         /* This write crosses a page boundary. Translate the second page */
         sh_ctxt->mfn2 = emulate_gva_to_mfn(v, (vaddr + bytes - 1) & PAGE_MASK,
                                            sh_ctxt);
         if ( !mfn_valid(sh_ctxt->mfn2) )
-            return NULL;
+            return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ?
+                    MAPPING_EXCEPTION : MAPPING_UNHANDLEABLE);
 
         /* Cross-page writes mean probably not a pagetable */
         sh_remove_shadows(v, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
@@ -4075,7 +4082,7 @@ static void *emulate_map_dest(struct vcp
         flush_tlb_local();
         map += (vaddr & ~PAGE_MASK);
     }
-
+
 #if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
     /* Remember if the bottom bit was clear, so we can choose not to run
      * the change through the verify code if it's still clear afterwards */
@@ -4172,10 +4179,11 @@ sh_x86_emulate_write(struct vcpu *v, uns
 
     shadow_lock(v->domain);
     addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
-    if ( addr == NULL )
+    if ( emulate_map_dest_failed(addr) )
     {
         shadow_unlock(v->domain);
-        return X86EMUL_EXCEPTION;
+        return ((addr == MAPPING_EXCEPTION) ?
+                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
     }
 
     memcpy(addr, src, bytes);
@@ -4202,10 +4210,11 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     shadow_lock(v->domain);
 
     addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
-    if ( addr == NULL )
+    if ( emulate_map_dest_failed(addr) )
    {
         shadow_unlock(v->domain);
-        return X86EMUL_EXCEPTION;
+        return ((addr == MAPPING_EXCEPTION) ?
+                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
     }
 
     switch ( bytes )
@@ -4249,10 +4258,11 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     shadow_lock(v->domain);
 
     addr = emulate_map_dest(v, vaddr, 8, sh_ctxt);
-    if ( addr == NULL )
+    if ( emulate_map_dest_failed(addr) )
     {
         shadow_unlock(v->domain);
-        return X86EMUL_EXCEPTION;
+        return ((addr == MAPPING_EXCEPTION) ?
+                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
     }
 
     old = (((u64) old_hi) << 32) | (u64) old_lo;
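In the shadow code above, emulate_map_dest() now reports its two failure modes through small sentinel pointer values instead of NULL, and emulate_map_dest_failed() classifies any value less than or equal to 1 as a failure. A brief sketch of the caller-side pattern, with a hypothetical helper name and with the real function's locking, validation, and unmapping omitted:

```c
/*
 * Hypothetical caller showing only the error-discrimination pattern; the
 * real sh_x86_emulate_write() also handles locking, validation and unmap.
 */
static int write_dest_demo(struct vcpu *v, unsigned long vaddr,
                           void *src, u32 bytes,
                           struct sh_emulate_ctxt *sh_ctxt)
{
    void *map = emulate_map_dest(v, vaddr, bytes, sh_ctxt);

    if ( emulate_map_dest_failed(map) )   /* i.e. (unsigned long)map <= 1 */
        return (map == MAPPING_EXCEPTION) /* #PF already injected */
               ? X86EMUL_EXCEPTION
               : X86EMUL_UNHANDLEABLE;    /* MAPPING_UNHANDLEABLE */

    memcpy(map, src, bytes);              /* map points at real memory */
    return X86EMUL_OKAY;
}
```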
--- a/xen/include/asm-x86/hvm/support.h  Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/include/asm-x86/hvm/support.h  Thu Dec 27 12:00:30 2007 +0000
@@ -82,11 +82,50 @@ extern char hvm_io_bitmap[];
 
 void hvm_enable(struct hvm_function_table *);
 
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
-int hvm_fetch_from_guest_virt(void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result {
+    HVMCOPY_okay = 0,
+    HVMCOPY_bad_gva_to_gfn,
+    HVMCOPY_bad_gfn_to_mfn
+};
+
+/*
+ * Copy to/from a guest physical address.
+ * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
+ * address range does not map entirely onto ordinary machine memory.
+ */
+enum hvm_copy_result hvm_copy_to_guest_phys(
+    paddr_t paddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_phys(
+    void *buf, paddr_t paddr, int size);
+
+/*
+ * Copy to/from a guest virtual address.
+ * Returns:
+ *  HVMCOPY_okay: Copy was entirely successful.
+ *  HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
+ *                          ordinary machine memory.
+ *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
+ *                          mapping to a guest physical address. In this case
+ *                          a page fault exception is automatically queued
+ *                          for injection into the current HVM VCPU.
+ */
+enum hvm_copy_result hvm_copy_to_guest_virt(
+    unsigned long vaddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_virt(
+    void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result hvm_fetch_from_guest_virt(
+    void *buf, unsigned long vaddr, int size);
+
+/*
+ * As above (copy to/from a guest virtual address), but no fault is generated
+ * when HVMCOPY_bad_gva_to_gfn is returned.
+ */
+enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
+    unsigned long vaddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
+    void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
+    void *buf, unsigned long vaddr, int size);
 
 void hvm_print_line(struct vcpu *v, const char c);
 void hlt_timer_fn(void *data);
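A short usage sketch of the interface declared above. The struct and function names here are hypothetical; the point is how a caller of the faulting variants is meant to handle each of the three hvm_copy_result values.

```c
/* Hypothetical example; only the hvm_copy_* interface above is real. */
struct demo_descriptor {
    uint32_t base, limit;
};

static int read_guest_descriptor(unsigned long gva, struct demo_descriptor *d)
{
    switch ( hvm_copy_from_guest_virt(d, gva, sizeof(*d)) )
    {
    case HVMCOPY_okay:
        return 0;   /* *d now holds the guest data */
    case HVMCOPY_bad_gva_to_gfn:
        return -1;  /* #PF already queued; let the guest handle it and retry */
    case HVMCOPY_bad_gfn_to_mfn:
    default:
        return -1;  /* the backing guest page is not ordinary RAM */
    }
}
```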