debuggers.hg
changeset 18058:8803b305b06c
x86: Do not allow write access to p2m_ram_ro memory type.
Log and discard such access attempts.
Signed-off-by: Trolle Selander <trolle.selander@eu.citrix.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Thu Jul 10 15:19:56 2008 +0100 (2008-07-10) |
parents | 26714991f242 |
children | 7749f135140a |
files | xen/arch/x86/hvm/hvm.c xen/arch/x86/mm/shadow/multi.c |
line diff
1.1 --- a/xen/arch/x86/hvm/hvm.c Thu Jul 10 14:20:15 2008 +0100 1.2 +++ b/xen/arch/x86/hvm/hvm.c Thu Jul 10 15:19:56 2008 +0100 1.3 @@ -1496,8 +1496,19 @@ static enum hvm_copy_result __hvm_copy( 1.4 1.5 if ( flags & HVMCOPY_to_guest ) 1.6 { 1.7 - memcpy(p, buf, count); 1.8 - paging_mark_dirty(curr->domain, mfn); 1.9 + if ( p2mt == p2m_ram_ro ) 1.10 + { 1.11 + static unsigned long lastpage; 1.12 + if ( xchg(&lastpage, gfn) != gfn ) 1.13 + gdprintk(XENLOG_DEBUG, "guest attempted write to read-only" 1.14 + " memory page. gfn=%#lx, mfn=%#lx\n", 1.15 + gfn, mfn); 1.16 + } 1.17 + else 1.18 + { 1.19 + memcpy(p, buf, count); 1.20 + paging_mark_dirty(curr->domain, mfn); 1.21 + } 1.22 } 1.23 else 1.24 {
2.1 --- a/xen/arch/x86/mm/shadow/multi.c Thu Jul 10 14:20:15 2008 +0100 2.2 +++ b/xen/arch/x86/mm/shadow/multi.c Thu Jul 10 15:19:56 2008 +0100 2.3 @@ -3344,15 +3344,24 @@ static int sh_page_fault(struct vcpu *v, 2.4 } 2.5 } 2.6 2.7 - /* Need to hand off device-model MMIO and writes to read-only 2.8 - * memory to the device model */ 2.9 - if ( p2mt == p2m_mmio_dm 2.10 - || (p2mt == p2m_ram_ro && ft == ft_demand_write) ) 2.11 + /* Need to hand off device-model MMIO to the device model */ 2.12 + if ( p2mt == p2m_mmio_dm ) 2.13 { 2.14 gpa = guest_walk_to_gpa(&gw); 2.15 goto mmio; 2.16 } 2.17 2.18 + /* Log attempts to write to read-only memory */ 2.19 + if ( (p2mt == p2m_ram_ro) && (ft == ft_demand_write) ) 2.20 + { 2.21 + static unsigned long lastpage = 0; 2.22 + if ( xchg(&lastpage, va & PAGE_MASK) != (va & PAGE_MASK) ) 2.23 + gdprintk(XENLOG_DEBUG, "guest attempted write to read-only memory" 2.24 + " page. va page=%#lx, mfn=%#lx\n", 2.25 + va & PAGE_MASK, mfn_x(gmfn)); 2.26 + goto emulate; /* skip over the instruction */ 2.27 + } 2.28 + 2.29 /* In HVM guests, we force CR0.WP always to be set, so that the 2.30 * pagetables are always write-protected. 
If the guest thinks 2.31 * CR0.WP is clear, we must emulate faulting supervisor writes to 2.32 @@ -4587,6 +4596,7 @@ int sh_remove_l3_shadow(struct vcpu *v, 2.33 /* Translate a VA to an MFN, injecting a page-fault if we fail */ 2.34 #define BAD_GVA_TO_GFN (~0UL) 2.35 #define BAD_GFN_TO_MFN (~1UL) 2.36 +#define READONLY_GFN (~2UL) 2.37 static mfn_t emulate_gva_to_mfn(struct vcpu *v, 2.38 unsigned long vaddr, 2.39 struct sh_emulate_ctxt *sh_ctxt) 2.40 @@ -4609,21 +4619,22 @@ static mfn_t emulate_gva_to_mfn(struct v 2.41 2.42 /* Translate the GFN to an MFN */ 2.43 mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt); 2.44 - if ( p2m_is_ram(p2mt) ) 2.45 - { 2.46 - ASSERT(mfn_valid(mfn)); 2.47 - v->arch.paging.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn); 2.48 - return mfn; 2.49 - } 2.50 - 2.51 - return _mfn(BAD_GFN_TO_MFN); 2.52 + if ( p2mt == p2m_ram_ro ) 2.53 + return _mfn(READONLY_GFN); 2.54 + if ( !p2m_is_ram(p2mt) ) 2.55 + return _mfn(BAD_GFN_TO_MFN); 2.56 + 2.57 + ASSERT(mfn_valid(mfn)); 2.58 + v->arch.paging.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn); 2.59 + return mfn; 2.60 } 2.61 2.62 /* Check that the user is allowed to perform this write. 2.63 * Returns a mapped pointer to write to, or NULL for error. 
*/ 2.64 -#define MAPPING_UNHANDLEABLE ((void *)0) 2.65 -#define MAPPING_EXCEPTION ((void *)1) 2.66 -#define emulate_map_dest_failed(rc) ((unsigned long)(rc) <= 1) 2.67 +#define MAPPING_UNHANDLEABLE ((void *)(unsigned long)X86EMUL_UNHANDLEABLE) 2.68 +#define MAPPING_EXCEPTION ((void *)(unsigned long)X86EMUL_EXCEPTION) 2.69 +#define MAPPING_SILENT_FAIL ((void *)(unsigned long)X86EMUL_OKAY) 2.70 +#define emulate_map_dest_failed(rc) ((unsigned long)(rc) <= 3) 2.71 static void *emulate_map_dest(struct vcpu *v, 2.72 unsigned long vaddr, 2.73 u32 bytes, 2.74 @@ -4641,7 +4652,9 @@ static void *emulate_map_dest(struct vcp 2.75 sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt); 2.76 if ( !mfn_valid(sh_ctxt->mfn1) ) 2.77 return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ? 2.78 - MAPPING_EXCEPTION : MAPPING_UNHANDLEABLE); 2.79 + MAPPING_EXCEPTION : 2.80 + (mfn_x(sh_ctxt->mfn1) == READONLY_GFN) ? 2.81 + MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE); 2.82 2.83 /* Unaligned writes mean probably this isn't a pagetable */ 2.84 if ( vaddr & (bytes - 1) ) 2.85 @@ -4665,7 +4678,9 @@ static void *emulate_map_dest(struct vcp 2.86 sh_ctxt); 2.87 if ( !mfn_valid(sh_ctxt->mfn2) ) 2.88 return ((mfn_x(sh_ctxt->mfn2) == BAD_GVA_TO_GFN) ? 2.89 - MAPPING_EXCEPTION : MAPPING_UNHANDLEABLE); 2.90 + MAPPING_EXCEPTION : 2.91 + (mfn_x(sh_ctxt->mfn2) == READONLY_GFN) ? 2.92 + MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE); 2.93 2.94 /* Cross-page writes mean probably not a pagetable */ 2.95 sh_remove_shadows(v, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ ); 2.96 @@ -4782,8 +4797,7 @@ sh_x86_emulate_write(struct vcpu *v, uns 2.97 2.98 addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt); 2.99 if ( emulate_map_dest_failed(addr) ) 2.100 - return ((addr == MAPPING_EXCEPTION) ? 
2.101 - X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE); 2.102 + return (long)addr; 2.103 2.104 shadow_lock(v->domain); 2.105 memcpy(addr, src, bytes); 2.106 @@ -4809,8 +4823,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u 2.107 2.108 addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt); 2.109 if ( emulate_map_dest_failed(addr) ) 2.110 - return ((addr == MAPPING_EXCEPTION) ? 2.111 - X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE); 2.112 + return (long)addr; 2.113 2.114 shadow_lock(v->domain); 2.115 switch ( bytes ) 2.116 @@ -4854,8 +4867,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v, 2.117 2.118 addr = emulate_map_dest(v, vaddr, 8, sh_ctxt); 2.119 if ( emulate_map_dest_failed(addr) ) 2.120 - return ((addr == MAPPING_EXCEPTION) ? 2.121 - X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE); 2.122 + return (long)addr; 2.123 2.124 old = (((u64) old_hi) << 32) | (u64) old_lo; 2.125 new = (((u64) new_hi) << 32) | (u64) new_lo;