debuggers.hg
changeset 22274:02e199c96ece
x86 hvm: Factor out hvm_map_guest_frame_{rw,ro} from hvm_map_entry
This allows us to map pages from guest physical addresses.
This will be used with nested virtualization.
Signed-off-by: Uwe Dannowski <Uwe.Dannowski@amd.com>
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Signed-off-by: Keir Fraser <keir@xen.org>
| author | Keir Fraser <keir@xen.org> |
|---|---|
| date | Wed Oct 06 11:00:19 2010 +0100 (2010-10-06) |
| parents | 368957d8b063 |
| children | 1385b15e168f |
| files | xen/arch/x86/hvm/hvm.c xen/include/asm-x86/hvm/hvm.h |
```diff
--- a/xen/arch/x86/hvm/hvm.c	Tue Oct 05 17:51:28 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Wed Oct 06 11:00:19 2010 +0100
@@ -1356,55 +1356,84 @@ int hvm_virtual_to_linear_addr(
     return 0;
 }
 
+static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
+{
+    unsigned long mfn;
+    p2m_type_t p2mt;
+    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
+
+    mfn = mfn_x(writable
+                ? gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0)
+                : gfn_to_mfn(p2m, gfn, &p2mt));
+    if ( (p2m_is_shared(p2mt) && writable) || !p2m_is_ram(p2mt) )
+        return NULL;
+    if ( p2m_is_paging(p2mt) )
+    {
+        p2m_mem_paging_populate(p2m, gfn);
+        return NULL;
+    }
+
+    ASSERT(mfn_valid(mfn));
+
+    if ( writable )
+        paging_mark_dirty(current->domain, mfn);
+
+    return map_domain_page(mfn);
+}
+
+void *hvm_map_guest_frame_rw(unsigned long gfn)
+{
+    return __hvm_map_guest_frame(gfn, 1);
+}
+
+void *hvm_map_guest_frame_ro(unsigned long gfn)
+{
+    return __hvm_map_guest_frame(gfn, 0);
+}
+
+void hvm_unmap_guest_frame(void *p)
+{
+    if ( p )
+        unmap_domain_page(p);
+}
+
 static void *hvm_map_entry(unsigned long va)
 {
-    unsigned long gfn, mfn;
-    p2m_type_t p2mt;
+    unsigned long gfn;
     uint32_t pfec;
-    struct vcpu *v = current;
-    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
+    char *v;
 
     if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE )
     {
         gdprintk(XENLOG_ERR, "Descriptor table entry "
                  "straddles page boundary\n");
-        domain_crash(current->domain);
-        return NULL;
+        goto fail;
     }
 
-    /* We're mapping on behalf of the segment-load logic, which might
-     * write the accessed flags in the descriptors (in 32-bit mode), but
-     * we still treat it as a kernel-mode read (i.e. no access checks). */
+    /*
+     * We're mapping on behalf of the segment-load logic, which might write
+     * the accessed flags in the descriptors (in 32-bit mode), but we still
+     * treat it as a kernel-mode read (i.e. no access checks).
+     */
     pfec = PFEC_page_present;
     gfn = paging_gva_to_gfn(current, va, &pfec);
-    if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
-        return NULL;
-    mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0));
-    if ( p2m_is_paging(p2mt) )
-    {
-        p2m_mem_paging_populate(p2m, gfn);
-        return NULL;
-    }
-    if ( p2m_is_shared(p2mt) )
-        return NULL;
-    if ( !p2m_is_ram(p2mt) )
-    {
-        gdprintk(XENLOG_ERR, "Failed to look up descriptor table entry\n");
-        domain_crash(current->domain);
-        return NULL;
-    }
-
-    ASSERT(mfn_valid(mfn));
-
-    paging_mark_dirty(current->domain, mfn);
-
-    return (char *)map_domain_page(mfn) + (va & ~PAGE_MASK);
+    if ( (pfec == PFEC_page_paged) || (pfec == PFEC_page_shared) )
+        goto fail;
+
+    v = hvm_map_guest_frame_rw(gfn);
+    if ( v == NULL )
+        goto fail;
+
+    return v + (va & ~PAGE_MASK);
+
+ fail:
+    domain_crash(current->domain);
+    return NULL;
 }
 
 static void hvm_unmap_entry(void *p)
 {
-    if ( p )
-        unmap_domain_page(p);
+    hvm_unmap_guest_frame(p);
 }
 
 static int hvm_load_segment_selector(
```
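The rewritten hvm_map_entry() above is itself the first user of the rw helper: map the frame, funnel every failure through a single domain_crash() path, then return a pointer offset into the mapped page. Below is a minimal sketch of that same map/use/unmap pattern in isolation, assuming Xen-internal context (xen/errno.h, asm/hvm/hvm.h); demo_write_guest_byte() is an illustrative name, not code from this changeset:

```c
/*
 * Illustrative only: write one byte into a guest frame via the helpers
 * added by this patch.  hvm_map_guest_frame_rw() unshares the page and
 * marks it dirty for log-dirty tracking, returning NULL if the gfn is
 * not RAM or is paged out (after requesting population).
 */
static int demo_write_guest_byte(unsigned long gfn, unsigned int offset,
                                 uint8_t val)
{
    char *p;

    if ( offset >= PAGE_SIZE )
        return -EINVAL;          /* stay within the single frame */

    p = hvm_map_guest_frame_rw(gfn);
    if ( p == NULL )
        return -EINVAL;

    p[offset] = val;
    hvm_unmap_guest_frame(p);    /* NULL-safe, like hvm_unmap_entry() */
    return 0;
}
```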
```diff
--- a/xen/include/asm-x86/hvm/hvm.h	Tue Oct 05 17:51:28 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h	Wed Oct 06 11:00:19 2010 +0100
@@ -344,6 +344,10 @@ int hvm_virtual_to_linear_addr(
     unsigned int addr_size,
     unsigned long *linear_addr);
 
+void *hvm_map_guest_frame_rw(unsigned long gfn);
+void *hvm_map_guest_frame_ro(unsigned long gfn);
+void hvm_unmap_guest_frame(void *p);
+
 static inline void hvm_set_info_guest(struct vcpu *v)
 {
     if ( hvm_funcs.set_info_guest )
```
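As the commit message notes, these declarations are exported so that nested-virtualization code can map pages by guest physical address, e.g. to read a guest-supplied VMCB/VMCS frame. A hedged sketch of such a read-only consumer follows; nested_read_guest_page() and its error codes are assumptions for illustration, not part of this changeset:

```c
/*
 * Hypothetical nested-virt consumer: copy a region out of a guest page
 * without write side effects.  The read-only variant goes through
 * gfn_to_mfn() rather than gfn_to_mfn_unshare() and skips
 * paging_mark_dirty(), so shared pages stay shared and no log-dirty
 * entry is recorded.
 */
static int nested_read_guest_page(unsigned long gfn, void *buf,
                                  unsigned int offset, unsigned int len)
{
    char *p;

    if ( len > PAGE_SIZE || offset > PAGE_SIZE - len )
        return -EINVAL;          /* region must not straddle the frame */

    p = hvm_map_guest_frame_ro(gfn);
    if ( p == NULL )
        return -EINVAL;          /* not RAM, or currently paged out */

    memcpy(buf, p + offset, len);
    hvm_unmap_guest_frame(p);
    return 0;
}
```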