debuggers.hg
changeset 18059:7749f135140a
x86 hvm: New hvm_op "set_mem_type" which allows marking ram page
ranges as ro, rw, or mmio_dm.
Signed-off-by: Trolle Selander <trolle.selander@eu.citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Thu Jul 10 15:30:39 2008 +0100 (2008-07-10) |
parents | 8803b305b06c |
children | b7598d2e4791 |
files | tools/libxc/xc_misc.c tools/libxc/xenctrl.h xen/arch/x86/hvm/hvm.c xen/include/public/hvm/hvm_op.h |
line diff
1.1 --- a/tools/libxc/xc_misc.c Thu Jul 10 15:19:56 2008 +0100 1.2 +++ b/tools/libxc/xc_misc.c Thu Jul 10 15:30:39 2008 +0100 1.3 @@ -295,6 +295,36 @@ int xc_hvm_modified_memory( 1.4 return rc; 1.5 } 1.6 1.7 +int xc_hvm_set_mem_type( 1.8 + int xc_handle, domid_t dom, hvmmem_type_t mem_type, uint64_t first_pfn, uint64_t nr) 1.9 +{ 1.10 + DECLARE_HYPERCALL; 1.11 + struct xen_hvm_set_mem_type arg; 1.12 + int rc; 1.13 + 1.14 + hypercall.op = __HYPERVISOR_hvm_op; 1.15 + hypercall.arg[0] = HVMOP_set_mem_type; 1.16 + hypercall.arg[1] = (unsigned long)&arg; 1.17 + 1.18 + arg.domid = dom; 1.19 + arg.hvmmem_type = mem_type; 1.20 + arg.first_pfn = first_pfn; 1.21 + arg.nr = nr; 1.22 + 1.23 + if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 ) 1.24 + { 1.25 + PERROR("Could not lock memory"); 1.26 + return rc; 1.27 + } 1.28 + 1.29 + rc = do_xen_hypercall(xc_handle, &hypercall); 1.30 + 1.31 + unlock_pages(&arg, sizeof(arg)); 1.32 + 1.33 + return rc; 1.34 +} 1.35 + 1.36 + 1.37 void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot, 1.38 const xen_pfn_t *arr, int num) 1.39 {
2.1 --- a/tools/libxc/xenctrl.h Thu Jul 10 15:19:56 2008 +0100 2.2 +++ b/tools/libxc/xenctrl.h Thu Jul 10 15:30:39 2008 +0100 2.3 @@ -27,6 +27,7 @@ 2.4 #include <xen/event_channel.h> 2.5 #include <xen/sched.h> 2.6 #include <xen/memory.h> 2.7 +#include <xen/hvm/params.h> 2.8 #include <xen/xsm/acm.h> 2.9 #include <xen/xsm/acm_ops.h> 2.10 #include <xen/xsm/flask_op.h> 2.11 @@ -942,6 +943,14 @@ int xc_hvm_track_dirty_vram( 2.12 int xc_hvm_modified_memory( 2.13 int xc_handle, domid_t dom, uint64_t first_pfn, uint64_t nr); 2.14 2.15 +/* 2.16 + * Set a range of memory to a specific type. 2.17 + * Allowed types are HVMMEM_ram_rw, HVMMEM_ram_ro, HVMMEM_mmio_dm 2.18 + */ 2.19 +int xc_hvm_set_mem_type( 2.20 + int xc_handle, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint64_t nr); 2.21 + 2.22 + 2.23 typedef enum { 2.24 XC_ERROR_NONE = 0, 2.25 XC_INTERNAL_ERROR = 1,
3.1 --- a/xen/arch/x86/hvm/hvm.c Thu Jul 10 15:19:56 2008 +0100 3.2 +++ b/xen/arch/x86/hvm/hvm.c Thu Jul 10 15:30:39 2008 +0100 3.3 @@ -2611,6 +2611,65 @@ long do_hvm_op(unsigned long op, XEN_GUE 3.4 break; 3.5 } 3.6 3.7 + case HVMOP_set_mem_type: 3.8 + { 3.9 + struct xen_hvm_set_mem_type a; 3.10 + struct domain *d; 3.11 + unsigned long pfn; 3.12 + 3.13 + /* Interface types to internal p2m types */ 3.14 + p2m_type_t memtype[] = { 3.15 + p2m_ram_rw, /* HVMMEM_ram_rw */ 3.16 + p2m_ram_ro, /* HVMMEM_ram_ro */ 3.17 + p2m_mmio_dm /* HVMMEM_mmio_dm */ 3.18 + }; 3.19 + 3.20 + if ( copy_from_guest(&a, arg, 1) ) 3.21 + return -EFAULT; 3.22 + 3.23 + if ( a.domid == DOMID_SELF ) 3.24 + { 3.25 + d = rcu_lock_current_domain(); 3.26 + } 3.27 + else 3.28 + { 3.29 + if ( (d = rcu_lock_domain_by_id(a.domid)) == NULL ) 3.30 + return -ESRCH; 3.31 + if ( !IS_PRIV_FOR(current->domain, d) ) 3.32 + { 3.33 + rc = -EPERM; 3.34 + goto param_fail4; 3.35 + } 3.36 + } 3.37 + 3.38 + rc = -EINVAL; 3.39 + if ( !is_hvm_domain(d) ) 3.40 + goto param_fail4; 3.41 + 3.42 + rc = -EINVAL; 3.43 + if ( (a.first_pfn > domain_get_maximum_gpfn(d)) || 3.44 + ((a.first_pfn + a.nr - 1) < a.first_pfn) || 3.45 + ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) ) 3.46 + goto param_fail4; 3.47 + 3.48 + if ( a.hvmmem_type >= ARRAY_SIZE(memtype) ) 3.49 + goto param_fail4; 3.50 + 3.51 + rc = 0; 3.52 + 3.53 + for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ ) 3.54 + { 3.55 + p2m_type_t t; 3.56 + mfn_t mfn; 3.57 + mfn = gfn_to_mfn(d, pfn, &t); 3.58 + p2m_change_type(d, pfn, t, memtype[a.hvmmem_type]); 3.59 + } 3.60 + 3.61 + param_fail4: 3.62 + rcu_unlock_domain(d); 3.63 + break; 3.64 + } 3.65 + 3.66 default: 3.67 { 3.68 gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
4.1 --- a/xen/include/public/hvm/hvm_op.h Thu Jul 10 15:19:56 2008 +0100 4.2 +++ b/xen/include/public/hvm/hvm_op.h Thu Jul 10 15:30:39 2008 +0100 4.3 @@ -105,6 +105,27 @@ struct xen_hvm_modified_memory { 4.4 typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t; 4.5 DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t); 4.6 4.7 +#define HVMOP_set_mem_type 8 4.8 +typedef enum { 4.9 + HVMMEM_ram_rw, /* Normal read/write guest RAM */ 4.10 + HVMMEM_ram_ro, /* Read-only; writes are discarded */ 4.11 + HVMMEM_mmio_dm, /* Reads and write go to the device model */ 4.12 +} hvmmem_type_t; 4.13 +/* Notify that a region of memory is to be treated in a specific way. */ 4.14 +struct xen_hvm_set_mem_type { 4.15 + /* Domain to be updated. */ 4.16 + domid_t domid; 4.17 + /* Memory type */ 4.18 + hvmmem_type_t hvmmem_type; 4.19 + /* First pfn. */ 4.20 + uint64_aligned_t first_pfn; 4.21 + /* Number of pages. */ 4.22 + uint64_aligned_t nr; 4.23 +}; 4.24 +typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t; 4.25 +DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t); 4.26 + 4.27 + 4.28 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ 4.29 4.30 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */