debuggers.hg
changeset 22291:490b7420deba
libxc: pass an xc_interface handle to page locking functions
Not actually used here but useful to confirm that a handle is passed
down to each location where it will be required once we switch to
hypercall buffers.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
author      Ian Campbell <ian.campbell@citrix.com>
date        Mon Oct 18 16:43:58 2010 +0100 (2010-10-18)
parents     86cf01f3d737
children    ffe0cdb77a7b
files       tools/libxc/xc_acm.c tools/libxc/xc_cpupool.c tools/libxc/xc_domain.c tools/libxc/xc_domain_restore.c tools/libxc/xc_domain_save.c tools/libxc/xc_evtchn.c tools/libxc/xc_flask.c tools/libxc/xc_linux.c tools/libxc/xc_misc.c tools/libxc/xc_offline_page.c tools/libxc/xc_pm.c tools/libxc/xc_private.c tools/libxc/xc_private.h tools/libxc/xc_resume.c tools/libxc/xc_tbuf.c tools/libxc/xc_tmem.c
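The change is mechanical at the call sites: every caller of lock_pages()/unlock_pages() (and of hcall_buf_prep()/hcall_buf_release()) now passes its xc_interface handle as the first argument, which is otherwise ignored for the moment. A minimal before/after sketch of the common call pattern follows; the wrapper function and its arguments are illustrative, not a verbatim excerpt from any one file in the diff.

    /* Illustrative call site, assuming the usual libxc locking pattern. */
    static int do_some_hypercall(xc_interface *xch, void *arg, size_t len)
    {
        DECLARE_HYPERCALL;
        int ret = -1;

        hypercall.op = __HYPERVISOR_xsm_op;       /* whichever op applies */
        hypercall.arg[0] = (unsigned long)arg;

        /* Before this changeset: lock_pages(arg, len).  Now the handle is
         * threaded through even though it is not yet used. */
        if ( lock_pages(xch, arg, len) != 0 )
        {
            PERROR("Could not lock memory for Xen hypercall");
            return -EFAULT;
        }

        ret = do_xen_hypercall(xch, &hypercall);

        unlock_pages(xch, arg, len);
        return ret;
    }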
1.1 --- a/tools/libxc/xc_acm.c Mon Oct 18 16:43:15 2010 +0100 1.2 +++ b/tools/libxc/xc_acm.c Mon Oct 18 16:43:58 2010 +0100 1.3 @@ -92,7 +92,7 @@ int xc_acm_op(xc_interface *xch, int cmd 1.4 1.5 hypercall.op = __HYPERVISOR_xsm_op; 1.6 hypercall.arg[0] = (unsigned long)&acmctl; 1.7 - if ( lock_pages(&acmctl, sizeof(acmctl)) != 0) 1.8 + if ( lock_pages(xch, &acmctl, sizeof(acmctl)) != 0) 1.9 { 1.10 PERROR("Could not lock memory for Xen hypercall"); 1.11 return -EFAULT; 1.12 @@ -103,7 +103,7 @@ int xc_acm_op(xc_interface *xch, int cmd 1.13 DPRINTF("acmctl operation failed -- need to" 1.14 " rebuild the user-space tool set?\n"); 1.15 } 1.16 - unlock_pages(&acmctl, sizeof(acmctl)); 1.17 + unlock_pages(xch, &acmctl, sizeof(acmctl)); 1.18 1.19 switch (cmd) { 1.20 case ACMOP_getdecision: {
2.1 --- a/tools/libxc/xc_cpupool.c Mon Oct 18 16:43:15 2010 +0100 2.2 +++ b/tools/libxc/xc_cpupool.c Mon Oct 18 16:43:58 2010 +0100 2.3 @@ -85,13 +85,13 @@ int xc_cpupool_getinfo(xc_interface *xch 2.4 set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local); 2.5 sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8; 2.6 2.7 - if ( (err = lock_pages(local, sizeof(local))) != 0 ) 2.8 + if ( (err = lock_pages(xch, local, sizeof(local))) != 0 ) 2.9 { 2.10 PERROR("Could not lock memory for Xen hypercall"); 2.11 break; 2.12 } 2.13 err = do_sysctl_save(xch, &sysctl); 2.14 - unlock_pages(local, sizeof (local)); 2.15 + unlock_pages(xch, local, sizeof (local)); 2.16 2.17 if ( err < 0 ) 2.18 break; 2.19 @@ -161,14 +161,14 @@ int xc_cpupool_freeinfo(xc_interface *xc 2.20 set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local); 2.21 sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8; 2.22 2.23 - if ( (err = lock_pages(local, sizeof(local))) != 0 ) 2.24 + if ( (err = lock_pages(xch, local, sizeof(local))) != 0 ) 2.25 { 2.26 PERROR("Could not lock memory for Xen hypercall"); 2.27 return err; 2.28 } 2.29 2.30 err = do_sysctl_save(xch, &sysctl); 2.31 - unlock_pages(local, sizeof (local)); 2.32 + unlock_pages(xch, local, sizeof (local)); 2.33 2.34 if (err < 0) 2.35 return err;
3.1 --- a/tools/libxc/xc_domain.c Mon Oct 18 16:43:15 2010 +0100 3.2 +++ b/tools/libxc/xc_domain.c Mon Oct 18 16:43:58 2010 +0100 3.3 @@ -94,7 +94,7 @@ int xc_domain_shutdown(xc_interface *xch 3.4 arg.domain_id = domid; 3.5 arg.reason = reason; 3.6 3.7 - if ( lock_pages(&arg, sizeof(arg)) != 0 ) 3.8 + if ( lock_pages(xch, &arg, sizeof(arg)) != 0 ) 3.9 { 3.10 PERROR("Could not lock memory for Xen hypercall"); 3.11 goto out1; 3.12 @@ -102,7 +102,7 @@ int xc_domain_shutdown(xc_interface *xch 3.13 3.14 ret = do_xen_hypercall(xch, &hypercall); 3.15 3.16 - unlock_pages(&arg, sizeof(arg)); 3.17 + unlock_pages(xch, &arg, sizeof(arg)); 3.18 3.19 out1: 3.20 return ret; 3.21 @@ -133,7 +133,7 @@ int xc_vcpu_setaffinity(xc_interface *xc 3.22 3.23 domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; 3.24 3.25 - if ( lock_pages(local, cpusize) != 0 ) 3.26 + if ( lock_pages(xch, local, cpusize) != 0 ) 3.27 { 3.28 PERROR("Could not lock memory for Xen hypercall"); 3.29 goto out; 3.30 @@ -141,7 +141,7 @@ int xc_vcpu_setaffinity(xc_interface *xc 3.31 3.32 ret = do_domctl(xch, &domctl); 3.33 3.34 - unlock_pages(local, cpusize); 3.35 + unlock_pages(xch, local, cpusize); 3.36 3.37 out: 3.38 free(local); 3.39 @@ -172,7 +172,7 @@ int xc_vcpu_getaffinity(xc_interface *xc 3.40 set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); 3.41 domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; 3.42 3.43 - if ( lock_pages(local, sizeof(local)) != 0 ) 3.44 + if ( lock_pages(xch, local, sizeof(local)) != 0 ) 3.45 { 3.46 PERROR("Could not lock memory for Xen hypercall"); 3.47 goto out; 3.48 @@ -180,7 +180,7 @@ int xc_vcpu_getaffinity(xc_interface *xc 3.49 3.50 ret = do_domctl(xch, &domctl); 3.51 3.52 - unlock_pages(local, sizeof (local)); 3.53 + unlock_pages(xch, local, sizeof (local)); 3.54 bitmap_byte_to_64(cpumap, local, cpusize * 8); 3.55 out: 3.56 free(local); 3.57 @@ -257,7 +257,7 @@ int xc_domain_getinfolist(xc_interface * 3.58 int ret = 0; 3.59 DECLARE_SYSCTL; 3.60 3.61 - if ( lock_pages(info, max_domains*sizeof(xc_domaininfo_t)) != 0 ) 3.62 + if ( lock_pages(xch, info, max_domains*sizeof(xc_domaininfo_t)) != 0 ) 3.63 return -1; 3.64 3.65 sysctl.cmd = XEN_SYSCTL_getdomaininfolist; 3.66 @@ -270,7 +270,7 @@ int xc_domain_getinfolist(xc_interface * 3.67 else 3.68 ret = sysctl.u.getdomaininfolist.num_domains; 3.69 3.70 - unlock_pages(info, max_domains*sizeof(xc_domaininfo_t)); 3.71 + unlock_pages(xch, info, max_domains*sizeof(xc_domaininfo_t)); 3.72 3.73 return ret; 3.74 } 3.75 @@ -290,13 +290,13 @@ int xc_domain_hvm_getcontext(xc_interfac 3.76 set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf); 3.77 3.78 if ( ctxt_buf ) 3.79 - if ( (ret = lock_pages(ctxt_buf, size)) != 0 ) 3.80 + if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 ) 3.81 return ret; 3.82 3.83 ret = do_domctl(xch, &domctl); 3.84 3.85 if ( ctxt_buf ) 3.86 - unlock_pages(ctxt_buf, size); 3.87 + unlock_pages(xch, ctxt_buf, size); 3.88 3.89 return (ret < 0 ? -1 : domctl.u.hvmcontext.size); 3.90 } 3.91 @@ -322,13 +322,13 @@ int xc_domain_hvm_getcontext_partial(xc_ 3.92 domctl.u.hvmcontext_partial.instance = instance; 3.93 set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf); 3.94 3.95 - if ( (ret = lock_pages(ctxt_buf, size)) != 0 ) 3.96 + if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 ) 3.97 return ret; 3.98 3.99 ret = do_domctl(xch, &domctl); 3.100 3.101 if ( ctxt_buf ) 3.102 - unlock_pages(ctxt_buf, size); 3.103 + unlock_pages(xch, ctxt_buf, size); 3.104 3.105 return ret ? 
-1 : 0; 3.106 } 3.107 @@ -347,12 +347,12 @@ int xc_domain_hvm_setcontext(xc_interfac 3.108 domctl.u.hvmcontext.size = size; 3.109 set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf); 3.110 3.111 - if ( (ret = lock_pages(ctxt_buf, size)) != 0 ) 3.112 + if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 ) 3.113 return ret; 3.114 3.115 ret = do_domctl(xch, &domctl); 3.116 3.117 - unlock_pages(ctxt_buf, size); 3.118 + unlock_pages(xch, ctxt_buf, size); 3.119 3.120 return ret; 3.121 } 3.122 @@ -372,10 +372,10 @@ int xc_vcpu_getcontext(xc_interface *xch 3.123 set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c); 3.124 3.125 3.126 - if ( (rc = lock_pages(ctxt, sz)) != 0 ) 3.127 + if ( (rc = lock_pages(xch, ctxt, sz)) != 0 ) 3.128 return rc; 3.129 rc = do_domctl(xch, &domctl); 3.130 - unlock_pages(ctxt, sz); 3.131 + unlock_pages(xch, ctxt, sz); 3.132 3.133 return rc; 3.134 } 3.135 @@ -394,7 +394,7 @@ int xc_watchdog(xc_interface *xch, 3.136 arg.id = id; 3.137 arg.timeout = timeout; 3.138 3.139 - if ( lock_pages(&arg, sizeof(arg)) != 0 ) 3.140 + if ( lock_pages(xch, &arg, sizeof(arg)) != 0 ) 3.141 { 3.142 PERROR("Could not lock memory for Xen hypercall"); 3.143 goto out1; 3.144 @@ -402,7 +402,7 @@ int xc_watchdog(xc_interface *xch, 3.145 3.146 ret = do_xen_hypercall(xch, &hypercall); 3.147 3.148 - unlock_pages(&arg, sizeof(arg)); 3.149 + unlock_pages(xch, &arg, sizeof(arg)); 3.150 3.151 out1: 3.152 return ret; 3.153 @@ -488,7 +488,7 @@ int xc_domain_set_memmap_limit(xc_interf 3.154 3.155 set_xen_guest_handle(fmap.map.buffer, &e820); 3.156 3.157 - if ( lock_pages(&fmap, sizeof(fmap)) || lock_pages(&e820, sizeof(e820)) ) 3.158 + if ( lock_pages(xch, &fmap, sizeof(fmap)) || lock_pages(xch, &e820, sizeof(e820)) ) 3.159 { 3.160 PERROR("Could not lock memory for Xen hypercall"); 3.161 rc = -1; 3.162 @@ -498,8 +498,8 @@ int xc_domain_set_memmap_limit(xc_interf 3.163 rc = xc_memory_op(xch, XENMEM_set_memory_map, &fmap); 3.164 3.165 out: 3.166 - unlock_pages(&fmap, sizeof(fmap)); 3.167 - unlock_pages(&e820, sizeof(e820)); 3.168 + unlock_pages(xch, &fmap, sizeof(fmap)); 3.169 + unlock_pages(xch, &e820, sizeof(e820)); 3.170 return rc; 3.171 } 3.172 #else 3.173 @@ -564,7 +564,7 @@ int xc_domain_get_tsc_info(xc_interface 3.174 domctl.cmd = XEN_DOMCTL_gettscinfo; 3.175 domctl.domain = (domid_t)domid; 3.176 set_xen_guest_handle(domctl.u.tsc_info.out_info, &info); 3.177 - if ( (rc = lock_pages(&info, sizeof(info))) != 0 ) 3.178 + if ( (rc = lock_pages(xch, &info, sizeof(info))) != 0 ) 3.179 return rc; 3.180 rc = do_domctl(xch, &domctl); 3.181 if ( rc == 0 ) 3.182 @@ -574,7 +574,7 @@ int xc_domain_get_tsc_info(xc_interface 3.183 *gtsc_khz = info.gtsc_khz; 3.184 *incarnation = info.incarnation; 3.185 } 3.186 - unlock_pages(&info,sizeof(info)); 3.187 + unlock_pages(xch, &info,sizeof(info)); 3.188 return rc; 3.189 } 3.190 3.191 @@ -849,11 +849,11 @@ int xc_vcpu_setcontext(xc_interface *xch 3.192 domctl.u.vcpucontext.vcpu = vcpu; 3.193 set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c); 3.194 3.195 - if ( (rc = lock_pages(ctxt, sz)) != 0 ) 3.196 + if ( (rc = lock_pages(xch, ctxt, sz)) != 0 ) 3.197 return rc; 3.198 rc = do_domctl(xch, &domctl); 3.199 3.200 - unlock_pages(ctxt, sz); 3.201 + unlock_pages(xch, ctxt, sz); 3.202 3.203 return rc; 3.204 } 3.205 @@ -917,10 +917,10 @@ int xc_set_hvm_param(xc_interface *handl 3.206 arg.domid = dom; 3.207 arg.index = param; 3.208 arg.value = value; 3.209 - if ( lock_pages(&arg, sizeof(arg)) != 0 ) 3.210 + if ( lock_pages(handle, &arg, sizeof(arg)) != 0 ) 3.211 
return -1; 3.212 rc = do_xen_hypercall(handle, &hypercall); 3.213 - unlock_pages(&arg, sizeof(arg)); 3.214 + unlock_pages(handle, &arg, sizeof(arg)); 3.215 return rc; 3.216 } 3.217 3.218 @@ -935,10 +935,10 @@ int xc_get_hvm_param(xc_interface *handl 3.219 hypercall.arg[1] = (unsigned long)&arg; 3.220 arg.domid = dom; 3.221 arg.index = param; 3.222 - if ( lock_pages(&arg, sizeof(arg)) != 0 ) 3.223 + if ( lock_pages(handle, &arg, sizeof(arg)) != 0 ) 3.224 return -1; 3.225 rc = do_xen_hypercall(handle, &hypercall); 3.226 - unlock_pages(&arg, sizeof(arg)); 3.227 + unlock_pages(handle, &arg, sizeof(arg)); 3.228 *value = arg.value; 3.229 return rc; 3.230 } 3.231 @@ -988,13 +988,13 @@ int xc_get_device_group( 3.232 3.233 set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array); 3.234 3.235 - if ( lock_pages(sdev_array, max_sdevs * sizeof(*sdev_array)) != 0 ) 3.236 + if ( lock_pages(xch, sdev_array, max_sdevs * sizeof(*sdev_array)) != 0 ) 3.237 { 3.238 PERROR("Could not lock memory for xc_get_device_group"); 3.239 return -ENOMEM; 3.240 } 3.241 rc = do_domctl(xch, &domctl); 3.242 - unlock_pages(sdev_array, max_sdevs * sizeof(*sdev_array)); 3.243 + unlock_pages(xch, sdev_array, max_sdevs * sizeof(*sdev_array)); 3.244 3.245 *num_sdevs = domctl.u.get_device_group.num_sdevs; 3.246 return rc;
4.1 --- a/tools/libxc/xc_domain_restore.c Mon Oct 18 16:43:15 2010 +0100 4.2 +++ b/tools/libxc/xc_domain_restore.c Mon Oct 18 16:43:58 2010 +0100 4.3 @@ -1181,13 +1181,13 @@ int xc_domain_restore(xc_interface *xch, 4.4 memset(ctx->p2m_batch, 0, 4.5 ROUNDUP(MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT)); 4.6 4.7 - if ( lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) ) 4.8 + if ( lock_pages(xch, region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) ) 4.9 { 4.10 PERROR("Could not lock region_mfn"); 4.11 goto out; 4.12 } 4.13 4.14 - if ( lock_pages(ctx->p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) ) 4.15 + if ( lock_pages(xch, ctx->p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) ) 4.16 { 4.17 ERROR("Could not lock p2m_batch"); 4.18 goto out; 4.19 @@ -1547,7 +1547,7 @@ int xc_domain_restore(xc_interface *xch, 4.20 } 4.21 } 4.22 4.23 - if ( lock_pages(&ctxt, sizeof(ctxt)) ) 4.24 + if ( lock_pages(xch, &ctxt, sizeof(ctxt)) ) 4.25 { 4.26 PERROR("Unable to lock ctxt"); 4.27 return 1;
5.1 --- a/tools/libxc/xc_domain_save.c Mon Oct 18 16:43:15 2010 +0100 5.2 +++ b/tools/libxc/xc_domain_save.c Mon Oct 18 16:43:58 2010 +0100 5.3 @@ -1046,14 +1046,14 @@ int xc_domain_save(xc_interface *xch, in 5.4 5.5 memset(to_send, 0xff, BITMAP_SIZE); 5.6 5.7 - if ( lock_pages(to_send, BITMAP_SIZE) ) 5.8 + if ( lock_pages(xch, to_send, BITMAP_SIZE) ) 5.9 { 5.10 PERROR("Unable to lock to_send"); 5.11 return 1; 5.12 } 5.13 5.14 /* (to fix is local only) */ 5.15 - if ( lock_pages(to_skip, BITMAP_SIZE) ) 5.16 + if ( lock_pages(xch, to_skip, BITMAP_SIZE) ) 5.17 { 5.18 PERROR("Unable to lock to_skip"); 5.19 return 1; 5.20 @@ -1091,7 +1091,7 @@ int xc_domain_save(xc_interface *xch, in 5.21 memset(pfn_type, 0, 5.22 ROUNDUP(MAX_BATCH_SIZE * sizeof(*pfn_type), PAGE_SHIFT)); 5.23 5.24 - if ( lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type)) ) 5.25 + if ( lock_pages(xch, pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type)) ) 5.26 { 5.27 PERROR("Unable to lock pfn_type array"); 5.28 goto out;
6.1 --- a/tools/libxc/xc_evtchn.c Mon Oct 18 16:43:15 2010 +0100 6.2 +++ b/tools/libxc/xc_evtchn.c Mon Oct 18 16:43:58 2010 +0100 6.3 @@ -33,7 +33,7 @@ static int do_evtchn_op(xc_interface *xc 6.4 hypercall.arg[0] = cmd; 6.5 hypercall.arg[1] = (unsigned long)arg; 6.6 6.7 - if ( lock_pages(arg, arg_size) != 0 ) 6.8 + if ( lock_pages(xch, arg, arg_size) != 0 ) 6.9 { 6.10 PERROR("do_evtchn_op: arg lock failed"); 6.11 goto out; 6.12 @@ -42,7 +42,7 @@ static int do_evtchn_op(xc_interface *xc 6.13 if ((ret = do_xen_hypercall(xch, &hypercall)) < 0 && !silently_fail) 6.14 ERROR("do_evtchn_op: HYPERVISOR_event_channel_op failed: %d", ret); 6.15 6.16 - unlock_pages(arg, arg_size); 6.17 + unlock_pages(xch, arg, arg_size); 6.18 out: 6.19 return ret; 6.20 }
7.1 --- a/tools/libxc/xc_flask.c Mon Oct 18 16:43:15 2010 +0100 7.2 +++ b/tools/libxc/xc_flask.c Mon Oct 18 16:43:58 2010 +0100 7.3 @@ -44,7 +44,7 @@ int xc_flask_op(xc_interface *xch, flask 7.4 hypercall.op = __HYPERVISOR_xsm_op; 7.5 hypercall.arg[0] = (unsigned long)op; 7.6 7.7 - if ( lock_pages(op, sizeof(*op)) != 0 ) 7.8 + if ( lock_pages(xch, op, sizeof(*op)) != 0 ) 7.9 { 7.10 PERROR("Could not lock memory for Xen hypercall"); 7.11 goto out; 7.12 @@ -56,7 +56,7 @@ int xc_flask_op(xc_interface *xch, flask 7.13 fprintf(stderr, "XSM operation failed!\n"); 7.14 } 7.15 7.16 - unlock_pages(op, sizeof(*op)); 7.17 + unlock_pages(xch, op, sizeof(*op)); 7.18 7.19 out: 7.20 return ret;
8.1 --- a/tools/libxc/xc_linux.c Mon Oct 18 16:43:15 2010 +0100 8.2 +++ b/tools/libxc/xc_linux.c Mon Oct 18 16:43:58 2010 +0100 8.3 @@ -618,7 +618,7 @@ int xc_gnttab_op(xc_interface *xch, int 8.4 hypercall.arg[1] = (unsigned long)op; 8.5 hypercall.arg[2] = count; 8.6 8.7 - if ( lock_pages(op, count* op_size) != 0 ) 8.8 + if ( lock_pages(xch, op, count* op_size) != 0 ) 8.9 { 8.10 PERROR("Could not lock memory for Xen hypercall"); 8.11 goto out1; 8.12 @@ -626,7 +626,7 @@ int xc_gnttab_op(xc_interface *xch, int 8.13 8.14 ret = do_xen_hypercall(xch, &hypercall); 8.15 8.16 - unlock_pages(op, count * op_size); 8.17 + unlock_pages(xch, op, count * op_size); 8.18 8.19 out1: 8.20 return ret; 8.21 @@ -670,7 +670,7 @@ static void *_gnttab_map_table(xc_interf 8.22 *gnt_num = query.nr_frames * (PAGE_SIZE / sizeof(grant_entry_v1_t) ); 8.23 8.24 frame_list = malloc(query.nr_frames * sizeof(unsigned long)); 8.25 - if ( !frame_list || lock_pages(frame_list, 8.26 + if ( !frame_list || lock_pages(xch, frame_list, 8.27 query.nr_frames * sizeof(unsigned long)) ) 8.28 { 8.29 ERROR("Alloc/lock frame_list in xc_gnttab_map_table\n"); 8.30 @@ -714,7 +714,7 @@ static void *_gnttab_map_table(xc_interf 8.31 err: 8.32 if ( frame_list ) 8.33 { 8.34 - unlock_pages(frame_list, query.nr_frames * sizeof(unsigned long)); 8.35 + unlock_pages(xch, frame_list, query.nr_frames * sizeof(unsigned long)); 8.36 free(frame_list); 8.37 } 8.38 if ( pfn_list )
9.1 --- a/tools/libxc/xc_misc.c Mon Oct 18 16:43:15 2010 +0100 9.2 +++ b/tools/libxc/xc_misc.c Mon Oct 18 16:43:58 2010 +0100 9.3 @@ -42,7 +42,7 @@ int xc_readconsolering(xc_interface *xch 9.4 sysctl.u.readconsole.incremental = incremental; 9.5 } 9.6 9.7 - if ( (ret = lock_pages(buffer, nr_chars)) != 0 ) 9.8 + if ( (ret = lock_pages(xch, buffer, nr_chars)) != 0 ) 9.9 return ret; 9.10 9.11 if ( (ret = do_sysctl(xch, &sysctl)) == 0 ) 9.12 @@ -52,7 +52,7 @@ int xc_readconsolering(xc_interface *xch 9.13 *pindex = sysctl.u.readconsole.index; 9.14 } 9.15 9.16 - unlock_pages(buffer, nr_chars); 9.17 + unlock_pages(xch, buffer, nr_chars); 9.18 9.19 return ret; 9.20 } 9.21 @@ -66,12 +66,12 @@ int xc_send_debug_keys(xc_interface *xch 9.22 set_xen_guest_handle(sysctl.u.debug_keys.keys, keys); 9.23 sysctl.u.debug_keys.nr_keys = len; 9.24 9.25 - if ( (ret = lock_pages(keys, len)) != 0 ) 9.26 + if ( (ret = lock_pages(xch, keys, len)) != 0 ) 9.27 return ret; 9.28 9.29 ret = do_sysctl(xch, &sysctl); 9.30 9.31 - unlock_pages(keys, len); 9.32 + unlock_pages(xch, keys, len); 9.33 9.34 return ret; 9.35 } 9.36 @@ -154,7 +154,7 @@ int xc_mca_op(xc_interface *xch, struct 9.37 DECLARE_HYPERCALL; 9.38 9.39 mc->interface_version = XEN_MCA_INTERFACE_VERSION; 9.40 - if ( lock_pages(mc, sizeof(mc)) ) 9.41 + if ( lock_pages(xch, mc, sizeof(mc)) ) 9.42 { 9.43 PERROR("Could not lock xen_mc memory"); 9.44 return -EINVAL; 9.45 @@ -163,7 +163,7 @@ int xc_mca_op(xc_interface *xch, struct 9.46 hypercall.op = __HYPERVISOR_mca; 9.47 hypercall.arg[0] = (unsigned long)mc; 9.48 ret = do_xen_hypercall(xch, &hypercall); 9.49 - unlock_pages(mc, sizeof(mc)); 9.50 + unlock_pages(xch, mc, sizeof(mc)); 9.51 return ret; 9.52 } 9.53 #endif 9.54 @@ -227,12 +227,12 @@ int xc_getcpuinfo(xc_interface *xch, int 9.55 sysctl.u.getcpuinfo.max_cpus = max_cpus; 9.56 set_xen_guest_handle(sysctl.u.getcpuinfo.info, info); 9.57 9.58 - if ( (rc = lock_pages(info, max_cpus*sizeof(*info))) != 0 ) 9.59 + if ( (rc = lock_pages(xch, info, max_cpus*sizeof(*info))) != 0 ) 9.60 return rc; 9.61 9.62 rc = do_sysctl(xch, &sysctl); 9.63 9.64 - unlock_pages(info, max_cpus*sizeof(*info)); 9.65 + unlock_pages(xch, info, max_cpus*sizeof(*info)); 9.66 9.67 if ( nr_cpus ) 9.68 *nr_cpus = sysctl.u.getcpuinfo.nr_cpus; 9.69 @@ -250,7 +250,7 @@ int xc_hvm_set_pci_intx_level( 9.70 struct xen_hvm_set_pci_intx_level _arg, *arg = &_arg; 9.71 int rc; 9.72 9.73 - if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 ) 9.74 + if ( (rc = hcall_buf_prep(xch, (void **)&arg, sizeof(*arg))) != 0 ) 9.75 { 9.76 PERROR("Could not lock memory"); 9.77 return rc; 9.78 @@ -269,7 +269,7 @@ int xc_hvm_set_pci_intx_level( 9.79 9.80 rc = do_xen_hypercall(xch, &hypercall); 9.81 9.82 - hcall_buf_release((void **)&arg, sizeof(*arg)); 9.83 + hcall_buf_release(xch, (void **)&arg, sizeof(*arg)); 9.84 9.85 return rc; 9.86 } 9.87 @@ -283,7 +283,7 @@ int xc_hvm_set_isa_irq_level( 9.88 struct xen_hvm_set_isa_irq_level _arg, *arg = &_arg; 9.89 int rc; 9.90 9.91 - if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 ) 9.92 + if ( (rc = hcall_buf_prep(xch, (void **)&arg, sizeof(*arg))) != 0 ) 9.93 { 9.94 PERROR("Could not lock memory"); 9.95 return rc; 9.96 @@ -299,7 +299,7 @@ int xc_hvm_set_isa_irq_level( 9.97 9.98 rc = do_xen_hypercall(xch, &hypercall); 9.99 9.100 - hcall_buf_release((void **)&arg, sizeof(*arg)); 9.101 + hcall_buf_release(xch, (void **)&arg, sizeof(*arg)); 9.102 9.103 return rc; 9.104 } 9.105 @@ -319,7 +319,7 @@ int xc_hvm_set_pci_link_route( 9.106 arg.link = link; 9.107 
arg.isa_irq = isa_irq; 9.108 9.109 - if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 ) 9.110 + if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 ) 9.111 { 9.112 PERROR("Could not lock memory"); 9.113 return rc; 9.114 @@ -327,7 +327,7 @@ int xc_hvm_set_pci_link_route( 9.115 9.116 rc = do_xen_hypercall(xch, &hypercall); 9.117 9.118 - unlock_pages(&arg, sizeof(arg)); 9.119 + unlock_pages(xch, &arg, sizeof(arg)); 9.120 9.121 return rc; 9.122 } 9.123 @@ -350,7 +350,7 @@ int xc_hvm_track_dirty_vram( 9.124 arg.nr = nr; 9.125 set_xen_guest_handle(arg.dirty_bitmap, (uint8_t *)dirty_bitmap); 9.126 9.127 - if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 ) 9.128 + if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 ) 9.129 { 9.130 PERROR("Could not lock memory"); 9.131 return rc; 9.132 @@ -358,7 +358,7 @@ int xc_hvm_track_dirty_vram( 9.133 9.134 rc = do_xen_hypercall(xch, &hypercall); 9.135 9.136 - unlock_pages(&arg, sizeof(arg)); 9.137 + unlock_pages(xch, &arg, sizeof(arg)); 9.138 9.139 return rc; 9.140 } 9.141 @@ -378,7 +378,7 @@ int xc_hvm_modified_memory( 9.142 arg.first_pfn = first_pfn; 9.143 arg.nr = nr; 9.144 9.145 - if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 ) 9.146 + if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 ) 9.147 { 9.148 PERROR("Could not lock memory"); 9.149 return rc; 9.150 @@ -386,7 +386,7 @@ int xc_hvm_modified_memory( 9.151 9.152 rc = do_xen_hypercall(xch, &hypercall); 9.153 9.154 - unlock_pages(&arg, sizeof(arg)); 9.155 + unlock_pages(xch, &arg, sizeof(arg)); 9.156 9.157 return rc; 9.158 } 9.159 @@ -407,7 +407,7 @@ int xc_hvm_set_mem_type( 9.160 arg.first_pfn = first_pfn; 9.161 arg.nr = nr; 9.162 9.163 - if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 ) 9.164 + if ( (rc = lock_pages(xch, &arg, sizeof(arg))) != 0 ) 9.165 { 9.166 PERROR("Could not lock memory"); 9.167 return rc; 9.168 @@ -415,7 +415,7 @@ int xc_hvm_set_mem_type( 9.169 9.170 rc = do_xen_hypercall(xch, &hypercall); 9.171 9.172 - unlock_pages(&arg, sizeof(arg)); 9.173 + unlock_pages(xch, &arg, sizeof(arg)); 9.174 9.175 return rc; 9.176 }
10.1 --- a/tools/libxc/xc_offline_page.c Mon Oct 18 16:43:15 2010 +0100 10.2 +++ b/tools/libxc/xc_offline_page.c Mon Oct 18 16:43:58 2010 +0100 10.3 @@ -71,7 +71,7 @@ int xc_mark_page_online(xc_interface *xc 10.4 if ( !status || (end < start) ) 10.5 return -EINVAL; 10.6 10.7 - if (lock_pages(status, sizeof(uint32_t)*(end - start + 1))) 10.8 + if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1))) 10.9 { 10.10 ERROR("Could not lock memory for xc_mark_page_online\n"); 10.11 return -EINVAL; 10.12 @@ -84,7 +84,7 @@ int xc_mark_page_online(xc_interface *xc 10.13 set_xen_guest_handle(sysctl.u.page_offline.status, status); 10.14 ret = xc_sysctl(xch, &sysctl); 10.15 10.16 - unlock_pages(status, sizeof(uint32_t)*(end - start + 1)); 10.17 + unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)); 10.18 10.19 return ret; 10.20 } 10.21 @@ -98,7 +98,7 @@ int xc_mark_page_offline(xc_interface *x 10.22 if ( !status || (end < start) ) 10.23 return -EINVAL; 10.24 10.25 - if (lock_pages(status, sizeof(uint32_t)*(end - start + 1))) 10.26 + if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1))) 10.27 { 10.28 ERROR("Could not lock memory for xc_mark_page_offline"); 10.29 return -EINVAL; 10.30 @@ -111,7 +111,7 @@ int xc_mark_page_offline(xc_interface *x 10.31 set_xen_guest_handle(sysctl.u.page_offline.status, status); 10.32 ret = xc_sysctl(xch, &sysctl); 10.33 10.34 - unlock_pages(status, sizeof(uint32_t)*(end - start + 1)); 10.35 + unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)); 10.36 10.37 return ret; 10.38 } 10.39 @@ -125,7 +125,7 @@ int xc_query_page_offline_status(xc_inte 10.40 if ( !status || (end < start) ) 10.41 return -EINVAL; 10.42 10.43 - if (lock_pages(status, sizeof(uint32_t)*(end - start + 1))) 10.44 + if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1))) 10.45 { 10.46 ERROR("Could not lock memory for xc_query_page_offline_status\n"); 10.47 return -EINVAL; 10.48 @@ -138,7 +138,7 @@ int xc_query_page_offline_status(xc_inte 10.49 set_xen_guest_handle(sysctl.u.page_offline.status, status); 10.50 ret = xc_sysctl(xch, &sysctl); 10.51 10.52 - unlock_pages(status, sizeof(uint32_t)*(end - start + 1)); 10.53 + unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)); 10.54 10.55 return ret; 10.56 } 10.57 @@ -291,7 +291,7 @@ static int init_mem_info(xc_interface *x 10.58 minfo->pfn_type[i] = pfn_to_mfn(i, minfo->p2m_table, 10.59 minfo->guest_width); 10.60 10.61 - if ( lock_pages(minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type)) ) 10.62 + if ( lock_pages(xch, minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type)) ) 10.63 { 10.64 ERROR("Unable to lock pfn_type array"); 10.65 goto failed; 10.66 @@ -310,7 +310,7 @@ static int init_mem_info(xc_interface *x 10.67 return 0; 10.68 10.69 unlock: 10.70 - unlock_pages(minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type)); 10.71 + unlock_pages(xch, minfo->pfn_type, minfo->p2m_size * sizeof(*minfo->pfn_type)); 10.72 failed: 10.73 if (minfo->pfn_type) 10.74 {
11.1 --- a/tools/libxc/xc_pm.c Mon Oct 18 16:43:15 2010 +0100 11.2 +++ b/tools/libxc/xc_pm.c Mon Oct 18 16:43:58 2010 +0100 11.3 @@ -53,14 +53,14 @@ int xc_pm_get_pxstat(xc_interface *xch, 11.4 if ( (ret = xc_pm_get_max_px(xch, cpuid, &max_px)) != 0) 11.5 return ret; 11.6 11.7 - if ( (ret = lock_pages(pxpt->trans_pt, 11.8 + if ( (ret = lock_pages(xch, pxpt->trans_pt, 11.9 max_px * max_px * sizeof(uint64_t))) != 0 ) 11.10 return ret; 11.11 11.12 - if ( (ret = lock_pages(pxpt->pt, 11.13 + if ( (ret = lock_pages(xch, pxpt->pt, 11.14 max_px * sizeof(struct xc_px_val))) != 0 ) 11.15 { 11.16 - unlock_pages(pxpt->trans_pt, max_px * max_px * sizeof(uint64_t)); 11.17 + unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t)); 11.18 return ret; 11.19 } 11.20 11.21 @@ -75,8 +75,8 @@ int xc_pm_get_pxstat(xc_interface *xch, 11.22 ret = xc_sysctl(xch, &sysctl); 11.23 if ( ret ) 11.24 { 11.25 - unlock_pages(pxpt->trans_pt, max_px * max_px * sizeof(uint64_t)); 11.26 - unlock_pages(pxpt->pt, max_px * sizeof(struct xc_px_val)); 11.27 + unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t)); 11.28 + unlock_pages(xch, pxpt->pt, max_px * sizeof(struct xc_px_val)); 11.29 return ret; 11.30 } 11.31 11.32 @@ -85,8 +85,8 @@ int xc_pm_get_pxstat(xc_interface *xch, 11.33 pxpt->last = sysctl.u.get_pmstat.u.getpx.last; 11.34 pxpt->cur = sysctl.u.get_pmstat.u.getpx.cur; 11.35 11.36 - unlock_pages(pxpt->trans_pt, max_px * max_px * sizeof(uint64_t)); 11.37 - unlock_pages(pxpt->pt, max_px * sizeof(struct xc_px_val)); 11.38 + unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t)); 11.39 + unlock_pages(xch, pxpt->pt, max_px * sizeof(struct xc_px_val)); 11.40 11.41 return ret; 11.42 } 11.43 @@ -128,11 +128,11 @@ int xc_pm_get_cxstat(xc_interface *xch, 11.44 if ( (ret = xc_pm_get_max_cx(xch, cpuid, &max_cx)) ) 11.45 goto unlock_0; 11.46 11.47 - if ( (ret = lock_pages(cxpt, sizeof(struct xc_cx_stat))) ) 11.48 + if ( (ret = lock_pages(xch, cxpt, sizeof(struct xc_cx_stat))) ) 11.49 goto unlock_0; 11.50 - if ( (ret = lock_pages(cxpt->triggers, max_cx * sizeof(uint64_t))) ) 11.51 + if ( (ret = lock_pages(xch, cxpt->triggers, max_cx * sizeof(uint64_t))) ) 11.52 goto unlock_1; 11.53 - if ( (ret = lock_pages(cxpt->residencies, max_cx * sizeof(uint64_t))) ) 11.54 + if ( (ret = lock_pages(xch, cxpt->residencies, max_cx * sizeof(uint64_t))) ) 11.55 goto unlock_2; 11.56 11.57 sysctl.cmd = XEN_SYSCTL_get_pmstat; 11.58 @@ -155,11 +155,11 @@ int xc_pm_get_cxstat(xc_interface *xch, 11.59 cxpt->cc6 = sysctl.u.get_pmstat.u.getcx.cc6; 11.60 11.61 unlock_3: 11.62 - unlock_pages(cxpt->residencies, max_cx * sizeof(uint64_t)); 11.63 + unlock_pages(xch, cxpt->residencies, max_cx * sizeof(uint64_t)); 11.64 unlock_2: 11.65 - unlock_pages(cxpt->triggers, max_cx * sizeof(uint64_t)); 11.66 + unlock_pages(xch, cxpt->triggers, max_cx * sizeof(uint64_t)); 11.67 unlock_1: 11.68 - unlock_pages(cxpt, sizeof(struct xc_cx_stat)); 11.69 + unlock_pages(xch, cxpt, sizeof(struct xc_cx_stat)); 11.70 unlock_0: 11.71 return ret; 11.72 } 11.73 @@ -200,13 +200,13 @@ int xc_get_cpufreq_para(xc_interface *xc 11.74 (!user_para->scaling_available_governors) ) 11.75 return -EINVAL; 11.76 11.77 - if ( (ret = lock_pages(user_para->affected_cpus, 11.78 + if ( (ret = lock_pages(xch, user_para->affected_cpus, 11.79 user_para->cpu_num * sizeof(uint32_t))) ) 11.80 goto unlock_1; 11.81 - if ( (ret = lock_pages(user_para->scaling_available_frequencies, 11.82 + if ( (ret = lock_pages(xch, user_para->scaling_available_frequencies, 11.83 
user_para->freq_num * sizeof(uint32_t))) ) 11.84 goto unlock_2; 11.85 - if ( (ret = lock_pages(user_para->scaling_available_governors, 11.86 + if ( (ret = lock_pages(xch, user_para->scaling_available_governors, 11.87 user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char))) ) 11.88 goto unlock_3; 11.89 11.90 @@ -263,13 +263,13 @@ int xc_get_cpufreq_para(xc_interface *xc 11.91 } 11.92 11.93 unlock_4: 11.94 - unlock_pages(user_para->scaling_available_governors, 11.95 + unlock_pages(xch, user_para->scaling_available_governors, 11.96 user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char)); 11.97 unlock_3: 11.98 - unlock_pages(user_para->scaling_available_frequencies, 11.99 + unlock_pages(xch, user_para->scaling_available_frequencies, 11.100 user_para->freq_num * sizeof(uint32_t)); 11.101 unlock_2: 11.102 - unlock_pages(user_para->affected_cpus, 11.103 + unlock_pages(xch, user_para->affected_cpus, 11.104 user_para->cpu_num * sizeof(uint32_t)); 11.105 unlock_1: 11.106 return ret;
12.1 --- a/tools/libxc/xc_private.c Mon Oct 18 16:43:15 2010 +0100 12.2 +++ b/tools/libxc/xc_private.c Mon Oct 18 16:43:58 2010 +0100 12.3 @@ -71,7 +71,7 @@ xc_interface *xc_interface_open(xentooll 12.4 return 0; 12.5 } 12.6 12.7 -static void xc_clean_hcall_buf(void); 12.8 +static void xc_clean_hcall_buf(xc_interface *xch); 12.9 12.10 int xc_interface_close(xc_interface *xch) 12.11 { 12.12 @@ -85,7 +85,7 @@ int xc_interface_close(xc_interface *xch 12.13 if (rc) PERROR("Could not close hypervisor interface"); 12.14 } 12.15 12.16 - xc_clean_hcall_buf(); 12.17 + xc_clean_hcall_buf(xch); 12.18 12.19 free(xch); 12.20 return rc; 12.21 @@ -193,17 +193,17 @@ void xc_report_progress_step(xc_interfac 12.22 12.23 #ifdef __sun__ 12.24 12.25 -int lock_pages(void *addr, size_t len) { return 0; } 12.26 -void unlock_pages(void *addr, size_t len) { } 12.27 +int lock_pages(xc_interface *xch, void *addr, size_t len) { return 0; } 12.28 +void unlock_pages(xc_interface *xch, void *addr, size_t len) { } 12.29 12.30 -int hcall_buf_prep(void **addr, size_t len) { return 0; } 12.31 -void hcall_buf_release(void **addr, size_t len) { } 12.32 +int hcall_buf_prep(xc_interface *xch, void **addr, size_t len) { return 0; } 12.33 +void hcall_buf_release(xc_interface *xch, void **addr, size_t len) { } 12.34 12.35 -static void xc_clean_hcall_buf(void) { } 12.36 +static void xc_clean_hcall_buf(xc_interface *xch) { } 12.37 12.38 #else /* !__sun__ */ 12.39 12.40 -int lock_pages(void *addr, size_t len) 12.41 +int lock_pages(xc_interface *xch, void *addr, size_t len) 12.42 { 12.43 int e; 12.44 void *laddr = (void *)((unsigned long)addr & PAGE_MASK); 12.45 @@ -213,7 +213,7 @@ int lock_pages(void *addr, size_t len) 12.46 return e; 12.47 } 12.48 12.49 -void unlock_pages(void *addr, size_t len) 12.50 +void unlock_pages(xc_interface *xch, void *addr, size_t len) 12.51 { 12.52 void *laddr = (void *)((unsigned long)addr & PAGE_MASK); 12.53 size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) + 12.54 @@ -226,6 +226,7 @@ void unlock_pages(void *addr, size_t len 12.55 static pthread_key_t hcall_buf_pkey; 12.56 static pthread_once_t hcall_buf_pkey_once = PTHREAD_ONCE_INIT; 12.57 struct hcall_buf { 12.58 + xc_interface *xch; 12.59 void *buf; 12.60 void *oldbuf; 12.61 }; 12.62 @@ -238,7 +239,7 @@ static void _xc_clean_hcall_buf(void *m) 12.63 { 12.64 if ( hcall_buf->buf ) 12.65 { 12.66 - unlock_pages(hcall_buf->buf, PAGE_SIZE); 12.67 + unlock_pages(hcall_buf->xch, hcall_buf->buf, PAGE_SIZE); 12.68 free(hcall_buf->buf); 12.69 } 12.70 12.71 @@ -253,14 +254,14 @@ static void _xc_init_hcall_buf(void) 12.72 pthread_key_create(&hcall_buf_pkey, _xc_clean_hcall_buf); 12.73 } 12.74 12.75 -static void xc_clean_hcall_buf(void) 12.76 +static void xc_clean_hcall_buf(xc_interface *xch) 12.77 { 12.78 pthread_once(&hcall_buf_pkey_once, _xc_init_hcall_buf); 12.79 12.80 _xc_clean_hcall_buf(pthread_getspecific(hcall_buf_pkey)); 12.81 } 12.82 12.83 -int hcall_buf_prep(void **addr, size_t len) 12.84 +int hcall_buf_prep(xc_interface *xch, void **addr, size_t len) 12.85 { 12.86 struct hcall_buf *hcall_buf; 12.87 12.88 @@ -272,13 +273,14 @@ int hcall_buf_prep(void **addr, size_t l 12.89 hcall_buf = calloc(1, sizeof(*hcall_buf)); 12.90 if ( !hcall_buf ) 12.91 goto out; 12.92 + hcall_buf->xch = xch; 12.93 pthread_setspecific(hcall_buf_pkey, hcall_buf); 12.94 } 12.95 12.96 if ( !hcall_buf->buf ) 12.97 { 12.98 hcall_buf->buf = xc_memalign(PAGE_SIZE, PAGE_SIZE); 12.99 - if ( !hcall_buf->buf || lock_pages(hcall_buf->buf, PAGE_SIZE) ) 12.100 + if ( 
!hcall_buf->buf || lock_pages(xch, hcall_buf->buf, PAGE_SIZE) ) 12.101 { 12.102 free(hcall_buf->buf); 12.103 hcall_buf->buf = NULL; 12.104 @@ -295,10 +297,10 @@ int hcall_buf_prep(void **addr, size_t l 12.105 } 12.106 12.107 out: 12.108 - return lock_pages(*addr, len); 12.109 + return lock_pages(xch, *addr, len); 12.110 } 12.111 12.112 -void hcall_buf_release(void **addr, size_t len) 12.113 +void hcall_buf_release(xc_interface *xch, void **addr, size_t len) 12.114 { 12.115 struct hcall_buf *hcall_buf = pthread_getspecific(hcall_buf_pkey); 12.116 12.117 @@ -310,7 +312,7 @@ void hcall_buf_release(void **addr, size 12.118 } 12.119 else 12.120 { 12.121 - unlock_pages(*addr, len); 12.122 + unlock_pages(xch, *addr, len); 12.123 } 12.124 } 12.125 12.126 @@ -337,7 +339,7 @@ int xc_mmuext_op( 12.127 DECLARE_HYPERCALL; 12.128 long ret = -EINVAL; 12.129 12.130 - if ( hcall_buf_prep((void **)&op, nr_ops*sizeof(*op)) != 0 ) 12.131 + if ( hcall_buf_prep(xch, (void **)&op, nr_ops*sizeof(*op)) != 0 ) 12.132 { 12.133 PERROR("Could not lock memory for Xen hypercall"); 12.134 goto out1; 12.135 @@ -351,7 +353,7 @@ int xc_mmuext_op( 12.136 12.137 ret = do_xen_hypercall(xch, &hypercall); 12.138 12.139 - hcall_buf_release((void **)&op, nr_ops*sizeof(*op)); 12.140 + hcall_buf_release(xch, (void **)&op, nr_ops*sizeof(*op)); 12.141 12.142 out1: 12.143 return ret; 12.144 @@ -371,7 +373,7 @@ static int flush_mmu_updates(xc_interfac 12.145 hypercall.arg[2] = 0; 12.146 hypercall.arg[3] = mmu->subject; 12.147 12.148 - if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 ) 12.149 + if ( lock_pages(xch, mmu->updates, sizeof(mmu->updates)) != 0 ) 12.150 { 12.151 PERROR("flush_mmu_updates: mmu updates lock_pages failed"); 12.152 err = 1; 12.153 @@ -386,7 +388,7 @@ static int flush_mmu_updates(xc_interfac 12.154 12.155 mmu->idx = 0; 12.156 12.157 - unlock_pages(mmu->updates, sizeof(mmu->updates)); 12.158 + unlock_pages(xch, mmu->updates, sizeof(mmu->updates)); 12.159 12.160 out: 12.161 return err; 12.162 @@ -438,38 +440,38 @@ int xc_memory_op(xc_interface *xch, 12.163 case XENMEM_increase_reservation: 12.164 case XENMEM_decrease_reservation: 12.165 case XENMEM_populate_physmap: 12.166 - if ( lock_pages(reservation, sizeof(*reservation)) != 0 ) 12.167 + if ( lock_pages(xch, reservation, sizeof(*reservation)) != 0 ) 12.168 { 12.169 PERROR("Could not lock"); 12.170 goto out1; 12.171 } 12.172 get_xen_guest_handle(extent_start, reservation->extent_start); 12.173 if ( (extent_start != NULL) && 12.174 - (lock_pages(extent_start, 12.175 + (lock_pages(xch, extent_start, 12.176 reservation->nr_extents * sizeof(xen_pfn_t)) != 0) ) 12.177 { 12.178 PERROR("Could not lock"); 12.179 - unlock_pages(reservation, sizeof(*reservation)); 12.180 + unlock_pages(xch, reservation, sizeof(*reservation)); 12.181 goto out1; 12.182 } 12.183 break; 12.184 case XENMEM_machphys_mfn_list: 12.185 - if ( lock_pages(xmml, sizeof(*xmml)) != 0 ) 12.186 + if ( lock_pages(xch, xmml, sizeof(*xmml)) != 0 ) 12.187 { 12.188 PERROR("Could not lock"); 12.189 goto out1; 12.190 } 12.191 get_xen_guest_handle(extent_start, xmml->extent_start); 12.192 - if ( lock_pages(extent_start, 12.193 + if ( lock_pages(xch, extent_start, 12.194 xmml->max_extents * sizeof(xen_pfn_t)) != 0 ) 12.195 { 12.196 PERROR("Could not lock"); 12.197 - unlock_pages(xmml, sizeof(*xmml)); 12.198 + unlock_pages(xch, xmml, sizeof(*xmml)); 12.199 goto out1; 12.200 } 12.201 break; 12.202 case XENMEM_add_to_physmap: 12.203 - if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) ) 12.204 + if ( 
lock_pages(xch, arg, sizeof(struct xen_add_to_physmap)) ) 12.205 { 12.206 PERROR("Could not lock"); 12.207 goto out1; 12.208 @@ -478,7 +480,7 @@ int xc_memory_op(xc_interface *xch, 12.209 case XENMEM_current_reservation: 12.210 case XENMEM_maximum_reservation: 12.211 case XENMEM_maximum_gpfn: 12.212 - if ( lock_pages(arg, sizeof(domid_t)) ) 12.213 + if ( lock_pages(xch, arg, sizeof(domid_t)) ) 12.214 { 12.215 PERROR("Could not lock"); 12.216 goto out1; 12.217 @@ -486,7 +488,7 @@ int xc_memory_op(xc_interface *xch, 12.218 break; 12.219 case XENMEM_set_pod_target: 12.220 case XENMEM_get_pod_target: 12.221 - if ( lock_pages(arg, sizeof(struct xen_pod_target)) ) 12.222 + if ( lock_pages(xch, arg, sizeof(struct xen_pod_target)) ) 12.223 { 12.224 PERROR("Could not lock"); 12.225 goto out1; 12.226 @@ -501,29 +503,29 @@ int xc_memory_op(xc_interface *xch, 12.227 case XENMEM_increase_reservation: 12.228 case XENMEM_decrease_reservation: 12.229 case XENMEM_populate_physmap: 12.230 - unlock_pages(reservation, sizeof(*reservation)); 12.231 + unlock_pages(xch, reservation, sizeof(*reservation)); 12.232 get_xen_guest_handle(extent_start, reservation->extent_start); 12.233 if ( extent_start != NULL ) 12.234 - unlock_pages(extent_start, 12.235 + unlock_pages(xch, extent_start, 12.236 reservation->nr_extents * sizeof(xen_pfn_t)); 12.237 break; 12.238 case XENMEM_machphys_mfn_list: 12.239 - unlock_pages(xmml, sizeof(*xmml)); 12.240 + unlock_pages(xch, xmml, sizeof(*xmml)); 12.241 get_xen_guest_handle(extent_start, xmml->extent_start); 12.242 - unlock_pages(extent_start, 12.243 + unlock_pages(xch, extent_start, 12.244 xmml->max_extents * sizeof(xen_pfn_t)); 12.245 break; 12.246 case XENMEM_add_to_physmap: 12.247 - unlock_pages(arg, sizeof(struct xen_add_to_physmap)); 12.248 + unlock_pages(xch, arg, sizeof(struct xen_add_to_physmap)); 12.249 break; 12.250 case XENMEM_current_reservation: 12.251 case XENMEM_maximum_reservation: 12.252 case XENMEM_maximum_gpfn: 12.253 - unlock_pages(arg, sizeof(domid_t)); 12.254 + unlock_pages(xch, arg, sizeof(domid_t)); 12.255 break; 12.256 case XENMEM_set_pod_target: 12.257 case XENMEM_get_pod_target: 12.258 - unlock_pages(arg, sizeof(struct xen_pod_target)); 12.259 + unlock_pages(xch, arg, sizeof(struct xen_pod_target)); 12.260 break; 12.261 } 12.262 12.263 @@ -565,7 +567,7 @@ int xc_get_pfn_list(xc_interface *xch, 12.264 memset(pfn_buf, 0, max_pfns * sizeof(*pfn_buf)); 12.265 #endif 12.266 12.267 - if ( lock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 ) 12.268 + if ( lock_pages(xch, pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 ) 12.269 { 12.270 PERROR("xc_get_pfn_list: pfn_buf lock failed"); 12.271 return -1; 12.272 @@ -573,7 +575,7 @@ int xc_get_pfn_list(xc_interface *xch, 12.273 12.274 ret = do_domctl(xch, &domctl); 12.275 12.276 - unlock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf)); 12.277 + unlock_pages(xch, pfn_buf, max_pfns * sizeof(*pfn_buf)); 12.278 12.279 return (ret < 0) ? 
-1 : domctl.u.getmemlist.num_pfns; 12.280 } 12.281 @@ -648,7 +650,7 @@ int xc_version(xc_interface *xch, int cm 12.282 break; 12.283 } 12.284 12.285 - if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) ) 12.286 + if ( (argsize != 0) && (lock_pages(xch, arg, argsize) != 0) ) 12.287 { 12.288 PERROR("Could not lock memory for version hypercall"); 12.289 return -ENOMEM; 12.290 @@ -662,7 +664,7 @@ int xc_version(xc_interface *xch, int cm 12.291 rc = do_xen_version(xch, cmd, arg); 12.292 12.293 if ( argsize != 0 ) 12.294 - unlock_pages(arg, argsize); 12.295 + unlock_pages(xch, arg, argsize); 12.296 12.297 return rc; 12.298 }
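Most of the substance is in xc_private.c, where the helpers themselves gain the handle. On non-Solaris builds they remain thin wrappers around mlock()/munlock() over the page-aligned range covering the buffer. The sketch below reconstructs the post-patch shape from the hunks above; the mlock()/munlock() calls fall outside the shown diff context, so those lines are assumptions, and xch is accepted but deliberately unused until hypercall buffers land.

    /* Sketch of the post-patch helpers in xc_private.c (non-__sun__ case). */
    int lock_pages(xc_interface *xch, void *addr, size_t len)
    {
        void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
        size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
                       PAGE_SIZE - 1) & PAGE_MASK;
        return mlock(laddr, llen);   /* assumed: pin the covering pages */
    }

    void unlock_pages(xc_interface *xch, void *addr, size_t len)
    {
        void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
        size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
                       PAGE_SIZE - 1) & PAGE_MASK;
        munlock(laddr, llen);        /* assumed: best-effort unpin */
    }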
13.1 --- a/tools/libxc/xc_private.h Mon Oct 18 16:43:15 2010 +0100 13.2 +++ b/tools/libxc/xc_private.h Mon Oct 18 16:43:58 2010 +0100 13.3 @@ -100,11 +100,11 @@ void xc_report_progress_step(xc_interfac 13.4 13.5 void *xc_memalign(size_t alignment, size_t size); 13.6 13.7 -int lock_pages(void *addr, size_t len); 13.8 -void unlock_pages(void *addr, size_t len); 13.9 +int lock_pages(xc_interface *xch, void *addr, size_t len); 13.10 +void unlock_pages(xc_interface *xch, void *addr, size_t len); 13.11 13.12 -int hcall_buf_prep(void **addr, size_t len); 13.13 -void hcall_buf_release(void **addr, size_t len); 13.14 +int hcall_buf_prep(xc_interface *xch, void **addr, size_t len); 13.15 +void hcall_buf_release(xc_interface *xch, void **addr, size_t len); 13.16 13.17 int do_xen_hypercall(xc_interface *xch, privcmd_hypercall_t *hypercall); 13.18 13.19 @@ -125,7 +125,7 @@ static inline int do_physdev_op(xc_inter 13.20 13.21 DECLARE_HYPERCALL; 13.22 13.23 - if ( hcall_buf_prep(&op, len) != 0 ) 13.24 + if ( hcall_buf_prep(xch, &op, len) != 0 ) 13.25 { 13.26 PERROR("Could not lock memory for Xen hypercall"); 13.27 goto out1; 13.28 @@ -142,7 +142,7 @@ static inline int do_physdev_op(xc_inter 13.29 " rebuild the user-space tool set?\n"); 13.30 } 13.31 13.32 - hcall_buf_release(&op, len); 13.33 + hcall_buf_release(xch, &op, len); 13.34 13.35 out1: 13.36 return ret; 13.37 @@ -153,7 +153,7 @@ static inline int do_domctl(xc_interface 13.38 int ret = -1; 13.39 DECLARE_HYPERCALL; 13.40 13.41 - if ( hcall_buf_prep((void **)&domctl, sizeof(*domctl)) != 0 ) 13.42 + if ( hcall_buf_prep(xch, (void **)&domctl, sizeof(*domctl)) != 0 ) 13.43 { 13.44 PERROR("Could not lock memory for Xen hypercall"); 13.45 goto out1; 13.46 @@ -171,7 +171,7 @@ static inline int do_domctl(xc_interface 13.47 " rebuild the user-space tool set?\n"); 13.48 } 13.49 13.50 - hcall_buf_release((void **)&domctl, sizeof(*domctl)); 13.51 + hcall_buf_release(xch, (void **)&domctl, sizeof(*domctl)); 13.52 13.53 out1: 13.54 return ret; 13.55 @@ -182,7 +182,7 @@ static inline int do_sysctl(xc_interface 13.56 int ret = -1; 13.57 DECLARE_HYPERCALL; 13.58 13.59 - if ( hcall_buf_prep((void **)&sysctl, sizeof(*sysctl)) != 0 ) 13.60 + if ( hcall_buf_prep(xch, (void **)&sysctl, sizeof(*sysctl)) != 0 ) 13.61 { 13.62 PERROR("Could not lock memory for Xen hypercall"); 13.63 goto out1; 13.64 @@ -200,7 +200,7 @@ static inline int do_sysctl(xc_interface 13.65 " rebuild the user-space tool set?\n"); 13.66 } 13.67 13.68 - hcall_buf_release((void **)&sysctl, sizeof(*sysctl)); 13.69 + hcall_buf_release(xch, (void **)&sysctl, sizeof(*sysctl)); 13.70 13.71 out1: 13.72 return ret;
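Alongside the new prototypes in xc_private.h above, the one place where the handle already matters beyond signature plumbing is the per-thread bounce buffer behind hcall_buf_prep()/hcall_buf_release(): the pthread-key destructor receives only the thread-specific pointer, so the handle used to create the buffer is now stashed in struct hcall_buf for the destructor's unlock_pages() call. A sketch assembled from the xc_private.c hunks; lines outside the shown context are assumptions.

    struct hcall_buf {
        xc_interface *xch;   /* recorded by hcall_buf_prep(); new in this changeset */
        void *buf;           /* page-sized, locked bounce buffer */
        void *oldbuf;        /* caller's original pointer, restored on release */
    };

    /* pthread_key_create() destructor: no xch argument is available here,
     * hence the handle stashed in the struct. */
    static void _xc_clean_hcall_buf(void *m)
    {
        struct hcall_buf *hcall_buf = m;

        if ( hcall_buf )
        {
            if ( hcall_buf->buf )
            {
                unlock_pages(hcall_buf->xch, hcall_buf->buf, PAGE_SIZE);
                free(hcall_buf->buf);
            }
            free(hcall_buf);                            /* assumed */
        }
        pthread_setspecific(hcall_buf_pkey, NULL);      /* assumed */
    }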
14.1 --- a/tools/libxc/xc_resume.c Mon Oct 18 16:43:15 2010 +0100 14.2 +++ b/tools/libxc/xc_resume.c Mon Oct 18 16:43:58 2010 +0100 14.3 @@ -196,7 +196,7 @@ static int xc_domain_resume_any(xc_inter 14.4 goto out; 14.5 } 14.6 14.7 - if ( lock_pages(&ctxt, sizeof(ctxt)) ) 14.8 + if ( lock_pages(xch, &ctxt, sizeof(ctxt)) ) 14.9 { 14.10 ERROR("Unable to lock ctxt"); 14.11 goto out; 14.12 @@ -235,7 +235,7 @@ static int xc_domain_resume_any(xc_inter 14.13 14.14 #if defined(__i386__) || defined(__x86_64__) 14.15 out: 14.16 - unlock_pages((void *)&ctxt, sizeof ctxt); 14.17 + unlock_pages(xch, (void *)&ctxt, sizeof ctxt); 14.18 if (p2m) 14.19 munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE); 14.20 if (p2m_frame_list)
15.1 --- a/tools/libxc/xc_tbuf.c Mon Oct 18 16:43:15 2010 +0100 15.2 +++ b/tools/libxc/xc_tbuf.c Mon Oct 18 16:43:58 2010 +0100 15.3 @@ -129,7 +129,7 @@ int xc_tbuf_set_cpu_mask(xc_interface *x 15.4 set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap); 15.5 sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(bytemap) * 8; 15.6 15.7 - if ( lock_pages(&bytemap, sizeof(bytemap)) != 0 ) 15.8 + if ( lock_pages(xch, &bytemap, sizeof(bytemap)) != 0 ) 15.9 { 15.10 PERROR("Could not lock memory for Xen hypercall"); 15.11 goto out; 15.12 @@ -137,7 +137,7 @@ int xc_tbuf_set_cpu_mask(xc_interface *x 15.13 15.14 ret = do_sysctl(xch, &sysctl); 15.15 15.16 - unlock_pages(&bytemap, sizeof(bytemap)); 15.17 + unlock_pages(xch, &bytemap, sizeof(bytemap)); 15.18 15.19 out: 15.20 return ret;
16.1 --- a/tools/libxc/xc_tmem.c Mon Oct 18 16:43:15 2010 +0100 16.2 +++ b/tools/libxc/xc_tmem.c Mon Oct 18 16:43:58 2010 +0100 16.3 @@ -28,7 +28,7 @@ static int do_tmem_op(xc_interface *xch, 16.4 16.5 hypercall.op = __HYPERVISOR_tmem_op; 16.6 hypercall.arg[0] = (unsigned long)op; 16.7 - if (lock_pages(op, sizeof(*op)) != 0) 16.8 + if (lock_pages(xch, op, sizeof(*op)) != 0) 16.9 { 16.10 PERROR("Could not lock memory for Xen hypercall"); 16.11 return -EFAULT; 16.12 @@ -39,7 +39,7 @@ static int do_tmem_op(xc_interface *xch, 16.13 DPRINTF("tmem operation failed -- need to" 16.14 " rebuild the user-space tool set?\n"); 16.15 } 16.16 - unlock_pages(op, sizeof(*op)); 16.17 + unlock_pages(xch, op, sizeof(*op)); 16.18 16.19 return ret; 16.20 } 16.21 @@ -69,7 +69,7 @@ int xc_tmem_control(xc_interface *xch, 16.22 op.u.ctrl.oid[2] = 0; 16.23 16.24 if (subop == TMEMC_LIST) { 16.25 - if ((arg1 != 0) && (lock_pages(buf, arg1) != 0)) 16.26 + if ((arg1 != 0) && (lock_pages(xch, buf, arg1) != 0)) 16.27 { 16.28 PERROR("Could not lock memory for Xen hypercall"); 16.29 return -ENOMEM; 16.30 @@ -85,7 +85,7 @@ int xc_tmem_control(xc_interface *xch, 16.31 16.32 if (subop == TMEMC_LIST) { 16.33 if (arg1 != 0) 16.34 - unlock_pages(buf, arg1); 16.35 + unlock_pages(xch, buf, arg1); 16.36 } 16.37 16.38 return rc; 16.39 @@ -115,7 +115,7 @@ int xc_tmem_control_oid(xc_interface *xc 16.40 op.u.ctrl.oid[2] = oid.oid[2]; 16.41 16.42 if (subop == TMEMC_LIST) { 16.43 - if ((arg1 != 0) && (lock_pages(buf, arg1) != 0)) 16.44 + if ((arg1 != 0) && (lock_pages(xch, buf, arg1) != 0)) 16.45 { 16.46 PERROR("Could not lock memory for Xen hypercall"); 16.47 return -ENOMEM; 16.48 @@ -131,7 +131,7 @@ int xc_tmem_control_oid(xc_interface *xc 16.49 16.50 if (subop == TMEMC_LIST) { 16.51 if (arg1 != 0) 16.52 - unlock_pages(buf, arg1); 16.53 + unlock_pages(xch, buf, arg1); 16.54 } 16.55 16.56 return rc;