
view tools/libxc/xc_misc.c @ 20837:0b138a019292

libxc: use new (replacement) mmap-batch ioctl

Replace all calls to xc_map_foreign_batch() where the caller doesn't
inspect the passed-in array for errors with calls to
xc_map_foreign_pages(). Convert all remaining calls to the newly
introduced xc_map_foreign_bulk().
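For illustration, an error-checking call site converted to the new
interface might look roughly like the sketch below (hypothetical code,
not part of this patch; the helper name and variables are made up):

    /* Hypothetical sketch only -- not code from this changeset. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>   /* PROT_READ */
    #include <xenctrl.h>    /* xc_map_foreign_bulk(), xen_pfn_t */

    static void *map_and_check(int xc_handle, uint32_t domid,
                               const xen_pfn_t *pfns, unsigned int count)
    {
        int *err = calloc(count, sizeof(*err));
        void *mapping;
        unsigned int i;

        if ( !err )
            return NULL;

        /* Unlike xc_map_foreign_batch(), which flags failures by rewriting
         * the pfn array in place, xc_map_foreign_bulk() leaves pfns[]
         * untouched and reports per-page results in err[]: 0 on success,
         * a negative errno value otherwise. */
        mapping = xc_map_foreign_bulk(xc_handle, domid, PROT_READ,
                                      pfns, err, count);
        for ( i = 0; mapping && i < count; i++ )
            if ( err[i] )
                fprintf(stderr, "pfn %u not mapped (err %d)\n", i, err[i]);

        free(err);
        return mapping;
    }

Callers that never inspected the array simply switch to
xc_map_foreign_pages(), which maps all of the requested pages or fails
as a whole.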

As a sideband modification (needed while writing the patch to ensure
they really are unused), eliminate the unused parameters of
uncanonicalize_pagetable() and xc_map_foreign_batch_single(). Also
unmap live_p2m_frame_list earlier in map_and_save_p2m_table(),
reducing the peak amount of virtual address space required.

All supported OSes other than Linux continue to use the old ioctl for
the time being.

Also change libxc's MAJOR to 4.0 to reflect the API change.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 13 08:12:56 2010 +0000 (2010-01-13)
parents 10c0942ed240
children fbe8f32fa257
line source
/******************************************************************************
 * xc_misc.c
 *
 * Miscellaneous control interface functions.
 */

#include "xc_private.h"
#include <xen/hvm/hvm_op.h>

int xc_readconsolering(int xc_handle,
                       char **pbuffer,
                       unsigned int *pnr_chars,
                       int clear, int incremental, uint32_t *pindex)
{
    int ret;
    DECLARE_SYSCTL;
    char *buffer = *pbuffer;
    unsigned int nr_chars = *pnr_chars;

    sysctl.cmd = XEN_SYSCTL_readconsole;
    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
    sysctl.u.readconsole.count = nr_chars;
    sysctl.u.readconsole.clear = clear;
    sysctl.u.readconsole.incremental = 0;
    if ( pindex )
    {
        sysctl.u.readconsole.index = *pindex;
        sysctl.u.readconsole.incremental = incremental;
    }

    if ( (ret = lock_pages(buffer, nr_chars)) != 0 )
        return ret;

    if ( (ret = do_sysctl(xc_handle, &sysctl)) == 0 )
    {
        *pnr_chars = sysctl.u.readconsole.count;
        if ( pindex )
            *pindex = sysctl.u.readconsole.index;
    }

    unlock_pages(buffer, nr_chars);

    return ret;
}

int xc_send_debug_keys(int xc_handle, char *keys)
{
    int ret, len = strlen(keys);
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_debug_keys;
    set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
    sysctl.u.debug_keys.nr_keys = len;

    if ( (ret = lock_pages(keys, len)) != 0 )
        return ret;

    ret = do_sysctl(xc_handle, &sysctl);

    unlock_pages(keys, len);

    return ret;
}

int xc_physinfo(int xc_handle,
                xc_physinfo_t *put_info)
{
    int ret;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_physinfo;

    memcpy(&sysctl.u.physinfo, put_info, sizeof(*put_info));

    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
        return ret;

    memcpy(put_info, &sysctl.u.physinfo, sizeof(*put_info));

    return 0;
}

int xc_sched_id(int xc_handle,
                int *sched_id)
{
    int ret;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_sched_id;

    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
        return ret;

    *sched_id = sysctl.u.sched_id.sched_id;

    return 0;
}

int xc_perfc_control(int xc_handle,
                     uint32_t opcode,
                     xc_perfc_desc_t *desc,
                     xc_perfc_val_t *val,
                     int *nbr_desc,
                     int *nbr_val)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = opcode;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
    set_xen_guest_handle(sysctl.u.perfc_op.val, val);

    rc = do_sysctl(xc_handle, &sysctl);

    if ( nbr_desc )
        *nbr_desc = sysctl.u.perfc_op.nr_counters;
    if ( nbr_val )
        *nbr_val = sysctl.u.perfc_op.nr_vals;

    return rc;
}

int xc_lockprof_control(int xc_handle,
                        uint32_t opcode,
                        uint32_t *n_elems,
                        uint64_t *time,
                        xc_lockprof_data_t *data)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.cmd = opcode;
    sysctl.u.lockprof_op.max_elem = n_elems ? *n_elems : 0;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, data);

    rc = do_sysctl(xc_handle, &sysctl);

    if (n_elems)
        *n_elems = sysctl.u.lockprof_op.nr_elem;
    if (time)
        *time = sysctl.u.lockprof_op.time;

    return rc;
}

int xc_getcpuinfo(int xc_handle, int max_cpus,
                  xc_cpuinfo_t *info, int *nr_cpus)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_getcpuinfo;
    sysctl.u.getcpuinfo.max_cpus = max_cpus;
    set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);

    if ( (rc = lock_pages(info, max_cpus*sizeof(*info))) != 0 )
        return rc;

    rc = do_sysctl(xc_handle, &sysctl);

    unlock_pages(info, max_cpus*sizeof(*info));

    if ( nr_cpus )
        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;

    return rc;
}

int xc_hvm_set_pci_intx_level(
    int xc_handle, domid_t dom,
    uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
    unsigned int level)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_pci_intx_level arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_pci_intx_level;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.domain = domain;
    arg.bus = bus;
    arg.device = device;
    arg.intx = intx;
    arg.level = level;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_set_isa_irq_level(
    int xc_handle, domid_t dom,
    uint8_t isa_irq,
    unsigned int level)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_isa_irq_level arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_isa_irq_level;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.isa_irq = isa_irq;
    arg.level = level;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_set_pci_link_route(
    int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_pci_link_route arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_pci_link_route;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.link = link;
    arg.isa_irq = isa_irq;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_track_dirty_vram(
    int xc_handle, domid_t dom,
    uint64_t first_pfn, uint64_t nr,
    unsigned long *dirty_bitmap)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_track_dirty_vram arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_track_dirty_vram;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.first_pfn = first_pfn;
    arg.nr = nr;
    set_xen_guest_handle(arg.dirty_bitmap, (uint8_t *)dirty_bitmap);

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_modified_memory(
    int xc_handle, domid_t dom, uint64_t first_pfn, uint64_t nr)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_modified_memory arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_modified_memory;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.first_pfn = first_pfn;
    arg.nr = nr;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_set_mem_type(
    int xc_handle, domid_t dom, hvmmem_type_t mem_type, uint64_t first_pfn, uint64_t nr)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_mem_type arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_mem_type;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.hvmmem_type = mem_type;
    arg.first_pfn = first_pfn;
    arg.nr = nr;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
                           const xen_pfn_t *arr, int num)
{
    void *res;
    int i, *err;

    if (num < 0) {
        errno = -EINVAL;
        return NULL;
    }

    err = calloc(num, sizeof(*err));
    if (!err)
        return NULL;

    res = xc_map_foreign_bulk(xc_handle, dom, prot, arr, err, num);
    if (res) {
        for (i = 0; i < num; i++) {
            if (err[i]) {
                errno = -err[i];
                munmap(res, num * PAGE_SIZE);
                res = NULL;
                break;
            }
        }
    }

    free(err);
    return res;
}

/* stub for all not yet converted OSes */
void *
#ifdef __GNUC__
__attribute__((__weak__))
#endif
xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot,
                    const xen_pfn_t *arr, int *err, unsigned int num)
{
    xen_pfn_t *pfn;
    unsigned int i;
    void *ret;

    if ((int)num <= 0) {
        errno = EINVAL;
        return NULL;
    }

    pfn = calloc(num, sizeof(*pfn));
    if (!pfn) {
        errno = ENOMEM;
        return NULL;
    }

    memcpy(pfn, arr, num * sizeof(*arr));
    ret = xc_map_foreign_batch(xc_handle, dom, prot, pfn, num);

    if (ret) {
        for (i = 0; i < num; ++i)
            switch (pfn[i] ^ arr[i]) {
            case 0:
                err[i] = 0;
                break;
            default:
                err[i] = -EINVAL;
                break;
            }
    }

    free(pfn);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */