debuggers.hg

view tools/libxc/xc_misc.c @ 21164:28e5409e3fb3

Host NUMA information in dom0

The 'xm info' command now also reports the CPU topology and host NUMA
information. This will later be used to build guest NUMA support. The
patch changes the physinfo sysctl, adds the topology_info and numa_info
sysctls, and updates the Python and libxc code accordingly.

Signed-off-by: Nitin A Kamble <nitin.a.kamble@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Apr 07 16:22:05 2010 +0100 (2010-04-07)
parents 3c3759296796
children 779c0ef9682c
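
Below is a minimal, illustrative sketch of how a management tool (such as the code behind 'xm info') might consume the new topology call added by this change. It assumes the xc_topologyinfo_t structure mirrors the underlying sysctl, i.e. a max_cpu_index field plus guest handles (cpu_to_core, cpu_to_socket, cpu_to_node) pointing at caller-allocated arrays; the exact field names, the MAX_NR_CPUS bound, and any buffer pinning the hypercall may require are assumptions, not taken from this file.

    /* Hypothetical caller sketch; field names follow the topologyinfo
     * sysctl layout assumed above, and MAX_NR_CPUS is a made-up bound. */
    #include <stdio.h>
    #include "xenctrl.h"

    #define MAX_NR_CPUS 256

    static void print_host_topology(int xc_handle)
    {
        xc_topologyinfo_t tinfo = { 0 };
        uint32_t core[MAX_NR_CPUS], socket[MAX_NR_CPUS], node[MAX_NR_CPUS];
        int i;

        /* Point the guest handles at caller-owned output arrays. */
        set_xen_guest_handle(tinfo.cpu_to_core, core);
        set_xen_guest_handle(tinfo.cpu_to_socket, socket);
        set_xen_guest_handle(tinfo.cpu_to_node, node);
        tinfo.max_cpu_index = MAX_NR_CPUS - 1;

        if ( xc_topologyinfo(xc_handle, &tinfo) != 0 )
            return;

        /* The hypervisor reports the highest CPU index it filled in. */
        for ( i = 0; i <= tinfo.max_cpu_index && i < MAX_NR_CPUS; i++ )
            printf("cpu%d: core %u, socket %u, node %u\n",
                   i, core[i], socket[i], node[i]);
    }

xc_physinfo() and xc_numainfo() follow the same pattern: fill in the handles and bounds, call the wrapper, then read back the counts and indices the hypervisor reports.
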
line source
/******************************************************************************
 * xc_misc.c
 *
 * Miscellaneous control interface functions.
 */

#include "xc_private.h"
#include <xen/hvm/hvm_op.h>

int xc_readconsolering(int xc_handle,
                       char **pbuffer,
                       unsigned int *pnr_chars,
                       int clear, int incremental, uint32_t *pindex)
{
    int ret;
    DECLARE_SYSCTL;
    char *buffer = *pbuffer;
    unsigned int nr_chars = *pnr_chars;

    sysctl.cmd = XEN_SYSCTL_readconsole;
    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
    sysctl.u.readconsole.count = nr_chars;
    sysctl.u.readconsole.clear = clear;
    sysctl.u.readconsole.incremental = 0;
    if ( pindex )
    {
        sysctl.u.readconsole.index = *pindex;
        sysctl.u.readconsole.incremental = incremental;
    }

    if ( (ret = lock_pages(buffer, nr_chars)) != 0 )
        return ret;

    if ( (ret = do_sysctl(xc_handle, &sysctl)) == 0 )
    {
        *pnr_chars = sysctl.u.readconsole.count;
        if ( pindex )
            *pindex = sysctl.u.readconsole.index;
    }

    unlock_pages(buffer, nr_chars);

    return ret;
}

int xc_send_debug_keys(int xc_handle, char *keys)
{
    int ret, len = strlen(keys);
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_debug_keys;
    set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
    sysctl.u.debug_keys.nr_keys = len;

    if ( (ret = lock_pages(keys, len)) != 0 )
        return ret;

    ret = do_sysctl(xc_handle, &sysctl);

    unlock_pages(keys, len);

    return ret;
}

int xc_physinfo(int xc_handle,
                xc_physinfo_t *put_info)
{
    int ret;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_physinfo;

    memcpy(&sysctl.u.physinfo, put_info, sizeof(*put_info));

    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
        return ret;

    memcpy(put_info, &sysctl.u.physinfo, sizeof(*put_info));

    return 0;
}

int xc_topologyinfo(int xc_handle,
                    xc_topologyinfo_t *put_info)
{
    int ret;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_topologyinfo;

    memcpy(&sysctl.u.topologyinfo, put_info, sizeof(*put_info));

    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
        return ret;

    memcpy(put_info, &sysctl.u.topologyinfo, sizeof(*put_info));

    return 0;
}

int xc_numainfo(int xc_handle,
                xc_numainfo_t *put_info)
{
    int ret;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_numainfo;

    memcpy(&sysctl.u.numainfo, put_info, sizeof(*put_info));

    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
        return ret;

    memcpy(put_info, &sysctl.u.numainfo, sizeof(*put_info));

    return 0;
}

int xc_sched_id(int xc_handle,
                int *sched_id)
{
    int ret;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_sched_id;

    if ( (ret = do_sysctl(xc_handle, &sysctl)) != 0 )
        return ret;

    *sched_id = sysctl.u.sched_id.sched_id;

    return 0;
}

#if defined(__i386__) || defined(__x86_64__)
int xc_mca_op(int xc_handle, struct xen_mc *mc)
{
    int ret = 0;
    DECLARE_HYPERCALL;

    mc->interface_version = XEN_MCA_INTERFACE_VERSION;
    if ( lock_pages(mc, sizeof(*mc)) )
    {
        PERROR("Could not lock xen_mc memory\n");
        return -EINVAL;
    }

    hypercall.op = __HYPERVISOR_mca;
    hypercall.arg[0] = (unsigned long)mc;
    ret = do_xen_hypercall(xc_handle, &hypercall);
    unlock_pages(mc, sizeof(*mc));
    return ret;
}
#endif

int xc_perfc_control(int xc_handle,
                     uint32_t opcode,
                     xc_perfc_desc_t *desc,
                     xc_perfc_val_t *val,
                     int *nbr_desc,
                     int *nbr_val)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = opcode;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
    set_xen_guest_handle(sysctl.u.perfc_op.val, val);

    rc = do_sysctl(xc_handle, &sysctl);

    if ( nbr_desc )
        *nbr_desc = sysctl.u.perfc_op.nr_counters;
    if ( nbr_val )
        *nbr_val = sysctl.u.perfc_op.nr_vals;

    return rc;
}

int xc_lockprof_control(int xc_handle,
                        uint32_t opcode,
                        uint32_t *n_elems,
                        uint64_t *time,
                        xc_lockprof_data_t *data)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.cmd = opcode;
    sysctl.u.lockprof_op.max_elem = n_elems ? *n_elems : 0;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, data);

    rc = do_sysctl(xc_handle, &sysctl);

    if ( n_elems )
        *n_elems = sysctl.u.lockprof_op.nr_elem;
    if ( time )
        *time = sysctl.u.lockprof_op.time;

    return rc;
}

int xc_getcpuinfo(int xc_handle, int max_cpus,
                  xc_cpuinfo_t *info, int *nr_cpus)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_getcpuinfo;
    sysctl.u.getcpuinfo.max_cpus = max_cpus;
    set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);

    if ( (rc = lock_pages(info, max_cpus*sizeof(*info))) != 0 )
        return rc;

    rc = do_sysctl(xc_handle, &sysctl);

    unlock_pages(info, max_cpus*sizeof(*info));

    if ( nr_cpus )
        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;

    return rc;
}

int xc_hvm_set_pci_intx_level(
    int xc_handle, domid_t dom,
    uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
    unsigned int level)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_pci_intx_level _arg, *arg = &_arg;
    int rc;

    if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_pci_intx_level;
    hypercall.arg[1] = (unsigned long)arg;

    arg->domid = dom;
    arg->domain = domain;
    arg->bus = bus;
    arg->device = device;
    arg->intx = intx;
    arg->level = level;

    rc = do_xen_hypercall(xc_handle, &hypercall);

    hcall_buf_release((void **)&arg, sizeof(*arg));

    return rc;
}

int xc_hvm_set_isa_irq_level(
    int xc_handle, domid_t dom,
    uint8_t isa_irq,
    unsigned int level)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_isa_irq_level _arg, *arg = &_arg;
    int rc;

    if ( (rc = hcall_buf_prep((void **)&arg, sizeof(*arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_isa_irq_level;
    hypercall.arg[1] = (unsigned long)arg;

    arg->domid = dom;
    arg->isa_irq = isa_irq;
    arg->level = level;

    rc = do_xen_hypercall(xc_handle, &hypercall);

    hcall_buf_release((void **)&arg, sizeof(*arg));

    return rc;
}

int xc_hvm_set_pci_link_route(
    int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_pci_link_route arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_pci_link_route;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.link = link;
    arg.isa_irq = isa_irq;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_track_dirty_vram(
    int xc_handle, domid_t dom,
    uint64_t first_pfn, uint64_t nr,
    unsigned long *dirty_bitmap)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_track_dirty_vram arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_track_dirty_vram;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.first_pfn = first_pfn;
    arg.nr = nr;
    set_xen_guest_handle(arg.dirty_bitmap, (uint8_t *)dirty_bitmap);

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_modified_memory(
    int xc_handle, domid_t dom, uint64_t first_pfn, uint64_t nr)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_modified_memory arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_modified_memory;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.first_pfn = first_pfn;
    arg.nr = nr;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

int xc_hvm_set_mem_type(
    int xc_handle, domid_t dom, hvmmem_type_t mem_type, uint64_t first_pfn, uint64_t nr)
{
    DECLARE_HYPERCALL;
    struct xen_hvm_set_mem_type arg;
    int rc;

    hypercall.op = __HYPERVISOR_hvm_op;
    hypercall.arg[0] = HVMOP_set_mem_type;
    hypercall.arg[1] = (unsigned long)&arg;

    arg.domid = dom;
    arg.hvmmem_type = mem_type;
    arg.first_pfn = first_pfn;
    arg.nr = nr;

    if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
    {
        PERROR("Could not lock memory");
        return rc;
    }

    rc = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(&arg, sizeof(arg));

    return rc;
}

/* stub for all not yet converted OSes */
void *
#ifdef __GNUC__
__attribute__((__weak__))
#endif
xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot,
                    const xen_pfn_t *arr, int *err, unsigned int num)
{
    xen_pfn_t *pfn;
    unsigned int i;
    void *ret;

    if ((int)num <= 0) {
        errno = EINVAL;
        return NULL;
    }

    pfn = malloc(num * sizeof(*pfn));
    if (!pfn) {
        errno = ENOMEM;
        return NULL;
    }

    memcpy(pfn, arr, num * sizeof(*arr));
    ret = xc_map_foreign_batch(xc_handle, dom, prot, pfn, num);

    if (ret) {
        for (i = 0; i < num; ++i)
            switch (pfn[i] ^ arr[i]) {
            case 0:
                err[i] = 0;
                break;
            default:
                err[i] = -EINVAL;
                break;
            }
    } else
        memset(err, 0, num * sizeof(*err));

    free(pfn);

    return ret;
}

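/*
 * Usage sketch (illustrative, with hypothetical caller-side names): the weak
 * stub above reports per-frame results through err[].  After a successful
 * mapping, err[i] == 0 means frame i was mapped, while err[i] == -EINVAL
 * means xc_map_foreign_batch() rewrote that pfn entry, i.e. the frame could
 * not be mapped.  A caller mapping two frames might check:
 *
 *     xen_pfn_t frames[2] = { pfn0, pfn1 };   // pfn0/pfn1: frames of interest
 *     int err[2];
 *     void *va = xc_map_foreign_bulk(xc_handle, domid, PROT_READ,
 *                                    frames, err, 2);
 *     if ( va != NULL && err[0] == 0 && err[1] == 0 )
 *         ;  // both frames are mapped contiguously starting at va
 */
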
void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
                           const xen_pfn_t *arr, int num)
{
    void *res;
    int i, *err;

    if (num < 0) {
        errno = EINVAL;
        return NULL;
    }

    err = malloc(num * sizeof(*err));
    if (!err)
        return NULL;

    res = xc_map_foreign_bulk(xc_handle, dom, prot, arr, err, num);
    if (res) {
        for (i = 0; i < num; i++) {
            if (err[i]) {
                errno = -err[i];
                munmap(res, num * PAGE_SIZE);
                res = NULL;
                break;
            }
        }
    }

    free(err);
    return res;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */