debuggers.hg

view tools/libxc/xenctrl.h @ 21067:b4a1832a916f

Update Xen version to 4.0.0-rc6
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 09 18:18:05 2010 +0000 (2010-03-09)
parents a06e9def02bb
children b64a8d2a80ad
line source
1 /******************************************************************************
2 * xenctrl.h
3 *
4 * A library for low-level access to the Xen control interfaces.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 *
8 * xc_gnttab functions:
9 * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
10 */
12 #ifndef XENCTRL_H
13 #define XENCTRL_H
15 /* Tell the Xen public headers we are a user-space tools build. */
16 #ifndef __XEN_TOOLS__
17 #define __XEN_TOOLS__ 1
18 #endif
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <xen/xen.h>
23 #include <xen/domctl.h>
24 #include <xen/physdev.h>
25 #include <xen/sysctl.h>
26 #include <xen/version.h>
27 #include <xen/event_channel.h>
28 #include <xen/sched.h>
29 #include <xen/memory.h>
30 #include <xen/grant_table.h>
31 #include <xen/hvm/params.h>
32 #include <xen/xsm/acm.h>
33 #include <xen/xsm/acm_ops.h>
34 #include <xen/xsm/flask_op.h>
35 #include <xen/tmem.h>
37 #if defined(__i386__) || defined(__x86_64__)
38 #include <xen/foreign/x86_32.h>
39 #include <xen/foreign/x86_64.h>
40 #include <xen/arch-x86/xen-mca.h>
41 #endif
43 #ifdef __ia64__
44 #define XC_PAGE_SHIFT 14
45 #else
46 #define XC_PAGE_SHIFT 12
47 #endif
48 #define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
49 #define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))
51 #define INVALID_MFN (~0UL)
53 /*
54 * DEFINITIONS FOR CPU BARRIERS
55 */
57 #if defined(__i386__)
58 #define xen_mb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
59 #define xen_rmb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
60 #define xen_wmb() asm volatile ( "" : : : "memory")
61 #elif defined(__x86_64__)
62 #define xen_mb() asm volatile ( "mfence" : : : "memory")
63 #define xen_rmb() asm volatile ( "lfence" : : : "memory")
64 #define xen_wmb() asm volatile ( "" : : : "memory")
65 #elif defined(__ia64__)
66 #define xen_mb() asm volatile ("mf" ::: "memory")
67 #define xen_rmb() asm volatile ("mf" ::: "memory")
68 #define xen_wmb() asm volatile ("mf" ::: "memory")
69 #else
70 #error "Define barriers"
71 #endif
73 /*
74 * INITIALIZATION FUNCTIONS
75 */
77 /**
78 * This function opens a handle to the hypervisor interface. This function can
79 * be called multiple times within a single process. Multiple processes can
80 * have an open hypervisor interface at the same time.
81 *
82 * Each call to this function should have a corresponding call to
83 * xc_interface_close().
84 *
85 * This function can fail if the caller does not have superuser permission or
86 * if a Xen-enabled kernel is not currently running.
87 *
88 * @return a handle to the hypervisor interface or -1 on failure
89 */
90 int xc_interface_open(void);
92 /**
93 * This function closes an open hypervisor interface.
94 *
95 * This function can fail if the handle does not represent an open interface or
96 * if there were problems closing the interface.
97 *
98 * @parm xc_handle a handle to an open hypervisor interface
99 * @return 0 on success, -1 otherwise.
100 */
101 int xc_interface_close(int xc_handle);
103 /*
104 * KERNEL INTERFACES
105 */
107 /*
108 * Resolve a kernel device name (e.g., "evtchn", "blktap0") into a kernel
109 * device number. Returns -1 on error (and sets errno).
110 */
111 int xc_find_device_number(const char *name);
113 /*
114 * DOMAIN DEBUGGING FUNCTIONS
115 */
117 typedef struct xc_core_header {
118 unsigned int xch_magic;
119 unsigned int xch_nr_vcpus;
120 unsigned int xch_nr_pages;
121 unsigned int xch_ctxt_offset;
122 unsigned int xch_index_offset;
123 unsigned int xch_pages_offset;
124 } xc_core_header_t;
126 #define XC_CORE_MAGIC 0xF00FEBED
127 #define XC_CORE_MAGIC_HVM 0xF00FEBEE
129 #ifdef __linux__
131 #include <sys/ptrace.h>
132 #include <thread_db.h>
134 typedef void (*thr_ev_handler_t)(long);
136 void xc_register_event_handler(
137 thr_ev_handler_t h,
138 td_event_e e);
140 long xc_ptrace(
141 int xc_handle,
142 enum __ptrace_request request,
143 uint32_t domid,
144 long addr,
145 long data);
147 int xc_waitdomain(
148 int xc_handle,
149 int domain,
150 int *status,
151 int options);
153 #endif /* __linux__ */
155 /*
156 * DOMAIN MANAGEMENT FUNCTIONS
157 */
159 typedef struct xc_dominfo {
160 uint32_t domid;
161 uint32_t ssidref;
162 unsigned int dying:1, crashed:1, shutdown:1,
163 paused:1, blocked:1, running:1,
164 hvm:1, debugged:1;
165 unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
166 unsigned long nr_pages; /* current number, not maximum */
167 unsigned long nr_shared_pages;
168 unsigned long shared_info_frame;
169 uint64_t cpu_time;
170 unsigned long max_memkb;
171 unsigned int nr_online_vcpus;
172 unsigned int max_vcpu_id;
173 xen_domain_handle_t handle;
174 } xc_dominfo_t;
176 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
178 typedef union
179 {
180 #if defined(__i386__) || defined(__x86_64__)
181 vcpu_guest_context_x86_64_t x64;
182 vcpu_guest_context_x86_32_t x32;
183 #endif
184 vcpu_guest_context_t c;
185 } vcpu_guest_context_any_t;
187 typedef union
188 {
189 #if defined(__i386__) || defined(__x86_64__)
190 shared_info_x86_64_t x64;
191 shared_info_x86_32_t x32;
192 #endif
193 shared_info_t s;
194 } shared_info_any_t;
196 typedef union
197 {
198 #if defined(__i386__) || defined(__x86_64__)
199 start_info_x86_64_t x64;
200 start_info_x86_32_t x32;
201 #endif
202 start_info_t s;
203 } start_info_any_t;
206 int xc_domain_create(int xc_handle,
207 uint32_t ssidref,
208 xen_domain_handle_t handle,
209 uint32_t flags,
210 uint32_t *pdomid);
213 /* Functions to produce a dump of a given domain
214 * xc_domain_dumpcore - produces a dump to a specified file
215 * xc_domain_dumpcore_via_callback - produces a dump, using a specified
216 * callback function
217 */
218 int xc_domain_dumpcore(int xc_handle,
219 uint32_t domid,
220 const char *corename);
222 /* Define the callback function type for xc_domain_dumpcore_via_callback.
223 *
224 * This function is called by the coredump code for every "write",
225 * and passes an opaque object for the use of the function and
226 * created by the caller of xc_domain_dumpcore_via_callback.
227 */
228 typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);
230 int xc_domain_dumpcore_via_callback(int xc_handle,
231 uint32_t domid,
232 void *arg,
233 dumpcore_rtn_t dump_rtn);
235 /*
236 * This function sets the maximum number of vcpus that a domain may create.
237 *
238 * @parm xc_handle a handle to an open hypervisor interface.
239 * @parm domid the domain id in which vcpus are to be created.
240 * @parm max the maximum number of vcpus that the domain may create.
241 * @return 0 on success, -1 on failure.
242 */
243 int xc_domain_max_vcpus(int xc_handle,
244 uint32_t domid,
245 unsigned int max);
247 /**
248 * This function pauses a domain. A paused domain still exists in memory
249 * however it does not receive any timeslices from the hypervisor.
250 *
251 * @parm xc_handle a handle to an open hypervisor interface
252 * @parm domid the domain id to pause
253 * @return 0 on success, -1 on failure.
254 */
255 int xc_domain_pause(int xc_handle,
256 uint32_t domid);
257 /**
258 * This function unpauses a domain. The domain should have been previously
259 * paused.
260 *
261 * @parm xc_handle a handle to an open hypervisor interface
262 * @parm domid the domain id to unpause
263 * @return 0 on success, -1 on failure
264 */
265 int xc_domain_unpause(int xc_handle,
266 uint32_t domid);
268 /**
269 * This function will destroy a domain. Destroying a domain removes the domain
270 * completely from memory. This function should be called after sending the
271 * domain a SHUTDOWN control message to free up the domain resources.
272 *
273 * @parm xc_handle a handle to an open hypervisor interface
274 * @parm domid the domain id to destroy
275 * @return 0 on success, -1 on failure
276 */
277 int xc_domain_destroy(int xc_handle,
278 uint32_t domid);
281 /**
282 * This function resumes a suspended domain. The domain should have
283 * been previously suspended.
284 *
285 * @parm xc_handle a handle to an open hypervisor interface
286 * @parm domid the domain id to resume
287 * @parm fast use cooperative resume (guest must support this)
288 * @return 0 on success, -1 on failure
289 */
290 int xc_domain_resume(int xc_handle,
291 uint32_t domid,
292 int fast);
294 /**
295 * This function will shutdown a domain. This is intended for use in
296 * fully-virtualized domains where this operation is analogous to the
297 * sched_op operations in a paravirtualized domain. The caller is
298 * expected to give the reason for the shutdown.
299 *
300 * @parm xc_handle a handle to an open hypervisor interface
301 * @parm domid the domain id to shutdown
302 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
303 * @return 0 on success, -1 on failure
304 */
305 int xc_domain_shutdown(int xc_handle,
306 uint32_t domid,
307 int reason);
309 int xc_vcpu_setaffinity(int xc_handle,
310 uint32_t domid,
311 int vcpu,
312 uint64_t cpumap);
313 int xc_vcpu_getaffinity(int xc_handle,
314 uint32_t domid,
315 int vcpu,
316 uint64_t *cpumap);
318 /**
319 * This function will return information about one or more domains. It is
320 * designed to iterate over the list of domains. If a single domain is
321 * requested, this function will return the next domain in the list - if
322 * one exists. It is, therefore, important in this case to make sure the
323 * domain requested was the one returned.
324 *
325 * @parm xc_handle a handle to an open hypervisor interface
326 * @parm first_domid the first domain to enumerate information from. Domains
327 * are currently enumerated in order of creation.
328 * @parm max_doms the number of elements in info
329 * @parm info an array of max_doms size that will contain the information for
330 * the enumerated domains.
331 * @return the number of domains enumerated or -1 on error
332 */
333 int xc_domain_getinfo(int xc_handle,
334 uint32_t first_domid,
335 unsigned int max_doms,
336 xc_dominfo_t *info);
339 /**
340 * This function will set the execution context for the specified vcpu.
341 *
342 * @parm xc_handle a handle to an open hypervisor interface
343 * @parm domid the domain to set the vcpu context for
344 * @parm vcpu the vcpu number for the context
345 * @parm ctxt pointer to the cpu context with the values to set
346 * @return 0 on success, -1 on failure
347 */
348 int xc_vcpu_setcontext(int xc_handle,
349 uint32_t domid,
350 uint32_t vcpu,
351 vcpu_guest_context_any_t *ctxt);
352 /**
353 * This function will return information about one or more domains, using a
354 * single hypercall. The domain information will be stored into the supplied
355 * array of xc_domaininfo_t structures.
356 *
357 * @parm xc_handle a handle to an open hypervisor interface
358 * @parm first_domain the first domain to enumerate information from.
359 * Domains are currently enumerated in order of creation.
360 * @parm max_domains the number of elements in info
361 * @parm info an array of max_domains size that will contain the information for
362 * the enumerated domains.
363 * @return the number of domains enumerated or -1 on error
364 */
365 int xc_domain_getinfolist(int xc_handle,
366 uint32_t first_domain,
367 unsigned int max_domains,
368 xc_domaininfo_t *info);
370 /**
371 * This function returns information about the context of a hvm domain
372 * @parm xc_handle a handle to an open hypervisor interface
373 * @parm domid the domain to get information from
374 * @parm ctxt_buf a pointer to a structure to store the execution context of
375 * the hvm domain
376 * @parm size the size of ctxt_buf in bytes
377 * @return 0 on success, -1 on failure
378 */
379 int xc_domain_hvm_getcontext(int xc_handle,
380 uint32_t domid,
381 uint8_t *ctxt_buf,
382 uint32_t size);
385 /**
386 * This function returns one element of the context of a hvm domain
387 * @parm xc_handle a handle to an open hypervisor interface
388 * @parm domid the domain to get information from
389 * @parm typecode which type of element is required
390 * @parm instance which instance of the type
391 * @parm ctxt_buf a pointer to a structure to store the execution context of
392 * the hvm domain
393 * @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
394 * @return 0 on success, -1 on failure
395 */
396 int xc_domain_hvm_getcontext_partial(int xc_handle,
397 uint32_t domid,
398 uint16_t typecode,
399 uint16_t instance,
400 void *ctxt_buf,
401 uint32_t size);
403 /**
404 * This function will set the context for hvm domain
405 *
406 * @parm xc_handle a handle to an open hypervisor interface
407 * @parm domid the domain to set the hvm domain context for
408 * @parm hvm_ctxt pointer to the hvm context with the values to set
409 * @parm size the size of hvm_ctxt in bytes
410 * @return 0 on success, -1 on failure
411 */
412 int xc_domain_hvm_setcontext(int xc_handle,
413 uint32_t domid,
414 uint8_t *hvm_ctxt,
415 uint32_t size);
417 /**
418 * This function returns information about the execution context of a
419 * particular vcpu of a domain.
420 *
421 * @parm xc_handle a handle to an open hypervisor interface
422 * @parm domid the domain to get information from
423 * @parm vcpu the vcpu number
424 * @parm ctxt a pointer to a structure to store the execution context of the
425 * domain
426 * @return 0 on success, -1 on failure
427 */
428 int xc_vcpu_getcontext(int xc_handle,
429 uint32_t domid,
430 uint32_t vcpu,
431 vcpu_guest_context_any_t *ctxt);
433 typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
434 int xc_vcpu_getinfo(int xc_handle,
435 uint32_t domid,
436 uint32_t vcpu,
437 xc_vcpuinfo_t *info);
439 long long xc_domain_get_cpu_usage(int xc_handle,
440 domid_t domid,
441 int vcpu);
443 int xc_domain_sethandle(int xc_handle, uint32_t domid,
444 xen_domain_handle_t handle);
446 typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
447 int xc_shadow_control(int xc_handle,
448 uint32_t domid,
449 unsigned int sop,
450 unsigned long *dirty_bitmap,
451 unsigned long pages,
452 unsigned long *mb,
453 uint32_t mode,
454 xc_shadow_op_stats_t *stats);
456 int xc_sedf_domain_set(int xc_handle,
457 uint32_t domid,
458 uint64_t period, uint64_t slice,
459 uint64_t latency, uint16_t extratime,
460 uint16_t weight);
462 int xc_sedf_domain_get(int xc_handle,
463 uint32_t domid,
464 uint64_t* period, uint64_t *slice,
465 uint64_t *latency, uint16_t *extratime,
466 uint16_t *weight);
468 int xc_sched_credit_domain_set(int xc_handle,
469 uint32_t domid,
470 struct xen_domctl_sched_credit *sdom);
472 int xc_sched_credit_domain_get(int xc_handle,
473 uint32_t domid,
474 struct xen_domctl_sched_credit *sdom);
476 /**
477 * This function sends a trigger to a domain.
478 *
479 * @parm xc_handle a handle to an open hypervisor interface
480 * @parm domid the domain id to send trigger
481 * @parm trigger the trigger type
482 * @parm vcpu the vcpu number to send trigger
483 * @return 0 on success, -1 on failure
484 */
485 int xc_domain_send_trigger(int xc_handle,
486 uint32_t domid,
487 uint32_t trigger,
488 uint32_t vcpu);
490 /**
491 * This function enables or disables debugging of a domain.
492 *
493 * @parm xc_handle a handle to an open hypervisor interface
494 * @parm domid the domain id to set debugging on
495 * @parm enable true to enable debugging
496 * @return 0 on success, -1 on failure
497 */
498 int xc_domain_setdebugging(int xc_handle,
499 uint32_t domid,
500 unsigned int enable);
502 /*
503 * EVENT CHANNEL FUNCTIONS
504 */
506 /* A port identifier is guaranteed to fit in 31 bits. */
507 typedef int evtchn_port_or_error_t;
509 /**
510 * This function allocates an unbound port. Ports are named endpoints used for
511 * interdomain communication. This function is most useful in opening a
512 * well-known port within a domain to receive events on.
513 *
514 * NOTE: If you are allocating a *local* unbound port, you probably want to
515 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
516 * ports *only* during domain creation.
517 *
518 * @parm xc_handle a handle to an open hypervisor interface
519 * @parm dom the ID of the local domain (the 'allocatee')
520 * @parm remote_dom the ID of the domain who will later bind
521 * @return allocated port (in @dom) on success, -1 on failure
522 */
523 evtchn_port_or_error_t
524 xc_evtchn_alloc_unbound(int xc_handle,
525 uint32_t dom,
526 uint32_t remote_dom);
528 int xc_evtchn_reset(int xc_handle,
529 uint32_t dom);
531 typedef struct evtchn_status xc_evtchn_status_t;
532 int xc_evtchn_status(int xc_handle, xc_evtchn_status_t *status);
534 /*
535 * Return a handle to the event channel driver, or -1 on failure, in which case
536 * errno will be set appropriately.
537 */
538 int xc_evtchn_open(void);
540 /*
541 * Close a handle previously allocated with xc_evtchn_open().
542 */
543 int xc_evtchn_close(int xce_handle);
545 /*
546 * Return an fd that can be select()ed on for further calls to
547 * xc_evtchn_pending().
548 */
549 int xc_evtchn_fd(int xce_handle);
551 /*
552 * Notify the given event channel. Returns -1 on failure, in which case
553 * errno will be set appropriately.
554 */
555 int xc_evtchn_notify(int xce_handle, evtchn_port_t port);
557 /*
558 * Returns a new event port awaiting interdomain connection from the given
559 * domain ID, or -1 on failure, in which case errno will be set appropriately.
560 */
561 evtchn_port_or_error_t
562 xc_evtchn_bind_unbound_port(int xce_handle, int domid);
564 /*
565 * Returns a new event port bound to the remote port for the given domain ID,
566 * or -1 on failure, in which case errno will be set appropriately.
567 */
568 evtchn_port_or_error_t
569 xc_evtchn_bind_interdomain(int xce_handle, int domid,
570 evtchn_port_t remote_port);
572 /*
573 * Bind an event channel to the given VIRQ. Returns the event channel bound to
574 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
575 */
576 evtchn_port_or_error_t
577 xc_evtchn_bind_virq(int xce_handle, unsigned int virq);
579 /*
580 * Unbind the given event channel. Returns -1 on failure, in which case errno
581 * will be set appropriately.
582 */
583 int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);
585 /*
586 * Return the next event channel to become pending, or -1 on failure, in which
587 * case errno will be set appropriately.
588 */
589 evtchn_port_or_error_t
590 xc_evtchn_pending(int xce_handle);
592 /*
593 * Unmask the given event channel. Returns -1 on failure, in which case errno
594 * will be set appropriately.
595 */
596 int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
598 int xc_physdev_pci_access_modify(int xc_handle,
599 uint32_t domid,
600 int bus,
601 int dev,
602 int func,
603 int enable);
605 int xc_readconsolering(int xc_handle,
606 char **pbuffer,
607 unsigned int *pnr_chars,
608 int clear, int incremental, uint32_t *pindex);
610 int xc_send_debug_keys(int xc_handle, char *keys);
612 typedef xen_sysctl_physinfo_t xc_physinfo_t;
613 typedef uint32_t xc_cpu_to_node_t;
614 int xc_physinfo(int xc_handle,
615 xc_physinfo_t *info);
617 int xc_sched_id(int xc_handle,
618 int *sched_id);
620 typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
621 int xc_getcpuinfo(int xc_handle, int max_cpus,
622 xc_cpuinfo_t *info, int *nr_cpus);
624 int xc_domain_setmaxmem(int xc_handle,
625 uint32_t domid,
626 unsigned int max_memkb);
628 int xc_domain_set_memmap_limit(int xc_handle,
629 uint32_t domid,
630 unsigned long map_limitkb);
632 int xc_domain_set_time_offset(int xc_handle,
633 uint32_t domid,
634 int32_t time_offset_seconds);
636 int xc_domain_set_tsc_info(int xc_handle,
637 uint32_t domid,
638 uint32_t tsc_mode,
639 uint64_t elapsed_nsec,
640 uint32_t gtsc_khz,
641 uint32_t incarnation);
643 int xc_domain_get_tsc_info(int xc_handle,
644 uint32_t domid,
645 uint32_t *tsc_mode,
646 uint64_t *elapsed_nsec,
647 uint32_t *gtsc_khz,
648 uint32_t *incarnation);
650 int xc_domain_disable_migrate(int xc_handle, uint32_t domid);
652 int xc_domain_memory_increase_reservation(int xc_handle,
653 uint32_t domid,
654 unsigned long nr_extents,
655 unsigned int extent_order,
656 unsigned int mem_flags,
657 xen_pfn_t *extent_start);
659 int xc_domain_memory_decrease_reservation(int xc_handle,
660 uint32_t domid,
661 unsigned long nr_extents,
662 unsigned int extent_order,
663 xen_pfn_t *extent_start);
665 int xc_domain_memory_populate_physmap(int xc_handle,
666 uint32_t domid,
667 unsigned long nr_extents,
668 unsigned int extent_order,
669 unsigned int mem_flags,
670 xen_pfn_t *extent_start);
672 int xc_domain_memory_set_pod_target(int xc_handle,
673 uint32_t domid,
674 uint64_t target_pages,
675 uint64_t *tot_pages,
676 uint64_t *pod_cache_pages,
677 uint64_t *pod_entries);
679 int xc_domain_memory_get_pod_target(int xc_handle,
680 uint32_t domid,
681 uint64_t *tot_pages,
682 uint64_t *pod_cache_pages,
683 uint64_t *pod_entries);
685 int xc_domain_ioport_permission(int xc_handle,
686 uint32_t domid,
687 uint32_t first_port,
688 uint32_t nr_ports,
689 uint32_t allow_access);
691 int xc_domain_irq_permission(int xc_handle,
692 uint32_t domid,
693 uint8_t pirq,
694 uint8_t allow_access);
696 int xc_domain_iomem_permission(int xc_handle,
697 uint32_t domid,
698 unsigned long first_mfn,
699 unsigned long nr_mfns,
700 uint8_t allow_access);
702 int xc_domain_pin_memory_cacheattr(int xc_handle,
703 uint32_t domid,
704 uint64_t start,
705 uint64_t end,
706 uint32_t type);
708 unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
709 unsigned long mfn);
711 typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
712 typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
713 /* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
714 arrays. */
715 int xc_perfc_control(int xc_handle,
716 uint32_t op,
717 xc_perfc_desc_t *desc,
718 xc_perfc_val_t *val,
719 int *nbr_desc,
720 int *nbr_val);
722 typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
723 /* IMPORTANT: The caller is responsible for mlock()'ing the @data array. */
724 int xc_lockprof_control(int xc_handle,
725 uint32_t opcode,
726 uint32_t *n_elems,
727 uint64_t *time,
728 xc_lockprof_data_t *data);
730 /**
731 * Memory maps a range within one domain to a local address range. Mappings
732 * should be unmapped with munmap and should follow the same rules as mmap
733 * regarding page alignment. Returns NULL on failure.
734 *
735 * In Linux, the ring queue for the control channel is accessible by mapping
736 * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure
737 * stored there is of type control_if_t.
738 *
739 * @parm xc_handle a handle on an open hypervisor interface
740 * @parm dom the domain to map memory from
741 * @parm size the amount of memory to map (in multiples of page size)
742 * @parm prot same flag as in mmap().
743 * @parm mfn the frame address to map.
744 */
745 void *xc_map_foreign_range(int xc_handle, uint32_t dom,
746 int size, int prot,
747 unsigned long mfn );
749 void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
750 const xen_pfn_t *arr, int num );
752 /**
753 * DEPRECATED - use xc_map_foreign_bulk() instead.
754 *
755 * Like xc_map_foreign_pages(), except it can succeed partially.
756 * When a page cannot be mapped, its PFN in @arr is or'ed with
757 * 0xF0000000 to indicate the error.
758 */
759 void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
760 xen_pfn_t *arr, int num );
762 /**
763 * Like xc_map_foreign_pages(), except it can succeed partially.
764 * When a page cannot be mapped, its respective field in @err is
765 * set to the corresponding errno value.
766 */
767 void *xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot,
768 const xen_pfn_t *arr, int *err, unsigned int num);
770 /**
771 * Translates a virtual address in the context of a given domain and
772 * vcpu returning the GFN containing the address (that is, an MFN for
773 * PV guests, a PFN for HVM guests). Returns 0 for failure.
774 *
775 * @parm xc_handle a handle on an open hypervisor interface
776 * @parm dom the domain to perform the translation in
777 * @parm vcpu the vcpu to perform the translation on
778 * @parm virt the virtual address to translate
779 */
780 unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
781 int vcpu, unsigned long long virt);
784 /**
785 * DEPRECATED. Avoid using this, as it does not correctly account for PFNs
786 * without a backing MFN.
787 */
788 int xc_get_pfn_list(int xc_handle, uint32_t domid, uint64_t *pfn_buf,
789 unsigned long max_pfns);
791 unsigned long xc_ia64_fpsr_default(void);
793 int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
794 unsigned long dst_pfn, const char *src_page);
796 int xc_clear_domain_page(int xc_handle, uint32_t domid,
797 unsigned long dst_pfn);
799 long xc_get_max_pages(int xc_handle, uint32_t domid);
801 int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
802 domid_t dom);
804 int xc_memory_op(int xc_handle, int cmd, void *arg);
807 /* Get current total pages allocated to a domain. */
808 long xc_get_tot_pages(int xc_handle, uint32_t domid);
810 /**
811 * This function retrieves the number of bytes available
812 * in the heap in a specific range of address-widths and nodes.
813 *
814 * @parm xc_handle a handle to an open hypervisor interface
815 * @parm domid the domain to query
816 * @parm min_width the smallest address width to query (0 if don't care)
817 * @parm max_width the largest address width to query (0 if don't care)
818 * @parm node the node to query (-1 for all)
819 * @parm *bytes caller variable to put total bytes counted
820 * @return 0 on success, <0 on failure.
821 */
822 int xc_availheap(int xc_handle, int min_width, int max_width, int node,
823 uint64_t *bytes);
825 /*
826 * Trace Buffer Operations
827 */
829 /**
830 * xc_tbuf_enable - enable tracing buffers
831 *
832 * @parm xc_handle a handle to an open hypervisor interface
833 * @parm cnt size of tracing buffers to create (in pages)
834 * @parm mfn location to store mfn of the trace buffers to
835 * @parm size location to store the size (in bytes) of a trace buffer to
836 *
837 * Gets the machine address of the trace pointer area and the size of the
838 * per CPU buffers.
839 */
840 int xc_tbuf_enable(int xc_handle, unsigned long pages,
841 unsigned long *mfn, unsigned long *size);
843 /*
844 * Disable tracing buffers.
845 */
846 int xc_tbuf_disable(int xc_handle);
848 /**
849 * This function sets the size of the trace buffers. Setting the size
850 * is currently a one-shot operation that may be performed either at boot
851 * time or via this interface, not both. The buffer size must be set before
852 * enabling tracing.
853 *
854 * @parm xc_handle a handle to an open hypervisor interface
855 * @parm size the size in pages per cpu for the trace buffers
856 * @return 0 on success, -1 on failure.
857 */
858 int xc_tbuf_set_size(int xc_handle, unsigned long size);
860 /**
861 * This function retrieves the current size of the trace buffers.
862 * Note that the size returned is in terms of bytes, not pages.
864 * @parm xc_handle a handle to an open hypervisor interface
865 * @parm size will contain the size in bytes for the trace buffers
866 * @return 0 on success, -1 on failure.
867 */
868 int xc_tbuf_get_size(int xc_handle, unsigned long *size);
870 int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);
872 int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);
874 int xc_domctl(int xc_handle, struct xen_domctl *domctl);
875 int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);
877 int xc_version(int xc_handle, int cmd, void *arg);
879 int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);
881 int xc_flask_op(int xc_handle, flask_op_t *op);
883 /*
884 * Subscribe to state changes in a domain via evtchn.
885 * Returns -1 on failure, in which case errno will be set appropriately.
886 */
887 int xc_domain_subscribe_for_suspend(
888 int xc_handle, domid_t domid, evtchn_port_t port);
890 /**************************
891 * GRANT TABLE OPERATIONS *
892 **************************/
894 /*
895 * Return a handle to the grant table driver, or -1 on failure, in which case
896 * errno will be set appropriately.
897 */
898 int xc_gnttab_open(void);
900 /*
901 * Close a handle previously allocated with xc_gnttab_open().
902 */
903 int xc_gnttab_close(int xcg_handle);
905 /*
906 * Memory maps a grant reference from one domain to a local address range.
907 * Mappings should be unmapped with xc_gnttab_munmap. Returns NULL on failure.
908 *
909 * @parm xcg_handle a handle on an open grant table interface
910 * @parm domid the domain to map memory from
911 * @parm ref the grant reference ID to map
912 * @parm prot same flag as in mmap()
913 */
914 void *xc_gnttab_map_grant_ref(int xcg_handle,
915 uint32_t domid,
916 uint32_t ref,
917 int prot);
919 /**
920 * Memory maps one or more grant references from one or more domains to a
921 * contiguous local address range. Mappings should be unmapped with
922 * xc_gnttab_munmap. Returns NULL on failure.
923 *
924 * @parm xcg_handle a handle on an open grant table interface
925 * @parm count the number of grant references to be mapped
926 * @parm domids an array of @count domain IDs by which the corresponding @refs
927 * were granted
928 * @parm refs an array of @count grant references to be mapped
929 * @parm prot same flag as in mmap()
930 */
931 void *xc_gnttab_map_grant_refs(int xcg_handle,
932 uint32_t count,
933 uint32_t *domids,
934 uint32_t *refs,
935 int prot);
937 /**
938 * Memory maps one or more grant references from one domain to a
939 * contiguous local address range. Mappings should be unmapped with
940 * xc_gnttab_munmap. Returns NULL on failure.
941 *
942 * @parm xcg_handle a handle on an open grant table interface
943 * @parm count the number of grant references to be mapped
944 * @parm domid the domain to map memory from
945 * @parm refs an array of @count grant references to be mapped
946 * @parm prot same flag as in mmap()
947 */
948 void *xc_gnttab_map_domain_grant_refs(int xcg_handle,
949 uint32_t count,
950 uint32_t domid,
951 uint32_t *refs,
952 int prot);
954 /*
955 * Unmaps the @count pages starting at @start_address, which were mapped by a
956 * call to xc_gnttab_map_grant_ref or xc_gnttab_map_grant_refs. Returns zero
957 * on success, otherwise sets errno and returns non-zero.
958 */
959 int xc_gnttab_munmap(int xcg_handle,
960 void *start_address,
961 uint32_t count);
963 /*
964 * Sets the maximum number of grants that may be mapped by the given instance
965 * to @count.
966 *
967 * N.B. This function must be called after opening the handle, and before any
968 * other functions are invoked on it.
969 *
970 * N.B. When variable-length grants are mapped, fragmentation may be observed,
971 * and it may not be possible to satisfy requests up to the maximum number
972 * of grants.
973 */
974 int xc_gnttab_set_max_grants(int xcg_handle,
975 uint32_t count);
977 int xc_gnttab_op(int xc_handle, int cmd,
978 void * op, int op_size, int count);
980 int xc_gnttab_get_version(int xc_handle, int domid);
981 grant_entry_v1_t *xc_gnttab_map_table_v1(int xc_handle, int domid, int *gnt_num);
982 grant_entry_v2_t *xc_gnttab_map_table_v2(int xc_handle, int domid, int *gnt_num);
/*
 * Physical-device interrupt management.
 * Map a physical interrupt (@index) into domain @domid; the chosen pirq is
 * returned through *pirq.  NOTE(review): presumably wraps PHYSDEVOP_map_pirq
 * (see xen/physdev.h, included above) — confirm @index semantics there.
 */
984 int xc_physdev_map_pirq(int xc_handle,
985 int domid,
986 int index,
987 int *pirq);
/* As above, but for an MSI source identified by PCI @bus/@devfn, MSI-X
 * @entry_nr and @table_base. */
989 int xc_physdev_map_pirq_msi(int xc_handle,
990 int domid,
991 int index,
992 int *pirq,
993 int devfn,
994 int bus,
995 int entry_nr,
996 uint64_t table_base);
/* Release a pirq previously obtained via one of the map calls above. */
998 int xc_physdev_unmap_pirq(int xc_handle,
999 int domid,
1000 int pirq);
/* Set the level of a virtual PCI INTx line of HVM domain @dom, identified by
 * PCI @domain/@bus/@device/@intx.  NOTE(review): presumably @level != 0
 * asserts and 0 deasserts — confirm against the hypervisor side. */
1002 int xc_hvm_set_pci_intx_level(
1003 int xc_handle, domid_t dom,
1004 uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
1005 unsigned int level);
/* Set the level of a virtual ISA IRQ line of HVM domain @dom. */
1006 int xc_hvm_set_isa_irq_level(
1007 int xc_handle, domid_t dom,
1008 uint8_t isa_irq,
1009 unsigned int level);
/* Route a virtual PCI interrupt @link to ISA IRQ @isa_irq for HVM @dom. */
1011 int xc_hvm_set_pci_link_route(
1012 int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq);
1015 /*
1016 * Track dirty bit changes in the VRAM area
1018 * All of this is done atomically:
1019 * - get the dirty bitmap since the last call
1020 * - set up dirty tracking area for period up to the next call
1021 * - clear the dirty tracking area.
1023 * Returns -ENODATA and does not fill bitmap if the area has changed since the
1024 * last call.
1025 */
/* @first_pfn/@nr select the guest-frame range; @bitmap receives one dirty bit
 * per page.  NOTE(review): caller must size @bitmap for @nr bits — confirm
 * rounding (bits vs. unsigned longs) against the implementation. */
1026 int xc_hvm_track_dirty_vram(
1027 int xc_handle, domid_t dom,
1028 uint64_t first_pfn, uint64_t nr,
1029 unsigned long *bitmap);
1031 /*
1032 * Notify that some pages got modified by the Device Model
1033 */
1034 int xc_hvm_modified_memory(
1035 int xc_handle, domid_t dom, uint64_t first_pfn, uint64_t nr);
1037 /*
1038 * Set a range of memory to a specific type.
1039 * Allowed types are HVMMEM_ram_rw, HVMMEM_ram_ro, HVMMEM_mmio_dm
1040 */
1041 int xc_hvm_set_mem_type(
1042 int xc_handle, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint64_t nr);
/* libxc error reporting: error codes, the last-error record, and pluggable
 * error handlers. */
1045 typedef enum {
1046 XC_ERROR_NONE = 0,
1047 XC_INTERNAL_ERROR = 1,
1048 XC_INVALID_KERNEL = 2,
1049 XC_INVALID_PARAM = 3,
1050 XC_OUT_OF_MEMORY = 4,
1051 } xc_error_code;
1053 #define XC_MAX_ERROR_MSG_LEN 1024
/* A single recorded error: an xc_error_code value plus a NUL-terminated
 * human-readable message. */
1054 typedef struct {
1055 int code;
1056 char message[XC_MAX_ERROR_MSG_LEN];
1057 } xc_error;
1059 /*
1060 * Return a pointer to the last error. This pointer and the
1061 * data pointed to are only valid until the next call to
1062 * libxc.
1063 */
1064 const xc_error *xc_get_last_error(void);
1066 /*
1067 * Clear the last error
1068 */
1069 void xc_clear_last_error(void);
/* Callback type invoked when libxc records an error. */
1071 typedef void (*xc_error_handler)(const xc_error *err);
1073 /*
1074 * The default error handler which prints to stderr
1075 */
1076 void xc_default_error_handler(const xc_error *err);
1078 /*
1079 * Convert an error code into a text description
1080 */
1081 const char *xc_error_code_to_desc(int code);
1083 /*
1084 * Registers a callback to handle errors
1085 * NOTE(review): presumably returns the previously-installed handler so the
1086 * caller can restore it — confirm against the implementation.
1085 */
1086 xc_error_handler xc_set_error_handler(xc_error_handler handler);
/* Get/set a per-domain HVM parameter; @param is an HVM_PARAM_* index from
 * xen/hvm/params.h (included at the top of this header). */
1088 int xc_set_hvm_param(int handle, domid_t dom, int param, unsigned long value);
1089 int xc_get_hvm_param(int handle, domid_t dom, int param, unsigned long *value);
1091 /* IA64 specific, nvram save */
1092 int xc_ia64_save_to_nvram(int xc_handle, uint32_t dom);
1094 /* IA64 specific, nvram init */
1095 int xc_ia64_nvram_init(int xc_handle, char *dom_name, uint32_t dom);
1097 /* IA64 specific, set guest OS type optimizations */
1098 int xc_ia64_set_os_type(int xc_handle, char *guest_os_type, uint32_t dom);
1100 /* HVM guest pass-through */
/* NOTE(review): @machine_bdf presumably encodes the physical PCI
 * bus/device/function of the device to (de)assign — confirm the packing
 * convention against the domctl interface. */
1101 int xc_assign_device(int xc_handle,
1102 uint32_t domid,
1103 uint32_t machine_bdf);
/* Retrieve the co-assignment group of @machine_bdf: up to @max_sdevs sibling
 * devices are written to @sdev_array, with the count in *num_sdevs. */
1105 int xc_get_device_group(int xc_handle,
1106 uint32_t domid,
1107 uint32_t machine_bdf,
1108 uint32_t max_sdevs,
1109 uint32_t *num_sdevs,
1110 uint32_t *sdev_array);
/* Check whether @machine_bdf could be assigned to @domid without doing so. */
1112 int xc_test_assign_device(int xc_handle,
1113 uint32_t domid,
1114 uint32_t machine_bdf);
1116 int xc_deassign_device(int xc_handle,
1117 uint32_t domid,
1118 uint32_t machine_bdf);
/* Add (@add_mapping != 0) or remove a guest-frame -> machine-frame mapping
 * of @nr_mfns frames for pass-through MMIO. */
1120 int xc_domain_memory_mapping(int xc_handle,
1121 uint32_t domid,
1122 unsigned long first_gfn,
1123 unsigned long first_mfn,
1124 unsigned long nr_mfns,
1125 uint32_t add_mapping);
/* Add or remove a guest-port -> machine-port I/O-port mapping of @nr_ports
 * ports for pass-through device I/O. */
1127 int xc_domain_ioport_mapping(int xc_handle,
1128 uint32_t domid,
1129 uint32_t first_gport,
1130 uint32_t first_mport,
1131 uint32_t nr_ports,
1132 uint32_t add_mapping);
/* Update the binding of a guest MSI vector @gvec to physical @pirq for
 * pass-through; @gflags/@gtable carry guest MSI flags and MSI-X table base.
 * NOTE(review): exact @gflags bit layout is defined by the domctl interface
 * — confirm there. */
1134 int xc_domain_update_msi_irq(
1135 int xc_handle,
1136 uint32_t domid,
1137 uint32_t gvec,
1138 uint32_t pirq,
1139 uint32_t gflags,
1140 uint64_t gtable);
1142 int xc_domain_unbind_msi_irq(int xc_handle,
1143 uint32_t domid,
1144 uint32_t gvec,
1145 uint32_t pirq,
1146 uint32_t gflags);
/* Bind physical @machine_irq to a pass-through guest interrupt.  @irq_type
 * selects whether the PCI (@bus/@device/@intx) or ISA (@isa_irq) identifiers
 * apply. */
1148 int xc_domain_bind_pt_irq(int xc_handle,
1149 uint32_t domid,
1150 uint8_t machine_irq,
1151 uint8_t irq_type,
1152 uint8_t bus,
1153 uint8_t device,
1154 uint8_t intx,
1155 uint8_t isa_irq);
/* Reverse of xc_domain_bind_pt_irq; parameters identify the same binding. */
1157 int xc_domain_unbind_pt_irq(int xc_handle,
1158 uint32_t domid,
1159 uint8_t machine_irq,
1160 uint8_t irq_type,
1161 uint8_t bus,
1162 uint8_t device,
1163 uint8_t intx,
1164 uint8_t isa_irq);
/* Convenience forms of xc_domain_bind_pt_irq for PCI INTx and ISA IRQs. */
1166 int xc_domain_bind_pt_pci_irq(int xc_handle,
1167 uint32_t domid,
1168 uint8_t machine_irq,
1169 uint8_t bus,
1170 uint8_t device,
1171 uint8_t intx);
1173 int xc_domain_bind_pt_isa_irq(int xc_handle,
1174 uint32_t domid,
1175 uint8_t machine_irq);
/* Set/query the guest's machine address width (@width in bits). */
1177 int xc_domain_set_machine_address_size(int handle,
1178 uint32_t domid,
1179 unsigned int width);
1180 int xc_domain_get_machine_address_size(int handle,
1181 uint32_t domid);
/* Ask the hypervisor to suppress spurious page faults for @domid. */
1183 int xc_domain_suppress_spurious_page_faults(int handle,
1184 uint32_t domid);
1186 /* Set the target domain */
1187 int xc_domain_set_target(int xc_handle,
1188 uint32_t domid,
1189 uint32_t target);
1191 /* Control the domain for debug */
/* NOTE(review): @sop presumably selects the debug sub-operation and @vcpu
 * the target VCPU — confirm against XEN_DOMCTL_debug_op. */
1192 int xc_domain_debug_control(int xc_handle,
1193 uint32_t domid,
1194 uint32_t sop,
1195 uint32_t vcpu);
1197 #if defined(__i386__) || defined(__x86_64__)
/* x86-only CPUID policy control.  @input holds the CPUID leaf (and, where
 * applicable, sub-leaf); @config is a per-register string specification.
 * NOTE(review): the config string format is defined by the libxc cpuid
 * implementation — confirm there before relying on it. */
1198 int xc_cpuid_check(int xc,
1199 const unsigned int *input,
1200 const char **config,
1201 char **config_transformed);
/* Apply a CPUID policy for one leaf to @domid; the effective policy is
 * returned via @config_transformed. */
1202 int xc_cpuid_set(int xc,
1203 domid_t domid,
1204 const unsigned int *input,
1205 const char **config,
1206 char **config_transformed);
/* Apply the default CPUID policy to @domid. */
1207 int xc_cpuid_apply_policy(int xc,
1208 domid_t domid);
/* Format raw CPUID register values (@regs) as strings in @strs. */
1209 void xc_cpuid_to_str(const unsigned int *regs,
1210 char **strs);
/* Issue a machine-check architecture (MCA) operation (x86 only). */
1211 int xc_mca_op(int xc_handle, struct xen_mc *mc);
1212 #endif
/* Per-P-state statistics entry. */
1214 struct xc_px_val {
1215 uint64_t freq; /* Px core frequency */
1216 uint64_t residency; /* Px residency time */
1217 uint64_t count; /* Px transition count */
1218 };
/* Aggregate P-state statistics for one CPU; @trans_pt and @pt point at
 * caller-provided arrays (NOTE(review): presumably sized by @total — confirm
 * against xc_pm_get_pxstat's implementation). */
1220 struct xc_px_stat {
1221 uint8_t total; /* total Px states */
1222 uint8_t usable; /* usable Px states */
1223 uint8_t last; /* last Px state */
1224 uint8_t cur; /* current Px state */
1225 uint64_t *trans_pt; /* Px transition table */
1226 struct xc_px_val *pt;
1227 };
/* Query/reset P-state statistics for physical CPU @cpuid. */
1229 int xc_pm_get_max_px(int xc_handle, int cpuid, int *max_px);
1230 int xc_pm_get_pxstat(int xc_handle, int cpuid, struct xc_px_stat *pxpt);
1231 int xc_pm_reset_pxstat(int xc_handle, int cpuid);
/* Aggregate C-state (idle) statistics for one CPU. */
1233 struct xc_cx_stat {
1234 uint32_t nr; /* entry nr in triggers & residencies, including C0 */
1235 uint32_t last; /* last Cx state */
1236 uint64_t idle_time; /* idle time from boot */
1237 uint64_t *triggers; /* Cx trigger counts */
1238 uint64_t *residencies; /* Cx residencies */
1239 };
1240 typedef struct xc_cx_stat xc_cx_stat_t;
/* Query/reset C-state statistics for physical CPU @cpuid. */
1242 int xc_pm_get_max_cx(int xc_handle, int cpuid, int *max_cx);
1243 int xc_pm_get_cxstat(int xc_handle, int cpuid, struct xc_cx_stat *cxpt);
1244 int xc_pm_reset_cxstat(int xc_handle, int cpuid);
1246 int xc_cpu_online(int xc_handle, int cpu);
1247 int xc_cpu_offline(int xc_handle, int cpu);
/*
 * cpufreq parameters: the field names of this structure match the sysfs
 * file names used by the native Linux cpufreq subsystem.
 */
1253 typedef xen_userspace_t xc_userspace_t;
1254 typedef xen_ondemand_t xc_ondemand_t;
1256 struct xc_get_cpufreq_para {
1257 /* IN/OUT variable */
1258 uint32_t cpu_num;
1259 uint32_t freq_num;
1260 uint32_t gov_num;
1262 /* for all governors */
1263 /* OUT variable */
1264 uint32_t *affected_cpus;
1265 uint32_t *scaling_available_frequencies;
1266 char *scaling_available_governors;
1267 char scaling_driver[CPUFREQ_NAME_LEN];
1269 uint32_t cpuinfo_cur_freq;
1270 uint32_t cpuinfo_max_freq;
1271 uint32_t cpuinfo_min_freq;
1272 uint32_t scaling_cur_freq;
1274 char scaling_governor[CPUFREQ_NAME_LEN];
1275 uint32_t scaling_max_freq;
1276 uint32_t scaling_min_freq;
1278 /* for specific governor */
1279 union {
1280 xc_userspace_t userspace;
1281 xc_ondemand_t ondemand;
1282 } u;
1283 };
/* Retrieve cpufreq parameters for physical CPU @cpuid into @user_para;
 * the IN/OUT count fields above describe the caller-provided arrays. */
1285 int xc_get_cpufreq_para(int xc_handle, int cpuid,
1286 struct xc_get_cpufreq_para *user_para);
/* Select the cpufreq governor @govname for CPU @cpuid. */
1287 int xc_set_cpufreq_gov(int xc_handle, int cpuid, char *govname);
/* Set one governor-specific control (@ctrl_type) to @ctrl_value. */
1288 int xc_set_cpufreq_para(int xc_handle, int cpuid,
1289 int ctrl_type, int ctrl_value);
1290 int xc_get_cpufreq_avgfreq(int xc_handle, int cpuid, int *avg_freq);
1292 struct xc_get_cputopo {
1293 /* IN: maximum addressable entry in
1294 * the caller-provided cpu_to_core/socket.
1295 */
1296 uint32_t max_cpus;
1297 uint32_t *cpu_to_core;
1298 uint32_t *cpu_to_socket;
1300 /* OUT: number of cpus returned
1301 * If OUT is greater than IN then the cpu_to_core/socket is truncated!
1302 */
1303 uint32_t nr_cpus;
1304 };
/* Fill @info with the physical CPU topology (core/socket per CPU). */
1306 int xc_get_cputopo(int xc_handle, struct xc_get_cputopo *info);
/* Scheduler and cpuidle tunables. */
1308 int xc_set_sched_opt_smt(int xc_handle, uint32_t value);
1309 int xc_set_vcpu_migration_delay(int xc_handle, uint32_t value);
1310 int xc_get_vcpu_migration_delay(int xc_handle, uint32_t *value);
1312 int xc_get_cpuidle_max_cstate(int xc_handle, uint32_t *value);
1313 int xc_set_cpuidle_max_cstate(int xc_handle, uint32_t value);
/* Enable/disable turbo mode on physical CPU @cpuid. */
1315 int xc_enable_turbo(int xc_handle, int cpuid);
1316 int xc_disable_turbo(int xc_handle, int cpuid);
1317 /**
1318 * tmem operations
1319 */
/* Issue a tmem control sub-operation @subop on pool @pool_id for client
 * @cli_id; @arg1..@arg3 and @buf are sub-op specific (see xen/tmem.h,
 * included above). */
1320 int xc_tmem_control(int xc, int32_t pool_id, uint32_t subop, uint32_t cli_id,
1321 uint32_t arg1, uint32_t arg2, uint64_t arg3, void *buf);
/* Authorize client @cli_id for the pool identified by @uuid_str. */
1322 int xc_tmem_auth(int xc_handle, int cli_id, char *uuid_str, int arg1);
/* Save/restore tmem state for domain @dom across migration; @field_marker
 * tags records in the @fd stream.  NOTE(review): stream format is defined by
 * the save/restore implementation — confirm there. */
1323 int xc_tmem_save(int xc_handle, int dom, int live, int fd, int field_marker);
1324 int xc_tmem_save_extra(int xc_handle, int dom, int fd, int field_marker);
1325 void xc_tmem_save_done(int xc_handle, int dom);
1326 int xc_tmem_restore(int xc_handle, int dom, int fd);
1327 int xc_tmem_restore_extra(int xc_handle, int dom, int fd);
1329 /**
1330 * mem_event operations
1331 */
/* Control the mem_event interface for @domain_id; @shared_page and
 * @ring_page back the event channel and ring between Xen and the listener.
 * NOTE(review): valid @op/@mode values come from the mem_event domctl
 * interface — confirm there. */
1332 int xc_mem_event_control(int xc_handle, domid_t domain_id, unsigned int op,
1333 unsigned int mode, void *shared_page,
1334 void *ring_page, unsigned long gfn);
/* Convenience enable/disable wrappers around xc_mem_event_control. */
1336 int xc_mem_event_enable(int xc_handle, domid_t domain_id,
1337 void *shared_page, void *ring_page);
1338 int xc_mem_event_disable(int xc_handle, domid_t domain_id);
/* Memory paging: nominate a candidate @gfn, evict it to disk, prepare it for
 * page-in, and resume the domain after servicing a paging request. */
1340 int xc_mem_paging_nominate(int xc_handle, domid_t domain_id,
1341 unsigned long gfn);
1342 int xc_mem_paging_evict(int xc_handle, domid_t domain_id, unsigned long gfn);
1343 int xc_mem_paging_prep(int xc_handle, domid_t domain_id, unsigned long gfn);
1344 int xc_mem_paging_resume(int xc_handle, domid_t domain_id,
1345 unsigned long gfn);
1347 /**
1348 * memshr operations
1349 */
/* Enable (@enable != 0) or disable memory sharing for @domid. */
1350 int xc_memshr_control(int xc_handle,
1351 uint32_t domid,
1352 int enable);
/* Nominate a page for sharing, identified either by guest frame @gfn or by
 * grant reference @gref; on success *handle receives an opaque sharing
 * handle used by xc_memshr_share. */
1353 int xc_memshr_nominate_gfn(int xc_handle,
1354 uint32_t domid,
1355 unsigned long gfn,
1356 uint64_t *handle);
1357 int xc_memshr_nominate_gref(int xc_handle,
1358 uint32_t domid,
1359 grant_ref_t gref,
1360 uint64_t *handle);
/* Share the two previously-nominated pages identified by the handles. */
1361 int xc_memshr_share(int xc_handle,
1362 uint64_t source_handle,
1363 uint64_t client_handle);
/* Resume @domid after a sharing operation paused it.  NOTE(review):
 * presumably pairs with an unshare fault — confirm against mem_sharing. */
1364 int xc_memshr_domain_resume(int xc_handle,
1365 uint32_t domid);
/* Debug helpers: dump sharing state for a gfn, mfn, or grant reference. */
1366 int xc_memshr_debug_gfn(int xc_handle,
1367 uint32_t domid,
1368 unsigned long gfn);
1369 int xc_memshr_debug_mfn(int xc_handle,
1370 uint32_t domid,
1371 unsigned long mfn);
1372 int xc_memshr_debug_gref(int xc_handle,
1373 uint32_t domid,
1374 grant_ref_t gref);
1376 #endif /* XENCTRL_H */