debuggers.hg

view tools/libxc/xenctrl.h @ 20838:0447c5532e9f

x86: add and use XEN_DOMCTL_getpageframeinfo3

To support wider than 28-bit MFNs, add XEN_DOMCTL_getpageframeinfo3
(with the type replacing the passed in MFN rather than getting or-ed
into it) to properly back xc_get_pfn_type_batch().

With xc_get_pfn_type_batch() only used internally to libxc, move its
prototype from xenctrl.h to xc_private.h.

This also fixes a couple of bugs in pre-existing code:
- the failure path for init_mem_info() leaked minfo->pfn_type,
- one error path of the XEN_DOMCTL_getpageframeinfo2 handler used
put_domain() where rcu_unlock_domain() was meant, and
- the XEN_DOMCTL_getpageframeinfo2 handler could call
xsm_getpageframeinfo() with an invalid struct page_info pointer.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 13 08:14:01 2010 +0000 (2010-01-13)
parents 0b138a019292
children a7546e45ca83
line source
1 /******************************************************************************
2 * xenctrl.h
3 *
4 * A library for low-level access to the Xen control interfaces.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 *
8 * xc_gnttab functions:
9 * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
10 */
12 #ifndef XENCTRL_H
13 #define XENCTRL_H
15 /* Tell the Xen public headers we are a user-space tools build. */
16 #ifndef __XEN_TOOLS__
17 #define __XEN_TOOLS__ 1
18 #endif
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <xen/xen.h>
23 #include <xen/domctl.h>
24 #include <xen/physdev.h>
25 #include <xen/sysctl.h>
26 #include <xen/version.h>
27 #include <xen/event_channel.h>
28 #include <xen/sched.h>
29 #include <xen/memory.h>
30 #include <xen/grant_table.h>
31 #include <xen/hvm/params.h>
32 #include <xen/xsm/acm.h>
33 #include <xen/xsm/acm_ops.h>
34 #include <xen/xsm/flask_op.h>
35 #include <xen/tmem.h>
37 #if defined(__i386__) || defined(__x86_64__)
38 #include <xen/foreign/x86_32.h>
39 #include <xen/foreign/x86_64.h>
40 #endif
42 #ifdef __ia64__
43 #define XC_PAGE_SHIFT 14
44 #else
45 #define XC_PAGE_SHIFT 12
46 #endif
47 #define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
48 #define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))
50 #define INVALID_MFN (~0UL)
52 /*
53 * DEFINITIONS FOR CPU BARRIERS
54 */
56 #if defined(__i386__)
57 #define xen_mb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
58 #define xen_rmb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
59 #define xen_wmb() asm volatile ( "" : : : "memory")
60 #elif defined(__x86_64__)
61 #define xen_mb() asm volatile ( "mfence" : : : "memory")
62 #define xen_rmb() asm volatile ( "lfence" : : : "memory")
63 #define xen_wmb() asm volatile ( "" : : : "memory")
64 #elif defined(__ia64__)
65 #define xen_mb() asm volatile ("mf" ::: "memory")
66 #define xen_rmb() asm volatile ("mf" ::: "memory")
67 #define xen_wmb() asm volatile ("mf" ::: "memory")
68 #else
69 #error "Define barriers"
70 #endif
72 /*
73 * INITIALIZATION FUNCTIONS
74 */
76 /**
77 * This function opens a handle to the hypervisor interface. This function can
78 * be called multiple times within a single process. Multiple processes can
79 * have an open hypervisor interface at the same time.
80 *
81 * Each call to this function should have a corresponding call to
82 * xc_interface_close().
83 *
84 * This function can fail if the caller does not have superuser permission or
85 * if a Xen-enabled kernel is not currently running.
86 *
87 * @return a handle to the hypervisor interface or -1 on failure
88 */
89 int xc_interface_open(void);
91 /**
92 * This function closes an open hypervisor interface.
93 *
94 * This function can fail if the handle does not represent an open interface or
95 * if there were problems closing the interface.
96 *
97 * @parm xc_handle a handle to an open hypervisor interface
98 * @return 0 on success, -1 otherwise.
99 */
100 int xc_interface_close(int xc_handle);
102 /*
103 * KERNEL INTERFACES
104 */
106 /*
107 * Resolve a kernel device name (e.g., "evtchn", "blktap0") into a kernel
108 * device number. Returns -1 on error (and sets errno).
109 */
110 int xc_find_device_number(const char *name);
112 /*
113 * DOMAIN DEBUGGING FUNCTIONS
114 */
/* Header placed at the start of a domain core-dump file produced by
 * xc_domain_dumpcore(). Offsets are positions within the dump file.
 * NOTE(review): field meanings inferred from names and the dumpcore API
 * below -- confirm against the dump writer before relying on them. */
116 typedef struct xc_core_header {
117 unsigned int xch_magic; /* XC_CORE_MAGIC (PV) or XC_CORE_MAGIC_HVM (HVM) */
118 unsigned int xch_nr_vcpus; /* number of vcpu contexts in the dump */
119 unsigned int xch_nr_pages; /* number of guest pages in the dump */
120 unsigned int xch_ctxt_offset; /* file offset of the vcpu contexts */
121 unsigned int xch_index_offset; /* file offset of the page-number index */
122 unsigned int xch_pages_offset; /* file offset of the page data */
123 } xc_core_header_t;
125 #define XC_CORE_MAGIC 0xF00FEBED
126 #define XC_CORE_MAGIC_HVM 0xF00FEBEE
128 #ifdef __linux__
130 #include <sys/ptrace.h>
131 #include <thread_db.h>
133 typedef void (*thr_ev_handler_t)(long);
135 void xc_register_event_handler(
136 thr_ev_handler_t h,
137 td_event_e e);
139 long xc_ptrace(
140 int xc_handle,
141 enum __ptrace_request request,
142 uint32_t domid,
143 long addr,
144 long data);
146 int xc_waitdomain(
147 int xc_handle,
148 int domain,
149 int *status,
150 int options);
152 #endif /* __linux__ */
154 /*
155 * DOMAIN MANAGEMENT FUNCTIONS
156 */
/* Per-domain summary information, filled in by xc_domain_getinfo(). */
158 typedef struct xc_dominfo {
159 uint32_t domid; /* domain ID */
160 uint32_t ssidref; /* security (XSM/ACM) SSID reference */
161 unsigned int dying:1, crashed:1, shutdown:1,
162 paused:1, blocked:1, running:1,
163 hvm:1, debugged:1; /* domain state flags */
164 unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
165 unsigned long nr_pages; /* current number, not maximum */
166 unsigned long nr_shared_pages;
167 unsigned long shared_info_frame; /* frame number of the shared_info page */
168 uint64_t cpu_time; /* accumulated cpu time -- units: TODO confirm (likely ns) */
169 unsigned long max_memkb; /* memory limit, in kilobytes */
170 unsigned int nr_online_vcpus;
171 unsigned int max_vcpu_id; /* highest vcpu ID assigned to this domain */
172 xen_domain_handle_t handle; /* opaque caller-assigned handle (see xc_domain_sethandle) */
173 } xc_dominfo_t;
175 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
/* A vcpu context large enough for any supported guest width: explicit
 * 32-bit/64-bit x86 layouts plus the toolstack-native layout 'c'. */
177 typedef union
178 {
179 #if defined(__i386__) || defined(__x86_64__)
180 vcpu_guest_context_x86_64_t x64; /* 64-bit x86 guest layout */
181 vcpu_guest_context_x86_32_t x32; /* 32-bit x86 guest layout */
182 #endif
183 vcpu_guest_context_t c; /* native layout for this build */
184 } vcpu_guest_context_any_t;
/* A shared_info page image large enough for any supported guest width. */
186 typedef union
187 {
188 #if defined(__i386__) || defined(__x86_64__)
189 shared_info_x86_64_t x64; /* 64-bit x86 guest layout */
190 shared_info_x86_32_t x32; /* 32-bit x86 guest layout */
191 #endif
192 shared_info_t s; /* native layout for this build */
193 } shared_info_any_t;
/* A start_info structure large enough for any supported guest width. */
195 typedef union
196 {
197 #if defined(__i386__) || defined(__x86_64__)
198 start_info_x86_64_t x64; /* 64-bit x86 guest layout */
199 start_info_x86_32_t x32; /* 32-bit x86 guest layout */
200 #endif
201 start_info_t s; /* native layout for this build */
202 } start_info_any_t;
205 int xc_domain_create(int xc_handle,
206 uint32_t ssidref,
207 xen_domain_handle_t handle,
208 uint32_t flags,
209 uint32_t *pdomid);
212 /* Functions to produce a dump of a given domain
213 * xc_domain_dumpcore - produces a dump to a specified file
214 * xc_domain_dumpcore_via_callback - produces a dump, using a specified
215 * callback function
216 */
217 int xc_domain_dumpcore(int xc_handle,
218 uint32_t domid,
219 const char *corename);
221 /* Define the callback function type for xc_domain_dumpcore_via_callback.
222 *
223 * This function is called by the coredump code for every "write",
224 * and passes an opaque object for the use of the function and
225 * created by the caller of xc_domain_dumpcore_via_callback.
226 */
227 typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);
229 int xc_domain_dumpcore_via_callback(int xc_handle,
230 uint32_t domid,
231 void *arg,
232 dumpcore_rtn_t dump_rtn);
234 /*
235 * This function sets the maximum number of vcpus that a domain may create.
236 *
237 * @parm xc_handle a handle to an open hypervisor interface.
238 * @parm domid the domain id in which vcpus are to be created.
239 * @parm max the maximum number of vcpus that the domain may create.
240 * @return 0 on success, -1 on failure.
241 */
242 int xc_domain_max_vcpus(int xc_handle,
243 uint32_t domid,
244 unsigned int max);
246 /**
247 * This function pauses a domain. A paused domain still exists in memory
248 * however it does not receive any timeslices from the hypervisor.
249 *
250 * @parm xc_handle a handle to an open hypervisor interface
251 * @parm domid the domain id to pause
252 * @return 0 on success, -1 on failure.
253 */
254 int xc_domain_pause(int xc_handle,
255 uint32_t domid);
256 /**
257 * This function unpauses a domain. The domain should have been previously
258 * paused.
259 *
260 * @parm xc_handle a handle to an open hypervisor interface
261 * @parm domid the domain id to unpause
262 * return 0 on success, -1 on failure
263 */
264 int xc_domain_unpause(int xc_handle,
265 uint32_t domid);
267 /**
268 * This function will destroy a domain. Destroying a domain removes the domain
269 * completely from memory. This function should be called after sending the
270 * domain a SHUTDOWN control message to free up the domain resources.
271 *
272 * @parm xc_handle a handle to an open hypervisor interface
273 * @parm domid the domain id to destroy
274 * @return 0 on success, -1 on failure
275 */
276 int xc_domain_destroy(int xc_handle,
277 uint32_t domid);
280 /**
281 * This function resumes a suspended domain. The domain should have
282 * been previously suspended.
283 *
284 * @parm xc_handle a handle to an open hypervisor interface
285 * @parm domid the domain id to resume
286 * @parm fast use cooperative resume (guest must support this)
287 * return 0 on success, -1 on failure
288 */
289 int xc_domain_resume(int xc_handle,
290 uint32_t domid,
291 int fast);
293 /**
294 * This function will shutdown a domain. This is intended for use in
295 * fully-virtualized domains where this operation is analogous to the
296 * sched_op operations in a paravirtualized domain. The caller is
297 * expected to give the reason for the shutdown.
298 *
299 * @parm xc_handle a handle to an open hypervisor interface
300 * @parm domid the domain id to shut down
301 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
302 * @return 0 on success, -1 on failure
303 */
304 int xc_domain_shutdown(int xc_handle,
305 uint32_t domid,
306 int reason);
308 int xc_vcpu_setaffinity(int xc_handle,
309 uint32_t domid,
310 int vcpu,
311 uint64_t cpumap);
312 int xc_vcpu_getaffinity(int xc_handle,
313 uint32_t domid,
314 int vcpu,
315 uint64_t *cpumap);
317 /**
318 * This function will return information about one or more domains. It is
319 * designed to iterate over the list of domains. If a single domain is
320 * requested, this function will return the next domain in the list - if
321 * one exists. It is, therefore, important in this case to make sure the
322 * domain requested was the one returned.
323 *
324 * @parm xc_handle a handle to an open hypervisor interface
325 * @parm first_domid the first domain to enumerate information from. Domains
326 * are currently enumerated in order of creation.
327 * @parm max_doms the number of elements in info
328 * @parm info an array of max_doms size that will contain the information for
329 * the enumerated domains.
330 * @return the number of domains enumerated or -1 on error
331 */
332 int xc_domain_getinfo(int xc_handle,
333 uint32_t first_domid,
334 unsigned int max_doms,
335 xc_dominfo_t *info);
338 /**
339 * This function will set the execution context for the specified vcpu.
340 *
341 * @parm xc_handle a handle to an open hypervisor interface
342 * @parm domid the domain to set the vcpu context for
343 * @parm vcpu the vcpu number for the context
344 * @parm ctxt pointer to the cpu context with the values to set
345 * @return 0 on success, -1 on error
346 */
347 int xc_vcpu_setcontext(int xc_handle,
348 uint32_t domid,
349 uint32_t vcpu,
350 vcpu_guest_context_any_t *ctxt);
351 /**
352 * This function will return information about one or more domains, using a
353 * single hypercall. The domain information will be stored into the supplied
354 * array of xc_domaininfo_t structures.
355 *
356 * @parm xc_handle a handle to an open hypervisor interface
357 * @parm first_domain the first domain to enumerate information from.
358 * Domains are currently enumerated in order of creation.
359 * @parm max_domains the number of elements in info
360 * @parm info an array of max_domains size that will contain the information for
361 * the enumerated domains.
362 * @return the number of domains enumerated or -1 on error
363 */
364 int xc_domain_getinfolist(int xc_handle,
365 uint32_t first_domain,
366 unsigned int max_domains,
367 xc_domaininfo_t *info);
369 /**
370 * This function returns information about the context of a hvm domain
371 * @parm xc_handle a handle to an open hypervisor interface
372 * @parm domid the domain to get information from
373 * @parm ctxt_buf a pointer to a structure to store the execution context of
374 * the hvm domain
375 * @parm size the size of ctxt_buf in bytes
376 * @return 0 on success, -1 on failure
377 */
378 int xc_domain_hvm_getcontext(int xc_handle,
379 uint32_t domid,
380 uint8_t *ctxt_buf,
381 uint32_t size);
384 /**
385 * This function returns one element of the context of a hvm domain
386 * @parm xc_handle a handle to an open hypervisor interface
387 * @parm domid the domain to get information from
388 * @parm typecode which type of element required
389 * @parm instance which instance of the type
390 * @parm ctxt_buf a pointer to a structure to store the execution context of
391 * the hvm domain
392 * @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
393 * @return 0 on success, -1 on failure
394 */
395 int xc_domain_hvm_getcontext_partial(int xc_handle,
396 uint32_t domid,
397 uint16_t typecode,
398 uint16_t instance,
399 void *ctxt_buf,
400 uint32_t size);
402 /**
403 * This function will set the context for hvm domain
404 *
405 * @parm xc_handle a handle to an open hypervisor interface
406 * @parm domid the domain to set the hvm domain context for
407 * @parm hvm_ctxt pointer to the hvm context with the values to set
408 * @parm size the size of hvm_ctxt in bytes
409 * @return 0 on success, -1 on failure
410 */
411 int xc_domain_hvm_setcontext(int xc_handle,
412 uint32_t domid,
413 uint8_t *hvm_ctxt,
414 uint32_t size);
416 /**
417 * This function returns information about the execution context of a
418 * particular vcpu of a domain.
419 *
420 * @parm xc_handle a handle to an open hypervisor interface
421 * @parm domid the domain to get information from
422 * @parm vcpu the vcpu number
423 * @parm ctxt a pointer to a structure to store the execution context of the
424 * domain
425 * @return 0 on success, -1 on failure
426 */
427 int xc_vcpu_getcontext(int xc_handle,
428 uint32_t domid,
429 uint32_t vcpu,
430 vcpu_guest_context_any_t *ctxt);
432 typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
433 int xc_vcpu_getinfo(int xc_handle,
434 uint32_t domid,
435 uint32_t vcpu,
436 xc_vcpuinfo_t *info);
438 long long xc_domain_get_cpu_usage(int xc_handle,
439 domid_t domid,
440 int vcpu);
442 int xc_domain_sethandle(int xc_handle, uint32_t domid,
443 xen_domain_handle_t handle);
445 typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
446 int xc_shadow_control(int xc_handle,
447 uint32_t domid,
448 unsigned int sop,
449 unsigned long *dirty_bitmap,
450 unsigned long pages,
451 unsigned long *mb,
452 uint32_t mode,
453 xc_shadow_op_stats_t *stats);
455 int xc_sedf_domain_set(int xc_handle,
456 uint32_t domid,
457 uint64_t period, uint64_t slice,
458 uint64_t latency, uint16_t extratime,
459 uint16_t weight);
461 int xc_sedf_domain_get(int xc_handle,
462 uint32_t domid,
463 uint64_t* period, uint64_t *slice,
464 uint64_t *latency, uint16_t *extratime,
465 uint16_t *weight);
467 int xc_sched_credit_domain_set(int xc_handle,
468 uint32_t domid,
469 struct xen_domctl_sched_credit *sdom);
471 int xc_sched_credit_domain_get(int xc_handle,
472 uint32_t domid,
473 struct xen_domctl_sched_credit *sdom);
475 /**
476 * This function sends a trigger to a domain.
477 *
478 * @parm xc_handle a handle to an open hypervisor interface
479 * @parm domid the domain id to send trigger
480 * @parm trigger the trigger type
481 * @parm vcpu the vcpu number to send trigger
482 * return 0 on success, -1 on failure
483 */
484 int xc_domain_send_trigger(int xc_handle,
485 uint32_t domid,
486 uint32_t trigger,
487 uint32_t vcpu);
489 /**
490 * This function enables or disables debugging of a domain.
491 *
492 * @parm xc_handle a handle to an open hypervisor interface
493 * @parm domid the domain id to set debugging on
494 * @parm enable true to enable debugging
495 * return 0 on success, -1 on failure
496 */
497 int xc_domain_setdebugging(int xc_handle,
498 uint32_t domid,
499 unsigned int enable);
501 /*
502 * EVENT CHANNEL FUNCTIONS
503 */
505 /* A port identifier is guaranteed to fit in 31 bits. */
506 typedef int evtchn_port_or_error_t;
508 /**
509 * This function allocates an unbound port. Ports are named endpoints used for
510 * interdomain communication. This function is most useful in opening a
511 * well-known port within a domain to receive events on.
512 *
513 * NOTE: If you are allocating a *local* unbound port, you probably want to
514 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
515 * ports *only* during domain creation.
516 *
517 * @parm xc_handle a handle to an open hypervisor interface
518 * @parm dom the ID of the local domain (the 'allocatee')
519 * @parm remote_dom the ID of the domain who will later bind
520 * @return allocated port (in @dom) on success, -1 on failure
521 */
522 evtchn_port_or_error_t
523 xc_evtchn_alloc_unbound(int xc_handle,
524 uint32_t dom,
525 uint32_t remote_dom);
527 int xc_evtchn_reset(int xc_handle,
528 uint32_t dom);
530 typedef struct evtchn_status xc_evtchn_status_t;
531 int xc_evtchn_status(int xc_handle, xc_evtchn_status_t *status);
533 /*
534 * Return a handle to the event channel driver, or -1 on failure, in which case
535 * errno will be set appropriately.
536 */
537 int xc_evtchn_open(void);
539 /*
540 * Close a handle previously allocated with xc_evtchn_open().
541 */
542 int xc_evtchn_close(int xce_handle);
544 /*
545 * Return an fd that can be select()ed on for further calls to
546 * xc_evtchn_pending().
547 */
548 int xc_evtchn_fd(int xce_handle);
550 /*
551 * Notify the given event channel. Returns -1 on failure, in which case
552 * errno will be set appropriately.
553 */
554 int xc_evtchn_notify(int xce_handle, evtchn_port_t port);
556 /*
557 * Returns a new event port awaiting interdomain connection from the given
558 * domain ID, or -1 on failure, in which case errno will be set appropriately.
559 */
560 evtchn_port_or_error_t
561 xc_evtchn_bind_unbound_port(int xce_handle, int domid);
563 /*
564 * Returns a new event port bound to the remote port for the given domain ID,
565 * or -1 on failure, in which case errno will be set appropriately.
566 */
567 evtchn_port_or_error_t
568 xc_evtchn_bind_interdomain(int xce_handle, int domid,
569 evtchn_port_t remote_port);
571 /*
572 * Bind an event channel to the given VIRQ. Returns the event channel bound to
573 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
574 */
575 evtchn_port_or_error_t
576 xc_evtchn_bind_virq(int xce_handle, unsigned int virq);
578 /*
579 * Unbind the given event channel. Returns -1 on failure, in which case errno
580 * will be set appropriately.
581 */
582 int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);
584 /*
585 * Return the next event channel to become pending, or -1 on failure, in which
586 * case errno will be set appropriately.
587 */
588 evtchn_port_or_error_t
589 xc_evtchn_pending(int xce_handle);
591 /*
592 * Unmask the given event channel. Returns -1 on failure, in which case errno
593 * will be set appropriately.
594 */
595 int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
597 int xc_physdev_pci_access_modify(int xc_handle,
598 uint32_t domid,
599 int bus,
600 int dev,
601 int func,
602 int enable);
604 int xc_readconsolering(int xc_handle,
605 char **pbuffer,
606 unsigned int *pnr_chars,
607 int clear, int incremental, uint32_t *pindex);
609 int xc_send_debug_keys(int xc_handle, char *keys);
611 typedef xen_sysctl_physinfo_t xc_physinfo_t;
612 typedef uint32_t xc_cpu_to_node_t;
613 int xc_physinfo(int xc_handle,
614 xc_physinfo_t *info);
616 int xc_sched_id(int xc_handle,
617 int *sched_id);
619 typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
620 int xc_getcpuinfo(int xc_handle, int max_cpus,
621 xc_cpuinfo_t *info, int *nr_cpus);
623 int xc_domain_setmaxmem(int xc_handle,
624 uint32_t domid,
625 unsigned int max_memkb);
627 int xc_domain_set_memmap_limit(int xc_handle,
628 uint32_t domid,
629 unsigned long map_limitkb);
631 int xc_domain_set_time_offset(int xc_handle,
632 uint32_t domid,
633 int32_t time_offset_seconds);
635 int xc_domain_set_tsc_info(int xc_handle,
636 uint32_t domid,
637 uint32_t tsc_mode,
638 uint64_t elapsed_nsec,
639 uint32_t gtsc_khz,
640 uint32_t incarnation);
642 int xc_domain_get_tsc_info(int xc_handle,
643 uint32_t domid,
644 uint32_t *tsc_mode,
645 uint64_t *elapsed_nsec,
646 uint32_t *gtsc_khz,
647 uint32_t *incarnation);
649 int xc_domain_disable_migrate(int xc_handle, uint32_t domid);
651 int xc_domain_memory_increase_reservation(int xc_handle,
652 uint32_t domid,
653 unsigned long nr_extents,
654 unsigned int extent_order,
655 unsigned int mem_flags,
656 xen_pfn_t *extent_start);
658 int xc_domain_memory_decrease_reservation(int xc_handle,
659 uint32_t domid,
660 unsigned long nr_extents,
661 unsigned int extent_order,
662 xen_pfn_t *extent_start);
664 int xc_domain_memory_populate_physmap(int xc_handle,
665 uint32_t domid,
666 unsigned long nr_extents,
667 unsigned int extent_order,
668 unsigned int mem_flags,
669 xen_pfn_t *extent_start);
671 int xc_domain_memory_set_pod_target(int xc_handle,
672 uint32_t domid,
673 uint64_t target_pages,
674 uint64_t *tot_pages,
675 uint64_t *pod_cache_pages,
676 uint64_t *pod_entries);
678 int xc_domain_memory_get_pod_target(int xc_handle,
679 uint32_t domid,
680 uint64_t *tot_pages,
681 uint64_t *pod_cache_pages,
682 uint64_t *pod_entries);
684 int xc_domain_ioport_permission(int xc_handle,
685 uint32_t domid,
686 uint32_t first_port,
687 uint32_t nr_ports,
688 uint32_t allow_access);
690 int xc_domain_irq_permission(int xc_handle,
691 uint32_t domid,
692 uint8_t pirq,
693 uint8_t allow_access);
695 int xc_domain_iomem_permission(int xc_handle,
696 uint32_t domid,
697 unsigned long first_mfn,
698 unsigned long nr_mfns,
699 uint8_t allow_access);
701 int xc_domain_pin_memory_cacheattr(int xc_handle,
702 uint32_t domid,
703 uint64_t start,
704 uint64_t end,
705 uint32_t type);
707 unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
708 unsigned long mfn);
710 typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
711 typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
712 /* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
713 arrays. */
714 int xc_perfc_control(int xc_handle,
715 uint32_t op,
716 xc_perfc_desc_t *desc,
717 xc_perfc_val_t *val,
718 int *nbr_desc,
719 int *nbr_val);
721 typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
722 /* IMPORTANT: The caller is responsible for mlock()'ing the @data array. */
723 int xc_lockprof_control(int xc_handle,
724 uint32_t opcode,
725 uint32_t *n_elems,
726 uint64_t *time,
727 xc_lockprof_data_t *data);
729 /**
730 * Memory maps a range within one domain to a local address range. Mappings
731 * should be unmapped with munmap and should follow the same rules as mmap
732 * regarding page alignment. Returns NULL on failure.
733 *
734 * In Linux, the ring queue for the control channel is accessible by mapping
735 * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure
736 * stored there is of type control_if_t.
737 *
738 * @parm xc_handle a handle on an open hypervisor interface
739 * @parm dom the domain to map memory from
740 * @parm size the amount of memory to map (in multiples of page size)
741 * @parm prot same flag as in mmap().
742 * @parm mfn the frame address to map.
743 */
744 void *xc_map_foreign_range(int xc_handle, uint32_t dom,
745 int size, int prot,
746 unsigned long mfn );
748 void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
749 const xen_pfn_t *arr, int num );
751 /**
752 * DEPRECATED - use xc_map_foreign_bulk() instead.
753 *
754 * Like xc_map_foreign_pages(), except it can succeed partially.
755 * When a page cannot be mapped, its PFN in @arr is or'ed with
756 * 0xF0000000 to indicate the error.
757 */
758 void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
759 xen_pfn_t *arr, int num );
761 /**
762 * Like xc_map_foreign_pages(), except it can succeed partially.
763 * When a page cannot be mapped, its respective field in @err is
764 * set to the corresponding errno value.
765 */
766 void *xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot,
767 const xen_pfn_t *arr, int *err, unsigned int num);
769 /**
770 * Translates a virtual address in the context of a given domain and
771 * vcpu returning the GFN containing the address (that is, an MFN for
772 * PV guests, a PFN for HVM guests). Returns 0 for failure.
773 *
774 * @parm xc_handle a handle on an open hypervisor interface
775 * @parm dom the domain to perform the translation in
776 * @parm vcpu the vcpu to perform the translation on
777 * @parm virt the virtual address to translate
778 */
779 unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
780 int vcpu, unsigned long long virt);
783 /**
784 * DEPRECATED. Avoid using this, as it does not correctly account for PFNs
785 * without a backing MFN.
786 */
787 int xc_get_pfn_list(int xc_handle, uint32_t domid, uint64_t *pfn_buf,
788 unsigned long max_pfns);
790 unsigned long xc_ia64_fpsr_default(void);
792 int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
793 unsigned long dst_pfn, const char *src_page);
795 int xc_clear_domain_page(int xc_handle, uint32_t domid,
796 unsigned long dst_pfn);
798 long xc_get_max_pages(int xc_handle, uint32_t domid);
800 int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
801 domid_t dom);
803 int xc_memory_op(int xc_handle, int cmd, void *arg);
806 /* Get current total pages allocated to a domain. */
807 long xc_get_tot_pages(int xc_handle, uint32_t domid);
809 /**
810 * This function retrieves the number of bytes available
811 * in the heap in a specific range of address-widths and nodes.
812 *
813 * @parm xc_handle a handle to an open hypervisor interface
814 * @parm domid the domain to query
815 * @parm min_width the smallest address width to query (0 if don't care)
816 * @parm max_width the largest address width to query (0 if don't care)
817 * @parm node the node to query (-1 for all)
818 * @parm *bytes caller variable to put total bytes counted
819 * @return 0 on success, <0 on failure.
820 */
821 int xc_availheap(int xc_handle, int min_width, int max_width, int node,
822 uint64_t *bytes);
824 /*
825 * Trace Buffer Operations
826 */
828 /**
829 * xc_tbuf_enable - enable tracing buffers
830 *
831 * @parm xc_handle a handle to an open hypervisor interface
832 * @parm cnt size of tracing buffers to create (in pages)
833 * @parm mfn location to store mfn of the trace buffers to
834 * @parm size location to store the size (in bytes) of a trace buffer to
835 *
836 * Gets the machine address of the trace pointer area and the size of the
837 * per CPU buffers.
838 */
839 int xc_tbuf_enable(int xc_handle, unsigned long pages,
840 unsigned long *mfn, unsigned long *size);
842 /*
843 * Disable tracing buffers.
844 */
845 int xc_tbuf_disable(int xc_handle);
847 /**
848 * This function sets the size of the trace buffers. Setting the size
849 * is currently a one-shot operation that may be performed either at boot
850 * time or via this interface, not both. The buffer size must be set before
851 * enabling tracing.
852 *
853 * @parm xc_handle a handle to an open hypervisor interface
854 * @parm size the size in pages per cpu for the trace buffers
855 * @return 0 on success, -1 on failure.
856 */
857 int xc_tbuf_set_size(int xc_handle, unsigned long size);
859 /**
860 * This function retrieves the current size of the trace buffers.
861 * Note that the size returned is in terms of bytes, not pages.
863 * @parm xc_handle a handle to an open hypervisor interface
864 * @parm size will contain the size in bytes for the trace buffers
865 * @return 0 on success, -1 on failure.
866 */
867 int xc_tbuf_get_size(int xc_handle, unsigned long *size);
869 int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);
871 int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);
873 int xc_domctl(int xc_handle, struct xen_domctl *domctl);
874 int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);
876 int xc_version(int xc_handle, int cmd, void *arg);
878 int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);
880 int xc_flask_op(int xc_handle, flask_op_t *op);
882 /*
883 * Subscribe to state changes in a domain via evtchn.
884 * Returns -1 on failure, in which case errno will be set appropriately.
885 */
886 int xc_domain_subscribe_for_suspend(
887 int xc_handle, domid_t domid, evtchn_port_t port);
889 /**************************
890 * GRANT TABLE OPERATIONS *
891 **************************/
893 /*
894 * Return a handle to the grant table driver, or -1 on failure, in which case
895 * errno will be set appropriately.
896 */
897 int xc_gnttab_open(void);
899 /*
900 * Close a handle previously allocated with xc_gnttab_open().
901 */
902 int xc_gnttab_close(int xcg_handle);
904 /*
905 * Memory maps a grant reference from one domain to a local address range.
906 * Mappings should be unmapped with xc_gnttab_munmap. Returns NULL on failure.
907 *
908 * @parm xcg_handle a handle on an open grant table interface
909 * @parm domid the domain to map memory from
910 * @parm ref the grant reference ID to map
911 * @parm prot same flag as in mmap()
912 */
913 void *xc_gnttab_map_grant_ref(int xcg_handle,
914 uint32_t domid,
915 uint32_t ref,
916 int prot);
918 /**
919 * Memory maps one or more grant references from one or more domains to a
920 * contiguous local address range. Mappings should be unmapped with
921 * xc_gnttab_munmap. Returns NULL on failure.
922 *
923 * @parm xcg_handle a handle on an open grant table interface
924 * @parm count the number of grant references to be mapped
925 * @parm domids an array of @count domain IDs by which the corresponding @refs
926 * were granted
927 * @parm refs an array of @count grant references to be mapped
928 * @parm prot same flag as in mmap()
929 */
930 void *xc_gnttab_map_grant_refs(int xcg_handle,
931 uint32_t count,
932 uint32_t *domids,
933 uint32_t *refs,
934 int prot);
936 /**
937 * Memory maps one or more grant references from one domain to a
938 * contiguous local address range. Mappings should be unmapped with
939 * xc_gnttab_munmap. Returns NULL on failure.
940 *
941 * @parm xcg_handle a handle on an open grant table interface
942 * @parm count the number of grant references to be mapped
943 * @parm domid the domain to map memory from
944 * @parm refs an array of @count grant references to be mapped
945 * @parm prot same flag as in mmap()
946 */
947 void *xc_gnttab_map_domain_grant_refs(int xcg_handle,
948 uint32_t count,
949 uint32_t domid,
950 uint32_t *refs,
951 int prot);
953 /*
954 * Unmaps the @count pages starting at @start_address, which were mapped by a
955 * call to xc_gnttab_map_grant_ref or xc_gnttab_map_grant_refs. Returns zero
956 * on success, otherwise sets errno and returns non-zero.
957 */
958 int xc_gnttab_munmap(int xcg_handle,
959 void *start_address,
960 uint32_t count);
962 /*
963 * Sets the maximum number of grants that may be mapped by the given instance
964 * to @count.
965 *
966 * N.B. This function must be called after opening the handle, and before any
967 * other functions are invoked on it.
968 *
969 * N.B. When variable-length grants are mapped, fragmentation may be observed,
970 * and it may not be possible to satisfy requests up to the maximum number
971 * of grants.
972 */
973 int xc_gnttab_set_max_grants(int xcg_handle,
974 uint32_t count);
/* Low-level escape hatch: submit a raw grant-table operation @cmd over
 * @count elements of @op, each @op_size bytes. Prefer the typed wrappers
 * above where possible. NOTE(review): valid @cmd values come from the
 * GNTTABOP_* definitions in <xen/grant_table.h> — confirm against the
 * implementation. */
976 int xc_gnttab_op(int xc_handle, int cmd,
977 void * op, int op_size, int count);
/* Query which grant-table interface version @domid is using. */
979 int xc_gnttab_get_version(int xc_handle, int domid);
/* Map @domid's grant table (v1/v2 entry layout); on success the number of
 * entries is stored in *gnt_num. NOTE(review): presumably returns NULL on
 * failure — confirm. */
980 grant_entry_v1_t *xc_gnttab_map_table_v1(int xc_handle, int domid, int *gnt_num);
981 grant_entry_v2_t *xc_gnttab_map_table_v2(int xc_handle, int domid, int *gnt_num);
/* Map a physical interrupt @index into @domid; the resulting pirq is
 * returned through @pirq. NOTE(review): presumably wraps the
 * PHYSDEVOP_map_pirq hypercall (<xen/physdev.h> is included above) —
 * confirm the meaning of @index (GSI vs. vector) in the implementation. */
983 int xc_physdev_map_pirq(int xc_handle,
984 int domid,
985 int index,
986 int *pirq);
/* As above but for an MSI/MSI-X source, additionally identified by the
 * PCI @bus/@devfn, table @entry_nr and MSI-X @table_base. */
988 int xc_physdev_map_pirq_msi(int xc_handle,
989 int domid,
990 int index,
991 int *pirq,
992 int devfn,
993 int bus,
994 int entry_nr,
995 uint64_t table_base);
/* Release a pirq previously obtained via one of the map calls above. */
997 int xc_physdev_unmap_pirq(int xc_handle,
998 int domid,
999 int pirq);
/* Assert/deassert (@level) the INTx line of the PCI device identified by
 * @domain/@bus/@device/@intx inside HVM guest @dom. */
1001 int xc_hvm_set_pci_intx_level(
1002 int xc_handle, domid_t dom,
1003 uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
1004 unsigned int level);
/* Assert/deassert (@level) ISA IRQ @isa_irq inside HVM guest @dom. */
1005 int xc_hvm_set_isa_irq_level(
1006 int xc_handle, domid_t dom,
1007 uint8_t isa_irq,
1008 unsigned int level);
/* Route PCI interrupt link @link to ISA IRQ @isa_irq for guest @dom. */
1010 int xc_hvm_set_pci_link_route(
1011 int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq);
1014 /*
1015 * Track dirty bit changes in the VRAM area
1017 * All of this is done atomically:
1018 * - get the dirty bitmap since the last call
1019 * - set up dirty tracking area for period up to the next call
1020 * - clear the dirty tracking area.
1022 * Returns -ENODATA and does not fill bitmap if the area has changed since the
1023 * last call.
1024 */
/* NOTE(review): @bitmap must provide room for @nr bits (one per pfn in
 * [first_pfn, first_pfn + nr)) — confirm required rounding with callers. */
1025 int xc_hvm_track_dirty_vram(
1026 int xc_handle, domid_t dom,
1027 uint64_t first_pfn, uint64_t nr,
1028 unsigned long *bitmap);
1030 /*
1031 * Notify that some pages got modified by the Device Model
1032 */
/* The affected range is [first_pfn, first_pfn + nr) in guest @dom. */
1033 int xc_hvm_modified_memory(
1034 int xc_handle, domid_t dom, uint64_t first_pfn, uint64_t nr);
1036 /*
1037 * Set a range of memory to a specific type.
1038 * Allowed types are HVMMEM_ram_rw, HVMMEM_ram_ro, HVMMEM_mmio_dm
1039 */
/* Applies @memtype to guest pfns [first_pfn, first_pfn + nr) of @dom. */
1040 int xc_hvm_set_mem_type(
1041 int xc_handle, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint64_t nr);
/* Coarse classification of libxc failures, reported via xc_error. */
1044 typedef enum {
1045 XC_ERROR_NONE = 0,
1046 XC_INTERNAL_ERROR = 1,
1047 XC_INVALID_KERNEL = 2,
1048 XC_INVALID_PARAM = 3,
1049 XC_OUT_OF_MEMORY = 4,
1050 } xc_error_code;
1052 #define XC_MAX_ERROR_MSG_LEN 1024
/* Last-error record returned by xc_get_last_error() below. */
1053 typedef struct {
/* an xc_error_code value */
1054 int code;
/* human-readable description (fixed-size buffer) */
1055 char message[XC_MAX_ERROR_MSG_LEN];
1056 } xc_error;
1058 /*
1059 * Return a pointer to the last error. This pointer and the
1060 * data pointed to are only valid until the next call to
1061 * libxc.
1062 */
1063 const xc_error *xc_get_last_error(void);
1065 /*
1066 * Clear the last error
1067 */
1068 void xc_clear_last_error(void);
/* Callback type invoked with each error record as it is raised. */
1070 typedef void (*xc_error_handler)(const xc_error *err);
1072 /*
1073 * The default error handler which prints to stderr
1074 */
1075 void xc_default_error_handler(const xc_error *err);
1077 /*
1078 * Convert an error code into a text description
1079 */
1080 const char *xc_error_code_to_desc(int code);
1082 /*
1083 * Registers a callback to handle errors
1084 */
/* NOTE(review): presumably returns the previously registered handler so
 * callers can restore it — confirm in the implementation. */
1085 xc_error_handler xc_set_error_handler(xc_error_handler handler);
/* Get/set an HVM parameter of @dom; @param is one of the HVM_PARAM_*
 * indices from <xen/hvm/params.h> (included at the top of this header). */
1087 int xc_set_hvm_param(int handle, domid_t dom, int param, unsigned long value);
1088 int xc_get_hvm_param(int handle, domid_t dom, int param, unsigned long *value);
1090 /* IA64 specific, nvram save */
1091 int xc_ia64_save_to_nvram(int xc_handle, uint32_t dom);
1093 /* IA64 specific, nvram init */
1094 int xc_ia64_nvram_init(int xc_handle, char *dom_name, uint32_t dom);
1096 /* IA64 specific, set guest OS type optimizations */
1097 int xc_ia64_set_os_type(int xc_handle, char *guest_os_type, uint32_t dom);
1099 /* HVM guest pass-through */
/* Assign the physical device identified by @machine_bdf to @domid.
 * NOTE(review): @machine_bdf presumably encodes PCI bus/device/function —
 * confirm the packing against the implementation. */
1100 int xc_assign_device(int xc_handle,
1101 uint32_t domid,
1102 uint32_t machine_bdf);
/* Enumerate the co-assignable device group of @machine_bdf: up to
 * @max_sdevs entries are written to @sdev_array, with the actual count
 * stored in *num_sdevs. */
1104 int xc_get_device_group(int xc_handle,
1105 uint32_t domid,
1106 uint32_t machine_bdf,
1107 uint32_t max_sdevs,
1108 uint32_t *num_sdevs,
1109 uint32_t *sdev_array);
/* Check whether @machine_bdf could be assigned, without assigning it. */
1111 int xc_test_assign_device(int xc_handle,
1112 uint32_t domid,
1113 uint32_t machine_bdf);
/* Remove a previous assignment of @machine_bdf from @domid. */
1115 int xc_deassign_device(int xc_handle,
1116 uint32_t domid,
1117 uint32_t machine_bdf);
/* Map (or unmap, per @add_mapping) @nr_mfns machine frames starting at
 * @first_mfn into @domid's address space starting at guest frame
 * @first_gfn. NOTE(review): the exact @add_mapping encoding (add vs.
 * remove) is defined by the domctl interface — confirm before use. */
1119 int xc_domain_memory_mapping(int xc_handle,
1120 uint32_t domid,
1121 unsigned long first_gfn,
1122 unsigned long first_mfn,
1123 unsigned long nr_mfns,
1124 uint32_t add_mapping);
/* As above, but for a range of @nr_ports I/O ports: guest ports starting
 * at @first_gport map to machine ports starting at @first_mport. */
1126 int xc_domain_ioport_mapping(int xc_handle,
1127 uint32_t domid,
1128 uint32_t first_gport,
1129 uint32_t first_mport,
1130 uint32_t nr_ports,
1131 uint32_t add_mapping);
/* Bind/update an MSI interrupt for pass-through: guest vector @gvec is
 * delivered via @pirq with flags @gflags; @gtable is the MSI-X table
 * address where applicable. NOTE(review): field semantics follow the
 * domctl pt_irq binding interface — confirm before relying on them. */
1133 int xc_domain_update_msi_irq(
1134 int xc_handle,
1135 uint32_t domid,
1136 uint32_t gvec,
1137 uint32_t pirq,
1138 uint32_t gflags,
1139 uint64_t gtable);
/* Tear down an MSI binding previously established for @domid. */
1141 int xc_domain_unbind_msi_irq(int xc_handle,
1142 uint32_t domid,
1143 uint32_t gvec,
1144 uint32_t pirq,
1145 uint32_t gflags);
/* Bind machine IRQ @machine_irq to @domid for device pass-through.
 * Depending on @irq_type, either the PCI INTx coordinates
 * (@bus/@device/@intx) or @isa_irq identify the guest-side interrupt. */
1147 int xc_domain_bind_pt_irq(int xc_handle,
1148 uint32_t domid,
1149 uint8_t machine_irq,
1150 uint8_t irq_type,
1151 uint8_t bus,
1152 uint8_t device,
1153 uint8_t intx,
1154 uint8_t isa_irq);
/* Reverse a binding made with xc_domain_bind_pt_irq(); parameters must
 * match the original bind. */
1156 int xc_domain_unbind_pt_irq(int xc_handle,
1157 uint32_t domid,
1158 uint8_t machine_irq,
1159 uint8_t irq_type,
1160 uint8_t bus,
1161 uint8_t device,
1162 uint8_t intx,
1163 uint8_t isa_irq);
/* Convenience wrapper: bind @machine_irq as a PCI INTx interrupt. */
1165 int xc_domain_bind_pt_pci_irq(int xc_handle,
1166 uint32_t domid,
1167 uint8_t machine_irq,
1168 uint8_t bus,
1169 uint8_t device,
1170 uint8_t intx);
/* Convenience wrapper: bind @machine_irq as an ISA interrupt. */
1172 int xc_domain_bind_pt_isa_irq(int xc_handle,
1173 uint32_t domid,
1174 uint8_t machine_irq);
/* Set/get the machine address width (@width, in bits) that @domid may
 * assume for machine memory. */
1176 int xc_domain_set_machine_address_size(int handle,
1177 uint32_t domid,
1178 unsigned int width);
1179 int xc_domain_get_machine_address_size(int handle,
1180 uint32_t domid);
/* Ask the hypervisor to suppress spurious page faults for @domid. */
1182 int xc_domain_suppress_spurious_page_faults(int handle,
1183 uint32_t domid);
1185 /* Set the target domain */
/* @domid becomes privileged over @target (stub-domain style delegation).
 * NOTE(review): exact privilege semantics are defined by the hypervisor —
 * confirm in the domctl documentation. */
1186 int xc_domain_set_target(int xc_handle,
1187 uint32_t domid,
1188 uint32_t target);
1190 /* Control the domain for debug */
/* @sop selects the debug sub-operation applied to @vcpu of @domid. */
1191 int xc_domain_debug_control(int xc_handle,
1192 uint32_t domid,
1193 uint32_t sop,
1194 uint32_t vcpu);
1196 #if defined(__i386__) || defined(__x86_64__)
/* x86-only CPUID policy helpers. @input selects the CPUID leaf/subleaf;
 * @config is a per-register textual policy description, and the resolved
 * form is returned via @config_transformed.
 * NOTE(review): the policy string format is defined by the xc_cpuid_*
 * implementation — confirm there before constructing configs. */
1197 int xc_cpuid_check(int xc,
1198 const unsigned int *input,
1199 const char **config,
1200 char **config_transformed);
/* Install a CPUID policy for @domid. */
1201 int xc_cpuid_set(int xc,
1202 domid_t domid,
1203 const unsigned int *input,
1204 const char **config,
1205 char **config_transformed);
/* Apply the default CPUID policy to @domid. */
1206 int xc_cpuid_apply_policy(int xc,
1207 domid_t domid);
/* Render raw CPUID register values @regs into strings in @strs. */
1208 void xc_cpuid_to_str(const unsigned int *regs,
1209 char **strs);
1210 #endif
/* Per-P-state counters reported by the power-management interface. */
1212 struct xc_px_val {
1213 uint64_t freq; /* Px core frequency */
1214 uint64_t residency; /* Px residency time */
1215 uint64_t count; /* Px transition count */
1216 };
/* Aggregate P-state statistics for one CPU. */
1218 struct xc_px_stat {
1219 uint8_t total; /* total Px states */
1220 uint8_t usable; /* usable Px states */
1221 uint8_t last; /* last Px state */
1222 uint8_t cur; /* current Px state */
1223 uint64_t *trans_pt; /* Px transition table */
/* per-state values, one xc_px_val per Px state */
1224 struct xc_px_val *pt;
1225 };
/* Query the number of P-states, fetch, and reset the statistics for
 * @cpuid. NOTE(review): caller presumably allocates the arrays pointed to
 * by xc_px_stat before calling xc_pm_get_pxstat() — confirm. */
1227 int xc_pm_get_max_px(int xc_handle, int cpuid, int *max_px);
1228 int xc_pm_get_pxstat(int xc_handle, int cpuid, struct xc_px_stat *pxpt);
1229 int xc_pm_reset_pxstat(int xc_handle, int cpuid);
/* C-state (idle) statistics for one CPU. */
1231 struct xc_cx_stat {
1232 uint32_t nr; /* entry nr in triggers & residencies, including C0 */
1233 uint32_t last; /* last Cx state */
1234 uint64_t idle_time; /* idle time from boot */
1235 uint64_t *triggers; /* Cx trigger counts */
1236 uint64_t *residencies; /* Cx residencies */
1237 };
1238 typedef struct xc_cx_stat xc_cx_stat_t;
/* Query the number of C-states, fetch, and reset the statistics for
 * @cpuid. NOTE(review): caller presumably allocates triggers/residencies
 * before calling xc_pm_get_cxstat() — confirm. */
1240 int xc_pm_get_max_cx(int xc_handle, int cpuid, int *max_cx);
1241 int xc_pm_get_cxstat(int xc_handle, int cpuid, struct xc_cx_stat *cxpt);
1242 int xc_pm_reset_cxstat(int xc_handle, int cpuid);
/* Bring physical CPU @cpu online/offline. */
1244 int xc_cpu_online(int xc_handle, int cpu);
1245 int xc_cpu_offline(int xc_handle, int cpu);
1247 /*
1248 * cpufreq para name of this structure named
1249 * same as sysfs file name of native linux
1250 */
1251 typedef xen_userspace_t xc_userspace_t;
1252 typedef xen_ondemand_t xc_ondemand_t;
/* cpufreq parameters for one CPU; field names mirror the Linux cpufreq
 * sysfs attributes (see comment above). The IN/OUT counters describe the
 * sizes of the caller-provided arrays below.
 * NOTE(review): presumably the caller sets cpu_num/freq_num/gov_num and
 * allocates the pointed-to buffers before xc_get_cpufreq_para() — confirm
 * the negotiation protocol in the implementation. */
1254 struct xc_get_cpufreq_para {
1255 /* IN/OUT variable */
1256 uint32_t cpu_num;
1257 uint32_t freq_num;
1258 uint32_t gov_num;
1260 /* for all governors */
1261 /* OUT variable */
1262 uint32_t *affected_cpus;
1263 uint32_t *scaling_available_frequencies;
1264 char *scaling_available_governors;
1265 char scaling_driver[CPUFREQ_NAME_LEN];
1267 uint32_t cpuinfo_cur_freq;
1268 uint32_t cpuinfo_max_freq;
1269 uint32_t cpuinfo_min_freq;
1270 uint32_t scaling_cur_freq;
1272 char scaling_governor[CPUFREQ_NAME_LEN];
1273 uint32_t scaling_max_freq;
1274 uint32_t scaling_min_freq;
1276 /* for specific governor */
1277 union {
1278 xc_userspace_t userspace;
1279 xc_ondemand_t ondemand;
1280 } u;
1281 };
/* Read the cpufreq parameters of @cpuid into @user_para. */
1283 int xc_get_cpufreq_para(int xc_handle, int cpuid,
1284 struct xc_get_cpufreq_para *user_para);
/* Select governor @govname for @cpuid. */
1285 int xc_set_cpufreq_gov(int xc_handle, int cpuid, char *govname);
/* Set a single governor parameter (@ctrl_type) to @ctrl_value. */
1286 int xc_set_cpufreq_para(int xc_handle, int cpuid,
1287 int ctrl_type, int ctrl_value);
/* Read the average frequency of @cpuid into *avg_freq. */
1288 int xc_get_cpufreq_avgfreq(int xc_handle, int cpuid, int *avg_freq);
/* CPU topology query: caller supplies cpu_to_core/cpu_to_socket arrays of
 * @max_cpus entries; xc_get_cputopo() fills them and reports the real CPU
 * count in @nr_cpus (output is truncated if nr_cpus > max_cpus). */
1290 struct xc_get_cputopo {
1291 /* IN: maximum addressable entry in
1292 * the caller-provided cpu_to_core/socket.
1293 */
1294 uint32_t max_cpus;
1295 uint32_t *cpu_to_core;
1296 uint32_t *cpu_to_socket;
1298 /* OUT: number of cpus returned
1299 * If OUT is greater than IN then the cpu_to_core/socket is truncated!
1300 */
1301 uint32_t nr_cpus;
1302 };
1304 int xc_get_cputopo(int xc_handle, struct xc_get_cputopo *info);
/* Scheduler tunables: SMT handling and vcpu migration delay. */
1306 int xc_set_sched_opt_smt(int xc_handle, uint32_t value);
1307 int xc_set_vcpu_migration_delay(int xc_handle, uint32_t value);
1308 int xc_get_vcpu_migration_delay(int xc_handle, uint32_t *value);
/* Get/set the deepest C-state the cpuidle driver may enter. */
1310 int xc_get_cpuidle_max_cstate(int xc_handle, uint32_t *value);
1311 int xc_set_cpuidle_max_cstate(int xc_handle, uint32_t value);
1313 /**
1314 * tmem operations
1315 */
/* Generic tmem control: @subop selects the operation (TMEM_* constants
 * from <xen/tmem.h>, included above), applied to pool @pool_id of client
 * @cli_id; @arg1..@arg3 and @buf are subop-specific. */
1316 int xc_tmem_control(int xc, int32_t pool_id, uint32_t subop, uint32_t cli_id,
1317 uint32_t arg1, uint32_t arg2, uint64_t arg3, void *buf);
/* Authorize client @cli_id for the pool identified by @uuid_str. */
1318 int xc_tmem_auth(int xc_handle, int cli_id, char *uuid_str, int arg1);
/* Save/restore tmem state of @dom through file descriptor @fd; used by
 * the migration/save-restore path. NOTE(review): @field_marker framing is
 * defined by the save format — confirm against xc_domain_save. */
1319 int xc_tmem_save(int xc_handle, int dom, int live, int fd, int field_marker);
1320 int xc_tmem_save_extra(int xc_handle, int dom, int fd, int field_marker);
1321 void xc_tmem_save_done(int xc_handle, int dom);
1322 int xc_tmem_restore(int xc_handle, int dom, int fd);
1323 int xc_tmem_restore_extra(int xc_handle, int dom, int fd);
1325 /**
1326 * mem_event operations
1327 */
/* Low-level mem_event control: @op/@mode select the sub-operation;
 * @shared_page and @ring_page back the event channel/ring shared with the
 * helper, and @gfn is operation-specific. */
1328 int xc_mem_event_control(int xc_handle, domid_t domain_id, unsigned int op,
1329 unsigned int mode, void *shared_page,
1330 void *ring_page, unsigned long gfn);
/* Convenience enable/disable wrappers around xc_mem_event_control(). */
1332 int xc_mem_event_enable(int xc_handle, domid_t domain_id,
1333 void *shared_page, void *ring_page);
1334 int xc_mem_event_disable(int xc_handle, domid_t domain_id);
/* Memory paging lifecycle for a single guest frame @gfn:
 * nominate -> evict -> (page back in) prep -> resume. */
1336 int xc_mem_paging_nominate(int xc_handle, domid_t domain_id,
1337 unsigned long gfn);
1338 int xc_mem_paging_evict(int xc_handle, domid_t domain_id, unsigned long gfn);
1339 int xc_mem_paging_prep(int xc_handle, domid_t domain_id, unsigned long gfn);
1340 int xc_mem_paging_resume(int xc_handle, domid_t domain_id,
1341 unsigned long gfn);
1343 /**
1344 * memshr operations
1345 */
/* Enable/disable memory sharing for @domid. */
1346 int xc_memshr_control(int xc_handle,
1347 uint32_t domid,
1348 int enable);
/* Nominate a page for sharing, identified either by guest frame @gfn or
 * by grant reference @gref; an opaque sharing handle is returned via
 * @handle for use with xc_memshr_share(). */
1349 int xc_memshr_nominate_gfn(int xc_handle,
1350 uint32_t domid,
1351 unsigned long gfn,
1352 uint64_t *handle);
1353 int xc_memshr_nominate_gref(int xc_handle,
1354 uint32_t domid,
1355 grant_ref_t gref,
1356 uint64_t *handle);
/* Share the pages behind two previously nominated handles. */
1357 int xc_memshr_share(int xc_handle,
1358 uint64_t source_handle,
1359 uint64_t client_handle);
/* Resume @domid after it was paused by a sharing operation.
 * NOTE(review): exact pause/resume conditions are defined by the sharing
 * implementation — confirm before relying on this. */
1360 int xc_memshr_domain_resume(int xc_handle,
1361 uint32_t domid);
/* Debug helpers: dump sharing state for a gfn, mfn or grant reference. */
1362 int xc_memshr_debug_gfn(int xc_handle,
1363 uint32_t domid,
1364 unsigned long gfn);
1365 int xc_memshr_debug_mfn(int xc_handle,
1366 uint32_t domid,
1367 unsigned long mfn);
1368 int xc_memshr_debug_gref(int xc_handle,
1369 uint32_t domid,
1370 grant_ref_t gref);
1372 #endif /* XENCTRL_H */