debuggers.hg

view tools/libxc/xenctrl.h @ 20837:0b138a019292

libxc: use new (replacement) mmap-batch ioctl

Replace all calls to xc_map_foreign_batch() where the caller doesn't
look at the passed in array to check for errors by calls to
xc_map_foreign_pages(). Replace all remaining calls by such to the
newly introduced xc_map_foreign_bulk().

As a sideband modification (needed while writing the patch to ensure
they're unused) eliminate unused parameters to
uncanonicalize_pagetable() and xc_map_foreign_batch_single(). Also
unmap live_p2m_frame_list earlier in map_and_save_p2m_table(),
reducing the peak amount of virtual address space required.

All supported OSes other than Linux continue to use the old ioctl for
the time being.

Also change libxc's MAJOR to 4.0 to reflect the API change.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 13 08:12:56 2010 +0000 (2010-01-13)
parents c34435067298
children 0447c5532e9f
line source
1 /******************************************************************************
2 * xenctrl.h
3 *
4 * A library for low-level access to the Xen control interfaces.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 *
8 * xc_gnttab functions:
9 * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
10 */
12 #ifndef XENCTRL_H
13 #define XENCTRL_H
15 /* Tell the Xen public headers we are a user-space tools build. */
16 #ifndef __XEN_TOOLS__
17 #define __XEN_TOOLS__ 1
18 #endif
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <xen/xen.h>
23 #include <xen/domctl.h>
24 #include <xen/physdev.h>
25 #include <xen/sysctl.h>
26 #include <xen/version.h>
27 #include <xen/event_channel.h>
28 #include <xen/sched.h>
29 #include <xen/memory.h>
30 #include <xen/grant_table.h>
31 #include <xen/hvm/params.h>
32 #include <xen/xsm/acm.h>
33 #include <xen/xsm/acm_ops.h>
34 #include <xen/xsm/flask_op.h>
35 #include <xen/tmem.h>
37 #if defined(__i386__) || defined(__x86_64__)
38 #include <xen/foreign/x86_32.h>
39 #include <xen/foreign/x86_64.h>
40 #endif
42 #ifdef __ia64__
43 #define XC_PAGE_SHIFT 14
44 #else
45 #define XC_PAGE_SHIFT 12
46 #endif
47 #define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
48 #define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))
50 #define INVALID_MFN (~0UL)
52 /*
53 * DEFINITIONS FOR CPU BARRIERS
54 */
56 #if defined(__i386__)
57 #define xen_mb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
58 #define xen_rmb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
59 #define xen_wmb() asm volatile ( "" : : : "memory")
60 #elif defined(__x86_64__)
61 #define xen_mb() asm volatile ( "mfence" : : : "memory")
62 #define xen_rmb() asm volatile ( "lfence" : : : "memory")
63 #define xen_wmb() asm volatile ( "" : : : "memory")
64 #elif defined(__ia64__)
65 #define xen_mb() asm volatile ("mf" ::: "memory")
66 #define xen_rmb() asm volatile ("mf" ::: "memory")
67 #define xen_wmb() asm volatile ("mf" ::: "memory")
68 #else
69 #error "Define barriers"
70 #endif
72 /*
73 * INITIALIZATION FUNCTIONS
74 */
76 /**
77 * This function opens a handle to the hypervisor interface. This function can
78 * be called multiple times within a single process. Multiple processes can
79 * have an open hypervisor interface at the same time.
80 *
81 * Each call to this function should have a corresponding call to
82 * xc_interface_close().
83 *
84 * This function can fail if the caller does not have superuser permission or
85 * if a Xen-enabled kernel is not currently running.
86 *
87 * @return a handle to the hypervisor interface or -1 on failure
88 */
89 int xc_interface_open(void);
91 /**
92 * This function closes an open hypervisor interface.
93 *
94 * This function can fail if the handle does not represent an open interface or
95 * if there were problems closing the interface.
96 *
97 * @parm xc_handle a handle to an open hypervisor interface
98 * @return 0 on success, -1 otherwise.
99 */
100 int xc_interface_close(int xc_handle);
102 /*
103 * KERNEL INTERFACES
104 */
106 /*
107 * Resolve a kernel device name (e.g., "evtchn", "blktap0") into a kernel
108 * device number. Returns -1 on error (and sets errno).
109 */
110 int xc_find_device_number(const char *name);
112 /*
113 * DOMAIN DEBUGGING FUNCTIONS
114 */
/* Header of a domain core-dump file (presumably written by
 * xc_domain_dumpcore* below -- confirm against xc_core.c).
 * Magic values are defined immediately after this struct. */
116 typedef struct xc_core_header {
117 unsigned int xch_magic; /* XC_CORE_MAGIC (PV) or XC_CORE_MAGIC_HVM */
118 unsigned int xch_nr_vcpus; /* number of vcpus in the dump (per name) */
119 unsigned int xch_nr_pages; /* number of pages in the dump (per name) */
120 unsigned int xch_ctxt_offset; /* presumably file offset of vcpu contexts -- confirm */
121 unsigned int xch_index_offset; /* presumably file offset of the page index -- confirm */
122 unsigned int xch_pages_offset; /* presumably file offset of the page data -- confirm */
123 } xc_core_header_t;
125 #define XC_CORE_MAGIC 0xF00FEBED
126 #define XC_CORE_MAGIC_HVM 0xF00FEBEE
128 #ifdef __linux__
130 #include <sys/ptrace.h>
131 #include <thread_db.h>
133 typedef void (*thr_ev_handler_t)(long);
135 void xc_register_event_handler(
136 thr_ev_handler_t h,
137 td_event_e e);
139 long xc_ptrace(
140 int xc_handle,
141 enum __ptrace_request request,
142 uint32_t domid,
143 long addr,
144 long data);
146 int xc_waitdomain(
147 int xc_handle,
148 int domain,
149 int *status,
150 int options);
152 #endif /* __linux__ */
154 /*
155 * DOMAIN MANAGEMENT FUNCTIONS
156 */
/* Per-domain information snapshot, filled in by xc_domain_getinfo(). */
158 typedef struct xc_dominfo {
159 uint32_t domid; /* domain identifier */
160 uint32_t ssidref; /* security ssid reference -- presumably XSM/ACM; confirm */
161 unsigned int dying:1, crashed:1, shutdown:1, /* domain state flags */
162 paused:1, blocked:1, running:1,
163 hvm:1, debugged:1;
164 unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
165 unsigned long nr_pages; /* current number, not maximum */
166 unsigned long nr_shared_pages;
167 unsigned long shared_info_frame; /* frame number of shared_info (per name) */
168 uint64_t cpu_time; /* presumably total cpu time consumed; units unconfirmed */
169 unsigned long max_memkb; /* memory limit in KiB (per name) */
170 unsigned int nr_online_vcpus;
171 unsigned int max_vcpu_id;
172 xen_domain_handle_t handle; /* opaque domain handle (see xc_domain_sethandle) */
173 } xc_dominfo_t;
175 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
/*
 * A vcpu guest context in whichever layout matches the guest's width.
 * On x86, both the 32-bit and 64-bit guest layouts are available in
 * addition to 'c', the layout native to this build of the library.
 */
177 typedef union
178 {
179 #if defined(__i386__) || defined(__x86_64__)
180 vcpu_guest_context_x86_64_t x64;
181 vcpu_guest_context_x86_32_t x32;
182 #endif
183 vcpu_guest_context_t c;
184 } vcpu_guest_context_any_t;
/*
 * A shared_info page in whichever layout matches the guest's width.
 * On x86, both the 32-bit and 64-bit guest layouts are available in
 * addition to 's', the layout native to this build of the library.
 */
186 typedef union
187 {
188 #if defined(__i386__) || defined(__x86_64__)
189 shared_info_x86_64_t x64;
190 shared_info_x86_32_t x32;
191 #endif
192 shared_info_t s;
193 } shared_info_any_t;
/*
 * A start_info structure in whichever layout matches the guest's width.
 * On x86, both the 32-bit and 64-bit guest layouts are available in
 * addition to 's', the layout native to this build of the library.
 */
195 typedef union
196 {
197 #if defined(__i386__) || defined(__x86_64__)
198 start_info_x86_64_t x64;
199 start_info_x86_32_t x32;
200 #endif
201 start_info_t s;
202 } start_info_any_t;
205 int xc_domain_create(int xc_handle,
206 uint32_t ssidref,
207 xen_domain_handle_t handle,
208 uint32_t flags,
209 uint32_t *pdomid);
212 /* Functions to produce a dump of a given domain
213 * xc_domain_dumpcore - produces a dump to a specified file
214 * xc_domain_dumpcore_via_callback - produces a dump, using a specified
215 * callback function
216 */
217 int xc_domain_dumpcore(int xc_handle,
218 uint32_t domid,
219 const char *corename);
221 /* Define the callback function type for xc_domain_dumpcore_via_callback.
222 *
223 * This function is called by the coredump code for every "write",
224 * and passes an opaque object for the use of the function and
225 * created by the caller of xc_domain_dumpcore_via_callback.
226 */
227 typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);
229 int xc_domain_dumpcore_via_callback(int xc_handle,
230 uint32_t domid,
231 void *arg,
232 dumpcore_rtn_t dump_rtn);
234 /*
235 * This function sets the maximum number of vcpus that a domain may create.
236 *
237 * @parm xc_handle a handle to an open hypervisor interface.
238 * @parm domid the domain id in which vcpus are to be created.
239 * @parm max the maximum number of vcpus that the domain may create.
240 * @return 0 on success, -1 on failure.
241 */
242 int xc_domain_max_vcpus(int xc_handle,
243 uint32_t domid,
244 unsigned int max);
246 /**
247 * This function pauses a domain. A paused domain still exists in memory
248 * however it does not receive any timeslices from the hypervisor.
249 *
250 * @parm xc_handle a handle to an open hypervisor interface
251 * @parm domid the domain id to pause
252 * @return 0 on success, -1 on failure.
253 */
254 int xc_domain_pause(int xc_handle,
255 uint32_t domid);
256 /**
257 * This function unpauses a domain. The domain should have been previously
258 * paused.
259 *
260 * @parm xc_handle a handle to an open hypervisor interface
261 * @parm domid the domain id to unpause
262 * return 0 on success, -1 on failure
263 */
264 int xc_domain_unpause(int xc_handle,
265 uint32_t domid);
267 /**
268 * This function will destroy a domain. Destroying a domain removes the domain
269 * completely from memory. This function should be called after sending the
270 * domain a SHUTDOWN control message to free up the domain resources.
271 *
272 * @parm xc_handle a handle to an open hypervisor interface
273 * @parm domid the domain id to destroy
274 * @return 0 on success, -1 on failure
275 */
276 int xc_domain_destroy(int xc_handle,
277 uint32_t domid);
280 /**
281 * This function resumes a suspended domain. The domain should have
282 * been previously suspended.
283 *
284 * @parm xc_handle a handle to an open hypervisor interface
285 * @parm domid the domain id to resume
286 * @parm fast use cooperative resume (guest must support this)
287 * return 0 on success, -1 on failure
288 */
289 int xc_domain_resume(int xc_handle,
290 uint32_t domid,
291 int fast);
293 /**
294 * This function will shutdown a domain. This is intended for use in
295 * fully-virtualized domains where this operation is analogous to the
296 * sched_op operations in a paravirtualized domain. The caller is
297 * expected to give the reason for the shutdown.
298 *
299 * @parm xc_handle a handle to an open hypervisor interface
300 * @parm domid the domain id to destroy
301 * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
302 * @return 0 on success, -1 on failure
303 */
304 int xc_domain_shutdown(int xc_handle,
305 uint32_t domid,
306 int reason);
308 int xc_vcpu_setaffinity(int xc_handle,
309 uint32_t domid,
310 int vcpu,
311 uint64_t cpumap);
312 int xc_vcpu_getaffinity(int xc_handle,
313 uint32_t domid,
314 int vcpu,
315 uint64_t *cpumap);
317 /**
318 * This function will return information about one or more domains. It is
319 * designed to iterate over the list of domains. If a single domain is
320 * requested, this function will return the next domain in the list - if
321 * one exists. It is, therefore, important in this case to make sure the
322 * domain requested was the one returned.
323 *
324 * @parm xc_handle a handle to an open hypervisor interface
325 * @parm first_domid the first domain to enumerate information from. Domains
326 * are currently enumerated in order of creation.
327 * @parm max_doms the number of elements in info
328 * @parm info an array of max_doms size that will contain the information for
329 * the enumerated domains.
330 * @return the number of domains enumerated or -1 on error
331 */
332 int xc_domain_getinfo(int xc_handle,
333 uint32_t first_domid,
334 unsigned int max_doms,
335 xc_dominfo_t *info);
338 /**
339 * This function will set the execution context for the specified vcpu.
340 *
341 * @parm xc_handle a handle to an open hypervisor interface
342 * @parm domid the domain to set the vcpu context for
343 * @parm vcpu the vcpu number for the context
344 * @parm ctxt pointer to the cpu context with the values to set
345 * @return the number of domains enumerated or -1 on error
346 */
347 int xc_vcpu_setcontext(int xc_handle,
348 uint32_t domid,
349 uint32_t vcpu,
350 vcpu_guest_context_any_t *ctxt);
351 /**
352 * This function will return information about one or more domains, using a
353 * single hypercall. The domain information will be stored into the supplied
354 * array of xc_domaininfo_t structures.
355 *
356 * @parm xc_handle a handle to an open hypervisor interface
357 * @parm first_domain the first domain to enumerate information from.
358 * Domains are currently enumerated in order of creation.
359 * @parm max_domains the number of elements in info
360 * @parm info an array of max_doms size that will contain the information for
361 * the enumerated domains.
362 * @return the number of domains enumerated or -1 on error
363 */
364 int xc_domain_getinfolist(int xc_handle,
365 uint32_t first_domain,
366 unsigned int max_domains,
367 xc_domaininfo_t *info);
369 /**
370 * This function returns information about the context of a hvm domain
371 * @parm xc_handle a handle to an open hypervisor interface
372 * @parm domid the domain to get information from
373 * @parm ctxt_buf a pointer to a structure to store the execution context of
374 * the hvm domain
375 * @parm size the size of ctxt_buf in bytes
376 * @return 0 on success, -1 on failure
377 */
378 int xc_domain_hvm_getcontext(int xc_handle,
379 uint32_t domid,
380 uint8_t *ctxt_buf,
381 uint32_t size);
384 /**
385 * This function returns one element of the context of a hvm domain
386 * @parm xc_handle a handle to an open hypervisor interface
387 * @parm domid the domain to get information from
388 * @parm typecode which type of element required
389 * @parm instance which instance of the type
390 * @parm ctxt_buf a pointer to a structure to store the execution context of
391 * the hvm domain
392 * @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
393 * @return 0 on success, -1 on failure
394 */
395 int xc_domain_hvm_getcontext_partial(int xc_handle,
396 uint32_t domid,
397 uint16_t typecode,
398 uint16_t instance,
399 void *ctxt_buf,
400 uint32_t size);
402 /**
403 * This function will set the context for hvm domain
404 *
405 * @parm xc_handle a handle to an open hypervisor interface
406 * @parm domid the domain to set the hvm domain context for
407 * @parm hvm_ctxt pointer to the hvm context with the values to set
408 * @parm size the size of hvm_ctxt in bytes
409 * @return 0 on success, -1 on failure
410 */
411 int xc_domain_hvm_setcontext(int xc_handle,
412 uint32_t domid,
413 uint8_t *hvm_ctxt,
414 uint32_t size);
416 /**
417 * This function returns information about the execution context of a
418 * particular vcpu of a domain.
419 *
420 * @parm xc_handle a handle to an open hypervisor interface
421 * @parm domid the domain to get information from
422 * @parm vcpu the vcpu number
423 * @parm ctxt a pointer to a structure to store the execution context of the
424 * domain
425 * @return 0 on success, -1 on failure
426 */
427 int xc_vcpu_getcontext(int xc_handle,
428 uint32_t domid,
429 uint32_t vcpu,
430 vcpu_guest_context_any_t *ctxt);
432 typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
433 int xc_vcpu_getinfo(int xc_handle,
434 uint32_t domid,
435 uint32_t vcpu,
436 xc_vcpuinfo_t *info);
438 long long xc_domain_get_cpu_usage(int xc_handle,
439 domid_t domid,
440 int vcpu);
442 int xc_domain_sethandle(int xc_handle, uint32_t domid,
443 xen_domain_handle_t handle);
445 typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
446 int xc_shadow_control(int xc_handle,
447 uint32_t domid,
448 unsigned int sop,
449 unsigned long *dirty_bitmap,
450 unsigned long pages,
451 unsigned long *mb,
452 uint32_t mode,
453 xc_shadow_op_stats_t *stats);
455 int xc_sedf_domain_set(int xc_handle,
456 uint32_t domid,
457 uint64_t period, uint64_t slice,
458 uint64_t latency, uint16_t extratime,
459 uint16_t weight);
461 int xc_sedf_domain_get(int xc_handle,
462 uint32_t domid,
463 uint64_t* period, uint64_t *slice,
464 uint64_t *latency, uint16_t *extratime,
465 uint16_t *weight);
467 int xc_sched_credit_domain_set(int xc_handle,
468 uint32_t domid,
469 struct xen_domctl_sched_credit *sdom);
471 int xc_sched_credit_domain_get(int xc_handle,
472 uint32_t domid,
473 struct xen_domctl_sched_credit *sdom);
475 /**
476 * This function sends a trigger to a domain.
477 *
478 * @parm xc_handle a handle to an open hypervisor interface
479 * @parm domid the domain id to send trigger
480 * @parm trigger the trigger type
481 * @parm vcpu the vcpu number to send trigger
482 * return 0 on success, -1 on failure
483 */
484 int xc_domain_send_trigger(int xc_handle,
485 uint32_t domid,
486 uint32_t trigger,
487 uint32_t vcpu);
489 /**
490 * This function enables or disables debugging of a domain.
491 *
492 * @parm xc_handle a handle to an open hypervisor interface
493 * @parm domid the domain id to send trigger
494 * @parm enable true to enable debugging
495 * return 0 on success, -1 on failure
496 */
497 int xc_domain_setdebugging(int xc_handle,
498 uint32_t domid,
499 unsigned int enable);
501 /*
502 * EVENT CHANNEL FUNCTIONS
503 */
505 /* A port identifier is guaranteed to fit in 31 bits. */
506 typedef int evtchn_port_or_error_t;
508 /**
509 * This function allocates an unbound port. Ports are named endpoints used for
510 * interdomain communication. This function is most useful in opening a
511 * well-known port within a domain to receive events on.
512 *
513 * NOTE: If you are allocating a *local* unbound port, you probably want to
514 * use xc_evtchn_bind_unbound_port(). This function is intended for allocating
515 * ports *only* during domain creation.
516 *
517 * @parm xc_handle a handle to an open hypervisor interface
518 * @parm dom the ID of the local domain (the 'allocatee')
519 * @parm remote_dom the ID of the domain who will later bind
520 * @return allocated port (in @dom) on success, -1 on failure
521 */
522 evtchn_port_or_error_t
523 xc_evtchn_alloc_unbound(int xc_handle,
524 uint32_t dom,
525 uint32_t remote_dom);
527 int xc_evtchn_reset(int xc_handle,
528 uint32_t dom);
530 typedef struct evtchn_status xc_evtchn_status_t;
531 int xc_evtchn_status(int xc_handle, xc_evtchn_status_t *status);
533 /*
534 * Return a handle to the event channel driver, or -1 on failure, in which case
535 * errno will be set appropriately.
536 */
537 int xc_evtchn_open(void);
539 /*
540 * Close a handle previously allocated with xc_evtchn_open().
541 */
542 int xc_evtchn_close(int xce_handle);
544 /*
545 * Return an fd that can be select()ed on for further calls to
546 * xc_evtchn_pending().
547 */
548 int xc_evtchn_fd(int xce_handle);
550 /*
551 * Notify the given event channel. Returns -1 on failure, in which case
552 * errno will be set appropriately.
553 */
554 int xc_evtchn_notify(int xce_handle, evtchn_port_t port);
556 /*
557 * Returns a new event port awaiting interdomain connection from the given
558 * domain ID, or -1 on failure, in which case errno will be set appropriately.
559 */
560 evtchn_port_or_error_t
561 xc_evtchn_bind_unbound_port(int xce_handle, int domid);
563 /*
564 * Returns a new event port bound to the remote port for the given domain ID,
565 * or -1 on failure, in which case errno will be set appropriately.
566 */
567 evtchn_port_or_error_t
568 xc_evtchn_bind_interdomain(int xce_handle, int domid,
569 evtchn_port_t remote_port);
571 /*
572 * Bind an event channel to the given VIRQ. Returns the event channel bound to
573 * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
574 */
575 evtchn_port_or_error_t
576 xc_evtchn_bind_virq(int xce_handle, unsigned int virq);
578 /*
579 * Unbind the given event channel. Returns -1 on failure, in which case errno
580 * will be set appropriately.
581 */
582 int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);
584 /*
585 * Return the next event channel to become pending, or -1 on failure, in which
586 * case errno will be set appropriately.
587 */
588 evtchn_port_or_error_t
589 xc_evtchn_pending(int xce_handle);
591 /*
592 * Unmask the given event channel. Returns -1 on failure, in which case errno
593 * will be set appropriately.
594 */
595 int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
597 int xc_physdev_pci_access_modify(int xc_handle,
598 uint32_t domid,
599 int bus,
600 int dev,
601 int func,
602 int enable);
604 int xc_readconsolering(int xc_handle,
605 char **pbuffer,
606 unsigned int *pnr_chars,
607 int clear, int incremental, uint32_t *pindex);
609 int xc_send_debug_keys(int xc_handle, char *keys);
611 typedef xen_sysctl_physinfo_t xc_physinfo_t;
612 typedef uint32_t xc_cpu_to_node_t;
613 int xc_physinfo(int xc_handle,
614 xc_physinfo_t *info);
616 int xc_sched_id(int xc_handle,
617 int *sched_id);
619 typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
620 int xc_getcpuinfo(int xc_handle, int max_cpus,
621 xc_cpuinfo_t *info, int *nr_cpus);
623 int xc_domain_setmaxmem(int xc_handle,
624 uint32_t domid,
625 unsigned int max_memkb);
627 int xc_domain_set_memmap_limit(int xc_handle,
628 uint32_t domid,
629 unsigned long map_limitkb);
631 int xc_domain_set_time_offset(int xc_handle,
632 uint32_t domid,
633 int32_t time_offset_seconds);
635 int xc_domain_set_tsc_info(int xc_handle,
636 uint32_t domid,
637 uint32_t tsc_mode,
638 uint64_t elapsed_nsec,
639 uint32_t gtsc_khz,
640 uint32_t incarnation);
642 int xc_domain_get_tsc_info(int xc_handle,
643 uint32_t domid,
644 uint32_t *tsc_mode,
645 uint64_t *elapsed_nsec,
646 uint32_t *gtsc_khz,
647 uint32_t *incarnation);
649 int xc_domain_disable_migrate(int xc_handle, uint32_t domid);
651 int xc_domain_memory_increase_reservation(int xc_handle,
652 uint32_t domid,
653 unsigned long nr_extents,
654 unsigned int extent_order,
655 unsigned int mem_flags,
656 xen_pfn_t *extent_start);
658 int xc_domain_memory_decrease_reservation(int xc_handle,
659 uint32_t domid,
660 unsigned long nr_extents,
661 unsigned int extent_order,
662 xen_pfn_t *extent_start);
664 int xc_domain_memory_populate_physmap(int xc_handle,
665 uint32_t domid,
666 unsigned long nr_extents,
667 unsigned int extent_order,
668 unsigned int mem_flags,
669 xen_pfn_t *extent_start);
671 int xc_domain_memory_set_pod_target(int xc_handle,
672 uint32_t domid,
673 uint64_t target_pages,
674 uint64_t *tot_pages,
675 uint64_t *pod_cache_pages,
676 uint64_t *pod_entries);
678 int xc_domain_memory_get_pod_target(int xc_handle,
679 uint32_t domid,
680 uint64_t *tot_pages,
681 uint64_t *pod_cache_pages,
682 uint64_t *pod_entries);
684 int xc_domain_ioport_permission(int xc_handle,
685 uint32_t domid,
686 uint32_t first_port,
687 uint32_t nr_ports,
688 uint32_t allow_access);
690 int xc_domain_irq_permission(int xc_handle,
691 uint32_t domid,
692 uint8_t pirq,
693 uint8_t allow_access);
695 int xc_domain_iomem_permission(int xc_handle,
696 uint32_t domid,
697 unsigned long first_mfn,
698 unsigned long nr_mfns,
699 uint8_t allow_access);
701 int xc_domain_pin_memory_cacheattr(int xc_handle,
702 uint32_t domid,
703 uint64_t start,
704 uint64_t end,
705 uint32_t type);
707 unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
708 unsigned long mfn);
710 typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
711 typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
712 /* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
713 arrays. */
714 int xc_perfc_control(int xc_handle,
715 uint32_t op,
716 xc_perfc_desc_t *desc,
717 xc_perfc_val_t *val,
718 int *nbr_desc,
719 int *nbr_val);
721 typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
722 /* IMPORTANT: The caller is responsible for mlock()'ing the @data array. */
723 int xc_lockprof_control(int xc_handle,
724 uint32_t opcode,
725 uint32_t *n_elems,
726 uint64_t *time,
727 xc_lockprof_data_t *data);
729 /**
730 * Memory maps a range within one domain to a local address range. Mappings
731 * should be unmapped with munmap and should follow the same rules as mmap
732 * regarding page alignment. Returns NULL on failure.
733 *
734 * In Linux, the ring queue for the control channel is accessible by mapping
735 * the shared_info_frame (from xc_domain_getinfo()) + 2048. The structure
736 * stored there is of type control_if_t.
737 *
738 * @parm xc_handle a handle on an open hypervisor interface
739 * @parm dom the domain to map memory from
740 * @parm size the amount of memory to map (in multiples of page size)
741 * @parm prot same flag as in mmap().
742 * @parm mfn the frame address to map.
743 */
744 void *xc_map_foreign_range(int xc_handle, uint32_t dom,
745 int size, int prot,
746 unsigned long mfn );
748 void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
749 const xen_pfn_t *arr, int num );
751 /**
752 * DEPRECATED - use xc_map_foreign_bulk() instead.
753 *
754 * Like xc_map_foreign_pages(), except it can succeed partially.
755 * When a page cannot be mapped, its PFN in @arr is or'ed with
756 * 0xF0000000 to indicate the error.
757 */
758 void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
759 xen_pfn_t *arr, int num );
761 /**
762 * Like xc_map_foreign_pages(), except it can succeed partially.
763 * When a page cannot be mapped, its respective field in @err is
764 * set to the corresponding errno value.
765 */
766 void *xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot,
767 const xen_pfn_t *arr, int *err, unsigned int num);
769 /**
770 * Translates a virtual address in the context of a given domain and
771 * vcpu returning the GFN containing the address (that is, an MFN for
772 * PV guests, a PFN for HVM guests). Returns 0 for failure.
773 *
774 * @parm xc_handle a handle on an open hypervisor interface
775 * @parm dom the domain to perform the translation in
776 * @parm vcpu the vcpu to perform the translation on
777 * @parm virt the virtual address to translate
778 */
779 unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
780 int vcpu, unsigned long long virt);
783 /**
784 * DEPRECATED. Avoid using this, as it does not correctly account for PFNs
785 * without a backing MFN.
786 */
787 int xc_get_pfn_list(int xc_handle, uint32_t domid, uint64_t *pfn_buf,
788 unsigned long max_pfns);
790 unsigned long xc_ia64_fpsr_default(void);
792 int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
793 unsigned long dst_pfn, const char *src_page);
795 int xc_clear_domain_page(int xc_handle, uint32_t domid,
796 unsigned long dst_pfn);
798 long xc_get_max_pages(int xc_handle, uint32_t domid);
800 int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops,
801 domid_t dom);
803 int xc_memory_op(int xc_handle, int cmd, void *arg);
805 int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
806 int num, uint32_t *arr);
809 /* Get current total pages allocated to a domain. */
810 long xc_get_tot_pages(int xc_handle, uint32_t domid);
812 /**
813 * This function retrieves the number of bytes available
814 * in the heap in a specific range of address-widths and nodes.
815 *
816 * @parm xc_handle a handle to an open hypervisor interface
817 * @parm domid the domain to query
818 * @parm min_width the smallest address width to query (0 if don't care)
819 * @parm max_width the largest address width to query (0 if don't care)
820 * @parm node the node to query (-1 for all)
821 * @parm *bytes caller variable to put total bytes counted
822 * @return 0 on success, <0 on failure.
823 */
824 int xc_availheap(int xc_handle, int min_width, int max_width, int node,
825 uint64_t *bytes);
827 /*
828 * Trace Buffer Operations
829 */
831 /**
832 * xc_tbuf_enable - enable tracing buffers
833 *
834 * @parm xc_handle a handle to an open hypervisor interface
835 * @parm cnt size of tracing buffers to create (in pages)
836 * @parm mfn location to store mfn of the trace buffers to
837 * @parm size location to store the size (in bytes) of a trace buffer to
838 *
839 * Gets the machine address of the trace pointer area and the size of the
840 * per CPU buffers.
841 */
842 int xc_tbuf_enable(int xc_handle, unsigned long pages,
843 unsigned long *mfn, unsigned long *size);
845 /*
846 * Disable tracing buffers.
847 */
848 int xc_tbuf_disable(int xc_handle);
850 /**
851 * This function sets the size of the trace buffers. Setting the size
852 * is currently a one-shot operation that may be performed either at boot
853 * time or via this interface, not both. The buffer size must be set before
854 * enabling tracing.
855 *
856 * @parm xc_handle a handle to an open hypervisor interface
857 * @parm size the size in pages per cpu for the trace buffers
858 * @return 0 on success, -1 on failure.
859 */
860 int xc_tbuf_set_size(int xc_handle, unsigned long size);
862 /**
863 * This function retrieves the current size of the trace buffers.
864 * Note that the size returned is in terms of bytes, not pages.
866 * @parm xc_handle a handle to an open hypervisor interface
867 * @parm size will contain the size in bytes for the trace buffers
868 * @return 0 on success, -1 on failure.
869 */
870 int xc_tbuf_get_size(int xc_handle, unsigned long *size);
872 int xc_tbuf_set_cpu_mask(int xc_handle, uint32_t mask);
874 int xc_tbuf_set_evt_mask(int xc_handle, uint32_t mask);
876 int xc_domctl(int xc_handle, struct xen_domctl *domctl);
877 int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl);
879 int xc_version(int xc_handle, int cmd, void *arg);
881 int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);
883 int xc_flask_op(int xc_handle, flask_op_t *op);
885 /*
886 * Subscribe to state changes in a domain via evtchn.
887 * Returns -1 on failure, in which case errno will be set appropriately.
888 */
889 int xc_domain_subscribe_for_suspend(
890 int xc_handle, domid_t domid, evtchn_port_t port);
892 /**************************
893 * GRANT TABLE OPERATIONS *
894 **************************/
896 /*
897 * Return a handle to the grant table driver, or -1 on failure, in which case
898 * errno will be set appropriately.
899 */
900 int xc_gnttab_open(void);
902 /*
903 * Close a handle previously allocated with xc_gnttab_open().
904 */
905 int xc_gnttab_close(int xcg_handle);
907 /*
908 * Memory maps a grant reference from one domain to a local address range.
909 * Mappings should be unmapped with xc_gnttab_munmap. Returns NULL on failure.
910 *
911 * @parm xcg_handle a handle on an open grant table interface
912 * @parm domid the domain to map memory from
913 * @parm ref the grant reference ID to map
914 * @parm prot same flag as in mmap()
915 */
916 void *xc_gnttab_map_grant_ref(int xcg_handle,
917 uint32_t domid,
918 uint32_t ref,
919 int prot);
921 /**
922 * Memory maps one or more grant references from one or more domains to a
923 * contiguous local address range. Mappings should be unmapped with
924 * xc_gnttab_munmap. Returns NULL on failure.
925 *
926 * @parm xcg_handle a handle on an open grant table interface
927 * @parm count the number of grant references to be mapped
928 * @parm domids an array of @count domain IDs by which the corresponding @refs
929 * were granted
930 * @parm refs an array of @count grant references to be mapped
931 * @parm prot same flag as in mmap()
932 */
933 void *xc_gnttab_map_grant_refs(int xcg_handle,
934 uint32_t count,
935 uint32_t *domids,
936 uint32_t *refs,
937 int prot);
939 /**
940 * Memory maps one or more grant references from one domain to a
941 * contiguous local address range. Mappings should be unmapped with
942 * xc_gnttab_munmap. Returns NULL on failure.
943 *
944 * @parm xcg_handle a handle on an open grant table interface
945 * @parm count the number of grant references to be mapped
946 * @parm domid the domain to map memory from
947 * @parm refs an array of @count grant references to be mapped
948 * @parm prot same flag as in mmap()
949 */
950 void *xc_gnttab_map_domain_grant_refs(int xcg_handle,
951 uint32_t count,
952 uint32_t domid,
953 uint32_t *refs,
954 int prot);
956 /*
957 * Unmaps the @count pages starting at @start_address, which were mapped by a
958 * call to xc_gnttab_map_grant_ref or xc_gnttab_map_grant_refs. Returns zero
959 * on success, otherwise sets errno and returns non-zero.
960 */
961 int xc_gnttab_munmap(int xcg_handle,
962 void *start_address,
963 uint32_t count);
965 /*
966 * Sets the maximum number of grants that may be mapped by the given instance
967 * to @count.
968 *
969 * N.B. This function must be called after opening the handle, and before any
970 * other functions are invoked on it.
971 *
972 * N.B. When variable-length grants are mapped, fragmentation may be observed,
973 * and it may not be possible to satisfy requests up to the maximum number
974 * of grants.
975 */
976 int xc_gnttab_set_max_grants(int xcg_handle,
977 uint32_t count);
/*
 * Issue a raw grant-table hypercall: @op points to @count command
 * structures of @op_size bytes each; @cmd selects the GNTTABOP_*
 * sub-operation (see xen/grant_table.h, included at the top of this file).
 */
979 int xc_gnttab_op(int xc_handle, int cmd,
980 void * op, int op_size, int count);
/* NOTE(review): presumably returns the grant-table interface version
 * (1 or 2) in use by @domid -- confirm against the implementation. */
982 int xc_gnttab_get_version(int xc_handle, int domid);
/*
 * Map @domid's grant table using the v1 / v2 entry layout.
 * NOTE(review): *gnt_num presumably receives the number of entries
 * mapped; returns NULL on failure -- confirm.
 */
983 grant_entry_v1_t *xc_gnttab_map_table_v1(int xc_handle, int domid, int *gnt_num);
984 grant_entry_v2_t *xc_gnttab_map_table_v2(int xc_handle, int domid, int *gnt_num);
/*
 * Map a physical interrupt (@index) into @domid's pirq space; the
 * allocated pirq is returned through *pirq.
 * NOTE(review): semantics presumably follow PHYSDEVOP_map_pirq
 * (xen/physdev.h is included above) -- confirm.
 */
986 int xc_physdev_map_pirq(int xc_handle,
987 int domid,
988 int index,
989 int *pirq);
/*
 * MSI variant: additionally identifies the source device by
 * @bus/@devfn, the MSI-X entry (@entry_nr) and the MSI-X table
 * base address (@table_base).
 */
991 int xc_physdev_map_pirq_msi(int xc_handle,
992 int domid,
993 int index,
994 int *pirq,
995 int devfn,
996 int bus,
997 int entry_nr,
998 uint64_t table_base);
/* Release a pirq previously obtained via one of the map calls above. */
1000 int xc_physdev_unmap_pirq(int xc_handle,
1001 int domid,
1002 int pirq);
/*
 * Set the INTx line level of the PCI device @domain:@bus:@device (pin
 * @intx) as seen by HVM guest @dom.
 * NOTE(review): @level is presumably 0 (deassert) or 1 (assert) -- confirm.
 */
1004 int xc_hvm_set_pci_intx_level(
1005 int xc_handle, domid_t dom,
1006 uint8_t domain, uint8_t bus, uint8_t device, uint8_t intx,
1007 unsigned int level);
/* Set the level of ISA IRQ @isa_irq as seen by HVM guest @dom. */
1008 int xc_hvm_set_isa_irq_level(
1009 int xc_handle, domid_t dom,
1010 uint8_t isa_irq,
1011 unsigned int level);
/* Route PCI interrupt link @link to ISA IRQ @isa_irq for HVM guest @dom. */
1013 int xc_hvm_set_pci_link_route(
1014 int xc_handle, domid_t dom, uint8_t link, uint8_t isa_irq);
1017 /*
1018 * Track dirty bit changes in the VRAM area
1020 * All of this is done atomically:
1021 * - get the dirty bitmap since the last call
1022 * - set up dirty tracking area for period up to the next call
1023 * - clear the dirty tracking area.
1025 * Returns -ENODATA and does not fill bitmap if the area has changed since the
1026 * last call.
1027 */
/* NOTE(review): @bitmap presumably carries one bit per pfn in
 * [first_pfn, first_pfn + nr) -- confirm required buffer size. */
1028 int xc_hvm_track_dirty_vram(
1029 int xc_handle, domid_t dom,
1030 uint64_t first_pfn, uint64_t nr,
1031 unsigned long *bitmap);
1033 /*
1034 * Notify that some pages got modified by the Device Model
1035 */
1036 int xc_hvm_modified_memory(
1037 int xc_handle, domid_t dom, uint64_t first_pfn, uint64_t nr);
1039 /*
1040 * Set a range of memory to a specific type.
1041 * Allowed types are HVMMEM_ram_rw, HVMMEM_ram_ro, HVMMEM_mmio_dm
1042 */
1043 int xc_hvm_set_mem_type(
1044 int xc_handle, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint64_t nr);
/* Error classes reported through xc_get_last_error(). */
1047 typedef enum {
1048 XC_ERROR_NONE = 0,
1049 XC_INTERNAL_ERROR = 1,
1050 XC_INVALID_KERNEL = 2,
1051 XC_INVALID_PARAM = 3,
1052 XC_OUT_OF_MEMORY = 4,
1053 } xc_error_code;
1055 #define XC_MAX_ERROR_MSG_LEN 1024
/* Last-error record; see xc_get_last_error() below. */
1056 typedef struct {
1057 int code; /* an xc_error_code value */
1058 char message[XC_MAX_ERROR_MSG_LEN]; /* human-readable description */
1059 } xc_error;
1061 /*
1062 * Return a pointer to the last error. This pointer and the
1063 * data pointed to are only valid until the next call to
1064 * libxc.
1065 */
1066 const xc_error *xc_get_last_error(void);
1068 /*
1069 * Clear the last error
1070 */
1071 void xc_clear_last_error(void);
/* Callback type invoked when libxc records an error. */
1073 typedef void (*xc_error_handler)(const xc_error *err);
1075 /*
1076 * The default error handler which prints to stderr
1077 */
1078 void xc_default_error_handler(const xc_error *err);
1080 /*
1081 * Convert an error code into a text description
1082 */
1083 const char *xc_error_code_to_desc(int code);
1085 /*
1086 * Registers a callback to handle errors
1087 */
/* NOTE(review): presumably returns the previously installed handler,
 * given the return type -- confirm against the implementation. */
1088 xc_error_handler xc_set_error_handler(xc_error_handler handler);
/* Get/set an HVM parameter of @dom; @param is an HVM_PARAM_* index
 * (xen/hvm/params.h is included at the top of this file). */
1090 int xc_set_hvm_param(int handle, domid_t dom, int param, unsigned long value);
1091 int xc_get_hvm_param(int handle, domid_t dom, int param, unsigned long *value);
1093 /* IA64 specific, nvram save */
1094 int xc_ia64_save_to_nvram(int xc_handle, uint32_t dom);
1096 /* IA64 specific, nvram init */
1097 int xc_ia64_nvram_init(int xc_handle, char *dom_name, uint32_t dom);
1099 /* IA64 specific, set guest OS type optimizations */
1100 int xc_ia64_set_os_type(int xc_handle, char *guest_os_type, uint32_t dom);
1102 /* HVM guest pass-through */
/* Assign the PCI device identified by @machine_bdf to @domid. */
1103 int xc_assign_device(int xc_handle,
1104 uint32_t domid,
1105 uint32_t machine_bdf);
/*
 * Query the device group containing @machine_bdf.  Up to @max_sdevs
 * entries are written to @sdev_array; *num_sdevs receives the count.
 */
1107 int xc_get_device_group(int xc_handle,
1108 uint32_t domid,
1109 uint32_t machine_bdf,
1110 uint32_t max_sdevs,
1111 uint32_t *num_sdevs,
1112 uint32_t *sdev_array);
/* Test whether @machine_bdf may be assigned to @domid. */
1114 int xc_test_assign_device(int xc_handle,
1115 uint32_t domid,
1116 uint32_t machine_bdf);
/* Remove a previously assigned device from @domid. */
1118 int xc_deassign_device(int xc_handle,
1119 uint32_t domid,
1120 uint32_t machine_bdf);
/*
 * Map (or unmap) @nr_mfns machine frames starting at @first_mfn to guest
 * frames starting at @first_gfn.
 * NOTE(review): @add_mapping presumably selects add (non-zero) vs
 * remove (zero) -- confirm.
 */
1122 int xc_domain_memory_mapping(int xc_handle,
1123 uint32_t domid,
1124 unsigned long first_gfn,
1125 unsigned long first_mfn,
1126 unsigned long nr_mfns,
1127 uint32_t add_mapping);
/* I/O-port analogue of xc_domain_memory_mapping(). */
1129 int xc_domain_ioport_mapping(int xc_handle,
1130 uint32_t domid,
1131 uint32_t first_gport,
1132 uint32_t first_mport,
1133 uint32_t nr_ports,
1134 uint32_t add_mapping);
/*
 * Bind an MSI interrupt (@pirq) for @domid; @gvec is the guest vector,
 * @gflags carry delivery attributes and @gtable the MSI-X table base.
 * NOTE(review): exact flag encoding not visible here -- see the
 * domctl pass-through interrupt interface.
 */
1136 int xc_domain_update_msi_irq(
1137 int xc_handle,
1138 uint32_t domid,
1139 uint32_t gvec,
1140 uint32_t pirq,
1141 uint32_t gflags,
1142 uint64_t gtable);
/* Undo a binding established by xc_domain_update_msi_irq(). */
1144 int xc_domain_unbind_msi_irq(int xc_handle,
1145 uint32_t domid,
1146 uint32_t gvec,
1147 uint32_t pirq,
1148 uint32_t gflags);
/*
 * Bind machine IRQ @machine_irq to @domid for device pass-through.
 * @irq_type selects the flavour; the PCI coordinates (@bus/@device/@intx)
 * or @isa_irq apply depending on that type.
 */
1150 int xc_domain_bind_pt_irq(int xc_handle,
1151 uint32_t domid,
1152 uint8_t machine_irq,
1153 uint8_t irq_type,
1154 uint8_t bus,
1155 uint8_t device,
1156 uint8_t intx,
1157 uint8_t isa_irq);
/* Inverse of xc_domain_bind_pt_irq(); parameters mirror the bind call. */
1159 int xc_domain_unbind_pt_irq(int xc_handle,
1160 uint32_t domid,
1161 uint8_t machine_irq,
1162 uint8_t irq_type,
1163 uint8_t bus,
1164 uint8_t device,
1165 uint8_t intx,
1166 uint8_t isa_irq);
/* Convenience wrapper: PCI-typed binding of @machine_irq to @domid. */
1168 int xc_domain_bind_pt_pci_irq(int xc_handle,
1169 uint32_t domid,
1170 uint8_t machine_irq,
1171 uint8_t bus,
1172 uint8_t device,
1173 uint8_t intx);
/* Convenience wrapper: ISA-typed binding of @machine_irq to @domid. */
1175 int xc_domain_bind_pt_isa_irq(int xc_handle,
1176 uint32_t domid,
1177 uint8_t machine_irq);
/* Set/query the machine address width (in bits) used for @domid. */
1179 int xc_domain_set_machine_address_size(int handle,
1180 uint32_t domid,
1181 unsigned int width);
1182 int xc_domain_get_machine_address_size(int handle,
1183 uint32_t domid);
/* NOTE(review): presumably enables suppression of spurious page faults
 * for @domid -- behaviour not visible from this header; confirm. */
1185 int xc_domain_suppress_spurious_page_faults(int handle,
1186 uint32_t domid);
1188 /* Set the target domain */
1189 int xc_domain_set_target(int xc_handle,
1190 uint32_t domid,
1191 uint32_t target);
1193 /* Control the domain for debug */
/* NOTE(review): @sop presumably selects the debug sub-operation -- confirm. */
1194 int xc_domain_debug_control(int xc_handle,
1195 uint32_t domid,
1196 uint32_t sop,
1197 uint32_t vcpu);
1199 #if defined(__i386__) || defined(__x86_64__)
/*
 * x86-only CPUID policy control.  @input names the CPUID leaf
 * (and sub-leaf); @config is a per-register configuration string array,
 * with the resolved result returned via @config_transformed.
 * NOTE(review): string format not visible from this header -- see the
 * libxc cpuid implementation for the encoding.
 */
1200 int xc_cpuid_check(int xc,
1201 const unsigned int *input,
1202 const char **config,
1203 char **config_transformed);
1204 int xc_cpuid_set(int xc,
1205 domid_t domid,
1206 const unsigned int *input,
1207 const char **config,
1208 char **config_transformed);
/* Apply the default CPUID policy to @domid. */
1209 int xc_cpuid_apply_policy(int xc,
1210 domid_t domid);
/* Render the four CPUID register values in @regs as strings in @strs. */
1211 void xc_cpuid_to_str(const unsigned int *regs,
1212 char **strs);
1213 #endif
/* Per-Px-state counters returned by xc_pm_get_pxstat(). */
1215 struct xc_px_val {
1216 uint64_t freq; /* Px core frequency */
1217 uint64_t residency; /* Px residency time */
1218 uint64_t count; /* Px transition count */
1219 };
/* P-state statistics; caller supplies the trans_pt/pt buffers. */
1221 struct xc_px_stat {
1222 uint8_t total; /* total Px states */
1223 uint8_t usable; /* usable Px states */
1224 uint8_t last; /* last Px state */
1225 uint8_t cur; /* current Px state */
1226 uint64_t *trans_pt; /* Px transition table */
1227 struct xc_px_val *pt; /* per-state values, one per Px state */
1228 };
/* Query/reset P-state statistics for physical CPU @cpuid. */
1230 int xc_pm_get_max_px(int xc_handle, int cpuid, int *max_px);
1231 int xc_pm_get_pxstat(int xc_handle, int cpuid, struct xc_px_stat *pxpt);
1232 int xc_pm_reset_pxstat(int xc_handle, int cpuid);
/* C-state statistics; caller supplies the triggers/residencies buffers. */
1234 struct xc_cx_stat {
1235 uint32_t nr; /* entry nr in triggers & residencies, including C0 */
1236 uint32_t last; /* last Cx state */
1237 uint64_t idle_time; /* idle time from boot */
1238 uint64_t *triggers; /* Cx trigger counts */
1239 uint64_t *residencies; /* Cx residencies */
1240 };
1241 typedef struct xc_cx_stat xc_cx_stat_t;
/* Query/reset C-state statistics for physical CPU @cpuid. */
1243 int xc_pm_get_max_cx(int xc_handle, int cpuid, int *max_cx);
1244 int xc_pm_get_cxstat(int xc_handle, int cpuid, struct xc_cx_stat *cxpt);
1245 int xc_pm_reset_cxstat(int xc_handle, int cpuid);
/* Bring physical CPU @cpu online/offline. */
1247 int xc_cpu_online(int xc_handle, int cpu);
1248 int xc_cpu_offline(int xc_handle, int cpu);
1250 /*
1251 * cpufreq para name of this structure named
1252 * same as sysfs file name of native linux
1253 */
1254 typedef xen_userspace_t xc_userspace_t;
1255 typedef xen_ondemand_t xc_ondemand_t;
/*
 * cpufreq parameters for one CPU; field names mirror the Linux cpufreq
 * sysfs attribute names (see the comment above).  The pointer members
 * are caller-allocated output buffers sized by the IN/OUT counts.
 */
1257 struct xc_get_cpufreq_para {
1258 /* IN/OUT variable */
1259 uint32_t cpu_num;
1260 uint32_t freq_num;
1261 uint32_t gov_num;
1263 /* for all governors */
1264 /* OUT variable */
1265 uint32_t *affected_cpus;
1266 uint32_t *scaling_available_frequencies;
1267 char *scaling_available_governors;
1268 char scaling_driver[CPUFREQ_NAME_LEN];
1270 uint32_t cpuinfo_cur_freq;
1271 uint32_t cpuinfo_max_freq;
1272 uint32_t cpuinfo_min_freq;
1273 uint32_t scaling_cur_freq;
1275 char scaling_governor[CPUFREQ_NAME_LEN];
1276 uint32_t scaling_max_freq;
1277 uint32_t scaling_min_freq;
1279 /* for specific governor */
1280 union {
1281 xc_userspace_t userspace;
1282 xc_ondemand_t ondemand;
1283 } u;
1284 };
/* Fill @user_para with the cpufreq parameters of CPU @cpuid. */
1286 int xc_get_cpufreq_para(int xc_handle, int cpuid,
1287 struct xc_get_cpufreq_para *user_para);
/* Select the scaling governor named @govname for CPU @cpuid. */
1288 int xc_set_cpufreq_gov(int xc_handle, int cpuid, char *govname);
/* Set one cpufreq control (@ctrl_type) to @ctrl_value for CPU @cpuid. */
1289 int xc_set_cpufreq_para(int xc_handle, int cpuid,
1290 int ctrl_type, int ctrl_value);
/* Retrieve the average frequency of CPU @cpuid into *avg_freq. */
1291 int xc_get_cpufreq_avgfreq(int xc_handle, int cpuid, int *avg_freq);
/* CPU topology query; caller allocates the cpu_to_core/socket arrays. */
1293 struct xc_get_cputopo {
1294 /* IN: maximum addressable entry in
1295 * the caller-provided cpu_to_core/socket.
1296 */
1297 uint32_t max_cpus;
1298 uint32_t *cpu_to_core;
1299 uint32_t *cpu_to_socket;
1301 /* OUT: number of cpus returned
1302 * If OUT is greater than IN then the cpu_to_core/socket is truncated!
1303 */
1304 uint32_t nr_cpus;
1305 };
1307 int xc_get_cputopo(int xc_handle, struct xc_get_cputopo *info);
/* Scheduler/power tunables.
 * NOTE(review): value encodings are not visible from this header --
 * see the corresponding sysctl definitions. */
1309 int xc_set_sched_opt_smt(int xc_handle, uint32_t value);
1310 int xc_set_vcpu_migration_delay(int xc_handle, uint32_t value);
1311 int xc_get_vcpu_migration_delay(int xc_handle, uint32_t *value);
/* Get/set the deepest C-state the idle governor may enter. */
1313 int xc_get_cpuidle_max_cstate(int xc_handle, uint32_t *value);
1314 int xc_set_cpuidle_max_cstate(int xc_handle, uint32_t value);
1316 /**
1317 * tmem operations
1318 */
/* Generic tmem control: @subop selects the operation on pool @pool_id
 * of client @cli_id (see xen/tmem.h, included at the top of this file). */
1319 int xc_tmem_control(int xc, int32_t pool_id, uint32_t subop, uint32_t cli_id,
1320 uint32_t arg1, uint32_t arg2, uint64_t arg3, void *buf);
/* NOTE(review): presumably registers @uuid_str for client @cli_id
 * authentication -- confirm @arg1 meaning in the implementation. */
1321 int xc_tmem_auth(int xc_handle, int cli_id, char *uuid_str, int arg1);
/* Save/restore tmem state for domain @dom through file descriptor @fd. */
1322 int xc_tmem_save(int xc_handle, int dom, int live, int fd, int field_marker);
1323 int xc_tmem_save_extra(int xc_handle, int dom, int fd, int field_marker);
1324 void xc_tmem_save_done(int xc_handle, int dom);
1325 int xc_tmem_restore(int xc_handle, int dom, int fd);
1326 int xc_tmem_restore_extra(int xc_handle, int dom, int fd);
1328 /**
1329 * mem_event operations
1330 */
/* Low-level mem_event control; @op/@mode select the operation, with
 * the shared and ring pages used for event communication. */
1331 int xc_mem_event_control(int xc_handle, domid_t domain_id, unsigned int op,
1332 unsigned int mode, void *shared_page,
1333 void *ring_page, unsigned long gfn);
/* Enable/disable the mem_event interface for @domain_id. */
1335 int xc_mem_event_enable(int xc_handle, domid_t domain_id,
1336 void *shared_page, void *ring_page);
1337 int xc_mem_event_disable(int xc_handle, domid_t domain_id);
/* Memory paging life cycle for one guest frame @gfn:
 * nominate -> evict -> (on fault) prep -> resume. */
1339 int xc_mem_paging_nominate(int xc_handle, domid_t domain_id,
1340 unsigned long gfn);
1341 int xc_mem_paging_evict(int xc_handle, domid_t domain_id, unsigned long gfn);
1342 int xc_mem_paging_prep(int xc_handle, domid_t domain_id, unsigned long gfn);
1343 int xc_mem_paging_resume(int xc_handle, domid_t domain_id,
1344 unsigned long gfn);
1346 /**
1347 * memshr operations
1348 */
/* Enable (@enable non-zero) or disable memory sharing for @domid.
 * NOTE(review): enable semantics inferred from the parameter name -- confirm. */
1349 int xc_memshr_control(int xc_handle,
1350 uint32_t domid,
1351 int enable);
/* Nominate a page for sharing by guest frame number; on success
 * *handle receives an opaque sharing handle. */
1352 int xc_memshr_nominate_gfn(int xc_handle,
1353 uint32_t domid,
1354 unsigned long gfn,
1355 uint64_t *handle);
/* As above, but the page is identified by grant reference @gref. */
1356 int xc_memshr_nominate_gref(int xc_handle,
1357 uint32_t domid,
1358 grant_ref_t gref,
1359 uint64_t *handle);
/* Share the pages behind two previously nominated handles. */
1360 int xc_memshr_share(int xc_handle,
1361 uint64_t source_handle,
1362 uint64_t client_handle);
/* Resume a domain paused by the sharing subsystem. */
1363 int xc_memshr_domain_resume(int xc_handle,
1364 uint32_t domid);
/* Debug helpers: dump sharing state for a gfn, mfn or grant reference. */
1365 int xc_memshr_debug_gfn(int xc_handle,
1366 uint32_t domid,
1367 unsigned long gfn);
1368 int xc_memshr_debug_mfn(int xc_handle,
1369 uint32_t domid,
1370 unsigned long mfn);
1371 int xc_memshr_debug_gref(int xc_handle,
1372 uint32_t domid,
1373 grant_ref_t gref);
1375 #endif /* XENCTRL_H */