debuggers.hg

tools/libxc/xc_linux.c @ 20837:0b138a019292

libxc: use new (replacement) mmap-batch ioctl

Replace all calls to xc_map_foreign_batch() where the caller doesn't
inspect the passed-in array for per-page errors with calls to
xc_map_foreign_pages(). Replace all remaining calls with the newly
introduced xc_map_foreign_bulk() (a caller-side sketch follows below).

As a sideband modification (needed while writing the patch to ensure
they're unused) eliminate unused parameters to
uncanonicalize_pagetable() and xc_map_foreign_batch_single(). Also
unmap live_p2m_frame_list earlier in map_and_save_p2m_table(),
reducing the peak amount of virtual address space required.

All supported OSes other than Linux continue to use the old ioctl for
the time being.

Also change libxc's MAJOR to 4.0 to reflect the API change.
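
For illustration only, a minimal caller-side sketch of the difference
(map_old()/map_new(), their arguments and the chosen headers are
hypothetical and not part of this changeset; error handling is
abbreviated):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>      /* PROT_READ */
#include <xenctrl.h>       /* xc_map_foreign_batch/_pages/_bulk */
#include <xen/domctl.h>    /* XEN_DOMCTL_PFINFO_* */

/* Before: xc_map_foreign_batch() reports per-page failures by rewriting
 * entries of the pfns[] array it was given. */
void *map_old(int xc_handle, uint32_t dom, xen_pfn_t *pfns, int num)
{
    void *m = xc_map_foreign_batch(xc_handle, dom, PROT_READ, pfns, num);
    int i;

    if ( m == NULL )
        return NULL;
    for ( i = 0; i < num; i++ )
        if ( (pfns[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) ==
             XEN_DOMCTL_PFINFO_XTAB )
            fprintf(stderr, "page %d failed to map\n", i);
    return m;
}

/* After: callers that never inspected the array use xc_map_foreign_pages();
 * the rest receive an explicit err[] array from xc_map_foreign_bulk(). */
void *map_new(int xc_handle, uint32_t dom, const xen_pfn_t *pfns, int num)
{
    int *err = calloc(num, sizeof(*err));
    void *m;
    int i;

    if ( err == NULL )
        return NULL;
    m = xc_map_foreign_bulk(xc_handle, dom, PROT_READ, pfns, err, num);
    if ( m != NULL )
        for ( i = 0; i < num; i++ )
            if ( err[i] )   /* err[i] is a negative errno value */
                fprintf(stderr, "page %d: error %d\n", i, err[i]);
    free(err);
    return m;
}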

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 13 08:12:56 2010 +0000 (2010-01-13)
parents fe42b16855aa
children 4b8843ecd553
line source
/******************************************************************************
 *
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * xc_gnttab functions:
 * Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */

#include "xc_private.h"

#include <xen/memory.h>
#include <xen/sys/evtchn.h>
#include <xen/sys/gntdev.h>
#include <unistd.h>
#include <fcntl.h>

int xc_interface_open(void)
{
    int flags, saved_errno;
    int fd = open("/proc/xen/privcmd", O_RDWR);

    if ( fd == -1 )
    {
        PERROR("Could not obtain handle on privileged command interface");
        return -1;
    }

    /* Although we return the file handle as the 'xc handle' the API
       does not specify / guarantee that this integer is in fact
       a file handle. Thus we must take responsibility to ensure
       it doesn't propagate (i.e. leak) outside the process. */
    if ( (flags = fcntl(fd, F_GETFD)) < 0 )
    {
        PERROR("Could not get file handle flags");
        goto error;
    }

    flags |= FD_CLOEXEC;

    if ( fcntl(fd, F_SETFD, flags) < 0 )
    {
        PERROR("Could not set file handle flags");
        goto error;
    }

    return fd;

 error:
    saved_errno = errno;
    close(fd);
    errno = saved_errno;

    return -1;
}

int xc_interface_close(int xc_handle)
{
    return close(xc_handle);
}

static int xc_map_foreign_batch_single(int xc_handle, uint32_t dom,
                                       xen_pfn_t *mfn, unsigned long addr)
{
    privcmd_mmapbatch_t ioctlx;
    int rc;

    ioctlx.num = 1;
    ioctlx.dom = dom;
    ioctlx.addr = addr;
    ioctlx.arr = mfn;
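
    /*
     * The caller passes an entry tagged XEN_DOMCTL_PFINFO_PAGEDTAB for a
     * paged-out page. Toggle that tag before each attempt and retry the
     * ioctl for as long as the kernel reports ENOENT, i.e. until the page
     * has been paged back in.
     */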
    do
    {
        *mfn ^= XEN_DOMCTL_PFINFO_PAGEDTAB;
        usleep(100);
        rc = ioctl(xc_handle, IOCTL_PRIVCMD_MMAPBATCH, &ioctlx);
    }
    while ( (rc < 0) && (errno == ENOENT) );

    return rc;
}

void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
                           xen_pfn_t *arr, int num)
{
    privcmd_mmapbatch_t ioctlx;
    void *addr;
    int rc;

    addr = mmap(NULL, num << PAGE_SHIFT, prot, MAP_SHARED, xc_handle, 0);
    if ( addr == MAP_FAILED )
    {
        perror("xc_map_foreign_batch: mmap failed");
        return NULL;
    }

    ioctlx.num = num;
    ioctlx.dom = dom;
    ioctlx.addr = (unsigned long)addr;
    ioctlx.arr = arr;

    rc = ioctl(xc_handle, IOCTL_PRIVCMD_MMAPBATCH, &ioctlx);
    if ( (rc < 0) && (errno == ENOENT) )
    {
        int i;

        for ( i = 0; i < num; i++ )
        {
            if ( (arr[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) ==
                 XEN_DOMCTL_PFINFO_PAGEDTAB )
            {
                unsigned long paged_addr = (unsigned long)addr + (i << PAGE_SHIFT);
                rc = xc_map_foreign_batch_single(xc_handle, dom, &arr[i],
                                                 paged_addr);
                if ( rc < 0 )
                    goto out;
            }
        }
    }

 out:
    if ( rc < 0 )
    {
        int saved_errno = errno;
        perror("xc_map_foreign_batch: ioctl failed");
        (void)munmap(addr, num << PAGE_SHIFT);
        errno = saved_errno;
        return NULL;
    }

    return addr;
}

void *xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot,
                          const xen_pfn_t *arr, int *err, unsigned int num)
{
    privcmd_mmapbatch_v2_t ioctlx;
    void *addr;
    unsigned int i;
    int rc;

    addr = mmap(NULL, (unsigned long)num << PAGE_SHIFT, prot, MAP_SHARED,
                xc_handle, 0);
    if ( addr == MAP_FAILED )
    {
        perror("xc_map_foreign_bulk: mmap failed");
        return NULL;
    }

    ioctlx.num = num;
    ioctlx.dom = dom;
    ioctlx.addr = (unsigned long)addr;
    ioctlx.arr = arr;
    ioctlx.err = err;

    rc = ioctl(xc_handle, IOCTL_PRIVCMD_MMAPBATCH_V2, &ioctlx);

    if ( rc < 0 && errno == ENOENT )
    {
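        /*
         * Some pages were paged out at the time of the call (the kernel
         * flags them -ENOENT in err[]); retry those entries one by one,
         * sleeping between attempts until each page has been paged back in.
         */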
        for ( i = rc = 0; rc == 0 && i < num; i++ )
        {
            if ( err[i] != -ENOENT )
                continue;

            ioctlx.num = 1;
            ioctlx.dom = dom;
            ioctlx.addr = (unsigned long)addr + ((unsigned long)i<<PAGE_SHIFT);
            ioctlx.arr = arr + i;
            ioctlx.err = err + i;
            do {
                usleep(100);
                rc = ioctl(xc_handle, IOCTL_PRIVCMD_MMAPBATCH_V2, &ioctlx);
            } while ( rc < 0 && err[i] == -ENOENT );
        }
    }

    if ( rc < 0 && errno == ENOTTY && (int)num > 0 )
    {
        /*
         * IOCTL_PRIVCMD_MMAPBATCH_V2 is not supported - fall back to
         * IOCTL_PRIVCMD_MMAPBATCH.
         */
        xen_pfn_t *pfn = calloc(num, sizeof(*pfn));

        if ( pfn )
        {
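            /*
             * The old ioctl rewrites the array in place to report per-page
             * status, so operate on a scratch copy and reconstruct err[]
             * afterwards by comparing it against the caller's arr[].
             */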
            privcmd_mmapbatch_t ioctlx;

            memcpy(pfn, arr, num * sizeof(*arr));

            ioctlx.num = num;
            ioctlx.dom = dom;
            ioctlx.addr = (unsigned long)addr;
            ioctlx.arr = pfn;

            rc = ioctl(xc_handle, IOCTL_PRIVCMD_MMAPBATCH, &ioctlx);

            rc = rc < 0 ? -errno : 0;

            for ( i = 0; i < num; ++i )
            {
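                /*
                 * Whatever the kernel changed in pfn[i] relative to the
                 * caller's arr[i] gives the per-page outcome: an unchanged
                 * entry means the overall rc applies, the PAGEDTAB tag means
                 * the page was paged out and needs the single-page retry,
                 * and anything else is unexpected.
                 */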
                switch ( pfn[i] ^ arr[i] )
                {
                case 0:
                    err[i] = rc != -ENOENT ? rc : 0;
                    continue;
                default:
                    err[i] = -EINVAL;
                    continue;
                case XEN_DOMCTL_PFINFO_PAGEDTAB:
                    if ( rc != -ENOENT )
                    {
                        err[i] = rc ?: -EINVAL;
                        continue;
                    }
                    rc = xc_map_foreign_batch_single(xc_handle, dom, pfn + i,
                             (unsigned long)addr + ((unsigned long)i<<PAGE_SHIFT));
                    if ( rc < 0 )
                    {
                        rc = -errno;
                        break;
                    }
                    rc = -ENOENT;
                    continue;
                }
                break;
            }

            free(pfn);

            if ( rc == -ENOENT && i == num )
                rc = 0;
            else if ( rc )
            {
                errno = -rc;
                rc = -1;
            }
        }
        else
        {
            errno = ENOMEM;
            rc = -1;
        }
    }

    if ( rc < 0 )
    {
        int saved_errno = errno;

        perror("xc_map_foreign_bulk: ioctl failed");
        (void)munmap(addr, (unsigned long)num << PAGE_SHIFT);
        errno = saved_errno;
        return NULL;
    }

    return addr;
}

void *xc_map_foreign_range(int xc_handle, uint32_t dom, int size, int prot,
                           unsigned long mfn)
{
    xen_pfn_t *arr;
    int num;
    int i;
    void *ret;

    num = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    arr = calloc(num, sizeof(xen_pfn_t));

    for ( i = 0; i < num; i++ )
        arr[i] = mfn + i;

    ret = xc_map_foreign_pages(xc_handle, dom, prot, arr, num);
    free(arr);
    return ret;
}

void *xc_map_foreign_ranges(int xc_handle, uint32_t dom, size_t size, int prot,
                            size_t chunksize, privcmd_mmap_entry_t entries[],
                            int nentries)
{
    xen_pfn_t *arr;
    int num_per_entry;
    int num;
    int i;
    int j;
    void *ret;

    num_per_entry = chunksize >> PAGE_SHIFT;
    num = num_per_entry * nentries;
    arr = calloc(num, sizeof(xen_pfn_t));

    for ( i = 0; i < nentries; i++ )
        for ( j = 0; j < num_per_entry; j++ )
            arr[i * num_per_entry + j] = entries[i].mfn + j;

    ret = xc_map_foreign_pages(xc_handle, dom, prot, arr, num);
    free(arr);
    return ret;
}

static int do_privcmd(int xc_handle, unsigned int cmd, unsigned long data)
{
    return ioctl(xc_handle, cmd, data);
}

int do_xen_hypercall(int xc_handle, privcmd_hypercall_t *hypercall)
{
    return do_privcmd(xc_handle, IOCTL_PRIVCMD_HYPERCALL,
                      (unsigned long)hypercall);
}

#define MTAB "/proc/mounts"
#define MAX_PATH 255
#define _STR(x) #x
#define STR(x) _STR(x)

static int find_sysfsdir(char *sysfsdir)
{
    FILE *fp;
    char type[MAX_PATH + 1];

    if ( (fp = fopen(MTAB, "r")) == NULL )
        return -1;

    while ( fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %*s %*d %*d\n",
                   sysfsdir, type) == 2 )
        if ( strncmp(type, "sysfs", 5) == 0 )
            break;

    fclose(fp);

    return ((strncmp(type, "sysfs", 5) == 0) ? 0 : -1);
}
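
/*
 * Resolve a Xen device name (e.g. "evtchn" or "gntdev") to a device number
 * by reading <sysfs>/class/{xen,misc}/<name>/dev under the sysfs mount
 * point located above.
 */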
int xc_find_device_number(const char *name)
{
    FILE *fp;
    int i, major, minor;
    char sysfsdir[MAX_PATH + 1];
    static char *classlist[] = { "xen", "misc" };

    for ( i = 0; i < (sizeof(classlist) / sizeof(classlist[0])); i++ )
    {
        if ( find_sysfsdir(sysfsdir) < 0 )
            goto not_found;

        /* <base>/class/<classname>/<devname>/dev */
        strncat(sysfsdir, "/class/", MAX_PATH);
        strncat(sysfsdir, classlist[i], MAX_PATH);
        strncat(sysfsdir, "/", MAX_PATH);
        strncat(sysfsdir, name, MAX_PATH);
        strncat(sysfsdir, "/dev", MAX_PATH);

        if ( (fp = fopen(sysfsdir, "r")) != NULL )
            goto found;
    }

 not_found:
    errno = ENOENT;
    return -1;

 found:
    if ( fscanf(fp, "%d:%d", &major, &minor) != 2 )
    {
        fclose(fp);
        goto not_found;
    }

    fclose(fp);

    return makedev(major, minor);
}

#define EVTCHN_DEV_NAME "/dev/xen/evtchn"

int xc_evtchn_open(void)
{
    struct stat st;
    int fd;
    int devnum;

    devnum = xc_find_device_number("evtchn");

    /* Make sure any existing device file links to correct device. */
    if ( (lstat(EVTCHN_DEV_NAME, &st) != 0) || !S_ISCHR(st.st_mode) ||
         (st.st_rdev != devnum) )
        (void)unlink(EVTCHN_DEV_NAME);

 reopen:
    if ( (fd = open(EVTCHN_DEV_NAME, O_RDWR)) == -1 )
    {
        if ( (errno == ENOENT) &&
             ((mkdir("/dev/xen", 0755) == 0) || (errno == EEXIST)) &&
             (mknod(EVTCHN_DEV_NAME, S_IFCHR|0600, devnum) == 0) )
            goto reopen;

        PERROR("Could not open event channel interface");
        return -1;
    }

    return fd;
}

int xc_evtchn_close(int xce_handle)
{
    return close(xce_handle);
}

int xc_evtchn_fd(int xce_handle)
{
    return xce_handle;
}

int xc_evtchn_notify(int xce_handle, evtchn_port_t port)
{
    struct ioctl_evtchn_notify notify;

    notify.port = port;

    return ioctl(xce_handle, IOCTL_EVTCHN_NOTIFY, &notify);
}

evtchn_port_or_error_t
xc_evtchn_bind_unbound_port(int xce_handle, int domid)
{
    struct ioctl_evtchn_bind_unbound_port bind;

    bind.remote_domain = domid;

    return ioctl(xce_handle, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
}

evtchn_port_or_error_t
xc_evtchn_bind_interdomain(int xce_handle, int domid,
                           evtchn_port_t remote_port)
{
    struct ioctl_evtchn_bind_interdomain bind;

    bind.remote_domain = domid;
    bind.remote_port = remote_port;

    return ioctl(xce_handle, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
}

evtchn_port_or_error_t
xc_evtchn_bind_virq(int xce_handle, unsigned int virq)
{
    struct ioctl_evtchn_bind_virq bind;

    bind.virq = virq;

    return ioctl(xce_handle, IOCTL_EVTCHN_BIND_VIRQ, &bind);
}

int xc_evtchn_unbind(int xce_handle, evtchn_port_t port)
{
    struct ioctl_evtchn_unbind unbind;

    unbind.port = port;

    return ioctl(xce_handle, IOCTL_EVTCHN_UNBIND, &unbind);
}

evtchn_port_or_error_t
xc_evtchn_pending(int xce_handle)
{
    evtchn_port_t port;

    if ( read_exact(xce_handle, (char *)&port, sizeof(port)) == -1 )
        return -1;

    return port;
}

int xc_evtchn_unmask(int xce_handle, evtchn_port_t port)
{
    return write_exact(xce_handle, (char *)&port, sizeof(port));
}

/* Optionally flush file to disk and discard page cache */
void discard_file_cache(int fd, int flush)
{
    off_t cur = 0;
    int saved_errno = errno;

    if ( flush && (fsync(fd) < 0) )
    {
        /*PERROR("Failed to flush file: %s", strerror(errno));*/
        goto out;
    }

    /*
     * Calculate last page boundary of amount written so far
     * unless we are flushing in which case entire cache
     * is discarded.
     */
    if ( !flush )
    {
        if ( (cur = lseek(fd, 0, SEEK_CUR)) == (off_t)-1 )
            cur = 0;
        cur &= ~(PAGE_SIZE-1);
    }

    /* Discard from the buffer cache. */
    if ( posix_fadvise64(fd, 0, cur, POSIX_FADV_DONTNEED) < 0 )
    {
        /*PERROR("Failed to discard cache: %s", strerror(errno));*/
        goto out;
    }

 out:
    errno = saved_errno;
}

#define GNTTAB_DEV_NAME "/dev/xen/gntdev"

int xc_gnttab_open(void)
{
    struct stat st;
    int fd;
    int devnum;

    devnum = xc_find_device_number("gntdev");

    /* Make sure any existing device file links to correct device. */
    if ( (lstat(GNTTAB_DEV_NAME, &st) != 0) || !S_ISCHR(st.st_mode) ||
         (st.st_rdev != devnum) )
        (void)unlink(GNTTAB_DEV_NAME);

 reopen:
    if ( (fd = open(GNTTAB_DEV_NAME, O_RDWR)) == -1 )
    {
        if ( (errno == ENOENT) &&
             ((mkdir("/dev/xen", 0755) == 0) || (errno == EEXIST)) &&
             (mknod(GNTTAB_DEV_NAME, S_IFCHR|0600, devnum) == 0) )
            goto reopen;

        PERROR("Could not open grant table interface");
        return -1;
    }

    return fd;
}

int xc_gnttab_close(int xcg_handle)
{
    return close(xcg_handle);
}

void *xc_gnttab_map_grant_ref(int xcg_handle, uint32_t domid, uint32_t ref,
                              int prot)
{
    struct ioctl_gntdev_map_grant_ref map;
    void *addr;

    map.count = 1;
    map.refs[0].domid = domid;
    map.refs[0].ref = ref;

    if ( ioctl(xcg_handle, IOCTL_GNTDEV_MAP_GRANT_REF, &map) )
        return NULL;

 mmap_again:
    addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, xcg_handle, map.index);
    if ( addr == MAP_FAILED )
    {
        int saved_errno = errno;
        struct ioctl_gntdev_unmap_grant_ref unmap_grant;

        if ( saved_errno == EAGAIN )
        {
            usleep(1000);
            goto mmap_again;
        }
        /* Unmap the driver slots used to store the grant information. */
        perror("xc_gnttab_map_grant_ref: mmap failed");
        unmap_grant.index = map.index;
        unmap_grant.count = 1;
        ioctl(xcg_handle, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap_grant);
        errno = saved_errno;
        return NULL;
    }

    return addr;
}
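
/*
 * Map 'count' grant references in one go. 'domids_stride' selects whether
 * each reference has its own granting domain (stride 1) or all references
 * share the single domid pointed to (stride 0).
 */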
static void *do_gnttab_map_grant_refs(int xcg_handle, uint32_t count,
                                      uint32_t *domids, int domids_stride,
                                      uint32_t *refs, int prot)
{
    struct ioctl_gntdev_map_grant_ref *map;
    void *addr = NULL;
    int i;

    map = malloc(sizeof(*map) +
                 (count - 1) * sizeof(struct ioctl_gntdev_map_grant_ref));
    if ( map == NULL )
        return NULL;

    for ( i = 0; i < count; i++ )
    {
        map->refs[i].domid = domids[i * domids_stride];
        map->refs[i].ref = refs[i];
    }

    map->count = count;

    if ( ioctl(xcg_handle, IOCTL_GNTDEV_MAP_GRANT_REF, map) )
        goto out;

    addr = mmap(NULL, PAGE_SIZE * count, prot, MAP_SHARED, xcg_handle,
                map->index);
    if ( addr == MAP_FAILED )
    {
        int saved_errno = errno;
        struct ioctl_gntdev_unmap_grant_ref unmap_grant;

        /* Unmap the driver slots used to store the grant information. */
        perror("xc_gnttab_map_grant_refs: mmap failed");
        unmap_grant.index = map->index;
        unmap_grant.count = count;
        ioctl(xcg_handle, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap_grant);
        errno = saved_errno;
        addr = NULL;
    }

 out:
    free(map);

    return addr;
}

void *xc_gnttab_map_grant_refs(int xcg_handle, uint32_t count, uint32_t *domids,
                               uint32_t *refs, int prot)
{
    return do_gnttab_map_grant_refs(xcg_handle, count, domids, 1, refs, prot);
}

void *xc_gnttab_map_domain_grant_refs(int xcg_handle, uint32_t count,
                                      uint32_t domid, uint32_t *refs, int prot)
{
    return do_gnttab_map_grant_refs(xcg_handle, count, &domid, 0, refs, prot);
}

int xc_gnttab_munmap(int xcg_handle, void *start_address, uint32_t count)
{
    struct ioctl_gntdev_get_offset_for_vaddr get_offset;
    struct ioctl_gntdev_unmap_grant_ref unmap_grant;
    int rc;

    if ( start_address == NULL )
    {
        errno = EINVAL;
        return -1;
    }

    /* First, it is necessary to get the offset which was initially used to
     * mmap() the pages.
     */
    get_offset.vaddr = (unsigned long)start_address;
    if ( (rc = ioctl(xcg_handle, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR,
                     &get_offset)) )
        return rc;

    if ( get_offset.count != count )
    {
        errno = EINVAL;
        return -1;
    }

    /* Next, unmap the memory. */
    if ( (rc = munmap(start_address, count * getpagesize())) )
        return rc;

    /* Finally, unmap the driver slots used to store the grant information. */
    unmap_grant.index = get_offset.offset;
    unmap_grant.count = count;
    if ( (rc = ioctl(xcg_handle, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap_grant)) )
        return rc;

    return 0;
}

int xc_gnttab_set_max_grants(int xcg_handle, uint32_t count)
{
    struct ioctl_gntdev_set_max_grants set_max;
    int rc;

    set_max.count = count;
    if ( (rc = ioctl(xcg_handle, IOCTL_GNTDEV_SET_MAX_GRANTS, &set_max)) )
        return rc;

    return 0;
}
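
/*
 * Issue a grant table hypercall directly. The op buffer is locked in memory
 * for the duration of the call because the hypervisor accesses it through
 * its virtual address.
 */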
int xc_gnttab_op(int xc_handle, int cmd, void *op, int op_size, int count)
{
    int ret = 0;
    DECLARE_HYPERCALL;

    hypercall.op = __HYPERVISOR_grant_table_op;
    hypercall.arg[0] = cmd;
    hypercall.arg[1] = (unsigned long)op;
    hypercall.arg[2] = count;

    if ( lock_pages(op, count * op_size) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out1;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(op, count * op_size);

 out1:
    return ret;
}

int xc_gnttab_get_version(int xc_handle, int domid)
{
    struct gnttab_get_version query;
    int rc;

    query.dom = domid;
    rc = xc_gnttab_op(xc_handle, GNTTABOP_get_version, &query, sizeof(query),
                      1);
    if ( rc < 0 )
        return rc;
    else
        return query.version;
}

static void *_gnttab_map_table(int xc_handle, int domid, int *gnt_num)
{
    int rc, i;
    struct gnttab_query_size query;
    struct gnttab_setup_table setup;
    unsigned long *frame_list = NULL;
    xen_pfn_t *pfn_list = NULL;
    grant_entry_v1_t *gnt = NULL;

    if ( !gnt_num )
        return NULL;

    query.dom = domid;
    rc = xc_gnttab_op(xc_handle, GNTTABOP_query_size, &query, sizeof(query), 1);

    if ( rc || (query.status != GNTST_okay) )
    {
        ERROR("Could not query dom%d's grant size\n", domid);
        return NULL;
    }

    *gnt_num = query.nr_frames * (PAGE_SIZE / sizeof(grant_entry_v1_t) );

    frame_list = malloc(query.nr_frames * sizeof(unsigned long));
    if ( !frame_list || lock_pages(frame_list,
                                   query.nr_frames * sizeof(unsigned long)) )
    {
        ERROR("Could not allocate/lock frame_list in xc_gnttab_map_table\n");
        if ( frame_list )
            free(frame_list);
        return NULL;
    }

    pfn_list = malloc(query.nr_frames * sizeof(xen_pfn_t));
    if ( !pfn_list )
    {
        ERROR("Could not allocate pfn_list in xc_gnttab_map_table\n");
        goto err;
    }

    setup.dom = domid;
    setup.nr_frames = query.nr_frames;
    set_xen_guest_handle(setup.frame_list, frame_list);

    /* XXX Any race with other setup_table hypercall? */
    rc = xc_gnttab_op(xc_handle, GNTTABOP_setup_table, &setup, sizeof(setup),
                      1);

    if ( rc || (setup.status != GNTST_okay) )
    {
        ERROR("Could not get grant table frame list\n");
        goto err;
    }

    for ( i = 0; i < setup.nr_frames; i++ )
        pfn_list[i] = frame_list[i];

    gnt = xc_map_foreign_pages(xc_handle, domid, PROT_READ, pfn_list,
                               setup.nr_frames);
    if ( !gnt )
    {
        ERROR("Could not map grant table\n");
        goto err;
    }
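
    /*
     * Note that the success path falls through to the cleanup below as
     * well: frame_list and pfn_list are no longer needed once the grant
     * table itself has been mapped.
     */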
 err:
    if ( frame_list )
    {
        unlock_pages(frame_list, query.nr_frames * sizeof(unsigned long));
        free(frame_list);
    }
    if ( pfn_list )
        free(pfn_list);

    return gnt;
}

grant_entry_v1_t *xc_gnttab_map_table_v1(int xc_handle, int domid,
                                         int *gnt_num)
{
    if (xc_gnttab_get_version(xc_handle, domid) == 2)
        return NULL;
    return _gnttab_map_table(xc_handle, domid, gnt_num);
}

grant_entry_v2_t *xc_gnttab_map_table_v2(int xc_handle, int domid,
                                         int *gnt_num)
{
    if (xc_gnttab_get_version(xc_handle, domid) != 2)
        return NULL;
    return _gnttab_map_table(xc_handle, domid, gnt_num);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */