debuggers.hg

view tools/libxc/xc_private.c @ 21067:b4a1832a916f

Update Xen version to 4.0.0-rc6
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 09 18:18:05 2010 +0000 (2010-03-09)
parents fbe8f32fa257
children 779c0ef9682c
line source
1 /******************************************************************************
2 * xc_private.c
3 *
4 * Helper functions for the rest of the library.
5 */
7 #include <inttypes.h>
8 #include "xc_private.h"
9 #include "xg_private.h"
10 #include <stdarg.h>
11 #include <stdlib.h>
12 #include <malloc.h>
13 #include <unistd.h>
14 #include <pthread.h>
/* Thread-local storage key holding each thread's xc_error record. */
static pthread_key_t last_error_pkey;
static pthread_once_t last_error_pkey_once = PTHREAD_ONCE_INIT;

/* Thread-local storage key holding each thread's safe_strerror() buffer. */
static pthread_key_t errbuf_pkey;
static pthread_once_t errbuf_pkey_once = PTHREAD_ONCE_INIT;

/* Debug builds print errors to stderr by default; otherwise no handler
 * is installed until xc_set_error_handler() is called. */
#if DEBUG
static xc_error_handler error_handler = xc_default_error_handler;
#else
static xc_error_handler error_handler = NULL;
#endif
28 void xc_default_error_handler(const xc_error *err)
29 {
30 const char *desc = xc_error_code_to_desc(err->code);
31 fprintf(stderr, "ERROR %s: %s\n", desc, err->message);
32 }
34 static void
35 _xc_clean_last_error(void *m)
36 {
37 free(m);
38 pthread_setspecific(last_error_pkey, NULL);
39 }
/* pthread_once() helper: create the TLS key for the last-error record,
 * registering _xc_clean_last_error to run at thread exit. */
static void
_xc_init_last_error(void)
{
    pthread_key_create(&last_error_pkey, _xc_clean_last_error);
}
47 static xc_error *
48 _xc_get_last_error(void)
49 {
50 xc_error *last_error;
52 pthread_once(&last_error_pkey_once, _xc_init_last_error);
54 last_error = pthread_getspecific(last_error_pkey);
55 if (last_error == NULL) {
56 last_error = malloc(sizeof(xc_error));
57 pthread_setspecific(last_error_pkey, last_error);
58 xc_clear_last_error();
59 }
61 return last_error;
62 }
64 const xc_error *xc_get_last_error(void)
65 {
66 return _xc_get_last_error();
67 }
69 void xc_clear_last_error(void)
70 {
71 xc_error *last_error = _xc_get_last_error();
72 last_error->code = XC_ERROR_NONE;
73 last_error->message[0] = '\0';
74 }
76 const char *xc_error_code_to_desc(int code)
77 {
78 /* Sync to members of xc_error_code enumeration in xenctrl.h */
79 switch ( code )
80 {
81 case XC_ERROR_NONE:
82 return "No error details";
83 case XC_INTERNAL_ERROR:
84 return "Internal error";
85 case XC_INVALID_KERNEL:
86 return "Invalid kernel";
87 case XC_INVALID_PARAM:
88 return "Invalid configuration";
89 case XC_OUT_OF_MEMORY:
90 return "Out of memory";
91 }
93 return "Unknown error code";
94 }
96 xc_error_handler xc_set_error_handler(xc_error_handler handler)
97 {
98 xc_error_handler old = error_handler;
99 error_handler = handler;
100 return old;
101 }
103 static void _xc_set_error(int code, const char *msg)
104 {
105 xc_error *last_error = _xc_get_last_error();
106 last_error->code = code;
107 strncpy(last_error->message, msg, XC_MAX_ERROR_MSG_LEN - 1);
108 last_error->message[XC_MAX_ERROR_MSG_LEN-1] = '\0';
109 }
111 void xc_set_error(int code, const char *fmt, ...)
112 {
113 int saved_errno = errno;
114 char msg[XC_MAX_ERROR_MSG_LEN];
115 va_list args;
117 va_start(args, fmt);
118 vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
119 msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';
120 va_end(args);
122 _xc_set_error(code, msg);
124 errno = saved_errno;
126 if ( error_handler != NULL ) {
127 xc_error *last_error = _xc_get_last_error();
128 error_handler(last_error);
129 }
130 }
132 #ifdef __sun__
/* __sun__ build: no-op implementations — neither page locking nor the
 * per-thread hypercall bounce buffer is used on this platform. */
int lock_pages(void *addr, size_t len) { return 0; }
void unlock_pages(void *addr, size_t len) { }

int hcall_buf_prep(void **addr, size_t len) { return 0; }
void hcall_buf_release(void **addr, size_t len) { }
140 #else /* !__sun__ */
142 int lock_pages(void *addr, size_t len)
143 {
144 int e;
145 void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
146 size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
147 PAGE_SIZE - 1) & PAGE_MASK;
148 e = mlock(laddr, llen);
149 return e;
150 }
152 void unlock_pages(void *addr, size_t len)
153 {
154 void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
155 size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
156 PAGE_SIZE - 1) & PAGE_MASK;
157 safe_munlock(laddr, llen);
158 }
/* Per-thread bounce buffer used to pass small hypercall arguments in
 * pre-locked memory (see hcall_buf_prep/hcall_buf_release below). */
static pthread_key_t hcall_buf_pkey;
static pthread_once_t hcall_buf_pkey_once = PTHREAD_ONCE_INIT;
struct hcall_buf {
    void *buf;     /* one locked page, lazily allocated */
    void *oldbuf;  /* caller's original pointer while bounced, else NULL */
};
167 static void _xc_clean_hcall_buf(void *m)
168 {
169 struct hcall_buf *hcall_buf = m;
171 if ( hcall_buf )
172 {
173 if ( hcall_buf->buf )
174 {
175 unlock_pages(hcall_buf->buf, PAGE_SIZE);
176 free(hcall_buf->buf);
177 }
179 free(hcall_buf);
180 }
182 pthread_setspecific(hcall_buf_pkey, NULL);
183 }
/* pthread_once() helper: create the TLS key for the hypercall bounce
 * buffer, registering _xc_clean_hcall_buf to run at thread exit. */
static void _xc_init_hcall_buf(void)
{
    pthread_key_create(&hcall_buf_pkey, _xc_clean_hcall_buf);
}
/*
 * Prepare *addr (len bytes) for use as a hypercall argument.
 *
 * If len fits in one page and the per-thread bounce page is free, the
 * caller's data is copied into the pre-locked bounce page and *addr is
 * redirected to it; hcall_buf_release() undoes the swap.  On any
 * allocation/locking failure — or if the bounce page is busy or len is
 * too large — we fall back to locking the caller's own buffer in place.
 *
 * Returns 0 on success, non-zero if the fallback lock_pages() failed.
 */
int hcall_buf_prep(void **addr, size_t len)
{
    struct hcall_buf *hcall_buf;

    pthread_once(&hcall_buf_pkey_once, _xc_init_hcall_buf);

    hcall_buf = pthread_getspecific(hcall_buf_pkey);
    if ( !hcall_buf )
    {
        hcall_buf = calloc(1, sizeof(*hcall_buf));
        if ( !hcall_buf )
            goto out;   /* OOM: fall back to locking the caller's buffer */
        pthread_setspecific(hcall_buf_pkey, hcall_buf);
    }

    /* Lazily allocate and lock the single page-sized bounce buffer. */
    if ( !hcall_buf->buf )
    {
        hcall_buf->buf = xc_memalign(PAGE_SIZE, PAGE_SIZE);
        if ( !hcall_buf->buf || lock_pages(hcall_buf->buf, PAGE_SIZE) )
        {
            free(hcall_buf->buf);
            hcall_buf->buf = NULL;
            goto out;   /* fall back to locking the caller's buffer */
        }
    }

    /* Bounce only if the data fits and the page is not already in use. */
    if ( (len < PAGE_SIZE) && !hcall_buf->oldbuf )
    {
        memcpy(hcall_buf->buf, *addr, len);
        hcall_buf->oldbuf = *addr;
        *addr = hcall_buf->buf;
        return 0;
    }

 out:
    return lock_pages(*addr, len);
}
228 void hcall_buf_release(void **addr, size_t len)
229 {
230 struct hcall_buf *hcall_buf = pthread_getspecific(hcall_buf_pkey);
232 if ( hcall_buf && (hcall_buf->buf == *addr) )
233 {
234 memcpy(hcall_buf->oldbuf, *addr, len);
235 *addr = hcall_buf->oldbuf;
236 hcall_buf->oldbuf = NULL;
237 }
238 else
239 {
240 unlock_pages(*addr, len);
241 }
242 }
244 #endif
246 /* NB: arr must be locked */
/*
 * Query the frame types of 'num' pfns via XEN_DOMCTL_getpageframeinfo3;
 * results are written back into 'arr' in place.
 * Returns do_domctl()'s result.
 */
int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
                          unsigned int num, xen_pfn_t *arr)
{
    DECLARE_DOMCTL;
    domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
    domctl.domain = (domid_t)dom;
    domctl.u.getpageframeinfo3.num = num;
    set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
    return do_domctl(xc_handle, &domctl);
}
258 int xc_mmuext_op(
259 int xc_handle,
260 struct mmuext_op *op,
261 unsigned int nr_ops,
262 domid_t dom)
263 {
264 DECLARE_HYPERCALL;
265 long ret = -EINVAL;
267 if ( hcall_buf_prep((void **)&op, nr_ops*sizeof(*op)) != 0 )
268 {
269 PERROR("Could not lock memory for Xen hypercall");
270 goto out1;
271 }
273 hypercall.op = __HYPERVISOR_mmuext_op;
274 hypercall.arg[0] = (unsigned long)op;
275 hypercall.arg[1] = (unsigned long)nr_ops;
276 hypercall.arg[2] = (unsigned long)0;
277 hypercall.arg[3] = (unsigned long)dom;
279 ret = do_xen_hypercall(xc_handle, &hypercall);
281 hcall_buf_release((void **)&op, nr_ops*sizeof(*op));
283 out1:
284 return ret;
285 }
287 static int flush_mmu_updates(int xc_handle, struct xc_mmu *mmu)
288 {
289 int err = 0;
290 DECLARE_HYPERCALL;
292 if ( mmu->idx == 0 )
293 return 0;
295 hypercall.op = __HYPERVISOR_mmu_update;
296 hypercall.arg[0] = (unsigned long)mmu->updates;
297 hypercall.arg[1] = (unsigned long)mmu->idx;
298 hypercall.arg[2] = 0;
299 hypercall.arg[3] = mmu->subject;
301 if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 )
302 {
303 PERROR("flush_mmu_updates: mmu updates lock_pages failed");
304 err = 1;
305 goto out;
306 }
308 if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
309 {
310 ERROR("Failure when submitting mmu updates");
311 err = 1;
312 }
314 mmu->idx = 0;
316 unlock_pages(mmu->updates, sizeof(mmu->updates));
318 out:
319 return err;
320 }
322 struct xc_mmu *xc_alloc_mmu_updates(int xc_handle, domid_t dom)
323 {
324 struct xc_mmu *mmu = malloc(sizeof(*mmu));
325 if ( mmu == NULL )
326 return mmu;
327 mmu->idx = 0;
328 mmu->subject = dom;
329 return mmu;
330 }
332 int xc_add_mmu_update(int xc_handle, struct xc_mmu *mmu,
333 unsigned long long ptr, unsigned long long val)
334 {
335 mmu->updates[mmu->idx].ptr = ptr;
336 mmu->updates[mmu->idx].val = val;
338 if ( ++mmu->idx == MAX_MMU_UPDATES )
339 return flush_mmu_updates(xc_handle, mmu);
341 return 0;
342 }
/* Public wrapper: force submission of any queued mmu updates now. */
int xc_flush_mmu_updates(int xc_handle, struct xc_mmu *mmu)
{
    return flush_mmu_updates(xc_handle, mmu);
}
/*
 * __HYPERVISOR_memory_op wrapper.  The argument structure (and, for
 * reservation-style commands, the extent array it references) must be
 * resident across the hypercall, so the appropriate region is locked
 * before and unlocked after, keyed on 'cmd'.  Commands not listed in
 * the switch are passed through without locking.
 * Returns the hypercall result, or -EINVAL if locking failed.
 */
int xc_memory_op(int xc_handle,
                 int cmd,
                 void *arg)
{
    DECLARE_HYPERCALL;
    /* 'arg' is viewed through whichever struct matches 'cmd'. */
    struct xen_memory_reservation *reservation = arg;
    struct xen_machphys_mfn_list *xmml = arg;
    xen_pfn_t *extent_start;
    long ret = -EINVAL;

    hypercall.op = __HYPERVISOR_memory_op;
    hypercall.arg[0] = (unsigned long)cmd;
    hypercall.arg[1] = (unsigned long)arg;

    /* Pre-hypercall: lock the command-specific argument region. */
    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        if ( lock_pages(reservation, sizeof(*reservation)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        /* The extent array is optional for these commands. */
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( (extent_start != NULL) &&
             (lock_pages(extent_start,
                         reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
        {
            PERROR("Could not lock");
            unlock_pages(reservation, sizeof(*reservation));
            goto out1;
        }
        break;
    case XENMEM_machphys_mfn_list:
        if ( lock_pages(xmml, sizeof(*xmml)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        get_xen_guest_handle(extent_start, xmml->extent_start);
        if ( lock_pages(extent_start,
                        xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
        {
            PERROR("Could not lock");
            unlock_pages(xmml, sizeof(*xmml));
            goto out1;
        }
        break;
    case XENMEM_add_to_physmap:
        if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        /* These commands take just a domid_t as argument. */
        if ( lock_pages(arg, sizeof(domid_t)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    case XENMEM_set_pod_target:
    case XENMEM_get_pod_target:
        if ( lock_pages(arg, sizeof(struct xen_pod_target)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    /* Post-hypercall: unlock exactly what was locked above. */
    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        unlock_pages(reservation, sizeof(*reservation));
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( extent_start != NULL )
            unlock_pages(extent_start,
                         reservation->nr_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_machphys_mfn_list:
        unlock_pages(xmml, sizeof(*xmml));
        get_xen_guest_handle(extent_start, xmml->extent_start);
        unlock_pages(extent_start,
                     xmml->max_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_add_to_physmap:
        unlock_pages(arg, sizeof(struct xen_add_to_physmap));
        break;
    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        unlock_pages(arg, sizeof(domid_t));
        break;
    case XENMEM_set_pod_target:
    case XENMEM_get_pod_target:
        unlock_pages(arg, sizeof(struct xen_pod_target));
        break;
    }

 out1:
    return ret;
}
/*
 * Return the accumulated cpu_time of one vcpu of the given domain
 * (as reported by XEN_DOMCTL_getvcpuinfo), or -1 on error.
 */
long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid, int vcpu )
{
    DECLARE_DOMCTL;
    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
    domctl.domain = (domid_t)domid;
    domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
    if ( (do_domctl(xc_handle, &domctl) < 0) )
    {
        PERROR("Could not get info on domain");
        return -1;
    }
    return domctl.u.getvcpuinfo.cpu_time;
}
478 #ifndef __ia64__
479 int xc_get_pfn_list(int xc_handle,
480 uint32_t domid,
481 uint64_t *pfn_buf,
482 unsigned long max_pfns)
483 {
484 DECLARE_DOMCTL;
485 int ret;
486 domctl.cmd = XEN_DOMCTL_getmemlist;
487 domctl.domain = (domid_t)domid;
488 domctl.u.getmemlist.max_pfns = max_pfns;
489 set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);
491 #ifdef VALGRIND
492 memset(pfn_buf, 0, max_pfns * sizeof(*pfn_buf));
493 #endif
495 if ( lock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
496 {
497 PERROR("xc_get_pfn_list: pfn_buf lock failed");
498 return -1;
499 }
501 ret = do_domctl(xc_handle, &domctl);
503 unlock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf));
505 return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
506 }
507 #endif
509 long xc_get_tot_pages(int xc_handle, uint32_t domid)
510 {
511 DECLARE_DOMCTL;
512 domctl.cmd = XEN_DOMCTL_getdomaininfo;
513 domctl.domain = (domid_t)domid;
514 return (do_domctl(xc_handle, &domctl) < 0) ?
515 -1 : domctl.u.getdomaininfo.tot_pages;
516 }
518 int xc_copy_to_domain_page(int xc_handle,
519 uint32_t domid,
520 unsigned long dst_pfn,
521 const char *src_page)
522 {
523 void *vaddr = xc_map_foreign_range(
524 xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
525 if ( vaddr == NULL )
526 return -1;
527 memcpy(vaddr, src_page, PAGE_SIZE);
528 munmap(vaddr, PAGE_SIZE);
529 return 0;
530 }
532 int xc_clear_domain_page(int xc_handle,
533 uint32_t domid,
534 unsigned long dst_pfn)
535 {
536 void *vaddr = xc_map_foreign_range(
537 xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
538 if ( vaddr == NULL )
539 return -1;
540 memset(vaddr, 0, PAGE_SIZE);
541 munmap(vaddr, PAGE_SIZE);
542 return 0;
543 }
/* Public pass-through for raw domctl hypercalls. */
int xc_domctl(int xc_handle, struct xen_domctl *domctl)
{
    return do_domctl(xc_handle, domctl);
}
/* Public pass-through for raw sysctl hypercalls. */
int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl)
{
    return do_sysctl(xc_handle, sysctl);
}
555 int xc_version(int xc_handle, int cmd, void *arg)
556 {
557 int rc, argsize = 0;
559 switch ( cmd )
560 {
561 case XENVER_extraversion:
562 argsize = sizeof(xen_extraversion_t);
563 break;
564 case XENVER_compile_info:
565 argsize = sizeof(xen_compile_info_t);
566 break;
567 case XENVER_capabilities:
568 argsize = sizeof(xen_capabilities_info_t);
569 break;
570 case XENVER_changeset:
571 argsize = sizeof(xen_changeset_info_t);
572 break;
573 case XENVER_platform_parameters:
574 argsize = sizeof(xen_platform_parameters_t);
575 break;
576 }
578 if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) )
579 {
580 PERROR("Could not lock memory for version hypercall");
581 return -ENOMEM;
582 }
584 #ifdef VALGRIND
585 if (argsize != 0)
586 memset(arg, 0, argsize);
587 #endif
589 rc = do_xen_version(xc_handle, cmd, arg);
591 if ( argsize != 0 )
592 unlock_pages(arg, argsize);
594 return rc;
595 }
/*
 * Exchange machine frame 'mfn' for one below 4GB: release the old
 * frame, then allocate a replacement constrained to 32-bit addresses.
 * Returns the new mfn, or 0 on failure.  NOTE(review): if the increase
 * step fails the original frame has already been released — confirm
 * callers tolerate this.
 */
unsigned long xc_make_page_below_4G(
    int xc_handle, uint32_t domid, unsigned long mfn)
{
    xen_pfn_t old_mfn = mfn;
    xen_pfn_t new_mfn;

    if ( xc_domain_memory_decrease_reservation(
        xc_handle, domid, 1, 0, &old_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
        return 0;
    }

    if ( xc_domain_memory_increase_reservation(
        xc_handle, domid, 1, 0, XENMEMF_address_bits(32), &new_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
        return 0;
    }

    return new_mfn;
}
620 static void
621 _xc_clean_errbuf(void * m)
622 {
623 free(m);
624 pthread_setspecific(errbuf_pkey, NULL);
625 }
/* pthread_once() helper: create the TLS key for the safe_strerror()
 * buffer, registering _xc_clean_errbuf to run at thread exit. */
static void
_xc_init_errbuf(void)
{
    pthread_key_create(&errbuf_pkey, _xc_clean_errbuf);
}
633 char *safe_strerror(int errcode)
634 {
635 #define XS_BUFSIZE 32
636 char *errbuf;
637 static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
638 char *strerror_str;
640 pthread_once(&errbuf_pkey_once, _xc_init_errbuf);
642 errbuf = pthread_getspecific(errbuf_pkey);
643 if (errbuf == NULL) {
644 errbuf = malloc(XS_BUFSIZE);
645 pthread_setspecific(errbuf_pkey, errbuf);
646 }
648 /*
649 * Thread-unsafe strerror() is protected by a local mutex. We copy
650 * the string to a thread-private buffer before releasing the mutex.
651 */
652 pthread_mutex_lock(&mutex);
653 strerror_str = strerror(errcode);
654 strncpy(errbuf, strerror_str, XS_BUFSIZE);
655 errbuf[XS_BUFSIZE-1] = '\0';
656 pthread_mutex_unlock(&mutex);
658 return errbuf;
659 }
/*
 * Serialise an array of 64-bit words into bytes, least-significant
 * byte of each word first, emitting ceil(nbits/8) bytes total.
 */
void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits)
{
    int word, byte = 0;

    for (word = 0; nbits > 0; word++) {
        uint64_t v = lp[word];
        int k;

        for (k = 0; (k < (int)sizeof(v)) && (nbits > 0); k++) {
            bp[byte++] = (uint8_t)v;
            v >>= 8;
            nbits -= 8;
        }
    }
}
/*
 * Deserialise a little-endian byte stream into 64-bit words; bytes
 * beyond nbits are left as zero in the final word.
 */
void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits)
{
    int word, byte = 0;

    for (word = 0; nbits > 0; word++) {
        uint64_t v = 0;
        int k;

        for (k = 0; (k < (int)sizeof(v)) && (nbits > 0); k++) {
            v |= (uint64_t)bp[byte++] << (k*8);
            nbits -= 8;
        }
        lp[word] = v;
    }
}
/*
 * Read exactly 'size' bytes from fd, retrying on EINTR and on short
 * reads.  Returns 0 on success, -1 on error or premature EOF.
 */
int read_exact(int fd, void *data, size_t size)
{
    size_t done = 0;

    while ( done < size )
    {
        ssize_t n = read(fd, (char *)data + done, size - done);

        if ( (n == -1) && (errno == EINTR) )
            continue;
        if ( n <= 0 )
            return -1;   /* hard error or EOF */
        done += (size_t)n;
    }

    return 0;
}
/*
 * Write exactly 'size' bytes to fd, retrying on EINTR and on short
 * writes.  Returns 0 on success, -1 on error.
 */
int write_exact(int fd, const void *data, size_t size)
{
    size_t done = 0;

    while ( done < size )
    {
        ssize_t n = write(fd, (const char *)data + done, size - done);

        if ( (n == -1) && (errno == EINTR) )
            continue;
        if ( n <= 0 )
            return -1;
        done += (size_t)n;
    }

    return 0;
}
/* Find-first-set for a byte: 1-based index of the lowest set bit,
 * or 0 if x is zero. */
int xc_ffs8(uint8_t x)
{
    int pos = 1;

    while ( x != 0 )
    {
        if ( x & 1 )
            return pos;
        x >>= 1;
        pos++;
    }
    return 0;
}
/* Find-first-set for 16 bits, built on xc_ffs8(). */
int xc_ffs16(uint16_t x)
{
    uint8_t lo = (uint8_t)x;
    uint8_t hi = (uint8_t)(x >> 8);

    if ( lo != 0 )
        return xc_ffs8(lo);
    return hi ? xc_ffs8(hi) + 8 : 0;
}
/* Find-first-set for 32 bits, built on xc_ffs16(). */
int xc_ffs32(uint32_t x)
{
    uint16_t lo = (uint16_t)x;
    uint16_t hi = (uint16_t)(x >> 16);

    if ( lo != 0 )
        return xc_ffs16(lo);
    return hi ? xc_ffs16(hi) + 16 : 0;
}
/* Find-first-set for 64 bits, built on xc_ffs32(). */
int xc_ffs64(uint64_t x)
{
    uint32_t lo = (uint32_t)x;
    uint32_t hi = (uint32_t)(x >> 32);

    if ( lo != 0 )
        return xc_ffs32(lo);
    return hi ? xc_ffs32(hi) + 32 : 0;
}
/*
 * Allocate 'size' bytes aligned to 'alignment', using the best
 * primitive available on the platform.  Returns NULL on failure.
 */
void *xc_memalign(size_t alignment, size_t size)
{
#if defined(_POSIX_C_SOURCE) && !defined(__sun__)
    int ret;
    void *ptr;
    /* posix_memalign() reports failure via its return code, not errno. */
    ret = posix_memalign(&ptr, alignment, size);
    if (ret != 0)
        return NULL;
    return ptr;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
    /* valloc() gives page alignment only; the caller in this file
     * requests exactly PAGE_SIZE alignment, so that suffices here. */
    return valloc(size);
#else
    return memalign(alignment, size);
#endif
}
770 /*
771 * Local variables:
772 * mode: C
773 * c-set-style: "BSD"
774 * c-basic-offset: 4
775 * tab-width: 4
776 * indent-tabs-mode: nil
777 * End:
778 */