debuggers.hg

view tools/libxc/xc_private.c @ 16715:c5deb251b9dc

Update version to 3.2.0-rc4
author Keir Fraser <keir.fraser@citrix.com>
date Sat Dec 29 17:57:37 2007 +0000 (2007-12-29)
parents f669bf5c6720
children 4bdc3de246c3
line source
1 /******************************************************************************
2 * xc_private.c
3 *
4 * Helper functions for the rest of the library.
5 */
7 #include <inttypes.h>
8 #include "xc_private.h"
9 #include "xg_private.h"
10 #include <stdarg.h>
11 #include <pthread.h>
/* Pthread key (created once) holding each thread's private xc_error record. */
static pthread_key_t last_error_pkey;
static pthread_once_t last_error_pkey_once = PTHREAD_ONCE_INIT;

/* Pthread key (created once) holding each thread's safe_strerror() buffer. */
static pthread_key_t errbuf_pkey;
static pthread_once_t errbuf_pkey_once = PTHREAD_ONCE_INIT;

/*
 * Callback invoked by xc_set_error() after recording an error.
 * Defaults to stderr reporting in DEBUG builds, disabled otherwise.
 */
#if DEBUG
static xc_error_handler error_handler = xc_default_error_handler;
#else
static xc_error_handler error_handler = NULL;
#endif
25 void xc_default_error_handler(const xc_error *err)
26 {
27 const char *desc = xc_error_code_to_desc(err->code);
28 fprintf(stderr, "ERROR %s: %s\n", desc, err->message);
29 }
31 static void
32 _xc_clean_last_error(void *m)
33 {
34 free(m);
35 pthread_setspecific(last_error_pkey, NULL);
36 }
38 static void
39 _xc_init_last_error(void)
40 {
41 pthread_key_create(&last_error_pkey, _xc_clean_last_error);
42 }
44 static xc_error *
45 _xc_get_last_error(void)
46 {
47 xc_error *last_error;
49 pthread_once(&last_error_pkey_once, _xc_init_last_error);
51 last_error = pthread_getspecific(last_error_pkey);
52 if (last_error == NULL) {
53 last_error = malloc(sizeof(xc_error));
54 pthread_setspecific(last_error_pkey, last_error);
55 xc_clear_last_error();
56 }
58 return last_error;
59 }
61 const xc_error *xc_get_last_error(void)
62 {
63 return _xc_get_last_error();
64 }
66 void xc_clear_last_error(void)
67 {
68 xc_error *last_error = _xc_get_last_error();
69 last_error->code = XC_ERROR_NONE;
70 last_error->message[0] = '\0';
71 }
73 const char *xc_error_code_to_desc(int code)
74 {
75 /* Sync to members of xc_error_code enumeration in xenctrl.h */
76 switch ( code )
77 {
78 case XC_ERROR_NONE:
79 return "No error details";
80 case XC_INTERNAL_ERROR:
81 return "Internal error";
82 case XC_INVALID_KERNEL:
83 return "Invalid kernel";
84 case XC_INVALID_PARAM:
85 return "Invalid configuration";
86 case XC_OUT_OF_MEMORY:
87 return "Out of memory";
88 }
90 return "Unknown error code";
91 }
93 xc_error_handler xc_set_error_handler(xc_error_handler handler)
94 {
95 xc_error_handler old = error_handler;
96 error_handler = handler;
97 return old;
98 }
100 static void _xc_set_error(int code, const char *msg)
101 {
102 xc_error *last_error = _xc_get_last_error();
103 last_error->code = code;
104 strncpy(last_error->message, msg, XC_MAX_ERROR_MSG_LEN - 1);
105 last_error->message[XC_MAX_ERROR_MSG_LEN-1] = '\0';
106 }
108 void xc_set_error(int code, const char *fmt, ...)
109 {
110 int saved_errno = errno;
111 char msg[XC_MAX_ERROR_MSG_LEN];
112 va_list args;
114 va_start(args, fmt);
115 vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
116 msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';
117 va_end(args);
119 _xc_set_error(code, msg);
121 errno = saved_errno;
123 if ( error_handler != NULL ) {
124 xc_error *last_error = _xc_get_last_error();
125 error_handler(last_error);
126 }
127 }
129 int lock_pages(void *addr, size_t len)
130 {
131 int e = 0;
132 #ifndef __sun__
133 void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
134 size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
135 PAGE_SIZE - 1) & PAGE_MASK;
136 e = mlock(laddr, llen);
137 #endif
138 return e;
139 }
141 void unlock_pages(void *addr, size_t len)
142 {
143 #ifndef __sun__
144 void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
145 size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
146 PAGE_SIZE - 1) & PAGE_MASK;
147 safe_munlock(laddr, llen);
148 #endif
149 }
/*
 * Query page-frame type info for 'num' frames of domain 'dom' via
 * XEN_DOMCTL_getpageframeinfo2.  'arr' is passed as a guest handle, so
 * presumably the hypervisor rewrites it in place — confirm against the
 * domctl interface.
 * NB: arr must be locked (lock_pages()) by the caller.
 */
int xc_get_pfn_type_batch(int xc_handle,
                          uint32_t dom, int num, uint32_t *arr)
{
    DECLARE_DOMCTL;
    domctl.cmd = XEN_DOMCTL_getpageframeinfo2;
    domctl.domain = (domid_t)dom;
    domctl.u.getpageframeinfo2.num = num;
    set_xen_guest_handle(domctl.u.getpageframeinfo2.array, arr);
    return do_domctl(xc_handle, &domctl);
}
/*
 * Submit 'nr_ops' extended MMU operations from 'op' for domain 'dom'
 * via __HYPERVISOR_mmuext_op.  The op buffer is locked for the duration
 * of the hypercall.  Returns the hypercall result, or -EINVAL if the
 * buffer could not be locked.
 */
int xc_mmuext_op(
    int xc_handle,
    struct mmuext_op *op,
    unsigned int nr_ops,
    domid_t dom)
{
    DECLARE_HYPERCALL;
    long ret = -EINVAL;

    hypercall.op = __HYPERVISOR_mmuext_op;
    hypercall.arg[0] = (unsigned long)op;
    hypercall.arg[1] = (unsigned long)nr_ops;
    hypercall.arg[2] = (unsigned long)0; /* third argument unused here */
    hypercall.arg[3] = (unsigned long)dom;

    if ( lock_pages(op, nr_ops*sizeof(*op)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out1;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(op, nr_ops*sizeof(*op));

 out1:
    return ret;
}
/*
 * Push any queued mmu_update requests to the hypervisor and reset the
 * queue index.  Returns 0 on success (or empty queue), 1 on failure.
 */
static int flush_mmu_updates(int xc_handle, struct xc_mmu *mmu)
{
    int err = 0;
    DECLARE_HYPERCALL;

    /* Empty queue: nothing to submit. */
    if ( mmu->idx == 0 )
        return 0;

    hypercall.op = __HYPERVISOR_mmu_update;
    hypercall.arg[0] = (unsigned long)mmu->updates;
    hypercall.arg[1] = (unsigned long)mmu->idx;
    hypercall.arg[2] = 0;
    hypercall.arg[3] = mmu->subject;

    if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 )
    {
        /* NOTE(review): mmu->idx is NOT reset on this path, so a full
         * queue stays full; callers must cope with that. */
        PERROR("flush_mmu_updates: mmu updates lock_pages failed");
        err = 1;
        goto out;
    }

    if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    /* Queue is consumed (or abandoned) even if the hypercall failed. */
    mmu->idx = 0;

    unlock_pages(mmu->updates, sizeof(mmu->updates));

 out:
    return err;
}
227 struct xc_mmu *xc_alloc_mmu_updates(int xc_handle, domid_t dom)
228 {
229 struct xc_mmu *mmu = malloc(sizeof(*mmu));
230 if ( mmu == NULL )
231 return mmu;
232 mmu->idx = 0;
233 mmu->subject = dom;
234 return mmu;
235 }
237 int xc_add_mmu_update(int xc_handle, struct xc_mmu *mmu,
238 unsigned long long ptr, unsigned long long val)
239 {
240 mmu->updates[mmu->idx].ptr = ptr;
241 mmu->updates[mmu->idx].val = val;
243 if ( ++mmu->idx == MAX_MMU_UPDATES )
244 return flush_mmu_updates(xc_handle, mmu);
246 return 0;
247 }
/* Public wrapper: force any queued mmu updates out to the hypervisor. */
int xc_flush_mmu_updates(int xc_handle, struct xc_mmu *mmu)
{
    int rc = flush_mmu_updates(xc_handle, mmu);
    return rc;
}
/*
 * Wrapper for __HYPERVISOR_memory_op.  'arg' points at a different
 * structure depending on 'cmd'; each recognised command locks the
 * argument (and any extent array it references) around the hypercall
 * and unlocks it afterwards.  Unrecognised commands pass 'arg' through
 * without locking.  Returns the hypercall result, or -EINVAL if a lock
 * failed.
 */
int xc_memory_op(int xc_handle,
                 int cmd,
                 void *arg)
{
    DECLARE_HYPERCALL;
    struct xen_memory_reservation *reservation = arg;
    struct xen_machphys_mfn_list *xmml = arg;
    xen_pfn_t *extent_start;
    long ret = -EINVAL;

    hypercall.op = __HYPERVISOR_memory_op;
    hypercall.arg[0] = (unsigned long)cmd;
    hypercall.arg[1] = (unsigned long)arg;

    /* Lock the command-specific argument structures. */
    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        if ( lock_pages(reservation, sizeof(*reservation)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        /* The extent array may be absent; lock it only when present
         * (mirrored in the unlock switch below). */
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( (extent_start != NULL) &&
             (lock_pages(extent_start,
                    reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
        {
            PERROR("Could not lock");
            unlock_pages(reservation, sizeof(*reservation));
            goto out1;
        }
        break;
    case XENMEM_machphys_mfn_list:
        if ( lock_pages(xmml, sizeof(*xmml)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        get_xen_guest_handle(extent_start, xmml->extent_start);
        if ( lock_pages(extent_start,
                        xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
        {
            PERROR("Could not lock");
            unlock_pages(xmml, sizeof(*xmml));
            goto out1;
        }
        break;
    case XENMEM_add_to_physmap:
        if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        /* These commands take a bare domid_t as argument. */
        if ( lock_pages(arg, sizeof(domid_t)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    /* Exact mirror of the locking switch above. */
    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        unlock_pages(reservation, sizeof(*reservation));
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( extent_start != NULL )
            unlock_pages(extent_start,
                         reservation->nr_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_machphys_mfn_list:
        unlock_pages(xmml, sizeof(*xmml));
        get_xen_guest_handle(extent_start, xmml->extent_start);
        unlock_pages(extent_start,
                     xmml->max_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_add_to_physmap:
        unlock_pages(arg, sizeof(struct xen_add_to_physmap));
        break;
    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        unlock_pages(arg, sizeof(domid_t));
        break;
    }

 out1:
    return ret;
}
/*
 * Return the accumulated cpu_time reported by XEN_DOMCTL_getvcpuinfo
 * for one vcpu of domain 'domid', or -1 on failure.
 */
long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid, int vcpu )
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
    domctl.domain = (domid_t)domid;
    domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
    if ( (do_domctl(xc_handle, &domctl) < 0) )
    {
        PERROR("Could not get info on domain");
        return -1;
    }
    return domctl.u.getvcpuinfo.cpu_time;
}
#ifndef __ia64__
/*
 * Fill 'pfn_buf' with up to 'max_pfns' frame numbers of domain 'domid'
 * via XEN_DOMCTL_getmemlist.  Returns the number of entries written
 * (num_pfns), or -1 on failure (including failure to lock pfn_buf).
 */
int xc_get_pfn_list(int xc_handle,
                    uint32_t domid,
                    uint64_t *pfn_buf,
                    unsigned long max_pfns)
{
    DECLARE_DOMCTL;
    int ret;
    domctl.cmd = XEN_DOMCTL_getmemlist;
    domctl.domain = (domid_t)domid;
    domctl.u.getmemlist.max_pfns = max_pfns;
    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);

#ifdef VALGRIND
    /* Pre-clear the buffer so Valgrind treats it as initialised. */
    memset(pfn_buf, 0, max_pfns * sizeof(*pfn_buf));
#endif

    if ( lock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
    {
        PERROR("xc_get_pfn_list: pfn_buf lock failed");
        return -1;
    }

    ret = do_domctl(xc_handle, &domctl);

    unlock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf));

    return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
}
#endif
/*
 * Return the total page count of domain 'domid' as reported by
 * XEN_DOMCTL_getdomaininfo, or -1 on failure.
 */
long xc_get_tot_pages(int xc_handle, uint32_t domid)
{
    DECLARE_DOMCTL;
    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = (domid_t)domid;
    return (do_domctl(xc_handle, &domctl) < 0) ?
        -1 : domctl.u.getdomaininfo.tot_pages;
}
411 int xc_copy_to_domain_page(int xc_handle,
412 uint32_t domid,
413 unsigned long dst_pfn,
414 const char *src_page)
415 {
416 void *vaddr = xc_map_foreign_range(
417 xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
418 if ( vaddr == NULL )
419 return -1;
420 memcpy(vaddr, src_page, PAGE_SIZE);
421 munmap(vaddr, PAGE_SIZE);
422 return 0;
423 }
425 int xc_clear_domain_page(int xc_handle,
426 uint32_t domid,
427 unsigned long dst_pfn)
428 {
429 void *vaddr = xc_map_foreign_range(
430 xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
431 if ( vaddr == NULL )
432 return -1;
433 memset(vaddr, 0, PAGE_SIZE);
434 munmap(vaddr, PAGE_SIZE);
435 return 0;
436 }
/* Thin public wrapper around do_domctl(). */
int xc_domctl(int xc_handle, struct xen_domctl *domctl)
{
    int rc = do_domctl(xc_handle, domctl);
    return rc;
}
/* Thin public wrapper around do_sysctl(). */
int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl)
{
    int rc = do_sysctl(xc_handle, sysctl);
    return rc;
}
448 int xc_version(int xc_handle, int cmd, void *arg)
449 {
450 int rc, argsize = 0;
452 switch ( cmd )
453 {
454 case XENVER_extraversion:
455 argsize = sizeof(xen_extraversion_t);
456 break;
457 case XENVER_compile_info:
458 argsize = sizeof(xen_compile_info_t);
459 break;
460 case XENVER_capabilities:
461 argsize = sizeof(xen_capabilities_info_t);
462 break;
463 case XENVER_changeset:
464 argsize = sizeof(xen_changeset_info_t);
465 break;
466 case XENVER_platform_parameters:
467 argsize = sizeof(xen_platform_parameters_t);
468 break;
469 }
471 if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) )
472 {
473 PERROR("Could not lock memory for version hypercall");
474 return -ENOMEM;
475 }
477 #ifdef VALGRIND
478 if (argsize != 0)
479 memset(arg, 0, argsize);
480 #endif
482 rc = do_xen_version(xc_handle, cmd, arg);
484 if ( argsize != 0 )
485 unlock_pages(arg, argsize);
487 return rc;
488 }
490 unsigned long xc_make_page_below_4G(
491 int xc_handle, uint32_t domid, unsigned long mfn)
492 {
493 xen_pfn_t old_mfn = mfn;
494 xen_pfn_t new_mfn;
496 if ( xc_domain_memory_decrease_reservation(
497 xc_handle, domid, 1, 0, &old_mfn) != 0 )
498 {
499 DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
500 return 0;
501 }
503 if ( xc_domain_memory_increase_reservation(
504 xc_handle, domid, 1, 0, 32, &new_mfn) != 0 )
505 {
506 DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
507 return 0;
508 }
510 return new_mfn;
511 }
513 static void
514 _xc_clean_errbuf(void * m)
515 {
516 free(m);
517 pthread_setspecific(errbuf_pkey, NULL);
518 }
520 static void
521 _xc_init_errbuf(void)
522 {
523 pthread_key_create(&errbuf_pkey, _xc_clean_errbuf);
524 }
526 char *safe_strerror(int errcode)
527 {
528 #define XS_BUFSIZE 32
529 char *errbuf;
530 static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
531 char *strerror_str;
533 pthread_once(&errbuf_pkey_once, _xc_init_errbuf);
535 errbuf = pthread_getspecific(errbuf_pkey);
536 if (errbuf == NULL) {
537 errbuf = malloc(XS_BUFSIZE);
538 pthread_setspecific(errbuf_pkey, errbuf);
539 }
541 /*
542 * Thread-unsafe strerror() is protected by a local mutex. We copy
543 * the string to a thread-private buffer before releasing the mutex.
544 */
545 pthread_mutex_lock(&mutex);
546 strerror_str = strerror(errcode);
547 strncpy(errbuf, strerror_str, XS_BUFSIZE);
548 errbuf[XS_BUFSIZE-1] = '\0';
549 pthread_mutex_unlock(&mutex);
551 return errbuf;
552 }
554 void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits)
555 {
556 uint64_t l;
557 int i, j, b;
559 for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) {
560 l = lp[i];
561 for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) {
562 bp[b+j] = l;
563 l >>= 8;
564 nbits -= 8;
565 }
566 }
567 }
569 void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits)
570 {
571 uint64_t l;
572 int i, j, b;
574 for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) {
575 l = 0;
576 for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) {
577 l |= (uint64_t)bp[b+j] << (j*8);
578 nbits -= 8;
579 }
580 lp[i] = l;
581 }
582 }
/*
 * Read exactly 'size' bytes from 'fd' into 'data', retrying on EINTR.
 * Returns 0 on success; -1 on read error or premature EOF.
 */
int read_exact(int fd, void *data, size_t size)
{
    size_t done = 0;

    while ( done < size )
    {
        ssize_t n = read(fd, (char *)data + done, size - done);
        if ( (n == -1) && (errno == EINTR) )
            continue;
        if ( n <= 0 )
            return -1; /* hard error, or EOF before 'size' bytes */
        done += n;
    }

    return 0;
}
/*
 * Write exactly 'size' bytes from 'data' to 'fd', retrying on EINTR
 * and on short writes.  Returns 0 on success, -1 on error.
 */
int write_exact(int fd, const void *data, size_t size)
{
    size_t done = 0;

    while ( done < size )
    {
        ssize_t n = write(fd, (const char *)data + done, size - done);
        if ( (n == -1) && (errno == EINTR) )
            continue;
        if ( n <= 0 )
            return -1;
        done += n;
    }

    return 0;
}
620 /*
621 * Local variables:
622 * mode: C
623 * c-set-style: "BSD"
624 * c-basic-offset: 4
625 * tab-width: 4
626 * indent-tabs-mode: nil
627 * End:
628 */