debuggers.hg

view tools/libxc/xc_private.c @ 20838:0447c5532e9f

x86: add and use XEN_DOMCTL_getpageframeinfo3

To support wider than 28-bit MFNs, add XEN_DOMCTL_getpageframeinfo3
(with the type replacing the passed in MFN rather than getting or-ed
into it) to properly back xc_get_pfn_type_batch().

With xc_get_pfn_type_batch() only used internally to libxc, move its
prototype from xenctrl.h to xc_private.h.

This also fixes a couple of bugs in pre-existing code:
- the failure path for init_mem_info() leaked minfo->pfn_type,
- one error path of the XEN_DOMCTL_getpageframeinfo2 handler used
put_domain() where rcu_unlock_domain() was meant, and
- the XEN_DOMCTL_getpageframeinfo2 handler could call
xsm_getpageframeinfo() with an invalid struct page_info pointer.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 13 08:14:01 2010 +0000 (2010-01-13)
parents ee62aaafff46
children fbe8f32fa257
line source
1 /******************************************************************************
2 * xc_private.c
3 *
4 * Helper functions for the rest of the library.
5 */
7 #include <inttypes.h>
8 #include "xc_private.h"
9 #include "xg_private.h"
10 #include <stdarg.h>
11 #include <pthread.h>
/* Per-thread storage slot for the most recently recorded xc_error. */
static pthread_key_t last_error_pkey;
static pthread_once_t last_error_pkey_once = PTHREAD_ONCE_INIT;

/* Per-thread buffer used by safe_strerror(). */
static pthread_key_t errbuf_pkey;
static pthread_once_t errbuf_pkey_once = PTHREAD_ONCE_INIT;

/* In DEBUG builds errors are printed by default; otherwise no handler. */
#if DEBUG
static xc_error_handler error_handler = xc_default_error_handler;
#else
static xc_error_handler error_handler = NULL;
#endif
25 void xc_default_error_handler(const xc_error *err)
26 {
27 const char *desc = xc_error_code_to_desc(err->code);
28 fprintf(stderr, "ERROR %s: %s\n", desc, err->message);
29 }
31 static void
32 _xc_clean_last_error(void *m)
33 {
34 free(m);
35 pthread_setspecific(last_error_pkey, NULL);
36 }
38 static void
39 _xc_init_last_error(void)
40 {
41 pthread_key_create(&last_error_pkey, _xc_clean_last_error);
42 }
44 static xc_error *
45 _xc_get_last_error(void)
46 {
47 xc_error *last_error;
49 pthread_once(&last_error_pkey_once, _xc_init_last_error);
51 last_error = pthread_getspecific(last_error_pkey);
52 if (last_error == NULL) {
53 last_error = malloc(sizeof(xc_error));
54 pthread_setspecific(last_error_pkey, last_error);
55 xc_clear_last_error();
56 }
58 return last_error;
59 }
61 const xc_error *xc_get_last_error(void)
62 {
63 return _xc_get_last_error();
64 }
66 void xc_clear_last_error(void)
67 {
68 xc_error *last_error = _xc_get_last_error();
69 last_error->code = XC_ERROR_NONE;
70 last_error->message[0] = '\0';
71 }
73 const char *xc_error_code_to_desc(int code)
74 {
75 /* Sync to members of xc_error_code enumeration in xenctrl.h */
76 switch ( code )
77 {
78 case XC_ERROR_NONE:
79 return "No error details";
80 case XC_INTERNAL_ERROR:
81 return "Internal error";
82 case XC_INVALID_KERNEL:
83 return "Invalid kernel";
84 case XC_INVALID_PARAM:
85 return "Invalid configuration";
86 case XC_OUT_OF_MEMORY:
87 return "Out of memory";
88 }
90 return "Unknown error code";
91 }
93 xc_error_handler xc_set_error_handler(xc_error_handler handler)
94 {
95 xc_error_handler old = error_handler;
96 error_handler = handler;
97 return old;
98 }
100 static void _xc_set_error(int code, const char *msg)
101 {
102 xc_error *last_error = _xc_get_last_error();
103 last_error->code = code;
104 strncpy(last_error->message, msg, XC_MAX_ERROR_MSG_LEN - 1);
105 last_error->message[XC_MAX_ERROR_MSG_LEN-1] = '\0';
106 }
108 void xc_set_error(int code, const char *fmt, ...)
109 {
110 int saved_errno = errno;
111 char msg[XC_MAX_ERROR_MSG_LEN];
112 va_list args;
114 va_start(args, fmt);
115 vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
116 msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';
117 va_end(args);
119 _xc_set_error(code, msg);
121 errno = saved_errno;
123 if ( error_handler != NULL ) {
124 xc_error *last_error = _xc_get_last_error();
125 error_handler(last_error);
126 }
127 }
129 int lock_pages(void *addr, size_t len)
130 {
131 int e = 0;
132 #ifndef __sun__
133 void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
134 size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
135 PAGE_SIZE - 1) & PAGE_MASK;
136 e = mlock(laddr, llen);
137 #endif
138 return e;
139 }
141 void unlock_pages(void *addr, size_t len)
142 {
143 #ifndef __sun__
144 void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
145 size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
146 PAGE_SIZE - 1) & PAGE_MASK;
147 safe_munlock(laddr, llen);
148 #endif
149 }
151 /* NB: arr must be locked */
152 int xc_get_pfn_type_batch(int xc_handle, uint32_t dom,
153 unsigned int num, xen_pfn_t *arr)
154 {
155 DECLARE_DOMCTL;
156 domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
157 domctl.domain = (domid_t)dom;
158 domctl.u.getpageframeinfo3.num = num;
159 set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
160 return do_domctl(xc_handle, &domctl);
161 }
163 int xc_mmuext_op(
164 int xc_handle,
165 struct mmuext_op *op,
166 unsigned int nr_ops,
167 domid_t dom)
168 {
169 DECLARE_HYPERCALL;
170 long ret = -EINVAL;
172 hypercall.op = __HYPERVISOR_mmuext_op;
173 hypercall.arg[0] = (unsigned long)op;
174 hypercall.arg[1] = (unsigned long)nr_ops;
175 hypercall.arg[2] = (unsigned long)0;
176 hypercall.arg[3] = (unsigned long)dom;
178 if ( lock_pages(op, nr_ops*sizeof(*op)) != 0 )
179 {
180 PERROR("Could not lock memory for Xen hypercall");
181 goto out1;
182 }
184 ret = do_xen_hypercall(xc_handle, &hypercall);
186 unlock_pages(op, nr_ops*sizeof(*op));
188 out1:
189 return ret;
190 }
192 static int flush_mmu_updates(int xc_handle, struct xc_mmu *mmu)
193 {
194 int err = 0;
195 DECLARE_HYPERCALL;
197 if ( mmu->idx == 0 )
198 return 0;
200 hypercall.op = __HYPERVISOR_mmu_update;
201 hypercall.arg[0] = (unsigned long)mmu->updates;
202 hypercall.arg[1] = (unsigned long)mmu->idx;
203 hypercall.arg[2] = 0;
204 hypercall.arg[3] = mmu->subject;
206 if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 )
207 {
208 PERROR("flush_mmu_updates: mmu updates lock_pages failed");
209 err = 1;
210 goto out;
211 }
213 if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
214 {
215 ERROR("Failure when submitting mmu updates");
216 err = 1;
217 }
219 mmu->idx = 0;
221 unlock_pages(mmu->updates, sizeof(mmu->updates));
223 out:
224 return err;
225 }
227 struct xc_mmu *xc_alloc_mmu_updates(int xc_handle, domid_t dom)
228 {
229 struct xc_mmu *mmu = malloc(sizeof(*mmu));
230 if ( mmu == NULL )
231 return mmu;
232 mmu->idx = 0;
233 mmu->subject = dom;
234 return mmu;
235 }
237 int xc_add_mmu_update(int xc_handle, struct xc_mmu *mmu,
238 unsigned long long ptr, unsigned long long val)
239 {
240 mmu->updates[mmu->idx].ptr = ptr;
241 mmu->updates[mmu->idx].val = val;
243 if ( ++mmu->idx == MAX_MMU_UPDATES )
244 return flush_mmu_updates(xc_handle, mmu);
246 return 0;
247 }
/* Public wrapper: force out any pending queued mmu updates. */
int xc_flush_mmu_updates(int xc_handle, struct xc_mmu *mmu)
{
    int rc = flush_mmu_updates(xc_handle, mmu);
    return rc;
}
254 int xc_memory_op(int xc_handle,
255 int cmd,
256 void *arg)
257 {
258 DECLARE_HYPERCALL;
259 struct xen_memory_reservation *reservation = arg;
260 struct xen_machphys_mfn_list *xmml = arg;
261 xen_pfn_t *extent_start;
262 long ret = -EINVAL;
264 hypercall.op = __HYPERVISOR_memory_op;
265 hypercall.arg[0] = (unsigned long)cmd;
266 hypercall.arg[1] = (unsigned long)arg;
268 switch ( cmd )
269 {
270 case XENMEM_increase_reservation:
271 case XENMEM_decrease_reservation:
272 case XENMEM_populate_physmap:
273 if ( lock_pages(reservation, sizeof(*reservation)) != 0 )
274 {
275 PERROR("Could not lock");
276 goto out1;
277 }
278 get_xen_guest_handle(extent_start, reservation->extent_start);
279 if ( (extent_start != NULL) &&
280 (lock_pages(extent_start,
281 reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
282 {
283 PERROR("Could not lock");
284 unlock_pages(reservation, sizeof(*reservation));
285 goto out1;
286 }
287 break;
288 case XENMEM_machphys_mfn_list:
289 if ( lock_pages(xmml, sizeof(*xmml)) != 0 )
290 {
291 PERROR("Could not lock");
292 goto out1;
293 }
294 get_xen_guest_handle(extent_start, xmml->extent_start);
295 if ( lock_pages(extent_start,
296 xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
297 {
298 PERROR("Could not lock");
299 unlock_pages(xmml, sizeof(*xmml));
300 goto out1;
301 }
302 break;
303 case XENMEM_add_to_physmap:
304 if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) )
305 {
306 PERROR("Could not lock");
307 goto out1;
308 }
309 break;
310 case XENMEM_current_reservation:
311 case XENMEM_maximum_reservation:
312 case XENMEM_maximum_gpfn:
313 if ( lock_pages(arg, sizeof(domid_t)) )
314 {
315 PERROR("Could not lock");
316 goto out1;
317 }
318 break;
319 case XENMEM_set_pod_target:
320 case XENMEM_get_pod_target:
321 if ( lock_pages(arg, sizeof(struct xen_pod_target)) )
322 {
323 PERROR("Could not lock");
324 goto out1;
325 }
326 break;
327 }
329 ret = do_xen_hypercall(xc_handle, &hypercall);
331 switch ( cmd )
332 {
333 case XENMEM_increase_reservation:
334 case XENMEM_decrease_reservation:
335 case XENMEM_populate_physmap:
336 unlock_pages(reservation, sizeof(*reservation));
337 get_xen_guest_handle(extent_start, reservation->extent_start);
338 if ( extent_start != NULL )
339 unlock_pages(extent_start,
340 reservation->nr_extents * sizeof(xen_pfn_t));
341 break;
342 case XENMEM_machphys_mfn_list:
343 unlock_pages(xmml, sizeof(*xmml));
344 get_xen_guest_handle(extent_start, xmml->extent_start);
345 unlock_pages(extent_start,
346 xmml->max_extents * sizeof(xen_pfn_t));
347 break;
348 case XENMEM_add_to_physmap:
349 unlock_pages(arg, sizeof(struct xen_add_to_physmap));
350 break;
351 case XENMEM_current_reservation:
352 case XENMEM_maximum_reservation:
353 case XENMEM_maximum_gpfn:
354 unlock_pages(arg, sizeof(domid_t));
355 break;
356 case XENMEM_set_pod_target:
357 case XENMEM_get_pod_target:
358 unlock_pages(arg, sizeof(struct xen_pod_target));
359 break;
360 }
362 out1:
363 return ret;
364 }
367 long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid, int vcpu )
368 {
369 DECLARE_DOMCTL;
371 domctl.cmd = XEN_DOMCTL_getvcpuinfo;
372 domctl.domain = (domid_t)domid;
373 domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
374 if ( (do_domctl(xc_handle, &domctl) < 0) )
375 {
376 PERROR("Could not get info on domain");
377 return -1;
378 }
379 return domctl.u.getvcpuinfo.cpu_time;
380 }
383 #ifndef __ia64__
384 int xc_get_pfn_list(int xc_handle,
385 uint32_t domid,
386 uint64_t *pfn_buf,
387 unsigned long max_pfns)
388 {
389 DECLARE_DOMCTL;
390 int ret;
391 domctl.cmd = XEN_DOMCTL_getmemlist;
392 domctl.domain = (domid_t)domid;
393 domctl.u.getmemlist.max_pfns = max_pfns;
394 set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);
396 #ifdef VALGRIND
397 memset(pfn_buf, 0, max_pfns * sizeof(*pfn_buf));
398 #endif
400 if ( lock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
401 {
402 PERROR("xc_get_pfn_list: pfn_buf lock failed");
403 return -1;
404 }
406 ret = do_domctl(xc_handle, &domctl);
408 unlock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf));
410 return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
411 }
412 #endif
414 long xc_get_tot_pages(int xc_handle, uint32_t domid)
415 {
416 DECLARE_DOMCTL;
417 domctl.cmd = XEN_DOMCTL_getdomaininfo;
418 domctl.domain = (domid_t)domid;
419 return (do_domctl(xc_handle, &domctl) < 0) ?
420 -1 : domctl.u.getdomaininfo.tot_pages;
421 }
423 int xc_copy_to_domain_page(int xc_handle,
424 uint32_t domid,
425 unsigned long dst_pfn,
426 const char *src_page)
427 {
428 void *vaddr = xc_map_foreign_range(
429 xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
430 if ( vaddr == NULL )
431 return -1;
432 memcpy(vaddr, src_page, PAGE_SIZE);
433 munmap(vaddr, PAGE_SIZE);
434 return 0;
435 }
437 int xc_clear_domain_page(int xc_handle,
438 uint32_t domid,
439 unsigned long dst_pfn)
440 {
441 void *vaddr = xc_map_foreign_range(
442 xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
443 if ( vaddr == NULL )
444 return -1;
445 memset(vaddr, 0, PAGE_SIZE);
446 munmap(vaddr, PAGE_SIZE);
447 return 0;
448 }
/* Public pass-through to the internal domctl hypercall wrapper. */
int xc_domctl(int xc_handle, struct xen_domctl *domctl)
{
    int rc = do_domctl(xc_handle, domctl);
    return rc;
}
/* Public pass-through to the internal sysctl hypercall wrapper. */
int xc_sysctl(int xc_handle, struct xen_sysctl *sysctl)
{
    int rc = do_sysctl(xc_handle, sysctl);
    return rc;
}
460 int xc_version(int xc_handle, int cmd, void *arg)
461 {
462 int rc, argsize = 0;
464 switch ( cmd )
465 {
466 case XENVER_extraversion:
467 argsize = sizeof(xen_extraversion_t);
468 break;
469 case XENVER_compile_info:
470 argsize = sizeof(xen_compile_info_t);
471 break;
472 case XENVER_capabilities:
473 argsize = sizeof(xen_capabilities_info_t);
474 break;
475 case XENVER_changeset:
476 argsize = sizeof(xen_changeset_info_t);
477 break;
478 case XENVER_platform_parameters:
479 argsize = sizeof(xen_platform_parameters_t);
480 break;
481 }
483 if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) )
484 {
485 PERROR("Could not lock memory for version hypercall");
486 return -ENOMEM;
487 }
489 #ifdef VALGRIND
490 if (argsize != 0)
491 memset(arg, 0, argsize);
492 #endif
494 rc = do_xen_version(xc_handle, cmd, arg);
496 if ( argsize != 0 )
497 unlock_pages(arg, argsize);
499 return rc;
500 }
502 unsigned long xc_make_page_below_4G(
503 int xc_handle, uint32_t domid, unsigned long mfn)
504 {
505 xen_pfn_t old_mfn = mfn;
506 xen_pfn_t new_mfn;
508 if ( xc_domain_memory_decrease_reservation(
509 xc_handle, domid, 1, 0, &old_mfn) != 0 )
510 {
511 DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
512 return 0;
513 }
515 if ( xc_domain_memory_increase_reservation(
516 xc_handle, domid, 1, 0, XENMEMF_address_bits(32), &new_mfn) != 0 )
517 {
518 DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
519 return 0;
520 }
522 return new_mfn;
523 }
525 static void
526 _xc_clean_errbuf(void * m)
527 {
528 free(m);
529 pthread_setspecific(errbuf_pkey, NULL);
530 }
532 static void
533 _xc_init_errbuf(void)
534 {
535 pthread_key_create(&errbuf_pkey, _xc_clean_errbuf);
536 }
538 char *safe_strerror(int errcode)
539 {
540 #define XS_BUFSIZE 32
541 char *errbuf;
542 static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
543 char *strerror_str;
545 pthread_once(&errbuf_pkey_once, _xc_init_errbuf);
547 errbuf = pthread_getspecific(errbuf_pkey);
548 if (errbuf == NULL) {
549 errbuf = malloc(XS_BUFSIZE);
550 pthread_setspecific(errbuf_pkey, errbuf);
551 }
553 /*
554 * Thread-unsafe strerror() is protected by a local mutex. We copy
555 * the string to a thread-private buffer before releasing the mutex.
556 */
557 pthread_mutex_lock(&mutex);
558 strerror_str = strerror(errcode);
559 strncpy(errbuf, strerror_str, XS_BUFSIZE);
560 errbuf[XS_BUFSIZE-1] = '\0';
561 pthread_mutex_unlock(&mutex);
563 return errbuf;
564 }
566 void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits)
567 {
568 uint64_t l;
569 int i, j, b;
571 for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) {
572 l = lp[i];
573 for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) {
574 bp[b+j] = l;
575 l >>= 8;
576 nbits -= 8;
577 }
578 }
579 }
581 void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits)
582 {
583 uint64_t l;
584 int i, j, b;
586 for (i = 0, b = 0; nbits > 0; i++, b += sizeof(l)) {
587 l = 0;
588 for (j = 0; (j < sizeof(l)) && (nbits > 0); j++) {
589 l |= (uint64_t)bp[b+j] << (j*8);
590 nbits -= 8;
591 }
592 lp[i] = l;
593 }
594 }
/*
 * Read exactly size bytes from fd, retrying on EINTR and continuing
 * across short reads.  Returns 0 on success, -1 on error or EOF
 * before size bytes arrived.
 */
int read_exact(int fd, void *data, size_t size)
{
    size_t done = 0;

    while ( done < size )
    {
        ssize_t n = read(fd, (char *)data + done, size - done);

        if ( n == -1 && errno == EINTR )
            continue;       /* interrupted: retry the same chunk */
        if ( n <= 0 )
            return -1;      /* hard error or premature EOF */
        done += n;
    }

    return 0;
}
/*
 * Write exactly size bytes to fd, retrying on EINTR and continuing
 * across short writes.  Returns 0 on success, -1 on error.
 */
int write_exact(int fd, const void *data, size_t size)
{
    size_t done = 0;

    while ( done < size )
    {
        ssize_t n = write(fd, (const char *)data + done, size - done);

        if ( n == -1 && errno == EINTR )
            continue;       /* interrupted: retry the same chunk */
        if ( n <= 0 )
            return -1;      /* hard error */
        done += n;
    }

    return 0;
}
632 int xc_ffs8(uint8_t x)
633 {
634 int i;
635 for ( i = 0; i < 8; i++ )
636 if ( x & (1u << i) )
637 return i+1;
638 return 0;
639 }
641 int xc_ffs16(uint16_t x)
642 {
643 uint8_t h = x>>8, l = x;
644 return l ? xc_ffs8(l) : h ? xc_ffs8(h) + 8 : 0;
645 }
647 int xc_ffs32(uint32_t x)
648 {
649 uint16_t h = x>>16, l = x;
650 return l ? xc_ffs16(l) : h ? xc_ffs16(h) + 16 : 0;
651 }
653 int xc_ffs64(uint64_t x)
654 {
655 uint32_t h = x>>32, l = x;
656 return l ? xc_ffs32(l) : h ? xc_ffs32(h) + 32 : 0;
657 }
659 /*
660 * Local variables:
661 * mode: C
662 * c-set-style: "BSD"
663 * c-basic-offset: 4
664 * tab-width: 4
665 * indent-tabs-mode: nil
666 * End:
667 */