debuggers.hg

view tools/libxc/xc_private.c @ 21950:6a0dd2c29999

libxc: free thread specific hypercall buffer on xc_interface_close

The per-thread hypercall buffer is usually cleaned up on pthread_exit
by the destructor passed to pthread_key_create. However if the calling
application is not threaded then the destructor is never called.

This frees the data for the current thread only but that is OK since
any other threads will be cleaned up by the destructor.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Ian Campbell <ian.campbell@citrix.com>
date Fri Jul 30 16:20:48 2010 +0100 (2010-07-30)
parents c81c215965f5
children 624c5fdf0284
line source
1 /******************************************************************************
2 * xc_private.c
3 *
4 * Helper functions for the rest of the library.
5 */
7 #include <inttypes.h>
8 #include "xc_private.h"
9 #include "xg_private.h"
10 #include "xc_dom.h"
11 #include <stdarg.h>
12 #include <stdlib.h>
13 #include <malloc.h>
14 #include <unistd.h>
15 #include <pthread.h>
16 #include <assert.h>
/*
 * Open a libxc interface handle.
 *
 * logger / dombuild_logger: optional caller-supplied loggers; they are
 * borrowed (never destroyed by libxc).  If logger is NULL a stderr
 * logger is created and owned by the handle (destroyed on close).
 * open_flags: XC_OPENFLAG_DUMMY skips opening the hypervisor device.
 *
 * Returns a heap-allocated handle, or 0 on failure.
 */
xc_interface *xc_interface_open(xentoollog_logger *logger,
                                xentoollog_logger *dombuild_logger,
                                unsigned open_flags) {
    /* Build the handle in a stack buffer first so the error path can
     * log through it even before (or if) the heap copy exists. */
    xc_interface xch_buf, *xch = &xch_buf;

    xch->fd = -1;
    xch->dombuild_logger_file = 0;
    xc_clear_last_error(xch);

    /* *_tofree stays 0 for borrowed loggers; it is only set when we
     * allocate a logger ourselves, below. */
    xch->error_handler = logger; xch->error_handler_tofree = 0;
    xch->dombuild_logger = dombuild_logger; xch->dombuild_logger_tofree = 0;

    if (!xch->error_handler) {
        xch->error_handler = xch->error_handler_tofree =
            (xentoollog_logger*)
            xtl_createlogger_stdiostream(stderr, XTL_PROGRESS, 0);
        if (!xch->error_handler)
            goto err;
    }

    /* Promote to the heap; on allocation failure fall back to the
     * stack copy so PERROR below still has a usable handle. */
    xch = malloc(sizeof(*xch));
    if (!xch) {
        xch = &xch_buf;
        PERROR("Could not allocate new xc_interface struct");
        goto err;
    }
    *xch = xch_buf;

    if (!(open_flags & XC_OPENFLAG_DUMMY)) {
        xch->fd = xc_interface_open_core(xch);
        if (xch->fd < 0)
            goto err;
    }

    return xch;

 err:
    /* Destroy only a logger we created; free only a heap handle. */
    if (xch) xtl_logger_destroy(xch->error_handler_tofree);
    if (xch != &xch_buf) free(xch);
    return 0;
}
60 static void xc_clean_hcall_buf(void);
62 int xc_interface_close(xc_interface *xch)
63 {
64 int rc = 0;
66 xtl_logger_destroy(xch->dombuild_logger_tofree);
67 xtl_logger_destroy(xch->error_handler_tofree);
69 if (xch->fd >= 0) {
70 rc = xc_interface_close_core(xch, xch->fd);
71 if (rc) PERROR("Could not close hypervisor interface");
72 }
74 xc_clean_hcall_buf();
76 free(xch);
77 return rc;
78 }
80 static pthread_key_t errbuf_pkey;
81 static pthread_once_t errbuf_pkey_once = PTHREAD_ONCE_INIT;
/* Return the last error recorded on this handle.  The storage lives
 * inside xch and is overwritten by the next error-level report; the
 * caller must not free it. */
const xc_error *xc_get_last_error(xc_interface *xch)
{
    return &xch->last_error;
}
88 void xc_clear_last_error(xc_interface *xch)
89 {
90 xch->last_error.code = XC_ERROR_NONE;
91 xch->last_error.message[0] = '\0';
92 }
94 const char *xc_error_code_to_desc(int code)
95 {
96 /* Sync to members of xc_error_code enumeration in xenctrl.h */
97 switch ( code )
98 {
99 case XC_ERROR_NONE:
100 return "No error details";
101 case XC_INTERNAL_ERROR:
102 return "Internal error";
103 case XC_INVALID_KERNEL:
104 return "Invalid kernel";
105 case XC_INVALID_PARAM:
106 return "Invalid configuration";
107 case XC_OUT_OF_MEMORY:
108 return "Out of memory";
109 }
111 return "Unknown error code";
112 }
/*
 * Core reporting routine: format the message, record it as the handle's
 * last error when level is XTL_ERROR or above, and forward it to the
 * given logger.  errno is preserved across the call so PERROR-style
 * wrappers can report it afterwards.
 */
void xc_reportv(xc_interface *xch, xentoollog_logger *lg,
                xentoollog_level level, int code,
                const char *fmt, va_list args) {
    int saved_errno = errno;
    char msgbuf[XC_MAX_ERROR_MSG_LEN];
    char *msg;

    /* Strip newlines from messages.
     * XXX really the messages themselves should have the newlines removed.
     */
    char fmt_nonewline[512];
    int fmt_l;

    fmt_l = strlen(fmt);
    /* Only strip when the copy fits; otherwise log the format as-is. */
    if (fmt_l && fmt[fmt_l-1]=='\n' && fmt_l < sizeof(fmt_nonewline)) {
        memcpy(fmt_nonewline, fmt, fmt_l-1);
        fmt_nonewline[fmt_l-1] = 0;
        fmt = fmt_nonewline;
    }

    /* Errors format straight into the last_error slot so the text is
     * retrievable via xc_get_last_error(); lower levels use the stack. */
    if ( level >= XTL_ERROR ) {
        msg = xch->last_error.message;
        xch->last_error.code = code;
    } else {
        msg = msgbuf;
    }
    vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
    msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';

    /* Append the code's description, if any, after the message. */
    xtl_log(lg, level, -1, "xc",
            "%s" "%s%s", msg,
            code?": ":"", code ? xc_error_code_to_desc(code) : "");

    errno = saved_errno;
}
150 void xc_report(xc_interface *xch, xentoollog_logger *lg,
151 xentoollog_level level, int code, const char *fmt, ...) {
152 va_list args;
153 va_start(args,fmt);
154 xc_reportv(xch,lg,level,code,fmt,args);
155 va_end(args);
156 }
158 void xc_report_error(xc_interface *xch, int code, const char *fmt, ...)
159 {
160 va_list args;
161 va_start(args, fmt);
162 xc_reportv(xch, xch->error_handler, XTL_ERROR, code, fmt, args);
163 va_end(args);
164 }
/* Begin a progress-reporting phase: remember the description so that
 * subsequent xc_report_progress_step() calls can reuse it, and emit an
 * initial 0-of-total progress record. */
void xc_report_progress_start(xc_interface *xch, const char *doing,
                              unsigned long total) {
    xch->currently_progress_reporting = doing;
    xtl_progress(xch->error_handler, "xc", xch->currently_progress_reporting,
                 0, total);
}
/* Report done-of-total progress for the phase started by
 * xc_report_progress_start(); asserts that a phase is active. */
void xc_report_progress_step(xc_interface *xch,
                             unsigned long done, unsigned long total) {
    assert(xch->currently_progress_reporting);
    xtl_progress(xch->error_handler, "xc", xch->currently_progress_reporting,
                 done, total);
}
180 #ifdef __sun__
/* Solaris build: page locking and the hypercall bounce buffer are not
 * used, so all of these helpers are no-op stubs. */
int lock_pages(void *addr, size_t len) { return 0; }
void unlock_pages(void *addr, size_t len) { }

int hcall_buf_prep(void **addr, size_t len) { return 0; }
void hcall_buf_release(void **addr, size_t len) { }

static void xc_clean_hcall_buf(void) { }
190 #else /* !__sun__ */
192 int lock_pages(void *addr, size_t len)
193 {
194 int e;
195 void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
196 size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
197 PAGE_SIZE - 1) & PAGE_MASK;
198 e = mlock(laddr, llen);
199 return e;
200 }
202 void unlock_pages(void *addr, size_t len)
203 {
204 void *laddr = (void *)((unsigned long)addr & PAGE_MASK);
205 size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) +
206 PAGE_SIZE - 1) & PAGE_MASK;
207 safe_munlock(laddr, llen);
208 }
/* Per-thread hypercall bounce buffer: buf is one page-locked page used
 * to stage small hypercall arguments; oldbuf remembers the caller's
 * original pointer while it is swapped out (see hcall_buf_prep /
 * hcall_buf_release). */
static pthread_key_t hcall_buf_pkey;
static pthread_once_t hcall_buf_pkey_once = PTHREAD_ONCE_INIT;
struct hcall_buf {
    void *buf;
    void *oldbuf;
};
217 static void _xc_clean_hcall_buf(void *m)
218 {
219 struct hcall_buf *hcall_buf = m;
221 if ( hcall_buf )
222 {
223 if ( hcall_buf->buf )
224 {
225 unlock_pages(hcall_buf->buf, PAGE_SIZE);
226 free(hcall_buf->buf);
227 }
229 free(hcall_buf);
230 }
232 pthread_setspecific(hcall_buf_pkey, NULL);
233 }
/* Free the *current* thread's hypercall buffer.  Called from
 * xc_interface_close so that non-threaded applications (whose pthread
 * key destructor never runs) do not leak the buffer.
 *
 * NOTE(review): if hcall_buf_prep() was never called on any thread,
 * hcall_buf_pkey has not been created and POSIX leaves
 * pthread_getspecific() on it undefined -- confirm this path is only
 * reached after at least one hypercall, or guard with pthread_once. */
static void xc_clean_hcall_buf(void)
{
    void *hcall_buf = pthread_getspecific(hcall_buf_pkey);

    if (hcall_buf)
        _xc_clean_hcall_buf(hcall_buf);
}
/* One-time initializer (via pthread_once): create the hypercall-buffer
 * key with _xc_clean_hcall_buf as the thread-exit destructor. */
static void _xc_init_hcall_buf(void)
{
    pthread_key_create(&hcall_buf_pkey, _xc_clean_hcall_buf);
}
/*
 * Prepare *addr (len bytes) for use as a hypercall argument.  If the
 * data fits in a page and the per-thread bounce buffer is free, the
 * data is copied into the pre-locked bounce page and *addr is pointed
 * at it; otherwise the caller's own memory is locked in place.
 * Returns 0 on success.  Must be paired with hcall_buf_release().
 */
int hcall_buf_prep(void **addr, size_t len)
{
    struct hcall_buf *hcall_buf;

    pthread_once(&hcall_buf_pkey_once, _xc_init_hcall_buf);

    /* Lazily allocate this thread's bookkeeping struct. */
    hcall_buf = pthread_getspecific(hcall_buf_pkey);
    if ( !hcall_buf )
    {
        hcall_buf = calloc(1, sizeof(*hcall_buf));
        if ( !hcall_buf )
            goto out;
        pthread_setspecific(hcall_buf_pkey, hcall_buf);
    }

    /* Lazily allocate and lock the one-page bounce buffer. */
    if ( !hcall_buf->buf )
    {
        hcall_buf->buf = xc_memalign(PAGE_SIZE, PAGE_SIZE);
        if ( !hcall_buf->buf || lock_pages(hcall_buf->buf, PAGE_SIZE) )
        {
            free(hcall_buf->buf);
            hcall_buf->buf = NULL;
            goto out;
        }
    }

    /* Fast path: stage the data in the bounce page.  oldbuf doubles as
     * the "buffer in use" flag for nested preps. */
    if ( (len < PAGE_SIZE) && !hcall_buf->oldbuf )
    {
        memcpy(hcall_buf->buf, *addr, len);
        hcall_buf->oldbuf = *addr;
        *addr = hcall_buf->buf;
        return 0;
    }

 out:
    /* Fallback: lock the caller's memory directly. */
    return lock_pages(*addr, len);
}
/*
 * Undo hcall_buf_prep(): if *addr is this thread's bounce buffer, copy
 * the (possibly Xen-modified) data back to the caller's original
 * memory and restore *addr; otherwise simply unlock the caller's
 * memory that was locked in place.
 */
void hcall_buf_release(void **addr, size_t len)
{
    struct hcall_buf *hcall_buf = pthread_getspecific(hcall_buf_pkey);

    if ( hcall_buf && (hcall_buf->buf == *addr) )
    {
        memcpy(hcall_buf->oldbuf, *addr, len);
        *addr = hcall_buf->oldbuf;
        hcall_buf->oldbuf = NULL;   /* mark the bounce page free again */
    }
    else
    {
        unlock_pages(*addr, len);
    }
}
302 #endif
304 /* NB: arr must be locked */
/* Query the types of num frames of domain dom via
 * XEN_DOMCTL_getpageframeinfo3; results are written back into arr.
 * NB: arr must be locked by the caller. */
int xc_get_pfn_type_batch(xc_interface *xch, uint32_t dom,
                          unsigned int num, xen_pfn_t *arr)
{
    DECLARE_DOMCTL;
    domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
    domctl.domain = (domid_t)dom;
    domctl.u.getpageframeinfo3.num = num;
    set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
    return do_domctl(xch, &domctl);
}
/*
 * Issue __HYPERVISOR_mmuext_op for nr_ops operations on behalf of
 * domain dom.  The op array is bounced/locked via hcall_buf_prep so
 * Xen can read it.  Returns the hypercall result, or -EINVAL if the
 * memory could not be prepared.
 */
int xc_mmuext_op(
    xc_interface *xch,
    struct mmuext_op *op,
    unsigned int nr_ops,
    domid_t dom)
{
    DECLARE_HYPERCALL;
    long ret = -EINVAL;

    if ( hcall_buf_prep((void **)&op, nr_ops*sizeof(*op)) != 0 )
    {
        PERROR("Could not lock memory for Xen hypercall");
        goto out1;
    }

    hypercall.op     = __HYPERVISOR_mmuext_op;
    hypercall.arg[0] = (unsigned long)op;
    hypercall.arg[1] = (unsigned long)nr_ops;
    hypercall.arg[2] = (unsigned long)0;   /* pdone: success count not wanted */
    hypercall.arg[3] = (unsigned long)dom;

    ret = do_xen_hypercall(xch, &hypercall);

    hcall_buf_release((void **)&op, nr_ops*sizeof(*op));

 out1:
    return ret;
}
/*
 * Submit the batched mmu_update entries accumulated in mmu to Xen.
 * Returns 0 on success (or if the batch is empty), 1 on failure.
 *
 * NOTE(review): when lock_pages() fails we bail out without resetting
 * mmu->idx; if the caller ignores the error and keeps calling
 * xc_add_mmu_update(), updates[] is written past its end -- confirm
 * all callers treat a non-zero return as fatal.
 */
static int flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    int err = 0;
    DECLARE_HYPERCALL;

    if ( mmu->idx == 0 )
        return 0;

    hypercall.op     = __HYPERVISOR_mmu_update;
    hypercall.arg[0] = (unsigned long)mmu->updates;
    hypercall.arg[1] = (unsigned long)mmu->idx;
    hypercall.arg[2] = 0;                  /* pdone not wanted */
    hypercall.arg[3] = mmu->subject;

    /* The update array must be pinned while Xen reads it. */
    if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 )
    {
        PERROR("flush_mmu_updates: mmu updates lock_pages failed");
        err = 1;
        goto out;
    }

    if ( do_xen_hypercall(xch, &hypercall) < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    /* Batch is consumed even on hypercall failure. */
    mmu->idx = 0;

    unlock_pages(mmu->updates, sizeof(mmu->updates));

 out:
    return err;
}
380 struct xc_mmu *xc_alloc_mmu_updates(xc_interface *xch, domid_t dom)
381 {
382 struct xc_mmu *mmu = malloc(sizeof(*mmu));
383 if ( mmu == NULL )
384 return mmu;
385 mmu->idx = 0;
386 mmu->subject = dom;
387 return mmu;
388 }
390 int xc_add_mmu_update(xc_interface *xch, struct xc_mmu *mmu,
391 unsigned long long ptr, unsigned long long val)
392 {
393 mmu->updates[mmu->idx].ptr = ptr;
394 mmu->updates[mmu->idx].val = val;
396 if ( ++mmu->idx == MAX_MMU_UPDATES )
397 return flush_mmu_updates(xch, mmu);
399 return 0;
400 }
/* Public wrapper: explicitly flush any pending batched mmu updates. */
int xc_flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    return flush_mmu_updates(xch, mmu);
}
/*
 * Issue __HYPERVISOR_memory_op.  arg is interpreted per cmd; for the
 * commands handled below, arg (and any extent array it references) is
 * locked for the duration of the hypercall and unlocked afterwards.
 * Commands not listed are passed through with no locking.
 * Returns the hypercall result, or -EINVAL if locking failed.
 * NB: the lock and unlock switches must be kept symmetric.
 */
int xc_memory_op(xc_interface *xch,
                 int cmd,
                 void *arg)
{
    DECLARE_HYPERCALL;
    /* Aliases of arg for the commands that need field access. */
    struct xen_memory_reservation *reservation = arg;
    struct xen_machphys_mfn_list *xmml = arg;
    xen_pfn_t *extent_start;
    long ret = -EINVAL;

    hypercall.op     = __HYPERVISOR_memory_op;
    hypercall.arg[0] = (unsigned long)cmd;
    hypercall.arg[1] = (unsigned long)arg;

    /* Lock phase. */
    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        if ( lock_pages(reservation, sizeof(*reservation)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        /* extent_start may be NULL (anonymous allocation). */
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( (extent_start != NULL) &&
             (lock_pages(extent_start,
                    reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
        {
            PERROR("Could not lock");
            unlock_pages(reservation, sizeof(*reservation));
            goto out1;
        }
        break;
    case XENMEM_machphys_mfn_list:
        if ( lock_pages(xmml, sizeof(*xmml)) != 0 )
        {
            PERROR("Could not lock");
            goto out1;
        }
        get_xen_guest_handle(extent_start, xmml->extent_start);
        if ( lock_pages(extent_start,
                   xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
        {
            PERROR("Could not lock");
            unlock_pages(xmml, sizeof(*xmml));
            goto out1;
        }
        break;
    case XENMEM_add_to_physmap:
        if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        /* These take just a domid_t argument. */
        if ( lock_pages(arg, sizeof(domid_t)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    case XENMEM_set_pod_target:
    case XENMEM_get_pod_target:
        if ( lock_pages(arg, sizeof(struct xen_pod_target)) )
        {
            PERROR("Could not lock");
            goto out1;
        }
        break;
    }

    ret = do_xen_hypercall(xch, &hypercall);

    /* Unlock phase: mirror of the lock phase above. */
    switch ( cmd )
    {
    case XENMEM_increase_reservation:
    case XENMEM_decrease_reservation:
    case XENMEM_populate_physmap:
        unlock_pages(reservation, sizeof(*reservation));
        get_xen_guest_handle(extent_start, reservation->extent_start);
        if ( extent_start != NULL )
            unlock_pages(extent_start,
                         reservation->nr_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_machphys_mfn_list:
        unlock_pages(xmml, sizeof(*xmml));
        get_xen_guest_handle(extent_start, xmml->extent_start);
        unlock_pages(extent_start,
                     xmml->max_extents * sizeof(xen_pfn_t));
        break;
    case XENMEM_add_to_physmap:
        unlock_pages(arg, sizeof(struct xen_add_to_physmap));
        break;
    case XENMEM_current_reservation:
    case XENMEM_maximum_reservation:
    case XENMEM_maximum_gpfn:
        unlock_pages(arg, sizeof(domid_t));
        break;
    case XENMEM_set_pod_target:
    case XENMEM_get_pod_target:
        unlock_pages(arg, sizeof(struct xen_pod_target));
        break;
    }

 out1:
    return ret;
}
520 long long xc_domain_get_cpu_usage( xc_interface *xch, domid_t domid, int vcpu )
521 {
522 DECLARE_DOMCTL;
524 domctl.cmd = XEN_DOMCTL_getvcpuinfo;
525 domctl.domain = (domid_t)domid;
526 domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
527 if ( (do_domctl(xch, &domctl) < 0) )
528 {
529 PERROR("Could not get info on domain");
530 return -1;
531 }
532 return domctl.u.getvcpuinfo.cpu_time;
533 }
536 #ifndef __ia64__
/*
 * Fetch up to max_pfns machine frame numbers belonging to domid into
 * pfn_buf via XEN_DOMCTL_getmemlist.  pfn_buf is locked around the
 * hypercall.  Returns the number of PFNs written, or -1 on failure.
 */
int xc_get_pfn_list(xc_interface *xch,
                    uint32_t domid,
                    uint64_t *pfn_buf,
                    unsigned long max_pfns)
{
    DECLARE_DOMCTL;
    int ret;
    domctl.cmd = XEN_DOMCTL_getmemlist;
    domctl.domain = (domid_t)domid;
    domctl.u.getmemlist.max_pfns = max_pfns;
    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);

#ifdef VALGRIND
    /* Pre-initialise so valgrind doesn't flag Xen-written bytes. */
    memset(pfn_buf, 0, max_pfns * sizeof(*pfn_buf));
#endif

    if ( lock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
    {
        PERROR("xc_get_pfn_list: pfn_buf lock failed");
        return -1;
    }

    ret = do_domctl(xch, &domctl);

    unlock_pages(pfn_buf, max_pfns * sizeof(*pfn_buf));

    return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
}
565 #endif
567 long xc_get_tot_pages(xc_interface *xch, uint32_t domid)
568 {
569 DECLARE_DOMCTL;
570 domctl.cmd = XEN_DOMCTL_getdomaininfo;
571 domctl.domain = (domid_t)domid;
572 return (do_domctl(xch, &domctl) < 0) ?
573 -1 : domctl.u.getdomaininfo.tot_pages;
574 }
576 int xc_copy_to_domain_page(xc_interface *xch,
577 uint32_t domid,
578 unsigned long dst_pfn,
579 const char *src_page)
580 {
581 void *vaddr = xc_map_foreign_range(
582 xch, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
583 if ( vaddr == NULL )
584 return -1;
585 memcpy(vaddr, src_page, PAGE_SIZE);
586 munmap(vaddr, PAGE_SIZE);
587 return 0;
588 }
590 int xc_clear_domain_page(xc_interface *xch,
591 uint32_t domid,
592 unsigned long dst_pfn)
593 {
594 void *vaddr = xc_map_foreign_range(
595 xch, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
596 if ( vaddr == NULL )
597 return -1;
598 memset(vaddr, 0, PAGE_SIZE);
599 munmap(vaddr, PAGE_SIZE);
600 return 0;
601 }
/* Public pass-through: issue an arbitrary caller-built domctl. */
int xc_domctl(xc_interface *xch, struct xen_domctl *domctl)
{
    return do_domctl(xch, domctl);
}
/* Public pass-through: issue an arbitrary caller-built sysctl. */
int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl)
{
    return do_sysctl(xch, sysctl);
}
/*
 * Issue a XENVER_* version hypercall.  For commands whose arg buffer
 * size is known here, the buffer is locked around the call; unknown
 * commands are passed through unlocked (argsize stays 0).
 * Returns the hypercall result, or -ENOMEM if locking failed.
 */
int xc_version(xc_interface *xch, int cmd, void *arg)
{
    int rc, argsize = 0;

    switch ( cmd )
    {
    case XENVER_extraversion:
        argsize = sizeof(xen_extraversion_t);
        break;
    case XENVER_compile_info:
        argsize = sizeof(xen_compile_info_t);
        break;
    case XENVER_capabilities:
        argsize = sizeof(xen_capabilities_info_t);
        break;
    case XENVER_changeset:
        argsize = sizeof(xen_changeset_info_t);
        break;
    case XENVER_platform_parameters:
        argsize = sizeof(xen_platform_parameters_t);
        break;
    }

    if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) )
    {
        PERROR("Could not lock memory for version hypercall");
        return -ENOMEM;
    }

#ifdef VALGRIND
    /* Pre-initialise so valgrind doesn't flag Xen-written bytes. */
    if (argsize != 0)
        memset(arg, 0, argsize);
#endif

    rc = do_xen_version(xch, cmd, arg);

    if ( argsize != 0 )
        unlock_pages(arg, argsize);

    return rc;
}
/*
 * Exchange mfn for a machine frame below 4GB: the page is returned to
 * Xen via a decrease_reservation, then a replacement constrained to
 * 32 address bits is allocated.  Returns the new MFN, or 0 on failure
 * (note: the original page is already gone if the increase step fails).
 */
unsigned long xc_make_page_below_4G(
    xc_interface *xch, uint32_t domid, unsigned long mfn)
{
    xen_pfn_t old_mfn = mfn;
    xen_pfn_t new_mfn;

    if ( xc_domain_memory_decrease_reservation(
        xch, domid, 1, 0, &old_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
        return 0;
    }

    if ( xc_domain_memory_increase_reservation(
        xch, domid, 1, 0, XENMEMF_address_bits(32), &new_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
        return 0;
    }

    return new_mfn;
}
/* Destructor for the safe_strerror() per-thread buffer key: free the
 * buffer and clear the slot. */
static void
_xc_clean_errbuf(void * m)
{
    free(m);
    pthread_setspecific(errbuf_pkey, NULL);
}
/* One-time initializer (via pthread_once): create the errbuf key with
 * _xc_clean_errbuf as the thread-exit destructor. */
static void
_xc_init_errbuf(void)
{
    pthread_key_create(&errbuf_pkey, _xc_clean_errbuf);
}
691 char *safe_strerror(int errcode)
692 {
693 #define XS_BUFSIZE 32
694 char *errbuf;
695 static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
696 char *strerror_str;
698 pthread_once(&errbuf_pkey_once, _xc_init_errbuf);
700 errbuf = pthread_getspecific(errbuf_pkey);
701 if (errbuf == NULL) {
702 errbuf = malloc(XS_BUFSIZE);
703 pthread_setspecific(errbuf_pkey, errbuf);
704 }
706 /*
707 * Thread-unsafe strerror() is protected by a local mutex. We copy
708 * the string to a thread-private buffer before releasing the mutex.
709 */
710 pthread_mutex_lock(&mutex);
711 strerror_str = strerror(errcode);
712 strncpy(errbuf, strerror_str, XS_BUFSIZE);
713 errbuf[XS_BUFSIZE-1] = '\0';
714 pthread_mutex_unlock(&mutex);
716 return errbuf;
717 }
/* Serialize a little-endian uint64_t bitmap of nbits bits into a byte
 * array: bp[0] receives the least-significant byte of lp[0]. */
void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits)
{
    int word = 0, base = 0;

    while (nbits > 0) {
        uint64_t chunk = lp[word];
        int j;

        for (j = 0; j < (int)sizeof(chunk) && nbits > 0; j++) {
            bp[base + j] = (uint8_t)chunk;
            chunk >>= 8;
            nbits -= 8;
        }
        word++;
        base += sizeof(chunk);
    }
}
/* Deserialize a byte array of nbits bits into a little-endian uint64_t
 * bitmap: bp[0] becomes the least-significant byte of lp[0].  Bits of
 * the final word beyond nbits are cleared. */
void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits)
{
    int word, base;

    for (word = 0, base = 0; nbits > 0; word++, base += sizeof(uint64_t)) {
        uint64_t acc = 0;
        int j;

        for (j = 0; j < (int)sizeof(acc) && nbits > 0; j++, nbits -= 8)
            acc |= (uint64_t)bp[base + j] << (j * 8);
        lp[word] = acc;
    }
}
/*
 * Read exactly size bytes from fd into data, retrying on EINTR.
 * Returns 0 on success; -1 on error, or on premature EOF (in which
 * case errno is set to 0 to distinguish it from a real read error).
 */
int read_exact(int fd, void *data, size_t size)
{
    size_t offset = 0;
    ssize_t len;

    while ( offset < size )
    {
        len = read(fd, (char *)data + offset, size - offset);
        if ( (len == -1) && (errno == EINTR) )
            continue;
        if ( len == 0 )
            errno = 0;      /* EOF: signal with errno == 0 */
        if ( len <= 0 )
            return -1;
        offset += len;
    }

    return 0;
}
/* Write exactly size bytes from data to fd, retrying on EINTR.
 * Returns 0 on success, -1 on error (errno set by write()). */
int write_exact(int fd, const void *data, size_t size)
{
    size_t offset = 0;
    ssize_t len;

    while ( offset < size )
    {
        len = write(fd, (const char *)data + offset, size - offset);
        if ( (len == -1) && (errno == EINTR) )
            continue;
        if ( len <= 0 )
            return -1;
        offset += len;
    }

    return 0;
}
/* Find-first-set for a byte: return the 1-based index of the lowest
 * set bit, or 0 if x is zero (ffs() semantics). */
int xc_ffs8(uint8_t x)
{
    int bit;

    for ( bit = 1; x; x >>= 1, bit++ )
        if ( x & 1 )
            return bit;
    return 0;
}
/* Find-first-set for 16 bits: check the low byte first, then the high
 * byte with an offset of 8; 0 means no bit set. */
int xc_ffs16(uint16_t x)
{
    uint8_t lo = (uint8_t)x;

    if ( lo )
        return xc_ffs8(lo);
    if ( x >> 8 )
        return xc_ffs8((uint8_t)(x >> 8)) + 8;
    return 0;
}
/* Find-first-set for 32 bits, built on xc_ffs16; 0 means no bit set. */
int xc_ffs32(uint32_t x)
{
    uint16_t lo = (uint16_t)x;

    if ( lo )
        return xc_ffs16(lo);
    if ( x >> 16 )
        return xc_ffs16((uint16_t)(x >> 16)) + 16;
    return 0;
}
/* Find-first-set for 64 bits, built on xc_ffs32; 0 means no bit set. */
int xc_ffs64(uint64_t x)
{
    uint32_t lo = (uint32_t)x;

    if ( lo )
        return xc_ffs32(lo);
    if ( x >> 32 )
        return xc_ffs32((uint32_t)(x >> 32)) + 32;
    return 0;
}
/*
 * Allocate size bytes aligned to alignment, using whichever primitive
 * the platform provides.  Returns NULL on failure; free with free().
 * NOTE(review): the valloc() branch gives page alignment only and
 * ignores the alignment argument -- presumably callers never request
 * more than PAGE_SIZE alignment on those platforms; confirm.
 */
void *xc_memalign(size_t alignment, size_t size)
{
#if defined(_POSIX_C_SOURCE) && !defined(__sun__)
    int ret;
    void *ptr;
    ret = posix_memalign(&ptr, alignment, size);
    if (ret != 0)
        return NULL;
    return ptr;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
    return valloc(size);
#else
    return memalign(alignment, size);
#endif
}
830 /*
831 * Local variables:
832 * mode: C
833 * c-set-style: "BSD"
834 * c-basic-offset: 4
835 * tab-width: 4
836 * indent-tabs-mode: nil
837 * End:
838 */