debuggers.hg

view tools/libxc/ia64/xc_ia64_linux_save.c @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this
author Mukesh Rathor
date Mon May 19 15:34:57 2008 -0700 (2008-05-19)
parents
children 5c0bf00e371d
/******************************************************************************
 * xc_ia64_linux_save.c
 *
 * Save the state of a running Linux session.
 *
 * Copyright (c) 2003, K A Fraser.
 * Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net>
 *
 * Copyright (c) 2007 Isaku Yamahata <yamahata@valinux.co.jp>
 *                    Use foreign p2m exposure.
 *                    VTi domain support.
 */

#include <inttypes.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>

#include "xg_private.h"
#include "xc_ia64.h"
#include "xc_ia64_save_restore.h"
#include "xc_efi.h"
#include "xen/hvm/params.h"

/*
** Default values for important tuning parameters. These can be overridden
** by passing non-zero replacement values to xc_domain_save().
**
** XXX SMH: should consider whether we want to be able to override
** MAX_MBIT_RATE too.
**
*/
#define DEF_MAX_ITERS    (4 - 1)        /* limit us to 4 times round loop */
#define DEF_MAX_FACTOR   3              /* never send more than 3x nr_pfns */

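/*
 * Editor's sketch, not part of the original file: with the defaults above,
 * the live pre-copy loop in xc_domain_save() below gives up and suspends
 * the domain once any of the conditions below holds. Names mirror the
 * locals used in that loop.
 */
static inline int
precopy_should_stop(int iter, uint32_t max_iters,
                    unsigned int sent_this_iter, unsigned int skip_this_iter,
                    unsigned int total_sent, unsigned long p2m_size,
                    uint32_t max_factor)
{
    return (iter >= max_iters) ||                    /* bounded rounds */
           (sent_this_iter + skip_this_iter < 50) || /* nearly quiescent */
           (total_sent > p2m_size * max_factor);     /* sent too much */
}
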
/*
** During (live) save/migrate, we maintain a number of bitmaps to track
** which pages we have to send and which we can skip.
*/

static inline int test_bit(int nr, volatile void * addr)
{
    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
}

static inline void clear_bit(int nr, volatile void * addr)
{
    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
}

static inline void set_bit(int nr, volatile void * addr)
{
    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
}

static int
suspend_and_state(int (*suspend)(int), int xc_handle, int io_fd,
                  int dom, xc_dominfo_t *info)
{
    int i = 0;

    if (!(*suspend)(dom)) {
        ERROR("Suspend request failed");
        return -1;
    }

 retry:

    if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) {
        ERROR("Could not get domain info");
        return -1;
    }

    if (info->shutdown && info->shutdown_reason == SHUTDOWN_suspend)
        return 0; // success

    if (info->paused) {
        // try unpausing domain, wait, and retest
        xc_domain_unpause(xc_handle, dom);

        ERROR("Domain was paused. Wait and re-test.");
        usleep(10000); // 10ms

        goto retry;
    }

    if (++i < 100) {
        ERROR("Retry suspend domain.");
        usleep(10000); // 10ms
        goto retry;
    }

    ERROR("Unable to suspend domain.");

    return -1;
}

static inline int
md_is_not_ram(const efi_memory_desc_t *md)
{
    return ((md->type != EFI_CONVENTIONAL_MEMORY) ||
            (md->attribute != EFI_MEMORY_WB) ||
            (md->num_pages == 0));
}

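/*
 * Editor's sketch, an assumption rather than part of the original: the
 * memmap walks below advance by memmap_info->efi_memdesc_size instead of
 * sizeof(efi_memory_desc_t), because the firmware may lay descriptors out
 * with a larger stride. A hypothetical helper capturing that recurring
 * loop pattern:
 */
#define for_each_memmap_desc(md, p, start, end, desc_size)     \
    for ((p) = (start); (p) < (end) && ((md) = (p), 1);        \
         (p) += (desc_size))
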
/*
 * Send through a list of all the PFNs that were not in the map at the close.
 * We send the pages which were allocated. However, the balloon driver may
 * have decreased the reservation after a page was sent, so we have to
 * re-check for freed pages after pausing the domain.
 */
static int
xc_ia64_send_unallocated_list(int xc_handle, int io_fd,
                              struct xen_ia64_p2m_table *p2m_table,
                              xen_ia64_memmap_info_t *memmap_info,
                              void *memmap_desc_start, void *memmap_desc_end)
{
    void *p;
    efi_memory_desc_t *md;

    unsigned long N;
    unsigned long pfntab[1024];
    unsigned int j;

    j = 0;
    for (p = memmap_desc_start;
         p < memmap_desc_end;
         p += memmap_info->efi_memdesc_size) {
        md = p;

        if (md_is_not_ram(md))
            continue;

        for (N = md->phys_addr >> PAGE_SHIFT;
             N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
                 PAGE_SHIFT;
             N++) {
            if (!xc_ia64_p2m_allocated(p2m_table, N))
                j++;
        }
    }
    if (write_exact(io_fd, &j, sizeof(unsigned int))) {
        ERROR("Error when writing to state file (6a)");
        return -1;
    }

    j = 0;
    for (p = memmap_desc_start;
         p < memmap_desc_end;
         p += memmap_info->efi_memdesc_size) {
        md = p;

        if (md_is_not_ram(md))
            continue;

        for (N = md->phys_addr >> PAGE_SHIFT;
             N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
                 PAGE_SHIFT;
             N++) {
            if (!xc_ia64_p2m_allocated(p2m_table, N))
                pfntab[j++] = N;
            if (j == sizeof(pfntab)/sizeof(pfntab[0])) {
                if (write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
                    ERROR("Error when writing to state file (6b)");
                    return -1;
                }
                j = 0;
            }
        }
    }
    if (j > 0) {
        if (write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
            ERROR("Error when writing to state file (6c)");
            return -1;
        }
    }

    return 0;
}

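/*
 * Editor's sketch, not part of the original file: the matching restore
 * side (xc_ia64_linux_restore.c, not shown) would consume the record
 * written above roughly as below -- a count followed by that many pfns.
 * read_exact() is assumed to mirror write_exact().
 */
static int
xc_ia64_recv_unallocated_list_sketch(int io_fd)
{
    unsigned int count, i;
    unsigned long pfn;

    if (read_exact(io_fd, &count, sizeof(count)))
        return -1;
    for (i = 0; i < count; i++) {
        if (read_exact(io_fd, &pfn, sizeof(pfn)))
            return -1;
        /* ... return pfn to Xen, e.g. via a decrease-reservation call ... */
    }
    return 0;
}
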
static int
xc_ia64_send_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
                          uint32_t vcpu, vcpu_guest_context_t *ctxt)
{
    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt)) {
        ERROR("Could not get vcpu context");
        return -1;
    }

    if (write_exact(io_fd, ctxt, sizeof(*ctxt))) {
        ERROR("Error when writing to state file (1)");
        return -1;
    }

    fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt->regs.ip, ctxt->regs.b[0]);
    return 0;
}

static int
xc_ia64_send_shared_info(int xc_handle, int io_fd, shared_info_t *live_shinfo)
{
    if (write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
        ERROR("Error when writing to state file (1)");
        return -1;
    }
    return 0;
}

static int
xc_ia64_pv_send_context(int xc_handle, int io_fd, uint32_t dom,
                        shared_info_t *live_shinfo)
{
    /* A copy of the CPU context of the guest. */
    vcpu_guest_context_t ctxt;
    char *mem;

    if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, 0, &ctxt))
        return -1;

    mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                               PROT_READ|PROT_WRITE, ctxt.privregs_pfn);
    if (mem == NULL) {
        ERROR("cannot map privreg page");
        return -1;
    }
    if (write_exact(io_fd, mem, PAGE_SIZE)) {
        ERROR("Error when writing privreg to state file (5)");
        munmap(mem, PAGE_SIZE);
        return -1;
    }
    munmap(mem, PAGE_SIZE);

    if (xc_ia64_send_shared_info(xc_handle, io_fd, live_shinfo))
        return -1;

    return 0;
}

static int
xc_ia64_hvm_send_context(int xc_handle, int io_fd, uint32_t dom,
                         const xc_dominfo_t *info, shared_info_t *live_shinfo)
{
    int rc = -1;
    unsigned int i;

    /* vcpu map */
    uint64_t max_virt_cpus;
    unsigned long vcpumap_size;
    uint64_t *vcpumap = NULL;

    /* HVM: magic frames for ioreqs and xenstore comms */
    const int hvm_params[] = {
        HVM_PARAM_STORE_PFN,
        HVM_PARAM_IOREQ_PFN,
        HVM_PARAM_BUFIOREQ_PFN,
        HVM_PARAM_BUFPIOREQ_PFN,
    };
    const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
    /* store_pfn, ioreq_pfn, bufioreq_pfn, bufpioreq_pfn */
    uint64_t magic_pfns[NR_PARAMS];

    /* HVM: a buffer for holding the HVM context */
    uint64_t rec_size;
    uint64_t hvm_buf_size = 0;
    uint8_t *hvm_buf = NULL;

    if (xc_ia64_send_shared_info(xc_handle, io_fd, live_shinfo))
        return -1;

    /* vcpu map */
    max_virt_cpus = MAX_VIRT_CPUS;
    vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) /
        sizeof(vcpumap[0]);
    vcpumap = malloc(vcpumap_size);
    if (vcpumap == NULL) {
        ERROR("memory alloc for vcpumap");
        goto out;
    }
    memset(vcpumap, 0, vcpumap_size);

    for (i = 0; i <= info->max_vcpu_id; i++) {
        xc_vcpuinfo_t vinfo;
        if ((xc_vcpu_getinfo(xc_handle, dom, i, &vinfo) == 0) && vinfo.online)
            set_bit(i, vcpumap);
    }

    if (write_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) {
        ERROR("write max_virt_cpus");
        goto out;
    }

    if (write_exact(io_fd, vcpumap, vcpumap_size)) {
        ERROR("write vcpumap");
        goto out;
    }

    /* vcpu context */
    for (i = 0; i <= info->max_vcpu_id; i++) {
        /* A copy of the CPU context of the guest. */
        vcpu_guest_context_t ctxt;

        if (!test_bit(i, vcpumap))
            continue;

        if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, i, &ctxt))
            goto out;

        // The system context of the vcpu is sent as part of the HVM context.
    }

    /* Save magic-page locations. */
    memset(magic_pfns, 0, sizeof(magic_pfns));
    for (i = 0; i < NR_PARAMS; i++) {
        if (xc_get_hvm_param(xc_handle, dom, hvm_params[i], &magic_pfns[i])) {
            PERROR("Error when xc_get_hvm_param");
            goto out;
        }
    }

    if (write_exact(io_fd, magic_pfns, sizeof(magic_pfns))) {
        ERROR("Error when writing to state file (7)");
        goto out;
    }

    /* Need another buffer for HVM context */
    hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
    if (hvm_buf_size == -1) {
        ERROR("Couldn't get HVM context size from Xen");
        goto out;
    }

    hvm_buf = malloc(hvm_buf_size);
    if (!hvm_buf) {
        ERROR("Couldn't allocate memory");
        goto out;
    }

    /* Get HVM context from Xen and save it too */
    rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, hvm_buf_size);
    if (rec_size == -1) {
        ERROR("HVM: Could not get hvm buffer");
        goto out;
    }

    if (write_exact(io_fd, &rec_size, sizeof(rec_size))) {
        ERROR("error write hvm buffer size");
        goto out;
    }

    if (write_exact(io_fd, hvm_buf, rec_size)) {
        ERROR("write HVM info failed!\n");
        goto out;
    }

    rc = 0;
 out:
    if (hvm_buf != NULL)
        free(hvm_buf);
    if (vcpumap != NULL)
        free(vcpumap);
    return rc;
}

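/*
 * Editor's summary, derived from the code above rather than a normative
 * spec: the HVM-side stream consists of the shared_info page, then
 * max_virt_cpus (uint64_t), the vcpumap, one vcpu_guest_context_t per
 * online vcpu, magic_pfns[NR_PARAMS], rec_size (uint64_t), and finally
 * rec_size bytes of HVM context.
 */
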
int
xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
               uint32_t max_factor, uint32_t flags, int (*suspend)(int),
               int hvm, void *(*init_qemu_maps)(int, unsigned),
               void (*qemu_flip_buffer)(int, int))
{
    DECLARE_DOMCTL;
    xc_dominfo_t info;

    int rc = 1;

    int debug = (flags & XCFLAGS_DEBUG);
    int live = (flags & XCFLAGS_LIVE);

    /* The new domain's shared-info frame number. */
    unsigned long shared_info_frame;

    /* Live mapping of shared info structure */
    shared_info_t *live_shinfo = NULL;

    /* Iteration number. */
    int iter;

    /* Number of pages sent in the last iteration (live only). */
    unsigned int sent_last_iter;

    /* Number of pages sent (live only). */
    unsigned int total_sent;

    /* total number of pages used by the current guest */
    unsigned long p2m_size;

    /* Size of the shadow bitmap (live only). */
    unsigned int bitmap_size = 0;

    /* True if last iteration. */
    int last_iter;

    /* Bitmap of pages to be sent. */
    unsigned long *to_send = NULL;
    /* Bitmap of pages not to be sent (because dirtied). */
    unsigned long *to_skip = NULL;

    char *mem;

    /* HVM: shared-memory bitmaps for getting log-dirty bits from qemu-dm */
    unsigned long *qemu_bitmaps[2];
    int qemu_active = 0;
    int qemu_non_active = 1;

    /* for foreign p2m exposure */
    unsigned int memmap_info_num_pages;
    unsigned long memmap_size = 0;
    xen_ia64_memmap_info_t *memmap_info_live = NULL;
    xen_ia64_memmap_info_t *memmap_info = NULL;
    void *memmap_desc_start;
    void *memmap_desc_end;
    void *p;
    efi_memory_desc_t *md;
    struct xen_ia64_p2m_table p2m_table;
    xc_ia64_p2m_init(&p2m_table);

    if (debug)
        fprintf(stderr, "xc_linux_save (ia64): started dom=%d\n", dom);

    /* If no explicit control parameters given, use defaults */
    if (!max_iters)
        max_iters = DEF_MAX_ITERS;
    if (!max_factor)
        max_factor = DEF_MAX_FACTOR;

    //initialize_mbit_rate();

    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        return 1;
    }

    shared_info_frame = info.shared_info_frame;

#if 0
    /* cheesy sanity check */
    if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
        ERROR("Invalid state record -- pfn count out of range: %lu",
              (info.max_memkb >> (PAGE_SHIFT - 10)));
        goto out;
    }
#endif

    /* Map the shared info frame */
    live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                       PROT_READ, shared_info_frame);
    if (!live_shinfo) {
        ERROR("Couldn't map live_shinfo");
        goto out;
    }

    p2m_size = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom) + 1;

    /* This is expected by xm restore. */
    if (write_exact(io_fd, &p2m_size, sizeof(unsigned long))) {
        ERROR("write: p2m_size");
        goto out;
    }

    /* xc_linux_restore starts to read here. */
    /* Write a version number. This avoids hunting for an obscure bug
       if the format changes.
       The version is hard-coded; don't forget to change the restore code
       too! */
    {
        unsigned long version = XC_IA64_SR_FORMAT_VER_CURRENT;

        if (write_exact(io_fd, &version, sizeof(unsigned long))) {
            ERROR("write: version");
            goto out;
        }
    }

    domctl.cmd = XEN_DOMCTL_arch_setup;
    domctl.domain = (domid_t)dom;
    domctl.u.arch_setup.flags = XEN_DOMAINSETUP_query;
    if (xc_domctl(xc_handle, &domctl) < 0) {
        ERROR("Could not get domain setup");
        goto out;
    }
    if (write_exact(io_fd, &domctl.u.arch_setup,
                    sizeof(domctl.u.arch_setup))) {
        ERROR("write: domain setup");
        goto out;
    }

    /* Domain is still running at this point */
    if (live) {

        if (xc_shadow_control(xc_handle, dom,
                              XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                              NULL, 0, NULL, 0, NULL) < 0) {
            ERROR("Couldn't enable shadow mode");
            goto out;
        }

        last_iter = 0;

        bitmap_size = ((p2m_size + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1)) / 8;
        to_send = malloc(bitmap_size);
        to_skip = malloc(bitmap_size);

        if (!to_send || !to_skip) {
            ERROR("Couldn't allocate bitmap array");
            goto out;
        }

        /* Initially all the pages must be sent. */
        memset(to_send, 0xff, bitmap_size);

        if (lock_pages(to_send, bitmap_size)) {
            ERROR("Unable to lock_pages to_send");
            goto out;
        }
        if (lock_pages(to_skip, bitmap_size)) {
            ERROR("Unable to lock_pages to_skip");
            goto out;
        }

        if (hvm) {
            /* Get qemu-dm logging dirty pages too */
            void *seg = init_qemu_maps(dom, bitmap_size);
            qemu_bitmaps[0] = seg;
            qemu_bitmaps[1] = seg + bitmap_size;
            qemu_active = 0;
            qemu_non_active = 1;
        }
    } else {

        /* This is a non-live suspend. Issue the callback to get the
           domain suspended. */

        last_iter = 1;

        if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
            ERROR("Domain appears not to have suspended");
            goto out;
        }

    }

    memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
    memmap_size = PAGE_SIZE * memmap_info_num_pages;
    memmap_info_live = xc_map_foreign_range(xc_handle, info.domid,
                                            memmap_size, PROT_READ,
                                            live_shinfo->arch.memmap_info_pfn);
    if (memmap_info_live == NULL) {
        PERROR("Could not map memmap info.");
        goto out;
    }
    memmap_info = malloc(memmap_size);
    if (memmap_info == NULL) {
        PERROR("Could not allocate memmap info memory");
        goto out;
    }
    memcpy(memmap_info, memmap_info_live, memmap_size);
    munmap(memmap_info_live, memmap_size);
    memmap_info_live = NULL;

    if (xc_ia64_p2m_map(&p2m_table, xc_handle, dom, memmap_info, 0) < 0) {
        PERROR("xc_ia64_p2m_map");
        goto out;
    }
    if (write_exact(io_fd,
                    &memmap_info_num_pages, sizeof(memmap_info_num_pages))) {
        PERROR("write: arch.memmap_info_num_pages");
        goto out;
    }
    if (write_exact(io_fd, memmap_info, memmap_size)) {
        PERROR("write: memmap_info");
        goto out;
    }

    sent_last_iter = p2m_size;
    total_sent = 0;

    for (iter = 1; ; iter++) {
        unsigned int sent_this_iter, skip_this_iter;
        unsigned long N;

        sent_this_iter = 0;
        skip_this_iter = 0;

        /* Pages dirtied during this pass won't be saved; it is slightly
           wasteful to peek the whole array every time, but this is fast
           enough for the moment. */
        if (!last_iter) {
            if (xc_shadow_control(xc_handle, dom,
                                  XEN_DOMCTL_SHADOW_OP_PEEK,
                                  to_skip, p2m_size,
                                  NULL, 0, NULL) != p2m_size) {
                ERROR("Error peeking shadow bitmap");
                goto out;
            }
        }

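        /*
         * Editor's note: XEN_DOMCTL_SHADOW_OP_PEEK reads the dirty bitmap
         * without clearing it; anything skipped here is caught by the
         * XEN_DOMCTL_SHADOW_OP_CLEAN at the end of the iteration, which
         * reads the bitmap into to_send and clears it in one operation.
         */
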
        /* Start writing out the saved-domain record. */
        memmap_desc_start = &memmap_info->memdesc;
        memmap_desc_end = memmap_desc_start + memmap_info->efi_memmap_size;
        for (p = memmap_desc_start;
             p < memmap_desc_end;
             p += memmap_info->efi_memdesc_size) {
            md = p;
            if (md_is_not_ram(md))
                continue;

            for (N = md->phys_addr >> PAGE_SHIFT;
                 N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
                     PAGE_SHIFT;
                 N++) {

                if (!xc_ia64_p2m_allocated(&p2m_table, N))
                    continue;

                if (!last_iter) {
                    if (test_bit(N, to_skip) && test_bit(N, to_send))
                        skip_this_iter++;
                    if (test_bit(N, to_skip) || !test_bit(N, to_send))
                        continue;
                } else if (live) {
                    if (!test_bit(N, to_send))
                        continue;
                }

                if (debug)
                    fprintf(stderr, "xc_linux_save: page %lx (%lu/%lu)\n",
                            xc_ia64_p2m_mfn(&p2m_table, N),
                            N, p2m_size);

                mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                           PROT_READ|PROT_WRITE, N);
                if (mem == NULL) {
                    /* The page may have moved.
                       It will be re-marked dirty.
                       FIXME: to be tracked. */
                    fprintf(stderr, "cannot map mfn page %lx gpfn %lx: %s\n",
                            xc_ia64_p2m_mfn(&p2m_table, N),
                            N, safe_strerror(errno));
                    continue;
                }

                if (write_exact(io_fd, &N, sizeof(N))) {
                    ERROR("write: pfn");
                    munmap(mem, PAGE_SIZE);
                    goto out;
                }

                if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) {
                    ERROR("Error when writing to state file (5)");
                    munmap(mem, PAGE_SIZE);
                    goto out;
                }
                munmap(mem, PAGE_SIZE);
                sent_this_iter++;
                total_sent++;
            }
        }

        if (last_iter)
            break;

        DPRINTF(" %d: sent %d, skipped %d\n",
                iter, sent_this_iter, skip_this_iter);

        if (live) {
            if ( /* ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) || */
                (iter >= max_iters) || (sent_this_iter+skip_this_iter < 50) ||
                (total_sent > p2m_size*max_factor)) {
                DPRINTF("Start last iteration\n");
                last_iter = 1;

                if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
                    ERROR("Domain appears not to have suspended");
                    goto out;
                }
            }

            /* Pages to be sent are pages which were dirty. */
            if (xc_shadow_control(xc_handle, dom,
                                  XEN_DOMCTL_SHADOW_OP_CLEAN,
                                  to_send, p2m_size,
                                  NULL, 0, NULL) != p2m_size) {
                ERROR("Error flushing shadow PT");
                goto out;
            }

            if (hvm) {
                unsigned int j;
                /* Pull in the dirty bits from qemu-dm too */
                if (!last_iter) {
                    qemu_active = qemu_non_active;
                    qemu_non_active = qemu_active ? 0 : 1;
                    qemu_flip_buffer(dom, qemu_active);
                    for (j = 0; j < bitmap_size / sizeof(unsigned long); j++) {
                        to_send[j] |= qemu_bitmaps[qemu_non_active][j];
                        qemu_bitmaps[qemu_non_active][j] = 0;
                    }
                } else {
                    for (j = 0; j < bitmap_size / sizeof(unsigned long); j++)
                        to_send[j] |= qemu_bitmaps[qemu_active][j];
                }
            }

            sent_last_iter = sent_this_iter;

            //print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
        }
    }

    fprintf(stderr, "All memory is saved\n");

    /* terminate */
    {
        unsigned long pfn = INVALID_MFN;
        if (write_exact(io_fd, &pfn, sizeof(pfn))) {
            ERROR("Error when writing to state file (6)");
            goto out;
        }
    }

    if (xc_ia64_send_unallocated_list(xc_handle, io_fd, &p2m_table,
                                      memmap_info,
                                      memmap_desc_start, memmap_desc_end))
        goto out;

    if (!hvm)
        rc = xc_ia64_pv_send_context(xc_handle, io_fd, dom, live_shinfo);
    else
        rc = xc_ia64_hvm_send_context(xc_handle, io_fd,
                                      dom, &info, live_shinfo);
    if (rc)
        goto out;

    /* Success! */
    rc = 0;

 out:

    if (live) {
        if (xc_shadow_control(xc_handle, dom,
                              XEN_DOMCTL_SHADOW_OP_OFF,
                              NULL, 0, NULL, 0, NULL) < 0) {
            DPRINTF("Warning - couldn't disable shadow mode");
        }
    }

    unlock_pages(to_send, bitmap_size);
    free(to_send);
    unlock_pages(to_skip, bitmap_size);
    free(to_skip);
    if (live_shinfo)
        munmap(live_shinfo, PAGE_SIZE);
    if (memmap_info_live)
        munmap(memmap_info_live, memmap_size);
    if (memmap_info)
        free(memmap_info);
    xc_ia64_p2m_unmap(&p2m_table);

    fprintf(stderr, "Save exit rc=%d\n", rc);

    return !!rc;
}

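/*
 * Editor's summary, derived from the code above rather than a normative
 * spec: the overall save stream written by xc_domain_save() is
 *
 *   p2m_size | format version | arch_setup payload |
 *   memmap_info_num_pages | memmap_info | { pfn, page } records ... |
 *   INVALID_MFN terminator | unallocated-pfn list |
 *   then either the PV context (vcpu context, privregs page, shared_info
 *   page) or the HVM context described above.
 */
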
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */