debuggers.hg

view tools/libxc/ia64/xc_ia64_linux_save.c @ 17969:1201c7657832

[IA64] ia64 save/restore new format: save part.

Introduce the new ia64 save/restore format: save part.
The format change is necessary so that pv_ops linux can save and restore
the context of all online vcpus.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Jun 10 16:00:33 2008 +0900 (2008-06-10)
parents ef67e2867b44
children 08f77df14cba
line source
1 /******************************************************************************
2 * xc_ia64_linux_save.c
3 *
4 * Save the state of a running Linux session.
5 *
6 * Copyright (c) 2003, K A Fraser.
7 * Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net>
8 *
9 * Copyright (c) 2007 Isaku Yamahata <yamahata@valinux.co.jp>
10 * Use foreign p2m exposure.
11 * VTi domain support.
12 */
14 #include <inttypes.h>
15 #include <time.h>
16 #include <stdlib.h>
17 #include <unistd.h>
18 #include <sys/time.h>
20 #include "xg_private.h"
21 #include "xc_ia64.h"
22 #include "xc_ia64_save_restore.h"
23 #include "xc_efi.h"
24 #include "xen/hvm/params.h"
/*
** Default values for important tuning parameters. Can override by passing
** non-zero replacement values to xc_linux_save().
**
** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
**
** These defaults are applied in xc_domain_save() when the caller passes
** max_iters / max_factor == 0.
*/
#define DEF_MAX_ITERS (4 - 1) /* limit us to 4 times round loop */
#define DEF_MAX_FACTOR 3 /* never send more than 3x nr_pfns */
36 /*
37 ** During (live) save/migrate, we maintain a number of bitmaps to track
38 ** which pages we have to send, and to skip.
39 */
/* Return the value (0 or 1) of bit 'nr' in the bitmap at 'addr'. */
static inline int test_bit(int nr, volatile void * addr)
{
    unsigned long entry = BITMAP_ENTRY(nr, addr);
    return (entry >> BITMAP_SHIFT(nr)) & 1;
}
45 static inline void clear_bit(int nr, volatile void * addr)
46 {
47 BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
48 }
50 static inline void set_bit(int nr, volatile void * addr)
51 {
52 BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
53 }
55 static int
56 suspend_and_state(int (*suspend)(int), int xc_handle, int io_fd,
57 int dom, xc_dominfo_t *info)
58 {
59 int i = 0;
61 if (!(*suspend)(dom)) {
62 ERROR("Suspend request failed");
63 return -1;
64 }
66 retry:
68 if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) {
69 ERROR("Could not get domain info");
70 return -1;
71 }
73 if (info->shutdown && info->shutdown_reason == SHUTDOWN_suspend)
74 return 0; // success
76 if (info->paused) {
77 // try unpausing domain, wait, and retest
78 xc_domain_unpause(xc_handle, dom);
80 ERROR("Domain was paused. Wait and re-test.");
81 usleep(10000); // 10ms
83 goto retry;
84 }
87 if(++i < 100) {
88 ERROR("Retry suspend domain.");
89 usleep(10000); // 10ms
90 goto retry;
91 }
93 ERROR("Unable to suspend domain.");
95 return -1;
96 }
98 static inline int
99 md_is_not_ram(const efi_memory_desc_t *md)
100 {
101 return ((md->type != EFI_CONVENTIONAL_MEMORY) ||
102 (md->attribute != EFI_MEMORY_WB) ||
103 (md->num_pages == 0));
104 }
106 /*
107 * Send through a list of all the PFNs that were not in map at the close.
108 * We send pages which was allocated. However balloon driver may
109 * decreased after sending page. So we have to check the freed
110 * page after pausing the domain.
111 */
112 static int
113 xc_ia64_send_unallocated_list(int xc_handle, int io_fd,
114 struct xen_ia64_p2m_table *p2m_table,
115 xen_ia64_memmap_info_t *memmap_info,
116 void *memmap_desc_start, void *memmap_desc_end)
117 {
118 void *p;
119 efi_memory_desc_t *md;
121 unsigned long N;
122 unsigned long pfntab[1024];
123 unsigned int j;
125 j = 0;
126 for (p = memmap_desc_start;
127 p < memmap_desc_end;
128 p += memmap_info->efi_memdesc_size) {
129 md = p;
131 if (md_is_not_ram(md))
132 continue;
134 for (N = md->phys_addr >> PAGE_SHIFT;
135 N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
136 PAGE_SHIFT;
137 N++) {
138 if (!xc_ia64_p2m_allocated(p2m_table, N))
139 j++;
140 }
141 }
142 if (write_exact(io_fd, &j, sizeof(unsigned int))) {
143 ERROR("Error when writing to state file (6a)");
144 return -1;
145 }
147 j = 0;
148 for (p = memmap_desc_start;
149 p < memmap_desc_end;
150 p += memmap_info->efi_memdesc_size) {
151 md = p;
153 if (md_is_not_ram(md))
154 continue;
156 for (N = md->phys_addr >> PAGE_SHIFT;
157 N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
158 PAGE_SHIFT;
159 N++) {
160 if (!xc_ia64_p2m_allocated(p2m_table, N))
161 pfntab[j++] = N;
162 if (j == sizeof(pfntab)/sizeof(pfntab[0])) {
163 if (write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
164 ERROR("Error when writing to state file (6b)");
165 return -1;
166 }
167 j = 0;
168 }
169 }
170 }
171 if (j > 0) {
172 if (write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
173 ERROR("Error when writing to state file (6c)");
174 return -1;
175 }
176 }
178 return 0;
179 }
181 static int
182 xc_ia64_send_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
183 uint32_t vcpu, vcpu_guest_context_t *ctxt)
184 {
185 if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt)) {
186 ERROR("Could not get vcpu context");
187 return -1;
188 }
190 if (write_exact(io_fd, ctxt, sizeof(*ctxt))) {
191 ERROR("Error when writing to state file (1)");
192 return -1;
193 }
195 fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt->regs.ip, ctxt->regs.b[0]);
196 return 0;
197 }
199 static int
200 xc_ia64_send_shared_info(int xc_handle, int io_fd, shared_info_t *live_shinfo)
201 {
202 if (write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
203 ERROR("Error when writing to state file (1)");
204 return -1;
205 }
206 return 0;
207 }
209 static int
210 xc_ia64_send_vcpumap(int xc_handle, int io_fd, uint32_t dom,
211 const xc_dominfo_t *info, uint64_t max_virt_cpus,
212 uint64_t **vcpumapp)
213 {
214 int rc = -1;
215 unsigned int i;
216 unsigned long vcpumap_size;
217 uint64_t *vcpumap = NULL;
219 vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) /
220 sizeof(vcpumap[0]);
221 vcpumap = malloc(vcpumap_size);
222 if (vcpumap == NULL) {
223 ERROR("memory alloc for vcpumap");
224 goto out;
225 }
226 memset(vcpumap, 0, vcpumap_size);
228 for (i = 0; i <= info->max_vcpu_id; i++) {
229 xc_vcpuinfo_t vinfo;
230 if ((xc_vcpu_getinfo(xc_handle, dom, i, &vinfo) == 0) && vinfo.online)
231 __set_bit(i, vcpumap);
232 }
234 if (write_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) {
235 ERROR("write max_virt_cpus");
236 goto out;
237 }
239 if (write_exact(io_fd, vcpumap, vcpumap_size)) {
240 ERROR("write vcpumap");
241 goto out;
242 }
244 rc = 0;
246 out:
247 if (rc != 0 && vcpumap != NULL) {
248 free(vcpumap);
249 vcpumap = NULL;
250 }
251 *vcpumapp = vcpumap;
252 return rc;
253 }
256 static int
257 xc_ia64_pv_send_context(int xc_handle, int io_fd, uint32_t dom,
258 const xc_dominfo_t *info, shared_info_t *live_shinfo)
259 {
260 int rc = -1;
261 unsigned int i;
263 /* vcpu map */
264 uint64_t *vcpumap = NULL;
265 if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, MAX_VIRT_CPUS,
266 &vcpumap))
267 goto out;
269 /* vcpu context */
270 for (i = 0; i <= info->max_vcpu_id; i++) {
271 /* A copy of the CPU context of the guest. */
272 vcpu_guest_context_t ctxt;
273 char *mem;
275 if (!__test_bit(i, vcpumap))
276 continue;
278 if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, i, &ctxt))
279 goto out;
281 mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
282 PROT_READ|PROT_WRITE, ctxt.privregs_pfn);
283 if (mem == NULL) {
284 ERROR("cannot map privreg page");
285 goto out;
286 }
287 if (write_exact(io_fd, mem, PAGE_SIZE)) {
288 ERROR("Error when writing privreg to state file (5)");
289 munmap(mem, PAGE_SIZE);
290 goto out;
291 }
292 munmap(mem, PAGE_SIZE);
293 }
295 rc = xc_ia64_send_shared_info(xc_handle, io_fd, live_shinfo);
297 out:
298 if (vcpumap != NULL)
299 free(vcpumap);
300 return rc;
301 }
303 static int
304 xc_ia64_hvm_send_context(int xc_handle, int io_fd, uint32_t dom,
305 const xc_dominfo_t *info, shared_info_t *live_shinfo)
306 {
307 int rc = -1;
308 unsigned int i;
310 /* vcpu map */
311 uint64_t *vcpumap = NULL;
313 /* HVM: magic frames for ioreqs and xenstore comms */
314 const int hvm_params[] = {
315 HVM_PARAM_STORE_PFN,
316 HVM_PARAM_IOREQ_PFN,
317 HVM_PARAM_BUFIOREQ_PFN,
318 HVM_PARAM_BUFPIOREQ_PFN,
319 };
320 const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
321 /* ioreq_pfn, bufioreq_pfn, store_pfn */
322 uint64_t magic_pfns[NR_PARAMS];
324 /* HVM: a buffer for holding HVM contxt */
325 uint64_t rec_size;
326 uint64_t hvm_buf_size = 0;
327 uint8_t *hvm_buf = NULL;
329 if (xc_ia64_send_shared_info(xc_handle, io_fd, live_shinfo))
330 return -1;
332 /* vcpu map */
333 if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, MAX_VIRT_CPUS,
334 &vcpumap))
335 goto out;
337 /* vcpu context */
338 for (i = 0; i <= info->max_vcpu_id; i++) {
339 /* A copy of the CPU context of the guest. */
340 vcpu_guest_context_t ctxt;
342 if (!__test_bit(i, vcpumap))
343 continue;
345 if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, i, &ctxt))
346 goto out;
348 /* system context of vcpu is sent as hvm context. */
349 }
351 /* Save magic-page locations. */
352 memset(magic_pfns, 0, sizeof(magic_pfns));
353 for (i = 0; i < NR_PARAMS; i++) {
354 if (xc_get_hvm_param(xc_handle, dom, hvm_params[i], &magic_pfns[i])) {
355 PERROR("Error when xc_get_hvm_param");
356 goto out;
357 }
358 }
360 if (write_exact(io_fd, magic_pfns, sizeof(magic_pfns))) {
361 ERROR("Error when writing to state file (7)");
362 goto out;
363 }
365 /* Need another buffer for HVM context */
366 hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
367 if (hvm_buf_size == -1) {
368 ERROR("Couldn't get HVM context size from Xen");
369 goto out;
370 }
372 hvm_buf = malloc(hvm_buf_size);
373 if (!hvm_buf) {
374 ERROR("Couldn't allocate memory");
375 goto out;
376 }
378 /* Get HVM context from Xen and save it too */
379 rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, hvm_buf_size);
380 if (rec_size == -1) {
381 ERROR("HVM:Could not get hvm buffer");
382 goto out;
383 }
385 if (write_exact(io_fd, &rec_size, sizeof(rec_size))) {
386 ERROR("error write hvm buffer size");
387 goto out;
388 }
390 if (write_exact(io_fd, hvm_buf, rec_size)) {
391 ERROR("write HVM info failed!\n");
392 goto out;
393 }
395 rc = 0;
396 out:
397 if (hvm_buf != NULL)
398 free(hvm_buf);
399 if (vcpumap != NULL)
400 free(vcpumap);
401 return rc;
402 }
/*
 * Save the state of domain 'dom' through 'io_fd'.
 *
 * Wire layout produced here (read back by xc_linux_restore):
 *   p2m_size, format version, arch_setup domctl payload,
 *   memmap_info_num_pages + memmap_info, a stream of (gpfn, page) pairs
 *   terminated by INVALID_MFN, the unallocated-PFN list, then the
 *   PV or HVM tail written by the helpers above.
 *
 * 'live' (XCFLAGS_LIVE) selects iterative pre-copy using the shadow
 * log-dirty bitmap; otherwise the domain is suspended up front and the
 * memory is sent in a single pass.  'suspend' is the callback used to
 * suspend the domain; for HVM, init_qemu_maps/qemu_flip_buffer manage
 * qemu-dm's shared dirty bitmaps.
 *
 * Returns 0 on success, 1 on failure (note: !!rc at the end).
 */
int
xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
               uint32_t max_factor, uint32_t flags, int (*suspend)(int),
               int hvm, void *(*init_qemu_maps)(int, unsigned),
               void (*qemu_flip_buffer)(int, int))
{
    DECLARE_DOMCTL;
    xc_dominfo_t info;

    int rc = 1;

    int debug = (flags & XCFLAGS_DEBUG);
    int live = (flags & XCFLAGS_LIVE);

    /* The new domain's shared-info frame number. */
    unsigned long shared_info_frame;

    /* Live mapping of shared info structure */
    shared_info_t *live_shinfo = NULL;

    /* Iteration number. */
    int iter;

    /* Number of pages sent in the last iteration (live only). */
    unsigned int sent_last_iter;

    /* Number of pages sent (live only). */
    unsigned int total_sent;

    /* total number of pages used by the current guest */
    unsigned long p2m_size;

    /* Size of the shadow bitmap (live only). */
    unsigned int bitmap_size = 0;

    /* True if last iteration. */
    int last_iter;

    /* Bitmap of pages to be sent. */
    unsigned long *to_send = NULL;
    /* Bitmap of pages not to be sent (because dirtied). */
    unsigned long *to_skip = NULL;

    char *mem;

    /* HVM: shared-memory bitmaps for getting log-dirty bits from qemu-dm */
    unsigned long *qemu_bitmaps[2];
    int qemu_active = 0;
    int qemu_non_active = 1;

    /* for foreign p2m exposure */
    unsigned int memmap_info_num_pages;
    unsigned long memmap_size = 0;
    xen_ia64_memmap_info_t *memmap_info_live = NULL;
    xen_ia64_memmap_info_t *memmap_info = NULL;
    void *memmap_desc_start;
    void *memmap_desc_end;
    void *p;
    efi_memory_desc_t *md;
    struct xen_ia64_p2m_table p2m_table;
    xc_ia64_p2m_init(&p2m_table);

    if (debug)
        fprintf(stderr, "xc_linux_save (ia64): started dom=%d\n", dom);

    /* If no explicit control parameters given, use defaults */
    if (!max_iters)
        max_iters = DEF_MAX_ITERS;
    if (!max_factor)
        max_factor = DEF_MAX_FACTOR;

    //initialize_mbit_rate();

    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        return 1;
    }

    shared_info_frame = info.shared_info_frame;

#if 0
    /* cheesy sanity check */
    if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
        ERROR("Invalid state record -- pfn count out of range: %lu",
              (info.max_memkb >> (PAGE_SHIFT - 10)));
        goto out;
    }
#endif

    /* Map the shared info frame */
    live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                       PROT_READ, shared_info_frame);
    if (!live_shinfo) {
        ERROR("Couldn't map live_shinfo");
        goto out;
    }

    /* Highest gpfn + 1 == number of p2m entries to cover. */
    p2m_size = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom) + 1;

    /* This is expected by xm restore. */
    if (write_exact(io_fd, &p2m_size, sizeof(unsigned long))) {
        ERROR("write: p2m_size");
        goto out;
    }

    /* xc_linux_restore starts to read here. */
    /* Write a version number. This can avoid searching for a stupid bug
       if the format change.
       The version is hard-coded, don't forget to change the restore code
       too! */
    {
        unsigned long version = XC_IA64_SR_FORMAT_VER_CURRENT;

        if (write_exact(io_fd, &version, sizeof(unsigned long))) {
            ERROR("write: version");
            goto out;
        }
    }

    /* Query the domain's arch setup and forward it verbatim so the
       restore side can recreate the same configuration. */
    domctl.cmd = XEN_DOMCTL_arch_setup;
    domctl.domain = (domid_t)dom;
    domctl.u.arch_setup.flags = XEN_DOMAINSETUP_query;
    if (xc_domctl(xc_handle, &domctl) < 0) {
        ERROR("Could not get domain setup");
        goto out;
    }
    if (write_exact(io_fd, &domctl.u.arch_setup,
                    sizeof(domctl.u.arch_setup))) {
        ERROR("write: domain setup");
        goto out;
    }

    /* Domain is still running at this point */
    if (live) {
        /* Live migration: turn on log-dirty tracking and prepare the
           to_send/to_skip bitmaps (one bit per gpfn). */
        if (xc_shadow_control(xc_handle, dom,
                              XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                              NULL, 0, NULL, 0, NULL ) < 0) {
            ERROR("Couldn't enable shadow mode");
            goto out;
        }

        last_iter = 0;

        /* p2m_size bits, rounded up to a whole number of longs. */
        bitmap_size = ((p2m_size + BITS_PER_LONG-1) & ~(BITS_PER_LONG-1)) / 8;
        to_send = malloc(bitmap_size);
        to_skip = malloc(bitmap_size);

        if (!to_send || !to_skip) {
            ERROR("Couldn't allocate bitmap array");
            goto out;
        }

        /* Initially all the pages must be sent. */
        memset(to_send, 0xff, bitmap_size);

        /* Pin the bitmaps so the hypervisor can write them safely. */
        if (lock_pages(to_send, bitmap_size)) {
            ERROR("Unable to lock_pages to_send");
            goto out;
        }
        if (lock_pages(to_skip, bitmap_size)) {
            ERROR("Unable to lock_pages to_skip");
            goto out;
        }

        if (hvm) {
            /* Get qemu-dm logging dirty pages too */
            void *seg = init_qemu_maps(dom, bitmap_size);
            qemu_bitmaps[0] = seg;
            qemu_bitmaps[1] = seg + bitmap_size;
            qemu_active = 0;
            qemu_non_active = 1;
        }
    } else {

        /* This is a non-live suspend. Issue the call back to get the
           domain suspended */

        last_iter = 1;

        if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
            ERROR("Domain appears not to have suspended");
            goto out;
        }

    }

    /* Snapshot the guest's memmap info (copied out of a live mapping so
       later passes see a stable view). */
    memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
    memmap_size = PAGE_SIZE * memmap_info_num_pages;
    memmap_info_live = xc_map_foreign_range(xc_handle, info.domid,
                                            memmap_size, PROT_READ,
                                            live_shinfo->arch.memmap_info_pfn);
    if (memmap_info_live == NULL) {
        PERROR("Could not map memmap info.");
        goto out;
    }
    memmap_info = malloc(memmap_size);
    if (memmap_info == NULL) {
        PERROR("Could not allocate memmap info memory");
        goto out;
    }
    memcpy(memmap_info, memmap_info_live, memmap_size);
    munmap(memmap_info_live, memmap_size);
    memmap_info_live = NULL;

    if (xc_ia64_p2m_map(&p2m_table, xc_handle, dom, memmap_info, 0) < 0) {
        PERROR("xc_ia64_p2m_map");
        goto out;
    }
    if (write_exact(io_fd,
                    &memmap_info_num_pages, sizeof(memmap_info_num_pages))) {
        PERROR("write: arch.memmap_info_num_pages");
        goto out;
    }
    if (write_exact(io_fd, memmap_info, memmap_size)) {
        PERROR("write: memmap_info");
        goto out;
    }

    sent_last_iter = p2m_size;
    total_sent = 0;

    /* Pre-copy loop: iterate until the last (post-suspend) pass. */
    for (iter = 1; ; iter++) {
        unsigned int sent_this_iter, skip_this_iter;
        unsigned long N;

        sent_this_iter = 0;
        skip_this_iter = 0;

        /* Dirtied pages won't be saved.
           slightly wasteful to peek the whole array evey time,
           but this is fast enough for the moment. */
        if (!last_iter) {
            if (xc_shadow_control(xc_handle, dom,
                                  XEN_DOMCTL_SHADOW_OP_PEEK,
                                  to_skip, p2m_size,
                                  NULL, 0, NULL) != p2m_size) {
                ERROR("Error peeking shadow bitmap");
                goto out;
            }
        }

        /* Start writing out the saved-domain record. */
        memmap_desc_start = &memmap_info->memdesc;
        memmap_desc_end = memmap_desc_start + memmap_info->efi_memmap_size;
        for (p = memmap_desc_start;
             p < memmap_desc_end;
             p += memmap_info->efi_memdesc_size) {
            md = p;
            if (md_is_not_ram(md))
                continue;

            for (N = md->phys_addr >> PAGE_SHIFT;
                 N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
                     PAGE_SHIFT;
                 N++) {

                if (!xc_ia64_p2m_allocated(&p2m_table, N))
                    continue;

                if (!last_iter) {
                    /* Skip pages dirtied again since the peek (they will
                       be caught by a later iteration). */
                    if (test_bit(N, to_skip) && test_bit(N, to_send))
                        skip_this_iter++;
                    if (test_bit(N, to_skip) || !test_bit(N, to_send))
                        continue;
                } else if (live) {
                    if (!test_bit(N, to_send))
                        continue;
                }

                if (debug)
                    fprintf(stderr, "xc_linux_save: page %lx (%lu/%lu)\n",
                            xc_ia64_p2m_mfn(&p2m_table, N),
                            N, p2m_size);

                mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                           PROT_READ|PROT_WRITE, N);
                if (mem == NULL) {
                    /* The page may have moved.
                       It will be re-marked dirty.
                       FIXME: to be tracked. */
                    fprintf(stderr, "cannot map mfn page %lx gpfn %lx: %s\n",
                            xc_ia64_p2m_mfn(&p2m_table, N),
                            N, safe_strerror(errno));
                    continue;
                }

                /* Record format: gpfn followed by the page contents. */
                if (write_exact(io_fd, &N, sizeof(N))) {
                    ERROR("write: p2m_size");
                    munmap(mem, PAGE_SIZE);
                    goto out;
                }

                if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) {
                    ERROR("Error when writing to state file (5)");
                    munmap(mem, PAGE_SIZE);
                    goto out;
                }
                munmap(mem, PAGE_SIZE);
                sent_this_iter++;
                total_sent++;
            }
        }

        if (last_iter)
            break;

        DPRINTF(" %d: sent %d, skipped %d\n",
                iter, sent_this_iter, skip_this_iter );

        if (live) {
            /* Decide whether the next pass is the final one: iteration
               budget exhausted, little left to send, or too much sent. */
            if ( /* ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) || */
                (iter >= max_iters) || (sent_this_iter+skip_this_iter < 50) ||
                (total_sent > p2m_size*max_factor)) {
                DPRINTF("Start last iteration\n");
                last_iter = 1;

                if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
                    ERROR("Domain appears not to have suspended");
                    goto out;
                }
            }

            /* Pages to be sent are pages which were dirty. */
            if (xc_shadow_control(xc_handle, dom,
                                  XEN_DOMCTL_SHADOW_OP_CLEAN,
                                  to_send, p2m_size,
                                  NULL, 0, NULL ) != p2m_size) {
                ERROR("Error flushing shadow PT");
                goto out;
            }

            if (hvm) {
                unsigned int j;
                /* Pull in the dirty bits from qemu-dm too */
                if (!last_iter) {
                    /* Swap qemu's double buffer and merge the retired one. */
                    qemu_active = qemu_non_active;
                    qemu_non_active = qemu_active ? 0 : 1;
                    qemu_flip_buffer(dom, qemu_active);
                    for (j = 0; j < bitmap_size / sizeof(unsigned long); j++) {
                        to_send[j] |= qemu_bitmaps[qemu_non_active][j];
                        qemu_bitmaps[qemu_non_active][j] = 0;
                    }
                } else {
                    for (j = 0; j < bitmap_size / sizeof(unsigned long); j++)
                        to_send[j] |= qemu_bitmaps[qemu_active][j];
                }
            }

            sent_last_iter = sent_this_iter;

            //print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
        }
    }

    fprintf(stderr, "All memory is saved\n");

    /* terminate */
    {
        /* INVALID_MFN marks the end of the (gpfn, page) stream. */
        unsigned long pfn = INVALID_MFN;
        if (write_exact(io_fd, &pfn, sizeof(pfn))) {
            ERROR("Error when writing to state file (6)");
            goto out;
        }
    }

    if (xc_ia64_send_unallocated_list(xc_handle, io_fd, &p2m_table,
                                      memmap_info,
                                      memmap_desc_start, memmap_desc_end))
        goto out;

    if (!hvm)
        rc = xc_ia64_pv_send_context(xc_handle, io_fd,
                                     dom, &info, live_shinfo);
    else
        rc = xc_ia64_hvm_send_context(xc_handle, io_fd,
                                      dom, &info, live_shinfo);
    if (rc)
        goto out;

    /* Success! */
    rc = 0;

 out:

    /* Best-effort teardown: disable log-dirty, release bitmaps and
       mappings regardless of how we got here. */
    if (live) {
        if (xc_shadow_control(xc_handle, dom,
                              XEN_DOMCTL_SHADOW_OP_OFF,
                              NULL, 0, NULL, 0, NULL ) < 0) {
            DPRINTF("Warning - couldn't disable shadow mode");
        }
    }

    unlock_pages(to_send, bitmap_size);
    free(to_send);
    unlock_pages(to_skip, bitmap_size);
    free(to_skip);
    if (live_shinfo)
        munmap(live_shinfo, PAGE_SIZE);
    if (memmap_info_live)
        munmap(memmap_info_live, memmap_size);
    if (memmap_info)
        free(memmap_info);
    xc_ia64_p2m_unmap(&p2m_table);

    fprintf(stderr,"Save exit rc=%d\n",rc);

    return !!rc;
}
814 /*
815 * Local variables:
816 * mode: C
817 * c-set-style: "BSD"
818 * c-basic-offset: 4
819 * tab-width: 4
820 * indent-tabs-mode: nil
821 * End:
822 */