debuggers.hg

view tools/libxc/ia64/xc_ia64_linux_save.c @ 16408:f669bf5c6720

libxc: Consolidate read()/write() syscall wrappers to read/write an
exact number of bytes. The consolidated versions are more watertight
than the various versions previously distributed around the library
source code.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Sun Nov 11 18:22:33 2007 +0000 (2007-11-11)
parents df5b49037c77
children e83d2b840e88
line source
1 /******************************************************************************
2 * xc_ia64_linux_save.c
3 *
4 * Save the state of a running Linux session.
5 *
6 * Copyright (c) 2003, K A Fraser.
7 * Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net>
8 *
9 * Copyright (c) 2007 Isaku Yamahata <yamahata@valinux.co.jp>
10 * Use foreign p2m exposure.
11 * VTi domain support.
12 */
14 #include <inttypes.h>
15 #include <time.h>
16 #include <stdlib.h>
17 #include <unistd.h>
18 #include <sys/time.h>
20 #include "xg_private.h"
21 #include "xc_ia64.h"
22 #include "xc_ia64_save_restore.h"
23 #include "xc_efi.h"
24 #include "xen/hvm/params.h"
26 /*
27 ** Default values for important tuning parameters. Can override by passing
28 ** non-zero replacement values to xc_linux_save().
29 **
30 ** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
31 **
32 */
33 #define DEF_MAX_ITERS (4 - 1) /* limit us to 4 times round loop */
34 #define DEF_MAX_FACTOR 3 /* never send more than 3x nr_pfns */
36 /*
37 ** During (live) save/migrate, we maintain a number of bitmaps to track
38 ** which pages we have to send, and to skip.
39 */
/* Return 1 if bit 'nr' is set in the bitmap at 'addr', else 0. */
static inline int test_bit(int nr, volatile void * addr)
{
    return 1 & (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr));
}
45 static inline void clear_bit(int nr, volatile void * addr)
46 {
47 BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
48 }
50 static inline void set_bit(int nr, volatile void * addr)
51 {
52 BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
53 }
/*
 * Thin wrapper around xc_shadow_control() that first touches every page of
 * the dirty bitmap so it is resident in the translation cache (TC) before
 * the hypercall walks it.
 *
 * Returns whatever xc_shadow_control() returns.
 */
static int xc_ia64_shadow_control(int xc_handle,
                                  uint32_t domid,
                                  unsigned int sop,
                                  unsigned long *dirty_bitmap,
                                  unsigned long pages,
                                  xc_shadow_op_stats_t *stats)
{
    if (dirty_bitmap != NULL && pages > 0) {
        int i;
        unsigned char *bmap = (unsigned char *)dirty_bitmap;
        /* Bitmap length in bytes: 'pages' bits rounded up to a whole
           number of longs, then divided down to bytes. */
        unsigned long bmap_bytes =
            ((pages + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1)) / 8;
        unsigned int bmap_pages = (bmap_bytes + PAGE_SIZE - 1) / PAGE_SIZE;

        /* Touch the page so that it is in the TC.
           FIXME: use a more reliable method. */
        for (i = 0 ; i < bmap_pages ; i++)
            bmap[i * PAGE_SIZE] = 0;
        /* Because bmap is not page aligned (allocated by malloc), be sure the
           last page is touched. */
        bmap[bmap_bytes - 1] = 0;
    }

    return xc_shadow_control(xc_handle, domid, sop,
                             dirty_bitmap, pages, NULL, 0, stats);
}
/*
 * Ask the domain to suspend itself via the 'suspend' callback, then poll
 * domain info until it reports SHUTDOWN_suspend.
 *
 * Returns 0 once the domain has suspended, -1 on failure (suspend request
 * refused, getinfo failure, or ~100 polls at 10ms without suspending).
 */
static int
suspend_and_state(int (*suspend)(int), int xc_handle, int io_fd,
                  int dom, xc_dominfo_t *info)
{
    int i = 0;                  /* retry counter for the non-paused path */

    if (!(*suspend)(dom)) {
        ERROR("Suspend request failed");
        return -1;
    }

retry:

    if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) {
        ERROR("Could not get domain info");
        return -1;
    }

    if (info->shutdown && info->shutdown_reason == SHUTDOWN_suspend)
        return 0; // success

    if (info->paused) {
        // try unpausing domain, wait, and retest
        /* NOTE(review): this path does not increment 'i', so a domain that
           stays paused forever makes this loop unbounded — confirm whether
           that is intentional. */
        xc_domain_unpause(xc_handle, dom);

        ERROR("Domain was paused. Wait and re-test.");
        usleep(10000); // 10ms

        goto retry;
    }

    /* Not yet suspended and not paused: wait and retry up to ~100 times. */
    if(++i < 100) {
        ERROR("Retry suspend domain.");
        usleep(10000); // 10ms
        goto retry;
    }

    ERROR("Unable to suspend domain.");

    return -1;
}
125 static inline int
126 md_is_not_ram(const efi_memory_desc_t *md)
127 {
128 return ((md->type != EFI_CONVENTIONAL_MEMORY) ||
129 (md->attribute != EFI_MEMORY_WB) ||
130 (md->num_pages == 0));
131 }
/*
 * Send through a list of all the PFNs that were not in the map at the close.
 * We send the pages that were allocated.  However, the balloon driver may
 * have freed some of them after they were sent, so we have to re-check for
 * freed pages after pausing the domain.
 */
/*
 * Two-pass scan of the EFI memory map [memmap_desc_start, memmap_desc_end):
 * pass 1 counts the RAM PFNs that are not allocated in the p2m table and
 * writes the count, pass 2 streams the PFNs themselves in pfntab[]-sized
 * batches.  The restore side reads the count first so it knows how many
 * PFNs follow.
 *
 * Returns 0 on success, -1 on write failure.
 */
static int
xc_ia64_send_unallocated_list(int xc_handle, int io_fd,
                              struct xen_ia64_p2m_table *p2m_table,
                              xen_ia64_memmap_info_t *memmap_info,
                              void *memmap_desc_start, void *memmap_desc_end)
{
    void *p;
    efi_memory_desc_t *md;

    unsigned long N;
    unsigned long pfntab[1024];     /* batch buffer for unallocated PFNs */
    unsigned int j;

    /* Pass 1: count unallocated RAM pages. */
    j = 0;
    for (p = memmap_desc_start;
         p < memmap_desc_end;
         p += memmap_info->efi_memdesc_size) {
        md = p;

        if (md_is_not_ram(md))
            continue;

        /* Walk every guest PFN covered by this RAM descriptor. */
        for (N = md->phys_addr >> PAGE_SHIFT;
             N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
                 PAGE_SHIFT;
             N++) {
            if (!xc_ia64_p2m_allocated(p2m_table, N))
                j++;
        }
    }
    if (write_exact(io_fd, &j, sizeof(unsigned int))) {
        ERROR("Error when writing to state file (6a)");
        return -1;
    }

    /* Pass 2: emit the PFNs, flushing whenever the batch buffer fills. */
    j = 0;
    for (p = memmap_desc_start;
         p < memmap_desc_end;
         p += memmap_info->efi_memdesc_size) {
        md = p;

        if (md_is_not_ram(md))
            continue;

        for (N = md->phys_addr >> PAGE_SHIFT;
             N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
                 PAGE_SHIFT;
             N++) {
            if (!xc_ia64_p2m_allocated(p2m_table, N))
                pfntab[j++] = N;
            if (j == sizeof(pfntab)/sizeof(pfntab[0])) {
                if (write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
                    ERROR("Error when writing to state file (6b)");
                    return -1;
                }
                j = 0;
            }
        }
    }
    /* Flush the final, partially filled batch. */
    if (j > 0) {
        if (write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
            ERROR("Error when writing to state file (6c)");
            return -1;
        }
    }

    return 0;
}
208 static int
209 xc_ia64_send_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
210 uint32_t vcpu, vcpu_guest_context_t *ctxt)
211 {
212 if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt)) {
213 ERROR("Could not get vcpu context");
214 return -1;
215 }
217 if (write_exact(io_fd, ctxt, sizeof(*ctxt))) {
218 ERROR("Error when writing to state file (1)");
219 return -1;
220 }
222 fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt->regs.ip, ctxt->regs.b[0]);
223 return 0;
224 }
226 static int
227 xc_ia64_send_shared_info(int xc_handle, int io_fd, shared_info_t *live_shinfo)
228 {
229 if (write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
230 ERROR("Error when writing to state file (1)");
231 return -1;
232 }
233 return 0;
234 }
236 static int
237 xc_ia64_pv_send_context(int xc_handle, int io_fd, uint32_t dom,
238 shared_info_t *live_shinfo)
239 {
240 /* A copy of the CPU context of the guest. */
241 vcpu_guest_context_t ctxt;
242 char *mem;
244 if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, 0, &ctxt))
245 return -1;
247 mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
248 PROT_READ|PROT_WRITE, ctxt.privregs_pfn);
249 if (mem == NULL) {
250 ERROR("cannot map privreg page");
251 return -1;
252 }
253 if (write_exact(io_fd, mem, PAGE_SIZE)) {
254 ERROR("Error when writing privreg to state file (5)");
255 munmap(mem, PAGE_SIZE);
256 return -1;
257 }
258 munmap(mem, PAGE_SIZE);
260 if (xc_ia64_send_shared_info(xc_handle, io_fd, live_shinfo))
261 return -1;
263 return 0;
264 }
266 static int
267 xc_ia64_hvm_send_context(int xc_handle, int io_fd, uint32_t dom,
268 const xc_dominfo_t *info, shared_info_t *live_shinfo)
269 {
270 int rc = -1;
271 unsigned int i;
273 /* vcpu map */
274 uint64_t max_virt_cpus;
275 unsigned long vcpumap_size;
276 uint64_t *vcpumap = NULL;
278 /* HVM: magic frames for ioreqs and xenstore comms */
279 const int hvm_params[] = {
280 HVM_PARAM_STORE_PFN,
281 HVM_PARAM_IOREQ_PFN,
282 HVM_PARAM_BUFIOREQ_PFN,
283 HVM_PARAM_BUFPIOREQ_PFN,
284 };
285 const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
286 /* ioreq_pfn, bufioreq_pfn, store_pfn */
287 uint64_t magic_pfns[NR_PARAMS];
289 /* HVM: a buffer for holding HVM contxt */
290 uint64_t rec_size;
291 uint64_t hvm_buf_size = 0;
292 uint8_t *hvm_buf = NULL;
294 if (xc_ia64_send_shared_info(xc_handle, io_fd, live_shinfo))
295 return -1;
297 /* vcpu map */
298 max_virt_cpus = MAX_VIRT_CPUS;
299 vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) /
300 sizeof(vcpumap[0]);
301 vcpumap = malloc(vcpumap_size);
302 if (vcpumap == NULL) {
303 ERROR("memory alloc for vcpumap");
304 goto out;
305 }
306 memset(vcpumap, 0, vcpumap_size);
308 for (i = 0; i <= info->max_vcpu_id; i++) {
309 xc_vcpuinfo_t vinfo;
310 if ((xc_vcpu_getinfo(xc_handle, dom, i, &vinfo) == 0) && vinfo.online)
311 __set_bit(i, vcpumap);
312 }
314 if (write_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) {
315 ERROR("write max_virt_cpus");
316 goto out;
317 }
319 if (write_exact(io_fd, vcpumap, vcpumap_size)) {
320 ERROR("write vcpumap");
321 goto out;
322 }
324 /* vcpu context */
325 for (i = 0; i <= info->max_vcpu_id; i++) {
326 /* A copy of the CPU context of the guest. */
327 vcpu_guest_context_t ctxt;
329 if (!__test_bit(i, vcpumap))
330 continue;
332 if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, i, &ctxt))
333 goto out;
335 // system context of vcpu is sent as hvm context.
336 }
338 /* Save magic-page locations. */
339 memset(magic_pfns, 0, sizeof(magic_pfns));
340 for (i = 0; i < NR_PARAMS; i++) {
341 if (xc_get_hvm_param(xc_handle, dom, hvm_params[i], &magic_pfns[i])) {
342 PERROR("Error when xc_get_hvm_param");
343 goto out;
344 }
345 }
347 if (write_exact(io_fd, magic_pfns, sizeof(magic_pfns))) {
348 ERROR("Error when writing to state file (7)");
349 goto out;
350 }
352 /* Need another buffer for HVM context */
353 hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
354 if (hvm_buf_size == -1) {
355 ERROR("Couldn't get HVM context size from Xen");
356 goto out;
357 }
359 hvm_buf = malloc(hvm_buf_size);
360 if (!hvm_buf) {
361 ERROR("Couldn't allocate memory");
362 goto out;
363 }
365 /* Get HVM context from Xen and save it too */
366 rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, hvm_buf_size);
367 if (rec_size == -1) {
368 ERROR("HVM:Could not get hvm buffer");
369 goto out;
370 }
372 if (write_exact(io_fd, &rec_size, sizeof(rec_size))) {
373 ERROR("error write hvm buffer size");
374 goto out;
375 }
377 if (write_exact(io_fd, hvm_buf, rec_size)) {
378 ERROR("write HVM info failed!\n");
379 goto out;
380 }
382 rc = 0;
383 out:
384 if (hvm_buf != NULL)
385 free(hvm_buf);
386 if (vcpumap != NULL)
387 free(vcpumap);
388 return rc;
389 }
/*
 * Save the state of domain 'dom' to io_fd (ia64 format).
 *
 * Wire layout produced here (must match xc_ia64_linux_restore):
 *   p2m_size, format version, arch_setup domctl payload,
 *   memmap_info_num_pages + memmap_info, then for each iteration a stream
 *   of (pfn, page) pairs terminated by INVALID_MFN, the unallocated-PFN
 *   list, and finally the PV or HVM context tail.
 *
 * 'live' (XCFLAGS_LIVE) selects iterative pre-copy using the shadow
 * log-dirty bitmap; otherwise the domain is suspended up front and a
 * single pass is made.  'suspend' is the callback used to suspend the
 * domain; for HVM, init_qemu_maps/qemu_flip_buffer give access to
 * qemu-dm's dirty bitmaps.
 *
 * Returns 0 on success, non-zero on failure.
 */
int
xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
               uint32_t max_factor, uint32_t flags, int (*suspend)(int),
               int hvm, void *(*init_qemu_maps)(int, unsigned),
               void (*qemu_flip_buffer)(int, int))
{
    DECLARE_DOMCTL;
    xc_dominfo_t info;

    int rc = 1;

    int debug = (flags & XCFLAGS_DEBUG);
    int live = (flags & XCFLAGS_LIVE);

    /* The new domain's shared-info frame number. */
    unsigned long shared_info_frame;

    /* Live mapping of shared info structure */
    shared_info_t *live_shinfo = NULL;

    /* Iteration number. */
    int iter;

    /* Number of pages sent in the last iteration (live only). */
    unsigned int sent_last_iter;

    /* Number of pages sent (live only). */
    unsigned int total_sent;

    /* total number of pages used by the current guest */
    unsigned long p2m_size;

    /* Size of the shadow bitmap (live only). */
    unsigned int bitmap_size = 0;

    /* True if last iteration. */
    int last_iter;

    /* Bitmap of pages to be sent. */
    unsigned long *to_send = NULL;
    /* Bitmap of pages not to be sent (because dirtied). */
    unsigned long *to_skip = NULL;

    char *mem;

    /* HVM: shared-memory bitmaps for getting log-dirty bits from qemu-dm */
    unsigned long *qemu_bitmaps[2];
    int qemu_active = 0;
    int qemu_non_active = 1;

    /* for foreign p2m exposure */
    unsigned int memmap_info_num_pages;
    unsigned long memmap_size = 0;
    xen_ia64_memmap_info_t *memmap_info_live = NULL;
    xen_ia64_memmap_info_t *memmap_info = NULL;
    void *memmap_desc_start;
    void *memmap_desc_end;
    void *p;
    efi_memory_desc_t *md;
    struct xen_ia64_p2m_table p2m_table;
    xc_ia64_p2m_init(&p2m_table);

    if (debug)
        fprintf(stderr, "xc_linux_save (ia64): started dom=%d\n", dom);

    /* If no explicit control parameters given, use defaults */
    if (!max_iters)
        max_iters = DEF_MAX_ITERS;
    if (!max_factor)
        max_factor = DEF_MAX_FACTOR;

    //initialize_mbit_rate();

    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        return 1;
    }

    shared_info_frame = info.shared_info_frame;

#if 0
    /* cheesy sanity check */
    if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
        ERROR("Invalid state record -- pfn count out of range: %lu",
              (info.max_memkb >> (PAGE_SHIFT - 10)));
        goto out;
    }
#endif

    /* Map the shared info frame */
    live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                       PROT_READ, shared_info_frame);
    if (!live_shinfo) {
        ERROR("Couldn't map live_shinfo");
        goto out;
    }

    p2m_size = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom);

    /* This is expected by xm restore. */
    if (write_exact(io_fd, &p2m_size, sizeof(unsigned long))) {
        ERROR("write: p2m_size");
        goto out;
    }

    /* xc_linux_restore starts to read here. */
    /* Write a version number. This can avoid searching for a stupid bug
       if the format change.
       The version is hard-coded, don't forget to change the restore code
       too! */
    {
        unsigned long version = XC_IA64_SR_FORMAT_VER_CURRENT;

        if (write_exact(io_fd, &version, sizeof(unsigned long))) {
            ERROR("write: version");
            goto out;
        }
    }

    /* Query and forward the domain's arch-setup so restore can replay it. */
    domctl.cmd = XEN_DOMCTL_arch_setup;
    domctl.domain = (domid_t)dom;
    domctl.u.arch_setup.flags = XEN_DOMAINSETUP_query;
    if (xc_domctl(xc_handle, &domctl) < 0) {
        ERROR("Could not get domain setup");
        goto out;
    }
    if (write_exact(io_fd, &domctl.u.arch_setup,
                    sizeof(domctl.u.arch_setup))) {
        ERROR("write: domain setup");
        goto out;
    }

    /* Domain is still running at this point */
    if (live) {
        /* Live migration: enable log-dirty and start with every page
           marked to-send. */
        if (xc_ia64_shadow_control(xc_handle, dom,
                                   XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                                   NULL, 0, NULL ) < 0) {
            ERROR("Couldn't enable shadow mode");
            goto out;
        }

        last_iter = 0;

        bitmap_size = ((p2m_size + BITS_PER_LONG-1) & ~(BITS_PER_LONG-1)) / 8;
        to_send = malloc(bitmap_size);
        to_skip = malloc(bitmap_size);

        if (!to_send || !to_skip) {
            ERROR("Couldn't allocate bitmap array");
            goto out;
        }

        /* Initially all the pages must be sent. */
        memset(to_send, 0xff, bitmap_size);

        /* Pin the bitmaps so the hypervisor can write them safely. */
        if (lock_pages(to_send, bitmap_size)) {
            ERROR("Unable to lock_pages to_send");
            goto out;
        }
        if (lock_pages(to_skip, bitmap_size)) {
            ERROR("Unable to lock_pages to_skip");
            goto out;
        }

        if (hvm) {
            /* Get qemu-dm logging dirty pages too */
            void *seg = init_qemu_maps(dom, bitmap_size);
            qemu_bitmaps[0] = seg;
            qemu_bitmaps[1] = seg + bitmap_size;
            qemu_active = 0;
            qemu_non_active = 1;
        }
    } else {

        /* This is a non-live suspend. Issue the call back to get the
           domain suspended */

        last_iter = 1;

        if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
            ERROR("Domain appears not to have suspended");
            goto out;
        }

    }

    /* Snapshot the guest's EFI memory map (copied out of a foreign
       mapping so it stays stable while we iterate). */
    memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
    memmap_size = PAGE_SIZE * memmap_info_num_pages;
    memmap_info_live = xc_map_foreign_range(xc_handle, info.domid,
                                            memmap_size, PROT_READ,
                                            live_shinfo->arch.memmap_info_pfn);
    if (memmap_info_live == NULL) {
        PERROR("Could not map memmap info.");
        goto out;
    }
    memmap_info = malloc(memmap_size);
    if (memmap_info == NULL) {
        PERROR("Could not allocate memmap info memory");
        goto out;
    }
    memcpy(memmap_info, memmap_info_live, memmap_size);
    munmap(memmap_info_live, memmap_size);
    memmap_info_live = NULL;

    if (xc_ia64_p2m_map(&p2m_table, xc_handle, dom, memmap_info, 0) < 0) {
        PERROR("xc_ia64_p2m_map");
        goto out;
    }
    if (write_exact(io_fd,
                    &memmap_info_num_pages, sizeof(memmap_info_num_pages))) {
        PERROR("write: arch.memmap_info_num_pages");
        goto out;
    }
    if (write_exact(io_fd, memmap_info, memmap_size)) {
        PERROR("write: memmap_info");
        goto out;
    }

    sent_last_iter = p2m_size;
    total_sent = 0;

    /* Main pre-copy loop: each iteration sends the pages still marked
       to-send, minus those the PEEK pass says were re-dirtied. */
    for (iter = 1; ; iter++) {
        unsigned int sent_this_iter, skip_this_iter;
        unsigned long N;

        sent_this_iter = 0;
        skip_this_iter = 0;

        /* Dirtied pages won't be saved.
           slightly wasteful to peek the whole array evey time,
           but this is fast enough for the moment. */
        if (!last_iter) {
            if (xc_ia64_shadow_control(xc_handle, dom,
                                       XEN_DOMCTL_SHADOW_OP_PEEK,
                                       to_skip, p2m_size, NULL) != p2m_size) {
                ERROR("Error peeking shadow bitmap");
                goto out;
            }
        }

        /* Start writing out the saved-domain record. */
        memmap_desc_start = &memmap_info->memdesc;
        memmap_desc_end = memmap_desc_start + memmap_info->efi_memmap_size;
        for (p = memmap_desc_start;
             p < memmap_desc_end;
             p += memmap_info->efi_memdesc_size) {
            md = p;
            if (md_is_not_ram(md))
                continue;

            for (N = md->phys_addr >> PAGE_SHIFT;
                 N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
                     PAGE_SHIFT;
                 N++) {

                if (!xc_ia64_p2m_allocated(&p2m_table, N))
                    continue;

                /* Skip pages that were re-dirtied (they will be caught by
                   a later iteration) or were never marked to-send. */
                if (!last_iter) {
                    if (test_bit(N, to_skip) && test_bit(N, to_send))
                        skip_this_iter++;
                    if (test_bit(N, to_skip) || !test_bit(N, to_send))
                        continue;
                }

                if (debug)
                    fprintf(stderr, "xc_linux_save: page %lx (%lu/%lu)\n",
                            xc_ia64_p2m_mfn(&p2m_table, N),
                            N, p2m_size);

                mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                           PROT_READ|PROT_WRITE, N);
                if (mem == NULL) {
                    /* The page may have move.
                       It will be remarked dirty.
                       FIXME: to be tracked. */
                    fprintf(stderr, "cannot map mfn page %lx gpfn %lx: %s\n",
                            xc_ia64_p2m_mfn(&p2m_table, N),
                            N, safe_strerror(errno));
                    continue;
                }

                if (write_exact(io_fd, &N, sizeof(N))) {
                    ERROR("write: p2m_size");
                    munmap(mem, PAGE_SIZE);
                    goto out;
                }

                /* NOTE(review): this is the one raw write() left in the
                   file; a short (partial) write would not be retried,
                   unlike write_exact() used everywhere else — confirm
                   whether it should be write_exact(io_fd, mem, PAGE_SIZE). */
                if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) {
                    ERROR("Error when writing to state file (5)");
                    munmap(mem, PAGE_SIZE);
                    goto out;
                }
                munmap(mem, PAGE_SIZE);
                sent_this_iter++;
                total_sent++;
            }
        }

        if (last_iter)
            break;

        DPRINTF(" %d: sent %d, skipped %d\n",
                iter, sent_this_iter, skip_this_iter );

        if (live) {
            /* Decide whether to stop iterating: iteration cap reached,
               few pages left, or we have sent max_factor times the guest. */
            if ( /* ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) || */
                (iter >= max_iters) || (sent_this_iter+skip_this_iter < 50) ||
                (total_sent > p2m_size*max_factor)) {
                DPRINTF("Start last iteration\n");
                last_iter = 1;

                if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
                    ERROR("Domain appears not to have suspended");
                    goto out;
                }
            }

            /* Pages to be sent are pages which were dirty. */
            if (xc_ia64_shadow_control(xc_handle, dom,
                                       XEN_DOMCTL_SHADOW_OP_CLEAN,
                                       to_send, p2m_size, NULL ) != p2m_size) {
                ERROR("Error flushing shadow PT");
                goto out;
            }

            if (hvm) {
                unsigned int j;
                /* Pull in the dirty bits from qemu-dm too */
                if (!last_iter) {
                    qemu_active = qemu_non_active;
                    qemu_non_active = qemu_active ? 0 : 1;
                    qemu_flip_buffer(dom, qemu_active);
                    for (j = 0; j < bitmap_size / sizeof(unsigned long); j++) {
                        to_send[j] |= qemu_bitmaps[qemu_non_active][j];
                        qemu_bitmaps[qemu_non_active][j] = 0;
                    }
                } else {
                    for (j = 0; j < bitmap_size / sizeof(unsigned long); j++)
                        to_send[j] |= qemu_bitmaps[qemu_active][j];
                }
            }

            sent_last_iter = sent_this_iter;

            //print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
        }
    }

    fprintf(stderr, "All memory is saved\n");

    /* terminate */
    {
        /* INVALID_MFN is the end-of-pages sentinel the restore side
           looks for. */
        unsigned long pfn = INVALID_MFN;
        if (write_exact(io_fd, &pfn, sizeof(pfn))) {
            ERROR("Error when writing to state file (6)");
            goto out;
        }
    }

    if (xc_ia64_send_unallocated_list(xc_handle, io_fd, &p2m_table,
                                      memmap_info,
                                      memmap_desc_start, memmap_desc_end))
        goto out;

    /* Context tail differs between PV and HVM guests. */
    if (!hvm)
        rc = xc_ia64_pv_send_context(xc_handle, io_fd, dom, live_shinfo);
    else
        rc = xc_ia64_hvm_send_context(xc_handle, io_fd,
                                      dom, &info, live_shinfo);
    if (rc)
        goto out;

    /* Success! */
    rc = 0;

out:

    /* Best-effort teardown: turn shadow mode off again for live saves. */
    if (live) {
        if (xc_ia64_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_OFF,
                                   NULL, 0, NULL ) < 0) {
            DPRINTF("Warning - couldn't disable shadow mode");
        }
    }

    unlock_pages(to_send, bitmap_size);
    free(to_send);
    unlock_pages(to_skip, bitmap_size);
    free(to_skip);
    if (live_shinfo)
        munmap(live_shinfo, PAGE_SIZE);
    if (memmap_info_live)
        munmap(memmap_info_live, memmap_size);
    if (memmap_info)
        free(memmap_info);
    xc_ia64_p2m_unmap(&p2m_table);

    fprintf(stderr,"Save exit rc=%d\n",rc);

    return !!rc;
}
794 /*
795 * Local variables:
796 * mode: C
797 * c-set-style: "BSD"
798 * c-basic-offset: 4
799 * tab-width: 4
800 * indent-tabs-mode: nil
801 * End:
802 */