debuggers.hg

view tools/libxc/ia64/xc_ia64_linux_save.c @ 19826:2f9e1348aa98

x86_64: allow more vCPU-s per guest

Since the shared info layout is fixed, guests are required to use
VCPUOP_register_vcpu_info prior to booting any vCPU beyond the
traditional limit of 32.
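
For illustration, a guest-side sketch of the required registration (not
part of this changeset; vcpu_info_for() and virt_to_mfn() are assumed
placeholders for whatever the guest kernel provides):

    /* Register a per-vCPU info area before bringing up vCPU 'cpu'.
     * Mandatory for cpu >= 32, which has no slot in the fixed
     * shared_info layout; the area must not cross a page boundary. */
    static int register_vcpu_info(unsigned int cpu)
    {
        struct vcpu_register_vcpu_info info;
        vcpu_info_t *vi = vcpu_info_for(cpu);

        info.mfn    = virt_to_mfn(vi);
        info.offset = (unsigned long)vi & ~PAGE_MASK;
        return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
    }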

MAX_VIRT_CPUS, being an implementation detail of the hypervisor, is no
longer exposed in the public headers.

The tools changes are clearly incomplete (and done only so things would
build again), and the current state of the tools (using scalar variables
all over the place to represent vCPU bitmaps) very likely doesn't permit
booting DomU-s with more than the traditional number of vCPU-s. Testing
of the extended functionality was done with Dom0 (96 vCPU-s, as well as
128 vCPU-s out of which the kernel elected - by way of a simple kernel
side patch - to use only some, resulting in a sparse bitmap).

The ia64 changes are only to make things build, and are build-tested
only (the tools part only as far as the build would go without
encountering unrelated problems in the blktap code).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 18 10:14:16 2009 +0100 (2009-06-18)
parents c8511a5e9a57
children 49b561416efc
line source
/******************************************************************************
 * xc_ia64_linux_save.c
 *
 * Save the state of a running Linux session.
 *
 * Copyright (c) 2003, K A Fraser.
 *   Rewritten for ia64 by Tristan Gingold <tristan.gingold@bull.net>
 *
 * Copyright (c) 2007 Isaku Yamahata <yamahata@valinux.co.jp>
 *   Use foreign p2m exposure.
 *   VTi domain support.
 */

#include <inttypes.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>

#include "xg_private.h"
#include "xc_ia64.h"
#include "xc_ia64_save_restore.h"
#include "xc_efi.h"
#include "xen/hvm/params.h"

/*
** Default values for important tuning parameters. Can override by passing
** non-zero replacement values to xc_linux_save().
**
** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
**
*/
#define DEF_MAX_ITERS    (4 - 1)   /* limit us to 4 times round loop */
#define DEF_MAX_FACTOR   3         /* never send more than 3x nr_pfns */

/*
** During (live) save/migrate, we maintain a number of bitmaps to track
** which pages we have to send, and to skip.
*/
static inline int test_bit(int nr, volatile void * addr)
{
    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
}

static inline void clear_bit(int nr, volatile void * addr)
{
    BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
}

static inline void set_bit(int nr, volatile void * addr)
{
    BITMAP_ENTRY(nr, addr) |= (1UL << BITMAP_SHIFT(nr));
}
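
/*
 * Invoke the caller-supplied suspend callback, then verify with Xen that
 * the domain has really shut down with reason SHUTDOWN_suspend.
 */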
static int
suspend_and_state(int (*suspend)(void), int xc_handle, int io_fd,
                  int dom, xc_dominfo_t *info)
{
    if (!(*suspend)()) {
        ERROR("Suspend request failed");
        return -1;
    }

    if ( (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) ||
         !info->shutdown || (info->shutdown_reason != SHUTDOWN_suspend) ) {
        ERROR("Could not get domain info");
        return -1;
    }

    return 0;
}

static inline int
md_is_not_ram(const efi_memory_desc_t *md)
{
    return ((md->type != EFI_CONVENTIONAL_MEMORY) ||
            (md->attribute != EFI_MEMORY_WB) ||
            (md->num_pages == 0));
}

/*
 * Send through a list of all the PFNs that were not in the map at the
 * close.  We send the pages which were allocated; however, the balloon
 * driver may have freed some of them after they were sent, so we have to
 * check for freed pages again after pausing the domain.
 */
static int
xc_ia64_send_unallocated_list(int xc_handle, int io_fd,
                              struct xen_ia64_p2m_table *p2m_table,
                              xen_ia64_memmap_info_t *memmap_info,
                              void *memmap_desc_start, void *memmap_desc_end)
{
    void *p;
    efi_memory_desc_t *md;

    unsigned long N;
    unsigned long pfntab[1024];
    unsigned int j;

    j = 0;
    for (p = memmap_desc_start;
         p < memmap_desc_end;
         p += memmap_info->efi_memdesc_size) {
        md = p;

        if (md_is_not_ram(md))
            continue;

        for (N = md->phys_addr >> PAGE_SHIFT;
             N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
                 PAGE_SHIFT;
             N++) {
            if (!xc_ia64_p2m_allocated(p2m_table, N))
                j++;
        }
    }
    if (write_exact(io_fd, &j, sizeof(unsigned int))) {
        ERROR("Error when writing to state file (6a)");
        return -1;
    }

    j = 0;
    for (p = memmap_desc_start;
         p < memmap_desc_end;
         p += memmap_info->efi_memdesc_size) {
        md = p;

        if (md_is_not_ram(md))
            continue;

        for (N = md->phys_addr >> PAGE_SHIFT;
             N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
                 PAGE_SHIFT;
             N++) {
            if (!xc_ia64_p2m_allocated(p2m_table, N))
                pfntab[j++] = N;
            if (j == sizeof(pfntab)/sizeof(pfntab[0])) {
                if (write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
                    ERROR("Error when writing to state file (6b)");
                    return -1;
                }
                j = 0;
            }
        }
    }
    if (j > 0) {
        if (write_exact(io_fd, &pfntab, sizeof(pfntab[0]) * j)) {
            ERROR("Error when writing to state file (6c)");
            return -1;
        }
    }

    return 0;
}
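
/*
 * Fetch one vCPU's context from Xen and write it verbatim to the state
 * file.
 */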
static int
xc_ia64_send_vcpu_context(int xc_handle, int io_fd, uint32_t dom,
                          uint32_t vcpu, vcpu_guest_context_any_t *ctxt_any)
{
    vcpu_guest_context_t *ctxt = &ctxt_any->c;
    if (xc_vcpu_getcontext(xc_handle, dom, vcpu, ctxt_any)) {
        ERROR("Could not get vcpu context");
        return -1;
    }

    if (write_exact(io_fd, ctxt, sizeof(*ctxt))) {
        ERROR("Error when writing to state file (1)");
        return -1;
    }

    fprintf(stderr, "ip=%016lx, b0=%016lx\n", ctxt->regs.ip, ctxt->regs.b[0]);
    return 0;
}

static int
xc_ia64_send_shared_info(int xc_handle, int io_fd, shared_info_t *live_shinfo)
{
    if (write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
        ERROR("Error when writing to state file (1)");
        return -1;
    }
    return 0;
}
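
/*
 * Write the online-vCPU map to the state file: first max_virt_cpus (the
 * width of the map in bits), then the map itself, one bit per vCPU ID.
 * On success the map is also handed back through *vcpumapp; the caller
 * must free it.
 */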
static int
xc_ia64_send_vcpumap(int xc_handle, int io_fd, uint32_t dom,
                     const xc_dominfo_t *info, uint64_t max_virt_cpus,
                     uint64_t **vcpumapp)
{
    int rc = -1;
    unsigned int i;
    unsigned long vcpumap_size;
    uint64_t *vcpumap = NULL;

    vcpumap_size = (max_virt_cpus + 1 + sizeof(vcpumap[0]) - 1) /
        sizeof(vcpumap[0]);
    vcpumap = malloc(vcpumap_size);
    if (vcpumap == NULL) {
        ERROR("memory alloc for vcpumap");
        goto out;
    }
    memset(vcpumap, 0, vcpumap_size);

    for (i = 0; i <= info->max_vcpu_id; i++) {
        xc_vcpuinfo_t vinfo;
        if ((xc_vcpu_getinfo(xc_handle, dom, i, &vinfo) == 0) && vinfo.online)
            __set_bit(i, vcpumap);
    }

    if (write_exact(io_fd, &max_virt_cpus, sizeof(max_virt_cpus))) {
        ERROR("write max_virt_cpus");
        goto out;
    }

    if (write_exact(io_fd, vcpumap, vcpumap_size)) {
        ERROR("write vcpumap");
        goto out;
    }

    rc = 0;

 out:
    if (rc != 0 && vcpumap != NULL) {
        free(vcpumap);
        vcpumap = NULL;
    }
    *vcpumapp = vcpumap;
    return rc;
}
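
/*
 * PV tail of the save record: vcpumap, then for each online vCPU its
 * context followed by its privregs page, and finally the shared_info
 * page.
 */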
static int
xc_ia64_pv_send_context(int xc_handle, int io_fd, uint32_t dom,
                        const xc_dominfo_t *info, shared_info_t *live_shinfo)
{
    int rc = -1;
    unsigned int i;

    /* vcpu map */
    uint64_t *vcpumap = NULL;
    if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, XEN_LEGACY_MAX_VCPUS,
                             &vcpumap))
        goto out;

    /* vcpu context */
    for (i = 0; i <= info->max_vcpu_id; i++) {
        /* A copy of the CPU context of the guest. */
        vcpu_guest_context_any_t ctxt_any;
        vcpu_guest_context_t *ctxt = &ctxt_any.c;

        char *mem;

        if (!__test_bit(i, vcpumap))
            continue;

        if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, i, &ctxt_any))
            goto out;

        mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                   PROT_READ|PROT_WRITE, ctxt->privregs_pfn);
        if (mem == NULL) {
            ERROR("cannot map privreg page");
            goto out;
        }
        if (write_exact(io_fd, mem, PAGE_SIZE)) {
            ERROR("Error when writing privreg to state file (5)");
            munmap(mem, PAGE_SIZE);
            goto out;
        }
        munmap(mem, PAGE_SIZE);
    }

    rc = xc_ia64_send_shared_info(xc_handle, io_fd, live_shinfo);

 out:
    if (vcpumap != NULL)
        free(vcpumap);
    return rc;
}
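
/*
 * HVM tail of the save record: shared_info page, then vcpumap and
 * per-vCPU contexts, then the magic-page PFNs (HVM params), and finally
 * the hypervisor's HVM context record preceded by its size.
 */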
static int
xc_ia64_hvm_send_context(int xc_handle, int io_fd, uint32_t dom,
                         const xc_dominfo_t *info, shared_info_t *live_shinfo)
{
    int rc = -1;
    unsigned int i;

    /* vcpu map */
    uint64_t *vcpumap = NULL;

    /* HVM: magic frames for ioreqs and xenstore comms */
    const int hvm_params[] = {
        HVM_PARAM_STORE_PFN,
        HVM_PARAM_IOREQ_PFN,
        HVM_PARAM_BUFIOREQ_PFN,
        HVM_PARAM_BUFPIOREQ_PFN,
    };
    const int NR_PARAMS = sizeof(hvm_params) / sizeof(hvm_params[0]);
    /* ioreq_pfn, bufioreq_pfn, store_pfn */
    uint64_t magic_pfns[NR_PARAMS];

    /* HVM: a buffer for holding HVM context */
    uint64_t rec_size;
    uint64_t hvm_buf_size = 0;
    uint8_t *hvm_buf = NULL;

    if (xc_ia64_send_shared_info(xc_handle, io_fd, live_shinfo))
        return -1;

    /* vcpu map */
    if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, XEN_LEGACY_MAX_VCPUS,
                             &vcpumap))
        goto out;

    /* vcpu context */
    for (i = 0; i <= info->max_vcpu_id; i++) {
        /* A copy of the CPU context of the guest. */
        vcpu_guest_context_any_t ctxt_any;

        if (!__test_bit(i, vcpumap))
            continue;

        if (xc_ia64_send_vcpu_context(xc_handle, io_fd, dom, i, &ctxt_any))
            goto out;

        /* The system context of the vcpu is sent as hvm context. */
    }

    /* Save magic-page locations. */
    memset(magic_pfns, 0, sizeof(magic_pfns));
    for (i = 0; i < NR_PARAMS; i++) {
        if (xc_get_hvm_param(xc_handle, dom, hvm_params[i], &magic_pfns[i])) {
            PERROR("Error when xc_get_hvm_param");
            goto out;
        }
    }

    if (write_exact(io_fd, magic_pfns, sizeof(magic_pfns))) {
        ERROR("Error when writing to state file (7)");
        goto out;
    }

    /* Need another buffer for HVM context */
    hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
    if (hvm_buf_size == -1) {
        ERROR("Couldn't get HVM context size from Xen");
        goto out;
    }

    hvm_buf = malloc(hvm_buf_size);
    if (!hvm_buf) {
        ERROR("Couldn't allocate memory");
        goto out;
    }

    /* Get HVM context from Xen and save it too */
    rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, hvm_buf_size);
    if (rec_size == -1) {
        ERROR("HVM: Could not get hvm buffer");
        goto out;
    }

    if (write_exact(io_fd, &rec_size, sizeof(rec_size))) {
        ERROR("error write hvm buffer size");
        goto out;
    }

    if (write_exact(io_fd, hvm_buf, rec_size)) {
        ERROR("write HVM info failed!\n");
        goto out;
    }

    rc = 0;

 out:
    if (hvm_buf != NULL)
        free(hvm_buf);
    if (vcpumap != NULL)
        free(vcpumap);
    return rc;
}
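
/*
 * Top-level entry point.  Wire format, in write order: p2m_size, format
 * version, arch_setup reply, memmap page count and memmap_info, the
 * (iterated, when live) stream of <gpfn, page> pairs terminated by
 * INVALID_MFN, the unallocated-PFN list, then the PV- or HVM-specific
 * tail written by the helpers above.
 */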
int
xc_domain_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
               uint32_t max_factor, uint32_t flags, int (*suspend)(void),
               int hvm, void *(*init_qemu_maps)(int, unsigned),
               void (*qemu_flip_buffer)(int, int))
{
    DECLARE_DOMCTL;
    xc_dominfo_t info;

    int rc = 1;

    int debug = (flags & XCFLAGS_DEBUG);
    int live = (flags & XCFLAGS_LIVE);

    /* The new domain's shared-info frame number. */
    unsigned long shared_info_frame;

    /* Live mapping of shared info structure */
    shared_info_t *live_shinfo = NULL;

    /* Iteration number. */
    int iter;

    /* Number of pages sent in the last iteration (live only). */
    unsigned int sent_last_iter;

    /* Number of pages sent (live only). */
    unsigned int total_sent;

    /* Total number of pages used by the current guest. */
    unsigned long p2m_size;

    /* Size of the shadow bitmap (live only). */
    unsigned int bitmap_size = 0;

    /* True if last iteration. */
    int last_iter;

    /* Bitmap of pages to be sent. */
    unsigned long *to_send = NULL;
    /* Bitmap of pages not to be sent (because dirtied). */
    unsigned long *to_skip = NULL;

    char *mem;

    /* HVM: shared-memory bitmaps for getting log-dirty bits from qemu-dm */
    unsigned long *qemu_bitmaps[2];
    int qemu_active = 0;
    int qemu_non_active = 1;

    /* For foreign p2m exposure. */
    unsigned long memmap_info_num_pages;
    /* An unsigned int was used before; kept for file format compatibility. */
    unsigned int memmap_info_num_pages_to_send;
    unsigned long memmap_size = 0;
    xen_ia64_memmap_info_t *memmap_info = NULL;
    void *memmap_desc_start;
    void *memmap_desc_end;
    void *p;
    efi_memory_desc_t *md;
    struct xen_ia64_p2m_table p2m_table;
    xc_ia64_p2m_init(&p2m_table);

    if (debug)
        fprintf(stderr, "xc_linux_save (ia64): started dom=%d\n", dom);

    /* If no explicit control parameters given, use defaults. */
    if (!max_iters)
        max_iters = DEF_MAX_ITERS;
    if (!max_factor)
        max_factor = DEF_MAX_FACTOR;

    //initialize_mbit_rate();

    if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
        ERROR("Could not get domain info");
        return 1;
    }

    shared_info_frame = info.shared_info_frame;

#if 0
    /* cheesy sanity check */
    if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
        ERROR("Invalid state record -- pfn count out of range: %lu",
              (info.max_memkb >> (PAGE_SHIFT - 10)));
        goto out;
    }
#endif

    /* Map the shared info frame. */
    live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                       PROT_READ, shared_info_frame);
    if (!live_shinfo) {
        ERROR("Couldn't map live_shinfo");
        goto out;
    }

    p2m_size = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &dom) + 1;

    /* This is expected by xm restore. */
    if (write_exact(io_fd, &p2m_size, sizeof(unsigned long))) {
        ERROR("write: p2m_size");
        goto out;
    }

    /* xc_linux_restore starts to read here. */
    /* Write a version number, to avoid a painful bug hunt if the format
       ever changes.  The version is hard-coded; don't forget to change
       the restore code too! */
    {
        unsigned long version = XC_IA64_SR_FORMAT_VER_CURRENT;

        if (write_exact(io_fd, &version, sizeof(unsigned long))) {
            ERROR("write: version");
            goto out;
        }
    }

    domctl.cmd = XEN_DOMCTL_arch_setup;
    domctl.domain = (domid_t)dom;
    domctl.u.arch_setup.flags = XEN_DOMAINSETUP_query;
    if (xc_domctl(xc_handle, &domctl) < 0) {
        ERROR("Could not get domain setup");
        goto out;
    }
    if (write_exact(io_fd, &domctl.u.arch_setup,
                    sizeof(domctl.u.arch_setup))) {
        ERROR("write: domain setup");
        goto out;
    }

    /* Domain is still running at this point. */
    if (live) {

        if (xc_shadow_control(xc_handle, dom,
                              XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
                              NULL, 0, NULL, 0, NULL) < 0) {
            ERROR("Couldn't enable shadow mode");
            goto out;
        }

        last_iter = 0;

        bitmap_size = ((p2m_size + BITS_PER_LONG-1) & ~(BITS_PER_LONG-1)) / 8;
        to_send = malloc(bitmap_size);
        to_skip = malloc(bitmap_size);

        if (!to_send || !to_skip) {
            ERROR("Couldn't allocate bitmap array");
            goto out;
        }

        /* Initially all the pages must be sent. */
        memset(to_send, 0xff, bitmap_size);

        if (lock_pages(to_send, bitmap_size)) {
            ERROR("Unable to lock_pages to_send");
            goto out;
        }
        if (lock_pages(to_skip, bitmap_size)) {
            ERROR("Unable to lock_pages to_skip");
            goto out;
        }

        if (hvm) {
            /* Get qemu-dm logging dirty pages too. */
            void *seg = init_qemu_maps(dom, bitmap_size);
            qemu_bitmaps[0] = seg;
            qemu_bitmaps[1] = seg + bitmap_size;
            qemu_active = 0;
            qemu_non_active = 1;
        }
    } else {

        /* This is a non-live suspend.  Issue the callback to get the
           domain suspended. */

        last_iter = 1;

        if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
            ERROR("Domain appears not to have suspended");
            goto out;
        }

    }

    /* Copy before use, in case someone updates it concurrently. */
    if (xc_ia64_copy_memmap(xc_handle, info.domid, live_shinfo,
                            &memmap_info, &memmap_info_num_pages) != 0) {
        PERROR("Could not copy memmap");
        goto out;
    }
    memmap_size = memmap_info_num_pages << PAGE_SHIFT;

    if (xc_ia64_p2m_map(&p2m_table, xc_handle, dom, memmap_info, 0) < 0) {
        PERROR("xc_ia64_p2m_map");
        goto out;
    }
    memmap_info_num_pages_to_send = memmap_info_num_pages;
    if (write_exact(io_fd, &memmap_info_num_pages_to_send,
                    sizeof(memmap_info_num_pages_to_send))) {
        PERROR("write: arch.memmap_info_num_pages");
        goto out;
    }
    if (write_exact(io_fd, memmap_info, memmap_size)) {
        PERROR("write: memmap_info");
        goto out;
    }

    sent_last_iter = p2m_size;
    total_sent = 0;

    for (iter = 1; ; iter++) {
        unsigned int sent_this_iter, skip_this_iter;
        unsigned long N;

        sent_this_iter = 0;
        skip_this_iter = 0;

        /* Dirtied pages won't be saved: it is slightly wasteful to peek
           the whole array every time, but this is fast enough for the
           moment. */
        if (!last_iter) {
            if (xc_shadow_control(xc_handle, dom,
                                  XEN_DOMCTL_SHADOW_OP_PEEK,
                                  to_skip, p2m_size,
                                  NULL, 0, NULL) != p2m_size) {
                ERROR("Error peeking shadow bitmap");
                goto out;
            }
        }

        /* Start writing out the saved-domain record. */
        memmap_desc_start = &memmap_info->memdesc;
        memmap_desc_end = memmap_desc_start + memmap_info->efi_memmap_size;
        for (p = memmap_desc_start;
             p < memmap_desc_end;
             p += memmap_info->efi_memdesc_size) {
            md = p;
            if (md_is_not_ram(md))
                continue;

            for (N = md->phys_addr >> PAGE_SHIFT;
                 N < (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
                     PAGE_SHIFT;
                 N++) {

                if (!xc_ia64_p2m_allocated(&p2m_table, N))
                    continue;

                if (!last_iter) {
                    if (test_bit(N, to_skip) && test_bit(N, to_send))
                        skip_this_iter++;
                    if (test_bit(N, to_skip) || !test_bit(N, to_send))
                        continue;
                } else if (live) {
                    if (!test_bit(N, to_send))
                        continue;
                }

                if (debug)
                    fprintf(stderr, "xc_linux_save: page %lx (%lu/%lu)\n",
                            xc_ia64_p2m_mfn(&p2m_table, N),
                            N, p2m_size);

                mem = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                           PROT_READ|PROT_WRITE, N);
                if (mem == NULL) {
                    /* The page may have moved; it will be re-marked
                       dirty.  FIXME: to be tracked. */
                    fprintf(stderr, "cannot map mfn page %lx gpfn %lx: %s\n",
                            xc_ia64_p2m_mfn(&p2m_table, N),
                            N, safe_strerror(errno));
                    continue;
                }

                if (write_exact(io_fd, &N, sizeof(N))) {
                    ERROR("write: p2m_size");
                    munmap(mem, PAGE_SIZE);
                    goto out;
                }

                if (write(io_fd, mem, PAGE_SIZE) != PAGE_SIZE) {
                    ERROR("Error when writing to state file (5)");
                    munmap(mem, PAGE_SIZE);
                    goto out;
                }
                munmap(mem, PAGE_SIZE);
                sent_this_iter++;
                total_sent++;
            }
        }

        if (last_iter)
            break;

        DPRINTF(" %d: sent %d, skipped %d\n",
                iter, sent_this_iter, skip_this_iter);

        if (live) {
            if ( /* ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) || */
                (iter >= max_iters) || (sent_this_iter+skip_this_iter < 50) ||
                (total_sent > p2m_size*max_factor)) {
                DPRINTF("Start last iteration\n");
                last_iter = 1;

                if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info)) {
                    ERROR("Domain appears not to have suspended");
                    goto out;
                }
            }

            /* Pages to be sent are pages which were dirty. */
            if (xc_shadow_control(xc_handle, dom,
                                  XEN_DOMCTL_SHADOW_OP_CLEAN,
                                  to_send, p2m_size,
                                  NULL, 0, NULL) != p2m_size) {
                ERROR("Error flushing shadow PT");
                goto out;
            }

            if (hvm) {
                unsigned int j;
                /* Pull in the dirty bits from qemu-dm too. */
                if (!last_iter) {
                    qemu_active = qemu_non_active;
                    qemu_non_active = qemu_active ? 0 : 1;
                    qemu_flip_buffer(dom, qemu_active);
                    for (j = 0; j < bitmap_size / sizeof(unsigned long); j++) {
                        to_send[j] |= qemu_bitmaps[qemu_non_active][j];
                        qemu_bitmaps[qemu_non_active][j] = 0;
                    }
                } else {
                    for (j = 0; j < bitmap_size / sizeof(unsigned long); j++)
                        to_send[j] |= qemu_bitmaps[qemu_active][j];
                }
            }

            sent_last_iter = sent_this_iter;

            //print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
        }
    }

    fprintf(stderr, "All memory is saved\n");

    /* terminate */
    {
        unsigned long pfn = INVALID_MFN;
        if (write_exact(io_fd, &pfn, sizeof(pfn))) {
            ERROR("Error when writing to state file (6)");
            goto out;
        }
    }

    if (xc_ia64_send_unallocated_list(xc_handle, io_fd, &p2m_table,
                                      memmap_info,
                                      memmap_desc_start, memmap_desc_end))
        goto out;

    if (!hvm)
        rc = xc_ia64_pv_send_context(xc_handle, io_fd,
                                     dom, &info, live_shinfo);
    else
        rc = xc_ia64_hvm_send_context(xc_handle, io_fd,
                                      dom, &info, live_shinfo);
    if (rc)
        goto out;

    /* Success! */
    rc = 0;

 out:

    if (live) {
        if (xc_shadow_control(xc_handle, dom,
                              XEN_DOMCTL_SHADOW_OP_OFF,
                              NULL, 0, NULL, 0, NULL) < 0) {
            DPRINTF("Warning - couldn't disable shadow mode");
        }
    }

    unlock_pages(to_send, bitmap_size);
    free(to_send);
    unlock_pages(to_skip, bitmap_size);
    free(to_skip);
    if (live_shinfo)
        munmap(live_shinfo, PAGE_SIZE);
    if (memmap_info)
        free(memmap_info);
    xc_ia64_p2m_unmap(&p2m_table);

    fprintf(stderr, "Save exit rc=%d\n", rc);

    return !!rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
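
For reference, a hypothetical non-live invocation from a management tool
could look like the sketch below; the suspend callback and the file
descriptor plumbing are assumptions, not code from this changeset (note
that suspend_and_state() treats a non-zero callback return as success):

    static int my_suspend(void)
    {
        /* Ask the domain to suspend, e.g. via the xenstore control node. */
        return 1;
    }

    /* ... */
    int fd = open("domain.save", O_WRONLY | O_CREAT | O_TRUNC, 0644);
    int rc = xc_domain_save(xc_handle, fd, domid,
                            0 /* max_iters: default */,
                            0 /* max_factor: default */,
                            0 /* flags: non-live, no debug */,
                            my_suspend, 0 /* PV */, NULL, NULL);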