tools/libxc/xc_domain_restore.c @ 16715:c5deb251b9dc

changeset: Update version to 3.2.0-rc4
author:    Keir Fraser <keir.fraser@citrix.com>
date:      Sat Dec 29 17:57:37 2007 +0000

/******************************************************************************
 * xc_domain_restore.c
 *
 * Restore the state of a guest session.
 *
 * Copyright (c) 2003, K A Fraser.
 * Copyright (c) 2006, Intel Corporation
 * Copyright (c) 2007, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <stdlib.h>
#include <unistd.h>

#include "xg_private.h"
#include "xg_save_restore.h"
#include "xc_dom.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>

/* max mfn of the current host machine */
static unsigned long max_mfn;

/* virtual starting address of the hypervisor */
static unsigned long hvirt_start;

/* #levels of page tables used by the current guest */
static unsigned int pt_levels;

/* number of pfns this guest has (i.e. number of entries in the P2M) */
static unsigned long p2m_size;

/* number of 'in use' pfns in the guest (i.e. #P2M entries with a valid mfn) */
static unsigned long nr_pfns;

/* Live mapping of the table mapping each PFN to its current MFN. */
static xen_pfn_t *live_p2m = NULL;

/* A table mapping each PFN to its new MFN. */
static xen_pfn_t *p2m = NULL;

/* A table of P2M mappings in the current region */
static xen_pfn_t *p2m_batch = NULL;

/* Address size of the guest, in bytes */
unsigned int guest_width;
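/* (Either 4 or 8.  Defaults to the tools' own word size; for PV guests it
 * is re-derived below from the vcpu-context size found in the image's
 * extended-info chunk, and it selects the x32/x64 layouts throughout.) */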

/*
** In the state file (or during transfer), all page-table pages are
** converted into a 'canonical' form where references to actual mfns
** are replaced with references to the corresponding pfns.
** This function inverts that operation, replacing the pfn values with
** the (now known) appropriate mfn values.
*/
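/*
** Worked example (8-byte PTEs): the canonical entry 0x0000000000001027
** refers to pfn 0x1 with low flag bits 0x027.  If p2m[0x1] == 0x12345,
** the entry is rewritten as 0x0000000012345027: the frame number
** changes, the flag bits are preserved.
*/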
static int uncanonicalize_pagetable(int xc_handle, uint32_t dom,
                                    unsigned long type, void *page)
{
    int i, pte_last;
    unsigned long pfn;
    uint64_t pte;
    int nr_mfns = 0;

    pte_last = PAGE_SIZE / ((pt_levels == 2) ? 4 : 8);

    /* First pass: work out how many (if any) MFNs we need to alloc */
    for ( i = 0; i < pte_last; i++ )
    {
        if ( pt_levels == 2 )
            pte = ((uint32_t *)page)[i];
        else
            pte = ((uint64_t *)page)[i];

        /* XXX SMH: below needs fixing for PROT_NONE etc */
        if ( !(pte & _PAGE_PRESENT) )
            continue;

        pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;

        if ( pfn >= p2m_size )
        {
            /* This "page table page" is probably not one; bail. */
            ERROR("Frame number in type %lu page table is out of range: "
                  "i=%d pfn=0x%lx p2m_size=%lu",
                  type >> 28, i, pfn, p2m_size);
            return 0;
        }

        if ( p2m[pfn] == INVALID_P2M_ENTRY )
        {
            /* Have a 'valid' PFN without a matching MFN - need to alloc */
            p2m_batch[nr_mfns++] = pfn;
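            /* The decrement below turns INVALID_P2M_ENTRY into the sentinel
             * (INVALID_P2M_ENTRY-1), which the second pass recognises as
             * "MFN allocation pending for this PFN". */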
            p2m[pfn]--;
        }
    }

    /* Allocate the requisite number of mfns. */
    if ( nr_mfns &&
         (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0, 0,
                                            p2m_batch) != 0) )
    {
        ERROR("Failed to allocate memory for batch!\n");
        errno = ENOMEM;
        return 0;
    }

    /* Second pass: uncanonicalize each present PTE */
    nr_mfns = 0;
    for ( i = 0; i < pte_last; i++ )
    {
        if ( pt_levels == 2 )
            pte = ((uint32_t *)page)[i];
        else
            pte = ((uint64_t *)page)[i];

        /* XXX SMH: below needs fixing for PROT_NONE etc */
        if ( !(pte & _PAGE_PRESENT) )
            continue;

        pfn = (pte >> PAGE_SHIFT) & MFN_MASK_X86;

        if ( p2m[pfn] == (INVALID_P2M_ENTRY-1) )
            p2m[pfn] = p2m_batch[nr_mfns++];

        pte &= ~MADDR_MASK_X86;
        pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;

        if ( pt_levels == 2 )
            ((uint32_t *)page)[i] = (uint32_t)pte;
        else
            ((uint64_t *)page)[i] = (uint64_t)pte;
    }

    return 1;
}

/* Load the p2m frame list, plus potential extended info chunk */
static xen_pfn_t *load_p2m_frame_list(
    int io_fd, int *pae_extended_cr3, int *ext_vcpucontext)
{
    xen_pfn_t *p2m_frame_list;
    vcpu_guest_context_either_t ctxt;
    xen_pfn_t p2m_fl_zero;

    /* Read first entry of P2M list, or extended-info signature (~0UL). */
    if ( read_exact(io_fd, &p2m_fl_zero, sizeof(long)) )
    {
        ERROR("read extended-info signature failed");
        return NULL;
    }

    if ( p2m_fl_zero == ~0UL )
    {
        uint32_t tot_bytes;

        /* Next 4 bytes: total size of following extended info. */
        if ( read_exact(io_fd, &tot_bytes, sizeof(tot_bytes)) )
        {
            ERROR("read extended-info size failed");
            return NULL;
        }

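        /* The extended info is a sequence of chunks, each laid out as:
         *   4-byte ASCII tag ("vcpu", "extv", ...) |
         *   4-byte payload size | payload.
         * tot_bytes counts all remaining bytes, chunk headers included. */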
        while ( tot_bytes )
        {
            uint32_t chunk_bytes;
            char     chunk_sig[4];

            /* 4-character chunk signature + 4-byte remaining chunk size. */
            if ( read_exact(io_fd, chunk_sig, sizeof(chunk_sig)) ||
                 read_exact(io_fd, &chunk_bytes, sizeof(chunk_bytes)) ||
                 (tot_bytes < (chunk_bytes + 8)) )
            {
                ERROR("read extended-info chunk signature failed");
                return NULL;
            }
            tot_bytes -= 8;

            /* VCPU context structure? */
            if ( !strncmp(chunk_sig, "vcpu", 4) )
            {
                /* Pick a guest word-size and PT depth from the ctxt size */
                if ( chunk_bytes == sizeof (ctxt.x32) )
                {
                    guest_width = 4;
                    if ( pt_levels > 2 )
                        pt_levels = 3;
                }
                else if ( chunk_bytes == sizeof (ctxt.x64) )
                {
                    guest_width = 8;
                    pt_levels = 4;
                }
                else
                {
                    ERROR("bad extended-info context size %d", chunk_bytes);
                    return NULL;
                }

                if ( read_exact(io_fd, &ctxt, chunk_bytes) )
                {
                    ERROR("read extended-info vcpu context failed");
                    return NULL;
                }
                tot_bytes -= chunk_bytes;
                chunk_bytes = 0;

                if ( GET_FIELD(&ctxt, vm_assist)
                     & (1UL << VMASST_TYPE_pae_extended_cr3) )
                    *pae_extended_cr3 = 1;
            }
            else if ( !strncmp(chunk_sig, "extv", 4) )
            {
                *ext_vcpucontext = 1;
            }

            /* Any remaining bytes of this chunk: read and discard. */
            while ( chunk_bytes )
            {
                unsigned long sz = MIN(chunk_bytes, sizeof(xen_pfn_t));
                if ( read_exact(io_fd, &p2m_fl_zero, sz) )
                {
                    ERROR("read-and-discard extended-info chunk bytes failed");
                    return NULL;
                }
                chunk_bytes -= sz;
                tot_bytes   -= sz;
            }
        }

        /* Now read the real first entry of P2M list. */
        if ( read_exact(io_fd, &p2m_fl_zero, sizeof(xen_pfn_t)) )
        {
            ERROR("read first entry of p2m_frame_list failed");
            return NULL;
        }
    }

    /* Now that we know the guest's word-size, we can safely allocate
     * the p2m frame list */
    if ( (p2m_frame_list = malloc(P2M_FL_SIZE)) == NULL )
    {
        ERROR("Couldn't allocate p2m_frame_list array");
        return NULL;
    }

    /* First entry has already been read. */
    p2m_frame_list[0] = p2m_fl_zero;
    if ( read_exact(io_fd, &p2m_frame_list[1],
                    (P2M_FL_ENTRIES - 1) * sizeof(xen_pfn_t)) )
    {
        ERROR("read p2m_frame_list failed");
        return NULL;
    }

    return p2m_frame_list;
}

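/*
** Restore a saved domain image from io_fd into domain 'dom'.  Returns 0 on
** success and non-zero on failure, in which case the partially-built domain
** is destroyed on the way out.  On success *store_mfn (and, for PV guests,
** *console_mfn) are filled in with the uncanonicalised frame numbers of the
** xenstore and console rings.
*/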
int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
                      unsigned int store_evtchn, unsigned long *store_mfn,
                      unsigned int console_evtchn, unsigned long *console_mfn,
                      unsigned int hvm, unsigned int pae)
{
    DECLARE_DOMCTL;
    int rc = 1, frc, i, j, n, m, pae_extended_cr3 = 0, ext_vcpucontext = 0;
    unsigned long mfn, pfn;
    unsigned int prev_pc, this_pc;
    int verify = 0;
    int nraces = 0;

    /* The new domain's shared-info frame number. */
    unsigned long shared_info_frame;
    unsigned char shared_info_page[PAGE_SIZE]; /* saved contents from file */
    shared_info_either_t *old_shared_info =
        (shared_info_either_t *)shared_info_page;
    shared_info_either_t *new_shared_info;

    /* A copy of the CPU context of the guest. */
    vcpu_guest_context_either_t ctxt;

    /* A table containing the type of each PFN (/not/ MFN!). */
    unsigned long *pfn_type = NULL;

    /* A table of MFNs to map in the current region */
    xen_pfn_t *region_mfn = NULL;

    /* Types of the pfns in the current region */
    unsigned long region_pfn_type[MAX_BATCH_SIZE];

    /* A copy of the pfn-to-mfn table frame list. */
    xen_pfn_t *p2m_frame_list = NULL;

    /* A temporary mapping of the guest's start_info page. */
    start_info_either_t *start_info;

    /* Our mapping of the current region (batch) */
    char *region_base;

    struct xc_mmu *mmu = NULL;

    /* used by debug verify code */
    unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];

    struct mmuext_op pin[MAX_PIN_BATCH];
    unsigned int nr_pins;

    uint64_t vcpumap = 1ULL;
    unsigned int max_vcpu_id = 0;
    int new_ctxt_format = 0;

    /* Magic frames in HVM guests: ioreqs and xenstore comms. */
    uint64_t magic_pfns[3]; /* ioreq_pfn, bufioreq_pfn, store_pfn */

    /* Buffer for holding HVM context */
    uint8_t *hvm_buf = NULL;

    /* For info only */
    nr_pfns = 0;

    if ( read_exact(io_fd, &p2m_size, sizeof(unsigned long)) )
    {
        ERROR("read: p2m_size");
        goto out;
    }
    DPRINTF("xc_domain_restore start: p2m_size = %lx\n", p2m_size);

    if ( !get_platform_info(xc_handle, dom,
                            &max_mfn, &hvirt_start, &pt_levels, &guest_width) )
    {
        ERROR("Unable to get platform info.");
        return 1;
    }

    /* The *current* word size of the guest isn't very interesting; for now
     * assume the guest will be the same as we are.  We'll fix that later
     * if we discover otherwise. */
    guest_width = sizeof(unsigned long);
    pt_levels = (guest_width == 8) ? 4 : (pt_levels == 2) ? 2 : 3;
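    /* i.e. 64-bit tools -> 4 levels; 32-bit tools: 2 if the host is
     * non-PAE, else 3. */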

    if ( lock_pages(&ctxt, sizeof(ctxt)) )
    {
        /* needed for build domctl, but might as well do early */
        ERROR("Unable to lock ctxt");
        return 1;
    }

    if ( !hvm )
    {
        /* Load the p2m frame list, plus potential extended info chunk */
        p2m_frame_list = load_p2m_frame_list(
            io_fd, &pae_extended_cr3, &ext_vcpucontext);
        if ( !p2m_frame_list )
            goto out;

        /* Now that we know the word size, tell Xen about it */
        memset(&domctl, 0, sizeof(domctl));
        domctl.domain = dom;
        domctl.cmd    = XEN_DOMCTL_set_address_size;
        domctl.u.address_size.size = guest_width * 8;
        frc = do_domctl(xc_handle, &domctl);
        if ( frc != 0 )
        {
            ERROR("Unable to set guest address size.");
            goto out;
        }
    }

    /* We want zeroed memory so use calloc rather than malloc. */
    p2m        = calloc(p2m_size, MAX(guest_width, sizeof(xen_pfn_t)));
    pfn_type   = calloc(p2m_size, sizeof(unsigned long));
    region_mfn = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
    p2m_batch  = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));

    if ( (p2m == NULL) || (pfn_type == NULL) ||
         (region_mfn == NULL) || (p2m_batch == NULL) )
    {
        ERROR("memory alloc failed");
        errno = ENOMEM;
        goto out;
    }

    if ( lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
    {
        ERROR("Could not lock region_mfn");
        goto out;
    }

    if ( lock_pages(p2m_batch, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
    {
        ERROR("Could not lock p2m_batch");
        goto out;
    }

    /* Get the domain's shared-info frame. */
    domctl.cmd = XEN_DOMCTL_getdomaininfo;
    domctl.domain = (domid_t)dom;
    if ( xc_domctl(xc_handle, &domctl) < 0 )
    {
        ERROR("Could not get information on new domain");
        goto out;
    }
    shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;

    /* Mark all PFNs as invalid; we allocate on demand */
    for ( pfn = 0; pfn < p2m_size; pfn++ )
        p2m[pfn] = INVALID_P2M_ENTRY;

    mmu = xc_alloc_mmu_updates(xc_handle, dom);
    if ( mmu == NULL )
    {
        ERROR("Could not initialise for MMU updates");
        goto out;
    }

    DPRINTF("Reloading memory pages:   0%%\n");

    /*
     * Now simply read each saved frame into its new machine frame.
     * We uncanonicalise page tables as we go.
     */
    prev_pc = 0;

    n = m = 0;
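    /*
     * Each iteration consumes one batch from the image:
     *   int32 count -- > 0: number of page records that follow;
     *                   -1: switch to page-verify mode;
     *                   -2: a max_vcpu_id/vcpumap record follows;
     *                    0: end of page data.
     * A positive count is followed by count pfn/type words and then by
     * the corresponding page contents (XTAB entries carry no page).
     */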
    for ( ; ; )
    {
        int j, nr_mfns = 0;

        this_pc = (n * 100) / p2m_size;
        if ( (this_pc - prev_pc) >= 5 )
        {
            PPRINTF("\b\b\b\b%3d%%", this_pc);
            prev_pc = this_pc;
        }

        if ( read_exact(io_fd, &j, sizeof(int)) )
        {
            ERROR("Error when reading batch size");
            goto out;
        }

        PPRINTF("batch %d\n", j);

        if ( j == -1 )
        {
            verify = 1;
            DPRINTF("Entering page verify mode\n");
            continue;
        }

        if ( j == -2 )
        {
            new_ctxt_format = 1;
            if ( read_exact(io_fd, &max_vcpu_id, sizeof(int)) ||
                 (max_vcpu_id >= 64) ||
                 read_exact(io_fd, &vcpumap, sizeof(uint64_t)) )
            {
                ERROR("Error when reading max_vcpu_id");
                goto out;
            }
            continue;
        }

        if ( j == 0 )
            break;  /* our work here is done */

        if ( (j > MAX_BATCH_SIZE) || (j < 0) )
        {
            ERROR("Max batch size exceeded. Giving up.");
            goto out;
        }

        if ( read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long)) )
        {
            ERROR("Error when reading region pfn types");
            goto out;
        }

        /* First pass for this batch: work out how much memory to alloc */
        nr_mfns = 0;
        for ( i = 0; i < j; i++ )
        {
            unsigned long pfn, pagetype;
            pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
            pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;

            if ( (pagetype != XEN_DOMCTL_PFINFO_XTAB) &&
                 (p2m[pfn] == INVALID_P2M_ENTRY) )
            {
                /* Have a live PFN which hasn't had an MFN allocated */
                p2m_batch[nr_mfns++] = pfn;
                p2m[pfn]--;
            }
        }

        /* Now allocate a bunch of mfns for this batch */
        if ( nr_mfns &&
             (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0,
                                                0, p2m_batch) != 0) )
        {
            ERROR("Failed to allocate memory for batch!\n");
            errno = ENOMEM;
            goto out;
        }

        /* Second pass for this batch: update p2m[] and region_mfn[] */
        nr_mfns = 0;
        for ( i = 0; i < j; i++ )
        {
            unsigned long pfn, pagetype;
            pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
            pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;

            if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
                region_mfn[i] = ~0UL; /* map will fail but we don't care */
            else
            {
                if ( p2m[pfn] == (INVALID_P2M_ENTRY-1) )
                {
                    /* We just allocated a new mfn above; update p2m */
                    p2m[pfn] = p2m_batch[nr_mfns++];
                    nr_pfns++;
                }

                /* setup region_mfn[] for batch map.
                 * For HVM guests, this interface takes PFNs, not MFNs */
                region_mfn[i] = hvm ? pfn : p2m[pfn];
            }
        }

        /* Map relevant mfns */
        region_base = xc_map_foreign_batch(
            xc_handle, dom, PROT_WRITE, region_mfn, j);

        if ( region_base == NULL )
        {
            ERROR("map batch failed");
            goto out;
        }

        for ( i = 0; i < j; i++ )
        {
            void *page;
            unsigned long pagetype;

            pfn      = region_pfn_type[i] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
            pagetype = region_pfn_type[i] &  XEN_DOMCTL_PFINFO_LTAB_MASK;

            if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
                /* a bogus/unmapped page: skip it */
                continue;

            if ( pfn >= p2m_size )
            {
                ERROR("pfn out of range");
                goto out;
            }

            pfn_type[pfn] = pagetype;

            mfn = p2m[pfn];

            /* In verify mode, we use a copy; otherwise we work in place */
            page = verify ? (void *)buf : (region_base + i*PAGE_SIZE);

            if ( read_exact(io_fd, page, PAGE_SIZE) )
            {
                ERROR("Error when reading page (type was %lx)", pagetype);
                goto out;
            }

            pagetype &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;

            if ( (pagetype >= XEN_DOMCTL_PFINFO_L1TAB) &&
                 (pagetype <= XEN_DOMCTL_PFINFO_L4TAB) )
            {
                /*
                ** A page table page - need to 'uncanonicalize' it, i.e.
                ** replace all the references to pfns with the corresponding
                ** mfns for the new domain.
                **
                ** On PAE we need to ensure that PGDs are in MFNs < 4G, and
                ** so we may need to update the p2m after the main loop.
                ** Hence we defer canonicalization of L1s until then.
                */
                if ( (pt_levels != 3) ||
                     pae_extended_cr3 ||
                     (pagetype != XEN_DOMCTL_PFINFO_L1TAB) )
                {
                    if ( !uncanonicalize_pagetable(xc_handle, dom,
                                                   pagetype, page) )
                    {
                        /*
                        ** Failing to uncanonicalize a page table can be ok
                        ** under live migration since the page's type may have
                        ** changed by now (and we'll get an update later).
                        */
                        DPRINTF("PT L%ld race on pfn=%08lx mfn=%08lx\n",
                                pagetype >> 28, pfn, mfn);
                        nraces++;
                        continue;
                    }
                }
            }
            else if ( pagetype != XEN_DOMCTL_PFINFO_NOTAB )
            {
                ERROR("Bogus page type %lx: i=%d p2m_size=%lu",
                      pagetype, i, p2m_size);
                goto out;
            }

            if ( verify )
            {
                int res = memcmp(buf, (region_base + i*PAGE_SIZE), PAGE_SIZE);
                if ( res )
                {
                    int v;

                    DPRINTF("************** pfn=%lx type=%lx gotcs=%08lx "
                            "actualcs=%08lx\n", pfn, pfn_type[pfn],
                            csum_page(region_base + i*PAGE_SIZE),
                            csum_page(buf));

                    for ( v = 0; v < 4; v++ )
                    {
                        unsigned long *p = (unsigned long *)
                            (region_base + i*PAGE_SIZE);
                        if ( buf[v] != p[v] )
                            DPRINTF("    %d: %08lx %08lx\n", v, buf[v], p[v]);
                    }
                }
            }

            if ( !hvm &&
                 xc_add_mmu_update(xc_handle, mmu,
                                   (((unsigned long long)mfn) << PAGE_SHIFT)
                                   | MMU_MACHPHYS_UPDATE, pfn) )
            {
                ERROR("failed machphys update mfn=%lx pfn=%lx", mfn, pfn);
                goto out;
            }
        } /* end of 'batch' for loop */

        munmap(region_base, j*PAGE_SIZE);
        n += j; /* crude stats */

        /*
         * Discard cache for portion of file read so far up to last
         * page boundary every 16MB or so.
         */
        m += j;
        if ( m > MAX_PAGECACHE_USAGE )
        {
            discard_file_cache(io_fd, 0 /* no flush */);
            m = 0;
        }
    }

    /*
     * Ensure we flush all machphys updates before potential PAE-specific
     * reallocations below.
     */
    if ( !hvm && xc_flush_mmu_updates(xc_handle, mmu) )
    {
        ERROR("Error doing flush_mmu_updates()");
        goto out;
    }

    DPRINTF("Received all pages (%d races)\n", nraces);

    if ( hvm )
    {
        uint32_t rec_len;

        /* Set HVM-specific parameters */
        if ( read_exact(io_fd, magic_pfns, sizeof(magic_pfns)) )
        {
            ERROR("error reading magic page addresses");
            goto out;
        }

        /* These comms pages need to be zeroed at the start of day */
        if ( xc_clear_domain_page(xc_handle, dom, magic_pfns[0]) ||
             xc_clear_domain_page(xc_handle, dom, magic_pfns[1]) ||
             xc_clear_domain_page(xc_handle, dom, magic_pfns[2]) )
        {
            ERROR("error zeroing magic pages");
            goto out;
        }

        if ( (frc = xc_set_hvm_param(xc_handle, dom,
                                     HVM_PARAM_IOREQ_PFN, magic_pfns[0]))
             || (frc = xc_set_hvm_param(xc_handle, dom,
                                        HVM_PARAM_BUFIOREQ_PFN, magic_pfns[1]))
             || (frc = xc_set_hvm_param(xc_handle, dom,
                                        HVM_PARAM_STORE_PFN, magic_pfns[2]))
             || (frc = xc_set_hvm_param(xc_handle, dom,
                                        HVM_PARAM_PAE_ENABLED, pae))
             || (frc = xc_set_hvm_param(xc_handle, dom,
                                        HVM_PARAM_STORE_EVTCHN,
                                        store_evtchn)) )
        {
            ERROR("error setting HVM params: %i", frc);
            goto out;
        }
        *store_mfn = magic_pfns[2];

        /* Read HVM context */
        if ( read_exact(io_fd, &rec_len, sizeof(uint32_t)) )
        {
            ERROR("error reading HVM context size");
            goto out;
        }

        hvm_buf = malloc(rec_len);
        if ( hvm_buf == NULL )
        {
            ERROR("memory alloc for hvm context buffer failed");
            errno = ENOMEM;
            goto out;
        }

        if ( read_exact(io_fd, hvm_buf, rec_len) )
        {
            ERROR("error loading the HVM context");
            goto out;
        }

        frc = xc_domain_hvm_setcontext(xc_handle, dom, hvm_buf, rec_len);
        if ( frc )
        {
            ERROR("error setting the HVM context");
            goto out;
        }

        /* HVM success! */
        rc = 0;
        goto out;
    }

    /* Non-HVM guests only from here on */

    if ( (pt_levels == 3) && !pae_extended_cr3 )
    {
        /*
        ** XXX SMH on PAE we need to ensure PGDs are in MFNs < 4G. This
        ** is a little awkward and involves (a) finding all such PGDs and
        ** replacing them with 'lowmem' versions; (b) updating the p2m[]
        ** with the new info; and (c) canonicalizing all the L1s using the
        ** (potentially updated) p2m[].
        **
        ** This is relatively slow (and currently involves two passes through
        ** the pfn_type[] array), but at least seems to be correct. May wish
        ** to consider more complex approaches to optimize this later.
        */

        int j, k;
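        /* A frame number above 0xfffff addresses memory at or above 4GB
         * (2^20 frames of 4kB); such MFNs match the p2m[i] > 0xfffffUL
         * test below and get swapped for low-memory pages. */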

        /* First pass: find all L3TABs currently in >4G mfns and get new mfns */
        for ( i = 0; i < p2m_size; i++ )
        {
            if ( ((pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) ==
                  XEN_DOMCTL_PFINFO_L3TAB) &&
                 (p2m[i] > 0xfffffUL) )
            {
                unsigned long new_mfn;
                uint64_t l3ptes[4];
                uint64_t *l3tab;

                l3tab = (uint64_t *)
                    xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                         PROT_READ, p2m[i]);

                for ( j = 0; j < 4; j++ )
                    l3ptes[j] = l3tab[j];

                munmap(l3tab, PAGE_SIZE);

                new_mfn = xc_make_page_below_4G(xc_handle, dom, p2m[i]);
                if ( !new_mfn )
                {
                    ERROR("Couldn't get a page below 4GB :-(");
                    goto out;
                }

                p2m[i] = new_mfn;
                if ( xc_add_mmu_update(xc_handle, mmu,
                                       (((unsigned long long)new_mfn)
                                        << PAGE_SHIFT) |
                                       MMU_MACHPHYS_UPDATE, i) )
                {
                    ERROR("Couldn't m2p on PAE root pgdir");
                    goto out;
                }

                l3tab = (uint64_t *)
                    xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                         PROT_READ | PROT_WRITE, p2m[i]);

                for ( j = 0; j < 4; j++ )
                    l3tab[j] = l3ptes[j];

                munmap(l3tab, PAGE_SIZE);
            }
        }

        /* Second pass: find all L1TABs and uncanonicalize them */
        j = 0;

        for ( i = 0; i < p2m_size; i++ )
        {
            if ( ((pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) ==
                  XEN_DOMCTL_PFINFO_L1TAB) )
            {
                region_mfn[j] = p2m[i];
                j++;
            }

            if ( (i == (p2m_size-1)) || (j == MAX_BATCH_SIZE) )
            {
                region_base = xc_map_foreign_batch(
                    xc_handle, dom, PROT_READ | PROT_WRITE, region_mfn, j);
                if ( region_base == NULL )
                {
                    ERROR("map batch failed");
                    goto out;
                }

                for ( k = 0; k < j; k++ )
                {
                    if ( !uncanonicalize_pagetable(
                        xc_handle, dom, XEN_DOMCTL_PFINFO_L1TAB,
                        region_base + k*PAGE_SIZE) )
                    {
                        ERROR("failed uncanonicalize pt!");
                        goto out;
                    }
                }

                munmap(region_base, j*PAGE_SIZE);
                j = 0;
            }
        }

        if ( xc_flush_mmu_updates(xc_handle, mmu) )
        {
            ERROR("Error doing xc_flush_mmu_updates()");
            goto out;
        }
    }

    /*
     * Pin page tables. Do this after writing to them as otherwise Xen
     * will barf when doing the type-checking.
     */
    nr_pins = 0;
    for ( i = 0; i < p2m_size; i++ )
    {
        if ( (pfn_type[i] & XEN_DOMCTL_PFINFO_LPINTAB) == 0 )
            continue;

        switch ( pfn_type[i] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK )
        {
        case XEN_DOMCTL_PFINFO_L1TAB:
            pin[nr_pins].cmd = MMUEXT_PIN_L1_TABLE;
            break;

        case XEN_DOMCTL_PFINFO_L2TAB:
            pin[nr_pins].cmd = MMUEXT_PIN_L2_TABLE;
            break;

        case XEN_DOMCTL_PFINFO_L3TAB:
            pin[nr_pins].cmd = MMUEXT_PIN_L3_TABLE;
            break;

        case XEN_DOMCTL_PFINFO_L4TAB:
            pin[nr_pins].cmd = MMUEXT_PIN_L4_TABLE;
            break;

        default:
            continue;
        }

        pin[nr_pins].arg1.mfn = p2m[i];
        nr_pins++;

        /* Batch full? Then flush. */
        if ( nr_pins == MAX_PIN_BATCH )
        {
            if ( xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0 )
            {
                ERROR("Failed to pin batch of %d page tables", nr_pins);
                goto out;
            }
            nr_pins = 0;
        }
    }

    /* Flush final partial batch. */
    if ( (nr_pins != 0) && (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) )
    {
        ERROR("Failed to pin batch of %d page tables", nr_pins);
        goto out;
    }

    DPRINTF("\b\b\b\b100%%\n");
    DPRINTF("Memory reloaded (%ld pages)\n", nr_pfns);

    /* Get the list of PFNs that are not in the pseudo-phys map */
    {
        unsigned int count = 0;
        unsigned long *pfntab;
        int nr_frees;

        if ( read_exact(io_fd, &count, sizeof(count)) ||
             (count > (1U << 28)) ) /* up to 1TB of address space */
        {
            ERROR("Error when reading pfn count (= %u)", count);
            goto out;
        }

        if ( !(pfntab = malloc(sizeof(unsigned long) * count)) )
        {
            ERROR("Out of memory");
            goto out;
        }

        if ( read_exact(io_fd, pfntab, sizeof(unsigned long)*count) )
        {
            ERROR("Error when reading pfntab");
            goto out;
        }

        nr_frees = 0;
        for ( i = 0; i < count; i++ )
        {
            unsigned long pfn = pfntab[i];

            if ( p2m[pfn] != INVALID_P2M_ENTRY )
            {
                /* pfn is not in physmap now, but was at some point during
                   the save/migration process - need to free it */
                pfntab[nr_frees++] = p2m[pfn];
                p2m[pfn] = INVALID_P2M_ENTRY; /* not in pseudo-physical map */
            }
        }

        if ( nr_frees > 0 )
        {
            struct xen_memory_reservation reservation = {
                .nr_extents   = nr_frees,
                .extent_order = 0,
                .domid        = dom
            };
            set_xen_guest_handle(reservation.extent_start, pfntab);

            if ( (frc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
                                     &reservation)) != nr_frees )
            {
                ERROR("Could not decrease reservation: %d", frc);
                goto out;
            }
            else
                DPRINTF("Decreased reservation by %d pages\n", nr_frees);
        }
    }

    for ( i = 0; i <= max_vcpu_id; i++ )
    {
        if ( !(vcpumap & (1ULL << i)) )
            continue;

        if ( read_exact(io_fd, &ctxt, ((guest_width == 8)
                                       ? sizeof(ctxt.x64)
                                       : sizeof(ctxt.x32))) )
        {
            ERROR("Error when reading ctxt %d", i);
            goto out;
        }

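        /* Old-format images (those without a "-2" batch record) carry no
         * online-vcpu map, so force each restored vcpu online here. */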
        if ( !new_ctxt_format )
            SET_FIELD(&ctxt, flags, GET_FIELD(&ctxt, flags) | VGCF_online);

        if ( i == 0 )
        {
            /*
             * Uncanonicalise the suspend-record frame number and poke
             * resume record.
             */
            pfn = GET_FIELD(&ctxt, user_regs.edx);
            if ( (pfn >= p2m_size) ||
                 (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
            {
                ERROR("Suspend record frame number is bad");
                goto out;
            }
            mfn = p2m[pfn];
            SET_FIELD(&ctxt, user_regs.edx, mfn);
            start_info = xc_map_foreign_range(
                xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE, mfn);
            SET_FIELD(start_info, nr_pages, p2m_size);
            SET_FIELD(start_info, shared_info, shared_info_frame<<PAGE_SHIFT);
            SET_FIELD(start_info, flags, 0);
            *store_mfn = p2m[GET_FIELD(start_info, store_mfn)];
            SET_FIELD(start_info, store_mfn, *store_mfn);
            SET_FIELD(start_info, store_evtchn, store_evtchn);
            *console_mfn = p2m[GET_FIELD(start_info, console.domU.mfn)];
            SET_FIELD(start_info, console.domU.mfn, *console_mfn);
            SET_FIELD(start_info, console.domU.evtchn, console_evtchn);
            munmap(start_info, PAGE_SIZE);
        }

        /* Uncanonicalise each GDT frame number. */
        if ( GET_FIELD(&ctxt, gdt_ents) > 8192 )
        {
            ERROR("GDT entry count out of range");
            goto out;
        }

        for ( j = 0; (512*j) < GET_FIELD(&ctxt, gdt_ents); j++ )
        {
            pfn = GET_FIELD(&ctxt, gdt_frames[j]);
            if ( (pfn >= p2m_size) ||
                 (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
            {
                ERROR("GDT frame number %i (0x%lx) is bad",
                      j, (unsigned long)pfn);
                goto out;
            }
            SET_FIELD(&ctxt, gdt_frames[j], p2m[pfn]);
        }

        /* Uncanonicalise the page table base pointer. */
        pfn = xen_cr3_to_pfn(GET_FIELD(&ctxt, ctrlreg[3]));

        if ( pfn >= p2m_size )
        {
            ERROR("PT base is bad: pfn=%lu p2m_size=%lu type=%08lx",
                  pfn, p2m_size, pfn_type[pfn]);
            goto out;
        }

        if ( (pfn_type[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
             ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
        {
            ERROR("PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
                  pfn, p2m_size, pfn_type[pfn],
                  (unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
            goto out;
        }

        SET_FIELD(&ctxt, ctrlreg[3], xen_pfn_to_cr3(p2m[pfn]));

        /* Guest pagetable (x86/64) stored in otherwise-unused CR1. */
        if ( (pt_levels == 4) && (ctxt.x64.ctrlreg[1] & 1) )
        {
            pfn = xen_cr3_to_pfn(ctxt.x64.ctrlreg[1] & ~1);
            if ( pfn >= p2m_size )
            {
                ERROR("User PT base is bad: pfn=%lu p2m_size=%lu",
                      pfn, p2m_size);
                goto out;
            }
            if ( (pfn_type[pfn] & XEN_DOMCTL_PFINFO_LTABTYPE_MASK) !=
                 ((unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT) )
            {
                ERROR("User PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
                      pfn, p2m_size, pfn_type[pfn],
                      (unsigned long)pt_levels<<XEN_DOMCTL_PFINFO_LTAB_SHIFT);
                goto out;
            }
            ctxt.x64.ctrlreg[1] = xen_pfn_to_cr3(p2m[pfn]);
        }

        domctl.cmd = XEN_DOMCTL_setvcpucontext;
        domctl.domain = (domid_t)dom;
        domctl.u.vcpucontext.vcpu = i;
        set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt.c);
        frc = xc_domctl(xc_handle, &domctl);
        if ( frc != 0 )
        {
            ERROR("Couldn't build vcpu%d", i);
            goto out;
        }

        if ( !ext_vcpucontext )
            continue;
        if ( read_exact(io_fd, &domctl.u.ext_vcpucontext, 128) ||
             (domctl.u.ext_vcpucontext.vcpu != i) )
        {
            ERROR("Error when reading extended ctxt %d", i);
            goto out;
        }
        domctl.cmd = XEN_DOMCTL_set_ext_vcpucontext;
        domctl.domain = dom;
        frc = xc_domctl(xc_handle, &domctl);
        if ( frc != 0 )
        {
            ERROR("Couldn't set extended vcpu%d info\n", i);
            goto out;
        }
    }

    if ( read_exact(io_fd, shared_info_page, PAGE_SIZE) )
    {
        ERROR("Error when reading shared info page");
        goto out;
    }

    /* Restore contents of shared-info page. No checking needed. */
    new_shared_info = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);

    /* restore saved vcpu_info and arch specific info */
    MEMCPY_FIELD(new_shared_info, old_shared_info, vcpu_info);
    MEMCPY_FIELD(new_shared_info, old_shared_info, arch);

    /* clear any pending events and the selector */
    MEMSET_ARRAY_FIELD(new_shared_info, evtchn_pending, 0);
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        SET_FIELD(new_shared_info, vcpu_info[i].evtchn_pending_sel, 0);

    /* mask event channels */
    MEMSET_ARRAY_FIELD(new_shared_info, evtchn_mask, 0xff);

    /* leave wallclock time. set by hypervisor */
    munmap(new_shared_info, PAGE_SIZE);

    /* Uncanonicalise the pfn-to-mfn table frame-number list. */
    for ( i = 0; i < P2M_FL_ENTRIES; i++ )
    {
        pfn = p2m_frame_list[i];
        if ( (pfn >= p2m_size) || (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
        {
            ERROR("PFN-to-MFN frame number %i (%#lx) is bad", i, pfn);
            goto out;
        }
        p2m_frame_list[i] = p2m[pfn];
    }

    /* Copy the P2M we've constructed to the 'live' P2M */
    if ( !(live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE,
                                           p2m_frame_list, P2M_FL_ENTRIES)) )
    {
        ERROR("Couldn't map p2m table");
        goto out;
    }

    /* If the domain we're restoring has a different word size to ours,
     * we need to repack the p2m appropriately */
    if ( guest_width > sizeof (xen_pfn_t) )
        for ( i = p2m_size - 1; i >= 0; i-- )
            ((uint64_t *)p2m)[i] = p2m[i];
    else if ( guest_width < sizeof (xen_pfn_t) )
        for ( i = 0; i < p2m_size; i++ )
            ((uint32_t *)p2m)[i] = p2m[i];
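    /* (The repack is done in place: widening to 64-bit entries walks from
     *  the top down, narrowing to 32-bit entries from the bottom up, so no
     *  entry is overwritten before it has been read.) */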

    memcpy(live_p2m, p2m, ROUNDUP(p2m_size * guest_width, PAGE_SHIFT));
    munmap(live_p2m, ROUNDUP(p2m_size * guest_width, PAGE_SHIFT));

    DPRINTF("Domain ready to be built.\n");
    rc = 0;

 out:
    if ( (rc != 0) && (dom != 0) )
        xc_domain_destroy(xc_handle, dom);
    free(mmu);
    free(p2m);
    free(pfn_type);
    free(hvm_buf);

    /* discard cache for save file */
    discard_file_cache(io_fd, 1 /*flush*/);

    DPRINTF("Restore exit with rc=%d\n", rc);

    return rc;
}