/root/src/xen/xen/arch/x86/hvm/save.c
Line | Count | Source |
1 | | /* |
2 | | * hvm/save.c: Save and restore HVM guest's emulated hardware state. |
3 | | * |
4 | | * Copyright (c) 2004, Intel Corporation. |
5 | | * Copyright (c) 2007, XenSource Inc. |
6 | | * Copyright (c) 2007, Isaku Yamahata <yamahata at valinux co jp> |
7 | | * VA Linux Systems Japan K.K. |
8 | | * split x86 specific part |
9 | | * |
10 | | * This program is free software; you can redistribute it and/or modify it |
11 | | * under the terms and conditions of the GNU General Public License, |
12 | | * version 2, as published by the Free Software Foundation. |
13 | | * |
14 | | * This program is distributed in the hope it will be useful, but WITHOUT |
15 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
16 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
17 | | * more details. |
18 | | * |
19 | | * You should have received a copy of the GNU General Public License along with |
20 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
21 | | */ |
22 | | |
23 | | #include <xen/guest_access.h> |
24 | | #include <xen/version.h> |
25 | | |
26 | | #include <asm/hvm/support.h> |
27 | | |
28 | | #include <public/hvm/save.h> |
29 | | |
30 | | void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr) |
31 | 0 | { |
32 | 0 | uint32_t eax, ebx, ecx, edx; |
33 | 0 |
34 | 0 | /* Save some CPUID bits */ |
35 | 0 | cpuid(1, &eax, &ebx, &ecx, &edx); |
36 | 0 | hdr->cpuid = eax; |
37 | 0 |
38 | 0 | /* Save guest's preferred TSC. */ |
39 | 0 | hdr->gtsc_khz = d->arch.tsc_khz; |
40 | 0 |
41 | 0 | /* Time when saving started */ |
42 | 0 | d->arch.hvm_domain.sync_tsc = rdtsc(); |
43 | 0 | } |
44 | | |
45 | | int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr) |
46 | 0 | { |
47 | 0 | uint32_t eax, ebx, ecx, edx; |
48 | 0 |
49 | 0 | if ( hdr->magic != HVM_FILE_MAGIC ) |
50 | 0 | { |
51 | 0 | printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n", |
52 | 0 | d->domain_id, hdr->magic); |
53 | 0 | return -1; |
54 | 0 | } |
55 | 0 |
56 | 0 | if ( hdr->version != HVM_FILE_VERSION ) |
57 | 0 | { |
58 | 0 | printk(XENLOG_G_ERR "HVM%d restore: unsupported version %u\n", |
59 | 0 | d->domain_id, hdr->version); |
60 | 0 | return -1; |
61 | 0 | } |
62 | 0 |
63 | 0 | cpuid(1, &eax, &ebx, &ecx, &edx); |
64 | 0 | /* CPUs ought to match but with feature-masking they might not */ |
65 | 0 | if ( (hdr->cpuid & ~0x0fUL) != (eax & ~0x0fUL) ) |
66 | 0 | printk(XENLOG_G_INFO "HVM%d restore: VM saved on one CPU " |
67 | 0 | "(%#"PRIx32") and restored on another (%#"PRIx32").\n", |
68 | 0 | d->domain_id, hdr->cpuid, eax); |
69 | 0 |
70 | 0 | /* Restore guest's preferred TSC frequency. */ |
71 | 0 | if ( hdr->gtsc_khz ) |
72 | 0 | d->arch.tsc_khz = hdr->gtsc_khz; |
73 | 0 | if ( d->arch.vtsc ) |
74 | 0 | hvm_set_rdtsc_exiting(d, 1); |
75 | 0 |
76 | 0 | /* Time when restore started */ |
77 | 0 | d->arch.hvm_domain.sync_tsc = rdtsc(); |
78 | 0 |
79 | 0 | /* VGA state is not saved/restored, so we nobble the cache. */ |
80 | 0 | d->arch.hvm_domain.stdvga.cache = STDVGA_CACHE_DISABLED; |
81 | 0 |
82 | 0 | return 0; |
83 | 0 | } |
84 | | |
85 | | /* List of handlers for various HVM save and restore types */ |
86 | | static struct { |
87 | | hvm_save_handler save; |
88 | | hvm_load_handler load; |
89 | | const char *name; |
90 | | size_t size; |
91 | | int kind; |
92 | | } hvm_sr_handlers[HVM_SAVE_CODE_MAX + 1] = { {NULL, NULL, "<?>"}, }; |
93 | | |
94 | | /* Init-time function to add entries to that list */ |
95 | | void __init hvm_register_savevm(uint16_t typecode, |
96 | | const char *name, |
97 | | hvm_save_handler save_state, |
98 | | hvm_load_handler load_state, |
99 | | size_t size, int kind) |
100 | 18 | { |
101 | 18 | ASSERT(typecode <= HVM_SAVE_CODE_MAX); |
102 | 18 | ASSERT(hvm_sr_handlers[typecode].save == NULL); |
103 | 18 | ASSERT(hvm_sr_handlers[typecode].load == NULL); |
104 | 18 | hvm_sr_handlers[typecode].save = save_state; |
105 | 18 | hvm_sr_handlers[typecode].load = load_state; |
106 | 18 | hvm_sr_handlers[typecode].name = name; |
107 | 18 | hvm_sr_handlers[typecode].size = size; |
108 | 18 | hvm_sr_handlers[typecode].kind = kind; |
109 | 18 | } |
110 | | |
111 | | size_t hvm_save_size(struct domain *d) |
112 | 0 | { |
113 | 0 | struct vcpu *v; |
114 | 0 | size_t sz; |
115 | 0 | int i; |
116 | 0 |
117 | 0 | /* Basic overhead for header and footer */ |
118 | 0 | sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER); |
119 | 0 |
120 | 0 | /* Plus space for each thing we will be saving */ |
121 | 0 | for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) |
122 | 0 | if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU ) |
123 | 0 | for_each_vcpu(d, v) |
124 | 0 | sz += hvm_sr_handlers[i].size; |
125 | 0 | else |
126 | 0 | sz += hvm_sr_handlers[i].size; |
127 | 0 |
128 | 0 | return sz; |
129 | 0 | } |
130 | | |
131 | | /* |
132 | | * Extract a single instance of a save record, by marshalling all records of |
133 | | * that type and copying out the one we need. |
134 | | */ |
135 | | int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance, |
136 | | XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz) |
137 | 0 | { |
138 | 0 | int rv; |
139 | 0 | hvm_domain_context_t ctxt = { }; |
140 | 0 | const struct hvm_save_descriptor *desc; |
141 | 0 |
142 | 0 | if ( d->is_dying || |
143 | 0 | typecode > HVM_SAVE_CODE_MAX || |
144 | 0 | hvm_sr_handlers[typecode].size < sizeof(*desc) || |
145 | 0 | !hvm_sr_handlers[typecode].save ) |
146 | 0 | return -EINVAL; |
147 | 0 |
148 | 0 | ctxt.size = hvm_sr_handlers[typecode].size; |
149 | 0 | if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU ) |
150 | 0 | ctxt.size *= d->max_vcpus; |
151 | 0 | ctxt.data = xmalloc_bytes(ctxt.size); |
152 | 0 | if ( !ctxt.data ) |
153 | 0 | return -ENOMEM; |
154 | 0 |
155 | 0 | if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 ) |
156 | 0 | printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n", |
157 | 0 | d->domain_id, typecode, rv); |
158 | 0 | else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) ) |
159 | 0 | { |
160 | 0 | uint32_t off; |
161 | 0 |
162 | 0 | for ( off = 0; off <= (ctxt.cur - sizeof(*desc)); off += desc->length ) |
163 | 0 | { |
164 | 0 | desc = (void *)(ctxt.data + off); |
165 | 0 | /* Move past header */ |
166 | 0 | off += sizeof(*desc); |
167 | 0 | if ( ctxt.cur < desc->length || |
168 | 0 | off > ctxt.cur - desc->length ) |
169 | 0 | break; |
170 | 0 | if ( instance == desc->instance ) |
171 | 0 | { |
172 | 0 | rv = 0; |
173 | 0 | if ( guest_handle_is_null(handle) ) |
174 | 0 | *bufsz = desc->length; |
175 | 0 | else if ( *bufsz < desc->length ) |
176 | 0 | rv = -ENOBUFS; |
177 | 0 | else if ( copy_to_guest(handle, ctxt.data + off, desc->length) ) |
178 | 0 | rv = -EFAULT; |
179 | 0 | else |
180 | 0 | *bufsz = desc->length; |
181 | 0 | break; |
182 | 0 | } |
183 | 0 | } |
184 | 0 | } |
185 | 0 |
186 | 0 | xfree(ctxt.data); |
187 | 0 | return rv; |
188 | 0 | } |
189 | | |
190 | | int hvm_save(struct domain *d, hvm_domain_context_t *h) |
191 | 0 | { |
192 | 0 | char *c; |
193 | 0 | struct hvm_save_header hdr; |
194 | 0 | struct hvm_save_end end; |
195 | 0 | hvm_save_handler handler; |
196 | 0 | unsigned int i; |
197 | 0 |
198 | 0 | if ( d->is_dying ) |
199 | 0 | return -EINVAL; |
200 | 0 |
201 | 0 | hdr.magic = HVM_FILE_MAGIC; |
202 | 0 | hdr.version = HVM_FILE_VERSION; |
203 | 0 |
204 | 0 | /* Save xen changeset */ |
205 | 0 | c = strrchr(xen_changeset(), ':'); |
206 | 0 | if ( c ) |
207 | 0 | hdr.changeset = simple_strtoll(c, NULL, 16); |
208 | 0 | else |
209 | 0 | hdr.changeset = -1ULL; /* Unknown */ |
210 | 0 |
211 | 0 | arch_hvm_save(d, &hdr); |
212 | 0 |
213 | 0 | if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 ) |
214 | 0 | { |
215 | 0 | printk(XENLOG_G_ERR "HVM%d save: failed to write header\n", |
216 | 0 | d->domain_id); |
217 | 0 | return -EFAULT; |
218 | 0 | } |
219 | 0 |
220 | 0 | /* Save all available kinds of state */ |
221 | 0 | for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) |
222 | 0 | { |
223 | 0 | handler = hvm_sr_handlers[i].save; |
224 | 0 | if ( handler != NULL ) |
225 | 0 | { |
226 | 0 | printk(XENLOG_G_INFO "HVM%d save: %s\n", |
227 | 0 | d->domain_id, hvm_sr_handlers[i].name); |
228 | 0 | if ( handler(d, h) != 0 ) |
229 | 0 | { |
230 | 0 | printk(XENLOG_G_ERR |
231 | 0 | "HVM%d save: failed to save type %"PRIu16"\n", |
232 | 0 | d->domain_id, i); |
233 | 0 | return -EFAULT; |
234 | 0 | } |
235 | 0 | } |
236 | 0 | } |
237 | 0 |
238 | 0 | /* Save an end-of-file marker */ |
239 | 0 | if ( hvm_save_entry(END, 0, h, &end) != 0 ) |
240 | 0 | { |
241 | 0 | /* Run out of data */ |
242 | 0 | printk(XENLOG_G_ERR "HVM%d save: no room for end marker\n", |
243 | 0 | d->domain_id); |
244 | 0 | return -EFAULT; |
245 | 0 | } |
246 | 0 |
247 | 0 | /* Save macros should not have let us overrun */ |
248 | 0 | ASSERT(h->cur <= h->size); |
249 | 0 | return 0; |
250 | 0 | } |
251 | | |
252 | | int hvm_load(struct domain *d, hvm_domain_context_t *h) |
253 | 0 | { |
254 | 0 | struct hvm_save_header hdr; |
255 | 0 | struct hvm_save_descriptor *desc; |
256 | 0 | hvm_load_handler handler; |
257 | 0 | struct vcpu *v; |
258 | 0 |
259 | 0 | if ( d->is_dying ) |
260 | 0 | return -EINVAL; |
261 | 0 |
262 | 0 | /* Read the save header, which must be first */ |
263 | 0 | if ( hvm_load_entry(HEADER, h, &hdr) != 0 ) |
264 | 0 | return -1; |
265 | 0 |
266 | 0 | if ( arch_hvm_load(d, &hdr) ) |
267 | 0 | return -1; |
268 | 0 |
269 | 0 | /* Down all the vcpus: we only re-enable the ones that had state saved. */ |
270 | 0 | for_each_vcpu(d, v) |
271 | 0 | if ( test_and_set_bit(_VPF_down, &v->pause_flags) ) |
272 | 0 | vcpu_sleep_nosync(v); |
273 | 0 |
274 | 0 | for ( ; ; ) |
275 | 0 | { |
276 | 0 | if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) ) |
277 | 0 | { |
278 | 0 | /* Run out of data */ |
279 | 0 | printk(XENLOG_G_ERR |
280 | 0 | "HVM%d restore: save did not end with a null entry\n", |
281 | 0 | d->domain_id); |
282 | 0 | return -1; |
283 | 0 | } |
284 | 0 |
285 | 0 | /* Read the typecode of the next entry and check for the end-marker */ |
286 | 0 | desc = (struct hvm_save_descriptor *)(&h->data[h->cur]); |
287 | 0 | if ( desc->typecode == 0 ) |
288 | 0 | return 0; |
289 | 0 |
290 | 0 | /* Find the handler for this entry */ |
291 | 0 | if ( (desc->typecode > HVM_SAVE_CODE_MAX) || |
292 | 0 | ((handler = hvm_sr_handlers[desc->typecode].load) == NULL) ) |
293 | 0 | { |
294 | 0 | printk(XENLOG_G_ERR "HVM%d restore: unknown entry typecode %u\n", |
295 | 0 | d->domain_id, desc->typecode); |
296 | 0 | return -1; |
297 | 0 | } |
298 | 0 |
299 | 0 | /* Load the entry */ |
300 | 0 | printk(XENLOG_G_INFO "HVM%d restore: %s %"PRIu16"\n", d->domain_id, |
301 | 0 | hvm_sr_handlers[desc->typecode].name, desc->instance); |
302 | 0 | if ( handler(d, h) != 0 ) |
303 | 0 | { |
304 | 0 | printk(XENLOG_G_ERR "HVM%d restore: failed to load entry %u/%u\n", |
305 | 0 | d->domain_id, desc->typecode, desc->instance); |
306 | 0 | return -1; |
307 | 0 | } |
308 | 0 | } |
309 | 0 |
|
310 | 0 | /* Not reached */ |
311 | 0 | } |
312 | | |
313 | | int _hvm_init_entry(struct hvm_domain_context *h, uint16_t tc, uint16_t inst, |
314 | | uint32_t len) |
315 | 0 | { |
316 | 0 | struct hvm_save_descriptor *d |
317 | 0 | = (struct hvm_save_descriptor *)&h->data[h->cur]; |
318 | 0 |
319 | 0 | if ( h->size - h->cur < len + sizeof (*d) ) |
320 | 0 | { |
321 | 0 | printk(XENLOG_G_WARNING "HVM save: no room for" |
322 | 0 | " %"PRIu32" + %zu bytes for typecode %"PRIu16"\n", |
323 | 0 | len, sizeof(*d), tc); |
324 | 0 | return -1; |
325 | 0 | } |
326 | 0 |
327 | 0 | d->typecode = tc; |
328 | 0 | d->instance = inst; |
329 | 0 | d->length = len; |
330 | 0 | h->cur += sizeof(*d); |
331 | 0 |
332 | 0 | return 0; |
333 | 0 | } |
334 | | |
335 | | void _hvm_write_entry(struct hvm_domain_context *h, void *src, |
336 | | uint32_t src_len) |
337 | 0 | { |
338 | 0 | memcpy(&h->data[h->cur], src, src_len); |
339 | 0 | h->cur += src_len; |
340 | 0 | } |
341 | | |
342 | | int _hvm_check_entry(struct hvm_domain_context *h, uint16_t type, uint32_t len, |
343 | | bool strict_length) |
344 | 0 | { |
345 | 0 | struct hvm_save_descriptor *d |
346 | 0 | = (struct hvm_save_descriptor *)&h->data[h->cur]; |
347 | 0 |
348 | 0 | if ( sizeof(*d) > h->size - h->cur) |
349 | 0 | { |
350 | 0 | printk(XENLOG_G_WARNING |
351 | 0 | "HVM restore: not enough data left to read %zu bytes " |
352 | 0 | "for type %u header\n", sizeof(*d), type); |
353 | 0 | return -1; |
354 | 0 | } |
355 | 0 |
356 | 0 | if ( (type != d->typecode) || |
357 | 0 | (strict_length ? (len != d->length) : (len < d->length)) || |
358 | 0 | (d->length > (h->size - h->cur - sizeof(*d))) ) |
359 | 0 | { |
360 | 0 | printk(XENLOG_G_WARNING |
361 | 0 | "HVM restore mismatch: expected %s type %u length %u, " |
362 | 0 | "saw type %u length %u. %zu bytes remaining\n", |
363 | 0 | strict_length ? "strict" : "zeroextended", type, len, |
364 | 0 | d->typecode, d->length, h->size - h->cur - sizeof(*d)); |
365 | 0 | return -1; |
366 | 0 | } |
367 | 0 |
368 | 0 | h->cur += sizeof(*d); |
369 | 0 |
370 | 0 | return 0; |
371 | 0 | } |
372 | | |
373 | | void _hvm_read_entry(struct hvm_domain_context *h, void *dest, |
374 | | uint32_t dest_len) |
375 | 0 | { |
376 | 0 | struct hvm_save_descriptor *d |
377 | 0 | = (struct hvm_save_descriptor *)&h->data[h->cur - sizeof(*d)]; |
378 | 0 |
379 | 0 | BUG_ON(d->length > dest_len); |
380 | 0 |
381 | 0 | memcpy(dest, &h->data[h->cur], d->length); |
382 | 0 |
383 | 0 | if ( d->length < dest_len ) |
384 | 0 | memset((char *)dest + d->length, 0, dest_len - d->length); |
385 | 0 |
386 | 0 | h->cur += d->length; |
387 | 0 | } |
388 | | |
389 | | /* |
390 | | * Local variables: |
391 | | * mode: C |
392 | | * c-file-style: "BSD" |
393 | | * c-basic-offset: 4 |
394 | | * tab-width: 4 |
395 | | * indent-tabs-mode: nil |
396 | | * End: |
397 | | */ |