debuggers.hg
changeset 13706:99d36a153024
[HVM] Save/restore cleanups: don't save state for downed vcpus.
(Since we wouldn't load it anyway)
Also tidy up the plumbing around the hypercalls.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author | Tim Deegan <Tim.Deegan@xensource.com> |
---|---|
date | Mon Jan 29 16:49:29 2007 +0000 (2007-01-29) |
parents | 5bb084098493 |
children | 21d6135f522f |
files | xen/arch/x86/domctl.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/intercept.c xen/include/asm-x86/hvm/support.h |
line diff
1.1 --- a/xen/arch/x86/domctl.c Mon Jan 29 13:22:21 2007 +0000 1.2 +++ b/xen/arch/x86/domctl.c Mon Jan 29 16:49:29 2007 +0000 1.3 @@ -290,7 +290,6 @@ long arch_do_domctl( 1.4 { 1.5 struct hvm_domain_context *c; 1.6 struct domain *d; 1.7 - struct vcpu *v; 1.8 1.9 ret = -ESRCH; 1.10 if ( (d = get_domain_by_id(domctl->domain)) == NULL ) 1.11 @@ -299,15 +298,16 @@ long arch_do_domctl( 1.12 ret = -ENOMEM; 1.13 if ( (c = xmalloc(struct hvm_domain_context)) == NULL ) 1.14 goto sethvmcontext_out; 1.15 - 1.16 - v = d->vcpu[0]; 1.17 1.18 ret = -EFAULT; 1.19 - 1.20 if ( copy_from_guest(c, domctl->u.hvmcontext.ctxt, 1) != 0 ) 1.21 goto sethvmcontext_out; 1.22 1.23 - ret = arch_sethvm_ctxt(v, c); 1.24 + ret = -EINVAL; 1.25 + if ( !is_hvm_domain(d) ) 1.26 + goto sethvmcontext_out; 1.27 + 1.28 + ret = hvm_load(d, c); 1.29 1.30 xfree(c); 1.31 1.32 @@ -321,7 +321,6 @@ long arch_do_domctl( 1.33 { 1.34 struct hvm_domain_context *c; 1.35 struct domain *d; 1.36 - struct vcpu *v; 1.37 1.38 ret = -ESRCH; 1.39 if ( (d = get_domain_by_id(domctl->domain)) == NULL ) 1.40 @@ -330,15 +329,14 @@ long arch_do_domctl( 1.41 ret = -ENOMEM; 1.42 if ( (c = xmalloc(struct hvm_domain_context)) == NULL ) 1.43 goto gethvmcontext_out; 1.44 - 1.45 - v = d->vcpu[0]; 1.46 - 1.47 + memset(c, 0, sizeof(*c)); 1.48 + 1.49 ret = -ENODATA; 1.50 - if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 1.51 + if ( !is_hvm_domain(d) ) 1.52 goto gethvmcontext_out; 1.53 1.54 ret = 0; 1.55 - if (arch_gethvm_ctxt(v, c) == -1) 1.56 + if (hvm_save(d, c) != 0) 1.57 ret = -EFAULT; 1.58 1.59 if ( copy_to_guest(domctl->u.hvmcontext.ctxt, c, 1) )
2.1 --- a/xen/arch/x86/hvm/hvm.c Mon Jan 29 13:22:21 2007 +0000 2.2 +++ b/xen/arch/x86/hvm/hvm.c Mon Jan 29 16:49:29 2007 +0000 2.3 @@ -189,17 +189,15 @@ void hvm_domain_destroy(struct domain *d 2.4 unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va); 2.5 } 2.6 2.7 -#define HVM_VCPU_CTXT_MAGIC 0x85963130 2.8 void hvm_save_cpu_ctxt(hvm_domain_context_t *h, void *opaque) 2.9 { 2.10 struct vcpu *v = opaque; 2.11 2.12 - if ( test_bit(_VCPUF_down, &v->vcpu_flags) ) { 2.13 - hvm_put_32u(h, 0x0); 2.14 + /* We don't need to save state for a vcpu that is down; the restore 2.15 + * code will leave it down if there is nothing saved. */ 2.16 + if ( test_bit(_VCPUF_down, &v->vcpu_flags) ) 2.17 return; 2.18 - } 2.19 2.20 - hvm_put_32u(h, HVM_VCPU_CTXT_MAGIC); 2.21 hvm_funcs.save_cpu_ctxt(h, opaque); 2.22 } 2.23 2.24 @@ -207,13 +205,10 @@ int hvm_load_cpu_ctxt(hvm_domain_context 2.25 { 2.26 struct vcpu *v = opaque; 2.27 2.28 - if ( hvm_get_32u(h) != HVM_VCPU_CTXT_MAGIC ) 2.29 - return 0; 2.30 - 2.31 if ( hvm_funcs.load_cpu_ctxt(h, opaque, version) < 0 ) 2.32 return -EINVAL; 2.33 2.34 - /* Auxiliary processors shoudl be woken immediately. */ 2.35 + /* Auxiliary processors should be woken immediately. */ 2.36 if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) ) 2.37 vcpu_wake(v); 2.38
3.1 --- a/xen/arch/x86/hvm/intercept.c Mon Jan 29 13:22:21 2007 +0000 3.2 +++ b/xen/arch/x86/hvm/intercept.c Mon Jan 29 16:49:29 2007 +0000 3.3 @@ -190,7 +190,7 @@ int hvm_register_savevm(struct domain *d 3.4 return 0; 3.5 } 3.6 3.7 -int hvm_save(struct vcpu *v, hvm_domain_context_t *h) 3.8 +int hvm_save(struct domain *d, hvm_domain_context_t *h) 3.9 { 3.10 uint32_t len, len_pos, cur_pos; 3.11 uint32_t eax, ebx, ecx, edx; 3.12 @@ -198,13 +198,6 @@ int hvm_save(struct vcpu *v, hvm_domain_ 3.13 char *chgset; 3.14 struct hvm_save_header hdr; 3.15 3.16 - if (!is_hvm_vcpu(v)) { 3.17 - printk("hvm_save only for hvm guest!\n"); 3.18 - return -1; 3.19 - } 3.20 - 3.21 - memset(h, 0, sizeof(hvm_domain_context_t)); 3.22 - 3.23 hdr.magic = HVM_FILE_MAGIC; 3.24 hdr.version = HVM_FILE_VERSION; 3.25 cpuid(1, &eax, &ebx, &ecx, &edx); 3.26 @@ -222,7 +215,7 @@ int hvm_save(struct vcpu *v, hvm_domain_ 3.27 hvm_put_8u(h, len); 3.28 hvm_put_buffer(h, chgset, len); 3.29 3.30 - for(se = v->domain->arch.hvm_domain.first_se; se != NULL; se = se->next) { 3.31 + for(se = d->arch.hvm_domain.first_se; se != NULL; se = se->next) { 3.32 /* ID string */ 3.33 len = strnlen(se->idstr, HVM_SE_IDSTR_LEN); 3.34 hvm_put_8u(h, len); 3.35 @@ -270,7 +263,7 @@ static HVMStateEntry *find_se(struct dom 3.36 return NULL; 3.37 } 3.38 3.39 -int hvm_load(struct vcpu *v, hvm_domain_context_t *h) 3.40 +int hvm_load(struct domain *d, hvm_domain_context_t *h) 3.41 { 3.42 uint32_t len, rec_len, rec_pos, instance_id, version_id; 3.43 uint32_t eax, ebx, ecx, edx; 3.44 @@ -280,11 +273,7 @@ int hvm_load(struct vcpu *v, hvm_domain_ 3.45 char *cur_chgset; 3.46 int ret; 3.47 struct hvm_save_header hdr; 3.48 - 3.49 - if (!is_hvm_vcpu(v)) { 3.50 - printk("hvm_load only for hvm guest!\n"); 3.51 - return -1; 3.52 - } 3.53 + struct vcpu *v; 3.54 3.55 if (h->size >= HVM_CTXT_SIZE) { 3.56 printk("hvm_load fail! seems hvm_domain_context overflow when hvm_save! 
need %"PRId32" bytes.\n", h->size); 3.57 @@ -339,6 +328,11 @@ int hvm_load(struct vcpu *v, hvm_domain_ 3.58 printk("warnings: try to restore hvm guest when changeset is unavailable.\n"); 3.59 3.60 3.61 + /* Down all the vcpus: we only re-enable the ones that had state saved. */ 3.62 + for_each_vcpu(d, v) 3.63 + if ( test_and_set_bit(_VCPUF_down, &v->vcpu_flags) ) 3.64 + vcpu_sleep_nosync(v); 3.65 + 3.66 while(1) { 3.67 if (hvm_ctxt_end(h)) { 3.68 break; 3.69 @@ -362,7 +356,7 @@ int hvm_load(struct vcpu *v, hvm_domain_ 3.70 rec_len = hvm_get_32u(h); 3.71 rec_pos = hvm_ctxt_tell(h); 3.72 3.73 - se = find_se(v->domain, idstr, instance_id); 3.74 + se = find_se(d, idstr, instance_id); 3.75 if (se == NULL) { 3.76 printk("warnings: hvm load can't find device %s's instance %d!\n", 3.77 idstr, instance_id); 3.78 @@ -384,21 +378,6 @@ int hvm_load(struct vcpu *v, hvm_domain_ 3.79 return 0; 3.80 } 3.81 3.82 -int arch_gethvm_ctxt( 3.83 - struct vcpu *v, struct hvm_domain_context *c) 3.84 -{ 3.85 - if ( !is_hvm_vcpu(v) ) 3.86 - return -1; 3.87 - 3.88 - return hvm_save(v, c); 3.89 - 3.90 -} 3.91 - 3.92 -int arch_sethvm_ctxt( 3.93 - struct vcpu *v, struct hvm_domain_context *c) 3.94 -{ 3.95 - return hvm_load(v, c); 3.96 -} 3.97 3.98 #ifdef HVM_DEBUG_SUSPEND 3.99 static void shpage_info(shared_iopage_t *sh)
4.1 --- a/xen/include/asm-x86/hvm/support.h Mon Jan 29 13:22:21 2007 +0000 4.2 +++ b/xen/include/asm-x86/hvm/support.h Mon Jan 29 16:49:29 2007 +0000 4.3 @@ -242,11 +242,8 @@ static inline void hvm_get_buffer(hvm_do 4.4 #define hvm_get_struct(_h, _p) \ 4.5 hvm_get_buffer((_h), (char *)(_p), sizeof(*(_p))) 4.6 4.7 -int hvm_save(struct vcpu*, hvm_domain_context_t *h); 4.8 -int hvm_load(struct vcpu*, hvm_domain_context_t *h); 4.9 - 4.10 -int arch_sethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c); 4.11 -int arch_gethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c); 4.12 +int hvm_save(struct domain *d, hvm_domain_context_t *h); 4.13 +int hvm_load(struct domain *d, hvm_domain_context_t *h); 4.14 4.15 void shpage_init(struct domain *d, shared_iopage_t *sp); 4.16