debuggers.hg
changeset 13637:e75107963846
[HVM][SVM] Add save/restore support.
From: Mats Petersson <Mats.Petersson@amd.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author    kfraser@localhost.localdomain
date      Wed Jan 24 18:30:07 2007 +0000 (2007-01-24)
parents   bc7363b9b892
children  32f7d3200a99
files     xen/arch/x86/hvm/svm/svm.c
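For orientation before the diff below: the changeset fills in the two per-vcpu context hooks that the common HVM save/restore path invokes. A minimal sketch of the interface being implemented — the hook names and signatures come straight from the diff, while the struct name hvm_function_table and its header location are recalled from the Xen tree of this era rather than shown in this patch:

/* Abridged sketch, not the full declaration; the real struct
 * hvm_function_table lives in xen/include/asm-x86/hvm/hvm.h. */
struct hvm_function_table {
    /* ... existing hooks, e.g. store/load_cpu_guest_regs ... */
    void (*save_cpu_ctxt)(hvm_domain_context_t *h, void *opaque);
    int  (*load_cpu_ctxt)(hvm_domain_context_t *h, void *opaque, int version);
    /* ... */
};
extern struct hvm_function_table hvm_funcs;

The second hunk of the diff points these at svm_save_vmcb_ctxt() and svm_load_vmcb_ctxt() from start_svm().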
line diff
--- a/xen/arch/x86/hvm/svm/svm.c    Wed Jan 24 18:23:23 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c    Wed Jan 24 18:30:07 2007 +0000
@@ -361,6 +361,280 @@ static inline void __restore_debug_regis
 }
 
 
+int svm_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    c->eip = vmcb->rip;
+
+#ifdef HVM_DEBUG_SUSPEND
+    printk("%s: eip=0x%"PRIx64".\n",
+           __func__,
+           c->eip);
+#endif
+
+    c->esp = vmcb->rsp;
+    c->eflags = vmcb->rflags;
+
+    c->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
+    c->cr3 = v->arch.hvm_svm.cpu_cr3;
+    c->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
+
+#ifdef HVM_DEBUG_SUSPEND
+    printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
+           __func__,
+           c->cr3,
+           c->cr0,
+           c->cr4);
+#endif
+
+    c->idtr_limit = vmcb->idtr.limit;
+    c->idtr_base = vmcb->idtr.base;
+
+    c->gdtr_limit = vmcb->gdtr.limit;
+    c->gdtr_base = vmcb->gdtr.base;
+
+    c->cs_sel = vmcb->cs.sel;
+    c->cs_limit = vmcb->cs.limit;
+    c->cs_base = vmcb->cs.base;
+    c->cs_arbytes = vmcb->cs.attr.bytes;
+
+    c->ds_sel = vmcb->ds.sel;
+    c->ds_limit = vmcb->ds.limit;
+    c->ds_base = vmcb->ds.base;
+    c->ds_arbytes = vmcb->ds.attr.bytes;
+
+    c->es_sel = vmcb->es.sel;
+    c->es_limit = vmcb->es.limit;
+    c->es_base = vmcb->es.base;
+    c->es_arbytes = vmcb->es.attr.bytes;
+
+    c->ss_sel = vmcb->ss.sel;
+    c->ss_limit = vmcb->ss.limit;
+    c->ss_base = vmcb->ss.base;
+    c->ss_arbytes = vmcb->ss.attr.bytes;
+
+    c->fs_sel = vmcb->fs.sel;
+    c->fs_limit = vmcb->fs.limit;
+    c->fs_base = vmcb->fs.base;
+    c->fs_arbytes = vmcb->fs.attr.bytes;
+
+    c->gs_sel = vmcb->gs.sel;
+    c->gs_limit = vmcb->gs.limit;
+    c->gs_base = vmcb->gs.base;
+    c->gs_arbytes = vmcb->gs.attr.bytes;
+
+    c->tr_sel = vmcb->tr.sel;
+    c->tr_limit = vmcb->tr.limit;
+    c->tr_base = vmcb->tr.base;
+    c->tr_arbytes = vmcb->tr.attr.bytes;
+
+    c->ldtr_sel = vmcb->ldtr.sel;
+    c->ldtr_limit = vmcb->ldtr.limit;
+    c->ldtr_base = vmcb->ldtr.base;
+    c->ldtr_arbytes = vmcb->ldtr.attr.bytes;
+
+    c->sysenter_cs = vmcb->sysenter_cs;
+    c->sysenter_esp = vmcb->sysenter_esp;
+    c->sysenter_eip = vmcb->sysenter_eip;
+
+    return 1;
+}
+
+
+int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
+{
+    unsigned long mfn, old_base_mfn;
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    vmcb->rip = c->eip;
+    vmcb->rsp = c->esp;
+    vmcb->rflags = c->eflags;
+
+    v->arch.hvm_svm.cpu_shadow_cr0 = c->cr0;
+
+#ifdef HVM_DEBUG_SUSPEND
+    printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
+           __func__,
+           c->cr3,
+           c->cr0,
+           c->cr4);
+#endif
+
+    if (!svm_paging_enabled(v)) {
+        printk("%s: paging not enabled.\n", __func__);
+        goto skip_cr3;
+    }
+
+    if (c->cr3 == v->arch.hvm_svm.cpu_cr3) {
+        /*
+         * This is a simple TLB flush, implying the guest has
+         * removed some translation or changed page attributes.
+         * We simply invalidate the shadow.
+         */
+        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
+        if (mfn != pagetable_get_pfn(v->arch.guest_table)) {
+            goto bad_cr3;
+        }
+    } else {
+        /*
+         * If different, make a shadow. Check if the PDBR is valid
+         * first.
+         */
+        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
+        /* current!=vcpu as not called by arch_vmx_do_launch */
+        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
+        if (!mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain)) {
+            goto bad_cr3;
+        }
+        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
+        v->arch.guest_table = pagetable_from_pfn(mfn);
+        if (old_base_mfn)
+            put_page(mfn_to_page(old_base_mfn));
+        /*
+         * arch.shadow_table should now hold the next CR3 for shadow
+         */
+        v->arch.hvm_svm.cpu_cr3 = c->cr3;
+    }
+
+ skip_cr3:
+#if defined(__x86_64__) && 0
+    if (vmx_long_mode_enabled(v)) {
+        unsigned long vm_entry_value;
+        vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
+        vm_entry_value |= VM_ENTRY_IA32E_MODE;
+        __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
+    }
+#endif
+
+    vmcb->cr4 = c->cr4 | SVM_CR4_HOST_MASK;
+    v->arch.hvm_svm.cpu_shadow_cr4 = c->cr4;
+
+    vmcb->idtr.limit = c->idtr_limit;
+    vmcb->idtr.base = c->idtr_base;
+
+    vmcb->gdtr.limit = c->gdtr_limit;
+    vmcb->gdtr.base = c->gdtr_base;
+
+    vmcb->cs.sel = c->cs_sel;
+    vmcb->cs.limit = c->cs_limit;
+    vmcb->cs.base = c->cs_base;
+    vmcb->cs.attr.bytes = c->cs_arbytes;
+
+    vmcb->ds.sel = c->ds_sel;
+    vmcb->ds.limit = c->ds_limit;
+    vmcb->ds.base = c->ds_base;
+    vmcb->ds.attr.bytes = c->ds_arbytes;
+
+    vmcb->es.sel = c->es_sel;
+    vmcb->es.limit = c->es_limit;
+    vmcb->es.base = c->es_base;
+    vmcb->es.attr.bytes = c->es_arbytes;
+
+    vmcb->ss.sel = c->ss_sel;
+    vmcb->ss.limit = c->ss_limit;
+    vmcb->ss.base = c->ss_base;
+    vmcb->ss.attr.bytes = c->ss_arbytes;
+
+    vmcb->fs.sel = c->fs_sel;
+    vmcb->fs.limit = c->fs_limit;
+    vmcb->fs.base = c->fs_base;
+    vmcb->fs.attr.bytes = c->fs_arbytes;
+
+    vmcb->gs.sel = c->gs_sel;
+    vmcb->gs.limit = c->gs_limit;
+    vmcb->gs.base = c->gs_base;
+    vmcb->gs.attr.bytes = c->gs_arbytes;
+
+    vmcb->tr.sel = c->tr_sel;
+    vmcb->tr.limit = c->tr_limit;
+    vmcb->tr.base = c->tr_base;
+    vmcb->tr.attr.bytes = c->tr_arbytes;
+
+    vmcb->ldtr.sel = c->ldtr_sel;
+    vmcb->ldtr.limit = c->ldtr_limit;
+    vmcb->ldtr.base = c->ldtr_base;
+    vmcb->ldtr.attr.bytes = c->ldtr_arbytes;
+
+    vmcb->sysenter_cs = c->sysenter_cs;
+    vmcb->sysenter_esp = c->sysenter_esp;
+    vmcb->sysenter_eip = c->sysenter_eip;
+
+    shadow_update_paging_modes(v);
+    return 0;
+
+ bad_cr3:
+    gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n", c->cr3);
+    return -EINVAL;
+}
+
+
+void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    data->shadow_gs = vmcb->kerngsbase;
+    /* MSR_LSTAR, MSR_STAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_EFER */
+    data->msr_items[0] = vmcb->lstar;
+    data->msr_items[1] = vmcb->star;
+    data->msr_items[2] = vmcb->cstar;
+    data->msr_items[3] = vmcb->sfmask;
+    data->msr_items[4] = vmcb->efer;
+
+    data->tsc = hvm_get_guest_time(v);
+
+    /* dump_msr_state(guest_state); */
+}
+
+
+void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    /* MSR_LSTAR, MSR_STAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_EFER */
+    vmcb->lstar = data->msr_items[0];
+    vmcb->star = data->msr_items[1];
+    vmcb->cstar = data->msr_items[2];
+    vmcb->sfmask = data->msr_items[3];
+    vmcb->efer = data->msr_items[4];
+
+    hvm_set_guest_time(v, data->tsc);
+
+    /* dump_msr_state(guest_state); */
+}
+
+void svm_save_vmcb_ctxt(hvm_domain_context_t *h, void *opaque)
+{
+    struct vcpu *v = opaque;
+    struct hvm_hw_cpu ctxt;
+
+    svm_save_cpu_state(v, &ctxt);
+
+    svm_vmcs_save(v, &ctxt);
+
+    hvm_put_struct(h, &ctxt);
+}
+
+int svm_load_vmcb_ctxt(hvm_domain_context_t *h, void *opaque, int version)
+{
+    struct vcpu *v = opaque;
+    struct hvm_hw_cpu ctxt;
+
+    if (version != 1)
+        return -EINVAL;
+
+    hvm_get_struct(h, &ctxt);
+    svm_load_cpu_state(v, &ctxt);
+    if (svm_vmcb_restore(v, &ctxt)) {
+        printk("svm_vmcb restore failed!\n");
+        domain_crash(v->domain);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+
 static inline void svm_restore_dr(struct vcpu *v)
 {
     if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
@@ -773,6 +1047,9 @@ int start_svm(void)
     hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
     hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
 
+    hvm_funcs.save_cpu_ctxt = svm_save_vmcb_ctxt;
+    hvm_funcs.load_cpu_ctxt = svm_load_vmcb_ctxt;
+
     hvm_funcs.paging_enabled = svm_paging_enabled;
     hvm_funcs.long_mode_enabled = svm_long_mode_enabled;
     hvm_funcs.pae_enabled = svm_pae_enabled;
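Taken together, the new functions round-trip a struct hvm_hw_cpu through the domain context stream: svm_save_vmcb_ctxt() serialises VMCB-derived register state plus the syscall MSRs and guest TSC via hvm_put_struct(), and svm_load_vmcb_ctxt() reads the record back via hvm_get_struct(), rejecting any layout version other than 1. A hypothetical caller, purely for illustration — the real drivers live in the common HVM save/restore code, and the stream handling between the two phases is elided here:

/* Illustrative sketch only; not part of this patch. */
static int example_cpu_ctxt_roundtrip(struct vcpu *v, hvm_domain_context_t *h)
{
    /* Save phase: VMCB registers, MSRs and TSC into the context stream. */
    hvm_funcs.save_cpu_ctxt(h, v);

    /* ... stream written out on save, rewound before restore (elided) ... */

    /* Restore phase: version 1 is the only accepted layout; a CR3 that
     * maps to no valid MFN makes svm_vmcb_restore() return -EINVAL and
     * crash the domain. */
    return hvm_funcs.load_cpu_ctxt(h, v, 1);
}

One design point worth noting from the diff: restore validates the saved CR3 against the guest's page tables (taking a reference on the new base frame and dropping the old one) before committing it, so a corrupt or malicious save image fails cleanly at load_cpu_ctxt rather than leaving the vcpu pointing at an unowned frame.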