debuggers.hg
changeset 17075:5e1df44d406e
x86 hvm: Factor out save/restore of segment registers from VMX/SVM
files into common HVM code.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author    Keir Fraser <keir.fraser@citrix.com>
date      Wed Feb 13 14:03:58 2008 +0000 (2008-02-13)
parents   0164d924ceba
children  e56c9fe4a7e6
files     xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c
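This changeset moves the per-segment save/restore out of the VMX and SVM backends: hvm_save_cpu_ctxt() and hvm_load_cpu_ctxt() in hvm.c now go through the generic segment accessors, which dispatch to the vendor hooks (vmx_get/set_segment_register on VMX, the corresponding hooks on SVM). As a sketch of the assumed plumbing (these wrappers live in xen/include/asm-x86/hvm/hvm.h, are not part of this diff, and are shown here for orientation only):

    static inline void hvm_get_segment_register(
        struct vcpu *v, enum x86_segment seg, struct segment_register *reg)
    {
        hvm_funcs.get_segment_register(v, seg, reg);   /* vendor hook */
    }

    static inline void hvm_set_segment_register(
        struct vcpu *v, enum x86_segment seg, struct segment_register *reg)
    {
        hvm_funcs.set_segment_register(v, seg, reg);   /* vendor hook */
    }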
--- a/xen/arch/x86/hvm/hvm.c    Wed Feb 13 10:43:13 2008 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Wed Feb 13 14:03:58 2008 +0000
@@ -287,9 +287,10 @@ static int hvm_save_cpu_ctxt(struct doma
 {
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
+    struct segment_register seg;
     struct vcpu_guest_context *vc;
 
-    for_each_vcpu(d, v)
+    for_each_vcpu ( d, v )
     {
         /* We don't need to save state for a vcpu that is down; the restore
          * code will leave it down if there is nothing saved. */
@@ -299,12 +300,69 @@ static int hvm_save_cpu_ctxt(struct doma
         /* Architecture-specific vmcs/vmcb bits */
         hvm_funcs.save_cpu_ctxt(v, &ctxt);
 
-        /* Other vcpu register state */
+        hvm_get_segment_register(v, x86_seg_idtr, &seg);
+        ctxt.idtr_limit = seg.limit;
+        ctxt.idtr_base = seg.base;
+
+        hvm_get_segment_register(v, x86_seg_gdtr, &seg);
+        ctxt.gdtr_limit = seg.limit;
+        ctxt.gdtr_base = seg.base;
+
+        hvm_get_segment_register(v, x86_seg_cs, &seg);
+        ctxt.cs_sel = seg.sel;
+        ctxt.cs_limit = seg.limit;
+        ctxt.cs_base = seg.base;
+        ctxt.cs_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_ds, &seg);
+        ctxt.ds_sel = seg.sel;
+        ctxt.ds_limit = seg.limit;
+        ctxt.ds_base = seg.base;
+        ctxt.ds_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_es, &seg);
+        ctxt.es_sel = seg.sel;
+        ctxt.es_limit = seg.limit;
+        ctxt.es_base = seg.base;
+        ctxt.es_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_ss, &seg);
+        ctxt.ss_sel = seg.sel;
+        ctxt.ss_limit = seg.limit;
+        ctxt.ss_base = seg.base;
+        ctxt.ss_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_fs, &seg);
+        ctxt.fs_sel = seg.sel;
+        ctxt.fs_limit = seg.limit;
+        ctxt.fs_base = seg.base;
+        ctxt.fs_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_gs, &seg);
+        ctxt.gs_sel = seg.sel;
+        ctxt.gs_limit = seg.limit;
+        ctxt.gs_base = seg.base;
+        ctxt.gs_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_tr, &seg);
+        ctxt.tr_sel = seg.sel;
+        ctxt.tr_limit = seg.limit;
+        ctxt.tr_base = seg.base;
+        ctxt.tr_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_ldtr, &seg);
+        ctxt.ldtr_sel = seg.sel;
+        ctxt.ldtr_limit = seg.limit;
+        ctxt.ldtr_base = seg.base;
+        ctxt.ldtr_arbytes = seg.attr.bytes;
+
         vc = &v->arch.guest_context;
+
         if ( v->fpu_initialised )
             memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
         else
             memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
+
         ctxt.rax = vc->user_regs.eax;
         ctxt.rbx = vc->user_regs.ebx;
         ctxt.rcx = vc->user_regs.ecx;
@@ -343,6 +401,7 @@ static int hvm_load_cpu_ctxt(struct doma
     int vcpuid, rc;
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
+    struct segment_register seg;
     struct vcpu_guest_context *vc;
 
     /* Which vcpu is this? */
@@ -398,8 +457,64 @@ static int hvm_load_cpu_ctxt(struct doma
     if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
         return -EINVAL;
 
-    /* Other vcpu register state */
+    seg.limit = ctxt.idtr_limit;
+    seg.base = ctxt.idtr_base;
+    hvm_set_segment_register(v, x86_seg_idtr, &seg);
+
+    seg.limit = ctxt.gdtr_limit;
+    seg.base = ctxt.gdtr_base;
+    hvm_set_segment_register(v, x86_seg_gdtr, &seg);
+
+    seg.sel = ctxt.cs_sel;
+    seg.limit = ctxt.cs_limit;
+    seg.base = ctxt.cs_base;
+    seg.attr.bytes = ctxt.cs_arbytes;
+    hvm_set_segment_register(v, x86_seg_cs, &seg);
+
+    seg.sel = ctxt.ds_sel;
+    seg.limit = ctxt.ds_limit;
+    seg.base = ctxt.ds_base;
+    seg.attr.bytes = ctxt.ds_arbytes;
+    hvm_set_segment_register(v, x86_seg_ds, &seg);
+
+    seg.sel = ctxt.es_sel;
+    seg.limit = ctxt.es_limit;
+    seg.base = ctxt.es_base;
+    seg.attr.bytes = ctxt.es_arbytes;
+    hvm_set_segment_register(v, x86_seg_es, &seg);
+
+    seg.sel = ctxt.ss_sel;
+    seg.limit = ctxt.ss_limit;
+    seg.base = ctxt.ss_base;
+    seg.attr.bytes = ctxt.ss_arbytes;
+    hvm_set_segment_register(v, x86_seg_ss, &seg);
+
+    seg.sel = ctxt.fs_sel;
+    seg.limit = ctxt.fs_limit;
+    seg.base = ctxt.fs_base;
+    seg.attr.bytes = ctxt.fs_arbytes;
+    hvm_set_segment_register(v, x86_seg_fs, &seg);
+
+    seg.sel = ctxt.gs_sel;
+    seg.limit = ctxt.gs_limit;
+    seg.base = ctxt.gs_base;
+    seg.attr.bytes = ctxt.gs_arbytes;
+    hvm_set_segment_register(v, x86_seg_gs, &seg);
+
+    seg.sel = ctxt.tr_sel;
+    seg.limit = ctxt.tr_limit;
+    seg.base = ctxt.tr_base;
+    seg.attr.bytes = ctxt.tr_arbytes;
+    hvm_set_segment_register(v, x86_seg_tr, &seg);
+
+    seg.sel = ctxt.ldtr_sel;
+    seg.limit = ctxt.ldtr_limit;
+    seg.base = ctxt.ldtr_base;
+    seg.attr.bytes = ctxt.ldtr_arbytes;
+    hvm_set_segment_register(v, x86_seg_ldtr, &seg);
+
     memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
+
     vc->user_regs.eax = ctxt.rax;
     vc->user_regs.ebx = ctxt.rbx;
     vc->user_regs.ecx = ctxt.rcx;
--- a/xen/arch/x86/hvm/svm/svm.c    Wed Feb 13 10:43:13 2008 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c    Wed Feb 13 14:03:58 2008 +0000
@@ -181,7 +181,7 @@ static void svm_restore_dr(struct vcpu *
     __restore_debug_registers(v);
 }
 
-int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
+static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
@@ -190,52 +190,6 @@ int svm_vmcb_save(struct vcpu *v, struct
     c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
     c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
 
-    c->idtr_limit = vmcb->idtr.limit;
-    c->idtr_base = vmcb->idtr.base;
-
-    c->gdtr_limit = vmcb->gdtr.limit;
-    c->gdtr_base = vmcb->gdtr.base;
-
-    c->cs_sel = vmcb->cs.sel;
-    c->cs_limit = vmcb->cs.limit;
-    c->cs_base = vmcb->cs.base;
-    c->cs_arbytes = vmcb->cs.attr.bytes;
-
-    c->ds_sel = vmcb->ds.sel;
-    c->ds_limit = vmcb->ds.limit;
-    c->ds_base = vmcb->ds.base;
-    c->ds_arbytes = vmcb->ds.attr.bytes;
-
-    c->es_sel = vmcb->es.sel;
-    c->es_limit = vmcb->es.limit;
-    c->es_base = vmcb->es.base;
-    c->es_arbytes = vmcb->es.attr.bytes;
-
-    c->ss_sel = vmcb->ss.sel;
-    c->ss_limit = vmcb->ss.limit;
-    c->ss_base = vmcb->ss.base;
-    c->ss_arbytes = vmcb->ss.attr.bytes;
-
-    c->fs_sel = vmcb->fs.sel;
-    c->fs_limit = vmcb->fs.limit;
-    c->fs_base = vmcb->fs.base;
-    c->fs_arbytes = vmcb->fs.attr.bytes;
-
-    c->gs_sel = vmcb->gs.sel;
-    c->gs_limit = vmcb->gs.limit;
-    c->gs_base = vmcb->gs.base;
-    c->gs_arbytes = vmcb->gs.attr.bytes;
-
-    c->tr_sel = vmcb->tr.sel;
-    c->tr_limit = vmcb->tr.limit;
-    c->tr_base = vmcb->tr.base;
-    c->tr_arbytes = vmcb->tr.attr.bytes;
-
-    c->ldtr_sel = vmcb->ldtr.sel;
-    c->ldtr_limit = vmcb->ldtr.limit;
-    c->ldtr_base = vmcb->ldtr.base;
-    c->ldtr_arbytes = vmcb->ldtr.attr.bytes;
-
     c->sysenter_cs = vmcb->sysenter_cs;
     c->sysenter_esp = vmcb->sysenter_esp;
     c->sysenter_eip = vmcb->sysenter_eip;
@@ -253,8 +207,7 @@ int svm_vmcb_save(struct vcpu *v, struct
     return 1;
 }
 
-
-int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
+static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     unsigned long mfn = 0;
     p2m_type_t p2mt;
@@ -301,53 +254,6 @@ int svm_vmcb_restore(struct vcpu *v, str
               __func__, c->cr3, c->cr0, c->cr4);
 #endif
 
-    vmcb->idtr.limit = c->idtr_limit;
-    vmcb->idtr.base = c->idtr_base;
-
-    vmcb->gdtr.limit = c->gdtr_limit;
-    vmcb->gdtr.base = c->gdtr_base;
-
-    vmcb->cs.sel = c->cs_sel;
-    vmcb->cs.limit = c->cs_limit;
-    vmcb->cs.base = c->cs_base;
-    vmcb->cs.attr.bytes = c->cs_arbytes;
-
-    vmcb->ds.sel = c->ds_sel;
-    vmcb->ds.limit = c->ds_limit;
-    vmcb->ds.base = c->ds_base;
-    vmcb->ds.attr.bytes = c->ds_arbytes;
-
-    vmcb->es.sel = c->es_sel;
-    vmcb->es.limit = c->es_limit;
-    vmcb->es.base = c->es_base;
-    vmcb->es.attr.bytes = c->es_arbytes;
-
-    vmcb->ss.sel = c->ss_sel;
-    vmcb->ss.limit = c->ss_limit;
-    vmcb->ss.base = c->ss_base;
-    vmcb->ss.attr.bytes = c->ss_arbytes;
-    vmcb->cpl = vmcb->ss.attr.fields.dpl;
-
-    vmcb->fs.sel = c->fs_sel;
-    vmcb->fs.limit = c->fs_limit;
-    vmcb->fs.base = c->fs_base;
-    vmcb->fs.attr.bytes = c->fs_arbytes;
-
-    vmcb->gs.sel = c->gs_sel;
-    vmcb->gs.limit = c->gs_limit;
-    vmcb->gs.base = c->gs_base;
-    vmcb->gs.attr.bytes = c->gs_arbytes;
-
-    vmcb->tr.sel = c->tr_sel;
-    vmcb->tr.limit = c->tr_limit;
-    vmcb->tr.base = c->tr_base;
-    vmcb->tr.attr.bytes = c->tr_arbytes;
-
-    vmcb->ldtr.sel = c->ldtr_sel;
-    vmcb->ldtr.limit = c->ldtr_limit;
-    vmcb->ldtr.base = c->ldtr_base;
-    vmcb->ldtr.attr.bytes = c->ldtr_arbytes;
-
     vmcb->sysenter_cs = c->sysenter_cs;
     vmcb->sysenter_esp = c->sysenter_esp;
     vmcb->sysenter_eip = c->sysenter_eip;
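No enter/exit bracketing is needed on the SVM side: the VMCB is an ordinary memory structure, so the vendor segment hooks can read and write it directly even for a descheduled vcpu. Roughly what the get hook amounts to (an illustrative sketch, not the literal svm.c code):

    static void sketch_svm_get_segment_register(
        struct vcpu *v, enum x86_segment seg, struct segment_register *reg)
    {
        struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

        switch ( seg )
        {
        case x86_seg_cs:
            memcpy(reg, &vmcb->cs, sizeof(*reg));   /* sel/limit/base/attr */
            break;
        case x86_seg_ss:
            memcpy(reg, &vmcb->ss, sizeof(*reg));
            break;
        /* ... the remaining segments follow the same pattern ... */
        default:
            BUG();
        }
    }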
--- a/xen/arch/x86/hvm/vmx/vmcs.c    Wed Feb 13 10:43:13 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c    Wed Feb 13 14:03:58 2008 +0000
@@ -338,6 +338,8 @@ void vmx_vmcs_enter(struct vcpu *v)
     if ( likely(v == current) )
         return;
 
+    BUG_ON(vcpu_runnable(v));
+
     fv = &this_cpu(foreign_vmcs);
 
     if ( fv->v == v )
@@ -368,6 +370,8 @@ void vmx_vmcs_exit(struct vcpu *v)
     if ( likely(v == current) )
         return;
 
+    BUG_ON(vcpu_runnable(v));
+
     fv = &this_cpu(foreign_vmcs);
     BUG_ON(fv->v != v);
     BUG_ON(fv->count == 0);
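The new BUG_ON(vcpu_runnable(v)) checks make vmx_vmcs_enter()/vmx_vmcs_exit() enforce what the removed per-call ASSERTs in vmx.c used to: a foreign vcpu's VMCS may only be mapped on this CPU while that vcpu cannot run. Callers such as the save/restore path are expected to have taken the vcpu off the run queue first, e.g. by pausing it. A minimal caller-side sketch (the function name is illustrative, not from this changeset):

    /* Read a VMCS field of another (descheduled) vcpu. */
    static unsigned long example_read_foreign_rsp(struct vcpu *v)
    {
        unsigned long rsp;

        vcpu_pause(v);          /* after this, vcpu_runnable(v) is false */
        vmx_vmcs_enter(v);
        rsp = __vmread(GUEST_RSP);
        vmx_vmcs_exit(v);
        vcpu_unpause(v);

        return rsp;
    }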
--- a/xen/arch/x86/hvm/vmx/vmx.c    Wed Feb 13 10:43:13 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c    Wed Feb 13 14:03:58 2008 +0000
@@ -450,7 +450,7 @@ static void vmx_restore_dr(struct vcpu *
     __restore_debug_registers(v);
 }
 
-void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
+static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     uint32_t ev;
 
@@ -463,52 +463,6 @@ void vmx_vmcs_save(struct vcpu *v, struc
 
     c->msr_efer = v->arch.hvm_vcpu.guest_efer;
 
-    c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
-    c->idtr_base = __vmread(GUEST_IDTR_BASE);
-
-    c->gdtr_limit = __vmread(GUEST_GDTR_LIMIT);
-    c->gdtr_base = __vmread(GUEST_GDTR_BASE);
-
-    c->cs_sel = __vmread(GUEST_CS_SELECTOR);
-    c->cs_limit = __vmread(GUEST_CS_LIMIT);
-    c->cs_base = __vmread(GUEST_CS_BASE);
-    c->cs_arbytes = __vmread(GUEST_CS_AR_BYTES);
-
-    c->ds_sel = __vmread(GUEST_DS_SELECTOR);
-    c->ds_limit = __vmread(GUEST_DS_LIMIT);
-    c->ds_base = __vmread(GUEST_DS_BASE);
-    c->ds_arbytes = __vmread(GUEST_DS_AR_BYTES);
-
-    c->es_sel = __vmread(GUEST_ES_SELECTOR);
-    c->es_limit = __vmread(GUEST_ES_LIMIT);
-    c->es_base = __vmread(GUEST_ES_BASE);
-    c->es_arbytes = __vmread(GUEST_ES_AR_BYTES);
-
-    c->ss_sel = __vmread(GUEST_SS_SELECTOR);
-    c->ss_limit = __vmread(GUEST_SS_LIMIT);
-    c->ss_base = __vmread(GUEST_SS_BASE);
-    c->ss_arbytes = __vmread(GUEST_SS_AR_BYTES);
-
-    c->fs_sel = __vmread(GUEST_FS_SELECTOR);
-    c->fs_limit = __vmread(GUEST_FS_LIMIT);
-    c->fs_base = __vmread(GUEST_FS_BASE);
-    c->fs_arbytes = __vmread(GUEST_FS_AR_BYTES);
-
-    c->gs_sel = __vmread(GUEST_GS_SELECTOR);
-    c->gs_limit = __vmread(GUEST_GS_LIMIT);
-    c->gs_base = __vmread(GUEST_GS_BASE);
-    c->gs_arbytes = __vmread(GUEST_GS_AR_BYTES);
-
-    c->tr_sel = __vmread(GUEST_TR_SELECTOR);
-    c->tr_limit = __vmread(GUEST_TR_LIMIT);
-    c->tr_base = __vmread(GUEST_TR_BASE);
-    c->tr_arbytes = __vmread(GUEST_TR_AR_BYTES);
-
-    c->ldtr_sel = __vmread(GUEST_LDTR_SELECTOR);
-    c->ldtr_limit = __vmread(GUEST_LDTR_LIMIT);
-    c->ldtr_base = __vmread(GUEST_LDTR_BASE);
-    c->ldtr_arbytes = __vmread(GUEST_LDTR_AR_BYTES);
-
     c->sysenter_cs = __vmread(GUEST_SYSENTER_CS);
     c->sysenter_esp = __vmread(GUEST_SYSENTER_ESP);
     c->sysenter_eip = __vmread(GUEST_SYSENTER_EIP);
@@ -552,7 +506,7 @@ static int vmx_restore_cr0_cr3(
     return 0;
 }
 
-int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
+static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     int rc;
 
@@ -585,52 +539,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
     v->arch.hvm_vcpu.guest_efer = c->msr_efer;
     vmx_update_guest_efer(v);
 
-    __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
-    __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
-
-    __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
-    __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
-
-    __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
-    __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
-    __vmwrite(GUEST_CS_BASE, c->cs_base);
-    __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes);
-
-    __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
-    __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
-    __vmwrite(GUEST_DS_BASE, c->ds_base);
-    __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes);
-
-    __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
-    __vmwrite(GUEST_ES_LIMIT, c->es_limit);
-    __vmwrite(GUEST_ES_BASE, c->es_base);
-    __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes);
-
-    __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
-    __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
-    __vmwrite(GUEST_SS_BASE, c->ss_base);
-    __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes);
-
-    __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
-    __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
-    __vmwrite(GUEST_FS_BASE, c->fs_base);
-    __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes);
-
-    __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
-    __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
-    __vmwrite(GUEST_GS_BASE, c->gs_base);
-    __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes);
-
-    __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
-    __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
-    __vmwrite(GUEST_TR_BASE, c->tr_base);
-    __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes);
-
-    __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
-    __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
-    __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
-    __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes);
-
     __vmwrite(GUEST_SYSENTER_CS, c->sysenter_cs);
     __vmwrite(GUEST_SYSENTER_ESP, c->sysenter_esp);
     __vmwrite(GUEST_SYSENTER_EIP, c->sysenter_eip);
@@ -821,7 +729,7 @@ static void vmx_get_segment_register(str
 {
     uint32_t attr = 0;
 
-    ASSERT(v == current);
+    vmx_vmcs_enter(v);
 
     switch ( seg )
     {
@@ -885,6 +793,8 @@ static void vmx_get_segment_register(str
         BUG();
     }
 
+    vmx_vmcs_exit(v);
+
     reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
     /* Unusable flag is folded into Present flag. */
     if ( attr & (1u<<16) )
@@ -896,8 +806,6 @@ static void vmx_set_segment_register(str
 {
     uint32_t attr;
 
-    ASSERT((v == current) || !vcpu_runnable(v));
-
     attr = reg->attr.bytes;
     attr = ((attr & 0xf00) << 4) | (attr & 0xff);
 
@@ -1029,7 +937,6 @@ static enum hvm_intblk vmx_interrupt_blo
 
 static void vmx_update_host_cr3(struct vcpu *v)
 {
-    ASSERT((v == current) || !vcpu_runnable(v));
     vmx_vmcs_enter(v);
     __vmwrite(HOST_CR3, v->arch.cr3);
     vmx_vmcs_exit(v);
@@ -1037,8 +944,6 @@ static void vmx_update_host_cr3(struct v
 
 static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
 {
-    ASSERT((v == current) || !vcpu_runnable(v));
-
     vmx_vmcs_enter(v);
 
     switch ( cr )
@@ -1089,8 +994,6 @@ static void vmx_update_guest_efer(struct
 #ifdef __x86_64__
     unsigned long vm_entry_value;
 
-    ASSERT((v == current) || !vcpu_runnable(v));
-
     vmx_vmcs_enter(v);
 
     vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
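With vmx_get_segment_register() now bracketing its __vmread()s with vmx_vmcs_enter()/vmx_vmcs_exit(), it can be called for a vcpu other than current, which is exactly what the new common save path needs; the scattered ASSERT((v == current) || !vcpu_runnable(v)) checks become redundant because vmx_vmcs_enter() itself now BUG()s on a runnable foreign vcpu. The same bracketing pattern applies to any VMCS update that may run on behalf of another vcpu; an illustrative sketch (field choice arbitrary, not part of this changeset):

    static void example_set_tsc_offset(struct vcpu *v, u64 offset)
    {
        vmx_vmcs_enter(v);               /* maps v's VMCS on this CPU */
        __vmwrite(TSC_OFFSET, offset);
        vmx_vmcs_exit(v);                /* restores the previous VMCS */
    }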