debuggers.hg
changeset 19863:81edfffb3aff
Scaling guest's TSC when the target machine's frequency is different
from its requirement.
Using trap&emulate for each of the guest's rdtsc instructions first; maybe it
can be optimized later.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Wed Jun 24 11:05:22 2009 +0100 (2009-06-24) |
parents | 50634c215234 |
children | 7750bae3d042 |
files | xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/save.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vpt.c xen/include/asm-x86/hvm/domain.h xen/include/asm-x86/hvm/hvm.h |
line diff
1.1 --- a/xen/arch/x86/hvm/hvm.c Wed Jun 24 10:57:00 2009 +0100 1.2 +++ b/xen/arch/x86/hvm/hvm.c Wed Jun 24 11:05:22 2009 +0100 1.3 @@ -144,26 +144,67 @@ uint8_t hvm_combine_hw_exceptions(uint8_ 1.4 return TRAP_double_fault; 1.5 } 1.6 1.7 +void hvm_enable_rdtsc_exiting(struct domain *d) 1.8 +{ 1.9 + struct vcpu *v; 1.10 + 1.11 + if ( opt_softtsc || !hvm_funcs.enable_rdtsc_exiting ) 1.12 + return; 1.13 + 1.14 + for_each_vcpu ( d, v ) 1.15 + hvm_funcs.enable_rdtsc_exiting(v); 1.16 +} 1.17 + 1.18 +int hvm_gtsc_need_scale(struct domain *d) 1.19 +{ 1.20 + uint32_t gtsc_mhz, htsc_mhz; 1.21 + 1.22 + gtsc_mhz = d->arch.hvm_domain.gtsc_khz / 1000; 1.23 + htsc_mhz = opt_softtsc ? 1000 : ((uint32_t)cpu_khz / 1000); 1.24 + 1.25 + d->arch.hvm_domain.tsc_scaled = (gtsc_mhz && (gtsc_mhz != htsc_mhz)); 1.26 + return d->arch.hvm_domain.tsc_scaled; 1.27 +} 1.28 + 1.29 +static u64 hvm_h2g_scale_tsc(struct vcpu *v, u64 host_tsc) 1.30 +{ 1.31 + uint32_t gtsc_khz, htsc_khz; 1.32 + 1.33 + if ( !v->domain->arch.hvm_domain.tsc_scaled ) 1.34 + return host_tsc; 1.35 + 1.36 + htsc_khz = opt_softtsc ? 1000000 : cpu_khz; 1.37 + gtsc_khz = v->domain->arch.hvm_domain.gtsc_khz; 1.38 + return muldiv64(host_tsc, gtsc_khz, htsc_khz); 1.39 +} 1.40 + 1.41 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc) 1.42 { 1.43 - u64 host_tsc; 1.44 - 1.45 - rdtscll(host_tsc); 1.46 - 1.47 - v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - host_tsc; 1.48 + uint64_t host_tsc, scaled_htsc; 1.49 + 1.50 + if ( opt_softtsc ) 1.51 + host_tsc = hvm_get_guest_time(v); 1.52 + else 1.53 + rdtscll(host_tsc); 1.54 + 1.55 + scaled_htsc = hvm_h2g_scale_tsc(v, host_tsc); 1.56 + 1.57 + v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - scaled_htsc; 1.58 hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset); 1.59 } 1.60 1.61 u64 hvm_get_guest_tsc(struct vcpu *v) 1.62 { 1.63 - u64 host_tsc; 1.64 + uint64_t host_tsc, scaled_htsc; 1.65 1.66 if ( opt_softtsc ) 1.67 host_tsc = hvm_get_guest_time(v); 1.68 else 1.69 rdtscll(host_tsc); 1.70 1.71 - return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset; 1.72 + scaled_htsc = hvm_h2g_scale_tsc(v, host_tsc); 1.73 + 1.74 + return scaled_htsc + v->arch.hvm_vcpu.cache_tsc_offset; 1.75 } 1.76 1.77 void hvm_migrate_timers(struct vcpu *v)
2.1 --- a/xen/arch/x86/hvm/save.c Wed Jun 24 10:57:00 2009 +0100 2.2 +++ b/xen/arch/x86/hvm/save.c Wed Jun 24 11:05:22 2009 +0100 2.3 @@ -63,6 +63,15 @@ int arch_hvm_load(struct domain *d, stru 2.4 /* Restore guest's preferred TSC frequency. */ 2.5 d->arch.hvm_domain.gtsc_khz = hdr->gtsc_khz; 2.6 2.7 + if ( hdr->gtsc_khz && hvm_gtsc_need_scale(d) ) 2.8 + { 2.9 + hvm_enable_rdtsc_exiting(d); 2.10 + gdprintk(XENLOG_WARNING, "Loading VM(id:%d) expects freq: %dmHz, " 2.11 + "but host's freq :%"PRIu64"mHz, trap and emulate rdtsc!!!\n", 2.12 + d->domain_id, hdr->gtsc_khz / 1000, opt_softtsc ? 1000 : 2.13 + cpu_khz / 1000); 2.14 + } 2.15 + 2.16 /* VGA state is not saved/restored, so we nobble the cache. */ 2.17 d->arch.hvm_domain.stdvga.cache = 0; 2.18
3.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Wed Jun 24 10:57:00 2009 +0100 3.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Jun 24 11:05:22 2009 +0100 3.3 @@ -947,6 +947,14 @@ static void vmx_set_tsc_offset(struct vc 3.4 vmx_vmcs_exit(v); 3.5 } 3.6 3.7 +static void vmx_enable_rdtsc_exiting(struct vcpu *v) 3.8 +{ 3.9 + vmx_vmcs_enter(v); 3.10 + v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING; 3.11 + __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control); 3.12 + vmx_vmcs_exit(v); 3.13 + } 3.14 + 3.15 void do_nmi(struct cpu_user_regs *); 3.16 3.17 static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page) 3.18 @@ -1395,7 +1403,8 @@ static struct hvm_function_table vmx_fun 3.19 .msr_write_intercept = vmx_msr_write_intercept, 3.20 .invlpg_intercept = vmx_invlpg_intercept, 3.21 .set_uc_mode = vmx_set_uc_mode, 3.22 - .set_info_guest = vmx_set_info_guest 3.23 + .set_info_guest = vmx_set_info_guest, 3.24 + .enable_rdtsc_exiting = vmx_enable_rdtsc_exiting 3.25 }; 3.26 3.27 static unsigned long *vpid_bitmap;
4.1 --- a/xen/arch/x86/hvm/vpt.c Wed Jun 24 10:57:00 2009 +0100 4.2 +++ b/xen/arch/x86/hvm/vpt.c Wed Jun 24 11:05:22 2009 +0100 4.3 @@ -33,7 +33,8 @@ void hvm_init_guest_time(struct domain * 4.4 pl->stime_offset = -(u64)get_s_time(); 4.5 pl->last_guest_time = 0; 4.6 4.7 - d->arch.hvm_domain.gtsc_khz = cpu_khz; 4.8 + d->arch.hvm_domain.gtsc_khz = opt_softtsc ? 1000000 : cpu_khz; 4.9 + d->arch.hvm_domain.tsc_scaled = 0; 4.10 } 4.11 4.12 u64 hvm_get_guest_time(struct vcpu *v)
5.1 --- a/xen/include/asm-x86/hvm/domain.h Wed Jun 24 10:57:00 2009 +0100 5.2 +++ b/xen/include/asm-x86/hvm/domain.h Wed Jun 24 11:05:22 2009 +0100 5.3 @@ -45,7 +45,7 @@ struct hvm_domain { 5.4 struct hvm_ioreq_page buf_ioreq; 5.5 5.6 uint32_t gtsc_khz; /* kHz */ 5.7 - uint32_t pad0; 5.8 + bool_t tsc_scaled; 5.9 struct pl_time pl_time; 5.10 5.11 struct hvm_io_handler io_handler;
6.1 --- a/xen/include/asm-x86/hvm/hvm.h Wed Jun 24 10:57:00 2009 +0100 6.2 +++ b/xen/include/asm-x86/hvm/hvm.h Wed Jun 24 11:05:22 2009 +0100 6.3 @@ -129,6 +129,7 @@ struct hvm_function_table { 6.4 void (*invlpg_intercept)(unsigned long vaddr); 6.5 void (*set_uc_mode)(struct vcpu *v); 6.6 void (*set_info_guest)(struct vcpu *v); 6.7 + void (*enable_rdtsc_exiting)(struct vcpu *v); 6.8 }; 6.9 6.10 extern struct hvm_function_table hvm_funcs; 6.11 @@ -282,6 +283,9 @@ int hvm_event_needs_reinjection(uint8_t 6.12 6.13 uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2); 6.14 6.15 +void hvm_enable_rdtsc_exiting(struct domain *d); 6.16 +int hvm_gtsc_need_scale(struct domain *d); 6.17 + 6.18 static inline int hvm_cpu_up(void) 6.19 { 6.20 if ( hvm_funcs.cpu_up )