debuggers.hg
changeset 18093:bd6d194199e5
x86: Add clocksource=tsc option.
This option should only be used on machines where TSC is known to be
synchronized across all processors. A future TODO is to dynamically
determine if this is the case.
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Tue Jul 15 14:04:02 2008 +0100 (2008-07-15) |
parents | 1c22d42043bb |
children | 4b882c41c9b9 |
files | xen/arch/x86/time.c |
line diff
1.1 --- a/xen/arch/x86/time.c Tue Jul 15 13:36:22 2008 +0100 1.2 +++ b/xen/arch/x86/time.c Tue Jul 15 14:04:02 2008 +0100 1.3 @@ -481,6 +481,30 @@ static int init_pmtimer(struct platform_ 1.4 } 1.5 1.6 /************************************************************ 1.7 + * PLATFORM TIMER 5: TSC 1.8 + */ 1.9 + 1.10 +#define platform_timer_is_tsc() (!strcmp(plt_src.name, "TSC")) 1.11 +static u64 tsc_freq; 1.12 + 1.13 +static u64 read_tsc_count(void) 1.14 +{ 1.15 + u64 tsc; 1.16 + rdtscll(tsc); 1.17 + return tsc; 1.18 +} 1.19 + 1.20 +static int init_tsctimer(struct platform_timesource *pts) 1.21 +{ 1.22 + /* TODO: evaluate stability of TSC here, return 0 if not stable. */ 1.23 + pts->name = "TSC"; 1.24 + pts->frequency = tsc_freq; 1.25 + pts->read_counter = read_tsc_count; 1.26 + pts->counter_bits = 64; 1.27 + return 1; 1.28 +} 1.29 + 1.30 +/************************************************************ 1.31 * GENERIC PLATFORM TIMER INFRASTRUCTURE 1.32 */ 1.33 1.34 @@ -565,6 +589,8 @@ static void init_platform_timer(void) 1.35 rc = init_cyclone(pts); 1.36 else if ( !strcmp(opt_clocksource, "acpi") ) 1.37 rc = init_pmtimer(pts); 1.38 + else if ( !strcmp(opt_clocksource, "tsc") ) 1.39 + rc = init_tsctimer(pts); 1.40 1.41 if ( rc <= 0 ) 1.42 printk("WARNING: %s clocksource '%s'.\n", 1.43 @@ -780,6 +806,10 @@ int cpu_frequency_change(u64 freq) 1.44 struct cpu_time *t = &this_cpu(cpu_time); 1.45 u64 curr_tsc; 1.46 1.47 + /* Nothing to do if TSC is platform timer. Assume it is constant-rate. */ 1.48 + if ( platform_timer_is_tsc() ) 1.49 + return 0; 1.50 + 1.51 /* Sanity check: CPU frequency allegedly dropping below 1MHz? */ 1.52 if ( freq < 1000000u ) 1.53 { 1.54 @@ -978,9 +1008,12 @@ void init_percpu_time(void) 1.55 unsigned long flags; 1.56 s_time_t now; 1.57 1.58 + if ( platform_timer_is_tsc() ) 1.59 + return; 1.60 + 1.61 local_irq_save(flags); 1.62 rdtscll(t->local_tsc_stamp); 1.63 - now = !plt_src.read_counter ? 
0 : read_platform_stime(); 1.64 + now = read_platform_stime(); 1.65 local_irq_restore(flags); 1.66 1.67 t->stime_master_stamp = now; 1.68 @@ -998,11 +1031,11 @@ int __init init_xen_time(void) 1.69 1.70 local_irq_disable(); 1.71 1.72 - init_percpu_time(); 1.73 - 1.74 stime_platform_stamp = 0; 1.75 init_platform_timer(); 1.76 1.77 + init_percpu_time(); 1.78 + 1.79 /* check if TSC is invariant during deep C state 1.80 this is a new feature introduced by Nehalem*/ 1.81 if ( cpuid_edx(0x80000007) & (1U<<8) ) 1.82 @@ -1019,6 +1052,7 @@ void __init early_time_init(void) 1.83 { 1.84 u64 tmp = init_pit_and_calibrate_tsc(); 1.85 1.86 + tsc_freq = tmp; 1.87 set_time_scale(&this_cpu(cpu_time).tsc_scale, tmp); 1.88 1.89 do_div(tmp, 1000);