debuggers.hg

view xen/arch/x86/hvm/hpet.c @ 16727:66db23ecd562

hvm: hpet: Fix per-timer enable/disable.

The enable/disable per timer interrupt bit is wrongly used as per
timer enable/disable. According to the spec, the comparator value should
constantly increase while the HPET is globally enabled, no matter
whether the timer interrupt is enabled or not.

From: Haitao Shan <haitao.shan@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jan 08 13:57:45 2008 +0000 (2008-01-08)
parents c00f31f27de6
children 9ff64d045e61
line source
1 /*
2 * hpet.c: HPET emulation for HVM guests.
3 * Copyright (c) 2006, Intel Corporation.
4 * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 */
20 #include <asm/hvm/vpt.h>
21 #include <asm/hvm/io.h>
22 #include <asm/hvm/support.h>
23 #include <asm/current.h>
24 #include <xen/sched.h>
25 #include <xen/event.h>
/* Guest-physical base address and size of the emulated HPET MMIO window. */
#define HPET_BASE_ADDRESS  0xfed00000ULL
#define HPET_MMAP_SIZE     1024
#define S_TO_NS  1000000000ULL           /* 1s  = 10^9  ns */
#define S_TO_FS  1000000000000000ULL     /* 1s  = 10^15 fs */

/* Frequency_of_TSC / frequency_of_HPET = 32 */
#define TSC_PER_HPET_TICK 32
#define guest_time_hpet(v) (hvm_get_guest_time(v) / TSC_PER_HPET_TICK)

/* Register offsets within the HPET MMIO window. */
#define HPET_ID         0x000
#define HPET_PERIOD     0x004
#define HPET_CFG        0x010
#define HPET_STATUS     0x020
#define HPET_COUNTER    0x0f0
#define HPET_T0_CFG     0x100
#define HPET_T0_CMP     0x108
#define HPET_T0_ROUTE   0x110
#define HPET_T1_CFG     0x120
#define HPET_T1_CMP     0x128
#define HPET_T1_ROUTE   0x130
#define HPET_T2_CFG     0x140
#define HPET_T2_CMP     0x148
#define HPET_T2_ROUTE   0x150
#define HPET_T3_CFG     0x160

/* General Configuration register bits. */
#define HPET_CFG_ENABLE 0x001
#define HPET_CFG_LEGACY 0x002

/* Per-timer (Tn) Configuration register bits. */
#define HPET_TN_INT_TYPE_LEVEL  0x002
#define HPET_TN_ENABLE          0x004
#define HPET_TN_PERIODIC        0x008
#define HPET_TN_PERIODIC_CAP    0x010
#define HPET_TN_SIZE_CAP        0x020
#define HPET_TN_SETVAL          0x040
#define HPET_TN_32BIT           0x100
#define HPET_TN_INT_ROUTE_MASK  0x3e00
#define HPET_TN_INT_ROUTE_SHIFT 9
#define HPET_TN_INT_ROUTE_CAP_SHIFT 32
#define HPET_TN_CFG_BITS_READONLY_OR_RESERVED 0xffff80b1U

/* can be routed to IOAPIC.redirect_table[23..20] */
#define HPET_TN_INT_ROUTE_CAP      (0x00f00000ULL \
                                    << HPET_TN_INT_ROUTE_CAP_SHIFT)

#define HPET_TN_INT_ROUTE_CAP_MASK (0xffffffffULL \
                                    << HPET_TN_INT_ROUTE_CAP_SHIFT)

/* Convert a count of HPET ticks into nanoseconds. */
#define hpet_tick_to_ns(h, tick) ((s_time_t)(tick)* \
                                  (S_TO_NS*TSC_PER_HPET_TICK)/h->tsc_freq)

/* Accessors for per-timer configuration state. */
#define timer_config(h, n)      (h->hpet.timers[n].config)
#define timer_is_periodic(h, n) (timer_config(h, n) & HPET_TN_PERIODIC)
#define timer_is_32bit(h, n)    (timer_config(h, n) & HPET_TN_32BIT)
#define hpet_enabled(h)         (h->hpet.config & HPET_CFG_ENABLE)
#define timer_level(h, n)       (timer_config(h, n) & HPET_TN_INT_TYPE_LEVEL)

#define timer_int_route(h, n) \
    ((timer_config(h, n) & HPET_TN_INT_ROUTE_MASK) >> HPET_TN_INT_ROUTE_SHIFT)

#define timer_int_route_cap(h, n) \
    ((timer_config(h, n) & HPET_TN_INT_ROUTE_CAP_MASK) \
        >> HPET_TN_INT_ROUTE_CAP_SHIFT)

/* Wraparound-safe "a is later than b" comparisons (32- and 64-bit). */
#define hpet_time_after(a, b)   ((int32_t)(b) - (int32_t)(a) < 0)
#define hpet_time_after64(a, b) ((int64_t)(b) - (int64_t)(a) < 0)
93 static inline uint64_t hpet_read64(HPETState *h, unsigned long addr)
94 {
95 addr &= ~7;
97 switch ( addr )
98 {
99 case HPET_ID:
100 return h->hpet.capability;
101 case HPET_CFG:
102 return h->hpet.config;
103 case HPET_STATUS:
104 return h->hpet.isr;
105 case HPET_COUNTER:
106 return h->hpet.mc64;
107 case HPET_T0_CFG:
108 case HPET_T1_CFG:
109 case HPET_T2_CFG:
110 return h->hpet.timers[(addr - HPET_T0_CFG) >> 5].config;
111 case HPET_T0_CMP:
112 case HPET_T1_CMP:
113 case HPET_T2_CMP:
114 return h->hpet.timers[(addr - HPET_T0_CMP) >> 5].cmp;
115 case HPET_T0_ROUTE:
116 case HPET_T1_ROUTE:
117 case HPET_T2_ROUTE:
118 return h->hpet.timers[(addr - HPET_T0_ROUTE) >> 5].fsb;
119 }
121 return 0;
122 }
124 static inline int hpet_check_access_length(
125 unsigned long addr, unsigned long len)
126 {
127 if ( (addr & (len - 1)) || (len > 8) )
128 {
129 /*
130 * According to ICH9 specification, unaligned accesses may result
131 * in unexpected behaviour or master abort, but should not crash/hang.
132 * Hence we read all-ones, drop writes, and log a warning.
133 */
134 gdprintk(XENLOG_WARNING, "HPET: access across register boundary: "
135 "%lx %lx\n", addr, len);
136 return -EINVAL;
137 }
139 return 0;
140 }
142 static inline uint64_t hpet_read_maincounter(HPETState *h)
143 {
144 ASSERT(spin_is_locked(&h->lock));
146 if ( hpet_enabled(h) )
147 return guest_time_hpet(h->vcpu) + h->mc_offset;
148 else
149 return h->hpet.mc64;
150 }
152 static unsigned long hpet_read(
153 struct vcpu *v, unsigned long addr, unsigned long length)
154 {
155 HPETState *h = &v->domain->arch.hvm_domain.pl_time.vhpet;
156 unsigned long result;
157 uint64_t val;
159 addr &= HPET_MMAP_SIZE-1;
161 if ( hpet_check_access_length(addr, length) != 0 )
162 return ~0UL;
164 spin_lock(&h->lock);
166 val = hpet_read64(h, addr);
167 if ( (addr & ~7) == HPET_COUNTER )
168 val = hpet_read_maincounter(h);
170 result = val;
171 if ( length != 8 )
172 result = (val >> ((addr & 7) * 8)) & ((1ULL << (length * 8)) - 1);
174 spin_unlock(&h->lock);
176 return result;
177 }
/* Cancel the Xen timer backing HPET timer @tn.  Caller holds h->lock. */
static void hpet_stop_timer(HPETState *h, unsigned int tn)
{
    ASSERT(tn < HPET_TIMER_NUM);
    ASSERT(spin_is_locked(&h->lock));
    stop_timer(&h->timers[tn]);
}
/* the number of HPET ticks that stands for
 * 1/(2^10) second, namely, 0.9765625 milliseconds */
#define HPET_TINY_TIME_SPAN ((h->tsc_freq >> 10) / TSC_PER_HPET_TICK)

/*
 * (Re)arm the Xen timer backing HPET timer @tn for its next comparator
 * match.  Caller holds h->lock.
 */
static void hpet_set_timer(HPETState *h, unsigned int tn)
{
    uint64_t tn_cmp, cur_tick, diff;

    ASSERT(tn < HPET_TIMER_NUM);
    ASSERT(spin_is_locked(&h->lock));

    if ( (tn == 0) && (h->hpet.config & HPET_CFG_LEGACY) )
    {
        /* The HPET specification requires that the PIT not generate
         * interrupts while LegacyReplacementRoute is set for timer0 */
        PITState *pit = &h->vcpu->domain->arch.hvm_domain.pl_time.vpit;
        pit_stop_channel0_irq(pit);
    }

    tn_cmp = h->hpet.timers[tn].cmp;
    cur_tick = hpet_read_maincounter(h);
    if ( timer_is_32bit(h, tn) )
    {
        /* 32-bit timers compare only the low halves of counter/comparator. */
        tn_cmp = (uint32_t)tn_cmp;
        cur_tick = (uint32_t)cur_tick;
    }

    diff = tn_cmp - cur_tick;

    /*
     * Detect time values set in the past. This is hard to do for 32-bit
     * comparators as the timer does not have to be set that far in the future
     * for the counter difference to wrap a 32-bit signed integer. We fudge
     * by looking for a 'small' time value in the past.
     */
    if ( (int64_t)diff < 0 )
        diff = (timer_is_32bit(h, tn) && (-diff > HPET_TINY_TIME_SPAN))
            ? (uint32_t)diff : 0;  /* far past -> treat as pending wrap;
                                      near past -> fire immediately */

    set_timer(&h->timers[tn], NOW() + hpet_tick_to_ns(h, diff));
}
228 static inline uint64_t hpet_fixup_reg(
229 uint64_t new, uint64_t old, uint64_t mask)
230 {
231 new &= mask;
232 new |= old & ~mask;
233 return new;
234 }
/*
 * Guest MMIO write handler.  Narrow writes are merged into the containing
 * 64-bit register via read-modify-write; invalid accesses are dropped.
 */
static void hpet_write(
    struct vcpu *v, unsigned long addr,
    unsigned long length, unsigned long val)
{
    HPETState *h = &v->domain->arch.hvm_domain.pl_time.vhpet;
    uint64_t old_val, new_val;
    int tn, i;

    addr &= HPET_MMAP_SIZE-1;

    if ( hpet_check_access_length(addr, length) != 0 )
        return;

    spin_lock(&h->lock);

    old_val = hpet_read64(h, addr);
    if ( (addr & ~7) == HPET_COUNTER )
        old_val = hpet_read_maincounter(h);

    new_val = val;
    if ( length != 8 )
        /* Splice the partial write into the old 64-bit register value. */
        new_val = hpet_fixup_reg(
            new_val << (addr & 7) * 8, old_val,
            ((1ULL << (length*8)) - 1) << ((addr & 7) * 8));

    switch ( addr & ~7 )
    {
    case HPET_CFG:
        /* Only the ENABLE and LEGACY bits are guest-writable. */
        h->hpet.config = hpet_fixup_reg(new_val, old_val, 0x3);

        if ( !(old_val & HPET_CFG_ENABLE) && (new_val & HPET_CFG_ENABLE) )
        {
            /* Enable main counter and interrupt generation. */
            h->mc_offset = h->hpet.mc64 - guest_time_hpet(h->vcpu);
            for ( i = 0; i < HPET_TIMER_NUM; i++ )
                hpet_set_timer(h, i);
        }
        else if ( (old_val & HPET_CFG_ENABLE) && !(new_val & HPET_CFG_ENABLE) )
        {
            /* Halt main counter and disable interrupt generation. */
            h->hpet.mc64 = h->mc_offset + guest_time_hpet(h->vcpu);
            for ( i = 0; i < HPET_TIMER_NUM; i++ )
                hpet_stop_timer(h, i);
        }
        break;

    case HPET_COUNTER:
        if ( hpet_enabled(h) )
            gdprintk(XENLOG_WARNING,
                     "HPET: writing main counter but it's not halted!\n");
        h->hpet.mc64 = new_val;
        break;

    case HPET_T0_CFG:
    case HPET_T1_CFG:
    case HPET_T2_CFG:
        tn = (addr - HPET_T0_CFG) >> 5;

        /* Mask preserves read-only/reserved timer config bits. */
        h->hpet.timers[tn].config = hpet_fixup_reg(new_val, old_val, 0x3f4e);

        if ( timer_level(h, tn) )
        {
            gdprintk(XENLOG_ERR,
                     "HPET: level triggered interrupt not supported now\n");
            domain_crash(current->domain);
            break;
        }

        if ( new_val & HPET_TN_32BIT )
            /* Switching to 32-bit mode truncates the comparator. */
            h->hpet.timers[tn].cmp = (uint32_t)h->hpet.timers[tn].cmp;

        break;

    case HPET_T0_CMP:
    case HPET_T1_CMP:
    case HPET_T2_CMP:
        tn = (addr - HPET_T0_CMP) >> 5;
        if ( timer_is_32bit(h, tn) )
            new_val = (uint32_t)new_val;
        /*
         * In periodic mode a plain comparator write updates the period;
         * only while SETVAL is pending does it set the comparator itself.
         */
        if ( !timer_is_periodic(h, tn) ||
             (h->hpet.timers[tn].config & HPET_TN_SETVAL) )
            h->hpet.timers[tn].cmp = new_val;
        else
            h->hpet.period[tn] = new_val;
        h->hpet.timers[tn].config &= ~HPET_TN_SETVAL;  /* SETVAL is one-shot */
        if ( hpet_enabled(h) )
            hpet_set_timer(h, tn);
        break;

    case HPET_T0_ROUTE:
    case HPET_T1_ROUTE:
    case HPET_T2_ROUTE:
        tn = (addr - HPET_T0_ROUTE) >> 5;
        h->hpet.timers[tn].fsb = new_val;
        break;

    default:
        /* Ignore writes to unsupported and reserved registers. */
        break;
    }

    spin_unlock(&h->lock);
}
340 static int hpet_range(struct vcpu *v, unsigned long addr)
341 {
342 return ((addr >= HPET_BASE_ADDRESS) &&
343 (addr < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE)));
344 }
/* MMIO dispatch hooks: claim and service accesses in the HPET window. */
struct hvm_mmio_handler hpet_mmio_handler = {
    .check_handler = hpet_range,
    .read_handler  = hpet_read,
    .write_handler = hpet_write
};
/* Deliver HPET timer @tn's interrupt to the guest.  Caller holds h->lock. */
static void hpet_route_interrupt(HPETState *h, unsigned int tn)
{
    unsigned int tn_int_route = timer_int_route(h, tn);
    struct domain *d = h->vcpu->domain;

    ASSERT(spin_is_locked(&h->lock));

    if ( (tn <= 1) && (h->hpet.config & HPET_CFG_LEGACY) )
    {
        /* if LegacyReplacementRoute bit is set, HPET specification requires
           timer0 be routed to IRQ0 in NON-APIC or IRQ2 in the I/O APIC,
           timer1 be routed to IRQ8 in NON-APIC or IRQ8 in the I/O APIC. */
        int isa_irq = (tn == 0) ? 0 : 8;
        /* Pulse the ISA line (deassert then assert) to produce an edge. */
        hvm_isa_irq_deassert(d, isa_irq);
        hvm_isa_irq_assert(d, isa_irq);
        return;
    }

    if ( !(timer_int_route_cap(h, tn) & (1U << tn_int_route)) )
    {
        /* Guest chose a route outside this timer's advertised capability. */
        gdprintk(XENLOG_ERR,
                 "HPET: timer%u: invalid interrupt route config\n", tn);
        domain_crash(d);
        return;
    }

    /* We only support edge-triggered interrupt now */
    spin_lock(&d->arch.hvm_domain.irq_lock);
    vioapic_irq_positive_edge(d, tn_int_route);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
/* Xen timer callback: fires when HPET timer @tn reaches its comparator. */
static void hpet_timer_fn(void *opaque)
{
    struct HPET_timer_fn_info *htfi = opaque;
    HPETState *h = htfi->hs;
    unsigned int tn = htfi->tn;

    spin_lock(&h->lock);

    if ( !hpet_enabled(h) )
    {
        /* HPET was globally disabled after this timer was armed. */
        spin_unlock(&h->lock);
        return;
    }

    /* The per-timer enable bit gates only interrupt delivery; the
     * comparator bookkeeping below runs regardless. */
    if ( timer_config(h, tn) & HPET_TN_ENABLE )
        hpet_route_interrupt(h, tn);

    if ( timer_is_periodic(h, tn) && (h->hpet.period[tn] != 0) )
    {
        /* Advance the comparator past the current main counter by whole
         * periods, honouring 32-bit wraparound where configured. */
        uint64_t mc = hpet_read_maincounter(h);
        if ( timer_is_32bit(h, tn) )
        {
            while ( hpet_time_after(mc, h->hpet.timers[tn].cmp) )
                h->hpet.timers[tn].cmp = (uint32_t)(
                    h->hpet.timers[tn].cmp + h->hpet.period[tn]);
        }
        else
        {
            while ( hpet_time_after64(mc, h->hpet.timers[tn].cmp) )
                h->hpet.timers[tn].cmp += h->hpet.period[tn];
        }
        /* Re-arm for the next period. */
        set_timer(&h->timers[tn],
                  NOW() + hpet_tick_to_ns(h, h->hpet.period[tn]));
    }

    spin_unlock(&h->lock);
}
422 void hpet_migrate_timers(struct vcpu *v)
423 {
424 struct HPETState *h = &v->domain->arch.hvm_domain.pl_time.vhpet;
425 int i;
427 if ( v != h->vcpu )
428 return;
430 for ( i = 0; i < HPET_TIMER_NUM; i++ )
431 migrate_timer(&h->timers[i], v->processor);
432 }
/* Save the HPET register state into the HVM save context @h. */
static int hpet_save(struct domain *d, hvm_domain_context_t *h)
{
    HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
    int rc;

    spin_lock(&hp->lock);

    /* Write the proper value into the main counter */
    hp->hpet.mc64 = hp->mc_offset + guest_time_hpet(hp->vcpu);

    /* Save the HPET registers */
    rc = _hvm_init_entry(h, HVM_SAVE_CODE(HPET), 0, HVM_SAVE_LENGTH(HPET));
    if ( rc == 0 )
    {
        struct hvm_hw_hpet *rec = (struct hvm_hw_hpet *)&h->data[h->cur];
        h->cur += HVM_SAVE_LENGTH(HPET);
        memset(rec, 0, HVM_SAVE_LENGTH(HPET));
        /* Copy each live field into the save record. */
#define C(x) rec->x = hp->hpet.x
        C(capability);
        C(config);
        C(isr);
        C(mc64);
        C(timers[0].config);
        C(timers[0].cmp);
        C(timers[0].fsb);
        C(timers[1].config);
        C(timers[1].cmp);
        C(timers[1].fsb);
        C(timers[2].config);
        C(timers[2].cmp);
        C(timers[2].fsb);
        C(period[0]);
        C(period[1]);
        C(period[2]);
#undef C
    }

    spin_unlock(&hp->lock);

    return rc;
}
476 static int hpet_load(struct domain *d, hvm_domain_context_t *h)
477 {
478 HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
479 struct hvm_hw_hpet *rec;
480 int i;
482 spin_lock(&hp->lock);
484 /* Reload the HPET registers */
485 if ( _hvm_check_entry(h, HVM_SAVE_CODE(HPET), HVM_SAVE_LENGTH(HPET)) )
486 {
487 spin_unlock(&hp->lock);
488 return -EINVAL;
489 }
491 rec = (struct hvm_hw_hpet *)&h->data[h->cur];
492 h->cur += HVM_SAVE_LENGTH(HPET);
494 #define C(x) hp->hpet.x = rec->x
495 C(capability);
496 C(config);
497 C(isr);
498 C(mc64);
499 C(timers[0].config);
500 C(timers[0].cmp);
501 C(timers[0].fsb);
502 C(timers[1].config);
503 C(timers[1].cmp);
504 C(timers[1].fsb);
505 C(timers[2].config);
506 C(timers[2].cmp);
507 C(timers[2].fsb);
508 C(period[0]);
509 C(period[1]);
510 C(period[2]);
511 #undef C
513 /* Recalculate the offset between the main counter and guest time */
514 hp->mc_offset = hp->hpet.mc64 - guest_time_hpet(hp->vcpu);
516 /* Restart the timers */
517 for ( i = 0; i < HPET_TIMER_NUM; i++ )
518 if ( hpet_enabled(hp) )
519 hpet_set_timer(hp, i);
521 spin_unlock(&hp->lock);
523 return 0;
524 }
526 HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load, 1, HVMSR_PER_DOM);
528 void hpet_init(struct vcpu *v)
529 {
530 HPETState *h = &v->domain->arch.hvm_domain.pl_time.vhpet;
531 int i;
533 memset(h, 0, sizeof(HPETState));
535 spin_lock_init(&h->lock);
537 h->vcpu = v;
538 h->tsc_freq = ticks_per_sec(v);
540 /* 64-bit main counter; 3 timers supported; LegacyReplacementRoute. */
541 h->hpet.capability = 0x8086A201ULL;
543 /* This is the number of femptoseconds per HPET tick. */
544 /* Here we define HPET's frequency to be 1/32 of the TSC's */
545 h->hpet.capability |= ((S_TO_FS*TSC_PER_HPET_TICK/h->tsc_freq) << 32);
547 for ( i = 0; i < HPET_TIMER_NUM; i++ )
548 {
549 h->hpet.timers[i].config =
550 HPET_TN_INT_ROUTE_CAP | HPET_TN_SIZE_CAP | HPET_TN_PERIODIC_CAP;
551 h->hpet.timers[i].cmp = ~0ULL;
552 h->timer_fn_info[i].hs = h;
553 h->timer_fn_info[i].tn = i;
554 init_timer(&h->timers[i], hpet_timer_fn, &h->timer_fn_info[i],
555 v->processor);
556 }
557 }
559 void hpet_deinit(struct domain *d)
560 {
561 int i;
562 HPETState *h = &d->arch.hvm_domain.pl_time.vhpet;
564 for ( i = 0; i < HPET_TIMER_NUM; i++ )
565 kill_timer(&h->timers[i]);
566 }