
xen/arch/x86/hpet.c @ 21031:82661c9ad896

ACPI S3: fix S3 resume failure on systems with an MSI-capable HPET

Don't re-allocate memory for irq_channel, which would cause a BUG_ON in
hpet_msi_write(), and make sure hpet_setup_msi_irq() is executed during
S3 resume.

Signed-off-by: Wei Gang <gang.wei@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Feb 26 14:05:32 2010 +0000 (2010-02-26)
/******************************************************************************
 * arch/x86/hpet.c
 *
 * HPET management.
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/irq.h>
#include <asm/fixmap.h>
#include <asm/div64.h>
#include <asm/hpet.h>
#include <asm/msi.h>
#include <mach_apic.h>
#include <xen/cpuidle.h>

#define MAX_DELTA_NS MILLISECS(10*1000)
#define MIN_DELTA_NS MICROSECS(20)

#define MAX_HPET_NUM 32

#define HPET_EVT_USED_BIT 0
#define HPET_EVT_USED (1 << HPET_EVT_USED_BIT)
#define HPET_EVT_DISABLE_BIT 1
#define HPET_EVT_DISABLE (1 << HPET_EVT_DISABLE_BIT)

struct hpet_event_channel
{
    unsigned long mult;
    int shift;
    s_time_t next_event;
    cpumask_t cpumask;
    spinlock_t lock;
    void (*event_handler)(struct hpet_event_channel *);

    unsigned int idx;    /* physical channel idx */
    int cpu;             /* msi target */
    int irq;             /* msi irq */
    unsigned int flags;  /* HPET_EVT_x */
} __cacheline_aligned;
static struct hpet_event_channel legacy_hpet_event;
static struct hpet_event_channel hpet_events[MAX_HPET_NUM] =
    { [0 ... MAX_HPET_NUM-1].irq = -1 };
static unsigned int num_hpets_used; /* msi hpet channels used for broadcast */

DEFINE_PER_CPU(struct hpet_event_channel *, cpu_bc_channel);

static int *irq_channel;

#define irq_to_channel(irq) irq_channel[irq]

unsigned long hpet_address;

/*
 * force_hpet_broadcast: by default, legacy HPET broadcast will be stopped
 * if RTC interrupts are enabled. Enable this option if you want to always
 * enable legacy HPET broadcast for deep C-states.
 */
int force_hpet_broadcast;
boolean_param("hpetbroadcast", force_hpet_broadcast);

/*
 * Calculate a multiplication factor for scaled math, which is used to convert
 * nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 *
 * div_sc is the rearranged equation to calculate a factor from a given clock
 * ticks / nanoseconds ratio:
 *
 * factor = (clock_ticks << shift) / nanoseconds
 */
static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
                                   int shift)
{
    uint64_t tmp = ((uint64_t)ticks) << shift;

    do_div(tmp, nsec);
    return (unsigned long) tmp;
}

/*
 * Convert nanoseconds based values to clock ticks:
 *
 * clock_ticks = (nanoseconds * factor) >> shift.
 */
static inline unsigned long ns2ticks(unsigned long nsec, int shift,
                                     unsigned long factor)
{
    uint64_t tmp = ((uint64_t)nsec * factor) >> shift;

    return (unsigned long) tmp;
}
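
/*
 * Worked example (illustrative numbers, not taken from this file): a
 * 14.31818 MHz HPET programmed with shift = 32 gives
 *
 *   factor = div_sc(14318180, 1000000000, 32)
 *          = (14318180 << 32) / 10^9 ~= 61496110
 *
 * so ns2ticks(1000000, 32, factor) ~= (10^6 * 61496110) >> 32 ~= 14318,
 * i.e. one millisecond corresponds to roughly 14318 HPET ticks.
 */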

static int hpet_next_event(unsigned long delta, int timer)
{
    uint32_t cnt, cmp;
    unsigned long flags;

    local_irq_save(flags);
    cnt = hpet_read32(HPET_COUNTER);
    cmp = cnt + delta;
    hpet_write32(cmp, HPET_Tn_CMP(timer));
    cmp = hpet_read32(HPET_COUNTER);
    local_irq_restore(flags);

    /* Are we within two ticks of the deadline passing? Then we may miss. */
    return ((cmp + 2 - cnt) > delta) ? -ETIME : 0;
}
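
/*
 * Note on the -ETIME check above: between writing HPET_Tn_CMP and
 * re-reading the main counter, the counter may already have swept past
 * (or come within two ticks of) the new comparator value, in which case
 * the comparator match, and hence the interrupt, could be missed.
 * Callers such as reprogram_hpet_evt_channel() retry with a larger delta
 * when 'force' is set.
 */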

static int reprogram_hpet_evt_channel(
    struct hpet_event_channel *ch,
    s_time_t expire, s_time_t now, int force)
{
    int64_t delta;
    int ret;

    if ( (ch->flags & HPET_EVT_DISABLE) || (expire == 0) )
        return 0;

    if ( unlikely(expire < 0) )
    {
        printk(KERN_DEBUG "reprogram: expire < 0\n");
        return -ETIME;
    }

    delta = expire - now;
    if ( (delta <= 0) && !force )
        return -ETIME;

    ch->next_event = expire;

    if ( expire == STIME_MAX )
    {
        /* We assume it will take a long time for the timer to wrap. */
        hpet_write32(0, HPET_Tn_CMP(ch->idx));
        return 0;
    }

    delta = min_t(int64_t, delta, MAX_DELTA_NS);
    delta = max_t(int64_t, delta, MIN_DELTA_NS);
    delta = ns2ticks(delta, ch->shift, ch->mult);

    ret = hpet_next_event(delta, ch->idx);
    while ( ret && force )
    {
        delta += delta;
        ret = hpet_next_event(delta, ch->idx);
    }

    return ret;
}
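
/*
 * The doubling loop above covers the forced case: each time
 * hpet_next_event() reports that the deadline may already have passed,
 * the delta is doubled until the comparator lands safely ahead of the
 * main counter.
 */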

static int evt_do_broadcast(cpumask_t mask)
{
    int ret = 0, cpu = smp_processor_id();

    if ( cpu_isset(cpu, mask) )
    {
        cpu_clear(cpu, mask);
        raise_softirq(TIMER_SOFTIRQ);
        ret = 1;
    }

    if ( !cpus_empty(mask) )
    {
        cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
        ret = 1;
    }
    return ret;
}

static void handle_hpet_broadcast(struct hpet_event_channel *ch)
{
    cpumask_t mask;
    s_time_t now, next_event;
    int cpu;

    spin_lock_irq(&ch->lock);

again:
    ch->next_event = STIME_MAX;
    next_event = STIME_MAX;
    mask = (cpumask_t)CPU_MASK_NONE;
    now = NOW();

    /* find all expired events */
    for_each_cpu_mask(cpu, ch->cpumask)
    {
        if ( per_cpu(timer_deadline_start, cpu) <= now )
            cpu_set(cpu, mask);
        else if ( per_cpu(timer_deadline_end, cpu) < next_event )
            next_event = per_cpu(timer_deadline_end, cpu);
    }

    /* wake up the cpus which have an expired event. */
    evt_do_broadcast(mask);

    if ( next_event != STIME_MAX )
    {
        if ( reprogram_hpet_evt_channel(ch, next_event, now, 0) )
            goto again;
    }
    spin_unlock_irq(&ch->lock);
}
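
/*
 * handle_hpet_broadcast() wakes every CPU whose deadline has expired and
 * re-arms the channel for the earliest remaining deadline. If re-arming
 * fails because that deadline slipped into the past while the mask was
 * being scanned, the 'goto again' rescan turns it into an immediate
 * wakeup instead of a lost event.
 */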

static void hpet_interrupt_handler(int irq, void *data,
                                   struct cpu_user_regs *regs)
{
    struct hpet_event_channel *ch = (struct hpet_event_channel *)data;

    this_cpu(irq_count)--;

    if ( !ch->event_handler )
    {
        printk(XENLOG_WARNING
               "Spurious HPET timer interrupt on HPET timer %d\n", ch->idx);
        return;
    }

    ch->event_handler(ch);
}

static void hpet_msi_unmask(unsigned int irq)
{
    unsigned long cfg;
    int ch_idx = irq_to_channel(irq);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg |= HPET_TN_FSB;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
}

static void hpet_msi_mask(unsigned int irq)
{
    unsigned long cfg;
    int ch_idx = irq_to_channel(irq);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg &= ~HPET_TN_FSB;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
}

static void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
{
    int ch_idx = irq_to_channel(irq);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
    hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
}

static void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
{
    int ch_idx = irq_to_channel(irq);
    struct hpet_event_channel *ch;

    BUG_ON(ch_idx < 0);
    ch = &hpet_events[ch_idx];

    msg->data = hpet_read32(HPET_Tn_ROUTE(ch->idx));
    msg->address_lo = hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4);
    msg->address_hi = 0;
}

static unsigned int hpet_msi_startup(unsigned int irq)
{
    hpet_msi_unmask(irq);
    return 0;
}

static void hpet_msi_shutdown(unsigned int irq)
{
    hpet_msi_mask(irq);
}

static void hpet_msi_ack(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    irq_complete_move(&desc);
    move_native_irq(irq);
    ack_APIC_irq();
}

static void hpet_msi_end(unsigned int irq)
{
}

static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
    struct msi_msg msg;
    unsigned int dest;
    struct irq_desc *desc = irq_to_desc(irq);
    struct irq_cfg *cfg = desc->chip_data;

    dest = set_desc_affinity(desc, mask);
    if ( dest == BAD_APICID )
        return;

    hpet_msi_read(irq, &msg);
    msg.data &= ~MSI_DATA_VECTOR_MASK;
    msg.data |= MSI_DATA_VECTOR(cfg->vector);
    msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
    msg.address_lo |= MSI_ADDR_DEST_ID(dest);
    hpet_msi_write(irq, &msg);
}

/*
 * IRQ Chip for MSI HPET Devices.
 */
static hw_irq_controller hpet_msi_type = {
    .typename = "HPET-MSI",
    .startup = hpet_msi_startup,
    .shutdown = hpet_msi_shutdown,
    .enable = hpet_msi_unmask,
    .disable = hpet_msi_mask,
    .ack = hpet_msi_ack,
    .end = hpet_msi_end,
    .set_affinity = hpet_msi_set_affinity,
};

static int hpet_setup_msi_irq(unsigned int irq)
{
    int ret;
    struct msi_msg msg;
    struct hpet_event_channel *ch = &hpet_events[irq_to_channel(irq)];

    irq_desc[irq].handler = &hpet_msi_type;
    ret = request_irq(irq, hpet_interrupt_handler,
                      0, "HPET", ch);
    if ( ret < 0 )
        return ret;

    msi_compose_msg(NULL, irq, &msg);
    hpet_msi_write(irq, &msg);

    return 0;
}
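
/*
 * Note (cf. the commit message above): hpet_assign_irq() allocates an irq
 * and an irq_channel[] slot only on first use (ch->irq < 0). On S3 resume
 * both survive, but hpet_setup_msi_irq() must still be executed so that
 * the channel's MSI message is written again after suspend.
 */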

static int hpet_assign_irq(struct hpet_event_channel *ch)
{
    int irq = ch->irq;

    if ( irq < 0 )
    {
        if ( (irq = create_irq()) < 0 )
            return irq;

        irq_channel[irq] = ch - &hpet_events[0];
        ch->irq = irq;
    }

    /* hpet_setup_msi_irq should also be called during S3 resume */
    if ( hpet_setup_msi_irq(irq) )
    {
        destroy_irq(irq);
        irq_channel[irq] = -1;
        ch->irq = -1;
        return -EINVAL;
    }

    return 0;
}

static int hpet_fsb_cap_lookup(void)
{
    unsigned int id;
    unsigned int num_chs, num_chs_used;
    int i;

    /* TODO. */
    if ( iommu_intremap )
    {
        printk(XENLOG_INFO "HPET's MSI mode is not supported when "
               "Interrupt Remapping is enabled.\n");
        return 0;
    }

    id = hpet_read32(HPET_ID);

    num_chs = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
    num_chs++; /* Value read out starts from 0 */

    num_chs_used = 0;
    for ( i = 0; i < num_chs; i++ )
    {
        struct hpet_event_channel *ch = &hpet_events[num_chs_used];
        unsigned long cfg = hpet_read32(HPET_Tn_CFG(i));

        /* Only consider HPET timers with MSI support */
        if ( !(cfg & HPET_TN_FSB_CAP) )
            continue;

        ch->flags = 0;
        ch->idx = i;

        if ( hpet_assign_irq(ch) )
            continue;

        /* set default irq affinity */
        ch->cpu = num_chs_used;
        per_cpu(cpu_bc_channel, ch->cpu) = ch;
        irq_desc[ch->irq].handler->
            set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));

        num_chs_used++;

        if ( num_chs_used == num_possible_cpus() )
            break;
    }

    printk(XENLOG_INFO
           "HPET: %d timers in total, %d timers will be used for broadcast\n",
           num_chs, num_chs_used);

    return num_chs_used;
}

static int next_channel;
static spinlock_t next_lock = SPIN_LOCK_UNLOCKED;

static struct hpet_event_channel *hpet_get_channel(int cpu)
{
    int i;
    int next;
    struct hpet_event_channel *ch;

    spin_lock(&next_lock);
    next = next_channel = (next_channel + 1) % num_hpets_used;
    spin_unlock(&next_lock);

    /* try an unused channel first */
    for ( i = next; i < next + num_hpets_used; i++ )
    {
        ch = &hpet_events[i % num_hpets_used];
        if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        {
            ch->cpu = cpu;
            return ch;
        }
    }

    /* share an in-use channel */
    ch = &hpet_events[next];
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    return ch;
}
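
/*
 * Allocation policy sketch: the round-robin cursor 'next_channel' spreads
 * CPUs across the MSI channels. A CPU first tries to claim a channel's
 * HPET_EVT_USED bit exclusively; only when every channel is taken does it
 * fall back to sharing the channel at the cursor with other CPUs.
 */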

static void hpet_attach_channel_share(int cpu, struct hpet_event_channel *ch)
{
    per_cpu(cpu_bc_channel, cpu) = ch;

    /* try to be the channel owner again while holding the lock */
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    if ( ch->cpu != cpu )
        return;

    /* set irq affinity */
    irq_desc[ch->irq].handler->
        set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));
}

static void hpet_detach_channel_share(int cpu)
{
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);

    per_cpu(cpu_bc_channel, cpu) = NULL;

    if ( cpu != ch->cpu )
        return;

    if ( cpus_empty(ch->cpumask) )
    {
        ch->cpu = -1;
        clear_bit(HPET_EVT_USED_BIT, &ch->flags);
        return;
    }

    ch->cpu = first_cpu(ch->cpumask);
    /* set irq affinity */
    irq_desc[ch->irq].handler->
        set_affinity(ch->irq, cpumask_of_cpu(ch->cpu));
}

static void (*hpet_attach_channel)(int cpu, struct hpet_event_channel *ch);
static void (*hpet_detach_channel)(int cpu);

#include <asm/mc146818rtc.h>

void (*pv_rtc_handler)(unsigned int port, uint8_t value);

static void handle_rtc_once(unsigned int port, uint8_t value)
{
    static int index;

    if ( port == 0x70 )
    {
        index = value;
        return;
    }

    if ( index != RTC_REG_B )
        return;

    /* RTC Reg B contains PIE/AIE/UIE */
    if ( value & (RTC_PIE | RTC_AIE | RTC_UIE) )
    {
        cpuidle_disable_deep_cstate();
        pv_rtc_handler = NULL;
    }
}

void hpet_broadcast_init(void)
{
    u64 hpet_rate;
    u32 hpet_id, cfg;
    int i;
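
    /*
     * Allocate irq_channel[] only once: hpet_broadcast_init() runs again
     * on S3 resume, and re-allocating would reset the irq -> channel map
     * while the channels keep their irqs, tripping the BUG_ON(ch_idx < 0)
     * in hpet_msi_write() (see the commit message above).
     */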
    if ( irq_channel == NULL )
    {
        irq_channel = xmalloc_array(int, nr_irqs);
        BUG_ON(irq_channel == NULL);
        for ( i = 0; i < nr_irqs; i++ )
            irq_channel[i] = -1;
    }

    hpet_rate = hpet_setup();
    if ( hpet_rate == 0 )
        return;

    num_hpets_used = hpet_fsb_cap_lookup();
    if ( num_hpets_used > 0 )
    {
        /* Stop HPET legacy interrupts */
        cfg = hpet_read32(HPET_CFG);
        cfg &= ~HPET_CFG_LEGACY;
        hpet_write32(cfg, HPET_CFG);

        for ( i = 0; i < num_hpets_used; i++ )
        {
            /* set HPET Tn as oneshot */
            cfg = hpet_read32(HPET_Tn_CFG(hpet_events[i].idx));
            cfg &= ~HPET_TN_PERIODIC;
            cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
            hpet_write32(cfg, HPET_Tn_CFG(hpet_events[i].idx));

            hpet_events[i].mult = div_sc((unsigned long)hpet_rate,
                                         1000000000ul, 32);
            hpet_events[i].shift = 32;
            hpet_events[i].next_event = STIME_MAX;
            hpet_events[i].event_handler = handle_hpet_broadcast;
            spin_lock_init(&hpet_events[i].lock);
        }

        if ( num_hpets_used < num_possible_cpus() )
        {
            hpet_attach_channel = hpet_attach_channel_share;
            hpet_detach_channel = hpet_detach_channel_share;
        }

        return;
    }

    if ( legacy_hpet_event.flags & HPET_EVT_DISABLE )
        return;

    hpet_id = hpet_read32(HPET_ID);
    if ( !(hpet_id & HPET_ID_LEGSUP) )
        return;

    /* Start HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    /* set HPET T0 as oneshot */
    cfg = hpet_read32(HPET_T0_CFG);
    cfg &= ~HPET_TN_PERIODIC;
    cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
    hpet_write32(cfg, HPET_T0_CFG);

    /*
     * The period is a femtoseconds value. We need to calculate the scaled
     * math multiplication factor for nanosecond to HPET tick conversion.
     */
    legacy_hpet_event.mult = div_sc((unsigned long)hpet_rate, 1000000000ul, 32);
    legacy_hpet_event.shift = 32;
    legacy_hpet_event.next_event = STIME_MAX;
    legacy_hpet_event.event_handler = handle_hpet_broadcast;
    legacy_hpet_event.idx = 0;
    legacy_hpet_event.flags = 0;
    spin_lock_init(&legacy_hpet_event.lock);

    for_each_possible_cpu(i)
        per_cpu(cpu_bc_channel, i) = &legacy_hpet_event;

    if ( !force_hpet_broadcast )
        pv_rtc_handler = handle_rtc_once;
}

void hpet_disable_legacy_broadcast(void)
{
    u32 cfg;
    unsigned long flags;

    spin_lock_irqsave(&legacy_hpet_event.lock, flags);

    legacy_hpet_event.flags |= HPET_EVT_DISABLE;

    /* disable HPET T0 */
    cfg = hpet_read32(HPET_T0_CFG);
    cfg &= ~HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_T0_CFG);

    /* Stop HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg &= ~HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    spin_unlock_irqrestore(&legacy_hpet_event.lock, flags);

    smp_send_event_check_mask(&cpu_online_map);
}

void hpet_broadcast_enter(void)
{
    int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);

    if ( this_cpu(timer_deadline_start) == 0 )
        return;

    if ( !ch )
        ch = hpet_get_channel(cpu);
    BUG_ON( !ch );

    ASSERT(!local_irq_is_enabled());
    spin_lock(&ch->lock);

    if ( hpet_attach_channel )
        hpet_attach_channel(cpu, ch);

    /* Cancel any outstanding LAPIC timer event and disable interrupts. */
    reprogram_timer(0);
    disable_APIC_timer();

    cpu_set(cpu, ch->cpumask);

    /* reprogram if the current cpu's expiry time is nearer */
    if ( this_cpu(timer_deadline_end) < ch->next_event )
        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);

    spin_unlock(&ch->lock);
}

void hpet_broadcast_exit(void)
{
    int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);

    if ( this_cpu(timer_deadline_start) == 0 )
        return;

    BUG_ON( !ch );

    spin_lock_irq(&ch->lock);

    if ( cpu_test_and_clear(cpu, ch->cpumask) )
    {
        /* Reprogram the deadline; trigger timer work now if it has passed. */
        enable_APIC_timer();
        if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
            raise_softirq(TIMER_SOFTIRQ);

        if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
            reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
    }

    if ( hpet_detach_channel )
        hpet_detach_channel(cpu);

    spin_unlock_irq(&ch->lock);
}

int hpet_broadcast_is_available(void)
{
    return (legacy_hpet_event.event_handler == handle_hpet_broadcast
            || num_hpets_used > 0);
}

int hpet_legacy_irq_tick(void)
{
    this_cpu(irq_count)--;

    if ( !legacy_hpet_event.event_handler )
        return 0;
    legacy_hpet_event.event_handler(&legacy_hpet_event);
    return 1;
}

u64 hpet_setup(void)
{
    static u64 hpet_rate;
    static u32 system_reset_latch;
    u32 hpet_id, hpet_period, cfg;
    int i;

    if ( system_reset_latch == system_reset_counter )
        return hpet_rate;
    system_reset_latch = system_reset_counter;

    if ( hpet_address == 0 )
        return 0;

    set_fixmap_nocache(FIX_HPET_BASE, hpet_address);

    hpet_id = hpet_read32(HPET_ID);
    if ( (hpet_id & HPET_ID_REV) == 0 )
    {
        printk("BAD HPET revision id.\n");
        return 0;
    }

    /* Check for sane period (100ps <= period <= 100ns). */
    hpet_period = hpet_read32(HPET_PERIOD);
    if ( (hpet_period > 100000000) || (hpet_period < 100000) )
    {
        printk("BAD HPET period %u.\n", hpet_period);
        return 0;
    }

    cfg = hpet_read32(HPET_CFG);
    cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
    hpet_write32(cfg, HPET_CFG);

    for ( i = 0; i <= ((hpet_id >> 8) & 31); i++ )
    {
        cfg = hpet_read32(HPET_Tn_CFG(i));
        cfg &= ~HPET_TN_ENABLE;
        hpet_write32(cfg, HPET_Tn_CFG(i));
    }

    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_ENABLE;
    hpet_write32(cfg, HPET_CFG);

    hpet_rate = 1000000000000000ULL; /* 10^15 */
    (void)do_div(hpet_rate, hpet_period);
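
    /*
     * Worked example (illustrative): hpet_period is in femtoseconds, so a
     * typical period of 69841279 fs yields
     * hpet_rate = 10^15 / 69841279 ~= 14318180 ticks/s, the classic
     * 14.31818 MHz HPET clock.
     */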

    return hpet_rate;
}