Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/hpet.c
Line
Count
Source (jump to first uncovered line)
1
/******************************************************************************
2
 * arch/x86/hpet.c
3
 *
4
 * HPET management.
5
 */
6
7
#include <xen/errno.h>
8
#include <xen/time.h>
9
#include <xen/timer.h>
10
#include <xen/smp.h>
11
#include <xen/softirq.h>
12
#include <xen/irq.h>
13
#include <xen/numa.h>
14
#include <asm/fixmap.h>
15
#include <asm/div64.h>
16
#include <asm/hpet.h>
17
#include <asm/msi.h>
18
#include <mach_apic.h>
19
#include <xen/cpuidle.h>
20
21
/* Clamp for relative deadlines programmed into the HPET comparators. */
#define MAX_DELTA_NS MILLISECS(10*1000)
#define MIN_DELTA_NS MICROSECS(20)

/* Bits of struct hpet_event_channel.flags (HPET_EVT_x). */
#define HPET_EVT_USED_BIT    0
#define HPET_EVT_USED       (1 << HPET_EVT_USED_BIT)
#define HPET_EVT_DISABLE_BIT 1
#define HPET_EVT_DISABLE    (1 << HPET_EVT_DISABLE_BIT)
#define HPET_EVT_LEGACY_BIT  2
#define HPET_EVT_LEGACY     (1 << HPET_EVT_LEGACY_BIT)
30
31
/* State of one HPET comparator used as a broadcast wakeup channel. */
struct hpet_event_channel
{
    unsigned long mult;        /* ns->tick scaled multiplier (see div_sc()) */
    int           shift;       /* shift paired with mult */
    s_time_t      next_event;  /* absolute expiry currently programmed */
    cpumask_var_t cpumask;     /* CPUs currently relying on this channel */
    spinlock_t    lock;
    void          (*event_handler)(struct hpet_event_channel *);

    unsigned int idx;   /* physical channel idx */
    unsigned int cpu;   /* msi target */
    struct msi_desc msi;/* msi state */
    unsigned int flags; /* HPET_EVT_x */
} __cacheline_aligned;
45
/* Broadcast channel array: one entry in legacy mode, num_hpets_used in
 * MSI mode.  NULL until hpet_fsb_cap_lookup()/hpet_broadcast_init(). */
static struct hpet_event_channel *__read_mostly hpet_events;

/* msi hpet channels used for broadcast */
static unsigned int __read_mostly num_hpets_used;

/* Channel this CPU attached itself to; NULL when not in broadcast mode. */
DEFINE_PER_CPU(struct hpet_event_channel *, cpu_bc_channel);

/* HPET description discovered at boot (presumably from ACPI tables —
 * set outside this file). */
unsigned long __initdata hpet_address;
u8 __initdata hpet_blockid;
u8 __initdata hpet_flags;

/*
 * force_hpet_broadcast: by default legacy hpet broadcast will be stopped
 * if RTC interrupts are enabled. Enable this option if want to always enable
 * legacy hpet broadcast for deep C state
 */
static bool __initdata force_hpet_broadcast;
boolean_param("hpetbroadcast", force_hpet_broadcast);
63
64
/*
 * Compute the scaled-math multiplication factor used to convert
 * nanosecond based values into clock ticks:
 *
 *   clock_ticks = (nanoseconds * factor) >> shift
 *
 * Rearranged for a known clock_ticks / nanoseconds ratio this gives:
 *
 *   factor = (clock_ticks << shift) / nanoseconds
 */
static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
                                   int shift)
{
    uint64_t scaled = (uint64_t)ticks << shift;

    do_div(scaled, nsec);

    return (unsigned long)scaled;
}
83
84
/*
 * Convert a nanosecond based value to clock ticks using a factor/shift
 * pair produced by div_sc():
 *
 *   clock_ticks = (nanoseconds * factor) >> shift
 */
static inline unsigned long ns2ticks(unsigned long nsec, int shift,
                                     unsigned long factor)
{
    uint64_t product = (uint64_t)nsec * factor;

    return (unsigned long)(product >> shift);
}
96
97
/*
 * Program channel 'timer' to fire 'delta' ticks from the current main
 * counter value.  Returns 0 on success, or -ETIME when the comparator
 * may already have been passed by the time it was written (the caller
 * must then retry with a larger delta).
 */
static int hpet_next_event(unsigned long delta, int timer)
{
    uint32_t cnt, cmp;
    unsigned long flags;

    /* Read counter, write comparator, and re-read the counter with IRQs
     * off, keeping the race window as small as possible. */
    local_irq_save(flags);
    cnt = hpet_read32(HPET_COUNTER);
    cmp = cnt + delta;
    hpet_write32(cmp, HPET_Tn_CMP(timer));
    cmp = hpet_read32(HPET_COUNTER);
    local_irq_restore(flags);

    /* Are we within two ticks of the deadline passing? Then we may miss. */
    return ((cmp + 2 - cnt) > delta) ? -ETIME : 0;
}
112
113
/*
 * Reprogram channel 'ch' to expire at absolute system time 'expire'.
 * 'now' is the current time; when 'force' is set the comparator write is
 * retried with an exponentially growing delta until it sticks.
 *
 * Returns 0 on success (or when the channel is disabled / expire == 0),
 * -ETIME when the deadline already passed and 'force' was clear.
 * Callers hold ch->lock (see handle_hpet_broadcast / hpet_broadcast_enter).
 */
static int reprogram_hpet_evt_channel(
    struct hpet_event_channel *ch,
    s_time_t expire, s_time_t now, int force)
{
    int64_t delta;
    int ret;

    if ( (ch->flags & HPET_EVT_DISABLE) || (expire == 0) )
        return 0;

    if ( unlikely(expire < 0) )
    {
        printk(KERN_DEBUG "reprogram: expire <= 0\n");
        return -ETIME;
    }

    delta = expire - now;
    if ( (delta <= 0) && !force )
        return -ETIME;

    ch->next_event = expire;

    if ( expire == STIME_MAX )
    {
        /* We assume it will take a long time for the timer to wrap. */
        hpet_write32(0, HPET_Tn_CMP(ch->idx));
        return 0;
    }

    /* Clamp the relative deadline, then convert from ns to HPET ticks. */
    delta = min_t(int64_t, delta, MAX_DELTA_NS);
    delta = max_t(int64_t, delta, MIN_DELTA_NS);
    delta = ns2ticks(delta, ch->shift, ch->mult);

    ret = hpet_next_event(delta, ch->idx);
    while ( ret && force )
    {
        /* Comparator already passed: double the delta and retry. */
        delta += delta;
        ret = hpet_next_event(delta, ch->idx);
    }

    return ret;
}
155
156
/*
 * Wake the CPUs in 'mask' whose broadcast deadline expired.  The local
 * CPU is serviced directly via raise_softirq(); cpuidle_wakeup_mwait()
 * handles CPUs sleeping in monitor/mwait (presumably removing them from
 * 'mask' — confirm against its implementation), and any CPUs still left
 * get an IPI-backed TIMER_SOFTIRQ.
 */
static void evt_do_broadcast(cpumask_t *mask)
{
    unsigned int cpu = smp_processor_id();

    if ( __cpumask_test_and_clear_cpu(cpu, mask) )
        raise_softirq(TIMER_SOFTIRQ);

    cpuidle_wakeup_mwait(mask);

    if ( !cpumask_empty(mask) )
       cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
}
168
169
/*
 * Broadcast channel event handler: wake every CPU on this channel whose
 * per-cpu timer_deadline has expired and re-arm the channel for the
 * earliest remaining deadline.
 */
static void handle_hpet_broadcast(struct hpet_event_channel *ch)
{
    cpumask_t mask;
    s_time_t now, next_event;
    unsigned int cpu;
    unsigned long flags;

    spin_lock_irqsave(&ch->lock, flags);

again:
    /* Mark the channel idle; re-armed below if deadlines remain. */
    ch->next_event = STIME_MAX;

    spin_unlock_irqrestore(&ch->lock, flags);

    next_event = STIME_MAX;
    cpumask_clear(&mask);
    now = NOW();

    /* find all expired events */
    for_each_cpu(cpu, ch->cpumask)
    {
        s_time_t deadline;

        /* Re-check membership: the CPU may have left the mask since the
         * iteration snapshot was taken. */
        if ( !cpumask_test_cpu(cpu, ch->cpumask) )
            continue;

        deadline = ACCESS_ONCE(per_cpu(timer_deadline, cpu));

        if ( deadline <= now )
            __cpumask_set_cpu(cpu, &mask);
        else if ( deadline < next_event )
            next_event = deadline;
    }

    /* wakeup the cpus which have an expired event. */
    evt_do_broadcast(&mask);

    if ( next_event != STIME_MAX )
    {
        spin_lock_irqsave(&ch->lock, flags);

        /* If reprogramming fails, the deadline passed meanwhile: rescan. */
        if ( next_event < ch->next_event &&
             reprogram_hpet_evt_channel(ch, next_event, now, 0) )
            goto again;

        spin_unlock_irqrestore(&ch->lock, flags);
    }
}
217
218
static void hpet_interrupt_handler(int irq, void *data,
219
        struct cpu_user_regs *regs)
220
0
{
221
0
    struct hpet_event_channel *ch = (struct hpet_event_channel *)data;
222
0
223
0
    this_cpu(irq_count)--;
224
0
225
0
    if ( !ch->event_handler )
226
0
    {
227
0
        printk(XENLOG_WARNING "Spurious HPET timer interrupt on HPET timer %d\n", ch->idx);
228
0
        return;
229
0
    }
230
0
231
0
    ch->event_handler(ch);
232
0
}
233
234
/* Unmask the channel bound to 'desc' by setting its enable bit. */
static void hpet_msi_unmask(struct irq_desc *desc)
{
    u32 cfg;
    struct hpet_event_channel *ch = desc->action->dev_id;

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg |= HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
    ch->msi.msi_attrib.host_masked = 0;
}
244
245
/* Mask the channel bound to 'desc' by clearing its enable bit. */
static void hpet_msi_mask(struct irq_desc *desc)
{
    u32 cfg;
    struct hpet_event_channel *ch = desc->action->dev_id;

    cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    cfg &= ~HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
    ch->msi.msi_attrib.host_masked = 1;
}
255
256
/*
 * Write MSI message 'msg' to the channel's FSB route registers, first
 * updating the interrupt remapping entry when an IOMMU is in use.
 * Returns 0 on success or the IOMMU error code.
 */
static int hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
{
    ch->msi.msg = *msg;

    if ( iommu_intremap )
    {
        int rc = iommu_update_ire_from_msi(&ch->msi, msg);

        if ( rc )
            return rc;
    }

    hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
    hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);

    return 0;
}
273
274
/*
 * Read back the channel's MSI message from its FSB route registers
 * (translated through the IOMMU when remapping is active).  Currently
 * has no callers, hence __maybe_unused.
 */
static void __maybe_unused
hpet_msi_read(struct hpet_event_channel *ch, struct msi_msg *msg)
{
    msg->data = hpet_read32(HPET_Tn_ROUTE(ch->idx));
    msg->address_lo = hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4);
    msg->address_hi = MSI_ADDR_BASE_HI;
    if ( iommu_intremap )
        iommu_read_msi_from_ire(&ch->msi, msg);
}
283
284
/* hw_irq_controller startup hook: unmask; returns 0 (no IRQ pending). */
static unsigned int hpet_msi_startup(struct irq_desc *desc)
{
    hpet_msi_unmask(desc);
    return 0;
}

/* Shutdown is simply a mask operation. */
#define hpet_msi_shutdown hpet_msi_mask
291
292
/* Ack hook: finish any in-flight IRQ migration, then EOI the local APIC. */
static void hpet_msi_ack(struct irq_desc *desc)
{
    irq_complete_move(desc);
    move_native_irq(desc);
    ack_APIC_irq();
}
298
299
/*
 * Retarget the channel's MSI at the CPUs in 'mask'.  The HPET route
 * registers are only rewritten when the vector or destination actually
 * changed.
 */
static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
    struct hpet_event_channel *ch = desc->action->dev_id;
    struct msi_msg msg = ch->msi.msg;

    msg.dest32 = set_desc_affinity(desc, mask);
    if ( msg.dest32 == BAD_APICID )
        return;

    /* Encode the (possibly new) vector and destination into the message. */
    msg.data &= ~MSI_DATA_VECTOR_MASK;
    msg.data |= MSI_DATA_VECTOR(desc->arch.vector);
    msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
    msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
    if ( msg.data != ch->msi.msg.data || msg.dest32 != ch->msi.msg.dest32 )
        hpet_msi_write(ch, &msg);
}
315
316
/*
 * IRQ chip operations for MSI-delivered HPET channels.
 */
static hw_irq_controller hpet_msi_type = {
    .typename   = "HPET-MSI",
    .startup    = hpet_msi_startup,
    .shutdown   = hpet_msi_shutdown,
    .enable     = hpet_msi_unmask,
    .disable    = hpet_msi_mask,
    .ack        = hpet_msi_ack,
    .set_affinity   = hpet_msi_set_affinity,
};
328
329
/*
 * Compose an MSI message for the desc's current vector/cpu mask and
 * write it to the channel's route registers.  Returns hpet_msi_write()'s
 * result.
 */
static int __hpet_setup_msi_irq(struct irq_desc *desc)
{
    struct msi_msg msg;

    msi_compose_msg(desc->arch.vector, desc->arch.cpu_mask, &msg);
    return hpet_msi_write(desc->action->dev_id, &msg);
}
336
337
/*
 * Boot-time MSI setup for one channel: register with the IOMMU when
 * interrupt remapping is on, switch the comparator to FSB oneshot mode,
 * and install the interrupt handler.  Returns 0 on success, a negative
 * error code otherwise (undoing the IOMMU entry on failure).
 */
static int __init hpet_setup_msi_irq(struct hpet_event_channel *ch)
{
    int ret;
    u32 cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
    irq_desc_t *desc = irq_to_desc(ch->msi.irq);

    if ( iommu_intremap )
    {
        ch->msi.hpet_id = hpet_blockid;
        ret = iommu_setup_hpet_msi(&ch->msi);
        if ( ret )
            return ret;
    }

    /* set HPET Tn as oneshot */
    cfg &= ~(HPET_TN_LEVEL | HPET_TN_PERIODIC);
    cfg |= HPET_TN_FSB | HPET_TN_32BIT;
    hpet_write32(cfg, HPET_Tn_CFG(ch->idx));

    desc->handler = &hpet_msi_type;
    ret = request_irq(ch->msi.irq, 0, hpet_interrupt_handler, "HPET", ch);
    if ( ret >= 0 )
        ret = __hpet_setup_msi_irq(desc);
    if ( ret < 0 )
    {
        /* Roll back the interrupt remapping entry. */
        if ( iommu_intremap )
            iommu_update_ire_from_msi(&ch->msi, NULL);
        return ret;
    }

    desc->msi_desc = &ch->msi;

    return 0;
}
371
372
/*
 * Allocate an IRQ for channel 'ch' and bind its MSI to it.
 *
 * Returns 0 on success, otherwise a negative error code.  The failure
 * code from hpet_setup_msi_irq() is now propagated instead of being
 * flattened to -EINVAL, so callers/logs can see the real cause; callers
 * only test for non-zero, so this is backward-compatible.
 */
static int __init hpet_assign_irq(struct hpet_event_channel *ch)
{
    int irq, rc;

    if ( (irq = create_irq(NUMA_NO_NODE)) < 0 )
        return irq;

    ch->msi.irq = irq;
    rc = hpet_setup_msi_irq(ch);
    if ( rc )
    {
        /* Release the IRQ again on failure. */
        destroy_irq(irq);
        return rc;
    }

    return 0;
}
388
389
/*
 * Probe every HPET comparator for FSB (MSI) delivery capability and set
 * up one broadcast channel per usable comparator, capped at nr_cpu_ids.
 * On return hpet_events/num_hpets_used describe the usable channels.
 */
static void __init hpet_fsb_cap_lookup(void)
{
    u32 id;
    unsigned int i, num_chs;

    /* Firmware says MSI delivery from the HPET must not be used. */
    if ( unlikely(acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) )
        return;

    id = hpet_read32(HPET_ID);

    num_chs = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
    num_chs++; /* Value read out starts from 0 */

    hpet_events = xzalloc_array(struct hpet_event_channel, num_chs);
    if ( !hpet_events )
        return;

    for ( i = 0; i < num_chs && num_hpets_used < nr_cpu_ids; i++ )
    {
        struct hpet_event_channel *ch = &hpet_events[num_hpets_used];
        u32 cfg = hpet_read32(HPET_Tn_CFG(i));

        /* Only consider HPET timer with MSI support */
        if ( !(cfg & HPET_TN_FSB_CAP) )
            continue;

        if ( !zalloc_cpumask_var(&ch->cpumask) )
        {
            /* Free the array only if no channel was set up at all. */
            if ( !num_hpets_used )
            {
                xfree(hpet_events);
                hpet_events = NULL;
            }
            break;
        }

        ch->flags = 0;
        ch->idx = i;

        if ( hpet_assign_irq(ch) == 0 )
            num_hpets_used++;
    }

    printk(XENLOG_INFO "HPET: %u timers usable for broadcast (%u total)\n",
           num_hpets_used, num_chs);
}
435
436
/*
 * Pick a broadcast channel for 'cpu'.  With at least one channel per
 * CPU the mapping is 1:1; with fewer, channels are handed out
 * round-robin, preferring a currently unused one before sharing.
 */
static struct hpet_event_channel *hpet_get_channel(unsigned int cpu)
{
    static unsigned int next_channel;
    unsigned int i, next;
    struct hpet_event_channel *ch;

    /* Legacy mode: the single hpet_events[0] channel serves everyone. */
    if ( num_hpets_used == 0 )
        return hpet_events;

    if ( num_hpets_used >= nr_cpu_ids )
        return &hpet_events[cpu];

    /* Lock-free round-robin advance of next_channel (wraps to 0). */
    do {
        next = next_channel;
        if ( (i = next + 1) == num_hpets_used )
            i = 0;
    } while ( cmpxchg(&next_channel, next, i) != next );

    /* try unused channel first */
    for ( i = next; i < next + num_hpets_used; i++ )
    {
        ch = &hpet_events[i % num_hpets_used];
        if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        {
            ch->cpu = cpu;
            return ch;
        }
    }

    /* share a in-use channel */
    ch = &hpet_events[next];
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    return ch;
}
472
473
/*
 * Point the channel's MSI at ch->cpu.  Called with IRQs disabled and
 * ch->lock held; NOTE: drops ch->lock before returning.
 */
static void set_channel_irq_affinity(struct hpet_event_channel *ch)
{
    struct irq_desc *desc = irq_to_desc(ch->msi.irq);

    ASSERT(!local_irq_is_enabled());
    spin_lock(&desc->lock);
    hpet_msi_mask(desc);
    hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
    hpet_msi_unmask(desc);
    spin_unlock(&desc->lock);

    spin_unlock(&ch->lock);

    /* We may have missed an interrupt due to the temporary masking. */
    if ( ch->event_handler && ch->next_event < NOW() )
        ch->event_handler(ch);
}
490
491
/*
 * Make 'ch' this CPU's broadcast channel and, if the CPU thereby became
 * the channel owner, retarget the MSI at it.  Called with IRQs
 * disabled; ch->lock is released before returning (either here or by
 * set_channel_irq_affinity()).
 */
static void hpet_attach_channel(unsigned int cpu,
                                struct hpet_event_channel *ch)
{
    ASSERT(!local_irq_is_enabled());
    spin_lock(&ch->lock);

    per_cpu(cpu_bc_channel, cpu) = ch;

    /* try to be the channel owner again while holding the lock */
    if ( !test_and_set_bit(HPET_EVT_USED_BIT, &ch->flags) )
        ch->cpu = cpu;

    if ( ch->cpu != cpu )
        spin_unlock(&ch->lock);
    else
        /* Drops ch->lock for us. */
        set_channel_irq_affinity(ch);
}
508
509
/*
 * Detach 'cpu' from broadcast channel 'ch'.  If this CPU owned the
 * channel, either release the channel entirely (no users left) or hand
 * ownership to the first remaining CPU and retarget the MSI.
 */
static void hpet_detach_channel(unsigned int cpu,
                                struct hpet_event_channel *ch)
{
    spin_lock_irq(&ch->lock);

    ASSERT(ch == per_cpu(cpu_bc_channel, cpu));

    per_cpu(cpu_bc_channel, cpu) = NULL;

    if ( cpu != ch->cpu )
        spin_unlock_irq(&ch->lock);
    else if ( cpumask_empty(ch->cpumask) )
    {
        /* Last user gone: free the channel for reuse. */
        ch->cpu = -1;
        clear_bit(HPET_EVT_USED_BIT, &ch->flags);
        spin_unlock_irq(&ch->lock);
    }
    else
    {
        ch->cpu = cpumask_first(ch->cpumask);
        /* Drops ch->lock for us (IRQs re-enabled just below). */
        set_channel_irq_affinity(ch);
        local_irq_enable();
    }
}
533
534
#include <asm/mc146818rtc.h>

/* Hook presumably called from the RTC emulation path on register writes
 * (set in hpet_broadcast_init(); verify against the caller). */
void (*__read_mostly pv_rtc_handler)(uint8_t index, uint8_t value);
537
538
static void handle_rtc_once(uint8_t index, uint8_t value)
539
0
{
540
0
    if ( index != RTC_REG_B )
541
0
        return;
542
0
543
0
    /* RTC Reg B, contain PIE/AIE/UIE */
544
0
    if ( value & (RTC_PIE | RTC_AIE | RTC_UIE ) )
545
0
    {
546
0
        cpuidle_disable_deep_cstate();
547
0
        pv_rtc_handler = NULL;
548
0
    }
549
0
}
550
551
/*
 * Set up HPET-based broadcast wakeups (used for deep C-states): prefer
 * per-channel MSI delivery, falling back to the legacy-replacement
 * route on timer 0.  No-op without a usable HPET or when already done.
 */
void __init hpet_broadcast_init(void)
{
    u64 hpet_rate = hpet_setup();
    u32 hpet_id, cfg;
    unsigned int i, n;

    if ( hpet_rate == 0 || hpet_broadcast_is_available() )
        return;

    cfg = hpet_read32(HPET_CFG);

    hpet_fsb_cap_lookup();
    if ( num_hpets_used > 0 )
    {
        /* Stop HPET legacy interrupts */
        cfg &= ~HPET_CFG_LEGACY;
        n = num_hpets_used;
    }
    else
    {
        /* No MSI channels: fall back to legacy replacement if supported. */
        hpet_id = hpet_read32(HPET_ID);
        if ( !(hpet_id & HPET_ID_LEGSUP) )
            return;

        if ( !hpet_events )
            hpet_events = xzalloc(struct hpet_event_channel);
        if ( !hpet_events || !zalloc_cpumask_var(&hpet_events->cpumask) )
            return;
        hpet_events->msi.irq = -1;

        /* Start HPET legacy interrupts */
        cfg |= HPET_CFG_LEGACY;
        n = 1;

        /* Unless forced on, drop legacy broadcast once the RTC is used. */
        if ( !force_hpet_broadcast )
            pv_rtc_handler = handle_rtc_once;
    }

    hpet_write32(cfg, HPET_CFG);

    for ( i = 0; i < n; i++ )
    {
        if ( i == 0 && (cfg & HPET_CFG_LEGACY) )
        {
            /* set HPET T0 as oneshot */
            cfg = hpet_read32(HPET_Tn_CFG(0));
            cfg &= ~(HPET_TN_LEVEL | HPET_TN_PERIODIC);
            cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
            hpet_write32(cfg, HPET_Tn_CFG(0));
        }

        /*
         * The period is a femto seconds value. We need to calculate the scaled
         * math multiplication factor for nanosecond to hpet tick conversion.
         */
        hpet_events[i].mult = div_sc((unsigned long)hpet_rate,
                                     1000000000ul, 32);
        hpet_events[i].shift = 32;
        hpet_events[i].next_event = STIME_MAX;
        spin_lock_init(&hpet_events[i].lock);
        /* Make channel state visible before publishing the handler. */
        wmb();
        hpet_events[i].event_handler = handle_hpet_broadcast;

        hpet_events[i].msi.msi_attrib.maskbit = 1;
        hpet_events[i].msi.msi_attrib.pos = MSI_TYPE_HPET;
    }

    if ( !num_hpets_used )
        hpet_events->flags = HPET_EVT_LEGACY;
}
621
622
/*
 * Re-initialise broadcast channels after resume: restore global/legacy
 * routing and reprogram each comparator (oneshot, 32-bit, FSB where not
 * the legacy channel).
 */
void hpet_broadcast_resume(void)
{
    u32 cfg;
    unsigned int i, n;

    if ( !hpet_events )
        return;

    hpet_resume(NULL);

    cfg = hpet_read32(HPET_CFG);

    if ( num_hpets_used > 0 )
    {
        /* Stop HPET legacy interrupts */
        cfg &= ~HPET_CFG_LEGACY;
        n = num_hpets_used;
    }
    else if ( hpet_events->flags & HPET_EVT_DISABLE )
        return;
    else
    {
        /* Start HPET legacy interrupts */
        cfg |= HPET_CFG_LEGACY;
        n = 1;
    }

    hpet_write32(cfg, HPET_CFG);

    for ( i = 0; i < n; i++ )
    {
        /* Re-write the MSI route for MSI channels (irq >= 0). */
        if ( hpet_events[i].msi.irq >= 0 )
            __hpet_setup_msi_irq(irq_to_desc(hpet_events[i].msi.irq));

        /* set HPET Tn as oneshot */
        cfg = hpet_read32(HPET_Tn_CFG(hpet_events[i].idx));
        cfg &= ~(HPET_TN_LEVEL | HPET_TN_PERIODIC);
        cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
        if ( !(hpet_events[i].flags & HPET_EVT_LEGACY) )
            cfg |= HPET_TN_FSB;
        hpet_write32(cfg, HPET_Tn_CFG(hpet_events[i].idx));

        hpet_events[i].next_event = STIME_MAX;
    }
}
667
668
/*
 * Permanently turn off legacy-replacement broadcast (e.g. once the RTC
 * is in use) and kick all online CPUs so none keeps relying on it.
 */
void hpet_disable_legacy_broadcast(void)
{
    u32 cfg;
    unsigned long flags;

    if ( !hpet_events || !(hpet_events->flags & HPET_EVT_LEGACY) )
        return;

    spin_lock_irqsave(&hpet_events->lock, flags);

    /* Mark disabled first so reprogram_hpet_evt_channel() bails out. */
    hpet_events->flags |= HPET_EVT_DISABLE;

    /* disable HPET T0 */
    cfg = hpet_read32(HPET_Tn_CFG(0));
    cfg &= ~HPET_TN_ENABLE;
    hpet_write32(cfg, HPET_Tn_CFG(0));

    /* Stop HPET legacy interrupts */
    cfg = hpet_read32(HPET_CFG);
    cfg &= ~HPET_CFG_LEGACY;
    hpet_write32(cfg, HPET_CFG);

    spin_unlock_irqrestore(&hpet_events->lock, flags);

    smp_send_event_check_mask(&cpu_online_map);
}
694
695
/*
 * Entry to a deep C-state: hand this CPU's next timer deadline over to
 * its HPET broadcast channel and stop the local APIC timer.
 */
void hpet_broadcast_enter(void)
{
    unsigned int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
    s_time_t deadline = per_cpu(timer_deadline, cpu);

    if ( deadline == 0 )
        return;

    if ( !ch )
        ch = hpet_get_channel(cpu);

    ASSERT(!local_irq_is_enabled());

    /* MSI channels are shared dynamically; the legacy channel is not. */
    if ( !(ch->flags & HPET_EVT_LEGACY) )
        hpet_attach_channel(cpu, ch);

    /* Disable LAPIC timer interrupts. */
    disable_APIC_timer();
    cpumask_set_cpu(cpu, ch->cpumask);

    spin_lock(&ch->lock);
    /*
     * Reprogram if current cpu expire time is nearer.  deadline is never
     * written by a remote cpu, so the value read earlier is still valid.
     */
    if ( deadline < ch->next_event )
        reprogram_hpet_evt_channel(ch, deadline, NOW(), 1);
    spin_unlock(&ch->lock);
}
725
726
/*
 * Exit from a deep C-state: re-enable the local APIC timer, restore
 * this CPU's deadline to it, and leave the broadcast channel.
 */
void hpet_broadcast_exit(void)
{
    unsigned int cpu = smp_processor_id();
    struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
    s_time_t deadline = per_cpu(timer_deadline, cpu);

    if ( deadline == 0 )
        return;

    if ( !ch )
        ch = hpet_get_channel(cpu);

    /* Reprogram the deadline; trigger timer work now if it has passed. */
    enable_APIC_timer();
    if ( !reprogram_timer(deadline) )
        raise_softirq(TIMER_SOFTIRQ);

    cpumask_clear_cpu(cpu, ch->cpumask);

    if ( !(ch->flags & HPET_EVT_LEGACY) )
        hpet_detach_channel(cpu, ch);
}
748
749
int hpet_broadcast_is_available(void)
750
0
{
751
0
    return ((hpet_events && (hpet_events->flags & HPET_EVT_LEGACY))
752
0
            || num_hpets_used > 0);
753
0
}
754
755
/*
 * Handler for the legacy-replacement HPET tick.  Returns 1 when the
 * tick was consumed by the broadcast machinery, 0 when legacy broadcast
 * is not active (set up and not disabled).
 */
int hpet_legacy_irq_tick(void)
{
    this_cpu(irq_count)--;

    /* Require HPET_EVT_LEGACY set and HPET_EVT_DISABLE clear. */
    if ( !hpet_events ||
         (hpet_events->flags & (HPET_EVT_DISABLE|HPET_EVT_LEGACY)) !=
         HPET_EVT_LEGACY )
        return 0;
    hpet_events->event_handler(hpet_events);
    return 1;
}
766
767
/* Boot-time register snapshot; restored by hpet_disable(). */
static u32 *hpet_boot_cfg;

/*
 * Map and sanity-check the HPET, returning its counter frequency in Hz
 * (0 when no usable HPET exists).  Idempotent: the rate is computed
 * once and cached.
 */
u64 __init hpet_setup(void)
{
    static u64 __initdata hpet_rate;
    u32 hpet_id, hpet_period;
    unsigned int last;

    if ( hpet_rate )
        return hpet_rate;

    if ( hpet_address == 0 )
        return 0;

    set_fixmap_nocache(FIX_HPET_BASE, hpet_address);

    hpet_id = hpet_read32(HPET_ID);
    if ( (hpet_id & HPET_ID_REV) == 0 )
    {
        printk("BAD HPET revision id.\n");
        return 0;
    }

    /* Check for sane period (100ps <= period <= 100ns). */
    hpet_period = hpet_read32(HPET_PERIOD);
    if ( (hpet_period > 100000000) || (hpet_period < 100000) )
    {
        printk("BAD HPET period %u.\n", hpet_period);
        return 0;
    }

    /* One slot for the global config plus one per channel; allocation
     * failure is tolerated (hpet_resume() accepts NULL). */
    last = (hpet_id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
    hpet_boot_cfg = xmalloc_array(u32, 2 + last);
    hpet_resume(hpet_boot_cfg);

    /* rate = 10^15 fs/s divided by the period in femtoseconds. */
    hpet_rate = 1000000000000000ULL; /* 10^15 */
    (void)do_div(hpet_rate, hpet_period);

    return hpet_rate;
}
807
808
/*
 * (Re-)enable the HPET, scrubbing enable/reserved bits in the global
 * and per-channel config registers.  When 'boot_cfg' is non-NULL the
 * pre-existing values are saved there ([0] = global config, [i+1] =
 * channel i) for later restore by hpet_disable().  No-op unless a
 * system reset happened since the last call.
 */
void hpet_resume(u32 *boot_cfg)
{
    static u32 system_reset_latch;
    u32 hpet_id, cfg;
    unsigned int i, last;

    if ( system_reset_latch == system_reset_counter )
        return;
    system_reset_latch = system_reset_counter;

    cfg = hpet_read32(HPET_CFG);
    if ( boot_cfg )
        *boot_cfg = cfg;
    cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
    if ( cfg )
    {
        /* Anything left set is a reserved bit: warn and clear. */
        printk(XENLOG_WARNING
               "HPET: reserved bits %#x set in global config register\n",
               cfg);
        cfg = 0;
    }
    hpet_write32(cfg, HPET_CFG);

    hpet_id = hpet_read32(HPET_ID);
    last = (hpet_id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
    for ( i = 0; i <= last; ++i )
    {
        cfg = hpet_read32(HPET_Tn_CFG(i));
        if ( boot_cfg )
            boot_cfg[i + 1] = cfg;
        cfg &= ~HPET_TN_ENABLE;
        if ( cfg & HPET_TN_RESERVED )
        {
            printk(XENLOG_WARNING
                   "HPET: reserved bits %#x set in channel %u config register\n",
                   cfg & HPET_TN_RESERVED, i);
            cfg &= ~HPET_TN_RESERVED;
        }
        hpet_write32(cfg, HPET_Tn_CFG(i));
    }

    /* Finally start the main counter. */
    cfg = hpet_read32(HPET_CFG);
    cfg |= HPET_CFG_ENABLE;
    hpet_write32(cfg, HPET_CFG);
}
853
854
/*
 * Disable the HPET.  If a boot-time snapshot exists, restore the
 * firmware-provided register state; otherwise just tear down legacy
 * broadcast if it was in use.
 */
void hpet_disable(void)
{
    unsigned int i;
    u32 id;

    if ( !hpet_boot_cfg )
    {
        if ( hpet_broadcast_is_available() )
            hpet_disable_legacy_broadcast();
        return;
    }

    /* Stop the main counter before restoring channel configs. */
    hpet_write32(*hpet_boot_cfg & ~HPET_CFG_ENABLE, HPET_CFG);

    id = hpet_read32(HPET_ID);
    for ( i = 0; i <= ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); ++i )
        hpet_write32(hpet_boot_cfg[i + 1], HPET_Tn_CFG(i));

    /* Re-enable only if firmware had it enabled at boot. */
    if ( *hpet_boot_cfg & HPET_CFG_ENABLE )
        hpet_write32(*hpet_boot_cfg, HPET_CFG);
}