Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/hvm/vpt.c
Line   Count  Source
   1          /*
   2           * vpt.c: Virtual Platform Timer
   3           *
   4           * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
   5           *
   6           * This program is free software; you can redistribute it and/or modify it
   7           * under the terms and conditions of the GNU General Public License,
   8           * version 2, as published by the Free Software Foundation.
   9           *
  10           * This program is distributed in the hope it will be useful, but WITHOUT
  11           * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12           * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13           * more details.
  14           *
  15           * You should have received a copy of the GNU General Public License along with
  16           * this program; If not, see <http://www.gnu.org/licenses/>.
  17           */
  18
  19          #include <xen/time.h>
  20          #include <asm/hvm/support.h>
  21          #include <asm/hvm/vpt.h>
  22          #include <asm/event.h>
  23          #include <asm/apic.h>
  24          #include <asm/mc146818rtc.h>
  25
  26          #define mode_is(d, name) \
  27   4.77M      ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
  28
  29          void hvm_init_guest_time(struct domain *d)
  30       1  {
  31       1      struct pl_time *pl = d->arch.hvm_domain.pl_time;
  32       1
  33       1      spin_lock_init(&pl->pl_time_lock);
  34       1      pl->stime_offset = -(u64)get_s_time();
  35       1      pl->last_guest_time = 0;
  36       1  }
  37
  38          u64 hvm_get_guest_time_fixed(struct vcpu *v, u64 at_tsc)
  39       0  {
  40       0      struct pl_time *pl = v->domain->arch.hvm_domain.pl_time;
  41       0      u64 now;
  42       0
  43       0      /* Called from device models shared with PV guests. Be careful. */
  44       0      ASSERT(is_hvm_vcpu(v));
  45       0
  46       0      spin_lock(&pl->pl_time_lock);
  47       0      now = get_s_time_fixed(at_tsc) + pl->stime_offset;
  48       0
  49       0      if ( !at_tsc )
  50       0      {
  51       0          if ( (int64_t)(now - pl->last_guest_time) > 0 )
  52       0              pl->last_guest_time = now;
  53       0          else
  54       0              now = ++pl->last_guest_time;
  55       0      }
  56       0      spin_unlock(&pl->pl_time_lock);
  57       0
  58       0      return now + v->arch.hvm_vcpu.stime_offset;
  59       0  }
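
Note: the if/else at lines 51-54 is what keeps guest time monotonic for
concurrent readers: a reading that does not advance past the last value
handed out is bumped one nanosecond beyond it. A self-contained sketch of
the same pattern (the names here are illustrative; in the code above the
state is pl->last_guest_time, protected by pl_time_lock):

    #include <stdint.h>

    static uint64_t last_guest_time;   /* pl->last_guest_time in vpt.c */

    /* Return a time value that never goes backwards across callers.
     * Must be called with the protecting lock held, as vpt.c does. */
    static uint64_t monotonic_now(uint64_t now)
    {
        if ( (int64_t)(now - last_guest_time) > 0 )
            last_guest_time = now;      /* clock advanced: publish it */
        else
            now = ++last_guest_time;    /* stale reading: step 1ns forward */
        return now;
    }
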
  60
  61          void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
  62       0  {
  63       0      u64 offset = guest_time - hvm_get_guest_time(v);
  64       0
  65       0      if ( offset )
  66       0      {
  67       0          v->arch.hvm_vcpu.stime_offset += offset;
  68       0          /*
  69       0           * If hvm_vcpu.stime_offset is updated make sure to
  70       0           * also update vcpu time, since this value is used to
  71       0           * calculate the TSC.
  72       0           */
  73       0          if ( v == current )
  74       0              update_vcpu_system_time(v);
  75       0      }
  76       0  }
  77
  78          static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
  79       0  {
  80       0      struct vcpu *v = pt->vcpu;
  81       0      unsigned int gsi, isa_irq;
  82       0      int vector;
  83       0
  84       0      if ( pt->source == PTSRC_lapic )
  85       0          return pt->irq;
  86       0
  87       0      isa_irq = pt->irq;
  88       0      gsi = hvm_isa_irq_to_gsi(isa_irq);
  89       0
  90       0      if ( src == hvm_intsrc_pic )
  91       0          return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
  92       0                  + (isa_irq & 7));
  93       0
  94       0      ASSERT(src == hvm_intsrc_lapic);
  95       0      vector = vioapic_get_vector(v->domain, gsi);
  96       0      if ( vector < 0 )
  97       0      {
  98       0          dprintk(XENLOG_WARNING, "d%u: invalid GSI (%u) for platform timer\n",
  99       0                  v->domain->domain_id, gsi);
 100       0          domain_crash(v->domain);
 101       0          return -1;
 102       0      }
 103       0
 104       0      return vector;
 105       0  }
 106
 107          static int pt_irq_masked(struct periodic_time *pt)
 108       0  {
 109       0      struct vcpu *v = pt->vcpu;
 110       0      unsigned int gsi, isa_irq;
 111       0      int mask;
 112       0      uint8_t pic_imr;
 113       0
 114       0      if ( pt->source == PTSRC_lapic )
 115       0      {
 116       0          struct vlapic *vlapic = vcpu_vlapic(v);
 117       0          return (!vlapic_enabled(vlapic) ||
 118       0                  (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_MASKED));
 119       0      }
 120       0
 121       0      isa_irq = pt->irq;
 122       0      gsi = hvm_isa_irq_to_gsi(isa_irq);
 123       0      pic_imr = v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr;
 124       0      mask = vioapic_get_mask(v->domain, gsi);
 125       0      if ( mask < 0 )
 126       0      {
 127       0          dprintk(XENLOG_WARNING, "d%u: invalid GSI (%u) for platform timer\n",
 128       0                  v->domain->domain_id, gsi);
 129       0          domain_crash(v->domain);
 130       0          return -1;
 131       0      }
 132       0
 133       0      return (((pic_imr & (1 << (isa_irq & 7))) || !vlapic_accept_pic_intr(v)) &&
 134       0              mask);
 135       0  }
 136
 137          static void pt_lock(struct periodic_time *pt)
 138       0  {
 139       0      struct vcpu *v;
 140       0
 141       0      for ( ; ; )
 142       0      {
 143       0          v = pt->vcpu;
 144       0          spin_lock(&v->arch.hvm_vcpu.tm_lock);
 145       0          if ( likely(pt->vcpu == v) )
 146       0              break;
 147       0          spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 148       0      }
 149       0  }
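
Note: pt_lock() cannot simply take pt->vcpu->arch.hvm_vcpu.tm_lock in one
step, because the owning vCPU can be retargeted concurrently (see
pt_adjust_vcpu() further down), so it locks the owner it sampled and then
revalidates the pointer. A self-contained sketch of the same
lock-then-revalidate pattern, with illustrative names (a real
implementation would also read the owner pointer atomically):

    #include <pthread.h>

    struct owner  { pthread_mutex_t lock; };
    struct object { struct owner *owner; };  /* owner may change concurrently */

    static struct owner *lock_owner(struct object *obj)
    {
        for ( ; ; )
        {
            struct owner *o = obj->owner;     /* sample the current owner */
            pthread_mutex_lock(&o->lock);
            if ( obj->owner == o )            /* unchanged: this lock is valid */
                return o;
            pthread_mutex_unlock(&o->lock);   /* retargeted meanwhile: retry */
        }
    }
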
 150
 151          static void pt_unlock(struct periodic_time *pt)
 152       0  {
 153       0      spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
 154       0  }
 155
 156          static void pt_process_missed_ticks(struct periodic_time *pt)
 157       0  {
 158       0      s_time_t missed_ticks, now = NOW();
 159       0
 160       0      if ( pt->one_shot )
 161       0          return;
 162       0
 163       0      missed_ticks = now - pt->scheduled;
 164       0      if ( missed_ticks <= 0 )
 165       0          return;
 166       0
 167       0      missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
 168       0      if ( mode_is(pt->vcpu->domain, no_missed_ticks_pending) )
 169       0          pt->do_not_freeze = !pt->pending_intr_nr;
 170       0      else
 171       0          pt->pending_intr_nr += missed_ticks;
 172       0      pt->scheduled += missed_ticks * pt->period;
 173       0  }
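
Note: a worked example of the catch-up arithmetic above, with illustrative
values (in no_missed_ticks_pending mode the owed ticks are dropped instead
of being added to pending_intr_nr):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t period    = 10000000;  /* 10 ms tick, in ns */
        int64_t scheduled = 0;         /* the tick we should have delivered */
        int64_t now       = 25000000;  /* we noticed it 25 ms late */

        int64_t missed  = (now - scheduled) / period + 1;  /* 25/10 + 1 = 3 */
        int64_t pending = missed;      /* 3 interrupts now owed to the guest */
        scheduled += missed * period;  /* rearm at 30 ms, safely after 'now' */

        printf("pending=%" PRId64 ", next tick at %" PRId64 " ns\n",
               pending, scheduled);
        return 0;
    }

The "+ 1" rounds up, so the rearmed pt->scheduled always lands strictly in
the future.
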
 174
 175          static void pt_freeze_time(struct vcpu *v)
 176       0  {
 177       0      if ( !mode_is(v->domain, delay_for_missed_ticks) )
 178       0          return;
 179       0
 180       0      v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
 181       0  }
 182
 183          static void pt_thaw_time(struct vcpu *v)
 184   4.77M  {
 185   4.77M      if ( !mode_is(v->domain, delay_for_missed_ticks) )
 186       0          return;
 187   4.77M
 188   4.77M      if ( v->arch.hvm_vcpu.guest_time == 0 )
 189   4.77M          return;
 190   4.77M
 191   18.4E      hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
 192   18.4E      v->arch.hvm_vcpu.guest_time = 0;
 193   18.4E  }
 194
 195          void pt_save_timer(struct vcpu *v)
 196       0  {
 197       0      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
 198       0      struct periodic_time *pt;
 199       0
 200       0      if ( v->pause_flags & VPF_blocked )
 201       0          return;
 202       0
 203       0      spin_lock(&v->arch.hvm_vcpu.tm_lock);
 204       0
 205       0      list_for_each_entry ( pt, head, list )
 206       0          if ( !pt->do_not_freeze )
 207       0              stop_timer(&pt->timer);
 208       0
 209       0      pt_freeze_time(v);
 210       0
 211       0      spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 212       0  }
 213
 214          void pt_restore_timer(struct vcpu *v)
 215   4.57M  {
 216   4.57M      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
 217   4.57M      struct periodic_time *pt;
 218   4.57M
 219   4.57M      spin_lock(&v->arch.hvm_vcpu.tm_lock);
 220   4.57M
 221   4.57M      list_for_each_entry ( pt, head, list )
 222       0      {
 223       0          if ( pt->pending_intr_nr == 0 )
 224       0          {
 225       0              pt_process_missed_ticks(pt);
 226       0              set_timer(&pt->timer, pt->scheduled);
 227       0          }
 228       0      }
 229   4.57M
 230   4.57M      pt_thaw_time(v);
 231   4.57M
 232   4.57M      spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 233   4.57M  }
 234
 235          static void pt_timer_fn(void *data)
 236       0  {
 237       0      struct periodic_time *pt = data;
 238       0
 239       0      pt_lock(pt);
 240       0
 241       0      pt->pending_intr_nr++;
 242       0      pt->scheduled += pt->period;
 243       0      pt->do_not_freeze = 0;
 244       0
 245       0      vcpu_kick(pt->vcpu);
 246       0
 247       0      pt_unlock(pt);
 248       0  }
 249
 250          int pt_update_irq(struct vcpu *v)
 251   9.88M  {
 252   9.88M      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
 253   9.88M      struct periodic_time *pt, *temp, *earliest_pt;
 254   9.88M      uint64_t max_lag;
 255   9.88M      int irq, is_lapic;
 256   9.88M
 257   9.88M      spin_lock(&v->arch.hvm_vcpu.tm_lock);
 258   9.88M
 259   9.88M      earliest_pt = NULL;
 260   9.88M      max_lag = -1ULL;
 261   9.88M      list_for_each_entry_safe ( pt, temp, head, list )
 262       0      {
 263       0          if ( pt->pending_intr_nr )
 264       0          {
 265       0              /* RTC code takes care of disabling the timer itself. */
 266       0              if ( (pt->irq != RTC_IRQ || !pt->priv) && pt_irq_masked(pt) )
 267       0              {
 268       0                  /* suspend timer emulation */
 269       0                  list_del(&pt->list);
 270       0                  pt->on_list = 0;
 271       0              }
 272       0              else
 273       0              {
 274       0                  if ( (pt->last_plt_gtime + pt->period) < max_lag )
 275       0                  {
 276       0                      max_lag = pt->last_plt_gtime + pt->period;
 277       0                      earliest_pt = pt;
 278       0                  }
 279       0              }
 280       0          }
 281       0      }
 282   9.88M
 283   9.88M      if ( earliest_pt == NULL )
 284   9.93M      {
 285   9.93M          spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 286   9.93M          return -1;
 287   9.93M      }
 288   9.88M
 289   18.4E      earliest_pt->irq_issued = 1;
 290   18.4E      irq = earliest_pt->irq;
 291   18.4E      is_lapic = (earliest_pt->source == PTSRC_lapic);
 292   18.4E
 293   18.4E      spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 294   18.4E
 295   18.4E      if ( is_lapic )
 296       0          vlapic_set_irq(vcpu_vlapic(v), irq, 0);
 297   18.4E      else
 298   18.4E      {
 299   18.4E          hvm_isa_irq_deassert(v->domain, irq);
 300   18.4E          hvm_isa_irq_assert(v->domain, irq);
 301   18.4E      }
 302   18.4E
 303   18.4E      /*
 304   18.4E       * If the periodic timer interrupt is handled by the lapic, its vector
 305   18.4E       * in IRR is returned and used to set eoi_exit_bitmap for the virtual
 306   18.4E       * interrupt delivery case. Otherwise return -1 to do nothing.
 307   18.4E       */
 308   18.4E      if ( !is_lapic &&
 309       0           platform_legacy_irq(irq) && vlapic_accept_pic_intr(v) &&
 310       0           (&v->domain->arch.hvm_domain)->vpic[irq >> 3].int_output )
 311       0          return -1;
 312   18.4E      else
 313   18.4E          return pt_irq_vector(earliest_pt, hvm_intsrc_lapic);
 314   18.4E  }
 315
 316          static struct periodic_time *is_pt_irq(
 317              struct vcpu *v, struct hvm_intack intack)
 318   4.20k  {
 319   4.20k      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
 320   4.20k      struct periodic_time *pt;
 321   4.20k
 322   4.20k      list_for_each_entry ( pt, head, list )
 323       0      {
 324       0          if ( pt->pending_intr_nr && pt->irq_issued &&
 325       0               (intack.vector == pt_irq_vector(pt, intack.source)) )
 326       0              return pt;
 327       0      }
 328   4.20k
 329   4.20k      return NULL;
 330   4.20k  }
 331
 332          void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
 333    101k  {
 334    101k      struct periodic_time *pt;
 335    101k      time_cb *cb;
 336    101k      void *cb_priv;
 337    101k
 338    101k      if ( intack.source == hvm_intsrc_vector )
 339   97.8k          return;
 340    101k
 341   4.09k      spin_lock(&v->arch.hvm_vcpu.tm_lock);
 342   4.09k
 343   4.09k      pt = is_pt_irq(v, intack);
 344   4.09k      if ( pt == NULL )
 345   4.21k      {
 346   4.21k          spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 347   4.21k          return;
 348   4.21k      }
 349   4.09k
 350   18.4E      pt->irq_issued = 0;
 351   18.4E
 352   18.4E      if ( pt->one_shot )
 353       0      {
 354       0          if ( pt->on_list )
 355       0              list_del(&pt->list);
 356       0          pt->on_list = 0;
 357       0          pt->pending_intr_nr = 0;
 358       0      }
 359   18.4E      else if ( mode_is(v->domain, one_missed_tick_pending) ||
 360       0                mode_is(v->domain, no_missed_ticks_pending) )
 361       0      {
 362       0          pt->last_plt_gtime = hvm_get_guest_time(v);
 363       0          pt_process_missed_ticks(pt);
 364       0          pt->pending_intr_nr = 0; /* 'collapse' all missed ticks */
 365       0          set_timer(&pt->timer, pt->scheduled);
 366       0      }
 367   18.4E      else
 368   18.4E      {
 369   18.4E          pt->last_plt_gtime += pt->period;
 370   18.4E          if ( --pt->pending_intr_nr == 0 )
 371       0          {
 372       0              pt_process_missed_ticks(pt);
 373       0              if ( pt->pending_intr_nr == 0 )
 374       0                  set_timer(&pt->timer, pt->scheduled);
 375       0          }
 376   18.4E      }
 377   18.4E
 378   18.4E      if ( mode_is(v->domain, delay_for_missed_ticks) &&
 379       0           (hvm_get_guest_time(v) < pt->last_plt_gtime) )
 380       0          hvm_set_guest_time(v, pt->last_plt_gtime);
 381   18.4E
 382   18.4E      cb = pt->cb;
 383   18.4E      cb_priv = pt->priv;
 384   18.4E
 385   18.4E      spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 386   18.4E
 387   18.4E      if ( cb != NULL )
 388       0          cb(v, cb_priv);
 389   18.4E  }
 390
 391          void pt_migrate(struct vcpu *v)
 392     548  {
 393     548      struct list_head *head = &v->arch.hvm_vcpu.tm_list;
 394     548      struct periodic_time *pt;
 395     548
 396     548      spin_lock(&v->arch.hvm_vcpu.tm_lock);
 397     548
 398     548      list_for_each_entry ( pt, head, list )
 399       0          migrate_timer(&pt->timer, v->processor);
 400     548
 401     548      spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 402     548  }
 403
 404          void create_periodic_time(
 405              struct vcpu *v, struct periodic_time *pt, uint64_t delta,
 406              uint64_t period, uint8_t irq, time_cb *cb, void *data)
 407       0  {
 408       0      ASSERT(pt->source != 0);
 409       0
 410       0      destroy_periodic_time(pt);
 411       0
 412       0      spin_lock(&v->arch.hvm_vcpu.tm_lock);
 413       0
 414       0      pt->pending_intr_nr = 0;
 415       0      pt->do_not_freeze = 0;
 416       0      pt->irq_issued = 0;
 417       0
 418       0      /* Periodic timer must be at least 0.1ms. */
 419       0      if ( (period < 100000) && period )
 420       0      {
 421       0          if ( !test_and_set_bool(pt->warned_timeout_too_short) )
 422       0              gdprintk(XENLOG_WARNING, "HVM_PlatformTime: program too "
 423       0                       "small period %"PRIu64"\n", period);
 424       0          period = 100000;
 425       0      }
 426       0
 427       0      pt->period = period;
 428       0      pt->vcpu = v;
 429       0      pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
 430       0      pt->irq = irq;
 431       0      pt->one_shot = !period;
 432       0      pt->scheduled = NOW() + delta;
 433       0
 434       0      if ( !pt->one_shot )
 435       0      {
 436       0          if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VPT_ALIGN] )
 437       0          {
 438       0              pt->scheduled = align_timer(pt->scheduled, pt->period);
 439       0          }
 440       0          else if ( pt->source == PTSRC_lapic )
 441       0          {
 442       0              /*
 443       0               * Offset LAPIC ticks from other timer ticks. Otherwise guests
 444       0               * which use LAPIC ticks for process accounting can see long
 445       0               * sequences of process ticks incorrectly accounted to interrupt
 446       0               * processing (seen with RHEL3 guest).
 447       0               */
 448       0              pt->scheduled += delta >> 1;
 449       0          }
 450       0      }
 451       0
 452       0      pt->cb = cb;
 453       0      pt->priv = data;
 454       0
 455       0      pt->on_list = 1;
 456       0      list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
 457       0
 458       0      init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
 459       0      set_timer(&pt->timer, pt->scheduled);
 460       0
 461       0      spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 462       0  }
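
Note: a minimal sketch of how an emulated platform device would drive this
API; the real callers are the vPIT/vRTC/vHPET/vLAPIC models elsewhere in
the tree, and my_tick_cb/my_device_start/my_device_stop are hypothetical
names used only for illustration:

    /* time_cb has the signature used by the cb(v, cb_priv) call above. */
    static void my_tick_cb(struct vcpu *v, void *priv)
    {
        /* per-tick device bookkeeping, run after the interrupt is posted */
    }

    static void my_device_start(struct vcpu *v, struct periodic_time *pt)
    {
        pt->source = PTSRC_isa;        /* must be set: creation ASSERTs source != 0 */
        create_periodic_time(v, pt,
                             0,        /* delta: first tick due immediately */
                             10000000, /* period: 10 ms, above the 0.1 ms floor */
                             8,        /* ISA IRQ (de)asserted on each tick */
                             my_tick_cb, NULL);
    }

    static void my_device_stop(struct periodic_time *pt)
    {
        destroy_periodic_time(pt);     /* safe even if never created, see below */
    }

A period of 0 would instead create a one-shot timer, and periods under
100000 ns are clamped with the warning printed at lines 422-423.
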
 463
 464          void destroy_periodic_time(struct periodic_time *pt)
 465      24  {
 466      24      /* Was this structure previously initialised by create_periodic_time()? */
 467      24      if ( pt->vcpu == NULL )
 468      24          return;
 469      24
 470       0      pt_lock(pt);
 471       0      if ( pt->on_list )
 472       0          list_del(&pt->list);
 473       0      pt->on_list = 0;
 474       0      pt->pending_intr_nr = 0;
 475       0      pt_unlock(pt);
 476       0
 477       0      /*
 478       0       * pt_timer_fn() can run until this kill_timer() returns. We must do this
 479       0       * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
 480       0       */
 481       0      kill_timer(&pt->timer);
 482       0  }
 483
 484          static void pt_adjust_vcpu(struct periodic_time *pt, struct vcpu *v)
 485       0  {
 486       0      int on_list;
 487       0
 488       0      ASSERT(pt->source == PTSRC_isa);
 489       0
 490       0      if ( pt->vcpu == NULL )
 491       0          return;
 492       0
 493       0      pt_lock(pt);
 494       0      on_list = pt->on_list;
 495       0      if ( pt->on_list )
 496       0          list_del(&pt->list);
 497       0      pt->on_list = 0;
 498       0      pt_unlock(pt);
 499       0
 500       0      spin_lock(&v->arch.hvm_vcpu.tm_lock);
 501       0      pt->vcpu = v;
 502       0      if ( on_list )
 503       0      {
 504       0          pt->on_list = 1;
 505       0          list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
 506       0
 507       0          migrate_timer(&pt->timer, v->processor);
 508       0      }
 509       0      spin_unlock(&v->arch.hvm_vcpu.tm_lock);
 510       0  }
 511
 512          void pt_adjust_global_vcpu_target(struct vcpu *v)
 513       0  {
 514       0      struct PITState *vpit;
 515       0      struct pl_time *pl_time;
 516       0      int i;
 517       0
 518       0      if ( !v || !has_vpit(v->domain) )
 519       0          return;
 520       0
 521       0      vpit = &v->domain->arch.vpit;
 522       0
 523       0      spin_lock(&vpit->lock);
 524       0      pt_adjust_vcpu(&vpit->pt0, v);
 525       0      spin_unlock(&vpit->lock);
 526       0
 527       0      pl_time = v->domain->arch.hvm_domain.pl_time;
 528       0
 529       0      spin_lock(&pl_time->vrtc.lock);
 530       0      pt_adjust_vcpu(&pl_time->vrtc.pt, v);
 531       0      spin_unlock(&pl_time->vrtc.lock);
 532       0
 533       0      write_lock(&pl_time->vhpet.lock);
 534       0      for ( i = 0; i < HPET_TIMER_NUM; i++ )
 535       0          pt_adjust_vcpu(&pl_time->vhpet.pt[i], v);
 536       0      write_unlock(&pl_time->vhpet.lock);
 537       0  }
 538
 539
 540          static void pt_resume(struct periodic_time *pt)
 541   1.65k  {
 542   1.65k      if ( pt->vcpu == NULL )
 543   1.65k          return;
 544   1.65k
 545       0      pt_lock(pt);
 546       0      if ( pt->pending_intr_nr && !pt->on_list )
 547       0      {
 548       0          pt->on_list = 1;
 549       0          list_add(&pt->list, &pt->vcpu->arch.hvm_vcpu.tm_list);
 550       0          vcpu_kick(pt->vcpu);
 551       0      }
 552       0      pt_unlock(pt);
 553       0  }
 554
 555          void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt)
 556     329  {
 557     329      int i;
 558     329
 559     329      if ( d )
 560     329      {
 561     329          pt_resume(&d->arch.vpit.pt0);
 562     329          pt_resume(&d->arch.hvm_domain.pl_time->vrtc.pt);
 563   1.31k          for ( i = 0; i < HPET_TIMER_NUM; i++ )
 564     987              pt_resume(&d->arch.hvm_domain.pl_time->vhpet.pt[i]);
 565     329      }
 566     329
 567     329      if ( vlapic_pt )
 568      13          pt_resume(vlapic_pt);
 569     329  }