Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/hvm/pmtimer.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * hvm/pmtimer.c: emulation of the ACPI PM timer 
3
 *
4
 * Copyright (c) 2007, XenSource inc.
5
 * Copyright (c) 2006, Intel Corporation.
6
 *
7
 * This program is free software; you can redistribute it and/or modify it
8
 * under the terms and conditions of the GNU General Public License,
9
 * version 2, as published by the Free Software Foundation.
10
 *
11
 * This program is distributed in the hope it will be useful, but WITHOUT
12
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14
 * more details.
15
 *
16
 * You should have received a copy of the GNU General Public License along with
17
 * this program; If not, see <http://www.gnu.org/licenses/>.
18
 */
19
20
#include <asm/hvm/vpt.h>
21
#include <asm/hvm/io.h>
22
#include <asm/hvm/support.h>
23
#include <asm/acpi.h> /* for hvm_acpi_power_button prototype */
24
#include <public/hvm/params.h>
25
26
/* Slightly more readable port I/O addresses for the registers we intercept */
#define PM1a_STS_ADDR_V0 (ACPI_PM1A_EVT_BLK_ADDRESS_V0)
#define PM1a_EN_ADDR_V0  (ACPI_PM1A_EVT_BLK_ADDRESS_V0 + 2)
#define TMR_VAL_ADDR_V0  (ACPI_PM_TMR_BLK_ADDRESS_V0)
#define PM1a_STS_ADDR_V1 (ACPI_PM1A_EVT_BLK_ADDRESS_V1)
#define PM1a_EN_ADDR_V1  (ACPI_PM1A_EVT_BLK_ADDRESS_V1 + 2)
#define TMR_VAL_ADDR_V1  (ACPI_PM_TMR_BLK_ADDRESS_V1)

/* The interesting bits of the PM1a_STS register */
#define TMR_STS    (1 << 0)
#define GBL_STS    (1 << 5)
#define PWRBTN_STS (1 << 8)
#define SLPBTN_STS (1 << 9)

/* The same in PM1a_EN */
#define TMR_EN     (1 << 0)
#define GBL_EN     (1 << 5)
#define PWRBTN_EN  (1 << 8)
#define SLPBTN_EN  (1 << 9)

/* Mask of bits in PM1a_STS that can generate an SCI. */
#define SCI_MASK (TMR_STS|PWRBTN_STS|SLPBTN_STS|GBL_STS)

/* SCI IRQ number (must match SCI_INT number in ACPI FADT in hvmloader) */
#define SCI_IRQ 9

/* We provide a 32-bit counter (must match the TMR_VAL_EXT bit in the FADT) */
#define TMR_VAL_MASK  (0xffffffff)
#define TMR_VAL_MSB   (0x80000000)
55
56
/* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
57
static void pmt_update_sci(PMTState *s)
58
0
{
59
0
    struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm_domain.acpi;
60
0
61
0
    ASSERT(spin_is_locked(&s->lock));
62
0
63
0
    if ( acpi->pm1a_en & acpi->pm1a_sts & SCI_MASK )
64
0
        hvm_isa_irq_assert(s->vcpu->domain, SCI_IRQ);
65
0
    else
66
0
        hvm_isa_irq_deassert(s->vcpu->domain, SCI_IRQ);
67
0
}
68
69
void hvm_acpi_power_button(struct domain *d)
70
0
{
71
0
    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
72
0
73
0
    if ( !has_vpm(d) )
74
0
        return;
75
0
76
0
    spin_lock(&s->lock);
77
0
    d->arch.hvm_domain.acpi.pm1a_sts |= PWRBTN_STS;
78
0
    pmt_update_sci(s);
79
0
    spin_unlock(&s->lock);
80
0
}
81
82
void hvm_acpi_sleep_button(struct domain *d)
83
0
{
84
0
    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
85
0
86
0
    if ( !has_vpm(d) )
87
0
        return;
88
0
89
0
    spin_lock(&s->lock);
90
0
    d->arch.hvm_domain.acpi.pm1a_sts |= PWRBTN_STS;
91
0
    pmt_update_sci(s);
92
0
    spin_unlock(&s->lock);
93
0
}
94
95
/* Set the correct value in the timer, accounting for time elapsed
96
 * since the last time we did that. */
97
static void pmt_update_time(PMTState *s)
98
0
{
99
0
    uint64_t curr_gtime, tmp;
100
0
    struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm_domain.acpi;
101
0
    uint32_t tmr_val = acpi->tmr_val, msb = tmr_val & TMR_VAL_MSB;
102
0
    
103
0
    ASSERT(spin_is_locked(&s->lock));
104
0
105
0
    /* Update the timer */
106
0
    curr_gtime = hvm_get_guest_time(s->vcpu);
107
0
    tmp = ((curr_gtime - s->last_gtime) * s->scale) + s->not_accounted;
108
0
    s->not_accounted = (uint32_t)tmp;
109
0
    tmr_val += tmp >> 32;
110
0
    tmr_val &= TMR_VAL_MASK;
111
0
    s->last_gtime = curr_gtime;
112
0
113
0
    /* Update timer value atomically wrt lock-free reads in handle_pmt_io(). */
114
0
    write_atomic(&acpi->tmr_val, tmr_val);
115
0
116
0
    /* If the counter's MSB has changed, set the status bit */
117
0
    if ( (tmr_val & TMR_VAL_MSB) != msb )
118
0
    {
119
0
        acpi->pm1a_sts |= TMR_STS;
120
0
        pmt_update_sci(s);
121
0
    }
122
0
}
123
124
/* This function should be called soon after each time the MSB of the
125
 * pmtimer register rolls over, to make sure we update the status
126
 * registers and SCI at least once per rollover */
127
static void pmt_timer_callback(void *opaque)
128
0
{
129
0
    PMTState *s = opaque;
130
0
    uint32_t pmt_cycles_until_flip;
131
0
    uint64_t time_until_flip;
132
0
133
0
    spin_lock(&s->lock);
134
0
135
0
    /* Recalculate the timer and make sure we get an SCI if we need one */
136
0
    pmt_update_time(s);
137
0
138
0
    /* How close are we to the next MSB flip? */
139
0
    pmt_cycles_until_flip = TMR_VAL_MSB -
140
0
        (s->vcpu->domain->arch.hvm_domain.acpi.tmr_val & (TMR_VAL_MSB - 1));
141
0
142
0
    /* Overall time between MSB flips */
143
0
    time_until_flip = (1000000000ULL << 23) / FREQUENCE_PMTIMER;
144
0
145
0
    /* Reduced appropriately */
146
0
    time_until_flip = (time_until_flip * pmt_cycles_until_flip) >> 23;
147
0
148
0
    /* Wake up again near the next bit-flip */
149
0
    set_timer(&s->timer, NOW() + time_until_flip + MILLISECS(1));
150
0
151
0
    spin_unlock(&s->lock);
152
0
}
153
154
/* Handle port I/O to the PM1a_STS and PM1a_EN registers */
155
static int handle_evt_io(
156
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
157
0
{
158
0
    struct vcpu *v = current;
159
0
    struct hvm_hw_acpi *acpi = &v->domain->arch.hvm_domain.acpi;
160
0
    PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
161
0
    uint32_t addr, data, byte;
162
0
    int i;
163
0
164
0
    addr = port -
165
0
        ((v->domain->arch.hvm_domain.params[
166
0
            HVM_PARAM_ACPI_IOPORTS_LOCATION] == 0) ?
167
0
         PM1a_STS_ADDR_V0 : PM1a_STS_ADDR_V1);
168
0
169
0
    spin_lock(&s->lock);
170
0
171
0
    if ( dir == IOREQ_WRITE )
172
0
    {
173
0
        /* Handle this I/O one byte at a time */
174
0
        for ( i = bytes, data = *val;
175
0
              i > 0;
176
0
              i--, addr++, data >>= 8 )
177
0
        {
178
0
            byte = data & 0xff;
179
0
            switch ( addr )
180
0
            {
181
0
                /* PM1a_STS register bits are write-to-clear */
182
0
            case 0 /* PM1a_STS_ADDR */:
183
0
                acpi->pm1a_sts &= ~byte;
184
0
                break;
185
0
            case 1 /* PM1a_STS_ADDR + 1 */:
186
0
                acpi->pm1a_sts &= ~(byte << 8);
187
0
                break;
188
0
            case 2 /* PM1a_EN_ADDR */:
189
0
                acpi->pm1a_en = (acpi->pm1a_en & 0xff00) | byte;
190
0
                break;
191
0
            case 3 /* PM1a_EN_ADDR + 1 */:
192
0
                acpi->pm1a_en = (acpi->pm1a_en & 0xff) | (byte << 8);
193
0
                break;
194
0
            default:
195
0
                gdprintk(XENLOG_WARNING, 
196
0
                         "Bad ACPI PM register write: %x bytes (%x) at %x\n", 
197
0
                         bytes, *val, port);
198
0
            }
199
0
        }
200
0
        /* Fix up the SCI state to match the new register state */
201
0
        pmt_update_sci(s);
202
0
    }
203
0
    else /* p->dir == IOREQ_READ */
204
0
    {
205
0
        data = acpi->pm1a_sts | ((uint32_t)acpi->pm1a_en << 16);
206
0
        data >>= 8 * addr;
207
0
        if ( bytes == 1 ) data &= 0xff;
208
0
        else if ( bytes == 2 ) data &= 0xffff;
209
0
        *val = data;
210
0
    }
211
0
212
0
    spin_unlock(&s->lock);
213
0
214
0
    return X86EMUL_OKAY;
215
0
}
216
217
218
/* Handle port I/O to the TMR_VAL register */
219
static int handle_pmt_io(
220
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
221
0
{
222
0
    struct vcpu *v = current;
223
0
    struct hvm_hw_acpi *acpi = &v->domain->arch.hvm_domain.acpi;
224
0
    PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
225
0
226
0
    if ( bytes != 4 || dir != IOREQ_READ )
227
0
    {
228
0
        gdprintk(XENLOG_WARNING, "HVM_PMT bad access\n");
229
0
        *val = ~0;
230
0
    }
231
0
    else if ( spin_trylock(&s->lock) )
232
0
    {
233
0
        /* We hold the lock: update timer value and return it. */
234
0
        pmt_update_time(s);
235
0
        *val = acpi->tmr_val;
236
0
        spin_unlock(&s->lock);
237
0
    }
238
0
    else
239
0
    {
240
0
        /*
241
0
         * Someone else is updating the timer: rather than do the work
242
0
         * again ourselves, wait for them to finish and then steal their
243
0
         * updated value with a lock-free atomic read.
244
0
         */
245
0
        spin_barrier(&s->lock);
246
0
        *val = read_atomic(&acpi->tmr_val);
247
0
    }
248
0
249
0
    return X86EMUL_OKAY;
250
0
}
251
252
/* Save the ACPI PM state (timer counter and PM1a registers) for migration. */
static int acpi_save(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
    uint32_t old_msb = acpi->tmr_val & TMR_VAL_MSB;
    uint32_t delta;
    int rc;

    if ( !has_vpm(d) )
        return 0;

    spin_lock(&s->lock);

    /*
     * Update the counter to the guest's current time.  Make sure it only
     * goes forwards.
     */
    delta = (((s->vcpu->arch.hvm_vcpu.guest_time ?:
               hvm_get_guest_time(s->vcpu)) -
              s->last_gtime) * s->scale) >> 32;
    if ( delta < 1UL<<31 )
        acpi->tmr_val += delta;
    if ( (acpi->tmr_val & TMR_VAL_MSB) != old_msb )
        acpi->pm1a_sts |= TMR_STS;

    /* No point in setting the SCI here because we'll already have saved the 
     * IRQ and *PIC state; we'll fix it up when we restore the domain */
    rc = hvm_save_entry(PMTIMER, 0, h, acpi);

    spin_unlock(&s->lock);

    return rc;
}
282
283
/* Restore the ACPI PM state saved by acpi_save() and resync the SCI. */
static int acpi_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
    int rc = 0;

    if ( !has_vpm(d) )
        return -ENODEV;

    spin_lock(&s->lock);

    /* Reload the registers */
    if ( hvm_load_entry(PMTIMER, h, acpi) )
        rc = -EINVAL;
    else
    {
        /* Calculate future counter values from now. */
        s->last_gtime = hvm_get_guest_time(s->vcpu);
        s->not_accounted = 0;

        /* Set the SCI state from the registers */
        pmt_update_sci(s);
    }

    spin_unlock(&s->lock);

    return rc;
}
311
312
/* One PMTIMER record, saved/restored once per domain. */
HVM_REGISTER_SAVE_RESTORE(PMTIMER, acpi_save, acpi_load,
                          1, HVMSR_PER_DOM);
314
315
int pmtimer_change_ioport(struct domain *d, unsigned int version)
316
0
{
317
0
    unsigned int old_version;
318
0
319
0
    if ( !has_vpm(d) )
320
0
        return -ENODEV;
321
0
322
0
    /* Check that version is changing. */
323
0
    old_version = d->arch.hvm_domain.params[HVM_PARAM_ACPI_IOPORTS_LOCATION];
324
0
    if ( version == old_version )
325
0
        return 0;
326
0
327
0
    /* Only allow changes between versions 0 and 1. */
328
0
    if ( (version ^ old_version) != 1 )
329
0
        return -EINVAL;
330
0
331
0
    if ( version == 1 )
332
0
    {
333
0
        /* Moving from version 0 to version 1. */
334
0
        relocate_portio_handler(d, TMR_VAL_ADDR_V0, TMR_VAL_ADDR_V1, 4);
335
0
        relocate_portio_handler(d, PM1a_STS_ADDR_V0, PM1a_STS_ADDR_V1, 4);
336
0
    }
337
0
    else
338
0
    {
339
0
        /* Moving from version 1 to version 0. */
340
0
        relocate_portio_handler(d, TMR_VAL_ADDR_V1, TMR_VAL_ADDR_V0, 4);
341
0
        relocate_portio_handler(d, PM1a_STS_ADDR_V1, PM1a_STS_ADDR_V0, 4);
342
0
    }
343
0
344
0
    return 0;
345
0
}
346
347
void pmtimer_init(struct vcpu *v)
348
1
{
349
1
    PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
350
1
351
1
    if ( !has_vpm(v->domain) )
352
1
        return;
353
1
354
0
    spin_lock_init(&s->lock);
355
0
356
0
    s->scale = ((uint64_t)FREQUENCE_PMTIMER << 32) / SYSTEM_TIME_HZ;
357
0
    s->not_accounted = 0;
358
0
    s->vcpu = v;
359
0
360
0
    /* Intercept port I/O (need two handlers because PM1a_CNT is between
361
0
     * PM1a_EN and TMR_VAL and is handled by qemu) */
362
0
    register_portio_handler(v->domain, TMR_VAL_ADDR_V0, 4, handle_pmt_io);
363
0
    register_portio_handler(v->domain, PM1a_STS_ADDR_V0, 4, handle_evt_io);
364
0
365
0
    /* Set up callback to fire SCIs when the MSB of TMR_VAL changes */
366
0
    init_timer(&s->timer, pmt_timer_callback, s, v->processor);
367
0
    pmt_timer_callback(s);
368
0
}
369
370
371
void pmtimer_deinit(struct domain *d)
372
0
{
373
0
    PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
374
0
375
0
    if ( !has_vpm(d) )
376
0
        return;
377
0
378
0
    kill_timer(&s->timer);
379
0
}
380
381
void pmtimer_reset(struct domain *d)
382
0
{
383
0
    if ( !has_vpm(d) )
384
0
        return;
385
0
386
0
    /* Reset the counter. */
387
0
    d->arch.hvm_domain.acpi.tmr_val = 0;
388
0
}
389
390
/*
391
 * Local variables:
392
 * mode: C
393
 * c-file-style: "BSD"
394
 * c-basic-offset: 4
395
 * tab-width: 4
396
 * indent-tabs-mode: nil
397
 * End:
398
 */