/root/src/xen/xen/arch/x86/hvm/vlapic.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * vlapic.c: virtualize LAPIC for HVM vcpus. |
3 | | * |
4 | | * Copyright (c) 2004, Intel Corporation. |
5 | | * Copyright (c) 2006 Keir Fraser, XenSource Inc. |
6 | | * |
7 | | * This program is free software; you can redistribute it and/or modify it |
8 | | * under the terms and conditions of the GNU General Public License, |
9 | | * version 2, as published by the Free Software Foundation. |
10 | | * |
11 | | * This program is distributed in the hope it will be useful, but WITHOUT |
12 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
14 | | * more details. |
15 | | * |
16 | | * You should have received a copy of the GNU General Public License along with |
17 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
18 | | */ |
19 | | |
20 | | #include <xen/types.h> |
21 | | #include <xen/mm.h> |
22 | | #include <xen/xmalloc.h> |
23 | | #include <xen/domain.h> |
24 | | #include <xen/domain_page.h> |
25 | | #include <xen/event.h> |
26 | | #include <xen/trace.h> |
27 | | #include <xen/lib.h> |
28 | | #include <xen/sched.h> |
29 | | #include <xen/numa.h> |
30 | | #include <asm/current.h> |
31 | | #include <asm/page.h> |
32 | | #include <asm/apic.h> |
33 | | #include <asm/io_apic.h> |
34 | | #include <asm/vpmu.h> |
35 | | #include <asm/hvm/hvm.h> |
36 | | #include <asm/hvm/io.h> |
37 | | #include <asm/hvm/support.h> |
38 | | #include <asm/hvm/vmx/vmx.h> |
39 | | #include <asm/hvm/nestedhvm.h> |
40 | | #include <asm/hvm/viridian.h> |
41 | | #include <public/hvm/ioreq.h> |
42 | | #include <public/hvm/params.h> |
43 | | |
44 | 25 | #define VLAPIC_VERSION 0x00050014 |
45 | 84 | #define VLAPIC_LVT_NUM 6 |
46 | | |
47 | | #define LVT_MASK \ |
48 | 60 | (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK) |
49 | | |
50 | | #define LINT_MASK \ |
51 | 24 | (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY |\ |
52 | 24 | APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER) |
53 | | |
54 | | static const unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] = |
55 | | { |
56 | | /* LVTT */ |
57 | | LVT_MASK | APIC_TIMER_MODE_MASK, |
58 | | /* LVTTHMR */ |
59 | | LVT_MASK | APIC_MODE_MASK, |
60 | | /* LVTPC */ |
61 | | LVT_MASK | APIC_MODE_MASK, |
62 | | /* LVT0-1 */ |
63 | | LINT_MASK, LINT_MASK, |
64 | | /* LVTERR */ |
65 | | LVT_MASK |
66 | | }; |
67 | | |
68 | | #define vlapic_lvt_vector(vlapic, lvt_type) \ |
69 | | (vlapic_get_reg(vlapic, lvt_type) & APIC_VECTOR_MASK) |
70 | | |
71 | | #define vlapic_lvt_dm(vlapic, lvt_type) \ |
72 | | (vlapic_get_reg(vlapic, lvt_type) & APIC_MODE_MASK) |
73 | | |
74 | | #define vlapic_lvtt_period(vlapic) \ |
75 | 0 | ((vlapic_get_reg(vlapic, APIC_LVTT) & APIC_TIMER_MODE_MASK) \ |
76 | 0 | == APIC_TIMER_MODE_PERIODIC) |
77 | | |
78 | | #define vlapic_lvtt_oneshot(vlapic) \ |
79 | 0 | ((vlapic_get_reg(vlapic, APIC_LVTT) & APIC_TIMER_MODE_MASK) \ |
80 | 0 | == APIC_TIMER_MODE_ONESHOT) |
81 | | |
82 | | #define vlapic_lvtt_tdt(vlapic) \ |
83 | 12 | ((vlapic_get_reg(vlapic, APIC_LVTT) & APIC_TIMER_MODE_MASK) \ |
84 | 12 | == APIC_TIMER_MODE_TSC_DEADLINE) |
85 | | |
86 | | static void vlapic_do_init(struct vlapic *vlapic); |
87 | | |
88 | | static int vlapic_find_highest_vector(const void *bitmap) |
89 | 7.40M | { |
90 | 7.40M | const uint32_t *word = bitmap; |
91 | 7.40M | unsigned int word_offset = NR_VECTORS / 32; |
92 | 7.40M | |
93 | 7.40M | /* Work backwards through the bitmap (first 32-bit word in every four). */ |
94 | 66.5M | while ( (word_offset != 0) && (word[(--word_offset)*4] == 0) ) |
95 | 59.1M | continue; |
96 | 7.40M | |
97 | 7.40M | return (fls(word[word_offset*4]) - 1) + (word_offset * 32); |
98 | 7.40M | } |
99 | | |
100 | | /* |
101 | | * IRR-specific bitmap update & search routines. |
102 | | */ |
103 | | |
/* Atomically set @vector in the IRR; returns its previous state. */
static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
{
    return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]);
}
108 | | |
/* Atomically clear @vector in the IRR. */
static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
{
    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
}
113 | | |
/*
 * Highest pending vector in the IRR, or -1 if none.  Any hardware
 * Posted Interrupt Requests are folded into the IRR first so the scan
 * sees a complete picture.
 */
static int vlapic_find_highest_irr(struct vlapic *vlapic)
{
    if ( hvm_funcs.sync_pir_to_irr )
        hvm_funcs.sync_pir_to_irr(vlapic_vcpu(vlapic));

    return vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
}
121 | | |
/*
 * Accumulate @errmask into the Error Status Register.  Only when a bit
 * is newly set (and the LVTERR entry is unmasked) is the error
 * interrupt delivered, so repeated identical errors do not re-inject.
 * The ESR lock serialises against concurrent error reporters.
 */
static void vlapic_error(struct vlapic *vlapic, unsigned int errmask)
{
    unsigned long flags;
    uint32_t esr;

    spin_lock_irqsave(&vlapic->esr_lock, flags);
    esr = vlapic_get_reg(vlapic, APIC_ESR);
    if ( (esr & errmask) != errmask )
    {
        uint32_t lvterr = vlapic_get_reg(vlapic, APIC_LVTERR);

        vlapic_set_reg(vlapic, APIC_ESR, esr | errmask);
        if ( !(lvterr & APIC_LVT_MASKED) )
            vlapic_set_irq(vlapic, lvterr & APIC_VECTOR_MASK, 0);
    }
    spin_unlock_irqrestore(&vlapic->esr_lock, flags);
}
139 | | |
/*
 * Inject vector @vec into this vLAPIC.  @trig != 0 marks the vector
 * level-triggered via the TMR.  Vectors 0-15 are architecturally
 * illegal and are reported through the ESR instead of delivered.
 * When hardware posted-interrupt support is available the vector is
 * handed to it directly; otherwise the IRR is updated in software and
 * the target vCPU kicked only on a 0 -> 1 transition.
 */
void vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig)
{
    struct vcpu *target = vlapic_vcpu(vlapic);

    if ( unlikely(vec < 16) )
    {
        vlapic_error(vlapic, APIC_ESR_RECVILL);
        return;
    }

    if ( trig )
        vlapic_set_vector(vec, &vlapic->regs->data[APIC_TMR]);

    /* TMR must be up to date before the EOI-exit bitmap is recalculated. */
    if ( hvm_funcs.update_eoi_exit_bitmap )
        hvm_funcs.update_eoi_exit_bitmap(target, vec, trig);

    if ( hvm_funcs.deliver_posted_intr )
        hvm_funcs.deliver_posted_intr(target, vec);
    else if ( !vlapic_test_and_set_irr(vec, vlapic) )
        vcpu_kick(target);
}
161 | | |
/* Highest in-service vector, or -1 if none is in service. */
static int vlapic_find_highest_isr(struct vlapic *vlapic)
{
    return vlapic_find_highest_vector(&vlapic->regs->data[APIC_ISR]);
}
166 | | |
167 | | static uint32_t vlapic_get_ppr(struct vlapic *vlapic) |
168 | 0 | { |
169 | 0 | uint32_t tpr, isrv, ppr; |
170 | 0 | int isr; |
171 | 0 |
|
172 | 0 | tpr = vlapic_get_reg(vlapic, APIC_TASKPRI); |
173 | 0 | isr = vlapic_find_highest_isr(vlapic); |
174 | 0 | isrv = (isr != -1) ? isr : 0; |
175 | 0 |
|
176 | 0 | if ( (tpr & 0xf0) >= (isrv & 0xf0) ) |
177 | 0 | ppr = tpr & 0xff; |
178 | 0 | else |
179 | 0 | ppr = isrv & 0xf0; |
180 | 0 |
|
181 | 0 | HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT, |
182 | 0 | "vlapic %p, ppr %#x, isr %#x, isrv %#x", |
183 | 0 | vlapic, ppr, isr, isrv); |
184 | 0 |
|
185 | 0 | return ppr; |
186 | 0 | } |
187 | | |
/* Recompute the PPR, latch it into the PROCPRI register, and return it. */
uint32_t vlapic_set_ppr(struct vlapic *vlapic)
{
    uint32_t ppr = vlapic_get_ppr(vlapic);

    vlapic_set_reg(vlapic, APIC_PROCPRI, ppr);
    return ppr;
}
195 | | |
/*
 * Does logical destination @mda address this vLAPIC?  In x2APIC mode
 * the LDR is split into a 16-bit cluster ID (high) and a 16-bit
 * in-cluster bitmask (low); both must match.  In xAPIC mode only the
 * low 8 bits of @mda are relevant and the DFR selects flat vs.
 * cluster decoding.
 */
static bool_t vlapic_match_logical_addr(const struct vlapic *vlapic,
                                        uint32_t mda)
{
    bool_t result = 0;
    uint32_t logical_id = vlapic_get_reg(vlapic, APIC_LDR);

    if ( vlapic_x2apic_mode(vlapic) )
        return ((logical_id >> 16) == (mda >> 16)) &&
               (uint16_t)(logical_id & mda);

    logical_id = GET_xAPIC_LOGICAL_ID(logical_id);
    mda = (uint8_t)mda;

    switch ( vlapic_get_reg(vlapic, APIC_DFR) )
    {
    case APIC_DFR_FLAT:
        if ( logical_id & mda )
            result = 1;
        break;
    case APIC_DFR_CLUSTER:
        /* High nibble: cluster ID.  Low nibble: member bitmask. */
        if ( ((logical_id >> 4) == (mda >> 0x4)) && (logical_id & mda & 0xf) )
            result = 1;
        break;
    default:
        printk(XENLOG_G_WARNING "%pv: bad LAPIC DFR value %08x\n",
               const_vlapic_vcpu(vlapic),
               vlapic_get_reg(vlapic, APIC_DFR));
        break;
    }

    return result;
}
228 | | |
/*
 * Is @target addressed by an interrupt from @source with the given
 * destination shorthand, destination value, and destination mode
 * (0 = physical, non-zero = logical)?
 */
bool_t vlapic_match_dest(
    const struct vlapic *target, const struct vlapic *source,
    int short_hand, uint32_t dest, bool_t dest_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest %#x, "
                "dest_mode %#x, short_hand %#x",
                target, source, dest, dest_mode, short_hand);

    switch ( short_hand )
    {
    case APIC_DEST_NOSHORT:
        if ( dest_mode )
            return vlapic_match_logical_addr(target, dest);
        /* Physical mode: match broadcast ID or the target's APIC ID. */
        return (dest == _VLAPIC_ID(target, 0xffffffff)) ||
               (dest == VLAPIC_ID(target));

    case APIC_DEST_SELF:
        return (target == source);

    case APIC_DEST_ALLINC:
        return 1;

    case APIC_DEST_ALLBUT:
        return (target != source);

    default:
        gdprintk(XENLOG_WARNING, "Bad dest shorthand value %x\n", short_hand);
        break;
    }

    return 0;
}
261 | | |
/*
 * Deliver an INIT or STARTUP IPI to a single target vCPU.  The target
 * is paused for the duration so its state can be modified safely.
 * Runs in tasklet context (see vlapic_init_sipi_action()).
 */
static void vlapic_init_sipi_one(struct vcpu *target, uint32_t icr)
{
    vcpu_pause(target);

    switch ( icr & APIC_MODE_MASK )
    {
    case APIC_DM_INIT: {
        bool_t fpu_initialised;
        int rc;

        /* No work on INIT de-assert for P4-type APIC. */
        if ( (icr & (APIC_INT_LEVELTRIG | APIC_INT_ASSERT)) ==
             APIC_INT_LEVELTRIG )
            break;
        /* Nothing to do if the VCPU is already reset. */
        if ( !target->is_initialised )
            break;
        hvm_vcpu_down(target);
        domain_lock(target->domain);
        /* Reset necessary VCPU state. This does not include FPU state. */
        fpu_initialised = target->fpu_initialised;
        rc = vcpu_reset(target);
        ASSERT(!rc);
        target->fpu_initialised = fpu_initialised;
        vlapic_do_init(vcpu_vlapic(target));
        domain_unlock(target->domain);
        break;
    }

    case APIC_DM_STARTUP: {
        /* SIPI vector selects the real-mode start segment (vector << 8). */
        uint16_t reset_cs = (icr & 0xffu) << 8;
        hvm_vcpu_reset_state(target, reset_cs, 0);
        break;
    }

    default:
        BUG();  /* Caller only schedules us for INIT/STARTUP. */
    }

    vcpu_unpause(target);
}
303 | | |
/*
 * Tasklet body for deferred INIT/SIPI delivery.  The originating vCPU
 * was paused (nosync) when the ICR write was latched in vlapic_ipi();
 * deliver the IPI to every matching vCPU, then clear the latch and
 * unpause the originator.
 */
static void vlapic_init_sipi_action(unsigned long _vcpu)
{
    struct vcpu *origin = (struct vcpu *)_vcpu;
    uint32_t icr = vcpu_vlapic(origin)->init_sipi.icr;
    uint32_t dest = vcpu_vlapic(origin)->init_sipi.dest;
    uint32_t short_hand = icr & APIC_SHORT_MASK;
    bool_t dest_mode = !!(icr & APIC_DEST_MASK);
    struct vcpu *v;

    if ( icr == 0 )
        return;

    for_each_vcpu ( origin->domain, v )
    {
        if ( vlapic_match_dest(vcpu_vlapic(v), vcpu_vlapic(origin),
                               short_hand, dest, dest_mode) )
            vlapic_init_sipi_one(v, icr);
    }

    /* Clearing the latch allows the next INIT/SIPI to be accepted. */
    vcpu_vlapic(origin)->init_sipi.icr = 0;
    vcpu_unpause(origin);
}
326 | | |
327 | | /* Add a pending IRQ into lapic. */ |
328 | | static void vlapic_accept_irq(struct vcpu *v, uint32_t icr_low) |
329 | 0 | { |
330 | 0 | struct vlapic *vlapic = vcpu_vlapic(v); |
331 | 0 | uint8_t vector = (uint8_t)icr_low; |
332 | 0 |
|
333 | 0 | switch ( icr_low & APIC_MODE_MASK ) |
334 | 0 | { |
335 | 0 | case APIC_DM_FIXED: |
336 | 0 | case APIC_DM_LOWEST: |
337 | 0 | if ( vlapic_enabled(vlapic) ) |
338 | 0 | vlapic_set_irq(vlapic, vector, 0); |
339 | 0 | break; |
340 | 0 |
|
341 | 0 | case APIC_DM_REMRD: |
342 | 0 | gdprintk(XENLOG_WARNING, "Ignoring delivery mode 3\n"); |
343 | 0 | break; |
344 | 0 |
|
345 | 0 | case APIC_DM_SMI: |
346 | 0 | gdprintk(XENLOG_WARNING, "Ignoring guest SMI\n"); |
347 | 0 | break; |
348 | 0 |
|
349 | 0 | case APIC_DM_NMI: |
350 | 0 | if ( !test_and_set_bool(v->nmi_pending) ) |
351 | 0 | { |
352 | 0 | bool_t wake = 0; |
353 | 0 | domain_lock(v->domain); |
354 | 0 | if ( v->is_initialised ) |
355 | 0 | wake = test_and_clear_bit(_VPF_down, &v->pause_flags); |
356 | 0 | domain_unlock(v->domain); |
357 | 0 | if ( wake ) |
358 | 0 | vcpu_wake(v); |
359 | 0 | vcpu_kick(v); |
360 | 0 | } |
361 | 0 | break; |
362 | 0 |
|
363 | 0 | case APIC_DM_INIT: |
364 | 0 | case APIC_DM_STARTUP: |
365 | 0 | BUG(); /* Handled in vlapic_ipi(). */ |
366 | 0 |
|
367 | 0 | default: |
368 | 0 | gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode in ICR %x\n", |
369 | 0 | icr_low); |
370 | 0 | domain_crash(v->domain); |
371 | 0 | } |
372 | 0 | } |
373 | | |
/*
 * Lowest-priority (i.e. lowest PPR) arbitration for APIC_DM_LOWEST
 * delivery.  Walks the vCPU list starting just after the previous
 * winner (round-robin tie-breaking) and remembers the new winner for
 * next time.  Returns NULL if no enabled, matching vLAPIC is found.
 */
struct vlapic *vlapic_lowest_prio(
    struct domain *d, const struct vlapic *source,
    int short_hand, uint32_t dest, bool_t dest_mode)
{
    int old = hvm_domain_irq(d)->round_robin_prev_vcpu;
    uint32_t ppr, target_ppr = UINT_MAX;
    struct vlapic *vlapic, *target = NULL;
    struct vcpu *v;

    if ( unlikely(!d->vcpu) || unlikely((v = d->vcpu[old]) == NULL) )
        return NULL;

    /* One full lap around the circular vCPU list, starting after 'old'. */
    do {
        v = v->next_in_list ? : d->vcpu[0];
        vlapic = vcpu_vlapic(v);
        if ( vlapic_match_dest(vlapic, source, short_hand, dest, dest_mode) &&
             vlapic_enabled(vlapic) &&
             ((ppr = vlapic_get_ppr(vlapic)) < target_ppr) )
        {
            target = vlapic;
            target_ppr = ppr;
        }
    } while ( v->vcpu_id != old );

    if ( target != NULL )
        hvm_domain_irq(d)->round_robin_prev_vcpu =
           vlapic_vcpu(target)->vcpu_id;

    return target;
}
404 | | |
/*
 * Complete an EOI write: retire the highest in-service vector, notify
 * hardware acceleration (if any), then perform the architectural EOI
 * side effects (IOAPIC / MSI propagation).
 */
void vlapic_EOI_set(struct vlapic *vlapic)
{
    int vector = vlapic_find_highest_isr(vlapic);

    /* Some EOI writes may not have a matching in-service interrupt. */
    if ( vector == -1 )
        return;

    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);

    if ( hvm_funcs.handle_eoi )
        hvm_funcs.handle_eoi(vector);

    vlapic_handle_EOI(vlapic, vector);
}
420 | | |
421 | | void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector) |
422 | 296 | { |
423 | 296 | struct domain *d = vlapic_domain(vlapic); |
424 | 296 | |
425 | 296 | if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) ) |
426 | 296 | vioapic_update_EOI(d, vector); |
427 | 296 | |
428 | 296 | hvm_dpci_msi_eoi(d, vector); |
429 | 296 | } |
430 | | |
/*
 * Heuristic: will this IPI target more than one vCPU?  Used to decide
 * whether softirq batching is worthwhile.  Domains with <= 2 vCPUs
 * never count as multicast.
 */
static bool_t is_multicast_dest(struct vlapic *vlapic, unsigned int short_hand,
                                uint32_t dest, bool_t dest_mode)
{
    if ( vlapic_domain(vlapic)->max_vcpus <= 2 )
        return 0;

    /* Any shorthand other than "self" addresses multiple CPUs. */
    if ( short_hand )
        return short_hand != APIC_DEST_SELF;

    if ( vlapic_x2apic_mode(vlapic) )
        return dest_mode ? hweight16(dest) > 1 : dest == 0xffffffff;

    /* xAPIC logical mode: count destination bits valid under the DFR. */
    if ( dest_mode )
        return hweight8(dest &
                        GET_xAPIC_DEST_FIELD(vlapic_get_reg(vlapic,
                                                            APIC_DFR))) > 1;

    /* xAPIC physical mode: only the broadcast ID is multicast. */
    return dest == 0xff;
}
450 | | |
/*
 * Handle a guest write of the ICR (low word @icr_low, destination in
 * @icr_high).  INIT/STARTUP are latched and deferred to a tasklet so
 * the heavyweight vCPU reset runs outside the current context; other
 * modes are delivered synchronously to all matching vCPUs.
 */
void vlapic_ipi(
    struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high)
{
    unsigned int dest;
    unsigned int short_hand = icr_low & APIC_SHORT_MASK;
    bool_t dest_mode = !!(icr_low & APIC_DEST_MASK);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr = 0x%08x:%08x", icr_high, icr_low);

    dest = _VLAPIC_ID(vlapic, icr_high);

    switch ( icr_low & APIC_MODE_MASK )
    {
    case APIC_DM_INIT:
    case APIC_DM_STARTUP:
        if ( vlapic->init_sipi.icr != 0 )
        {
            WARN(); /* should be impossible but don't BUG, just in case */
            break;
        }
        /* Pause self until the tasklet has delivered the IPI. */
        vcpu_pause_nosync(vlapic_vcpu(vlapic));
        vlapic->init_sipi.icr = icr_low;
        vlapic->init_sipi.dest = dest;
        tasklet_schedule(&vlapic->init_sipi.tasklet);
        break;

    case APIC_DM_LOWEST: {
        struct vlapic *target = vlapic_lowest_prio(
            vlapic_domain(vlapic), vlapic, short_hand, dest, dest_mode);

        if ( unlikely((icr_low & APIC_VECTOR_MASK) < 16) )
            vlapic_error(vlapic, APIC_ESR_SENDILL);
        else if ( target )
            vlapic_accept_irq(vlapic_vcpu(target), icr_low);
        break;
    }

    case APIC_DM_FIXED:
        if ( unlikely((icr_low & APIC_VECTOR_MASK) < 16) )
        {
            vlapic_error(vlapic, APIC_ESR_SENDILL);
            break;
        }
        /* fall through */
    default: {
        struct vcpu *v;
        bool_t batch = is_multicast_dest(vlapic, short_hand, dest, dest_mode);

        /* Batch the cross-CPU kicks when multiple vCPUs are targeted. */
        if ( batch )
            cpu_raise_softirq_batch_begin();
        for_each_vcpu ( vlapic_domain(vlapic), v )
        {
            if ( vlapic_match_dest(vcpu_vlapic(v), vlapic,
                                   short_hand, dest, dest_mode) )
                vlapic_accept_irq(v, icr_low);
        }
        if ( batch )
            cpu_raise_softirq_batch_finish();
        break;
    }
    }
}
513 | | |
514 | | static uint32_t vlapic_get_tmcct(struct vlapic *vlapic) |
515 | 0 | { |
516 | 0 | struct vcpu *v = current; |
517 | 0 | uint32_t tmcct = 0, tmict = vlapic_get_reg(vlapic, APIC_TMICT); |
518 | 0 | uint64_t counter_passed; |
519 | 0 |
|
520 | 0 | counter_passed = ((hvm_get_guest_time(v) - vlapic->timer_last_update) |
521 | 0 | / (APIC_BUS_CYCLE_NS * vlapic->hw.timer_divisor)); |
522 | 0 |
|
523 | 0 | /* If timer_last_update is 0, then TMCCT should return 0 as well. */ |
524 | 0 | if ( tmict && vlapic->timer_last_update ) |
525 | 0 | { |
526 | 0 | if ( vlapic_lvtt_period(vlapic) ) |
527 | 0 | counter_passed %= tmict; |
528 | 0 | if ( counter_passed < tmict ) |
529 | 0 | tmcct = tmict - counter_passed; |
530 | 0 | } |
531 | 0 |
|
532 | 0 | HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, |
533 | 0 | "timer initial count %d, timer current count %d, " |
534 | 0 | "offset %"PRId64, |
535 | 0 | tmict, tmcct, counter_passed); |
536 | 0 |
|
537 | 0 | return tmcct; |
538 | 0 | } |
539 | | |
540 | | static void vlapic_set_tdcr(struct vlapic *vlapic, unsigned int val) |
541 | 12 | { |
542 | 12 | /* Only bits 0, 1 and 3 are settable; others are MBZ. */ |
543 | 12 | val &= 0xb; |
544 | 12 | vlapic_set_reg(vlapic, APIC_TDCR, val); |
545 | 12 | |
546 | 12 | /* Update the demangled hw.timer_divisor. */ |
547 | 12 | val = ((val & 3) | ((val & 8) >> 1)) + 1; |
548 | 12 | vlapic->hw.timer_divisor = 1 << (val & 7); |
549 | 12 | |
550 | 12 | HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, |
551 | 12 | "timer_divisor: %d", vlapic->hw.timer_divisor); |
552 | 12 | } |
553 | | |
/*
 * Read a 32-bit-aligned APIC register.  PPR and the timer counters are
 * computed on demand; TMCCT/TMICT read as 0 unless the timer is in
 * one-shot or periodic mode (i.e. not TSC-deadline).
 */
static uint32_t vlapic_read_aligned(struct vlapic *vlapic, unsigned int offset)
{
    switch ( offset )
    {
    case APIC_PROCPRI:
        return vlapic_get_ppr(vlapic);

    case APIC_TMCCT: /* Timer CCR */
        if ( !vlapic_lvtt_oneshot(vlapic) && !vlapic_lvtt_period(vlapic) )
            break;
        return vlapic_get_tmcct(vlapic);

    case APIC_TMICT: /* Timer ICR */
        if ( !vlapic_lvtt_oneshot(vlapic) && !vlapic_lvtt_period(vlapic) )
            break;
        /* fall through */
    default:
        return vlapic_get_reg(vlapic, offset);
    }

    return 0;
}
576 | | |
/*
 * MMIO read handler for the LAPIC page.  Performs an aligned 32-bit
 * register read and extracts the requested 1/2/4-byte sub-field.
 * Reads beyond the register window return 0; misaligned or oversized
 * accesses crash the domain.
 */
static int vlapic_read(
    struct vcpu *v, unsigned long address,
    unsigned int len, unsigned long *pval)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);
    unsigned int alignment = offset & 3, tmp, result = 0;

    /* TDCR is the last architecturally-defined register. */
    if ( offset > (APIC_TDCR + 0x3) )
        goto out;

    tmp = vlapic_read_aligned(vlapic, offset & ~3);

    switch ( len )
    {
    case 1:
        result = *((unsigned char *)&tmp + alignment);
        break;

    case 2:
        if ( alignment == 3 )
            goto unaligned_exit_and_crash;
        result = *(unsigned short *)((unsigned char *)&tmp + alignment);
        break;

    case 4:
        if ( alignment != 0 )
            goto unaligned_exit_and_crash;
        result = *(unsigned int *)((unsigned char *)&tmp + alignment);
        break;

    default:
        gdprintk(XENLOG_ERR, "Local APIC read with len=%#x, "
                 "should be 4 instead.\n", len);
        goto exit_and_crash;
    }

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset %#x with length %#x, "
                "and the result is %#x", offset, len, result);

 out:
    *pval = result;
    return X86EMUL_OKAY;

 unaligned_exit_and_crash:
    gdprintk(XENLOG_ERR, "Unaligned LAPIC read len=%#x at offset=%#x.\n",
             len, offset);
 exit_and_crash:
    domain_crash(v->domain);
    return X86EMUL_OKAY;
}
628 | | |
/*
 * x2APIC MSR read handler.  A static bitmap (one bit per 16-byte
 * register slot) encodes which registers are architecturally readable;
 * anything else, or any read while not in x2APIC mode, is a #GP
 * (X86EMUL_UNHANDLEABLE).  ICR is the single 64-bit register and pulls
 * its high half from ICR2.
 */
int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
{
    static const unsigned long readable[] =
        {
#define REG(x) (1UL << (APIC_ ## x >> 4))
            REG(ID)    | REG(LVR)  | REG(TASKPRI) | REG(PROCPRI) |
            REG(LDR)   | REG(SPIV) | REG(ESR)     | REG(ICR)     |
            REG(CMCI)  | REG(LVTT) | REG(LVTTHMR) | REG(LVTPC)   |
            REG(LVT0)  | REG(LVT1) | REG(LVTERR)  | REG(TMICT)   |
            REG(TMCCT) | REG(TDCR) |
#undef REG
#define REGBLOCK(x) (((1UL << (NR_VECTORS / 32)) - 1) << (APIC_ ## x >> 4))
            REGBLOCK(ISR) | REGBLOCK(TMR) | REGBLOCK(IRR)
#undef REGBLOCK
        };
    struct vlapic *vlapic = vcpu_vlapic(v);
    uint32_t high = 0, reg = msr - MSR_IA32_APICBASE_MSR, offset = reg << 4;

    if ( !vlapic_x2apic_mode(vlapic) ||
         (reg >= sizeof(readable) * 8) || !test_bit(reg, readable) )
        return X86EMUL_UNHANDLEABLE;

    if ( offset == APIC_ICR )
        high = vlapic_read_aligned(vlapic, APIC_ICR2);

    *msr_content = ((uint64_t)high << 32) |
                   vlapic_read_aligned(vlapic, offset);

    return X86EMUL_OKAY;
}
659 | | |
/* Periodic-timer callback: record the guest time of this timer tick. */
static void vlapic_pt_cb(struct vcpu *v, void *data)
{
    TRACE_0D(TRC_HVM_EMUL_LAPIC_TIMER_CB);
    *(s_time_t *)data = hvm_get_guest_time(v);
}
665 | | |
/*
 * TSC-deadline timer callback: record the tick time and clear the
 * deadline MSR, which architecturally reads as 0 once it has fired.
 */
static void vlapic_tdt_pt_cb(struct vcpu *v, void *data)
{
    *(s_time_t *)data = hvm_get_guest_time(v);
    vcpu_vlapic(v)->hw.tdt_msr = 0;
}
671 | | |
672 | | /* |
673 | | * This function is used when a register related to the APIC timer is updated. |
674 | | * It expects the new value for the register TMICT to be set *before* |
675 | | * being called, and the previous value of the divisor (calculated from TDCR) |
676 | | * to be passed as argument. |
677 | | * It expect the new value of LVTT to be set *after* being called, with this |
678 | | * new values passed as parameter (only APIC_TIMER_MODE_MASK bits matter). |
679 | | */ |
680 | | static void vlapic_update_timer(struct vlapic *vlapic, uint32_t lvtt, |
681 | | bool tmict_updated, uint32_t old_divisor) |
682 | 12 | { |
683 | 12 | uint64_t period, delta = 0; |
684 | 12 | bool is_oneshot, is_periodic; |
685 | 12 | |
686 | 12 | is_periodic = (lvtt & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_PERIODIC; |
687 | 12 | is_oneshot = (lvtt & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_ONESHOT; |
688 | 12 | |
689 | 12 | period = (uint64_t)vlapic_get_reg(vlapic, APIC_TMICT) |
690 | 12 | * APIC_BUS_CYCLE_NS * old_divisor; |
691 | 12 | |
692 | 12 | /* Calculate the next time the timer should trigger an interrupt. */ |
693 | 12 | if ( tmict_updated ) |
694 | 0 | delta = period; |
695 | 12 | else if ( period && vlapic->timer_last_update ) |
696 | 0 | { |
697 | 0 | uint64_t time_passed = hvm_get_guest_time(current) |
698 | 0 | - vlapic->timer_last_update; |
699 | 0 |
|
700 | 0 | /* This depends of the previous mode, if a new mode is being set */ |
701 | 0 | if ( vlapic_lvtt_period(vlapic) ) |
702 | 0 | time_passed %= period; |
703 | 0 | if ( time_passed < period ) |
704 | 0 | delta = period - time_passed; |
705 | 0 | } |
706 | 12 | |
707 | 12 | if ( delta && (is_oneshot || is_periodic) ) |
708 | 0 | { |
709 | 0 | if ( vlapic->hw.timer_divisor != old_divisor ) |
710 | 0 | { |
711 | 0 | period = (uint64_t)vlapic_get_reg(vlapic, APIC_TMICT) |
712 | 0 | * APIC_BUS_CYCLE_NS * vlapic->hw.timer_divisor; |
713 | 0 | delta = delta * vlapic->hw.timer_divisor / old_divisor; |
714 | 0 | } |
715 | 0 |
|
716 | 0 | TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(delta), |
717 | 0 | TRC_PAR_LONG(is_periodic ? period : 0), |
718 | 0 | vlapic->pt.irq); |
719 | 0 |
|
720 | 0 | create_periodic_time(current, &vlapic->pt, delta, |
721 | 0 | is_periodic ? period : 0, vlapic->pt.irq, |
722 | 0 | is_periodic ? vlapic_pt_cb : NULL, |
723 | 0 | &vlapic->timer_last_update); |
724 | 0 |
|
725 | 0 | vlapic->timer_last_update = vlapic->pt.last_plt_gtime; |
726 | 0 | if ( !tmict_updated ) |
727 | 0 | vlapic->timer_last_update -= period - delta; |
728 | 0 |
|
729 | 0 | HVM_DBG_LOG(DBG_LEVEL_VLAPIC, |
730 | 0 | "bus cycle is %uns, " |
731 | 0 | "initial count %u, period %"PRIu64"ns", |
732 | 0 | APIC_BUS_CYCLE_NS, |
733 | 0 | vlapic_get_reg(vlapic, APIC_TMICT), |
734 | 0 | period); |
735 | 0 | } |
736 | 12 | else |
737 | 12 | { |
738 | 12 | TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER); |
739 | 12 | destroy_periodic_time(&vlapic->pt); |
740 | 12 | /* |
741 | 12 | * From now, TMCCT should return 0 until TMICT is set again. |
742 | 12 | * This is because the timer mode was one-shot when the counter reach 0 |
743 | 12 | * or just because the timer is disable. |
744 | 12 | */ |
745 | 12 | vlapic->timer_last_update = 0; |
746 | 12 | } |
747 | 12 | } |
748 | | |
/*
 * Core APIC register-write emulation, shared by the MMIO, x2APIC MSR
 * and APICV paths.  @offset is the 16-byte-aligned register offset and
 * @val the 32-bit value being written.  Writes to offsets not handled
 * here are silently ignored.
 */
static void vlapic_reg_write(struct vcpu *v,
                             unsigned int offset, uint32_t val)
{
    struct vlapic *vlapic = vcpu_vlapic(v);

    /* Any write invalidates cached state from a previous state load. */
    memset(&vlapic->loaded, 0, sizeof(vlapic->loaded));

    switch ( offset )
    {
    case APIC_ID:
        vlapic_set_reg(vlapic, APIC_ID, val);
        break;

    case APIC_TASKPRI:
        vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
        break;

    case APIC_EOI:
        vlapic_EOI_set(vlapic);
        break;

    case APIC_LDR:
        vlapic_set_reg(vlapic, APIC_LDR, val & APIC_LDR_MASK);
        break;

    case APIC_DFR:
        /* Low 28 bits of DFR are architecturally all-ones. */
        vlapic_set_reg(vlapic, APIC_DFR, val | 0x0FFFFFFF);
        break;

    case APIC_SPIV:
        vlapic_set_reg(vlapic, APIC_SPIV, val & 0x3ff);

        if ( !(val & APIC_SPIV_APIC_ENABLED) )
        {
            int i;
            uint32_t lvt_val;

            /* Software-disabling the APIC forces all LVT entries masked. */
            vlapic->hw.disabled |= VLAPIC_SW_DISABLED;

            for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
            {
                lvt_val = vlapic_get_reg(vlapic, APIC_LVTT + 0x10 * i);
                vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i,
                               lvt_val | APIC_LVT_MASKED);
            }
        }
        else
        {
            vlapic->hw.disabled &= ~VLAPIC_SW_DISABLED;
            /* Re-enabling may unmask a pending periodic-timer interrupt. */
            pt_may_unmask_irq(vlapic_domain(vlapic), &vlapic->pt);
        }
        break;

    case APIC_ICR:
        val &= ~(1 << 12); /* always clear the pending bit */
        vlapic_ipi(vlapic, val, vlapic_get_reg(vlapic, APIC_ICR2));
        vlapic_set_reg(vlapic, APIC_ICR, val);
        break;

    case APIC_ICR2:
        vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000);
        break;

    case APIC_LVTT:         /* LVT Timer Reg */
        /* Entering/leaving TSC-deadline mode resets the timer state. */
        if ( vlapic_lvtt_tdt(vlapic) !=
             ((val & APIC_TIMER_MODE_MASK) == APIC_TIMER_MODE_TSC_DEADLINE))
        {
            vlapic_set_reg(vlapic, APIC_TMICT, 0);
            vlapic->hw.tdt_msr = 0;
        }
        vlapic->pt.irq = val & APIC_VECTOR_MASK;

        vlapic_update_timer(vlapic, val, false, vlapic->hw.timer_divisor);

        /* fallthrough */
    case APIC_LVTTHMR:      /* LVT Thermal Monitor */
    case APIC_LVTPC:        /* LVT Performance Counter */
    case APIC_LVT0:         /* LVT LINT0 Reg */
    case APIC_LVT1:         /* LVT Lint1 Reg */
    case APIC_LVTERR:       /* LVT Error Reg */
        if ( vlapic_sw_disabled(vlapic) )
            val |= APIC_LVT_MASKED;
        val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];
        vlapic_set_reg(vlapic, offset, val);
        if ( offset == APIC_LVT0 )
        {
            /* LINT0 routing interacts with the virtual i8259. */
            vlapic_adjust_i8259_target(v->domain);
            pt_may_unmask_irq(v->domain, NULL);
        }
        if ( (offset == APIC_LVTT) && !(val & APIC_LVT_MASKED) )
            pt_may_unmask_irq(NULL, &vlapic->pt);
        if ( offset == APIC_LVTPC )
            vpmu_lvtpc_update(val);
        break;

    case APIC_TMICT:
        /* TMICT writes are ignored in TSC-deadline mode. */
        if ( !vlapic_lvtt_oneshot(vlapic) && !vlapic_lvtt_period(vlapic) )
            break;

        vlapic_set_reg(vlapic, APIC_TMICT, val);

        vlapic_update_timer(vlapic, vlapic_get_reg(vlapic, APIC_LVTT), true,
                            vlapic->hw.timer_divisor);
        break;

    case APIC_TDCR:
    {
        /* vlapic_update_timer() needs the divisor in force beforehand. */
        uint32_t current_divisor = vlapic->hw.timer_divisor;

        vlapic_set_tdcr(vlapic, val & 0xb);

        vlapic_update_timer(vlapic, vlapic_get_reg(vlapic, APIC_LVTT), false,
                            current_divisor);
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer divisor is %#x",
                    vlapic->hw.timer_divisor);
        break;
    }
    }
}
868 | | |
/*
 * MMIO write handler for the LAPIC page.  Narrow (1- or 2-byte) writes
 * are merged into the containing 32-bit register via read-modify-write;
 * misaligned or oversized accesses crash the domain.
 */
static int vlapic_write(struct vcpu *v, unsigned long address,
                        unsigned int len, unsigned long val)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);
    int rc = X86EMUL_OKAY;

    if ( offset != APIC_EOI )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "offset %#x with length %#x, and value is %#lx",
                    offset, len, val);

    /*
     * According to the IA32 Manual, all accesses should be 32 bits.
     * Some OSes do 8- or 16-bit accesses, however.
     */
    if ( unlikely(len != 4) )
    {
        unsigned int tmp = vlapic_read_aligned(vlapic, offset & ~3);
        unsigned char alignment = (offset & 3) * 8;

        switch ( len )
        {
        case 1:
            val = ((tmp & ~(0xffU << alignment)) |
                   ((val & 0xff) << alignment));
            break;

        case 2:
            if ( alignment & 1 )
                goto unaligned_exit_and_crash;
            val = ((tmp & ~(0xffffU << alignment)) |
                   ((val & 0xffff) << alignment));
            break;

        default:
            gprintk(XENLOG_ERR, "LAPIC write with len %u\n", len);
            goto exit_and_crash;
        }

        gdprintk(XENLOG_INFO, "Notice: LAPIC write with len %u\n", len);
        offset &= ~3;
    }
    else if ( unlikely(offset & 3) )
        goto unaligned_exit_and_crash;

    vlapic_reg_write(v, offset, val);

    return X86EMUL_OKAY;

 unaligned_exit_and_crash:
    gprintk(XENLOG_ERR, "Unaligned LAPIC write: len=%u offset=%#x.\n",
            len, offset);
 exit_and_crash:
    domain_crash(v->domain);
    return rc;
}
926 | | |
927 | | int vlapic_apicv_write(struct vcpu *v, unsigned int offset) |
928 | 0 | { |
929 | 0 | struct vlapic *vlapic = vcpu_vlapic(v); |
930 | 0 | uint32_t val = vlapic_get_reg(vlapic, offset); |
931 | 0 |
|
932 | 0 | if ( vlapic_x2apic_mode(vlapic) ) |
933 | 0 | { |
934 | 0 | if ( offset != APIC_SELF_IPI ) |
935 | 0 | return X86EMUL_UNHANDLEABLE; |
936 | 0 |
|
937 | 0 | offset = APIC_ICR; |
938 | 0 | val = APIC_DEST_SELF | (val & APIC_VECTOR_MASK); |
939 | 0 | } |
940 | 0 |
|
941 | 0 | vlapic_reg_write(v, offset, val); |
942 | 0 |
|
943 | 0 | return X86EMUL_OKAY; |
944 | 0 | } |
945 | | |
/*
 * Handle a WRMSR to the x2APIC MSR range (0x800..).
 *
 * @v:           vCPU performing the write.
 * @msr:         MSR index within the x2APIC block.
 * @msr_content: 64-bit value written.
 *
 * Validates the reserved bits for each register and then forwards the
 * write through vlapic_reg_write().  Returns X86EMUL_OKAY on success or
 * X86EMUL_UNHANDLEABLE for invalid registers/values (causing #GP).
 */
int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    /* x2APIC MSRs are spaced 1 apart; xAPIC registers 0x10 apart. */
    uint32_t offset = (msr - MSR_IA32_APICBASE_MSR) << 4;

    if ( !vlapic_x2apic_mode(vlapic) )
        return X86EMUL_UNHANDLEABLE;

    switch ( offset )
    {
    case APIC_TASKPRI:
        if ( msr_content & ~APIC_TPRI_MASK )
            return X86EMUL_UNHANDLEABLE;
        break;

    case APIC_SPIV:
        /* Directed-EOI support bit is only writable if advertised in LVR. */
        if ( msr_content & ~(APIC_VECTOR_MASK | APIC_SPIV_APIC_ENABLED |
                             (VLAPIC_VERSION & APIC_LVR_DIRECTED_EOI
                              ? APIC_SPIV_DIRECTED_EOI : 0)) )
            return X86EMUL_UNHANDLEABLE;
        break;

    case APIC_LVTT:
        if ( msr_content & ~(LVT_MASK | APIC_TIMER_MODE_MASK) )
            return X86EMUL_UNHANDLEABLE;
        break;

    case APIC_LVTTHMR:
    case APIC_LVTPC:
    case APIC_CMCI:
        if ( msr_content & ~(LVT_MASK | APIC_MODE_MASK) )
            return X86EMUL_UNHANDLEABLE;
        break;

    case APIC_LVT0:
    case APIC_LVT1:
        if ( msr_content & ~LINT_MASK )
            return X86EMUL_UNHANDLEABLE;
        break;

    case APIC_LVTERR:
        if ( msr_content & ~LVT_MASK )
            return X86EMUL_UNHANDLEABLE;
        break;

    case APIC_TMICT:
        /* Any 32-bit initial count is valid. */
        break;

    case APIC_TDCR:
        if ( msr_content & ~APIC_TDR_DIV_1 )
            return X86EMUL_UNHANDLEABLE;
        break;

    case APIC_ICR:
        /* In x2APIC mode ICR is a single 64-bit MSR; split off ICR2. */
        if ( (uint32_t)msr_content & ~(APIC_VECTOR_MASK | APIC_MODE_MASK |
                                       APIC_DEST_MASK | APIC_INT_ASSERT |
                                       APIC_INT_LEVELTRIG | APIC_SHORT_MASK) )
            return X86EMUL_UNHANDLEABLE;
        vlapic_set_reg(vlapic, APIC_ICR2, msr_content >> 32);
        break;

    case APIC_SELF_IPI:
        /* SELF_IPI is write-only; translate into a self-targeted ICR write. */
        if ( msr_content & ~APIC_VECTOR_MASK )
            return X86EMUL_UNHANDLEABLE;
        offset = APIC_ICR;
        msr_content = APIC_DEST_SELF | (msr_content & APIC_VECTOR_MASK);
        break;

    case APIC_EOI:
    case APIC_ESR:
        /*
         * EOI/ESR writes must be zero; a non-zero value deliberately
         * falls into the default (unhandleable) path below.
         */
        if ( msr_content )
    default:
            return X86EMUL_UNHANDLEABLE;
    }

    vlapic_reg_write(v, offset, msr_content);

    return X86EMUL_OKAY;
}
1025 | | |
1026 | | static int vlapic_range(struct vcpu *v, unsigned long addr) |
1027 | 481k | { |
1028 | 481k | struct vlapic *vlapic = vcpu_vlapic(v); |
1029 | 481k | unsigned long offset = addr - vlapic_base_address(vlapic); |
1030 | 481k | |
1031 | 481k | return !vlapic_hw_disabled(vlapic) && |
1032 | 482k | !vlapic_x2apic_mode(vlapic) && |
1033 | 4.83k | (offset < PAGE_SIZE); |
1034 | 481k | } |
1035 | | |
/* Hooks wiring the vLAPIC MMIO window into the generic HVM MMIO layer. */
static const struct hvm_mmio_ops vlapic_mmio_ops = {
    .check = vlapic_range,
    .read = vlapic_read,
    .write = vlapic_write
};
1041 | | |
1042 | | static void set_x2apic_id(struct vlapic *vlapic) |
1043 | 9 | { |
1044 | 9 | u32 id = vlapic_vcpu(vlapic)->vcpu_id; |
1045 | 9 | u32 ldr = ((id & ~0xf) << 12) | (1 << (id & 0xf)); |
1046 | 9 | |
1047 | 9 | vlapic_set_reg(vlapic, APIC_ID, id * 2); |
1048 | 9 | vlapic_set_reg(vlapic, APIC_LDR, ldr); |
1049 | 9 | } |
1050 | | |
/*
 * Handle a guest write to MSR_IA32_APICBASE.
 *
 * Manages the global-enable bit (resetting/disabling the vLAPIC and the
 * periodic timer accordingly) and the xAPIC -> x2APIC mode transition.
 * Returns 1 on success, 0 if the write is invalid (caller injects #GP).
 */
bool_t vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
{
    if ( !has_vlapic(vlapic_domain(vlapic)) )
        return 0;

    /* Is the global-enable bit changing state? */
    if ( (vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE )
    {
        /* Cannot toggle ENABLE and set EXTD (x2APIC) in one write. */
        if ( unlikely(value & MSR_IA32_APICBASE_EXTD) )
            return 0;
        if ( value & MSR_IA32_APICBASE_ENABLE )
        {
            /* Re-enabling resets the APIC to its power-on register state. */
            vlapic_reset(vlapic);
            vlapic->hw.disabled &= ~VLAPIC_HW_DISABLED;
            pt_may_unmask_irq(vlapic_domain(vlapic), &vlapic->pt);
        }
        else
        {
            vlapic->hw.disabled |= VLAPIC_HW_DISABLED;
            pt_may_unmask_irq(vlapic_domain(vlapic), NULL);
        }
    }
    /* x2APIC may only be entered from (enabled) xAPIC mode. */
    else if ( ((vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_EXTD) &&
              unlikely(!vlapic_xapic_mode(vlapic)) )
        return 0;

    vlapic->hw.apic_base_msr = value;
    /* Invalidate any save/restore fixup bookkeeping. */
    memset(&vlapic->loaded, 0, sizeof(vlapic->loaded));

    /* Entering x2APIC mode rewrites ID/LDR to their architected values. */
    if ( vlapic_x2apic_mode(vlapic) )
        set_x2apic_id(vlapic);

    vmx_vlapic_msr_changed(vlapic_vcpu(vlapic));

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                "apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr);

    return 1;
}
1089 | | |
1090 | | uint64_t vlapic_tdt_msr_get(struct vlapic *vlapic) |
1091 | 0 | { |
1092 | 0 | if ( !vlapic_lvtt_tdt(vlapic) ) |
1093 | 0 | return 0; |
1094 | 0 |
|
1095 | 0 | return vlapic->hw.tdt_msr; |
1096 | 0 | } |
1097 | | |
/*
 * Handle a guest write to the TSC-deadline MSR.
 *
 * A deadline in the future arms a one-shot timer for the corresponding
 * guest-time delta; a deadline of zero (or in the past) disarms it, with
 * a past-but-nonzero value still delivering an immediate timer event.
 */
void vlapic_tdt_msr_set(struct vlapic *vlapic, uint64_t value)
{
    uint64_t guest_tsc;
    struct vcpu *v = vlapic_vcpu(vlapic);

    if ( vlapic_hw_disabled(vlapic) )
        return;

    /* Writes are ignored unless LVTT is in TSC-deadline mode. */
    if ( !vlapic_lvtt_tdt(vlapic) )
    {
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "ignore tsc deadline msr write");
        return;
    }

    /* new_value = 0, >0 && <= now, > now */
    guest_tsc = hvm_get_guest_tsc(v);
    if ( value > guest_tsc )
    {
        /* Deadline in the future: convert TSC delta to guest time. */
        uint64_t delta = gtsc_to_gtime(v->domain, value - guest_tsc);
        delta = max_t(s64, delta, 0);

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "delta[0x%016"PRIx64"]", delta);

        vlapic->hw.tdt_msr = value;
        /* .... reprogram tdt timer */
        TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(delta),
                        TRC_PAR_LONG(0LL), vlapic->pt.irq);
        create_periodic_time(v, &vlapic->pt, delta, 0,
                             vlapic->pt.irq, vlapic_tdt_pt_cb,
                             &vlapic->timer_last_update);
        vlapic->timer_last_update = vlapic->pt.last_plt_gtime;
    }
    else
    {
        /* Deadline is zero or already passed: the MSR reads back as 0. */
        vlapic->hw.tdt_msr = 0;

        /* trigger a timer event if needed */
        if ( value > 0 )
        {
            /* Past deadline: fire immediately (period 0). */
            TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(0LL),
                            TRC_PAR_LONG(0LL), vlapic->pt.irq);
            create_periodic_time(v, &vlapic->pt, 0, 0,
                                 vlapic->pt.irq, vlapic_tdt_pt_cb,
                                 &vlapic->timer_last_update);
            vlapic->timer_last_update = vlapic->pt.last_plt_gtime;
        }
        else
        {
            /* .... stop tdt timer */
            TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
            destroy_periodic_time(&vlapic->pt);
        }

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "value[0x%016"PRIx64"]", value);
    }

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "tdt_msr[0x%016"PRIx64"],"
                " gtsc[0x%016"PRIx64"]",
                vlapic->hw.tdt_msr, guest_tsc);
}
1159 | | |
1160 | | static int __vlapic_accept_pic_intr(struct vcpu *v) |
1161 | 0 | { |
1162 | 0 | struct domain *d = v->domain; |
1163 | 0 | struct vlapic *vlapic = vcpu_vlapic(v); |
1164 | 0 | uint32_t lvt0 = vlapic_get_reg(vlapic, APIC_LVT0); |
1165 | 0 | union vioapic_redir_entry redir0; |
1166 | 0 |
|
1167 | 0 | ASSERT(has_vpic(d)); |
1168 | 0 |
|
1169 | 0 | if ( !has_vioapic(d) ) |
1170 | 0 | return 0; |
1171 | 0 |
|
1172 | 0 | redir0 = domain_vioapic(d, 0)->redirtbl[0]; |
1173 | 0 |
|
1174 | 0 | /* We deliver 8259 interrupts to the appropriate CPU as follows. */ |
1175 | 0 | return ((/* IOAPIC pin0 is unmasked and routing to this LAPIC? */ |
1176 | 0 | ((redir0.fields.delivery_mode == dest_ExtINT) && |
1177 | 0 | !redir0.fields.mask && |
1178 | 0 | redir0.fields.dest_id == VLAPIC_ID(vlapic) && |
1179 | 0 | !vlapic_disabled(vlapic)) || |
1180 | 0 | /* LAPIC has LVT0 unmasked for ExtInts? */ |
1181 | 0 | ((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) || |
1182 | 0 | /* LAPIC is fully disabled? */ |
1183 | 0 | vlapic_hw_disabled(vlapic))); |
1184 | 0 | } |
1185 | | |
1186 | | int vlapic_accept_pic_intr(struct vcpu *v) |
1187 | 7.81M | { |
1188 | 7.85M | if ( vlapic_hw_disabled(vcpu_vlapic(v)) || !has_vpic(v->domain) ) |
1189 | 7.88M | return 0; |
1190 | 7.81M | |
1191 | 18.4E | TRACE_2D(TRC_HVM_EMUL_LAPIC_PIC_INTR, |
1192 | 18.4E | (v == v->domain->arch.hvm_domain.i8259_target), |
1193 | 18.4E | v ? __vlapic_accept_pic_intr(v) : -1); |
1194 | 18.4E | |
1195 | 18.4E | return ((v == v->domain->arch.hvm_domain.i8259_target) && |
1196 | 0 | __vlapic_accept_pic_intr(v)); |
1197 | 7.81M | } |
1198 | | |
/*
 * Recompute which vCPU should receive virtual-8259 output: the first vCPU
 * whose LAPIC accepts ExtINT, falling back to vCPU 0 (or NULL if none
 * exist).  Re-targets global periodic timers when the choice changes.
 */
void vlapic_adjust_i8259_target(struct domain *d)
{
    struct vcpu *v;

    if ( !has_vpic(d) )
        return;

    for_each_vcpu ( d, v )
        if ( __vlapic_accept_pic_intr(v) )
            goto found;

    /* No vCPU accepts ExtINT: default to vCPU 0 if it exists. */
    v = d->vcpu ? d->vcpu[0] : NULL;

 found:
    if ( d->arch.hvm_domain.i8259_target == v )
        return;
    d->arch.hvm_domain.i8259_target = v;
    /* Global (domain-wide) periodic timers must follow the new target. */
    pt_adjust_global_vcpu_target(v);
}
1218 | | |
1219 | | int vlapic_virtual_intr_delivery_enabled(void) |
1220 | 12.6k | { |
1221 | 12.6k | if ( hvm_funcs.virtual_intr_delivery_enabled ) |
1222 | 12.6k | return hvm_funcs.virtual_intr_delivery_enabled(); |
1223 | 12.6k | else |
1224 | 18.4E | return 0; |
1225 | 12.6k | } |
1226 | | |
/*
 * Return the vector of the highest-priority deliverable interrupt in the
 * IRR, or -1 if nothing can be injected right now (APIC disabled, IRR
 * empty, or an equal/higher-priority vector is still in service).
 */
int vlapic_has_pending_irq(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    int irr, vector, isr;

    if ( !vlapic_enabled(vlapic) )
        return -1;

    irr = vlapic_find_highest_irr(vlapic);
    if ( irr == -1 )
        return -1;

    /* With hardware virtual interrupt delivery, priority is enforced by
     * the CPU itself (but not while running a nested guest). */
    if ( vlapic_virtual_intr_delivery_enabled() &&
         !nestedhvm_vcpu_in_guestmode(v) )
        return irr;

    /*
     * If APIC assist was used then there may have been no EOI so
     * we need to clear the requisite bit from the ISR here, before
     * comparing with the IRR.
     */
    vector = viridian_complete_apic_assist(v);
    if ( vector )
        vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);

    isr = vlapic_find_highest_isr(vlapic);
    if ( isr == -1 )
        return irr;

    /*
     * A vector is pending in the ISR so, regardless of whether the new
     * vector in the IRR is lower or higher in priority, any pending
     * APIC assist must be aborted to ensure an EOI.
     */
    viridian_abort_apic_assist(v);

    /* Deliver only if the IRR priority class beats the in-service class. */
    return ((isr & 0xf0) < (irr & 0xf0)) ? irr : -1;
}
1265 | | |
/*
 * Acknowledge delivery of @vector: move it from IRR to ISR, optionally
 * arming Viridian APIC assist so the guest can skip the EOI exit for an
 * edge-triggered vector with an otherwise-empty ISR.  With hardware
 * virtual interrupt delivery (and !force_ack) the CPU acks on its own.
 * Always returns 1.
 */
int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    int isr;

    if ( !force_ack &&
         vlapic_virtual_intr_delivery_enabled() )
        return 1;

    /* If there's no chance of using APIC assist then bail now. */
    if ( !has_viridian_apic_assist(v->domain) ||
         vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
        goto done;

    isr = vlapic_find_highest_isr(vlapic);
    if ( isr == -1 )
    {
        /*
         * This vector is edge triggered and no other vectors are pending
         * in the ISR so we can use APIC assist to avoid exiting for EOI.
         */
        viridian_start_apic_assist(v, vector);
    }

 done:
    vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
    vlapic_clear_irr(vector, vlapic);
    return 1;
}
1295 | | |
1296 | | bool_t is_vlapic_lvtpc_enabled(struct vlapic *vlapic) |
1297 | 0 | { |
1298 | 0 | return (vlapic_enabled(vlapic) && |
1299 | 0 | !(vlapic_get_reg(vlapic, APIC_LVTPC) & APIC_LVT_MASKED)); |
1300 | 0 | } |
1301 | | |
1302 | | /* Reset the VLAPIC back to its init state. */ |
1303 | | static void vlapic_do_init(struct vlapic *vlapic) |
1304 | 12 | { |
1305 | 12 | int i; |
1306 | 12 | |
1307 | 12 | if ( !has_vlapic(vlapic_vcpu(vlapic)->domain) ) |
1308 | 0 | return; |
1309 | 12 | |
1310 | 12 | vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION); |
1311 | 12 | |
1312 | 108 | for ( i = 0; i < 8; i++ ) |
1313 | 96 | { |
1314 | 96 | vlapic_set_reg(vlapic, APIC_IRR + 0x10 * i, 0); |
1315 | 96 | vlapic_set_reg(vlapic, APIC_ISR + 0x10 * i, 0); |
1316 | 96 | vlapic_set_reg(vlapic, APIC_TMR + 0x10 * i, 0); |
1317 | 96 | } |
1318 | 12 | vlapic_set_reg(vlapic, APIC_ICR, 0); |
1319 | 12 | vlapic_set_reg(vlapic, APIC_ICR2, 0); |
1320 | 12 | /* |
1321 | 12 | * LDR is read-only in x2APIC mode. Preserve its value when handling |
1322 | 12 | * INIT signal in x2APIC mode. |
1323 | 12 | */ |
1324 | 12 | if ( !vlapic_x2apic_mode(vlapic) ) |
1325 | 12 | vlapic_set_reg(vlapic, APIC_LDR, 0); |
1326 | 12 | vlapic_set_reg(vlapic, APIC_TASKPRI, 0); |
1327 | 12 | vlapic_set_reg(vlapic, APIC_TMICT, 0); |
1328 | 12 | vlapic_set_reg(vlapic, APIC_TMCCT, 0); |
1329 | 12 | vlapic_set_tdcr(vlapic, 0); |
1330 | 12 | |
1331 | 12 | vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU); |
1332 | 12 | |
1333 | 84 | for ( i = 0; i < VLAPIC_LVT_NUM; i++ ) |
1334 | 72 | vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); |
1335 | 12 | |
1336 | 12 | vlapic_set_reg(vlapic, APIC_SPIV, 0xff); |
1337 | 12 | vlapic->hw.disabled |= VLAPIC_SW_DISABLED; |
1338 | 12 | |
1339 | 12 | TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER); |
1340 | 12 | destroy_periodic_time(&vlapic->pt); |
1341 | 12 | } |
1342 | | |
1343 | | /* Reset the VLAPIC back to its power-on/reset state. */ |
1344 | | void vlapic_reset(struct vlapic *vlapic) |
1345 | 12 | { |
1346 | 12 | const struct vcpu *v = vlapic_vcpu(vlapic); |
1347 | 12 | |
1348 | 12 | if ( !has_vlapic(v->domain) ) |
1349 | 0 | return; |
1350 | 12 | |
1351 | 12 | vlapic->hw.apic_base_msr = (MSR_IA32_APICBASE_ENABLE | |
1352 | 12 | APIC_DEFAULT_PHYS_BASE); |
1353 | 12 | if ( v->vcpu_id == 0 ) |
1354 | 1 | vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP; |
1355 | 12 | |
1356 | 12 | vlapic_set_reg(vlapic, APIC_ID, (v->vcpu_id * 2) << 24); |
1357 | 12 | vlapic_do_init(vlapic); |
1358 | 12 | } |
1359 | | |
/* rearm the actimer if needed, after a HVM restore */
static void lapic_rearm(struct vlapic *s)
{
    unsigned long tmict;
    uint64_t period, tdt_msr;

    s->pt.irq = vlapic_get_reg(s, APIC_LVTT) & APIC_VECTOR_MASK;

    if ( vlapic_lvtt_tdt(s) )
    {
        /* TSC-deadline mode: replaying the saved MSR re-arms the timer. */
        if ( (tdt_msr = vlapic_tdt_msr_get(s)) != 0 )
            vlapic_tdt_msr_set(s, tdt_msr);
        return;
    }

    /* No initial count means the timer was not running. */
    if ( (tmict = vlapic_get_reg(s, APIC_TMICT)) == 0 )
        return;

    /* Reconstruct the timer period from the saved count and divisor. */
    period = ((uint64_t)APIC_BUS_CYCLE_NS *
              (uint32_t)tmict * s->hw.timer_divisor);
    TRACE_2_LONG_3D(TRC_HVM_EMUL_LAPIC_START_TIMER, TRC_PAR_LONG(period),
                    TRC_PAR_LONG(vlapic_lvtt_period(s) ? period : 0LL), s->pt.irq);
    create_periodic_time(vlapic_vcpu(s), &s->pt, period,
                         vlapic_lvtt_period(s) ? period : 0,
                         s->pt.irq,
                         vlapic_lvtt_period(s) ? vlapic_pt_cb : NULL,
                         &s->timer_last_update);
    s->timer_last_update = s->pt.last_plt_gtime;
}
1389 | | |
1390 | | static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h) |
1391 | 0 | { |
1392 | 0 | struct vcpu *v; |
1393 | 0 | struct vlapic *s; |
1394 | 0 | int rc = 0; |
1395 | 0 |
|
1396 | 0 | if ( !has_vlapic(d) ) |
1397 | 0 | return 0; |
1398 | 0 |
|
1399 | 0 | for_each_vcpu ( d, v ) |
1400 | 0 | { |
1401 | 0 | s = vcpu_vlapic(v); |
1402 | 0 | if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) != 0 ) |
1403 | 0 | break; |
1404 | 0 | } |
1405 | 0 |
|
1406 | 0 | return rc; |
1407 | 0 | } |
1408 | | |
1409 | | static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h) |
1410 | 0 | { |
1411 | 0 | struct vcpu *v; |
1412 | 0 | struct vlapic *s; |
1413 | 0 | int rc = 0; |
1414 | 0 |
|
1415 | 0 | if ( !has_vlapic(d) ) |
1416 | 0 | return 0; |
1417 | 0 |
|
1418 | 0 | for_each_vcpu ( d, v ) |
1419 | 0 | { |
1420 | 0 | if ( hvm_funcs.sync_pir_to_irr ) |
1421 | 0 | hvm_funcs.sync_pir_to_irr(v); |
1422 | 0 |
|
1423 | 0 | s = vcpu_vlapic(v); |
1424 | 0 | if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 ) |
1425 | 0 | break; |
1426 | 0 | } |
1427 | 0 |
|
1428 | 0 | return rc; |
1429 | 0 | } |
1430 | | |
/*
 * Following lapic_load_hidden()/lapic_load_regs() we may need to
 * correct ID and LDR when they come from an old, broken hypervisor.
 */
static void lapic_load_fixup(struct vlapic *vlapic)
{
    uint32_t id = vlapic->loaded.id;

    /* LDR == 1 in x2APIC mode is the signature of the old broken logic. */
    if ( vlapic_x2apic_mode(vlapic) && id && vlapic->loaded.ldr == 1 )
    {
        /*
         * This is optional: ID != 0 contradicts LDR == 1. It's being added
         * to aid in eventual debugging of issues arising from the fixup done
         * here, but can be dropped as soon as it is found to conflict with
         * other (future) changes.
         */
        if ( GET_xAPIC_ID(id) != vlapic_vcpu(vlapic)->vcpu_id * 2 ||
             id != SET_xAPIC_ID(GET_xAPIC_ID(id)) )
            printk(XENLOG_G_WARNING "%pv: bogus APIC ID %#x loaded\n",
                   vlapic_vcpu(vlapic), id);
        /* Recompute the architected x2APIC ID/LDR pair. */
        set_x2apic_id(vlapic);
    }
    else /* Undo an eventual earlier fixup. */
    {
        vlapic_set_reg(vlapic, APIC_ID, id);
        vlapic_set_reg(vlapic, APIC_LDR, vlapic->loaded.ldr);
    }
}
1459 | | |
1460 | | static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h) |
1461 | 0 | { |
1462 | 0 | uint16_t vcpuid; |
1463 | 0 | struct vcpu *v; |
1464 | 0 | struct vlapic *s; |
1465 | 0 |
|
1466 | 0 | if ( !has_vlapic(d) ) |
1467 | 0 | return -ENODEV; |
1468 | 0 |
|
1469 | 0 | /* Which vlapic to load? */ |
1470 | 0 | vcpuid = hvm_load_instance(h); |
1471 | 0 | if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL ) |
1472 | 0 | { |
1473 | 0 | dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no apic%u\n", |
1474 | 0 | d->domain_id, vcpuid); |
1475 | 0 | return -EINVAL; |
1476 | 0 | } |
1477 | 0 | s = vcpu_vlapic(v); |
1478 | 0 | |
1479 | 0 | if ( hvm_load_entry_zeroextend(LAPIC, h, &s->hw) != 0 ) |
1480 | 0 | return -EINVAL; |
1481 | 0 |
|
1482 | 0 | s->loaded.hw = 1; |
1483 | 0 | if ( s->loaded.regs ) |
1484 | 0 | lapic_load_fixup(s); |
1485 | 0 |
|
1486 | 0 | if ( !(s->hw.apic_base_msr & MSR_IA32_APICBASE_ENABLE) && |
1487 | 0 | unlikely(vlapic_x2apic_mode(s)) ) |
1488 | 0 | return -EINVAL; |
1489 | 0 |
|
1490 | 0 | vmx_vlapic_msr_changed(v); |
1491 | 0 |
|
1492 | 0 | return 0; |
1493 | 0 | } |
1494 | | |
1495 | | static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h) |
1496 | 0 | { |
1497 | 0 | uint16_t vcpuid; |
1498 | 0 | struct vcpu *v; |
1499 | 0 | struct vlapic *s; |
1500 | 0 |
|
1501 | 0 | if ( !has_vlapic(d) ) |
1502 | 0 | return -ENODEV; |
1503 | 0 |
|
1504 | 0 | /* Which vlapic to load? */ |
1505 | 0 | vcpuid = hvm_load_instance(h); |
1506 | 0 | if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL ) |
1507 | 0 | { |
1508 | 0 | dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no apic%u\n", |
1509 | 0 | d->domain_id, vcpuid); |
1510 | 0 | return -EINVAL; |
1511 | 0 | } |
1512 | 0 | s = vcpu_vlapic(v); |
1513 | 0 | |
1514 | 0 | if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 ) |
1515 | 0 | return -EINVAL; |
1516 | 0 |
|
1517 | 0 | s->loaded.id = vlapic_get_reg(s, APIC_ID); |
1518 | 0 | s->loaded.ldr = vlapic_get_reg(s, APIC_LDR); |
1519 | 0 | s->loaded.regs = 1; |
1520 | 0 | if ( s->loaded.hw ) |
1521 | 0 | lapic_load_fixup(s); |
1522 | 0 |
|
1523 | 0 | if ( hvm_funcs.process_isr ) |
1524 | 0 | hvm_funcs.process_isr(vlapic_find_highest_isr(s), v); |
1525 | 0 |
|
1526 | 0 | vlapic_adjust_i8259_target(d); |
1527 | 0 | lapic_rearm(s); |
1528 | 0 | return 0; |
1529 | 0 | } |
1530 | | |
/* Register the per-vCPU hidden-state and register-file save/load handlers. */
HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
                          1, HVMSR_PER_VCPU);
HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
                          1, HVMSR_PER_VCPU);
1535 | | |
1536 | | int vlapic_init(struct vcpu *v) |
1537 | 12 | { |
1538 | 12 | struct vlapic *vlapic = vcpu_vlapic(v); |
1539 | 12 | |
1540 | 12 | HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id); |
1541 | 12 | |
1542 | 12 | if ( !has_vlapic(v->domain) ) |
1543 | 0 | { |
1544 | 0 | vlapic->hw.disabled = VLAPIC_HW_DISABLED; |
1545 | 0 | return 0; |
1546 | 0 | } |
1547 | 12 | |
1548 | 12 | vlapic->pt.source = PTSRC_lapic; |
1549 | 12 | |
1550 | 12 | if (vlapic->regs_page == NULL) |
1551 | 12 | { |
1552 | 12 | vlapic->regs_page = alloc_domheap_page(v->domain, MEMF_no_owner); |
1553 | 12 | if ( vlapic->regs_page == NULL ) |
1554 | 0 | { |
1555 | 0 | dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n", |
1556 | 0 | v->domain->domain_id, v->vcpu_id); |
1557 | 0 | return -ENOMEM; |
1558 | 0 | } |
1559 | 12 | } |
1560 | 12 | if (vlapic->regs == NULL) |
1561 | 12 | { |
1562 | 12 | vlapic->regs = __map_domain_page_global(vlapic->regs_page); |
1563 | 12 | if ( vlapic->regs == NULL ) |
1564 | 0 | { |
1565 | 0 | dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n", |
1566 | 0 | v->domain->domain_id, v->vcpu_id); |
1567 | 0 | return -ENOMEM; |
1568 | 0 | } |
1569 | 12 | } |
1570 | 12 | clear_page(vlapic->regs); |
1571 | 12 | |
1572 | 12 | vlapic_reset(vlapic); |
1573 | 12 | |
1574 | 12 | spin_lock_init(&vlapic->esr_lock); |
1575 | 12 | |
1576 | 12 | tasklet_init(&vlapic->init_sipi.tasklet, |
1577 | 12 | vlapic_init_sipi_action, |
1578 | 12 | (unsigned long)v); |
1579 | 12 | |
1580 | 12 | if ( v->vcpu_id == 0 ) |
1581 | 1 | register_mmio_handler(v->domain, &vlapic_mmio_ops); |
1582 | 12 | |
1583 | 12 | return 0; |
1584 | 12 | } |
1585 | | |
/*
 * Tear down the vLAPIC state for @v: kill the INIT/SIPI tasklet, stop the
 * local timer, then release the register-page mapping and the page itself.
 * NOTE(review): assumes regs/regs_page are valid whenever has_vlapic() is
 * true — confirm a partially-failed vlapic_init() cannot reach here with
 * NULL regs/regs_page.
 */
void vlapic_destroy(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);

    if ( !has_vlapic(v->domain) )
        return;

    tasklet_kill(&vlapic->init_sipi.tasklet);
    TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
    destroy_periodic_time(&vlapic->pt);
    unmap_domain_page_global(vlapic->regs);
    free_domheap_page(vlapic->regs_page);
}
1599 | | |
1600 | | /* |
1601 | | * Local variables: |
1602 | | * mode: C |
1603 | | * c-file-style: "BSD" |
1604 | | * c-basic-offset: 4 |
1605 | | * indent-tabs-mode: nil |
1606 | | * End: |
1607 | | */ |