debuggers.hg

view xen/arch/x86/hvm/vlapic.c @ 19950:721c14d7f60b

x86 hvm: Use 'x' as parameter name for macros converting between
{vcpu,domain} and {vlapic,vpic,vrtc,hpet}. Completely avoids
accidental aliasing.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jul 08 14:22:00 2009 +0100 (2009-07-08)
parents a29bb4efff00
children 18e60f40c44b
line source
1 /*
2 * vlapic.c: virtualize LAPIC for HVM vcpus.
3 *
4 * Copyright (c) 2004, Intel Corporation.
5 * Copyright (c) 2006 Keir Fraser, XenSource Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #include <xen/config.h>
22 #include <xen/types.h>
23 #include <xen/mm.h>
24 #include <xen/xmalloc.h>
25 #include <xen/domain.h>
26 #include <xen/domain_page.h>
27 #include <xen/event.h>
28 #include <xen/trace.h>
29 #include <xen/lib.h>
30 #include <xen/sched.h>
31 #include <xen/numa.h>
32 #include <asm/current.h>
33 #include <asm/page.h>
34 #include <asm/hvm/hvm.h>
35 #include <asm/hvm/io.h>
36 #include <asm/hvm/support.h>
37 #include <asm/hvm/vmx/vmx.h>
38 #include <public/hvm/ioreq.h>
39 #include <public/hvm/params.h>
#define VLAPIC_VERSION                  0x00050014
#define VLAPIC_LVT_NUM                  6

/* vlapic's frequency is 100 MHz */
#define APIC_BUS_CYCLE_NS               10

/*
 * Writable-bit masks for LVT registers.  Parenthesised so the macros
 * expand safely inside larger expressions (CERT PRE02-C).
 */
#define LVT_MASK \
    (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK \
    (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
     APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
/*
 * Per-register masks of guest-writable LVT bits, indexed by
 * (offset - APIC_LVTT) >> 4 — see the LVT cases in vlapic_write().
 */
static unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
{
    /* LVTT */
    LVT_MASK | APIC_LVT_TIMER_PERIODIC,
    /* LVTTHMR */
    LVT_MASK | APIC_MODE_MASK,
    /* LVTPC */
    LVT_MASK | APIC_MODE_MASK,
    /* LVT0-1 */
    LINT_MASK, LINT_MASK,
    /* LVTERR */
    LVT_MASK
};
/* Following could belong in apicdef.h */
#define APIC_SHORT_MASK                  0xc0000
#define APIC_DEST_NOSHORT                0x0
#define APIC_DEST_MASK                   0x800

/* Vector field of the given LVT register. */
#define vlapic_lvt_vector(vlapic, lvt_type) \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_VECTOR_MASK)

/* Delivery-mode field of the given LVT register. */
#define vlapic_lvt_dm(vlapic, lvt_type) \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_MODE_MASK)

/* Non-zero iff the local timer is programmed for periodic mode. */
#define vlapic_lvtt_period(vlapic) \
    (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC)
/*
 * Generic APIC bitmap vector update & search routines.
 *
 * The 256-bit IRR/ISR/TMR registers are laid out as eight 32-bit words,
 * each at a 0x10-byte stride, so REG_POS() yields a byte offset.
 */

#define VEC_POS(v) ((v)%32)
#define REG_POS(v) (((v)/32) * 0x10)
#define vlapic_test_and_set_vector(vec, bitmap)                 \
    test_and_set_bit(VEC_POS(vec),                              \
                     (unsigned long *)((bitmap) + REG_POS(vec)))
#define vlapic_test_and_clear_vector(vec, bitmap)               \
    test_and_clear_bit(VEC_POS(vec),                            \
                       (unsigned long *)((bitmap) + REG_POS(vec)))
#define vlapic_set_vector(vec, bitmap)                          \
    set_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
#define vlapic_clear_vector(vec, bitmap)                        \
    clear_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
100 static int vlapic_find_highest_vector(void *bitmap)
101 {
102 uint32_t *word = bitmap;
103 int word_offset = MAX_VECTOR / 32;
105 /* Work backwards through the bitmap (first 32-bit word in every four). */
106 while ( (word_offset != 0) && (word[(--word_offset)*4] == 0) )
107 continue;
109 return (fls(word[word_offset*4]) - 1) + (word_offset * 32);
110 }
/*
 * IRR-specific bitmap update & search routines.
 */

/* Latch @vector in the IRR; return its previous state (non-zero if set). */
static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
{
    return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]);
}
/* Remove @vector from the IRR (e.g. when it moves into service). */
static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
{
    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
}
/* Highest pending vector in the IRR, or -1 if none. */
static int vlapic_find_highest_irr(struct vlapic *vlapic)
{
    return vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
}
132 int vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig)
133 {
134 int ret;
136 ret = !vlapic_test_and_set_irr(vec, vlapic);
137 if ( trig )
138 vlapic_set_vector(vec, &vlapic->regs->data[APIC_TMR]);
140 /* We may need to wake up target vcpu, besides set pending bit here */
141 return ret;
142 }
/* Highest in-service vector in the ISR, or -1 if none. */
static int vlapic_find_highest_isr(struct vlapic *vlapic)
{
    return vlapic_find_highest_vector(&vlapic->regs->data[APIC_ISR]);
}
149 uint32_t vlapic_get_ppr(struct vlapic *vlapic)
150 {
151 uint32_t tpr, isrv, ppr;
152 int isr;
154 tpr = vlapic_get_reg(vlapic, APIC_TASKPRI);
155 isr = vlapic_find_highest_isr(vlapic);
156 isrv = (isr != -1) ? isr : 0;
158 if ( (tpr & 0xf0) >= (isrv & 0xf0) )
159 ppr = tpr & 0xff;
160 else
161 ppr = isrv & 0xf0;
163 HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT,
164 "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
165 vlapic, ppr, isr, isrv);
167 return ppr;
168 }
170 int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda)
171 {
172 int result = 0;
173 uint8_t logical_id;
175 logical_id = GET_xAPIC_LOGICAL_ID(vlapic_get_reg(vlapic, APIC_LDR));
177 switch ( vlapic_get_reg(vlapic, APIC_DFR) )
178 {
179 case APIC_DFR_FLAT:
180 if ( logical_id & mda )
181 result = 1;
182 break;
183 case APIC_DFR_CLUSTER:
184 if ( ((logical_id >> 4) == (mda >> 0x4)) && (logical_id & mda & 0xf) )
185 result = 1;
186 break;
187 default:
188 gdprintk(XENLOG_WARNING, "Bad DFR value for lapic of vcpu %d: %08x\n",
189 vlapic_vcpu(vlapic)->vcpu_id,
190 vlapic_get_reg(vlapic, APIC_DFR));
191 break;
192 }
194 return result;
195 }
197 static int vlapic_match_dest(struct vcpu *v, struct vlapic *source,
198 int short_hand, int dest, int dest_mode)
199 {
200 int result = 0;
201 struct vlapic *target = vcpu_vlapic(v);
203 HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
204 "dest_mode 0x%x, short_hand 0x%x",
205 target, source, dest, dest_mode, short_hand);
207 switch ( short_hand )
208 {
209 case APIC_DEST_NOSHORT:
210 if ( dest_mode == 0 )
211 {
212 /* Physical mode. */
213 if ( (dest == 0xFF) || (dest == VLAPIC_ID(target)) )
214 result = 1;
215 }
216 else
217 {
218 /* Logical mode. */
219 result = vlapic_match_logical_addr(target, dest);
220 }
221 break;
223 case APIC_DEST_SELF:
224 if ( target == source )
225 result = 1;
226 break;
228 case APIC_DEST_ALLINC:
229 result = 1;
230 break;
232 case APIC_DEST_ALLBUT:
233 if ( target != source )
234 result = 1;
235 break;
237 default:
238 gdprintk(XENLOG_WARNING, "Bad dest shorthand value %x\n", short_hand);
239 break;
240 }
242 return result;
243 }
245 static int vlapic_vcpu_pause_async(struct vcpu *v)
246 {
247 vcpu_pause_nosync(v);
249 if ( v->is_running )
250 {
251 vcpu_unpause(v);
252 return 0;
253 }
255 sync_vcpu_execstate(v);
256 return 1;
257 }
/*
 * Tasklet body: complete an INIT-triggered reset of the vcpu passed (cast)
 * in @_vcpu.  Runs asynchronously because the target must be fully
 * descheduled before its state can be reset; reschedules itself until
 * the pause succeeds.
 */
static void vlapic_init_action(unsigned long _vcpu)
{
    struct vcpu *v = (struct vcpu *)_vcpu;
    struct domain *d = v->domain;
    bool_t fpu_initialised;

    /* If the VCPU is not on its way down we have nothing to do. */
    if ( !test_bit(_VPF_down, &v->pause_flags) )
        return;

    /* Target still running elsewhere: re-queue ourselves and retry. */
    if ( !vlapic_vcpu_pause_async(v) )
    {
        tasklet_schedule(&vcpu_vlapic(v)->init_tasklet);
        return;
    }

    /* Reset necessary VCPU state. This does not include FPU state. */
    domain_lock(d);
    fpu_initialised = v->fpu_initialised;
    vcpu_reset(v);
    v->fpu_initialised = fpu_initialised;
    vlapic_reset(vcpu_vlapic(v));
    domain_unlock(d);

    vcpu_unpause(v);
}
286 static int vlapic_accept_init(struct vcpu *v)
287 {
288 /* Nothing to do if the VCPU is already reset. */
289 if ( !v->is_initialised )
290 return X86EMUL_OKAY;
292 /* Asynchronously take the VCPU down and schedule reset work. */
293 hvm_vcpu_down(v);
294 tasklet_schedule(&vcpu_vlapic(v)->init_tasklet);
295 return X86EMUL_RETRY;
296 }
298 static int vlapic_accept_sipi(struct vcpu *v, int trampoline_vector)
299 {
300 /* If the VCPU is not on its way down we have nothing to do. */
301 if ( !test_bit(_VPF_down, &v->pause_flags) )
302 return X86EMUL_OKAY;
304 if ( !vlapic_vcpu_pause_async(v) )
305 return X86EMUL_RETRY;
307 hvm_vcpu_reset_state(v, trampoline_vector << 8, 0);
309 vcpu_unpause(v);
311 return X86EMUL_OKAY;
312 }
/*
 * Add a pending IRQ into lapic.
 * Returns an X86EMUL_* code: RETRY only for INIT/STARTUP deliveries that
 * could not complete synchronously; unknown modes crash the domain.
 */
static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
                             int vector, int level, int trig_mode)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    int rc = X86EMUL_OKAY;

    switch ( delivery_mode )
    {
    case APIC_DM_FIXED:
    case APIC_DM_LOWEST:
        /* FIXME add logic for vcpu on reset */
        if ( unlikely(!vlapic_enabled(vlapic)) )
            break;

        /* Already pending and level-triggered: nothing further to record. */
        if ( vlapic_test_and_set_irr(vector, vlapic) && trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode repeatedly for vector %d", vector);
            break;
        }

        /* Level-triggered interrupts are also latched in the TMR. */
        if ( trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode for vector %d", vector);
            vlapic_set_vector(vector, &vlapic->regs->data[APIC_TMR]);
        }

        vcpu_kick(v);
        break;

    case APIC_DM_REMRD:
        gdprintk(XENLOG_WARNING, "Ignoring delivery mode 3\n");
        break;

    case APIC_DM_SMI:
        gdprintk(XENLOG_WARNING, "Ignoring guest SMI\n");
        break;

    case APIC_DM_NMI:
        /* Only kick if this NMI was newly latched. */
        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);
        break;

    case APIC_DM_INIT:
        /* No work on INIT de-assert for P4-type APIC. */
        if ( trig_mode && !(level & APIC_INT_ASSERT) )
            break;
        rc = vlapic_accept_init(v);
        break;

    case APIC_DM_STARTUP:
        rc = vlapic_accept_sipi(v, vector);
        break;

    default:
        gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode %x\n",
                 delivery_mode);
        domain_crash(v->domain);
    }

    return rc;
}
/*
 * This function is used by both ioapic and lapic. The bitmap is for vcpu_id.
 * Round-robin search for the enabled LAPIC with the lowest PPR among the
 * vcpus named in @bitmap; returns NULL if none qualify.
 */
struct vlapic *apic_lowest_prio(struct domain *d, uint32_t bitmap)
{
    int old = d->arch.hvm_domain.irq.round_robin_prev_vcpu;
    uint32_t ppr, target_ppr = UINT_MAX;
    struct vlapic *vlapic, *target = NULL;
    struct vcpu *v;

    /* Bail out if the domain has no vcpus or the starting vcpu is gone. */
    if ( unlikely(!d->vcpu) || unlikely((v = d->vcpu[old]) == NULL) )
        return NULL;

    /* One full pass over the vcpu list, starting after the previous winner. */
    do {
        v = v->next_in_list ? : d->vcpu[0];
        vlapic = vcpu_vlapic(v);
        /* Candidate must be in the bitmap, enabled, and strictly better. */
        if ( test_bit(v->vcpu_id, &bitmap) && vlapic_enabled(vlapic) &&
             ((ppr = vlapic_get_ppr(vlapic)) < target_ppr) )
        {
            target = vlapic;
            target_ppr = ppr;
        }
    } while ( v->vcpu_id != old );

    /* Remember the winner as the starting point for the next search. */
    if ( target != NULL )
        d->arch.hvm_domain.irq.round_robin_prev_vcpu =
            vlapic_vcpu(target)->vcpu_id;

    return target;
}
408 void vlapic_EOI_set(struct vlapic *vlapic)
409 {
410 int vector = vlapic_find_highest_isr(vlapic);
412 /* Some EOI writes may not have a matching to an in-service interrupt. */
413 if ( vector == -1 )
414 return;
416 vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);
418 if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) )
419 vioapic_update_EOI(vlapic_domain(vlapic), vector);
421 hvm_dpci_msi_eoi(current->domain, vector);
422 }
/*
 * Deliver the IPI described by the ICR pair (icr_low, icr_high) to every
 * matching destination LAPIC.  Returns an X86EMUL_* code; X86EMUL_RETRY
 * means an INIT/SIPI target could not be stopped synchronously and the
 * originating instruction must be re-executed.
 */
int vlapic_ipi(
    struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high)
{
    unsigned int dest = GET_xAPIC_DEST_FIELD(icr_high);
    unsigned int short_hand = icr_low & APIC_SHORT_MASK;
    unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
    unsigned int level = icr_low & APIC_INT_ASSERT;
    unsigned int dest_mode = icr_low & APIC_DEST_MASK;
    unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
    unsigned int vector = icr_low & APIC_VECTOR_MASK;

    struct vlapic *target;
    struct vcpu *v;
    uint32_t lpr_map = 0;
    int rc = X86EMUL_OKAY;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
                "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x",
                icr_high, icr_low, short_hand, dest,
                trig_mode, level, dest_mode, delivery_mode, vector);

    for_each_vcpu ( vlapic_domain(vlapic), v )
    {
        if ( vlapic_match_dest(v, vlapic, short_hand, dest, dest_mode) )
        {
            /* Lowest-priority targets are collected and resolved below. */
            if ( delivery_mode == APIC_DM_LOWEST )
                __set_bit(v->vcpu_id, &lpr_map);
            else
                rc = vlapic_accept_irq(v, delivery_mode,
                                       vector, level, trig_mode);
        }

        /* Stop at the first delivery that needs a retry. */
        if ( rc != X86EMUL_OKAY )
            break;
    }

    if ( delivery_mode == APIC_DM_LOWEST )
    {
        /* Deliver to the single lowest-priority candidate, if any. */
        target = apic_lowest_prio(vlapic_domain(vlapic), lpr_map);
        if ( target != NULL )
            rc = vlapic_accept_irq(vlapic_vcpu(target), delivery_mode,
                                   vector, level, trig_mode);
    }

    return rc;
}
472 static uint32_t vlapic_get_tmcct(struct vlapic *vlapic)
473 {
474 struct vcpu *v = current;
475 uint32_t tmcct, tmict = vlapic_get_reg(vlapic, APIC_TMICT);
476 uint64_t counter_passed;
478 counter_passed = ((hvm_get_guest_time(v) - vlapic->timer_last_update)
479 / APIC_BUS_CYCLE_NS / vlapic->hw.timer_divisor);
480 tmcct = tmict - counter_passed;
482 HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
483 "timer initial count %d, timer current count %d, "
484 "offset %"PRId64,
485 tmict, tmcct, counter_passed);
487 return tmcct;
488 }
490 static void vlapic_set_tdcr(struct vlapic *vlapic, unsigned int val)
491 {
492 /* Only bits 0, 1 and 3 are settable; others are MBZ. */
493 val &= 0xb;
494 vlapic_set_reg(vlapic, APIC_TDCR, val);
496 /* Update the demangled hw.timer_divisor. */
497 val = ((val & 3) | ((val & 8) >> 1)) + 1;
498 vlapic->hw.timer_divisor = 1 << (val & 7);
500 HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
501 "timer_divisor: %d", vlapic->hw.timer_divisor);
502 }
504 static void vlapic_read_aligned(
505 struct vlapic *vlapic, unsigned int offset, unsigned int *result)
506 {
507 switch ( offset )
508 {
509 case APIC_PROCPRI:
510 *result = vlapic_get_ppr(vlapic);
511 break;
513 case APIC_TMCCT: /* Timer CCR */
514 *result = vlapic_get_tmcct(vlapic);
515 break;
517 default:
518 *result = vlapic_get_reg(vlapic, offset);
519 break;
520 }
521 }
/*
 * MMIO read handler for the APIC page.  1/2/4-byte reads are serviced
 * from the aligned 32-bit register containing the address; reads past
 * the last register return 0; misaligned multi-byte reads and bad
 * lengths crash the domain.
 */
static int vlapic_read(
    struct vcpu *v, unsigned long address,
    unsigned long len, unsigned long *pval)
{
    unsigned int alignment;
    unsigned int tmp;
    unsigned long result = 0;
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);

    /* Beyond the last register (TDCR): return the zero in 'result'. */
    if ( offset > (APIC_TDCR + 0x3) )
        goto out;

    alignment = offset & 0x3;

    vlapic_read_aligned(vlapic, offset & ~0x3, &tmp);
    switch ( len )
    {
    case 1:
        result = *((unsigned char *)&tmp + alignment);
        break;

    case 2:
        if ( alignment == 3 )
            goto unaligned_exit_and_crash;
        result = *(unsigned short *)((unsigned char *)&tmp + alignment);
        break;

    case 4:
        if ( alignment != 0 )
            goto unaligned_exit_and_crash;
        result = *(unsigned int *)((unsigned char *)&tmp + alignment);
        break;

    default:
        gdprintk(XENLOG_ERR, "Local APIC read with len=0x%lx, "
                 "should be 4 instead.\n", len);
        goto exit_and_crash;
    }

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
                "and the result is 0x%lx", offset, len, result);

 out:
    *pval = result;
    return X86EMUL_OKAY;

 unaligned_exit_and_crash:
    gdprintk(XENLOG_ERR, "Unaligned LAPIC read len=0x%lx at offset=0x%x.\n",
             len, offset);
 exit_and_crash:
    domain_crash(v->domain);
    return X86EMUL_OKAY;
}
578 void vlapic_pt_cb(struct vcpu *v, void *data)
579 {
580 *(s_time_t *)data = hvm_get_guest_time(v);
581 }
/*
 * MMIO write handler for the APIC page.  Narrow (1/2-byte) writes are
 * widened via read-modify-write of the containing 32-bit register;
 * misaligned multi-byte writes and bad lengths crash the domain.
 */
static int vlapic_write(struct vcpu *v, unsigned long address,
                        unsigned long len, unsigned long val)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);
    int rc = X86EMUL_OKAY;

    /* 0xb0 is the EOI register: too frequent to be worth logging. */
    if ( offset != 0xb0 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "offset 0x%x with length 0x%lx, and value is 0x%lx",
                    offset, len, val);

    /*
     * According to the IA32 Manual, all accesses should be 32 bits.
     * Some OSes do 8- or 16-byte accesses, however.
     */
    val = (uint32_t)val;
    if ( len != 4 )
    {
        unsigned long tmp;
        unsigned char alignment;

        gdprintk(XENLOG_INFO, "Notice: Local APIC write with len = %lx\n",len);

        alignment = offset & 0x3;
        /* Fetch the full register so the narrow write can be merged in. */
        (void)vlapic_read(v, offset & ~0x3, 4, &tmp);

        switch ( len )
        {
        case 1:
            val = ((tmp & ~(0xff << (8*alignment))) |
                   ((val & 0xff) << (8*alignment)));
            break;

        case 2:
            if ( alignment & 1 )
                goto unaligned_exit_and_crash;
            val = ((tmp & ~(0xffff << (8*alignment))) |
                   ((val & 0xffff) << (8*alignment)));
            break;

        default:
            gdprintk(XENLOG_ERR, "Local APIC write with len = %lx, "
                     "should be 4 instead\n", len);
            goto exit_and_crash;
        }
    }
    else if ( (offset & 0x3) != 0 )
        goto unaligned_exit_and_crash;

    offset &= ~0x3;

    switch ( offset )
    {
    case APIC_TASKPRI:
        vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
        break;

    case APIC_EOI:
        vlapic_EOI_set(vlapic);
        break;

    case APIC_LDR:
        vlapic_set_reg(vlapic, APIC_LDR, val & APIC_LDR_MASK);
        break;

    case APIC_DFR:
        /* Reserved low 28 bits read as all-ones. */
        vlapic_set_reg(vlapic, APIC_DFR, val | 0x0FFFFFFF);
        break;

    case APIC_SPIV:
        vlapic_set_reg(vlapic, APIC_SPIV, val & 0x3ff);

        if ( !(val & APIC_SPIV_APIC_ENABLED) )
        {
            int i;
            uint32_t lvt_val;

            /* Software disable also forces every LVT entry masked. */
            vlapic->hw.disabled |= VLAPIC_SW_DISABLED;

            for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
            {
                lvt_val = vlapic_get_reg(vlapic, APIC_LVTT + 0x10 * i);
                vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i,
                               lvt_val | APIC_LVT_MASKED);
            }
        }
        else
            vlapic->hw.disabled &= ~VLAPIC_SW_DISABLED;
        break;

    case APIC_ESR:
        /* Nothing to do. */
        break;

    case APIC_ICR:
        val &= ~(1 << 12); /* always clear the pending bit */
        rc = vlapic_ipi(vlapic, val, vlapic_get_reg(vlapic, APIC_ICR2));
        if ( rc == X86EMUL_OKAY )
            vlapic_set_reg(vlapic, APIC_ICR, val);
        break;

    case APIC_ICR2:
        vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000);
        break;

    case APIC_LVTT: /* LVT Timer Reg */
        vlapic->pt.irq = val & APIC_VECTOR_MASK;
        /* Fall through: the generic LVT handling below applies to LVTT too. */
    case APIC_LVTTHMR: /* LVT Thermal Monitor */
    case APIC_LVTPC: /* LVT Performance Counter */
    case APIC_LVT0: /* LVT LINT0 Reg */
    case APIC_LVT1: /* LVT Lint1 Reg */
    case APIC_LVTERR: /* LVT Error Reg */
        if ( vlapic_sw_disabled(vlapic) )
            val |= APIC_LVT_MASKED;
        val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];
        vlapic_set_reg(vlapic, offset, val);
        if ( offset == APIC_LVT0 )
            vlapic_adjust_i8259_target(v->domain);
        break;

    case APIC_TMICT:
    {
        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
                          (uint32_t)val * vlapic->hw.timer_divisor;

        /* Writing TMICT (re)arms the timer; periodic iff LVTT says so. */
        vlapic_set_reg(vlapic, APIC_TMICT, val);
        create_periodic_time(current, &vlapic->pt, period,
                             vlapic_lvtt_period(vlapic) ? period : 0,
                             vlapic->pt.irq, vlapic_pt_cb,
                             &vlapic->timer_last_update);
        vlapic->timer_last_update = vlapic->pt.last_plt_gtime;

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "bus cycle is %uns, "
                    "initial count %lu, period %"PRIu64"ns",
                    APIC_BUS_CYCLE_NS, val, period);
    }
    break;

    case APIC_TDCR:
        vlapic_set_tdcr(vlapic, val & 0xb);
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer divisor is 0x%x",
                    vlapic->hw.timer_divisor);
        break;

    default:
        gdprintk(XENLOG_DEBUG,
                 "Local APIC Write to read-only register 0x%x\n", offset);
        break;
    }

    return rc;

 unaligned_exit_and_crash:
    gdprintk(XENLOG_ERR, "Unaligned LAPIC write len=0x%lx at offset=0x%x.\n",
             len, offset);
 exit_and_crash:
    domain_crash(v->domain);
    return rc;
}
745 static int vlapic_range(struct vcpu *v, unsigned long addr)
746 {
747 struct vlapic *vlapic = vcpu_vlapic(v);
748 unsigned long offset = addr - vlapic_base_address(vlapic);
749 return (!vlapic_hw_disabled(vlapic) && (offset < PAGE_SIZE));
750 }
/* MMIO dispatch table hooking the virtual LAPIC page into HVM emulation. */
struct hvm_mmio_handler vlapic_mmio_handler = {
    .check_handler = vlapic_range,
    .read_handler = vlapic_read,
    .write_handler = vlapic_write
};
758 void vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
759 {
760 if ( (vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE )
761 {
762 if ( value & MSR_IA32_APICBASE_ENABLE )
763 {
764 vlapic_reset(vlapic);
765 vlapic->hw.disabled &= ~VLAPIC_HW_DISABLED;
766 }
767 else
768 {
769 vlapic->hw.disabled |= VLAPIC_HW_DISABLED;
770 }
771 }
773 vlapic->hw.apic_base_msr = value;
775 vmx_vlapic_msr_changed(vlapic_vcpu(vlapic));
777 HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
778 "apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr);
779 }
/*
 * Can this vcpu's LAPIC currently accept ExtInt (i8259) interrupts?
 * True if IOAPIC pin0 routes ExtINT here, if LVT0 is unmasked ExtINT,
 * or if the LAPIC is hardware-disabled.
 */
static int __vlapic_accept_pic_intr(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct vlapic *vlapic = vcpu_vlapic(v);
    uint32_t lvt0 = vlapic_get_reg(vlapic, APIC_LVT0);
    union vioapic_redir_entry redir0 = domain_vioapic(d)->redirtbl[0];

    /* We deliver 8259 interrupts to the appropriate CPU as follows. */
    return ((/* IOAPIC pin0 is unmasked and routing to this LAPIC? */
             ((redir0.fields.delivery_mode == dest_ExtINT) &&
              !redir0.fields.mask &&
              redir0.fields.dest_id == VLAPIC_ID(vlapic) &&
              !vlapic_disabled(vlapic)) ||
             /* LAPIC has LVT0 unmasked for ExtInts? */
             ((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) ||
             /* LAPIC is fully disabled? */
             vlapic_hw_disabled(vlapic)));
}
800 int vlapic_accept_pic_intr(struct vcpu *v)
801 {
802 return ((v == v->domain->arch.hvm_domain.i8259_target) &&
803 __vlapic_accept_pic_intr(v));
804 }
/* Re-elect which vcpu (if any) should receive i8259/ExtInt interrupts. */
void vlapic_adjust_i8259_target(struct domain *d)
{
    struct vcpu *v;

    /* Prefer the first vcpu whose LAPIC can currently accept PIC intrs. */
    for_each_vcpu ( d, v )
        if ( __vlapic_accept_pic_intr(v) )
            goto found;

    /* Fall back to vcpu0, or no target at all if the domain has no vcpus. */
    v = d->vcpu ? d->vcpu[0] : NULL;

 found:
    if ( d->arch.hvm_domain.i8259_target == v )
        return;
    d->arch.hvm_domain.i8259_target = v;
    pt_adjust_global_vcpu_target(v);
}
/*
 * Return the highest deliverable pending vector, or -1 if nothing in the
 * IRR beats the priority class of the current in-service interrupt.
 */
int vlapic_has_pending_irq(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    int irr, isr;

    if ( !vlapic_enabled(vlapic) )
        return -1;

    irr = vlapic_find_highest_irr(vlapic);
    if ( irr < 0 )
        return -1;

    isr = vlapic_find_highest_isr(vlapic);
    if ( isr < 0 )
        isr = 0;

    /* Deliverable only if its priority class exceeds the in-service one. */
    return ((irr & 0xf0) > (isr & 0xf0)) ? irr : -1;
}
843 int vlapic_ack_pending_irq(struct vcpu *v, int vector)
844 {
845 struct vlapic *vlapic = vcpu_vlapic(v);
847 vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
848 vlapic_clear_irr(vector, vlapic);
850 return 1;
851 }
/* Reset the VLAPIC back to its power-on/reset state. */
void vlapic_reset(struct vlapic *vlapic)
{
    struct vcpu *v = vlapic_vcpu(vlapic);
    int i;

    /* Initial APIC ID is twice the vcpu id, in the top byte. */
    vlapic_set_reg(vlapic, APIC_ID, (v->vcpu_id * 2) << 24);
    vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);

    /* Clear all eight 32-bit words of each 256-bit vector register. */
    for ( i = 0; i < 8; i++ )
    {
        vlapic_set_reg(vlapic, APIC_IRR + 0x10 * i, 0);
        vlapic_set_reg(vlapic, APIC_ISR + 0x10 * i, 0);
        vlapic_set_reg(vlapic, APIC_TMR + 0x10 * i, 0);
    }
    vlapic_set_reg(vlapic, APIC_ICR, 0);
    vlapic_set_reg(vlapic, APIC_ICR2, 0);
    vlapic_set_reg(vlapic, APIC_LDR, 0);
    vlapic_set_reg(vlapic, APIC_TASKPRI, 0);
    vlapic_set_reg(vlapic, APIC_TMICT, 0);
    vlapic_set_reg(vlapic, APIC_TMCCT, 0);
    vlapic_set_tdcr(vlapic, 0);

    vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU);

    /* All LVT entries come up masked. */
    for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
        vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);

    /* Spurious vector 0xff; APIC starts software-disabled. */
    vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
    vlapic->hw.disabled |= VLAPIC_SW_DISABLED;

    destroy_periodic_time(&vlapic->pt);
}
887 /* rearm the actimer if needed, after a HVM restore */
888 static void lapic_rearm(struct vlapic *s)
889 {
890 unsigned long tmict = vlapic_get_reg(s, APIC_TMICT);
891 uint64_t period;
893 if ( (tmict = vlapic_get_reg(s, APIC_TMICT)) == 0 )
894 return;
896 period = ((uint64_t)APIC_BUS_CYCLE_NS *
897 (uint32_t)tmict * s->hw.timer_divisor);
898 s->pt.irq = vlapic_get_reg(s, APIC_LVTT) & APIC_VECTOR_MASK;
899 create_periodic_time(vlapic_vcpu(s), &s->pt, period,
900 vlapic_lvtt_period(s) ? period : 0,
901 s->pt.irq, vlapic_pt_cb,
902 &s->timer_last_update);
903 s->timer_last_update = s->pt.last_plt_gtime;
904 }
906 static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
907 {
908 struct vcpu *v;
909 struct vlapic *s;
910 int rc = 0;
912 for_each_vcpu ( d, v )
913 {
914 s = vcpu_vlapic(v);
915 if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) != 0 )
916 break;
917 }
919 return rc;
920 }
922 static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
923 {
924 struct vcpu *v;
925 struct vlapic *s;
926 int rc = 0;
928 for_each_vcpu ( d, v )
929 {
930 s = vcpu_vlapic(v);
931 if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 )
932 break;
933 }
935 return rc;
936 }
938 static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
939 {
940 uint16_t vcpuid;
941 struct vcpu *v;
942 struct vlapic *s;
944 /* Which vlapic to load? */
945 vcpuid = hvm_load_instance(h);
946 if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
947 {
948 gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
949 return -EINVAL;
950 }
951 s = vcpu_vlapic(v);
953 if ( hvm_load_entry(LAPIC, h, &s->hw) != 0 )
954 return -EINVAL;
956 vmx_vlapic_msr_changed(v);
958 return 0;
959 }
961 static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
962 {
963 uint16_t vcpuid;
964 struct vcpu *v;
965 struct vlapic *s;
967 /* Which vlapic to load? */
968 vcpuid = hvm_load_instance(h);
969 if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
970 {
971 gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
972 return -EINVAL;
973 }
974 s = vcpu_vlapic(v);
976 if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
977 return -EINVAL;
979 vlapic_adjust_i8259_target(d);
980 lapic_rearm(s);
981 return 0;
982 }
/* Register both LAPIC record types with the HVM save/restore machinery;
 * one instance of each per vcpu. */
HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
                          1, HVMSR_PER_VCPU);
HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
                          1, HVMSR_PER_VCPU);
989 int vlapic_init(struct vcpu *v)
990 {
991 struct vlapic *vlapic = vcpu_vlapic(v);
992 unsigned int memflags = MEMF_node(vcpu_to_node(v));
994 HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);
996 vlapic->pt.source = PTSRC_lapic;
998 #ifdef __i386__
999 /* 32-bit VMX may be limited to 32-bit physical addresses. */
1000 if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
1001 memflags |= MEMF_bits(32);
1002 #endif
1003 if (vlapic->regs_page == NULL)
1005 vlapic->regs_page = alloc_domheap_page(NULL, memflags);
1006 if ( vlapic->regs_page == NULL )
1008 dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
1009 v->domain->domain_id, v->vcpu_id);
1010 return -ENOMEM;
1013 if (vlapic->regs == NULL)
1015 vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
1016 if ( vlapic->regs == NULL )
1018 dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
1019 v->domain->domain_id, v->vcpu_id);
1020 return -ENOMEM;
1023 clear_page(vlapic->regs);
1025 vlapic_reset(vlapic);
1027 vlapic->hw.apic_base_msr = (MSR_IA32_APICBASE_ENABLE |
1028 APIC_DEFAULT_PHYS_BASE);
1029 if ( v->vcpu_id == 0 )
1030 vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP;
1032 tasklet_init(&vlapic->init_tasklet, vlapic_init_action, (unsigned long)v);
1034 return 0;
1037 void vlapic_destroy(struct vcpu *v)
1039 struct vlapic *vlapic = vcpu_vlapic(v);
1041 tasklet_kill(&vlapic->init_tasklet);
1042 destroy_periodic_time(&vlapic->pt);
1043 unmap_domain_page_global(vlapic->regs);
1044 free_domheap_page(vlapic->regs_page);