xen/arch/x86/hvm/vlapic.c @ 16674:181483b8e959 (debuggers.hg)

hvm: Some cleanups to vlapic emulation.
Some of this was suggested by Dexuan Cui.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author   Keir Fraser <keir.fraser@citrix.com>
date     Wed Dec 19 11:14:05 2007 +0000 (2007-12-19)
parents  966a6d3b7408
children af33f2054f47
/*
 * vlapic.c: virtualize LAPIC for HVM vcpus.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2006 Keir Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/domain_page.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/hvm/vmx/vmx.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>

#define VLAPIC_VERSION 0x00050014
#define VLAPIC_LVT_NUM 6

/* The vlapic bus frequency is 100 MHz. */
#define APIC_BUS_CYCLE_NS 10

#define LVT_MASK \
    APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK

#define LINT_MASK \
    LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY |\
    APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER

static unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
{
    /* LVTT */
    LVT_MASK | APIC_LVT_TIMER_PERIODIC,
    /* LVTTHMR */
    LVT_MASK | APIC_MODE_MASK,
    /* LVTPC */
    LVT_MASK | APIC_MODE_MASK,
    /* LVT0-1 */
    LINT_MASK, LINT_MASK,
    /* LVTERR */
    LVT_MASK
};

/* The following could belong in apicdef.h. */
#define APIC_SHORT_MASK 0xc0000
#define APIC_DEST_NOSHORT 0x0
#define APIC_DEST_MASK 0x800

#define vlapic_lvt_vector(vlapic, lvt_type) \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_VECTOR_MASK)

#define vlapic_lvt_dm(vlapic, lvt_type) \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_MODE_MASK)

#define vlapic_lvtt_period(vlapic) \
    (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC)

/*
 * Generic APIC bitmap vector update & search routines.
 */

#define VEC_POS(v) ((v)%32)
#define REG_POS(v) (((v)/32) * 0x10)
#define vlapic_test_and_set_vector(vec, bitmap) \
    test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
#define vlapic_test_and_clear_vector(vec, bitmap) \
    test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
#define vlapic_set_vector(vec, bitmap) \
    set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
#define vlapic_clear_vector(vec, bitmap) \
    clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec))
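
/*
 * Layout note: IRR/ISR/TMR are 256-bit registers stored as eight 32-bit
 * words, one word per 16-byte register slot, which is why REG_POS() scales
 * by 0x10 rather than by 4. For example, vector 0x31 lives in the word at
 * byte offset 0x10 from the register base, bit 17
 * (REG_POS(0x31) == 0x10, VEC_POS(0x31) == 17).
 */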

static int vlapic_find_highest_vector(void *bitmap)
{
    uint32_t *word = bitmap;
    int word_offset = MAX_VECTOR / 32;

    /* Work backwards through the bitmap (first 32-bit word in each group
     * of four). */
    while ( (word_offset != 0) && (word[(--word_offset)*4] == 0) )
        continue;

    return (fls(word[word_offset*4]) - 1) + (word_offset * 32);
}

/*
 * IRR-specific bitmap update & search routines.
 */

static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
{
    return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]);
}

static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
{
    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
}

static int vlapic_find_highest_irr(struct vlapic *vlapic)
{
    return vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
}

int vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig)
{
    int ret;

    ret = !vlapic_test_and_set_irr(vec, vlapic);
    if ( trig )
        vlapic_set_vector(vec, &vlapic->regs->data[APIC_TMR]);

    /*
     * Besides setting the pending bit here, we may also need to wake up
     * the target vcpu.
     */
    return ret;
}

static int vlapic_find_highest_isr(struct vlapic *vlapic)
{
    return vlapic_find_highest_vector(&vlapic->regs->data[APIC_ISR]);
}

uint32_t vlapic_get_ppr(struct vlapic *vlapic)
{
    uint32_t tpr, isrv, ppr;
    int isr;

    tpr = vlapic_get_reg(vlapic, APIC_TASKPRI);
    isr = vlapic_find_highest_isr(vlapic);
    isrv = (isr != -1) ? isr : 0;

    if ( (tpr & 0xf0) >= (isrv & 0xf0) )
        ppr = tpr & 0xff;
    else
        ppr = isrv & 0xf0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT,
                "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
                vlapic, ppr, isr, isrv);

    return ppr;
}
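
/*
 * Worked example: with TPR == 0x20 and vector 0x31 in service, the
 * in-service class (0x30) outranks the TPR class (0x20), so a guest read
 * of the PPR returns 0x30 rather than the TPR value.
 */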

int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda)
{
    int result = 0;
    uint8_t logical_id;

    logical_id = GET_APIC_LOGICAL_ID(vlapic_get_reg(vlapic, APIC_LDR));

    switch ( vlapic_get_reg(vlapic, APIC_DFR) )
    {
    case APIC_DFR_FLAT:
        if ( logical_id & mda )
            result = 1;
        break;
    case APIC_DFR_CLUSTER:
        if ( ((logical_id >> 4) == (mda >> 0x4)) && (logical_id & mda & 0xf) )
            result = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "Bad DFR value for lapic of vcpu %d: %08x\n",
                 vlapic_vcpu(vlapic)->vcpu_id,
                 vlapic_get_reg(vlapic, APIC_DFR));
        break;
    }

    return result;
}
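
/*
 * Destination-matching examples: in flat mode an MDA of 0x03 matches any
 * LAPIC whose logical ID has bit 0 or bit 1 set. In cluster mode the high
 * nibble selects the cluster and the low nibble is a bitmap within it, so
 * MDA 0x23 matches logical ID 0x21 (cluster 2, member bit 0) but not 0x11.
 */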

static int vlapic_match_dest(struct vcpu *v, struct vlapic *source,
                             int short_hand, int dest, int dest_mode)
{
    int result = 0;
    struct vlapic *target = vcpu_vlapic(v);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
                "dest_mode 0x%x, short_hand 0x%x",
                target, source, dest, dest_mode, short_hand);

    switch ( short_hand )
    {
    case APIC_DEST_NOSHORT:
        if ( dest_mode == 0 )
        {
            /* Physical mode. */
            if ( (dest == 0xFF) || (dest == VLAPIC_ID(target)) )
                result = 1;
        }
        else
        {
            /* Logical mode. */
            result = vlapic_match_logical_addr(target, dest);
        }
        break;

    case APIC_DEST_SELF:
        if ( target == source )
            result = 1;
        break;

    case APIC_DEST_ALLINC:
        result = 1;
        break;

    case APIC_DEST_ALLBUT:
        if ( target != source )
            result = 1;
        break;

    default:
        gdprintk(XENLOG_WARNING, "Bad dest shorthand value %x\n", short_hand);
        break;
    }

    return result;
}

/* Add a pending IRQ into lapic. */
static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
                             int vector, int level, int trig_mode)
{
    int result = 0;
    struct vlapic *vlapic = vcpu_vlapic(v);

    switch ( delivery_mode )
    {
    case APIC_DM_FIXED:
    case APIC_DM_LOWEST:
        /* FIXME add logic for vcpu on reset */
        if ( unlikely(!vlapic_enabled(vlapic)) )
            break;

        if ( vlapic_test_and_set_irr(vector, vlapic) && trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode repeatedly for vector %d", vector);
            break;
        }

        if ( trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode for vector %d", vector);
            vlapic_set_vector(vector, &vlapic->regs->data[APIC_TMR]);
        }

        vcpu_kick(v);

        result = 1;
        break;

    case APIC_DM_REMRD:
        gdprintk(XENLOG_WARNING, "Ignoring delivery mode 3\n");
        break;

    case APIC_DM_SMI:
        gdprintk(XENLOG_WARNING, "Ignoring guest SMI\n");
        break;

    case APIC_DM_NMI:
        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);
        break;

    case APIC_DM_INIT:
        /* No work on INIT de-assert for P4-type APIC. */
        if ( trig_mode && !(level & APIC_INT_ASSERT) )
            break;
        /* FIXME How to check the situation after vcpu reset? */
        if ( v->is_initialised )
            hvm_vcpu_reset(v);
        v->arch.hvm_vcpu.init_sipi_sipi_state =
            HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
        result = 1;
        break;

    case APIC_DM_STARTUP:
        if ( v->arch.hvm_vcpu.init_sipi_sipi_state ==
             HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM )
            break;

        v->arch.hvm_vcpu.init_sipi_sipi_state =
            HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM;

        if ( v->is_initialised )
        {
            gdprintk(XENLOG_ERR, "SIPI for initialized vcpu %x\n", v->vcpu_id);
            goto exit_and_crash;
        }

        if ( hvm_bringup_ap(v->vcpu_id, vector) != 0 )
            result = 0;
        break;

    default:
        gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode %x\n",
                 delivery_mode);
        goto exit_and_crash;
    }

    return result;

 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}

/* This function is used by both ioapic and lapic. The bitmap is for vcpu_id. */
struct vlapic *apic_round_robin(
    struct domain *d, uint8_t vector, uint32_t bitmap)
{
    int next, old;
    struct vlapic *target = NULL;

    old = next = d->arch.hvm_domain.irq.round_robin_prev_vcpu;

    do {
        if ( ++next == MAX_VIRT_CPUS )
            next = 0;
        if ( (d->vcpu[next] == NULL) || !test_bit(next, &bitmap) )
            continue;
        target = vcpu_vlapic(d->vcpu[next]);
        if ( vlapic_enabled(target) )
            break;
        target = NULL;
    } while ( next != old );

    d->arch.hvm_domain.irq.round_robin_prev_vcpu = next;

    return target;
}
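
/*
 * Note that lowest-priority arbitration is approximated here: the next
 * enabled vlapic in the bitmap after the per-domain round-robin pointer is
 * chosen, rather than comparing processor priorities as real hardware may.
 */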

void vlapic_EOI_set(struct vlapic *vlapic)
{
    int vector = vlapic_find_highest_isr(vlapic);

    /* Some EOI writes may not have a matching in-service interrupt. */
    if ( vector == -1 )
        return;

    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);

    if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) )
        vioapic_update_EOI(vlapic_domain(vlapic), vector);
}

static void vlapic_ipi(struct vlapic *vlapic)
{
    uint32_t icr_low = vlapic_get_reg(vlapic, APIC_ICR);
    uint32_t icr_high = vlapic_get_reg(vlapic, APIC_ICR2);

    unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
    unsigned int short_hand = icr_low & APIC_SHORT_MASK;
    unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
    unsigned int level = icr_low & APIC_INT_ASSERT;
    unsigned int dest_mode = icr_low & APIC_DEST_MASK;
    unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
    unsigned int vector = icr_low & APIC_VECTOR_MASK;

    struct vlapic *target;
    struct vcpu *v;
    uint32_t lpr_map = 0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
                "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x",
                icr_high, icr_low, short_hand, dest,
                trig_mode, level, dest_mode, delivery_mode, vector);

    for_each_vcpu ( vlapic_domain(vlapic), v )
    {
        if ( vlapic_match_dest(v, vlapic, short_hand, dest, dest_mode) )
        {
            if ( delivery_mode == APIC_DM_LOWEST )
                __set_bit(v->vcpu_id, &lpr_map);
            else
                vlapic_accept_irq(v, delivery_mode,
                                  vector, level, trig_mode);
        }
    }

    if ( delivery_mode == APIC_DM_LOWEST )
    {
        target = apic_round_robin(vlapic_domain(v), vector, lpr_map);
        if ( target != NULL )
            vlapic_accept_irq(vlapic_vcpu(target), delivery_mode,
                              vector, level, trig_mode);
    }
}

static uint32_t vlapic_get_tmcct(struct vlapic *vlapic)
{
    struct vcpu *v = current;
    uint32_t tmcct, tmict = vlapic_get_reg(vlapic, APIC_TMICT);
    uint64_t counter_passed;

    counter_passed = ((hvm_get_guest_time(v) - vlapic->timer_last_update)
                      * 1000000000ULL / ticks_per_sec(v)
                      / APIC_BUS_CYCLE_NS / vlapic->hw.timer_divisor);
    tmcct = tmict - counter_passed;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "timer initial count %d, timer current count %d, "
                "offset %"PRId64,
                tmict, tmcct, counter_passed);

    return tmcct;
}
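
/*
 * The current-count calculation above converts elapsed guest time into
 * APIC bus cycles: ticks since the last timer update are scaled to
 * nanoseconds (ticks * 1e9 / ticks_per_sec), then divided by the 10ns bus
 * cycle and by the configured divisor. With the divide value set to 1,
 * TMCCT therefore decrements once every 10ns of guest time.
 */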

static void vlapic_set_tdcr(struct vlapic *vlapic, unsigned int val)
{
    /* Only bits 0, 1 and 3 are settable; others are MBZ. */
    val &= 0xb;
    vlapic_set_reg(vlapic, APIC_TDCR, val);

    /* Update the demangled hw.timer_divisor. */
    val = ((val & 3) | ((val & 8) >> 1)) + 1;
    vlapic->hw.timer_divisor = 1 << (val & 7);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "timer_divisor: %d", vlapic->hw.timer_divisor);
}
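
/*
 * The divide-configuration encoding packs a 3-bit value into TDCR bits
 * 0, 1 and 3; the divisor is 2^(value+1), with the all-ones pattern
 * wrapping to divide-by-1. For example, a guest write of 0x3 yields
 * ((3|0)+1) == 4, i.e. a timer divisor of 16, while 0xb yields 8 and,
 * after masking with 7, a divisor of 1.
 */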

static void vlapic_read_aligned(
    struct vlapic *vlapic, unsigned int offset, unsigned int *result)
{
    switch ( offset )
    {
    case APIC_PROCPRI:
        *result = vlapic_get_ppr(vlapic);
        break;

    case APIC_TMCCT: /* Timer CCR */
        *result = vlapic_get_tmcct(vlapic);
        break;

    default:
        *result = vlapic_get_reg(vlapic, offset);
        break;
    }
}

static unsigned long vlapic_read(struct vcpu *v, unsigned long address,
                                 unsigned long len)
{
    unsigned int alignment;
    unsigned int tmp;
    unsigned long result;
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);

    if ( offset > (APIC_TDCR + 0x3) )
        return 0;

    alignment = offset & 0x3;

    vlapic_read_aligned(vlapic, offset & ~0x3, &tmp);
    switch ( len )
    {
    case 1:
        result = *((unsigned char *)&tmp + alignment);
        break;

    case 2:
        if ( alignment == 3 )
            goto unaligned_exit_and_crash;
        result = *(unsigned short *)((unsigned char *)&tmp + alignment);
        break;

    case 4:
        if ( alignment != 0 )
            goto unaligned_exit_and_crash;
        result = *(unsigned int *)((unsigned char *)&tmp + alignment);
        break;

    default:
        gdprintk(XENLOG_ERR, "Local APIC read with len=0x%lx, "
                 "should be 4 instead.\n", len);
        goto exit_and_crash;
    }

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
                "and the result is 0x%lx", offset, len, result);

    return result;

 unaligned_exit_and_crash:
    gdprintk(XENLOG_ERR, "Unaligned LAPIC read len=0x%lx at offset=0x%x.\n",
             len, offset);
 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}
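
/*
 * Sub-word reads are serviced from the containing 32-bit register: for
 * example, a one-byte read at offset 0x81 fetches the aligned TASKPRI
 * register (offset 0x80) and returns its bits 15:8.
 */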

void vlapic_pt_cb(struct vcpu *v, void *data)
{
    *(s_time_t *)data = hvm_get_guest_time(v);
}

static void vlapic_write(struct vcpu *v, unsigned long address,
                         unsigned long len, unsigned long val)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);

    if ( offset != 0xb0 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "offset 0x%x with length 0x%lx, and value is 0x%lx",
                    offset, len, val);

    /*
     * According to the IA32 Manual, all accesses should be 32 bits.
     * Some OSes do 8- or 16-bit accesses, however.
     */
    val = (uint32_t)val;
    if ( len != 4 )
    {
        unsigned int tmp;
        unsigned char alignment;

        gdprintk(XENLOG_INFO,
                 "Notice: Local APIC write with len = %lx\n", len);

        alignment = offset & 0x3;
        /* Fetch the containing 32-bit register via its guest-physical
         * address, since vlapic_read() expects an address, not an offset. */
        tmp = vlapic_read(v, address & ~0x3, 4);

        switch ( len )
        {
        case 1:
            val = ((tmp & ~(0xff << (8*alignment))) |
                   ((val & 0xff) << (8*alignment)));
            break;

        case 2:
            if ( alignment & 1 )
                goto unaligned_exit_and_crash;
            val = ((tmp & ~(0xffff << (8*alignment))) |
                   ((val & 0xffff) << (8*alignment)));
            break;

        default:
            gdprintk(XENLOG_ERR, "Local APIC write with len = %lx, "
                     "should be 4 instead\n", len);
            goto exit_and_crash;
        }
    }
    else if ( (offset & 0x3) != 0 )
        goto unaligned_exit_and_crash;
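
    /*
     * The read-modify-write above folds a narrow store into its containing
     * 32-bit register: e.g. a one-byte write of 0x20 at offset 0x80 reads
     * the aligned TASKPRI word and replaces only its low byte before the
     * switch below dispatches the full 32-bit value.
     */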

    offset &= ~0x3;

    switch ( offset )
    {
    case APIC_TASKPRI:
        vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
        break;

    case APIC_EOI:
        vlapic_EOI_set(vlapic);
        break;

    case APIC_LDR:
        vlapic_set_reg(vlapic, APIC_LDR, val & APIC_LDR_MASK);
        break;

    case APIC_DFR:
        vlapic_set_reg(vlapic, APIC_DFR, val | 0x0FFFFFFF);
        break;

    case APIC_SPIV:
        vlapic_set_reg(vlapic, APIC_SPIV, val & 0x3ff);

        if ( !(val & APIC_SPIV_APIC_ENABLED) )
        {
            int i;
            uint32_t lvt_val;

            vlapic->hw.disabled |= VLAPIC_SW_DISABLED;

            for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
            {
                lvt_val = vlapic_get_reg(vlapic, APIC_LVTT + 0x10 * i);
                vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i,
                               lvt_val | APIC_LVT_MASKED);
            }
        }
        else
            vlapic->hw.disabled &= ~VLAPIC_SW_DISABLED;
        break;

    case APIC_ESR:
        /* Nothing to do. */
        break;

    case APIC_ICR:
        /* No delay here, so we always clear the pending bit. */
        vlapic_set_reg(vlapic, APIC_ICR, val & ~(1 << 12));
        vlapic_ipi(vlapic);
        break;

    case APIC_ICR2:
        vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000);
        break;

    case APIC_LVTT: /* LVT Timer Reg */
        vlapic->pt.irq = val & APIC_VECTOR_MASK;
        /* Fall through: LVTT is masked and stored like the other LVTs. */
    case APIC_LVTTHMR: /* LVT Thermal Monitor */
    case APIC_LVTPC:   /* LVT Performance Counter */
    case APIC_LVT0:    /* LVT LINT0 Reg */
    case APIC_LVT1:    /* LVT LINT1 Reg */
    case APIC_LVTERR:  /* LVT Error Reg */
        if ( vlapic_sw_disabled(vlapic) )
            val |= APIC_LVT_MASKED;
        val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];
        vlapic_set_reg(vlapic, offset, val);
        break;

    case APIC_TMICT:
    {
        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
                          (uint32_t)val * vlapic->hw.timer_divisor;

        vlapic_set_reg(vlapic, APIC_TMICT, val);
        create_periodic_time(current, &vlapic->pt, period, vlapic->pt.irq,
                             !vlapic_lvtt_period(vlapic), vlapic_pt_cb,
                             &vlapic->timer_last_update);
        vlapic->timer_last_update = vlapic->pt.last_plt_gtime;

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "bus cycle is %uns, "
                    "initial count %lu, period %"PRIu64"ns",
                    APIC_BUS_CYCLE_NS, val, period);
    }
    break;
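
    /*
     * The timer period above is initial count * divisor bus cycles: e.g.
     * TMICT 0x100000 with a divide value of 1 gives 1048576 * 10ns, roughly
     * 10.5ms between interrupts (or a single 10.5ms countdown in one-shot
     * mode).
     */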

    case APIC_TDCR:
        vlapic_set_tdcr(vlapic, val & 0xb);
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer divisor is 0x%x",
                    vlapic->hw.timer_divisor);
        break;

    default:
        gdprintk(XENLOG_DEBUG,
                 "Local APIC Write to read-only register 0x%x\n", offset);
        break;
    }

    return;

 unaligned_exit_and_crash:
    gdprintk(XENLOG_ERR, "Unaligned LAPIC write len=0x%lx at offset=0x%x.\n",
             len, offset);
 exit_and_crash:
    domain_crash(v->domain);
}

static int vlapic_range(struct vcpu *v, unsigned long addr)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned long offset = addr - vlapic_base_address(vlapic);
    return (!vlapic_hw_disabled(vlapic) && (offset < PAGE_SIZE));
}

struct hvm_mmio_handler vlapic_mmio_handler = {
    .check_handler = vlapic_range,
    .read_handler = vlapic_read,
    .write_handler = vlapic_write
};

void vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
{
    if ( (vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE )
    {
        if ( value & MSR_IA32_APICBASE_ENABLE )
        {
            vlapic_reset(vlapic);
            vlapic->hw.disabled &= ~VLAPIC_HW_DISABLED;
        }
        else
        {
            vlapic->hw.disabled |= VLAPIC_HW_DISABLED;
        }
    }

    vlapic->hw.apic_base_msr = value;

    vmx_vlapic_msr_changed(vlapic_vcpu(vlapic));

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                "apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr);
}

int vlapic_accept_pic_intr(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    uint32_t lvt0 = vlapic_get_reg(vlapic, APIC_LVT0);

    /*
     * Only CPU0 is wired to the 8259A. INTA cycles occur if LINT0 is set up
     * to accept ExtInts, or if the LAPIC is disabled (so LINT0 behaves as
     * INTR).
     */
    return ((v->vcpu_id == 0) &&
            (((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) ||
             vlapic_hw_disabled(vlapic)));
}

int vlapic_has_pending_irq(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    int irr, isr;

    if ( !vlapic_enabled(vlapic) )
        return -1;

    irr = vlapic_find_highest_irr(vlapic);
    if ( irr == -1 )
        return -1;

    isr = vlapic_find_highest_isr(vlapic);
    isr = (isr != -1) ? isr : 0;
    if ( (isr & 0xf0) >= (irr & 0xf0) )
        return -1;

    return irr;
}
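
/*
 * A pending vector is only reported if its priority class strictly exceeds
 * that of the highest in-service vector: with 0x51 in service, a pending
 * 0x55 is held back (same class 0x50) while a pending 0x61 is returned.
 */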

int vlapic_ack_pending_irq(struct vcpu *v, int vector)
{
    struct vlapic *vlapic = vcpu_vlapic(v);

    vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
    vlapic_clear_irr(vector, vlapic);

    return 1;
}

/* Reset the VLAPIC back to its power-on/reset state. */
void vlapic_reset(struct vlapic *vlapic)
{
    struct vcpu *v = vlapic_vcpu(vlapic);
    int i;

    vlapic_set_reg(vlapic, APIC_ID, (v->vcpu_id * 2) << 24);
    vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);

    for ( i = 0; i < 8; i++ )
    {
        vlapic_set_reg(vlapic, APIC_IRR + 0x10 * i, 0);
        vlapic_set_reg(vlapic, APIC_ISR + 0x10 * i, 0);
        vlapic_set_reg(vlapic, APIC_TMR + 0x10 * i, 0);
    }
    vlapic_set_reg(vlapic, APIC_ICR, 0);
    vlapic_set_reg(vlapic, APIC_ICR2, 0);
    vlapic_set_reg(vlapic, APIC_LDR, 0);
    vlapic_set_reg(vlapic, APIC_TASKPRI, 0);
    vlapic_set_reg(vlapic, APIC_TMICT, 0);
    vlapic_set_reg(vlapic, APIC_TMCCT, 0);
    vlapic_set_tdcr(vlapic, 0);

    vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU);

    for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
        vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);

    vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
    vlapic->hw.disabled |= VLAPIC_SW_DISABLED;
}

#ifdef HVM_DEBUG_SUSPEND
static void lapic_info(struct vlapic *s)
{
    printk("*****lapic state:*****\n");
    printk("lapic 0x%"PRIx64".\n", s->hw.apic_base_msr);
    printk("lapic 0x%x.\n", s->hw.disabled);
    printk("lapic 0x%x.\n", s->hw.timer_divisor);
}
#else
static void lapic_info(struct vlapic *s)
{
}
#endif

/* Re-arm the timer, if needed, after an HVM restore. */
static void lapic_rearm(struct vlapic *s)
{
    unsigned long tmict;

    tmict = vlapic_get_reg(s, APIC_TMICT);
    if ( tmict > 0 )
    {
        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
                          (uint32_t)tmict * s->hw.timer_divisor;
        uint32_t lvtt = vlapic_get_reg(s, APIC_LVTT);

        s->pt.irq = lvtt & APIC_VECTOR_MASK;
        create_periodic_time(vlapic_vcpu(s), &s->pt, period, s->pt.irq,
                             !vlapic_lvtt_period(s), vlapic_pt_cb,
                             &s->timer_last_update);
        s->timer_last_update = s->pt.last_plt_gtime;

        printk("lapic_load to rearm the actimer: "
               "bus cycle is %uns, "
               "saved tmict count %lu, period %"PRIu64"ns, irq=%"PRIu8"\n",
               APIC_BUS_CYCLE_NS, tmict, period, s->pt.irq);
    }

    lapic_info(s);
}
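
/*
 * On restore the timer is re-created from the saved initial count, divide
 * configuration and LVTT vector; the guest-visible current count is then
 * derived again from timer_last_update on demand. The time remaining until
 * the first tick after restore is presumably a full period rather than the
 * exact remainder that was outstanding at save time.
 */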

static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct vlapic *s;

    for_each_vcpu(d, v)
    {
        s = vcpu_vlapic(v);
        lapic_info(s);

        if ( hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw) != 0 )
            return 1;
    }
    return 0;
}

static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct vlapic *s;

    for_each_vcpu(d, v)
    {
        s = vcpu_vlapic(v);
        if ( hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs) != 0 )
            return 1;
    }
    return 0;
}

static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;

    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
        return -EINVAL;
    }
    s = vcpu_vlapic(v);

    if ( hvm_load_entry(LAPIC, h, &s->hw) != 0 )
        return -EINVAL;

    lapic_info(s);

    vmx_vlapic_msr_changed(v);

    return 0;
}

static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;

    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
        return -EINVAL;
    }
    s = vcpu_vlapic(v);

    if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
        return -EINVAL;

    lapic_rearm(s);
    return 0;
}

HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
                          1, HVMSR_PER_VCPU);
HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
                          1, HVMSR_PER_VCPU);

int vlapic_init(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int memflags = 0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);

    vlapic->pt.source = PTSRC_lapic;

#ifdef __i386__
    /* 32-bit VMX may be limited to 32-bit physical addresses. */
    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
        memflags = MEMF_bits(32);
#endif

    vlapic->regs_page = alloc_domheap_pages(NULL, 0, memflags);
    if ( vlapic->regs_page == NULL )
    {
        dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
                v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
    if ( vlapic->regs == NULL )
    {
        dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
                v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    clear_page(vlapic->regs);

    vlapic_reset(vlapic);

    vlapic->hw.apic_base_msr = (MSR_IA32_APICBASE_ENABLE |
                                APIC_DEFAULT_PHYS_BASE);
    if ( v->vcpu_id == 0 )
        vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP;

    return 0;
}

void vlapic_destroy(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);

    destroy_periodic_time(&vlapic->pt);
    unmap_domain_page_global(vlapic->regs);
    free_domheap_page(vlapic->regs_page);
}