
view xen/arch/x86/hvm/vlapic.c @ 17232:af33f2054f47

x86: Allow bitop functions to be applied only to fields of at least 4
bytes. Otherwise the 'longword' processor instructions used will
overlap with adjacent fields with unpredictable consequences.

This change requires some code fixup and just a few casts (mainly when
operating on guest-shared fields which cannot be changed, and which by
observation are clearly safe).
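
For instance, where a bitop must still operate on a 32-bit guest-shared APIC
register word, vlapic.c keeps an explicit cast (a representative pattern taken
from the file below, not a new API):

    /* IRR lives in the guest-visible register page; the explicit cast to
     * (unsigned long *) satisfies the new minimum-field-size check. */
    vlapic_test_and_set_vector(
        vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]);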

Based on ideas from Jan Beulich <jbeulich@novell.com>

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Sun Mar 16 14:11:34 2008 +0000 (2008-03-16)
parents 181483b8e959
children 12b589420bd1
line source
/*
 * vlapic.c: virtualize LAPIC for HVM vcpus.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2006 Keir Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <xen/domain_page.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/hvm/vmx/vmx.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>

#define VLAPIC_VERSION                  0x00050014
#define VLAPIC_LVT_NUM                  6

/* The vlapic's bus frequency is 100 MHz (one bus cycle every 10 ns). */
#define APIC_BUS_CYCLE_NS               10

#define LVT_MASK \
    APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK

#define LINT_MASK   \
    LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY |\
    APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER

static unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
{
    /* LVTT */
    LVT_MASK | APIC_LVT_TIMER_PERIODIC,
    /* LVTTHMR */
    LVT_MASK | APIC_MODE_MASK,
    /* LVTPC */
    LVT_MASK | APIC_MODE_MASK,
    /* LVT0-1 */
    LINT_MASK, LINT_MASK,
    /* LVTERR */
    LVT_MASK
};

/* Following could belong in apicdef.h */
#define APIC_SHORT_MASK                 0xc0000
#define APIC_DEST_NOSHORT               0x0
#define APIC_DEST_MASK                  0x800

#define vlapic_lvt_vector(vlapic, lvt_type)                     \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_VECTOR_MASK)

#define vlapic_lvt_dm(vlapic, lvt_type)                         \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_MODE_MASK)

#define vlapic_lvtt_period(vlapic)                              \
    (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC)


/*
 * Generic APIC bitmap vector update & search routines.
 */
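
/*
 * IRR, ISR and TMR are each 256-bit bitmaps held in eight 32-bit APIC
 * registers spaced 0x10 bytes apart; hence a vector maps to bit (v % 32)
 * of the 32-bit word at offset (v / 32) * 0x10.
 */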

#define VEC_POS(v) ((v)%32)
#define REG_POS(v) (((v)/32) * 0x10)
#define vlapic_test_and_set_vector(vec, bitmap)                 \
    test_and_set_bit(VEC_POS(vec),                              \
                     (unsigned long *)((bitmap) + REG_POS(vec)))
#define vlapic_test_and_clear_vector(vec, bitmap)               \
    test_and_clear_bit(VEC_POS(vec),                            \
                       (unsigned long *)((bitmap) + REG_POS(vec)))
#define vlapic_set_vector(vec, bitmap)                          \
    set_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
#define vlapic_clear_vector(vec, bitmap)                        \
    clear_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))

static int vlapic_find_highest_vector(void *bitmap)
{
    uint32_t *word = bitmap;
    int word_offset = MAX_VECTOR / 32;

    /* Work backwards through the bitmap (first 32-bit word in every four). */
    while ( (word_offset != 0) && (word[(--word_offset)*4] == 0) )
        continue;

    return (fls(word[word_offset*4]) - 1) + (word_offset * 32);
}


/*
 * IRR-specific bitmap update & search routines.
 */

static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
{
    return vlapic_test_and_set_vector(
        vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]);
}

static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
{
    vlapic_clear_vector(
        vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]);
}

static int vlapic_find_highest_irr(struct vlapic *vlapic)
{
    return vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
}

int vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig)
{
    int ret;

    ret = !vlapic_test_and_set_irr(vec, vlapic);
    if ( trig )
        vlapic_set_vector(vec, &vlapic->regs->data[APIC_TMR]);

    /* Besides setting the pending bit here, we may also need to wake up
     * the target vcpu. */
    return ret;
}

static int vlapic_find_highest_isr(struct vlapic *vlapic)
{
    return vlapic_find_highest_vector(&vlapic->regs->data[APIC_ISR]);
}
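
/*
 * PPR is the higher of the task priority (TPR) and the priority class of
 * the highest vector currently in service (ISRV), as architecturally
 * defined for the local APIC.
 */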

uint32_t vlapic_get_ppr(struct vlapic *vlapic)
{
    uint32_t tpr, isrv, ppr;
    int isr;

    tpr  = vlapic_get_reg(vlapic, APIC_TASKPRI);
    isr  = vlapic_find_highest_isr(vlapic);
    isrv = (isr != -1) ? isr : 0;

    if ( (tpr & 0xf0) >= (isrv & 0xf0) )
        ppr = tpr & 0xff;
    else
        ppr = isrv & 0xf0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT,
                "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
                vlapic, ppr, isr, isrv);

    return ppr;
}

int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda)
{
    int result = 0;
    uint8_t logical_id;

    logical_id = GET_APIC_LOGICAL_ID(vlapic_get_reg(vlapic, APIC_LDR));

    switch ( vlapic_get_reg(vlapic, APIC_DFR) )
    {
    case APIC_DFR_FLAT:
        if ( logical_id & mda )
            result = 1;
        break;
    case APIC_DFR_CLUSTER:
        if ( ((logical_id >> 4) == (mda >> 0x4)) && (logical_id & mda & 0xf) )
            result = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "Bad DFR value for lapic of vcpu %d: %08x\n",
                 vlapic_vcpu(vlapic)->vcpu_id,
                 vlapic_get_reg(vlapic, APIC_DFR));
        break;
    }

    return result;
}

static int vlapic_match_dest(struct vcpu *v, struct vlapic *source,
                             int short_hand, int dest, int dest_mode)
{
    int result = 0;
    struct vlapic *target = vcpu_vlapic(v);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
                "dest_mode 0x%x, short_hand 0x%x",
                target, source, dest, dest_mode, short_hand);

    switch ( short_hand )
    {
    case APIC_DEST_NOSHORT:
        if ( dest_mode == 0 )
        {
            /* Physical mode. */
            if ( (dest == 0xFF) || (dest == VLAPIC_ID(target)) )
                result = 1;
        }
        else
        {
            /* Logical mode. */
            result = vlapic_match_logical_addr(target, dest);
        }
        break;

    case APIC_DEST_SELF:
        if ( target == source )
            result = 1;
        break;

    case APIC_DEST_ALLINC:
        result = 1;
        break;

    case APIC_DEST_ALLBUT:
        if ( target != source )
            result = 1;
        break;

    default:
        gdprintk(XENLOG_WARNING, "Bad dest shorthand value %x\n", short_hand);
        break;
    }

    return result;
}

/* Add a pending IRQ into lapic. */
static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
                             int vector, int level, int trig_mode)
{
    int result = 0;
    struct vlapic *vlapic = vcpu_vlapic(v);

    switch ( delivery_mode )
    {
    case APIC_DM_FIXED:
    case APIC_DM_LOWEST:
        /* FIXME add logic for vcpu on reset */
        if ( unlikely(!vlapic_enabled(vlapic)) )
            break;

        if ( vlapic_test_and_set_irr(vector, vlapic) && trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode repeatedly for vector %d", vector);
            break;
        }

        if ( trig_mode )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode for vector %d", vector);
            vlapic_set_vector(vector, &vlapic->regs->data[APIC_TMR]);
        }

        vcpu_kick(v);

        result = 1;
        break;

    case APIC_DM_REMRD:
        gdprintk(XENLOG_WARNING, "Ignoring delivery mode 3\n");
        break;

    case APIC_DM_SMI:
        gdprintk(XENLOG_WARNING, "Ignoring guest SMI\n");
        break;

    case APIC_DM_NMI:
        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);
        break;

    case APIC_DM_INIT:
        /* No work on INIT de-assert for P4-type APIC. */
        if ( trig_mode && !(level & APIC_INT_ASSERT) )
            break;
        /* FIXME How to check the situation after vcpu reset? */
        if ( v->is_initialised )
            hvm_vcpu_reset(v);
        v->arch.hvm_vcpu.init_sipi_sipi_state =
            HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
        result = 1;
        break;

    case APIC_DM_STARTUP:
        if ( v->arch.hvm_vcpu.init_sipi_sipi_state ==
             HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM )
            break;

        v->arch.hvm_vcpu.init_sipi_sipi_state =
            HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM;

        if ( v->is_initialised )
        {
            gdprintk(XENLOG_ERR, "SIPI for initialized vcpu %x\n", v->vcpu_id);
            goto exit_and_crash;
        }

        if ( hvm_bringup_ap(v->vcpu_id, vector) != 0 )
            result = 0;
        break;

    default:
        gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode %x\n",
                 delivery_mode);
        goto exit_and_crash;
    }

    return result;

 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}

/* This function is used by both the ioapic and the lapic. The bitmap is
 * indexed by vcpu_id. */
struct vlapic *apic_round_robin(
    struct domain *d, uint8_t vector, uint32_t bitmap)
{
    int next, old;
    struct vlapic *target = NULL;

    old = next = d->arch.hvm_domain.irq.round_robin_prev_vcpu;

    do {
        if ( ++next == MAX_VIRT_CPUS )
            next = 0;
        if ( (d->vcpu[next] == NULL) || !test_bit(next, &bitmap) )
            continue;
        target = vcpu_vlapic(d->vcpu[next]);
        if ( vlapic_enabled(target) )
            break;
        target = NULL;
    } while ( next != old );

    d->arch.hvm_domain.irq.round_robin_prev_vcpu = next;

    return target;
}
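
/*
 * An EOI retires the highest-priority vector in ISR.  If that vector was
 * level-triggered (its TMR bit is set), the EOI is also forwarded to the
 * virtual IOAPIC so the corresponding line can be re-evaluated.
 */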

void vlapic_EOI_set(struct vlapic *vlapic)
{
    int vector = vlapic_find_highest_isr(vlapic);

    /* Some EOI writes may not have a matching in-service interrupt. */
    if ( vector == -1 )
        return;

    vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);

    if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) )
        vioapic_update_EOI(vlapic_domain(vlapic), vector);
}

static void vlapic_ipi(struct vlapic *vlapic)
{
    uint32_t icr_low = vlapic_get_reg(vlapic, APIC_ICR);
    uint32_t icr_high = vlapic_get_reg(vlapic, APIC_ICR2);

    unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
    unsigned int short_hand = icr_low & APIC_SHORT_MASK;
    unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
    unsigned int level = icr_low & APIC_INT_ASSERT;
    unsigned int dest_mode = icr_low & APIC_DEST_MASK;
    unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
    unsigned int vector = icr_low & APIC_VECTOR_MASK;

    struct vlapic *target;
    struct vcpu *v;
    uint32_t lpr_map = 0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
                "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x",
                icr_high, icr_low, short_hand, dest,
                trig_mode, level, dest_mode, delivery_mode, vector);

    for_each_vcpu ( vlapic_domain(vlapic), v )
    {
        if ( vlapic_match_dest(v, vlapic, short_hand, dest, dest_mode) )
        {
            if ( delivery_mode == APIC_DM_LOWEST )
                __set_bit(v->vcpu_id, &lpr_map);
            else
                vlapic_accept_irq(v, delivery_mode,
                                  vector, level, trig_mode);
        }
    }

    if ( delivery_mode == APIC_DM_LOWEST )
    {
        /* v is NULL after the loop above, so derive the domain from vlapic. */
        target = apic_round_robin(vlapic_domain(vlapic), vector, lpr_map);
        if ( target != NULL )
            vlapic_accept_irq(vlapic_vcpu(target), delivery_mode,
                              vector, level, trig_mode);
    }
}
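
/*
 * The current-count register (TMCCT) is derived on demand: convert the
 * guest time elapsed since the timer was last armed into APIC bus cycles
 * (one cycle per APIC_BUS_CYCLE_NS), scale by the divide configuration,
 * and subtract the result from the initial count (TMICT).
 */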

static uint32_t vlapic_get_tmcct(struct vlapic *vlapic)
{
    struct vcpu *v = current;
    uint32_t tmcct, tmict = vlapic_get_reg(vlapic, APIC_TMICT);
    uint64_t counter_passed;

    counter_passed = ((hvm_get_guest_time(v) - vlapic->timer_last_update)
                      * 1000000000ULL / ticks_per_sec(v)
                      / APIC_BUS_CYCLE_NS / vlapic->hw.timer_divisor);
    tmcct = tmict - counter_passed;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "timer initial count %d, timer current count %d, "
                "offset %"PRId64,
                tmict, tmcct, counter_passed);

    return tmcct;
}
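
/*
 * The divide configuration register (TDCR) encodes the timer divisor in
 * bits 0, 1 and 3: packing those bits together and adding one yields
 * log2(divisor), with the all-ones pattern meaning divide-by-1 (hence the
 * final "& 7").
 */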

static void vlapic_set_tdcr(struct vlapic *vlapic, unsigned int val)
{
    /* Only bits 0, 1 and 3 are settable; others are MBZ. */
    val &= 0xb;
    vlapic_set_reg(vlapic, APIC_TDCR, val);

    /* Update the demangled hw.timer_divisor. */
    val = ((val & 3) | ((val & 8) >> 1)) + 1;
    vlapic->hw.timer_divisor = 1 << (val & 7);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "timer_divisor: %d", vlapic->hw.timer_divisor);
}

static void vlapic_read_aligned(
    struct vlapic *vlapic, unsigned int offset, unsigned int *result)
{
    switch ( offset )
    {
    case APIC_PROCPRI:
        *result = vlapic_get_ppr(vlapic);
        break;

    case APIC_TMCCT: /* Timer CCR */
        *result = vlapic_get_tmcct(vlapic);
        break;

    default:
        *result = vlapic_get_reg(vlapic, offset);
        break;
    }
}
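
/*
 * Handle a guest read of the MMIO-mapped APIC page: read the containing
 * 32-bit register and extract the requested 1-, 2- or 4-byte slice,
 * crashing the domain on accesses that straddle a register boundary.
 */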

static unsigned long vlapic_read(struct vcpu *v, unsigned long address,
                                 unsigned long len)
{
    unsigned int alignment;
    unsigned int tmp;
    unsigned long result;
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);

    if ( offset > (APIC_TDCR + 0x3) )
        return 0;

    alignment = offset & 0x3;

    vlapic_read_aligned(vlapic, offset & ~0x3, &tmp);
    switch ( len )
    {
    case 1:
        result = *((unsigned char *)&tmp + alignment);
        break;

    case 2:
        if ( alignment == 3 )
            goto unaligned_exit_and_crash;
        result = *(unsigned short *)((unsigned char *)&tmp + alignment);
        break;

    case 4:
        if ( alignment != 0 )
            goto unaligned_exit_and_crash;
        result = *(unsigned int *)((unsigned char *)&tmp + alignment);
        break;

    default:
        gdprintk(XENLOG_ERR, "Local APIC read with len=0x%lx, "
                 "should be 4 instead.\n", len);
        goto exit_and_crash;
    }

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
                "and the result is 0x%lx", offset, len, result);

    return result;

 unaligned_exit_and_crash:
    gdprintk(XENLOG_ERR, "Unaligned LAPIC read len=0x%lx at offset=0x%x.\n",
             len, offset);
 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}

void vlapic_pt_cb(struct vcpu *v, void *data)
{
    *(s_time_t *)data = hvm_get_guest_time(v);
}

static void vlapic_write(struct vcpu *v, unsigned long address,
                         unsigned long len, unsigned long val)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int offset = address - vlapic_base_address(vlapic);

    if ( offset != 0xb0 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "offset 0x%x with length 0x%lx, and value is 0x%lx",
                    offset, len, val);

    /*
     * According to the IA32 Manual, all accesses should be 32 bits.
     * Some OSes do 8- or 16-bit accesses, however.
     */
    val = (uint32_t)val;
    if ( len != 4 )
    {
        unsigned int tmp;
        unsigned char alignment;

        gdprintk(XENLOG_INFO, "Notice: Local APIC write with len = %lx\n", len);

        alignment = offset & 0x3;
        tmp = vlapic_read(v, offset & ~0x3, 4);

        switch ( len )
        {
        case 1:
            val = ((tmp & ~(0xff << (8*alignment))) |
                   ((val & 0xff) << (8*alignment)));
            break;

        case 2:
            if ( alignment & 1 )
                goto unaligned_exit_and_crash;
            val = ((tmp & ~(0xffff << (8*alignment))) |
                   ((val & 0xffff) << (8*alignment)));
            break;

        default:
            gdprintk(XENLOG_ERR, "Local APIC write with len = %lx, "
                     "should be 4 instead\n", len);
            goto exit_and_crash;
        }
    }
    else if ( (offset & 0x3) != 0 )
        goto unaligned_exit_and_crash;

    offset &= ~0x3;

    switch ( offset )
    {
    case APIC_TASKPRI:
        vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
        break;

    case APIC_EOI:
        vlapic_EOI_set(vlapic);
        break;

    case APIC_LDR:
        vlapic_set_reg(vlapic, APIC_LDR, val & APIC_LDR_MASK);
        break;

    case APIC_DFR:
        vlapic_set_reg(vlapic, APIC_DFR, val | 0x0FFFFFFF);
        break;
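
    /*
     * Bit 8 of the spurious-interrupt vector register is the APIC software
     * enable.  Clearing it software-disables the APIC, and all LVT entries
     * are then forced to the masked state.
     */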
    case APIC_SPIV:
        vlapic_set_reg(vlapic, APIC_SPIV, val & 0x3ff);

        if ( !(val & APIC_SPIV_APIC_ENABLED) )
        {
            int i;
            uint32_t lvt_val;

            vlapic->hw.disabled |= VLAPIC_SW_DISABLED;

            for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
            {
                lvt_val = vlapic_get_reg(vlapic, APIC_LVTT + 0x10 * i);
                vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i,
                               lvt_val | APIC_LVT_MASKED);
            }
        }
        else
            vlapic->hw.disabled &= ~VLAPIC_SW_DISABLED;
        break;

    case APIC_ESR:
        /* Nothing to do. */
        break;

    case APIC_ICR:
        /* No delay here, so we always clear the pending bit. */
        vlapic_set_reg(vlapic, APIC_ICR, val & ~(1 << 12));
        vlapic_ipi(vlapic);
        break;

    case APIC_ICR2:
        vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000);
        break;

    case APIC_LVTT:         /* LVT Timer Reg */
        vlapic->pt.irq = val & APIC_VECTOR_MASK;
    case APIC_LVTTHMR:      /* LVT Thermal Monitor */
    case APIC_LVTPC:        /* LVT Performance Counter */
    case APIC_LVT0:         /* LVT LINT0 Reg */
    case APIC_LVT1:         /* LVT Lint1 Reg */
    case APIC_LVTERR:       /* LVT Error Reg */
        if ( vlapic_sw_disabled(vlapic) )
            val |= APIC_LVT_MASKED;
        val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];
        vlapic_set_reg(vlapic, offset, val);
        break;

    case APIC_TMICT:
    {
        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
                          (uint32_t)val * vlapic->hw.timer_divisor;

        vlapic_set_reg(vlapic, APIC_TMICT, val);
        create_periodic_time(current, &vlapic->pt, period, vlapic->pt.irq,
                             !vlapic_lvtt_period(vlapic), vlapic_pt_cb,
                             &vlapic->timer_last_update);
        vlapic->timer_last_update = vlapic->pt.last_plt_gtime;

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "bus cycle is %uns, "
                    "initial count %lu, period %"PRIu64"ns",
                    APIC_BUS_CYCLE_NS, val, period);
    }
    break;

    case APIC_TDCR:
        vlapic_set_tdcr(vlapic, val & 0xb);
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer divisor is 0x%x",
                    vlapic->hw.timer_divisor);
        break;

    default:
        gdprintk(XENLOG_DEBUG,
                 "Local APIC Write to read-only register 0x%x\n", offset);
        break;
    }

    return;

 unaligned_exit_and_crash:
    gdprintk(XENLOG_ERR, "Unaligned LAPIC write len=0x%lx at offset=0x%x.\n",
             len, offset);
 exit_and_crash:
    domain_crash(v->domain);
}

static int vlapic_range(struct vcpu *v, unsigned long addr)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned long offset  = addr - vlapic_base_address(vlapic);
    return (!vlapic_hw_disabled(vlapic) && (offset < PAGE_SIZE));
}

struct hvm_mmio_handler vlapic_mmio_handler = {
    .check_handler = vlapic_range,
    .read_handler = vlapic_read,
    .write_handler = vlapic_write
};

void vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
{
    if ( (vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE )
    {
        if ( value & MSR_IA32_APICBASE_ENABLE )
        {
            vlapic_reset(vlapic);
            vlapic->hw.disabled &= ~VLAPIC_HW_DISABLED;
        }
        else
        {
            vlapic->hw.disabled |= VLAPIC_HW_DISABLED;
        }
    }

    vlapic->hw.apic_base_msr = value;

    vmx_vlapic_msr_changed(vlapic_vcpu(vlapic));

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                "apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr);
}

int vlapic_accept_pic_intr(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    uint32_t lvt0 = vlapic_get_reg(vlapic, APIC_LVT0);

    /*
     * Only CPU0 is wired to the 8259A. INTA cycles occur if LINT0 is set up
     * to accept ExtInts, or if the LAPIC is disabled (so LINT0 behaves as
     * INTR).
     */
    return ((v->vcpu_id == 0) &&
            (((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) ||
             vlapic_hw_disabled(vlapic)));
}
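
/*
 * Return the highest pending vector in IRR that is actually deliverable,
 * i.e. whose priority class is strictly higher than that of the highest
 * vector currently in service; otherwise return -1.
 */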

int vlapic_has_pending_irq(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    int irr, isr;

    if ( !vlapic_enabled(vlapic) )
        return -1;

    irr = vlapic_find_highest_irr(vlapic);
    if ( irr == -1 )
        return -1;

    isr = vlapic_find_highest_isr(vlapic);
    isr = (isr != -1) ? isr : 0;
    if ( (isr & 0xf0) >= (irr & 0xf0) )
        return -1;

    return irr;
}

int vlapic_ack_pending_irq(struct vcpu *v, int vector)
{
    struct vlapic *vlapic = vcpu_vlapic(v);

    vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
    vlapic_clear_irr(vector, vlapic);

    return 1;
}

/* Reset the VLAPIC back to its power-on/reset state. */
void vlapic_reset(struct vlapic *vlapic)
{
    struct vcpu *v = vlapic_vcpu(vlapic);
    int i;

    vlapic_set_reg(vlapic, APIC_ID,  (v->vcpu_id * 2) << 24);
    vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);

    for ( i = 0; i < 8; i++ )
    {
        vlapic_set_reg(vlapic, APIC_IRR + 0x10 * i, 0);
        vlapic_set_reg(vlapic, APIC_ISR + 0x10 * i, 0);
        vlapic_set_reg(vlapic, APIC_TMR + 0x10 * i, 0);
    }
    vlapic_set_reg(vlapic, APIC_ICR,     0);
    vlapic_set_reg(vlapic, APIC_ICR2,    0);
    vlapic_set_reg(vlapic, APIC_LDR,     0);
    vlapic_set_reg(vlapic, APIC_TASKPRI, 0);
    vlapic_set_reg(vlapic, APIC_TMICT,   0);
    vlapic_set_reg(vlapic, APIC_TMCCT,   0);
    vlapic_set_tdcr(vlapic, 0);

    vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU);

    for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
        vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);

    vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
    vlapic->hw.disabled |= VLAPIC_SW_DISABLED;
}

#ifdef HVM_DEBUG_SUSPEND
static void lapic_info(struct vlapic *s)
{
    printk("*****lapic state:*****\n");
    printk("lapic 0x%"PRIx64".\n", s->hw.apic_base_msr);
    printk("lapic 0x%x.\n", s->hw.disabled);
    printk("lapic 0x%x.\n", s->hw.timer_divisor);
}
#else
static void lapic_info(struct vlapic *s)
{
}
#endif

/* Rearm the actimer if needed, after an HVM restore. */
static void lapic_rearm(struct vlapic *s)
{
    unsigned long tmict;

    tmict = vlapic_get_reg(s, APIC_TMICT);
    if ( tmict > 0 )
    {
        uint64_t period = (uint64_t)APIC_BUS_CYCLE_NS *
                          (uint32_t)tmict * s->hw.timer_divisor;
        uint32_t lvtt = vlapic_get_reg(s, APIC_LVTT);

        s->pt.irq = lvtt & APIC_VECTOR_MASK;
        create_periodic_time(vlapic_vcpu(s), &s->pt, period, s->pt.irq,
                             !vlapic_lvtt_period(s), vlapic_pt_cb,
                             &s->timer_last_update);
        s->timer_last_update = s->pt.last_plt_gtime;

        printk("lapic_load to rearm the actimer:"
               "bus cycle is %uns, "
               "saved tmict count %lu, period %"PRIu64"ns, irq=%"PRIu8"\n",
               APIC_BUS_CYCLE_NS, tmict, period, s->pt.irq);
    }

    lapic_info(s);
}

static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct vlapic *s;

    for_each_vcpu(d, v)
    {
        s = vcpu_vlapic(v);
        lapic_info(s);

        if ( hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw) != 0 )
            return 1;
    }
    return 0;
}

static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct vlapic *s;

    for_each_vcpu(d, v)
    {
        s = vcpu_vlapic(v);
        if ( hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs) != 0 )
            return 1;
    }
    return 0;
}

static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;

    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
        return -EINVAL;
    }
    s = vcpu_vlapic(v);

    if ( hvm_load_entry(LAPIC, h, &s->hw) != 0 )
        return -EINVAL;

    lapic_info(s);

    vmx_vlapic_msr_changed(v);

    return 0;
}

static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;

    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
        return -EINVAL;
    }
    s = vcpu_vlapic(v);

    if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
        return -EINVAL;

    lapic_rearm(s);
    return 0;
}

HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
                          1, HVMSR_PER_VCPU);
HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
                          1, HVMSR_PER_VCPU);
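
/*
 * The virtual APIC register page lives in a dedicated domheap page that is
 * mapped globally, so the registers can be accessed from any CPU context
 * without a temporary mapping.
 */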

int vlapic_init(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    unsigned int memflags = 0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);

    vlapic->pt.source = PTSRC_lapic;

#ifdef __i386__
    /* 32-bit VMX may be limited to 32-bit physical addresses. */
    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
        memflags = MEMF_bits(32);
#endif

    vlapic->regs_page = alloc_domheap_pages(NULL, 0, memflags);
    if ( vlapic->regs_page == NULL )
    {
        dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
                v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
    if ( vlapic->regs == NULL )
    {
        dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
                v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    clear_page(vlapic->regs);

    vlapic_reset(vlapic);

    vlapic->hw.apic_base_msr = (MSR_IA32_APICBASE_ENABLE |
                                APIC_DEFAULT_PHYS_BASE);
    if ( v->vcpu_id == 0 )
        vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP;

    return 0;
}

void vlapic_destroy(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);

    destroy_periodic_time(&vlapic->pt);
    unmap_domain_page_global(vlapic->regs);
    free_domheap_page(vlapic->regs_page);
}