xen/arch/x86/hvm/vlapic.c (debuggers.hg) @ changeset 10951:aeb484dafc5b

[HVM] Declare vlapic_ipi() as static. Used only once.
Signed-off-by: Steven Smith <ssmith@xensource.com>

author   kfraser@localhost.localdomain
date     Thu Aug 03 13:55:41 2006 +0100 (2006-08-03)
parents  b33c08de3d98
children 415614d3a1ee

/*
 * vlapic.c: virtualize LAPIC for HVM vcpus.
 *
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/xmalloc.h>
#include <asm/shadow.h>
#include <asm/page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>

/* XXX remove this definition after GFW is enabled */
#define VLAPIC_NO_BIOS

extern u32 get_apic_bus_cycle(void);

#define APIC_BUS_CYCLE_NS (((s_time_t)get_apic_bus_cycle()) / 1000)

static unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
{
    /* LVTT */
    LVT_MASK | APIC_LVT_TIMER_PERIODIC,
    /* LVTTHMR */
    LVT_MASK | APIC_MODE_MASK,
    /* LVTPC */
    LVT_MASK | APIC_MODE_MASK,
    /* LVT0-1 */
    LINT_MASK, LINT_MASK,
    /* LVTERR */
    LVT_MASK
};

int hvm_apic_support(struct domain *d)
{
    return d->arch.hvm_domain.params[HVM_PARAM_APIC_ENABLED];
}

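/* Return the highest vector currently set in the IRR, or -1 if none is pending. */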
int vlapic_find_highest_irr(struct vlapic *vlapic)
{
    int result;

    result = find_highest_bit((unsigned long *)(vlapic->regs + APIC_IRR),
                              MAX_VECTOR);

    ASSERT( result == -1 || result > 16 );

    return result;
}

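/* Return the expiry time of the vLAPIC timer, or -1 if the APIC or LVTT is disabled. */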
s_time_t get_apictime_scheduled(struct vcpu *v)
{
    struct vlapic *vlapic = VLAPIC(v);

    if ( !hvm_apic_support(v->domain) ||
         !vlapic_lvt_enabled(vlapic, APIC_LVTT) )
        return -1;

    return vlapic->vlapic_timer.expires;
}

int vlapic_find_highest_isr(struct vlapic *vlapic)
{
    int result;

    result = find_highest_bit((unsigned long *)(vlapic->regs + APIC_ISR),
                              MAX_VECTOR);

    ASSERT( result == -1 || result > 16 );

    return result;
}

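/*
 * Recompute the Processor Priority Register: PPR is the task priority (TPR)
 * if its priority class is at least that of the highest in-service vector,
 * otherwise the in-service vector's priority class with the low bits clear.
 */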
uint32_t vlapic_update_ppr(struct vlapic *vlapic)
{
    uint32_t tpr, isrv, ppr;
    int isr;

    tpr = vlapic_get_reg(vlapic, APIC_TASKPRI);

    isr = vlapic_find_highest_isr(vlapic);

    if ( isr != -1 )
        isrv = (isr >> 4) & 0xf;   /* priority class of the in-service vector */
    else
        isrv = 0;

    if ( (tpr >> 4) >= isrv )
        ppr = tpr & 0xff;
    else
        ppr = isrv << 4;  /* low 4 bits of PPR have to be cleared */

    vlapic_set_reg(vlapic, APIC_PROCPRI, ppr);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT,
                "vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x.",
                vlapic, ppr, isr, isrv);

    return ppr;
}

/* This is only for fixed delivery mode. */
static int vlapic_match_dest(struct vcpu *v, struct vlapic *source,
                             int short_hand, int dest, int dest_mode,
                             int delivery_mode)
{
    int result = 0;
    struct vlapic *target = VLAPIC(v);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
                "dest_mode 0x%x, short_hand 0x%x, delivery_mode 0x%x.",
                target, source, dest, dest_mode, short_hand, delivery_mode);

    if ( unlikely(target == NULL) &&
         ((delivery_mode != APIC_DM_INIT) &&
          (delivery_mode != APIC_DM_STARTUP) &&
          (delivery_mode != APIC_DM_NMI)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "uninitialized target vcpu %p, "
                    "delivery_mode 0x%x, dest 0x%x.\n", v, delivery_mode, dest);
        return result;
    }

    switch ( short_hand ) {
    case APIC_DEST_NOSHORT:     /* no shorthand */
        if ( !dest_mode )       /* Physical */
        {
            result = ( ((target != NULL) ?
                         GET_APIC_ID(vlapic_get_reg(target, APIC_ID)):
                         v->vcpu_id)) == dest;
        }
        else                    /* Logical */
        {
            uint32_t ldr;

            if ( target == NULL )
                break;
            ldr = vlapic_get_reg(target, APIC_LDR);

            /* Flat mode */
            if ( vlapic_get_reg(target, APIC_DFR) == APIC_DFR_FLAT )
            {
                result = GET_APIC_LOGICAL_ID(ldr) & dest;
            }
            else
            {
                if ( (delivery_mode == APIC_DM_LOWEST) &&
                     (dest == 0xff) )
                {
                    /* What shall we do now? */
                    printk("Broadcast IPI with lowest priority "
                           "delivery mode\n");
                    domain_crash_synchronous();
                }
                result = (GET_APIC_LOGICAL_ID(ldr) == (dest & 0xf)) ?
                         (GET_APIC_LOGICAL_ID(ldr) >> 4) & (dest >> 4) : 0;
            }
        }
        break;

    case APIC_DEST_SELF:
        if ( target == source )
            result = 1;
        break;

    case APIC_DEST_ALLINC:
        result = 1;
        break;

    case APIC_DEST_ALLBUT:
        if ( target != source )
            result = 1;
        break;

    default:
        break;
    }

    return result;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
                             int vector, int level, int trig_mode)
{
    int result = 0;
    struct vlapic *vlapic = VLAPIC(v);

    switch ( delivery_mode ) {
    case APIC_DM_FIXED:
    case APIC_DM_LOWEST:
        /* FIXME add logic for vcpu on reset */
        if ( unlikely(vlapic == NULL || !vlapic_enabled(vlapic)) )
            break;

        if ( test_and_set_bit(vector, vlapic->regs + APIC_IRR) )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode repeatedly for vector %d\n", vector);
            break;
        }

        if ( level )
        {
            HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                        "level trig mode for vector %d\n", vector);
            set_bit(vector, vlapic->regs + APIC_TMR);
        }
        evtchn_set_pending(v, iopacket_port(v));

        result = 1;
        break;

    case APIC_DM_REMRD:
        printk("Ignore deliver mode 3 in vlapic_accept_irq\n");
        break;

    case APIC_DM_SMI:
    case APIC_DM_NMI:
        /* FIXME */
        printk("TODO: for guest SMI/NMI\n");
        break;

    case APIC_DM_INIT:
        if ( level && !(trig_mode & APIC_INT_ASSERT) )  // De-assert
            printk("This hvm_vlapic is for P4, no work for De-assert init\n");
        else
        {
            /* FIXME How to check the situation after vcpu reset? */
            if ( test_and_clear_bit(_VCPUF_initialised, &v->vcpu_flags) )
            {
                printk("Reset hvm vcpu not supported yet\n");
                domain_crash_synchronous();
            }
            v->arch.hvm_vcpu.init_sipi_sipi_state =
                HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
            result = 1;
        }
        break;

    case APIC_DM_STARTUP:
        if ( v->arch.hvm_vcpu.init_sipi_sipi_state ==
             HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM )
            break;

        v->arch.hvm_vcpu.init_sipi_sipi_state =
            HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM;

        if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        {
            printk("SIPI for initialized vcpu vcpuid %x\n", v->vcpu_id);
            domain_crash_synchronous();
        }

        if ( hvm_bringup_ap(v->vcpu_id, vector) != 0 )
            result = 0;
        break;

    default:
        printk("TODO: unsupported interrupt type %x\n", delivery_mode);
        domain_crash_synchronous();
        break;
    }

    return result;
}

/*
 * This function is used by both the IOAPIC and the local APIC.
 * The bitmap is indexed by vcpu_id.
 */
struct vlapic* apic_round_robin(struct domain *d,
                                uint8_t dest_mode,
                                uint8_t vector,
                                uint32_t bitmap)
{
    int next, old;
    struct vlapic* target = NULL;

    if ( dest_mode == 0 )   // Physical mode
    {
        printk("<apic_round_robin> lowest priority for physical mode.\n");
        return NULL;
    }

    if ( !bitmap )
    {
        printk("<apic_round_robin> no bit set in bitmap.\n");
        return NULL;
    }

    spin_lock(&d->arch.hvm_domain.round_robin_lock);

    old = next = d->arch.hvm_domain.round_info[vector];

    /* The vcpu array is arranged according to vcpu_id. */
    do
    {
        next++;
        if ( next == MAX_VIRT_CPUS ||
             !d->vcpu[next] ||
             !test_bit(_VCPUF_initialised, &d->vcpu[next]->vcpu_flags) )
            next = 0;

        if ( test_bit(next, &bitmap) )
        {
            target = d->vcpu[next]->arch.hvm_vcpu.vlapic;

            if ( target == NULL || !vlapic_enabled(target) )
            {
                printk("warning: target round robin local apic disabled\n");
                /* XXX should we crash the domain, or should we return NULL? */
            }
            break;
        }
    } while ( next != old );

    d->arch.hvm_domain.round_info[vector] = next;
    spin_unlock(&d->arch.hvm_domain.round_robin_lock);

    return target;
}

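/*
 * Handle a guest EOI: retire the highest-priority in-service vector and,
 * for level-triggered interrupts, forward the EOI to the virtual IOAPIC.
 */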
void vlapic_EOI_set(struct vlapic *vlapic)
{
    int vector = vlapic_find_highest_isr(vlapic);

    /* Not every EOI write has a corresponding ISR vector: one example is
     * when the kernel checks the timer in setup_IO_APIC. */
    if ( vector == -1 )
        return;

    clear_bit(vector, vlapic->regs + APIC_ISR);
    vlapic_update_ppr(vlapic);

    if ( test_and_clear_bit(vector, vlapic->regs + APIC_TMR) )
        ioapic_update_EOI(vlapic->domain, vector);
}

static int vlapic_check_vector(struct vlapic *vlapic,
                               uint32_t dm, uint32_t vector)
{
    if ( (dm == APIC_DM_FIXED) && (vector < 16) )
    {
        vlapic->err_status |= 0x40;
        vlapic_accept_irq(vlapic->vcpu, APIC_DM_FIXED,
                          vlapic_lvt_vector(vlapic, APIC_LVTERR), 0, 0);
        printk("<vlapic_check_vector>: check failed "
               " dm %x vector %x\n", dm, vector);
        return 0;
    }
    return 1;
}

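/*
 * Deliver the IPI described by ICR/ICR2: decode destination, shorthand,
 * trigger and delivery mode, then hand the interrupt to every matching
 * vcpu. Lowest-priority delivery picks one target via apic_round_robin().
 */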
static void vlapic_ipi(struct vlapic *vlapic)
{
    uint32_t icr_low = vlapic_get_reg(vlapic, APIC_ICR);
    uint32_t icr_high = vlapic_get_reg(vlapic, APIC_ICR2);

    unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
    unsigned int short_hand = icr_low & APIC_SHORT_MASK;
    unsigned int trig_mode = icr_low & APIC_INT_ASSERT;
    unsigned int level = icr_low & APIC_INT_LEVELTRIG;
    unsigned int dest_mode = icr_low & APIC_DEST_MASK;
    unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
    unsigned int vector = icr_low & APIC_VECTOR_MASK;

    struct vlapic *target;
    struct vcpu *v = NULL;
    uint32_t lpr_map = 0;

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
                "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x.",
                icr_high, icr_low, short_hand, dest,
                trig_mode, level, dest_mode, delivery_mode, vector);

    for_each_vcpu ( vlapic->domain, v )
    {
        if ( vlapic_match_dest(v, vlapic, short_hand,
                               dest, dest_mode, delivery_mode) )
        {
            if ( delivery_mode == APIC_DM_LOWEST )
                set_bit(v->vcpu_id, &lpr_map);
            else
                vlapic_accept_irq(v, delivery_mode,
                                  vector, level, trig_mode);
        }
    }

    if ( delivery_mode == APIC_DM_LOWEST )
    {
        v = vlapic->vcpu;
        target = apic_round_robin(v->domain, dest_mode, vector, lpr_map);

        if ( target )
            vlapic_accept_irq(target->vcpu, delivery_mode,
                              vector, level, trig_mode);
    }
}

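/*
 * Derive the timer's current-count register from the time elapsed since the
 * last update, measured in APIC bus cycles scaled by the divide count.
 */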
static uint32_t vlapic_get_tmcct(struct vlapic *vlapic)
{
    uint32_t counter_passed;
    s_time_t passed, now = NOW();
    uint32_t tmcct = vlapic_get_reg(vlapic, APIC_TMCCT);

    ASSERT(vlapic != NULL);

    if ( unlikely(now <= vlapic->timer_last_update) )
    {
        passed = ~0x0LL - vlapic->timer_last_update + now;
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "time elapsed.");
    }
    else
        passed = now - vlapic->timer_last_update;

    counter_passed = passed /
                     (APIC_BUS_CYCLE_NS * vlapic->timer_divide_count);

    tmcct -= counter_passed;

    if ( tmcct <= 0 )
    {
        if ( unlikely(!vlapic_lvtt_period(vlapic)) )
        {
            tmcct = 0;
            // FIXME: should we add interrupt here?
        }
        else
        {
            do {
                tmcct += vlapic_get_reg(vlapic, APIC_TMICT);
            } while ( tmcct < 0 );
        }
    }

    vlapic->timer_last_update = now;
    vlapic_set_reg(vlapic, APIC_TMCCT, tmcct);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "timer initial count 0x%x, timer current count 0x%x, "
                "update 0x%016"PRIx64", now 0x%016"PRIx64", offset 0x%x.",
                vlapic_get_reg(vlapic, APIC_TMICT),
                vlapic_get_reg(vlapic, APIC_TMCCT),
                vlapic->timer_last_update, now, counter_passed);

    return tmcct;
}

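/*
 * MMIO read path: registers are read as aligned 32-bit words and the
 * requested bytes extracted, since some guests issue sub-dword accesses.
 */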
static void vlapic_read_aligned(struct vlapic *vlapic, unsigned int offset,
                                unsigned int len, unsigned int *result)
{
    ASSERT(len == 4 && offset > 0 && offset <= APIC_TDCR);

    *result = 0;

    switch ( offset ) {
    case APIC_ARBPRI:
        printk("access local APIC ARBPRI register which is for P6\n");
        break;

    case APIC_TMCCT:        // Timer CCR
        *result = vlapic_get_tmcct(vlapic);
        break;

    default:
        *result = vlapic_get_reg(vlapic, offset);
        break;
    }
}

static unsigned long vlapic_read(struct vcpu *v, unsigned long address,
                                 unsigned long len)
{
    unsigned int alignment;
    unsigned int tmp;
    unsigned long result;
    struct vlapic *vlapic = VLAPIC(v);
    unsigned int offset = address - vlapic->base_address;

    if ( offset > APIC_TDCR )
        return 0;

    /* Some buggy kernels read this with byte accesses. */
    if ( len != 4 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "read with len=0x%lx, should be 4 instead.\n",
                    len);

    alignment = offset & 0x3;

    vlapic_read_aligned(vlapic, offset & ~0x3, 4, &tmp);
    switch ( len ) {
    case 1:
        result = *((unsigned char *)&tmp + alignment);
        break;

    case 2:
        result = *(unsigned short *)((unsigned char *)&tmp + alignment);
        break;

    case 4:
        result = *(unsigned int *)((unsigned char *)&tmp + alignment);
        break;

    default:
        printk("Local APIC read with len=0x%lx, should be 4 instead.\n", len);
        domain_crash_synchronous();
        break;
    }

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
                "and the result is 0x%lx.", offset, len, result);

    return result;
}

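/*
 * MMIO write path: sub-dword writes are first merged with the current
 * register contents, then the aligned register offset is dispatched on.
 */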
static void vlapic_write(struct vcpu *v, unsigned long address,
                         unsigned long len, unsigned long val)
{
    struct vlapic *vlapic = VLAPIC(v);
    unsigned int offset = address - vlapic->base_address;

    if ( offset != 0xb0 )
        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "offset 0x%x with length 0x%lx, and value is 0x%lx.",
                    offset, len, val);

    /*
     * According to the IA-32 Manual, all registers should be accessed with
     * 32-bit alignment.
     */
    if ( len != 4 )
    {
        unsigned int tmp;
        unsigned char alignment;

        /* Some kernels will access with byte/word alignment. */
        printk("Notice: Local APIC write with len = %lx\n", len);
        alignment = offset & 0x3;
        tmp = vlapic_read(v, offset & ~0x3, 4);
        switch ( len ) {
        case 1:
            /* XXX saddr is a temporary variable from the caller, so this
             * should be OK, but the references to val below should still be
             * changed to use a local variable later. */
            val = (tmp & ~(0xff << alignment)) |
                  ((val & 0xff) << alignment);
            break;

        case 2:
            if ( alignment != 0x0 && alignment != 0x2 )
            {
                printk("alignment error for vlapic with len == 2\n");
                domain_crash_synchronous();
            }

            val = (tmp & ~(0xffff << alignment)) |
                  ((val & 0xffff) << alignment);
            break;

        case 3:
            /* Will this ever happen? */
            printk("vlapic_write with len = 3 !!!\n");
            domain_crash_synchronous();
            break;

        default:
            printk("Local APIC write with len = %lx, should be 4 instead\n", len);
            domain_crash_synchronous();
            break;
        }
    }

    offset &= 0xff0;

    switch ( offset ) {
    case APIC_ID:   /* Local APIC ID */
        vlapic_set_reg(vlapic, APIC_ID, val);
        break;

    case APIC_TASKPRI:
        vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
        vlapic_update_ppr(vlapic);
        break;

    case APIC_EOI:
        vlapic_EOI_set(vlapic);
        break;

    case APIC_LDR:
        vlapic_set_reg(vlapic, APIC_LDR, val & APIC_LDR_MASK);
        break;

    case APIC_DFR:
        vlapic_set_reg(vlapic, APIC_DFR, val);
        break;

    case APIC_SPIV:
        vlapic_set_reg(vlapic, APIC_SPIV, val & 0x1ff);

        if ( !(val & APIC_SPIV_APIC_ENABLED) )
        {
            int i;
            uint32_t lvt_val;

            vlapic->status |= VLAPIC_SOFTWARE_DISABLE_MASK;

            for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
            {
                lvt_val = vlapic_get_reg(vlapic, APIC_LVT1 + 0x10 * i);
                vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i,
                               lvt_val | APIC_LVT_MASKED);
            }

            if ( (vlapic_get_reg(vlapic, APIC_LVT0) & APIC_MODE_MASK)
                 == APIC_DM_EXTINT )
                clear_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
        }
        else
        {
            vlapic->status &= ~VLAPIC_SOFTWARE_DISABLE_MASK;
            if ( (vlapic_get_reg(vlapic, APIC_LVT0) & APIC_MODE_MASK)
                 == APIC_DM_EXTINT )
                set_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
        }
        break;

    case APIC_ESR:
        vlapic->err_write_count = !vlapic->err_write_count;
        if ( !vlapic->err_write_count )
            vlapic->err_status = 0;
        break;

    case APIC_ICR:
        /* No delay here, so we always clear the pending bit. */
        vlapic_set_reg(vlapic, APIC_ICR, val & ~(1 << 12));
        vlapic_ipi(vlapic);
        break;

    case APIC_ICR2:
        vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000);
        break;

    case APIC_LVTT:         // LVT Timer Reg
    case APIC_LVTTHMR:      // LVT Thermal Monitor
    case APIC_LVTPC:        // LVT Performance Counter
    case APIC_LVT0:         // LVT LINT0 Reg
    case APIC_LVT1:         // LVT LINT1 Reg
    case APIC_LVTERR:       // LVT Error Reg
    {
        if ( vlapic->status & VLAPIC_SOFTWARE_DISABLE_MASK )
            val |= APIC_LVT_MASKED;

        val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];

        vlapic_set_reg(vlapic, offset, val);

        /* On hardware, writing a vector less than 0x20 raises an error. */
        if ( !(val & APIC_LVT_MASKED) )
            vlapic_check_vector(vlapic, vlapic_lvt_dm(vlapic, offset),
                                vlapic_lvt_vector(vlapic, offset));

        if ( !vlapic->vcpu_id && (offset == APIC_LVT0) )
        {
            if ( (val & APIC_MODE_MASK) == APIC_DM_EXTINT )
            {
                if ( val & APIC_LVT_MASKED )
                    clear_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
                else
                    set_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
            }
            else
                clear_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
        }
    }
    break;

    case APIC_TMICT:
    {
        s_time_t now = NOW(), offset;

        stop_timer(&vlapic->vlapic_timer);

        vlapic_set_reg(vlapic, APIC_TMICT, val);
        vlapic_set_reg(vlapic, APIC_TMCCT, val);
        vlapic->timer_last_update = now;

        offset = APIC_BUS_CYCLE_NS *
                 vlapic->timer_divide_count * val;

        set_timer(&vlapic->vlapic_timer, now + offset);

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                    "bus cycle is %"PRId64"ns, now 0x%016"PRIx64", "
                    "timer initial count 0x%x, offset 0x%016"PRIx64", "
                    "expire @ 0x%016"PRIx64".",
                    APIC_BUS_CYCLE_NS, now,
                    vlapic_get_reg(vlapic, APIC_TMICT),
                    offset, now + offset);
    }
    break;

    case APIC_TDCR:
    {
        unsigned int tmp1, tmp2;

        tmp1 = val & 0xf;
        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
        vlapic->timer_divide_count = 0x1 << (tmp2 & 0x7);

        vlapic_set_reg(vlapic, APIC_TDCR, val);

        HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer divide count is 0x%x",
                    vlapic->timer_divide_count);
    }
    break;

    default:
        printk("Local APIC Write to read-only register\n");
        break;
    }
}

static int vlapic_range(struct vcpu *v, unsigned long addr)
{
    struct vlapic *vlapic = VLAPIC(v);

    if ( vlapic_global_enabled(vlapic) &&
         (addr >= vlapic->base_address) &&
         (addr <= vlapic->base_address + VLOCAL_APIC_MEM_LENGTH) )
        return 1;

    return 0;
}

struct hvm_mmio_handler vlapic_mmio_handler = {
    .check_handler = vlapic_range,
    .read_handler = vlapic_read,
    .write_handler = vlapic_write
};

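/*
 * Handle a guest write to the IA32_APICBASE MSR: update the cached MSR value
 * and the MMIO base address, and track the global enable bit.
 */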
void vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
{
    /* When the APIC is disabled. */
    if ( vlapic == NULL )
        return;

    if ( vlapic->vcpu_id )
        value &= ~MSR_IA32_APICBASE_BSP;

    vlapic->apic_base_msr = value;
    vlapic->base_address = vlapic->apic_base_msr &
                           MSR_IA32_APICBASE_BASE;

    /* With FSB-delivered interrupts we can restart APIC functionality. */
    if ( !(value & MSR_IA32_APICBASE_ENABLE) )
        set_bit(_VLAPIC_GLOB_DISABLE, &vlapic->status);
    else
        clear_bit(_VLAPIC_GLOB_DISABLE, &vlapic->status);

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                "apic base msr is 0x%016"PRIx64", and base address is 0x%lx.",
                vlapic->apic_base_msr, vlapic->base_address);
}

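/*
 * Timer callback: raise the LVTT vector in the IRR and, in periodic mode,
 * reload the current count and re-arm the timer.
 */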
void vlapic_timer_fn(void *data)
{
    struct vlapic *vlapic = data;
    struct vcpu *v;
    uint32_t timer_vector;
    s_time_t now;

    if ( unlikely(!vlapic_enabled(vlapic) ||
                  !vlapic_lvt_enabled(vlapic, APIC_LVTT)) )
        return;

    v = vlapic->vcpu;
    timer_vector = vlapic_lvt_vector(vlapic, APIC_LVTT);
    now = NOW();

    vlapic->timer_last_update = now;

    if ( test_and_set_bit(timer_vector, vlapic->regs + APIC_IRR) )
        vlapic->intr_pending_count[timer_vector]++;

    if ( vlapic_lvtt_period(vlapic) )
    {
        s_time_t offset;
        uint32_t tmict = vlapic_get_reg(vlapic, APIC_TMICT);

        vlapic_set_reg(vlapic, APIC_TMCCT, tmict);

        offset = APIC_BUS_CYCLE_NS *
                 vlapic->timer_divide_count * tmict;

        set_timer(&vlapic->vlapic_timer, now + offset);
    }
    else
        vlapic_set_reg(vlapic, APIC_TMCCT, 0);

#if 0
    if ( test_bit(_VCPUF_running, &v->vcpu_flags) )
    {
        /* TODO: add guest time handling here */
    }
#endif

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
                "now 0x%016"PRIx64", expire @ 0x%016"PRIx64", "
                "timer initial count 0x%x, timer current count 0x%x.",
                now, vlapic->vlapic_timer.expires,
                vlapic_get_reg(vlapic, APIC_TMICT),
                vlapic_get_reg(vlapic, APIC_TMCCT));
}

#if 0
static int
vlapic_check_direct_intr(struct vcpu *v, int *mode)
{
    struct vlapic *vlapic = VLAPIC(v);
    int type;

    type = fls(vlapic->direct_intr.deliver_mode) - 1;
    if ( type == -1 )
        return -1;

    *mode = type;
    return 0;
}
#endif

int vlapic_accept_pic_intr(struct vcpu *v)
{
    struct vlapic *vlapic = VLAPIC(v);

    return vlapic ? test_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status) : 1;
}

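/*
 * Return the highest-priority pending vector that exceeds the current
 * processor priority, or -1 if nothing is injectable. Vectors below 0x10
 * are illegal and are converted into the LVTERR vector.
 */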
int cpu_get_apic_interrupt(struct vcpu *v, int *mode)
{
    struct vlapic *vlapic = VLAPIC(v);

    if ( vlapic && vlapic_enabled(vlapic) )
    {
        int highest_irr = vlapic_find_highest_irr(vlapic);

        if ( highest_irr != -1 &&
             ( (highest_irr & 0xF0) > vlapic_get_reg(vlapic, APIC_PROCPRI) ) )
        {
            if ( highest_irr < 0x10 )
            {
                uint32_t err_vector;

                vlapic->err_status |= 0x20;
                err_vector = vlapic_lvt_vector(vlapic, APIC_LVTERR);

                HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                            "Sending an illegal vector 0x%x.", highest_irr);

                set_bit(err_vector, vlapic->regs + APIC_IRR);
                highest_irr = err_vector;
            }

            *mode = APIC_DM_FIXED;
            return highest_irr;
        }
    }
    return -1;
}

int cpu_has_apic_interrupt(struct vcpu *v)
{
    struct vlapic *vlapic = VLAPIC(v);

    if ( vlapic && vlapic_enabled(vlapic) )
    {
        int highest_irr = vlapic_find_highest_irr(vlapic);

        if ( highest_irr != -1 &&
             ( (highest_irr & 0xF0) > vlapic_get_reg(vlapic, APIC_PROCPRI) ) )
            return 1;
    }
    return 0;
}

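/*
 * Called after an interrupt is injected into the guest: move the vector from
 * IRR to ISR and account for timer interrupts coalesced while pending.
 */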
void vlapic_post_injection(struct vcpu *v, int vector, int deliver_mode)
{
    struct vlapic *vlapic = VLAPIC(v);

    if ( unlikely(vlapic == NULL) )
        return;

    switch ( deliver_mode ) {
    case APIC_DM_FIXED:
    case APIC_DM_LOWEST:
        set_bit(vector, vlapic->regs + APIC_ISR);
        clear_bit(vector, vlapic->regs + APIC_IRR);
        vlapic_update_ppr(vlapic);

        if ( vector == vlapic_lvt_vector(vlapic, APIC_LVTT) )
        {
            vlapic->intr_pending_count[vector]--;
            if ( vlapic->intr_pending_count[vector] > 0 )
                test_and_set_bit(vector, vlapic->regs + APIC_IRR);
        }
        break;

    /* XXX deal with these later */
    case APIC_DM_REMRD:
        printk("Ignore deliver mode 3 in vlapic_post_injection\n");
        break;

    case APIC_DM_SMI:
    case APIC_DM_NMI:
    case APIC_DM_INIT:
    case APIC_DM_STARTUP:
        vlapic->direct_intr.deliver_mode &= deliver_mode;
        break;

    default:
        printk("<vlapic_post_injection> invalid deliver mode\n");
        break;
    }
}

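/*
 * Bring the vLAPIC to its power-on state: mask all LVT entries, program the
 * default APIC base MSR (BSP bit for vcpu 0), register with the virtual
 * IOAPIC, and initialise the timer.
 */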
static int vlapic_reset(struct vlapic *vlapic)
{
    struct vcpu *v;
    int i;

    ASSERT( vlapic != NULL );

    v = vlapic->vcpu;

    ASSERT( v != NULL );

    vlapic->domain = v->domain;

    vlapic->vcpu_id = v->vcpu_id;

    vlapic_set_reg(vlapic, APIC_ID, v->vcpu_id << 24);

    vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);

    for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
        vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);

    vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU);

    vlapic_set_reg(vlapic, APIC_SPIV, 0xff);

    vlapic->apic_base_msr = MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;

    if ( v->vcpu_id == 0 )
        vlapic->apic_base_msr |= MSR_IA32_APICBASE_BSP;

    vlapic->base_address = vlapic->apic_base_msr &
                           MSR_IA32_APICBASE_BASE;

    hvm_vioapic_add_lapic(vlapic, v);

    init_timer(&vlapic->vlapic_timer,
               vlapic_timer_fn, vlapic, v->processor);

#ifdef VLAPIC_NO_BIOS
    /*
     * XXX According to the MP specification, the BIOS will enable LVT0/1;
     * remove this once the BIOS has done so.
     */
    if ( !v->vcpu_id )
    {
        vlapic_set_reg(vlapic, APIC_LVT0, APIC_MODE_EXTINT << 8);
        vlapic_set_reg(vlapic, APIC_LVT1, APIC_MODE_NMI << 8);
        set_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
    }
#endif

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                "vcpu=%p, id=%d, vlapic_apic_base_msr=0x%016"PRIx64", "
                "base_address=0x%0lx.",
                v, GET_APIC_ID(vlapic_get_reg(vlapic, APIC_ID)),
                vlapic->apic_base_msr, vlapic->base_address);

    return 1;
}

int vlapic_init(struct vcpu *v)
{
    struct vlapic *vlapic = NULL;

    ASSERT( v != NULL );

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "vlapic_init %d", v->vcpu_id);

    vlapic = xmalloc_bytes(sizeof(struct vlapic));
    if ( vlapic == NULL )
    {
        printk("malloc vlapic error for vcpu %x\n", v->vcpu_id);
        return -ENOMEM;
    }

    memset(vlapic, 0, sizeof(struct vlapic));

    vlapic->regs_page = alloc_domheap_page(NULL);
    if ( vlapic->regs_page == NULL )
    {
        printk("malloc vlapic regs error for vcpu %x\n", v->vcpu_id);
        xfree(vlapic);
        return -ENOMEM;
    }

    vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));

    memset(vlapic->regs, 0, PAGE_SIZE);

    VLAPIC(v) = vlapic;

    vlapic->vcpu = v;

    vlapic_reset(vlapic);

    return 0;
}