
view xen/arch/ia64/vmx/vlsapic.c @ 20967:f5fba6214a20

Remove hardcoded instances of TIMER_SLOP.

They aren't needed at all, since slop now only delays a timer firing,
rather than allowing it to happen early.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Feb 10 13:27:55 2010 +0000 (2010-02-10)
parents 5839491bbf20
children
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vlsapic.c: virtual lsapic model including ITC timer.
 * Copyright (c) 2005, Intel Corporation.
 *
 * Copyright (c) 2007, Isaku Yamahata <yamahata at valinux co jp>
 *                     VA Linux Systems Japan K.K.
 *                     save/restore support
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */

#include <linux/sched.h>
#include <public/xen.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/vmx_vpd.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <asm/vmx_platform.h>
#include <asm/viosapic.h>
#include <asm/vlsapic.h>
#include <asm/vmx_phy_mode.h>
#include <asm/linux/jiffies.h>
#include <xen/domain.h>
#include <asm/hvm/support.h>
#include <public/hvm/save.h>
#include <public/arch-ia64/hvm/memmap.h>

#ifdef IPI_DEBUG
#define IPI_DPRINTK(x...) printk(x)
#else
#define IPI_DPRINTK(x...)
#endif

//u64 fire_itc;
//u64 fire_itc2;
//u64 fire_itm;
//u64 fire_itm2;
/*
 * Update the checked last_itc.
 */

extern void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
                                     u64 vector, REGS *regs);
static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
{
    vtm->last_itc = cur_itc;
}

/*
 * Next for vLSapic
 */

#define NMI_VECTOR      2
#define ExtINT_VECTOR   0
#define NULL_VECTOR     -1
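
/*
 * Record the highest pending interrupt in the VPD vhpi field: 0 for none,
 * 32 for NMI, 16 for ExtINT, otherwise the vector's priority class
 * (vec >> 4).  When the VPD acceleration control vac.a_int is set, PAL is
 * also notified via PAL_VPS_SET_PENDING_INTERRUPT.
 */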
static void update_vhpi(VCPU *vcpu, int vec)
{
    u64 vhpi;

    if (vec == NULL_VECTOR)
        vhpi = 0;
    else if (vec == NMI_VECTOR)
        vhpi = 32;
    else if (vec == ExtINT_VECTOR)
        vhpi = 16;
    else
        vhpi = vec >> 4;

    VCPU(vcpu, vhpi) = vhpi;
    // TODO: Add support for XENO
    if (VCPU(vcpu, vac).a_int) {
        vmx_vpd_pin(vcpu);
        ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
                      (uint64_t)vcpu->arch.privregs, 0, 0, 0, 0, 0, 0);
        vmx_vpd_unpin(vcpu);
    }
}

/*
 * May come from virtualization fault or
 * nested host interrupt.
 */
static int vmx_vcpu_unpend_interrupt(VCPU *vcpu, uint8_t vector)
{
    int ret;

    if (vector & ~0xff) {
        dprintk(XENLOG_WARNING, "vmx_vcpu_unpend_interrupt: bad vector\n");
        return -1;
    }

    ret = test_and_clear_bit(vector, &VCPU(vcpu, irr[0]));

    if (ret) {
        vcpu->arch.irq_new_pending = 1;
        wmb();
    }

    return ret;
}

/*
 * ITC value as seen by the guest (host + offset + drift).
 */
static uint64_t now_itc(vtime_t *vtm)
{
    uint64_t guest_itc = vtm->vtm_offset + ia64_get_itc();

    if (guest_itc >= vtm->last_itc)
        return guest_itc;
    else
        /* guest ITC went backward due to LP switch */
        return vtm->last_itc;
}

/*
 * Interval time components reset.
 */
static void vtm_reset(VCPU *vcpu)
{
    int i;
    u64 vtm_offset;
    VCPU *v;
    struct domain *d = vcpu->domain;
    vtime_t *vtm = &VMX(vcpu, vtm);

    if (vcpu->vcpu_id == 0) {
        vtm_offset = 0UL - ia64_get_itc();
        for (i = d->max_vcpus - 1; i >= 0; i--) {
            if ((v = d->vcpu[i]) != NULL) {
                VMX(v, vtm).vtm_offset = vtm_offset;
                VMX(v, vtm).last_itc = 0;
            }
        }
    }
    vtm->vtm_local_drift = 0;
    VCPU(vcpu, itm) = 0;
    VCPU(vcpu, itv) = 0x10000;
    vtm->last_itc = 0;
}

/* callback function when vtm_timer expires */
static void vtm_timer_fn(void *data)
{
    VCPU *vcpu = data;
    vtime_t *vtm = &VMX(vcpu, vtm);
    u64 vitv;

    vitv = VCPU(vcpu, itv);
    if (!ITV_IRQ_MASK(vitv)) {
        vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(vitv));
        vcpu_unblock(vcpu);
    } else
        vtm->pending = 1;

    /*
     * The "+ 1" avoids a spurious oops message from timer_interrupt() in a
     * VTI guest.  If the guest's oops check is ever changed to
     * time_after_eq(), this adjustment can be dropped.
     */
    update_last_itc(vtm, VCPU(vcpu, itm) + 1);  // update vITC
}
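
/*
 * Per-vcpu virtual timer setup: derive the ITC-frequency-based thresholds,
 * register vtm_timer_fn as the Xen timer callback and reset the timer state.
 */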
void vtm_init(VCPU *vcpu)
{
    vtime_t *vtm;
    uint64_t itc_freq;

    vtm = &VMX(vcpu, vtm);

    itc_freq = local_cpu_data->itc_freq;
    vtm->cfg_max_jump = itc_freq * MAX_JUMP_STEP / 1000;
    vtm->cfg_min_grun = itc_freq * MIN_GUEST_RUNNING_TIME / 1000;
    init_timer(&vtm->vtm_timer, vtm_timer_fn, vcpu, vcpu->processor);
    vtm_reset(vcpu);
}

/*
 * Action when the guest reads the ITC.
 */
uint64_t vtm_get_itc(VCPU *vcpu)
{
    uint64_t guest_itc;
    vtime_t *vtm = &VMX(vcpu, vtm);

    guest_itc = now_itc(vtm);
    return guest_itc;
}
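
/*
 * Guest write to the ITC.  A write on vcpu 0 rebases vtm_offset for every
 * vcpu of the domain.  The cached last_itc is cleared and the interval
 * timer is either stopped (itm already passed) or reprogrammed.
 */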
void vtm_set_itc(VCPU *vcpu, uint64_t new_itc)
{
    int i;
    uint64_t vitm, vtm_offset;
    vtime_t *vtm;
    VCPU *v;
    struct domain *d = vcpu->domain;

    vitm = VCPU(vcpu, itm);
    vtm = &VMX(vcpu, vtm);
    if (vcpu->vcpu_id == 0) {
        vtm_offset = new_itc - ia64_get_itc();
        for (i = d->max_vcpus - 1; i >= 0; i--) {
            if ((v = d->vcpu[i]) != NULL) {
                VMX(v, vtm).vtm_offset = vtm_offset;
                VMX(v, vtm).last_itc = 0;
            }
        }
    }
    vtm->last_itc = 0;
    if (vitm <= new_itc)
        stop_timer(&vtm->vtm_timer);
    else
        vtm_set_itm(vcpu, vitm);
}

extern u64 cycle_to_ns(u64 cycle);
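
/*
 * Guest write to the ITM.  If the new match value is still ahead of
 * last_itc, withdraw any undelivered timer interrupt and program the Xen
 * timer to expire when the virtual ITC reaches it; otherwise stop the timer.
 */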
void vtm_set_itm(VCPU *vcpu, uint64_t val)
{
    vtime_t *vtm;
    uint64_t vitv, cur_itc, expires;

    vitv = VCPU(vcpu, itv);
    vtm = &VMX(vcpu, vtm);
    VCPU(vcpu, itm) = val;
    if (val > vtm->last_itc) {
        cur_itc = now_itc(vtm);
        if (time_before(val, cur_itc))
            val = cur_itc;
        expires = NOW() + cycle_to_ns(val - cur_itc);
        vmx_vcpu_unpend_interrupt(vcpu, ITV_VECTOR(vitv));
        set_timer(&vtm->vtm_timer, expires);
    } else {
        stop_timer(&vtm->vtm_timer);
    }
}
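
/*
 * Guest write to the ITV.  If the write unmasks the timer vector while a
 * tick is already pending, inject it now.
 */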
void vtm_set_itv(VCPU *vcpu, uint64_t val)
{
    vtime_t *vtm = &VMX(vcpu, vtm);

    VCPU(vcpu, itv) = val;

    if (!ITV_IRQ_MASK(val) && vtm->pending) {
        vmx_vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
        vtm->pending = 0;
    }
}

void vlsapic_reset(VCPU *vcpu)
{
    int i;

    VCPU(vcpu, lid) = VCPU_LID(vcpu);
    VCPU(vcpu, ivr) = 0;
    VCPU(vcpu, tpr) = 0x10000;
    VCPU(vcpu, eoi) = 0;
    VCPU(vcpu, irr[0]) = 0;
    VCPU(vcpu, irr[1]) = 0;
    VCPU(vcpu, irr[2]) = 0;
    VCPU(vcpu, irr[3]) = 0;
    VCPU(vcpu, pmv) = 0x10000;
    VCPU(vcpu, cmcv) = 0x10000;
    VCPU(vcpu, lrr0) = 0x10000;   // default reset value?
    VCPU(vcpu, lrr1) = 0x10000;   // default reset value?
    update_vhpi(vcpu, NULL_VECTOR);
    VLSAPIC_XTP(vcpu) = 0x80;     // disabled
    for (i = 0; i < 4; i++) {
        VLSAPIC_INSVC(vcpu, i) = 0;
    }

    dprintk(XENLOG_INFO, "VLSAPIC inservice base=%p\n", &VLSAPIC_INSVC(vcpu, 0));
}

/*
 * Find the highest set bit across 4 long words (256 bits).
 *
 * Return 0-255: number of the highest set bit.
 *        -1   : no bit set.
 */
static __inline__ int highest_bits(uint64_t *dat)
{
    uint64_t bits, bitnum;
    int i;

    /* loop for all 256 bits */
    for ( i=3; i >= 0 ; i -- ) {
        bits = dat[i];
        if ( bits ) {
            bitnum = ia64_fls(bits);
            return i*64+bitnum;
        }
    }
    return NULL_VECTOR;
}

/*
 * Return 0-255 for pending irq.
 * NULL_VECTOR: when no pending.
 */
static int highest_pending_irq(VCPU *vcpu)
{
    if ( VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
    if ( VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
    return highest_bits(&VCPU(vcpu, irr[0]));
}

static int highest_inservice_irq(VCPU *vcpu)
{
    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<NMI_VECTOR) ) return NMI_VECTOR;
    if ( VLSAPIC_INSVC(vcpu, 0) & (1UL<<ExtINT_VECTOR) ) return ExtINT_VECTOR;
    return highest_bits(&(VLSAPIC_INSVC(vcpu, 0)));
}

/*
 * The pending irq is higher than the inservice one.
 */
static int is_higher_irq(int pending, int inservice)
{
    return ( (pending > inservice) ||
             ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) );
}

static int is_higher_class(int pending, int mic)
{
    return ( (pending >> 4) > mic );
}

#define IRQ_NO_MASKED         0
#define IRQ_MASKED_BY_VTPR    1
#define IRQ_MASKED_BY_INSVC   2   // masked by inservice IRQ

/* See Table 5-8 in SDM vol2 for the definition */
static int
_xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
    tpr_t vtpr;

    vtpr.val = VCPU(vcpu, tpr);

    if ( h_inservice == NMI_VECTOR ) {
        return IRQ_MASKED_BY_INSVC;
    }
    if ( h_pending == NMI_VECTOR ) {
        // Non Maskable Interrupt
        return IRQ_NO_MASKED;
    }
    if ( h_inservice == ExtINT_VECTOR ) {
        return IRQ_MASKED_BY_INSVC;
    }

    if ( h_pending == ExtINT_VECTOR ) {
        if ( vtpr.mmi ) {
            // mask all external IRQ
            return IRQ_MASKED_BY_VTPR;
        }
        else {
            return IRQ_NO_MASKED;
        }
    }

    if ( is_higher_irq(h_pending, h_inservice) ) {
        if ( is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)) ) {
            return IRQ_NO_MASKED;
        }
        else {
            return IRQ_MASKED_BY_VTPR;
        }
    }
    else {
        return IRQ_MASKED_BY_INSVC;
    }
}

static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
{
    int mask;

    mask = _xirq_masked(vcpu, h_pending, h_inservice);
    return mask;
}

/*
 * Add a batch of pending interrupts.
 * The interrupt sources are contained in pend_irr[0-3], with
 * each bit standing for one interrupt.
 */
void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, u64 *pend_irr)
{
    uint64_t spsr;
    int i;

    local_irq_save(spsr);
    for (i = 0; i < 4; i++) {
        VCPU(vcpu, irr[i]) |= pend_irr[i];
    }
    local_irq_restore(spsr);
    vcpu->arch.irq_new_pending = 1;
    wmb();
}

/*
 * If the new pending interrupt is enabled and not masked, we directly inject
 * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when
 * the interrupt becomes unmasked, it gets injected.
 * RETURN:
 *    the highest unmasked interrupt.
 *
 * Optimization: We defer setting the VHPI until the EOI time, if a higher
 *               priority interrupt is in-service. The idea is to reduce the
 *               number of unnecessary calls to inject_vhpi.
 */
int vmx_check_pending_irq(VCPU *vcpu)
{
    int mask, h_pending, h_inservice;
    uint64_t isr;
    IA64_PSR vpsr;
    REGS *regs = vcpu_regs(vcpu);

    h_pending = highest_pending_irq(vcpu);
    if ( h_pending == NULL_VECTOR ) {
        update_vhpi(vcpu, NULL_VECTOR);
        h_pending = SPURIOUS_VECTOR;
        goto chk_irq_exit;
    }
    h_inservice = highest_inservice_irq(vcpu);

    vpsr.val = VCPU(vcpu, vpsr);
    mask = irq_masked(vcpu, h_pending, h_inservice);
    if ( vpsr.i && IRQ_NO_MASKED == mask ) {
        isr = vpsr.val & IA64_PSR_RI;
        if ( !vpsr.ic )
            panic_domain(regs, "Interrupt when IC=0\n");
        update_vhpi(vcpu, h_pending);
        vmx_reflect_interruption(0, isr, 0, 12, regs);   // EXT IRQ
    } else if (mask == IRQ_MASKED_BY_INSVC) {
        if (VCPU(vcpu, vhpi))
            update_vhpi(vcpu, NULL_VECTOR);
    }
    else {
        // masked by vpsr.i or vtpr.
        update_vhpi(vcpu, h_pending);
    }

chk_irq_exit:
    return h_pending;
}

/*
 * Set an INIT interruption request on vcpu[0] of the target domain.
 * The INIT interruption is injected into each vcpu by the guest firmware.
 */
void vmx_pend_pal_init(struct domain *d)
{
    VCPU *vcpu;

    vcpu = d->vcpu[0];
    vcpu->arch.arch_vmx.pal_init_pending = 1;
}

/*
 * Only coming from virtualization fault.
 */
void guest_write_eoi(VCPU *vcpu)
{
    int vec;

    vec = highest_inservice_irq(vcpu);
    if (vec == NULL_VECTOR) {
        gdprintk(XENLOG_WARNING, "vcpu(%d): Wrong vector to EOI\n",
                 vcpu->vcpu_id);
        return;
    }
    VLSAPIC_INSVC(vcpu, vec >> 6) &= ~(1UL << (vec & 63));
    VCPU(vcpu, eoi) = 0;    // overwrite the data
    vcpu->arch.irq_new_pending = 1;
    wmb();
}
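
/* Return 1 if the highest pending interrupt can be delivered right now. */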
int is_unmasked_irq(VCPU *vcpu)
{
    int h_pending, h_inservice;

    h_pending = highest_pending_irq(vcpu);
    h_inservice = highest_inservice_irq(vcpu);
    if ( h_pending == NULL_VECTOR ||
         irq_masked(vcpu, h_pending, h_inservice) != IRQ_NO_MASKED ) {
        return 0;
    }
    else
        return 1;
}
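
/*
 * Guest read of cr.ivr: return the highest pending, deliverable vector and
 * move it from IRR to the in-service set.  A masked or absent vector yields
 * the spurious vector instead, with the VHPI updated accordingly.
 */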
uint64_t guest_read_vivr(VCPU *vcpu)
{
    int vec, h_inservice, mask;

    vec = highest_pending_irq(vcpu);
    h_inservice = highest_inservice_irq(vcpu);
    mask = irq_masked(vcpu, vec, h_inservice);
    if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
        if (VCPU(vcpu, vhpi))
            update_vhpi(vcpu, NULL_VECTOR);
        return IA64_SPURIOUS_INT_VECTOR;
    }
    if (mask == IRQ_MASKED_BY_VTPR) {
        update_vhpi(vcpu, vec);
        return IA64_SPURIOUS_INT_VECTOR;
    }
    VLSAPIC_INSVC(vcpu, vec >> 6) |= (1UL << (vec & 63));
    vmx_vcpu_unpend_interrupt(vcpu, vec);
    return (uint64_t)vec;
}

static void generate_exirq(VCPU *vcpu)
{
    IA64_PSR vpsr;
    uint64_t isr;
    REGS *regs = vcpu_regs(vcpu);

    vpsr.val = VCPU(vcpu, vpsr);
    isr = vpsr.val & IA64_PSR_RI;
    if ( !vpsr.ic )
        panic_domain(regs, "Interrupt when IC=0\n");
    vmx_reflect_interruption(0, isr, 0, 12, regs);   // EXT IRQ
}
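
/*
 * Compare the recorded VHPI against the masking threshold built from
 * vpsr.i, vtpr.mmi and vtpr.mic, and reflect an external interrupt into
 * the guest if the pending priority class is above it.
 */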
void vhpi_detection(VCPU *vcpu)
{
    uint64_t threshold, vhpi;
    tpr_t vtpr;
    IA64_PSR vpsr;

    vpsr.val = VCPU(vcpu, vpsr);
    vtpr.val = VCPU(vcpu, tpr);

    threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
    vhpi = VCPU(vcpu, vhpi);
    if ( vhpi > threshold ) {
        // interrupt activated
        generate_exirq (vcpu);
    }
}

void vmx_vexirq(VCPU *vcpu)
{
    generate_exirq (vcpu);
}

struct vcpu *lid_to_vcpu(struct domain *d, uint16_t dest)
{
    int id = dest >> 8;

    /* Fast look: assume EID=0 ID=vcpu_id. */
    if ((dest & 0xff) == 0 && id < d->max_vcpus)
        return d->vcpu[id];
    return NULL;
}

/*
 * To inject INIT into the guest, we must set the PAL_INIT entry point
 * and adjust psr to switch to physical mode.
 */
#define PSR_SET_BITS (IA64_PSR_DT | IA64_PSR_IT | IA64_PSR_RT | \
                      IA64_PSR_IC | IA64_PSR_RI | IA64_PSR_I | IA64_PSR_CPL)

static void vmx_inject_guest_pal_init(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    uint64_t psr = vmx_vcpu_get_psr(vcpu);

    regs->cr_iip = PAL_INIT_ENTRY;

    psr = psr & ~PSR_SET_BITS;
    vmx_vcpu_set_psr(vcpu, psr);
}

/*
 * Deliver an interrupt to a vcpu. (Only U-VP is supported now.)
 * dm:     delivery mode.
 * vector: interrupt vector to deliver.
 */
static int vcpu_deliver_int(VCPU *vcpu, uint64_t dm, uint64_t vector)
{
    int running = vcpu->is_running;

    IPI_DPRINTK("deliver_int %lx %lx\n", dm, vector);

    switch (dm) {
    case SAPIC_FIXED:     // INT
        vmx_vcpu_pend_interrupt(vcpu, vector);
        break;
    case SAPIC_LOWEST_PRIORITY:
    {
        struct vcpu *lowest = vcpu_viosapic(vcpu)->lowest_vcpu;

        if (lowest == NULL)
            lowest = vcpu;
        vmx_vcpu_pend_interrupt(lowest, vector);
        break;
    }
    case SAPIC_PMI:
        // TODO -- inject guest PMI
        panic_domain(NULL, "Inject guest PMI!\n");
        break;
    case SAPIC_NMI:
        vmx_vcpu_pend_interrupt(vcpu, 2);
        break;
    case SAPIC_INIT:
        vmx_inject_guest_pal_init(vcpu);
        break;
    case SAPIC_EXTINT:    // ExtINT
        vmx_vcpu_pend_interrupt(vcpu, 0);
        break;
    default:
        return -EINVAL;
    }

    /* Kick vcpu. */
    vcpu_unblock(vcpu);
    if (running)
        smp_send_event_check_cpu(vcpu->processor);

    return 0;
}
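
/* Deliver an interrupt to the vcpu addressed by the LID value 'dest'. */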
int vlsapic_deliver_int(struct domain *d,
                        uint16_t dest, uint64_t dm, uint64_t vector)
{
    VCPU *vcpu;

    vcpu = lid_to_vcpu(d, dest);
    if (vcpu == NULL)
        return -ESRCH;

    if (!vcpu->is_initialised || test_bit(_VPF_down, &vcpu->pause_flags))
        return -ENOEXEC;

    return vcpu_deliver_int (vcpu, dm, vector);
}

/*
 * Deliver the INIT interruption to guest.
 */
void deliver_pal_init(VCPU *vcpu)
{
    vcpu_deliver_int(vcpu, SAPIC_INIT, 0);
}

/*
 * execute write IPI op.
 */
static void vlsapic_write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
{
    VCPU *targ;
    struct domain *d = vcpu->domain;

    targ = lid_to_vcpu(vcpu->domain,
                       (((ipi_a_t)addr).id << 8) | ((ipi_a_t)addr).eid);
    if (targ == NULL)
        panic_domain(NULL, "Unknown IPI cpu\n");

    if (!targ->is_initialised ||
        test_bit(_VPF_down, &targ->pause_flags)) {

        struct pt_regs *targ_regs = vcpu_regs(targ);

        if (arch_set_info_guest(targ, NULL) != 0) {
            printk("arch_boot_vcpu: failure\n");
            return;
        }
        /* First or next rendez-vous: set registers. */
        vcpu_init_regs(targ);
        targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip;
        targ_regs->r1 = d->arch.sal_data->boot_rdv_r1;

        if (test_and_clear_bit(_VPF_down, &targ->pause_flags)) {
            vcpu_wake(targ);
            printk(XENLOG_DEBUG "arch_boot_vcpu: vcpu %d awaken %016lx!\n",
                   targ->vcpu_id, targ_regs->cr_iip);
        } else {
            printk("arch_boot_vcpu: huh, already awake!");
        }
    } else {
        if (((ipi_d_t)value).dm == SAPIC_LOWEST_PRIORITY ||
            vcpu_deliver_int(targ, ((ipi_d_t)value).dm,
                             ((ipi_d_t)value).vector) < 0)
            panic_domain(NULL, "Deliver reserved interrupt!\n");
    }
    return;
}
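
/*
 * MMIO read handler for the processor interrupt block (PIB).  Only the XTP
 * byte returns data; INTA and IPI reads are checked for access size but
 * yield 0.
 */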
unsigned long vlsapic_read(struct vcpu *v,
                           unsigned long addr,
                           unsigned long length)
{
    uint64_t result = 0;

    addr &= (PIB_SIZE - 1);

    switch (addr) {
    case PIB_OFST_INTA:
        if (length == 1)   // 1 byte load
            ;              // There is no i8259, there is no INTA access
        else
            panic_domain(NULL, "Undefined read on PIB INTA\n");

        break;
    case PIB_OFST_XTP:
        if (length == 1) {
            result = VLSAPIC_XTP(v);
            // printk("read xtp %lx\n", result);
        } else {
            panic_domain(NULL, "Undefined read on PIB XTP\n");
        }
        break;
    default:
        if (PIB_LOW_HALF(addr)) {   // lower half
            if (length != 8)
                panic_domain(NULL, "Undefined IPI-LHF read!\n");
            else
                IPI_DPRINTK("IPI-LHF read %lx\n", addr);
        } else {   // upper half
            IPI_DPRINTK("IPI-UHF read %lx\n", addr);
        }
        break;
    }
    return result;
}
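
/*
 * Update this vcpu's XTP (external task priority) byte and recompute the
 * viosapic's lowest-priority vcpu, the target used for
 * SAPIC_LOWEST_PRIORITY delivery.  A value with bit 7 set marks the vcpu
 * as disabled as a target.
 */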
static void vlsapic_write_xtp(struct vcpu *v, uint8_t val)
{
    struct viosapic *viosapic;
    struct vcpu *lvcpu, *vcpu;

    viosapic = vcpu_viosapic(v);

    spin_lock(&viosapic->lock);
    lvcpu = viosapic->lowest_vcpu;
    VLSAPIC_XTP(v) = val;

    for_each_vcpu(v->domain, vcpu) {
        if (VLSAPIC_XTP(lvcpu) > VLSAPIC_XTP(vcpu))
            lvcpu = vcpu;
    }

    if (VLSAPIC_XTP(lvcpu) & 0x80)   // Disabled
        lvcpu = NULL;

    viosapic->lowest_vcpu = lvcpu;
    spin_unlock(&viosapic->lock);
}

void vlsapic_write(struct vcpu *v,
                   unsigned long addr,
                   unsigned long length,
                   unsigned long val)
{
    addr &= (PIB_SIZE - 1);

    switch (addr) {
    case PIB_OFST_INTA:
        panic_domain(NULL, "Undefined write on PIB INTA\n");
        break;
    case PIB_OFST_XTP:
        if (length == 1) {
            // printk("write xtp %lx\n", val);
            vlsapic_write_xtp(v, val);
        } else {
            panic_domain(NULL, "Undefined write on PIB XTP\n");
        }
        break;
    default:
        if (PIB_LOW_HALF(addr)) {   // lower half
            if (length != 8)
                panic_domain(NULL, "Undefined IPI-LHF write with size %ld!\n",
                             length);
            else
                vlsapic_write_ipi(v, addr, val);
        }
        else {   // upper half
            // printk("IPI-UHF write %lx\n", addr);
            panic_domain(NULL, "No support for SM-VP yet\n");
        }
        break;
    }
}
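
/*
 * HVM save handler: record the in-service bitmap, VHPI, XTP and any
 * pending PAL INIT for every online vcpu.
 */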
static int vlsapic_save(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;

    for_each_vcpu(d, v) {
        struct hvm_hw_ia64_vlsapic vlsapic;
        int i;

        if (test_bit(_VPF_down, &v->pause_flags))
            continue;

        memset(&vlsapic, 0, sizeof(vlsapic));
        for (i = 0; i < 4; i++)
            vlsapic.insvc[i] = VLSAPIC_INSVC(v, i);

        vlsapic.vhpi = VCPU(v, vhpi);
        vlsapic.xtp = VLSAPIC_XTP(v);
        vlsapic.pal_init_pending = v->arch.arch_vmx.pal_init_pending;

        if (hvm_save_entry(VLSAPIC, v->vcpu_id, h, &vlsapic))
            return -EINVAL;
    }

    return 0;
}
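
/* HVM restore handler: the per-vcpu counterpart of vlsapic_save(). */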
static int vlsapic_load(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct hvm_hw_ia64_vlsapic vlsapic;
    int i;

    vcpuid = hvm_load_instance(h);
    if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
        gdprintk(XENLOG_ERR,
                 "%s: domain has no vlsapic %u\n", __func__, vcpuid);
        return -EINVAL;
    }

    if (hvm_load_entry(VLSAPIC, h, &vlsapic) != 0)
        return -EINVAL;

    for (i = 0; i < 4; i++)
        VLSAPIC_INSVC(v, i) = vlsapic.insvc[i];

    VCPU(v, vhpi) = vlsapic.vhpi;
    VLSAPIC_XTP(v) = vlsapic.xtp;
    v->arch.arch_vmx.pal_init_pending = vlsapic.pal_init_pending;
    v->arch.irq_new_pending = 1;   /* to force checking irq */

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(VLSAPIC, vlsapic_save, vlsapic_load,
                          1, HVMSR_PER_VCPU);
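
/*
 * HVM save handler for the virtual interval timer: stop the timer, snapshot
 * (itc, itm, last_itc, pending) and rearm it via vtm_set_itm().
 */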
static int vtime_save(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;

    for_each_vcpu(d, v) {
        vtime_t *vtm = &VMX(v, vtm);
        struct hvm_hw_ia64_vtime vtime;

        if (test_bit(_VPF_down, &v->pause_flags))
            continue;

        stop_timer(&vtm->vtm_timer);   //XXX should wait for callback not running.

        memset(&vtime, 0, sizeof(vtime));
        vtime.itc = now_itc(vtm);
        vtime.itm = VCPU(v, itm);
        vtime.last_itc = vtm->last_itc;
        vtime.pending = vtm->pending;

        vtm_set_itm(v, vtime.itm);   // this may start timer.

        if (hvm_save_entry(VTIME, v->vcpu_id, h, &vtime))
            return -EINVAL;
    }

    return 0;
}
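
/*
 * HVM restore handler for the virtual interval timer: reload the saved
 * state, migrate the timer to the vcpu's current processor, restart it
 * through vtm_set_itm()/vtm_set_itc(), and wake the vcpu if it was down.
 */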
static int vtime_load(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct hvm_hw_ia64_vtime vtime;
    vtime_t *vtm;

    vcpuid = hvm_load_instance(h);
    if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
        gdprintk(XENLOG_ERR,
                 "%s: domain has no vtime %u\n", __func__, vcpuid);
        return -EINVAL;
    }

    if (hvm_load_entry(VTIME, h, &vtime) != 0)
        return -EINVAL;

    vtm = &VMX(v, vtm);
    stop_timer(&vtm->vtm_timer);   //XXX should wait for callback not running.

    vtm->last_itc = vtime.last_itc;
    vtm->pending = vtime.pending;

    migrate_timer(&vtm->vtm_timer, v->processor);
    vtm_set_itm(v, vtime.itm);
    vtm_set_itc(v, vtime.itc);   // This may start timer.

    if (test_and_clear_bit(_VPF_down, &v->pause_flags))
        vcpu_wake(v);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(VTIME, vtime_save, vtime_load, 1, HVMSR_PER_VCPU);