xen/arch/x86/hvm/svm/intr.c @ 16730:9865d5e82802

hvm: Fix evtchn-to-fake-pci interrupt propagation.

Previously the evtchn_upcall_pending flag would only ever be sampled
on VCPU0, possibly leading to long delays in deasserting the
fake-pci-device INTx line if the interrupt was actually delivered to
a VCPU other than VCPU0.

Diagnosed by Ian Jackson <ian.jackson@eu.citrix.com>

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jan 08 15:55:29 2008 +0000 (2008-01-08)
parents 9eff4c97053b
children e4fd457a3dd5
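
The fix implies that each VCPU samples its own evtchn_upcall_pending
flag on the interrupt-assist path (the hvm_maybe_deassert_evtchn_irq()
call visible in svm_intr_assist() below). A minimal sketch of what that
helper plausibly does, reconstructed here for illustration only:
callback_via_asserted, vcpu_info() and hvm_set_callback_irq_level() are
assumptions based on Xen code of this vintage, not quoted from this
changeset.

/* Illustrative sketch only -- not part of this changeset. */
void hvm_maybe_deassert_evtchn_irq(void)
{
    struct domain *d = current->domain;
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    /* Sample the *current* VCPU's flag: before the fix, only VCPU0's
     * evtchn_upcall_pending was ever consulted, so the fake PCI INTx
     * line could stay asserted long after another VCPU had handled
     * the event. */
    if ( hvm_irq->callback_via_asserted &&
         !vcpu_info(current, evtchn_upcall_pending) )
        hvm_set_callback_irq_level();
}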
/*
 * intr.c: Interrupt handling for SVM.
 * Copyright (c) 2005, AMD Inc.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/errno.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/paging.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/intr.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <public/hvm/ioreq.h>
#include <xen/domain_page.h>
#include <asm/hvm/trace.h>

/* Queue an NMI (vector 2) for injection via the VMCB event-injection field. */
static void svm_inject_nmi(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    eventinj_t event;

    event.bytes = 0;
    event.fields.v = 1;
    event.fields.type = X86_EVENTTYPE_NMI;
    event.fields.vector = 2;

    /* There must not already be an event queued for injection. */
    ASSERT(vmcb->eventinj.fields.v == 0);
    vmcb->eventinj = event;
}

/* Queue an external interrupt with the given vector for injection. */
static void svm_inject_extint(struct vcpu *v, int vector)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    eventinj_t event;

    event.bytes = 0;
    event.fields.v = 1;
    event.fields.type = X86_EVENTTYPE_EXT_INTR;
    event.fields.vector = vector;

    /* There must not already be an event queued for injection. */
    ASSERT(vmcb->eventinj.fields.v == 0);
    vmcb->eventinj = event;
}

static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    vintr_t intr;

    ASSERT(intack.source != hvm_intsrc_none);

    HVMTRACE_2D(INJ_VIRQ, v, 0x0, /*fake=*/ 1);

    /*
     * Create a dummy virtual interrupt to intercept as soon as the
     * guest can accept the real interrupt.
     *
     * TODO: Better NMI handling. We need a way to skip a MOV SS interrupt
     * shadow. This is hard to do without hardware support. We should also
     * track 'NMI blocking' from NMI injection until IRET. This can be done
     * quite easily in software by intercepting the unblocking IRET.
     */
    intr = vmcb->vintr;
    intr.fields.irq = 1;
    intr.fields.vector = 0;
    intr.fields.prio = intack.vector >> 4;
    intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic);
    vmcb->vintr = intr;
    vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
}
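
The TODO above sketches a software approach to NMI blocking: intercept
the IRET that ends the guest's NMI handler. A rough illustration of that
idea follows; it is not part of this changeset, and
GENERAL1_INTERCEPT_IRET plus the vmexit hook shown are assumptions about
how it could be wired up.

/* Illustrative sketch of the TODO's IRET-intercept idea; not in this
 * changeset.  Names beyond those used above are assumptions. */
static void svm_start_nmi_blocking(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    /* After an NMI is injected, further NMIs are blocked until the
     * guest executes IRET; intercept IRET so we see the window
     * re-open. */
    vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
}

static void svm_vmexit_do_iret(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

    /* The guest is leaving its NMI handler: stop intercepting IRET and
     * treat NMIs as injectable again. */
    vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
}
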
asmlinkage void svm_intr_assist(void)
{
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    struct hvm_intack intack;

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);
    hvm_maybe_deassert_evtchn_irq();

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            return;

        /*
         * Pending IRQs must be delayed if:
         * 1. An event is already pending. This is despite the fact that SVM
         *    provides a VINTR delivery method quite separate from the EVENTINJ
         *    mechanism. The event delivery can arbitrarily delay the injection
         *    of the vintr (for example, if the exception is handled via an
         *    interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
         *    - the vTPR could be modified upwards, so we need to wait until
         *      the exception is delivered before we can safely decide that an
         *      interrupt is deliverable; and
         *    - the guest might look at the APIC/PIC state, so we ought not to
         *      have cleared the interrupt out of the IRR.
         * 2. The IRQ is masked.
         */
        if ( unlikely(vmcb->eventinj.fields.v) ||
             hvm_interrupt_blocked(v, intack) )
        {
            enable_intr_window(v, intack);
            return;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

    if ( intack.source == hvm_intsrc_nmi )
    {
        svm_inject_nmi(v);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, v, intack.vector, /*fake=*/ 0);
        svm_inject_extint(v, intack.vector);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        enable_intr_window(v, intack);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */