/root/src/xen/xen/arch/x86/hvm/svm/intr.c
Line | Count | Source |
1 | | /* |
2 | | * intr.c: Interrupt handling for SVM. |
3 | | * Copyright (c) 2005, AMD Inc. |
4 | | * Copyright (c) 2004, Intel Corporation. |
5 | | * |
6 | | * This program is free software; you can redistribute it and/or modify it |
7 | | * under the terms and conditions of the GNU General Public License, |
8 | | * version 2, as published by the Free Software Foundation. |
9 | | * |
10 | | * This program is distributed in the hope it will be useful, but WITHOUT |
11 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
12 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
13 | | * more details. |
14 | | * |
15 | | * You should have received a copy of the GNU General Public License along with |
16 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
17 | | */ |
18 | | |
19 | | #include <xen/init.h> |
20 | | #include <xen/mm.h> |
21 | | #include <xen/lib.h> |
22 | | #include <xen/trace.h> |
23 | | #include <xen/errno.h> |
24 | | #include <asm/cpufeature.h> |
25 | | #include <asm/processor.h> |
26 | | #include <asm/msr.h> |
27 | | #include <asm/paging.h> |
28 | | #include <asm/hvm/hvm.h> |
29 | | #include <asm/hvm/io.h> |
30 | | #include <asm/hvm/support.h> |
31 | | #include <asm/hvm/vlapic.h> |
32 | | #include <asm/hvm/svm/svm.h> |
33 | | #include <asm/hvm/svm/intr.h> |
34 | | #include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */ |
35 | | #include <xen/event.h> |
36 | | #include <xen/kernel.h> |
37 | | #include <public/hvm/ioreq.h> |
38 | | #include <xen/domain_page.h> |
39 | | #include <asm/hvm/trace.h> |
40 | | |
41 | | static void svm_inject_nmi(struct vcpu *v) |
42 | 0 | { |
43 | 0 | struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; |
44 | 0 | u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); |
45 | 0 | eventinj_t event; |
46 | 0 |
47 | 0 | event.bytes = 0; |
48 | 0 | event.fields.v = 1; |
49 | 0 | event.fields.type = X86_EVENTTYPE_NMI; |
50 | 0 | event.fields.vector = 2; |
51 | 0 |
52 | 0 | ASSERT(vmcb->eventinj.fields.v == 0); |
53 | 0 | vmcb->eventinj = event; |
54 | 0 |
55 | 0 | /* |
56 | 0 | * SVM does not virtualise the NMI mask, so we emulate it by intercepting |
57 | 0 | * the next IRET and blocking NMI injection until the intercept triggers. |
58 | 0 | */ |
59 | 0 | vmcb_set_general1_intercepts( |
60 | 0 | vmcb, general1_intercepts | GENERAL1_INTERCEPT_IRET); |
61 | 0 | } |
62 | | |
63 | | static void svm_inject_extint(struct vcpu *v, int vector) |
64 | 0 | { |
65 | 0 | struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; |
66 | 0 | eventinj_t event; |
67 | 0 |
68 | 0 | event.bytes = 0; |
69 | 0 | event.fields.v = 1; |
70 | 0 | event.fields.type = X86_EVENTTYPE_EXT_INTR; |
71 | 0 | event.fields.vector = vector; |
72 | 0 |
73 | 0 | ASSERT(vmcb->eventinj.fields.v == 0); |
74 | 0 | vmcb->eventinj = event; |
75 | 0 | } |
76 | | |
77 | | static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack) |
78 | 0 | { |
79 | 0 | struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; |
80 | 0 | uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb); |
81 | 0 | vintr_t intr; |
82 | 0 |
83 | 0 | ASSERT(intack.source != hvm_intsrc_none); |
84 | 0 |
85 | 0 | if ( nestedhvm_enabled(v->domain) ) { |
86 | 0 | struct nestedvcpu *nv = &vcpu_nestedhvm(v); |
87 | 0 | if ( nv->nv_vmentry_pending ) { |
88 | 0 | struct vmcb_struct *gvmcb = nv->nv_vvmcx; |
89 | 0 |
90 | 0 | /* Check if the l1 guest injects an interrupt into the l2 guest via vintr. |
91 | 0 | * Return here in that case; otherwise the l2 guest loses interrupts. |
92 | 0 | */ |
93 | 0 | ASSERT(gvmcb != NULL); |
94 | 0 | intr = vmcb_get_vintr(gvmcb); |
95 | 0 | if ( intr.fields.irq ) |
96 | 0 | return; |
97 | 0 | } |
98 | 0 | } |
99 | 0 |
100 | 0 | HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source, |
101 | 0 | vmcb->eventinj.fields.v ? vmcb->eventinj.fields.vector : -1); |
102 | 0 |
103 | 0 | /* |
104 | 0 | * Create a dummy virtual interrupt to intercept as soon as the |
105 | 0 | * guest can accept the real interrupt. |
106 | 0 | * |
107 | 0 | * TODO: Better NMI handling. We need a way to skip a MOV SS interrupt |
108 | 0 | * shadow. This is hard to do without hardware support. Also we should |
109 | 0 | * not be waiting for EFLAGS.IF to become 1. |
110 | 0 | */ |
111 | 0 |
112 | 0 | /* |
113 | 0 | * NMI-blocking window is handled by IRET interception. We should not |
114 | 0 | * inject a VINTR in this case as VINTR is unaware of NMI-blocking and |
115 | 0 | * hence we can enter an endless loop (VINTR intercept fires, yet |
116 | 0 | * hvm_interrupt_blocked() still indicates NMI-blocking is active, so |
117 | 0 | * we inject a VINTR, ...). |
118 | 0 | */ |
119 | 0 | if ( (intack.source == hvm_intsrc_nmi) && |
120 | 0 | (general1_intercepts & GENERAL1_INTERCEPT_IRET) ) |
121 | 0 | return; |
122 | 0 |
123 | 0 | intr = vmcb_get_vintr(vmcb); |
124 | 0 | intr.fields.irq = 1; |
125 | 0 | intr.fields.vector = 0; |
126 | 0 | intr.fields.prio = intack.vector >> 4; |
127 | 0 | intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic); |
128 | 0 | vmcb_set_vintr(vmcb, intr); |
129 | 0 | vmcb_set_general1_intercepts( |
130 | 0 | vmcb, general1_intercepts | GENERAL1_INTERCEPT_VINTR); |
131 | 0 | } |
132 | | |
133 | | void svm_intr_assist(void) |
134 | 0 | { |
135 | 0 | struct vcpu *v = current; |
136 | 0 | struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; |
137 | 0 | struct hvm_intack intack; |
138 | 0 | enum hvm_intblk intblk; |
139 | 0 |
140 | 0 | /* Crank the handle on interrupt state. */ |
141 | 0 | pt_update_irq(v); |
142 | 0 |
143 | 0 | do { |
144 | 0 | intack = hvm_vcpu_has_pending_irq(v); |
145 | 0 | if ( likely(intack.source == hvm_intsrc_none) ) |
146 | 0 | return; |
147 | 0 |
148 | 0 | intblk = hvm_interrupt_blocked(v, intack); |
149 | 0 | if ( intblk == hvm_intblk_svm_gif ) { |
150 | 0 | ASSERT(nestedhvm_enabled(v->domain)); |
151 | 0 | return; |
152 | 0 | } |
153 | 0 |
154 | 0 | /* Interrupts for the nested guest are already |
155 | 0 | * in the vmcb. |
156 | 0 | */ |
157 | 0 | if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) ) |
158 | 0 | { |
159 | 0 | int rc; |
160 | 0 |
161 | 0 | /* l2 guest was running when an interrupt for |
162 | 0 | * the l1 guest occurred. |
163 | 0 | */ |
164 | 0 | rc = nestedsvm_vcpu_interrupt(v, intack); |
165 | 0 | switch (rc) { |
166 | 0 | case NSVM_INTR_NOTINTERCEPTED: |
167 | 0 | /* Inject interrupt into 2nd level guest directly. */ |
168 | 0 | break; |
169 | 0 | case NSVM_INTR_NOTHANDLED: |
170 | 0 | case NSVM_INTR_FORCEVMEXIT: |
171 | 0 | return; |
172 | 0 | case NSVM_INTR_MASKED: |
173 | 0 | /* Guest already enabled an interrupt window. */ |
174 | 0 | return; |
175 | 0 | default: |
176 | 0 | panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x", |
177 | 0 | __func__, rc); |
178 | 0 | } |
179 | 0 | } |
180 | 0 |
181 | 0 | /* |
182 | 0 | * Pending IRQs must be delayed if: |
183 | 0 | * 1. An event is already pending. This is despite the fact that SVM |
184 | 0 | * provides a VINTR delivery method quite separate from the EVENTINJ |
185 | 0 | * mechanism. The event delivery can arbitrarily delay the injection |
186 | 0 | * of the vintr (for example, if the exception is handled via an |
187 | 0 | * interrupt gate, hence zeroing RFLAGS.IF). In the meantime: |
188 | 0 | * - the vTPR could be modified upwards, so we need to wait until |
189 | 0 | * the exception is delivered before we can safely decide that an |
190 | 0 | * interrupt is deliverable; and |
191 | 0 | * - the guest might look at the APIC/PIC state, so we ought not to |
192 | 0 | * have cleared the interrupt out of the IRR. |
193 | 0 | * 2. The IRQ is masked. |
194 | 0 | */ |
195 | 0 | if ( unlikely(vmcb->eventinj.fields.v) || intblk ) |
196 | 0 | { |
197 | 0 | svm_enable_intr_window(v, intack); |
198 | 0 | return; |
199 | 0 | } |
200 | 0 |
201 | 0 | intack = hvm_vcpu_ack_pending_irq(v, intack); |
202 | 0 | } while ( intack.source == hvm_intsrc_none ); |
203 | 0 |
204 | 0 | if ( intack.source == hvm_intsrc_nmi ) |
205 | 0 | { |
206 | 0 | svm_inject_nmi(v); |
207 | 0 | } |
208 | 0 | else |
209 | 0 | { |
210 | 0 | HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0); |
211 | 0 | svm_inject_extint(v, intack.vector); |
212 | 0 | pt_intr_post(v, intack); |
213 | 0 | } |
214 | 0 |
215 | 0 | /* Is there another IRQ to queue up behind this one? */ |
216 | 0 | intack = hvm_vcpu_has_pending_irq(v); |
217 | 0 | if ( unlikely(intack.source != hvm_intsrc_none) ) |
218 | 0 | svm_enable_intr_window(v, intack); |
219 | 0 | } |
220 | | |
221 | | /* |
222 | | * Local variables: |
223 | | * mode: C |
224 | | * c-file-style: "BSD" |
225 | | * c-basic-offset: 4 |
226 | | * tab-width: 4 |
227 | | * indent-tabs-mode: nil |
228 | | * End: |
229 | | */ |