/root/src/xen/xen/arch/x86/hvm/irq.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * irq.c |
3 | | * |
4 | | * Interrupt distribution and delivery logic. |
5 | | * |
6 | | * Copyright (c) 2006, K A Fraser, XenSource Inc. |
7 | | * |
8 | | * This program is free software; you can redistribute it and/or modify it |
9 | | * under the terms and conditions of the GNU General Public License, |
10 | | * version 2, as published by the Free Software Foundation. |
11 | | * |
12 | | * This program is distributed in the hope it will be useful, but WITHOUT |
13 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
14 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
15 | | * more details. |
16 | | * |
17 | | * You should have received a copy of the GNU General Public License along with |
18 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
19 | | */ |
20 | | |
21 | | #include <xen/types.h> |
22 | | #include <xen/event.h> |
23 | | #include <xen/sched.h> |
24 | | #include <xen/irq.h> |
25 | | #include <xen/keyhandler.h> |
26 | | #include <asm/hvm/domain.h> |
27 | | #include <asm/hvm/support.h> |
28 | | #include <asm/msi.h> |
29 | | |
/* Caller must hold hvm_domain->irq_lock. */
static void assert_gsi(struct domain *d, unsigned ioapic_gsi)
{
    struct pirq *pirq = pirq_info(d, domain_emuirq_to_pirq(d, ioapic_gsi));

    /*
     * A GSI bound to an event-channel pirq is delivered there directly;
     * otherwise inject a positive edge into the emulated IO-APIC.
     */
    if ( hvm_domain_use_pirq(d, pirq) )
        send_guest_pirq(d, pirq);
    else
        vioapic_irq_positive_edge(d, ioapic_gsi);
}
43 | | |
/*
 * Assert an interrupt on both emulated controllers: the IO-APIC GSI
 * (possibly routed to an event channel, see assert_gsi()) and the
 * corresponding legacy PIC input line.
 * Caller must hold hvm_domain->irq_lock.
 */
static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
{
    assert_gsi(d, ioapic_gsi);
    vpic_irq_positive_edge(d, pic_irq);
}
49 | | |
/* Caller must hold hvm_domain->irq_lock. */
static void deassert_irq(struct domain *d, unsigned isa_irq)
{
    struct pirq *pirq = pirq_info(d, domain_emuirq_to_pirq(d, isa_irq));

    /* Lines routed via an event-channel pirq have no PIC state to lower. */
    if ( hvm_domain_use_pirq(d, pirq) )
        return;

    vpic_irq_negative_edge(d, isa_irq);
}
59 | | |
/*
 * Assert a PCI INTx line (device 0-31, pin INTA..INTD = 0..3).
 * Caller must hold hvm_domain->irq_lock.
 */
static void __hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    /* Level-triggered line: nothing more to do if it was already set. */
    if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }
    /* 0 -> 1 transition on this GSI: inject the edge. */
    if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
        assert_gsi(d, gsi);

    /*
     * Mirror the assertion onto the PCI-ISA link. The && chain
     * short-circuits: the ISA-side GSI count is only bumped on the
     * link's first assertion and when the link is routed (isa_irq != 0).
     */
    link = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
         (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
        assert_irq(d, isa_irq, isa_irq);
}
86 | | |
/* Locking wrapper around __hvm_pci_intx_assert(). */
void hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_assert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
94 | | |
/*
 * Deassert a PCI INTx line, undoing __hvm_pci_intx_assert().
 * Caller must hold hvm_domain->irq_lock.
 */
static void __hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    /* Nothing to do if the line was not asserted. */
    if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }
    --hvm_irq->gsi_assert_count[gsi];

    /*
     * Mirror the release onto the PCI-ISA link; the short-circuiting
     * matches __hvm_pci_intx_assert(), and deassert_irq() only fires on
     * the link's last release of a routed (non-zero) ISA IRQ.
     */
    link = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
         (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
        deassert_irq(d, isa_irq);
}
120 | | |
/* Locking wrapper around __hvm_pci_intx_deassert(). */
void hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_deassert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
128 | | |
/* Assert a GSI directly, bypassing the PCI INTx line bookkeeping. */
void hvm_gsi_assert(struct domain *d, unsigned int gsi)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }

    /*
     * __hvm_pci_intx_{de}assert uses a bitfield in pci_intx.i to track the
     * status of each interrupt line, and Xen does the routing and GSI
     * assertion based on that. The value of the pci_intx.i bitmap prevents the
     * same line from triggering multiple times. As we don't use that bitmap
     * for the hardware domain, Xen needs to rely on gsi_assert_count in order
     * to know if the GSI is pending or not.
     */
    spin_lock(&d->arch.hvm_domain.irq_lock);
    if ( !hvm_irq->gsi_assert_count[gsi] )
    {
        /* Count pinned to 1 here; hvm_gsi_deassert() resets it to 0. */
        hvm_irq->gsi_assert_count[gsi] = 1;
        assert_gsi(d, gsi);
    }
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
155 | | |
/*
 * Deassert a GSI asserted via hvm_gsi_assert(): the assert count is
 * forced back to 0 (not decremented) to match the 0/1 usage there.
 */
void hvm_gsi_deassert(struct domain *d, unsigned int gsi)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);

    if ( gsi >= hvm_irq->nr_gsis )
    {
        ASSERT_UNREACHABLE();
        return;
    }

    spin_lock(&d->arch.hvm_domain.irq_lock);
    hvm_irq->gsi_assert_count[gsi] = 0;
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
170 | | |
/*
 * Assert an ISA IRQ line (0-15). On a fresh line assertion that is also
 * the GSI's 0 -> 1 transition, an edge is injected into both the
 * IO-APIC (at the translated GSI) and the PIC (at the ISA IRQ).
 */
void hvm_isa_irq_assert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        assert_irq(d, gsi, isa_irq);

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
187 | | |
/*
 * Deassert an ISA IRQ line (0-15), undoing hvm_isa_irq_assert().
 * The PIC line is only lowered when the GSI's assert count drops to 0.
 */
void hvm_isa_irq_deassert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (--hvm_irq->gsi_assert_count[gsi] == 0) )
        deassert_irq(d, isa_irq);

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
204 | | |
205 | | static void hvm_set_callback_irq_level(struct vcpu *v) |
206 | 0 | { |
207 | 0 | struct domain *d = v->domain; |
208 | 0 | struct hvm_irq *hvm_irq = hvm_domain_irq(d); |
209 | 0 | unsigned int gsi, pdev, pintx, asserted; |
210 | 0 |
|
211 | 0 | ASSERT(v->vcpu_id == 0); |
212 | 0 |
|
213 | 0 | spin_lock(&d->arch.hvm_domain.irq_lock); |
214 | 0 |
|
215 | 0 | /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */ |
216 | 0 | asserted = !!vcpu_info(v, evtchn_upcall_pending); |
217 | 0 | if ( hvm_irq->callback_via_asserted == asserted ) |
218 | 0 | goto out; |
219 | 0 | hvm_irq->callback_via_asserted = asserted; |
220 | 0 |
|
221 | 0 | /* Callback status has changed. Update the callback via. */ |
222 | 0 | switch ( hvm_irq->callback_via_type ) |
223 | 0 | { |
224 | 0 | case HVMIRQ_callback_gsi: |
225 | 0 | gsi = hvm_irq->callback_via.gsi; |
226 | 0 | if ( asserted && (hvm_irq->gsi_assert_count[gsi]++ == 0) ) |
227 | 0 | { |
228 | 0 | vioapic_irq_positive_edge(d, gsi); |
229 | 0 | if ( gsi <= 15 ) |
230 | 0 | vpic_irq_positive_edge(d, gsi); |
231 | 0 | } |
232 | 0 | else if ( !asserted && (--hvm_irq->gsi_assert_count[gsi] == 0) ) |
233 | 0 | { |
234 | 0 | if ( gsi <= 15 ) |
235 | 0 | vpic_irq_negative_edge(d, gsi); |
236 | 0 | } |
237 | 0 | break; |
238 | 0 | case HVMIRQ_callback_pci_intx: |
239 | 0 | pdev = hvm_irq->callback_via.pci.dev; |
240 | 0 | pintx = hvm_irq->callback_via.pci.intx; |
241 | 0 | if ( asserted ) |
242 | 0 | __hvm_pci_intx_assert(d, pdev, pintx); |
243 | 0 | else |
244 | 0 | __hvm_pci_intx_deassert(d, pdev, pintx); |
245 | 0 | default: |
246 | 0 | break; |
247 | 0 | } |
248 | 0 |
|
249 | 0 | out: |
250 | 0 | spin_unlock(&d->arch.hvm_domain.irq_lock); |
251 | 0 | } |
252 | | |
253 | | void hvm_maybe_deassert_evtchn_irq(void) |
254 | 5.08M | { |
255 | 5.08M | struct domain *d = current->domain; |
256 | 5.08M | struct hvm_irq *hvm_irq = hvm_domain_irq(d); |
257 | 5.08M | |
258 | 5.08M | if ( hvm_irq->callback_via_asserted && |
259 | 0 | !vcpu_info(d->vcpu[0], evtchn_upcall_pending) ) |
260 | 0 | hvm_set_callback_irq_level(d->vcpu[0]); |
261 | 5.08M | } |
262 | | |
263 | | void hvm_assert_evtchn_irq(struct vcpu *v) |
264 | 105k | { |
265 | 105k | if ( unlikely(in_irq() || !local_irq_is_enabled()) ) |
266 | 5.70k | { |
267 | 5.70k | tasklet_schedule(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet); |
268 | 5.70k | return; |
269 | 5.70k | } |
270 | 105k | |
271 | 99.6k | if ( v->arch.hvm_vcpu.evtchn_upcall_vector != 0 ) |
272 | 0 | { |
273 | 0 | uint8_t vector = v->arch.hvm_vcpu.evtchn_upcall_vector; |
274 | 0 |
|
275 | 0 | vlapic_set_irq(vcpu_vlapic(v), vector, 0); |
276 | 0 | } |
277 | 99.6k | else if ( is_hvm_pv_evtchn_vcpu(v) ) |
278 | 99.7k | vcpu_kick(v); |
279 | 18.4E | else if ( v->vcpu_id == 0 ) |
280 | 0 | hvm_set_callback_irq_level(v); |
281 | 99.6k | } |
282 | | |
/*
 * Re-route PCI link 'link' (0-3) to ISA IRQ 'isa_irq' (0-15, 0 = not
 * routed), migrating any currently-asserted state from the old IRQ to
 * the new one. Returns 0 on success, -EINVAL on out-of-range arguments.
 */
int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    u8 old_isa_irq;
    int i;

    if ( (link > 3) || (isa_irq > 15) )
        return -EINVAL;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    old_isa_irq = hvm_irq->pci_link.route[link];
    if ( old_isa_irq == isa_irq )
        goto out;
    hvm_irq->pci_link.route[link] = isa_irq;

    /* PCI pass-through fixup. */
    if ( hvm_irq->dpci )
    {
        if ( old_isa_irq )
            clear_bit(old_isa_irq, &hvm_irq->dpci->isairq_map);

        /* Rebuild the map entry for every link still in use. */
        for ( i = 0; i < NR_LINK; i++ )
            if ( hvm_irq->dpci->link_cnt[i] && hvm_irq->pci_link.route[i] )
                set_bit(hvm_irq->pci_link.route[i],
                        &hvm_irq->dpci->isairq_map);
    }

    /* No devices asserting this link: nothing to migrate. */
    if ( hvm_irq->pci_link_assert_count[link] == 0 )
        goto out;

    /* Lower the old IRQ if this link was its last outstanding assertion. */
    if ( old_isa_irq && (--hvm_irq->gsi_assert_count[old_isa_irq] == 0) )
        vpic_irq_negative_edge(d, old_isa_irq);

    /* Raise the new IRQ if it was previously idle. */
    if ( isa_irq && (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
            d->domain_id, link, old_isa_irq, isa_irq);

    return 0;
}
331 | | |
/*
 * Inject an MSI into the guest, decoded from the address/data pair.
 * A zero vector encodes an emulated-pirq injection instead (the pirq
 * number is rebuilt from address bits 63:40 and the dest-id field);
 * otherwise the message is delivered through the virtual LAPIC path.
 * Returns 0 on success or a negative errno value.
 */
int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
{
    uint32_t tmp = (uint32_t) addr;
    uint8_t dest = (tmp & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
    uint8_t dest_mode = !!(tmp & MSI_ADDR_DESTMODE_MASK);
    uint8_t delivery_mode = (data & MSI_DATA_DELIVERY_MODE_MASK)
        >> MSI_DATA_DELIVERY_MODE_SHIFT;
    uint8_t trig_mode = (data & MSI_DATA_TRIGGER_MASK)
        >> MSI_DATA_TRIGGER_SHIFT;
    uint8_t vector = data & MSI_DATA_VECTOR_MASK;

    if ( !vector )
    {
        /* Upper address bits carry the pirq; dest-id holds the low byte. */
        int pirq = ((addr >> 32) & 0xffffff00) | dest;

        if ( pirq > 0 )
        {
            struct pirq *info = pirq_info(d, pirq);

            /* if it is the first time, allocate the pirq */
            if ( !info || info->arch.hvm.emuirq == IRQ_UNBOUND )
            {
                int rc;

                spin_lock(&d->event_lock);
                rc = map_domain_emuirq_pirq(d, pirq, IRQ_MSI_EMU);
                spin_unlock(&d->event_lock);
                if ( rc )
                    return rc;
                /* Re-fetch: the mapping above should have created it. */
                info = pirq_info(d, pirq);
                if ( !info )
                    return -EBUSY;
            }
            else if ( info->arch.hvm.emuirq != IRQ_MSI_EMU )
                return -EINVAL;
            send_guest_pirq(d, info);
            return 0;
        }
        return -ERANGE;
    }

    return vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
}
375 | | |
/*
 * Reconfigure the callback-via used to signal event-channel upcalls.
 * 'via' packs a type (GSI, PCI INTx, vector, or none) plus a
 * type-specific payload; any assertion held on the old via is torn
 * down and re-established on the new one.
 */
void hvm_set_callback_via(struct domain *d, uint64_t via)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int gsi=0, pdev=0, pintx=0;
    uint8_t via_type;

    /* The type field is biased by 1; GSI 0 or out-of-range means "none". */
    via_type = (uint8_t)MASK_EXTR(via, HVM_PARAM_CALLBACK_IRQ_TYPE_MASK) + 1;
    if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
         (via_type > HVMIRQ_callback_vector) )
        via_type = HVMIRQ_callback_none;

    /* Non-vector delivery requires the full emulated interrupt topology. */
    if ( via_type != HVMIRQ_callback_vector &&
         (!has_vlapic(d) || !has_vioapic(d) || !has_vpic(d)) )
        return;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* Tear down old callback via. */
    if ( hvm_irq->callback_via_asserted )
    {
        switch ( hvm_irq->callback_via_type )
        {
        case HVMIRQ_callback_gsi:
            /* Drop the assertion held on the old GSI. */
            gsi = hvm_irq->callback_via.gsi;
            if ( (--hvm_irq->gsi_assert_count[gsi] == 0) && (gsi <= 15) )
                vpic_irq_negative_edge(d, gsi);
            break;
        case HVMIRQ_callback_pci_intx:
            pdev = hvm_irq->callback_via.pci.dev;
            pintx = hvm_irq->callback_via.pci.intx;
            __hvm_pci_intx_deassert(d, pdev, pintx);
            break;
        default:
            break;
        }
    }

    /* Set up new callback via. */
    switch ( hvm_irq->callback_via_type = via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi = (uint8_t)via;
        if ( (gsi == 0) || (gsi >= hvm_irq->nr_gsis) )
            hvm_irq->callback_via_type = HVMIRQ_callback_none;
        else if ( hvm_irq->callback_via_asserted &&
                  (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            /* Re-raise the still-pending callback on the new GSI. */
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        /* via bits 15:11 = PCI device, bits 1:0 = INTx pin. */
        pdev = hvm_irq->callback_via.pci.dev = (uint8_t)(via >> 11) & 31;
        pintx = hvm_irq->callback_via.pci.intx = (uint8_t)via & 3;
        if ( hvm_irq->callback_via_asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        break;
    case HVMIRQ_callback_vector:
        hvm_irq->callback_via.vector = (uint8_t)via;
        break;
    default:
        break;
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);

#ifndef NDEBUG
    printk(XENLOG_G_INFO "Dom%u callback via changed to ", d->domain_id);
    switch ( via_type )
    {
    case HVMIRQ_callback_gsi:
        printk("GSI %u\n", gsi);
        break;
    case HVMIRQ_callback_pci_intx:
        printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
        break;
    case HVMIRQ_callback_vector:
        printk("Direct Vector 0x%02x\n", (uint8_t)via);
        break;
    default:
        printk("None\n");
        break;
    }
#endif
}
462 | | |
/*
 * Return the highest-priority pending interrupt for 'v', or
 * hvm_intack_none. Checks are made in priority order: NMI, MCE,
 * event-channel callback vector, PIC, then LAPIC.
 */
struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
{
    struct hvm_domain *plat = &v->domain->arch.hvm_domain;
    int vector;

    if ( unlikely(v->nmi_pending) )
        return hvm_intack_nmi;

    if ( unlikely(v->mce_pending) )
        return hvm_intack_mce;

    /* Direct-vector callbacks are signalled via evtchn_upcall_pending. */
    if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector)
         && vcpu_info(v, evtchn_upcall_pending) )
        return hvm_intack_vector(plat->irq->callback_via.vector);

    if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
        return hvm_intack_pic(0);

    vector = vlapic_has_pending_irq(v);
    if ( vector != -1 )
        return hvm_intack_lapic(vector);

    return hvm_intack_none;
}
487 | | |
/*
 * Acknowledge the interrupt previously returned by
 * hvm_vcpu_has_pending_irq(). Returns the (possibly vector-updated)
 * intack, or hvm_intack_none if it is no longer pending at the source.
 */
struct hvm_intack hvm_vcpu_ack_pending_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    int vector;

    switch ( intack.source )
    {
    case hvm_intsrc_nmi:
        if ( !test_and_clear_bool(v->nmi_pending) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_mce:
        if ( !test_and_clear_bool(v->mce_pending) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_pic:
        /* The PIC supplies the actual vector at acknowledge time. */
        if ( (vector = vpic_ack_pending_irq(v)) == -1 )
            intack = hvm_intack_none;
        else
            intack.vector = (uint8_t)vector;
        break;
    case hvm_intsrc_lapic:
        if ( !vlapic_ack_pending_irq(v, intack.vector, 0) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_vector:
        /* Direct callback vectors need no source-side acknowledge. */
        break;
    default:
        intack = hvm_intack_none;
        break;
    }

    return intack;
}
522 | | |
523 | | int hvm_local_events_need_delivery(struct vcpu *v) |
524 | 73.0k | { |
525 | 73.0k | struct hvm_intack intack = hvm_vcpu_has_pending_irq(v); |
526 | 73.0k | |
527 | 73.0k | if ( likely(intack.source == hvm_intsrc_none) ) |
528 | 72.8k | return 0; |
529 | 73.0k | |
530 | 217 | return !hvm_interrupt_blocked(v, intack); |
531 | 73.0k | } |
532 | | |
/* Arch hook for event-channel injection; only HVM vcpus need action. */
void arch_evtchn_inject(struct vcpu *v)
{
    if ( !is_hvm_vcpu(v) )
        return;

    hvm_assert_evtchn_irq(v);
}
538 | | |
/* Dump one domain's emulated interrupt state to the console. */
static void irq_dump(struct domain *d)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int i;
    printk("Domain %d:\n", d->domain_id);
    printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64
           " ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n",
           hvm_irq->pci_intx.pad[0],  hvm_irq->pci_intx.pad[1],
           (uint32_t) hvm_irq->isa_irq.pad[0],
           hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
           hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
    /*
     * Print GSI assert counts eight per row; the first condition also
     * keeps the loop from running at all when nr_gsis < 8.
     */
    for ( i = 0; i < hvm_irq->nr_gsis && i + 8 <= hvm_irq->nr_gsis; i += 8 )
        printk("GSI [%x - %x] %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
               " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
               i, i+7,
               hvm_irq->gsi_assert_count[i+0],
               hvm_irq->gsi_assert_count[i+1],
               hvm_irq->gsi_assert_count[i+2],
               hvm_irq->gsi_assert_count[i+3],
               hvm_irq->gsi_assert_count[i+4],
               hvm_irq->gsi_assert_count[i+5],
               hvm_irq->gsi_assert_count[i+6],
               hvm_irq->gsi_assert_count[i+7]);
    /* Print the trailing partial row, if any. */
    if ( i != hvm_irq->nr_gsis )
    {
        printk("GSI [%x - %x]", i, hvm_irq->nr_gsis - 1);
        for ( ; i < hvm_irq->nr_gsis; i++)
            printk(" %2"PRIu8, hvm_irq->gsi_assert_count[i]);
        printk("\n");
    }
    printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
           hvm_irq->pci_link_assert_count[0],
           hvm_irq->pci_link_assert_count[1],
           hvm_irq->pci_link_assert_count[2],
           hvm_irq->pci_link_assert_count[3]);
    printk("Callback via %i:%#"PRIx32",%s asserted\n",
           hvm_irq->callback_via_type, hvm_irq->callback_via.gsi,
           hvm_irq->callback_via_asserted ? "" : " not");
}
578 | | |
579 | | static void dump_irq_info(unsigned char key) |
580 | 0 | { |
581 | 0 | struct domain *d; |
582 | 0 |
|
583 | 0 | printk("'%c' pressed -> dumping HVM irq info\n", key); |
584 | 0 |
|
585 | 0 | rcu_read_lock(&domlist_read_lock); |
586 | 0 |
|
587 | 0 | for_each_domain ( d ) |
588 | 0 | if ( is_hvm_domain(d) ) |
589 | 0 | irq_dump(d); |
590 | 0 |
|
591 | 0 | rcu_read_unlock(&domlist_read_lock); |
592 | 0 | } |
593 | | |
/* Register the 'I' debug keyhandler at boot. */
static int __init dump_irq_info_key_init(void)
{
    register_keyhandler('I', dump_irq_info, "dump HVM irq info", 1);
    return 0;
}
__initcall(dump_irq_info_key_init);
600 | | |
/*
 * Save the PCI INTx line state. A PCI INTx callback-via assertion is
 * temporarily dropped around the save so the recorded line state does
 * not include it, then re-raised before returning.
 */
static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int asserted, pdev, pintx;
    int rc;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    pdev = hvm_irq->callback_via.pci.dev;
    pintx = hvm_irq->callback_via.pci.intx;
    asserted = (hvm_irq->callback_via_asserted &&
                (hvm_irq->callback_via_type == HVMIRQ_callback_pci_intx));

    /*
     * Deassert virtual interrupt via PCI INTx line. The virtual interrupt
     * status is not save/restored, so the INTx line must be deasserted in
     * the restore context.
     */
    if ( asserted )
        __hvm_pci_intx_deassert(d, pdev, pintx);

    /* Save PCI IRQ lines */
    rc = hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx);

    /* Restore the callback assertion removed above. */
    if ( asserted )
        __hvm_pci_intx_assert(d, pdev, pintx);

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    return rc;
}
632 | | |
633 | | static int irq_save_isa(struct domain *d, hvm_domain_context_t *h) |
634 | 0 | { |
635 | 0 | struct hvm_irq *hvm_irq = hvm_domain_irq(d); |
636 | 0 |
|
637 | 0 | /* Save ISA IRQ lines */ |
638 | 0 | return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) ); |
639 | 0 | } |
640 | | |
641 | | static int irq_save_link(struct domain *d, hvm_domain_context_t *h) |
642 | 0 | { |
643 | 0 | struct hvm_irq *hvm_irq = hvm_domain_irq(d); |
644 | 0 |
|
645 | 0 | /* Save PCI-ISA link state */ |
646 | 0 | return ( hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link) ); |
647 | 0 | } |
648 | | |
649 | | static int irq_load_pci(struct domain *d, hvm_domain_context_t *h) |
650 | 0 | { |
651 | 0 | struct hvm_irq *hvm_irq = hvm_domain_irq(d); |
652 | 0 | int link, dev, intx, gsi; |
653 | 0 |
|
654 | 0 | /* Load the PCI IRQ lines */ |
655 | 0 | if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 ) |
656 | 0 | return -EINVAL; |
657 | 0 |
|
658 | 0 | /* Clear the PCI link assert counts */ |
659 | 0 | for ( link = 0; link < 4; link++ ) |
660 | 0 | hvm_irq->pci_link_assert_count[link] = 0; |
661 | 0 | |
662 | 0 | /* Clear the GSI link assert counts */ |
663 | 0 | for ( gsi = 0; gsi < hvm_irq->nr_gsis; gsi++ ) |
664 | 0 | hvm_irq->gsi_assert_count[gsi] = 0; |
665 | 0 |
|
666 | 0 | /* Recalculate the counts from the IRQ line state */ |
667 | 0 | for ( dev = 0; dev < 32; dev++ ) |
668 | 0 | for ( intx = 0; intx < 4; intx++ ) |
669 | 0 | if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) ) |
670 | 0 | { |
671 | 0 | /* Direct GSI assert */ |
672 | 0 | gsi = hvm_pci_intx_gsi(dev, intx); |
673 | 0 | hvm_irq->gsi_assert_count[gsi]++; |
674 | 0 | /* PCI-ISA bridge assert */ |
675 | 0 | link = hvm_pci_intx_link(dev, intx); |
676 | 0 | hvm_irq->pci_link_assert_count[link]++; |
677 | 0 | } |
678 | 0 |
|
679 | 0 | return 0; |
680 | 0 | } |
681 | | |
/*
 * Restore the ISA IRQ line state and fold it into the GSI assert
 * counts initialised by irq_load_pci().
 */
static int irq_load_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int irq;

    /* Load the ISA IRQ lines */
    if ( hvm_load_entry(ISA_IRQ, h, &hvm_irq->isa_irq) != 0 )
        return -EINVAL;

    /* Adjust the GSI assert counts for the ISA IRQ line state.
     * This relies on the PCI IRQ state being loaded first. */
    for ( irq = 0; platform_legacy_irq(irq); irq++ )
        if ( test_bit(irq, &hvm_irq->isa_irq.i) )
            hvm_irq->gsi_assert_count[hvm_isa_irq_to_gsi(irq)]++;

    return 0;
}
699 | | |
700 | | |
/*
 * Restore the PCI-ISA link routing table, validate it, and fold the
 * link assertion state into the GSI assert counts.
 */
static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    int link, gsi;

    /* Load the PCI-ISA IRQ link routing table */
    if ( hvm_load_entry(PCI_LINK, h, &hvm_irq->pci_link) != 0 )
        return -EINVAL;

    /* Sanity check: every route must be a legal ISA IRQ (0-15). */
    for ( link = 0; link < 4; link++ )
        if ( hvm_irq->pci_link.route[link] > 15 )
        {
            gdprintk(XENLOG_ERR,
                     "HVM restore: PCI-ISA link %u out of range (%u)\n",
                     link, hvm_irq->pci_link.route[link]);
            return -EINVAL;
        }

    /* Adjust the GSI assert counts for the link outputs.
     * This relies on the PCI and ISA IRQ state being loaded first */
    for ( link = 0; link < 4; link++ )
    {
        if ( hvm_irq->pci_link_assert_count[link] != 0 )
        {
            /* Route 0 means "link not routed": nothing to account. */
            gsi = hvm_irq->pci_link.route[link];
            if ( gsi != 0 )
                hvm_irq->gsi_assert_count[gsi]++;
        }
    }

    return 0;
}
734 | | |
/*
 * Register the save/restore handlers for the three interrupt records.
 * The loaders document ordering dependencies: PCI state must be
 * restored before the ISA and PCI-LINK loaders adjust its counts.
 */
HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
                          1, HVMSR_PER_DOM);