debuggers.hg

view xen/arch/x86/hvm/irq.c @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this
author Mukesh Rathor
date Mon May 19 15:34:57 2008 -0700 (2008-05-19)
/******************************************************************************
 * irq.c
 *
 * Interrupt distribution and delivery logic.
 *
 * Copyright (c) 2006, K A Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/event.h>
#include <xen/sched.h>
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>
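
/*
 * Locking/naming convention: the double-underscore helpers
 * (__hvm_pci_intx_assert, __hvm_pci_intx_deassert) expect the caller to
 * hold d->arch.hvm_domain.irq_lock; the wrappers without the prefix take
 * and release that lock themselves.
 *
 * Each PCI (device, INTx) pair is routed to a vIOAPIC GSI via
 * hvm_pci_intx_gsi() and to one of four PCI-ISA link lines via
 * hvm_pci_intx_link(); gsi_assert_count[] and pci_link_assert_count[]
 * reference-count the sources currently asserting each line.
 */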

static void __hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    if ( hvm_irq->gsi_assert_count[gsi]++ == 0 )
        vioapic_irq_positive_edge(d, gsi);

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
         (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }
}

void hvm_pci_intx_assert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_assert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

static void __hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, link, isa_irq;

    ASSERT((device <= 31) && (intx <= 3));

    if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
        return;

    gsi = hvm_pci_intx_gsi(device, intx);
    --hvm_irq->gsi_assert_count[gsi];

    link    = hvm_pci_intx_link(device, intx);
    isa_irq = hvm_irq->pci_link.route[link];
    if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
         (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
        vpic_irq_negative_edge(d, isa_irq);
}

void hvm_pci_intx_deassert(
    struct domain *d, unsigned int device, unsigned int intx)
{
    spin_lock(&d->arch.hvm_domain.irq_lock);
    __hvm_pci_intx_deassert(d, device, intx);
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
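
/*
 * ISA IRQs are delivered through both the vPIC and a vIOAPIC pin chosen
 * by hvm_isa_irq_to_gsi() (conventionally ISA IRQ 0 is remapped to
 * GSI 2, the rest map one-to-one).  The gsi_assert_count[] entry for
 * that pin is shared with any PCI sources routed to the same line.
 */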

void hvm_isa_irq_assert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (hvm_irq->gsi_assert_count[gsi]++ == 0) )
    {
        vioapic_irq_positive_edge(d, gsi);
        vpic_irq_positive_edge(d, isa_irq);
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

void hvm_isa_irq_deassert(
    struct domain *d, unsigned int isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);

    ASSERT(isa_irq <= 15);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
         (--hvm_irq->gsi_assert_count[gsi] == 0) )
        vpic_irq_negative_edge(d, isa_irq);

    spin_unlock(&d->arch.hvm_domain.irq_lock);
}
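
/*
 * The "callback via" is the interrupt line used to signal event-channel
 * upcalls to the guest.  Its level simply tracks vcpu0's
 * evtchn_upcall_pending flag and is folded into the same GSI/INTx assert
 * counts as any other interrupt source.
 */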

static void hvm_set_callback_irq_level(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi, pdev, pintx, asserted;

    ASSERT(v->vcpu_id == 0);

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
    asserted = !!vcpu_info(v, evtchn_upcall_pending);
    if ( hvm_irq->callback_via_asserted == asserted )
        goto out;
    hvm_irq->callback_via_asserted = asserted;

    /* Callback status has changed. Update the callback via. */
    switch ( hvm_irq->callback_via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi;
        if ( asserted && (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        else if ( !asserted && (--hvm_irq->gsi_assert_count[gsi] == 0) )
        {
            if ( gsi <= 15 )
                vpic_irq_negative_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev;
        pintx = hvm_irq->callback_via.pci.intx;
        if ( asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        else
            __hvm_pci_intx_deassert(d, pdev, pintx);
    default:
        break;
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);
}

void hvm_maybe_deassert_evtchn_irq(void)
{
    struct domain *d = current->domain;
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    if ( hvm_irq->callback_via_asserted &&
         !vcpu_info(d->vcpu[0], evtchn_upcall_pending) )
        hvm_set_callback_irq_level(d->vcpu[0]);
}

void hvm_assert_evtchn_irq(struct vcpu *v)
{
    if ( v->vcpu_id == 0 )
        hvm_set_callback_irq_level(v);
}
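
/*
 * Reroute PCI-ISA link 'link' to ISA IRQ 'isa_irq' (0 disconnects the
 * link).  If the link is currently asserted, its contribution to the
 * assert counts is moved from the old ISA IRQ to the new one so the
 * vPIC/vIOAPIC line levels stay consistent, and the pass-through
 * isairq_map is refreshed for any dpci-bound links.
 */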

void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    u8 old_isa_irq;
    int i;

    ASSERT((link <= 3) && (isa_irq <= 15));

    spin_lock(&d->arch.hvm_domain.irq_lock);

    old_isa_irq = hvm_irq->pci_link.route[link];
    if ( old_isa_irq == isa_irq )
        goto out;
    hvm_irq->pci_link.route[link] = isa_irq;

    /* PCI pass-through fixup. */
    if ( hvm_irq->dpci )
    {
        if ( old_isa_irq )
            clear_bit(old_isa_irq, &hvm_irq->dpci->isairq_map);

        for ( i = 0; i < NR_LINK; i++ )
            if ( hvm_irq->dpci->link_cnt[i] && hvm_irq->pci_link.route[i] )
                set_bit(hvm_irq->pci_link.route[i],
                        &hvm_irq->dpci->isairq_map);
    }

    if ( hvm_irq->pci_link_assert_count[link] == 0 )
        goto out;

    if ( old_isa_irq && (--hvm_irq->gsi_assert_count[old_isa_irq] == 0) )
        vpic_irq_negative_edge(d, old_isa_irq);

    if ( isa_irq && (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
    {
        vioapic_irq_positive_edge(d, isa_irq);
        vpic_irq_positive_edge(d, isa_irq);
    }

 out:
    spin_unlock(&d->arch.hvm_domain.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
            d->domain_id, link, old_isa_irq, isa_irq);
}
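
/*
 * 'via' packs the callback delivery method: the top byte selects the
 * type (offset by one into the HVMIRQ_callback_* enumeration), the low
 * byte carries the GSI for the GSI type, and for the PCI INTx type
 * bits 15:11 encode the device number and bits 1:0 the INTx pin.
 */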

void hvm_set_callback_via(struct domain *d, uint64_t via)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int gsi=0, pdev=0, pintx=0;
    uint8_t via_type;

    via_type = (uint8_t)(via >> 56) + 1;
    if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
         (via_type > HVMIRQ_callback_pci_intx) )
        via_type = HVMIRQ_callback_none;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    /* Tear down old callback via. */
    if ( hvm_irq->callback_via_asserted )
    {
        switch ( hvm_irq->callback_via_type )
        {
        case HVMIRQ_callback_gsi:
            gsi = hvm_irq->callback_via.gsi;
            if ( (--hvm_irq->gsi_assert_count[gsi] == 0) && (gsi <= 15) )
                vpic_irq_negative_edge(d, gsi);
            break;
        case HVMIRQ_callback_pci_intx:
            pdev  = hvm_irq->callback_via.pci.dev;
            pintx = hvm_irq->callback_via.pci.intx;
            __hvm_pci_intx_deassert(d, pdev, pintx);
            break;
        default:
            break;
        }
    }

    /* Set up new callback via. */
    switch ( hvm_irq->callback_via_type = via_type )
    {
    case HVMIRQ_callback_gsi:
        gsi = hvm_irq->callback_via.gsi = (uint8_t)via;
        if ( (gsi == 0) || (gsi >= ARRAY_SIZE(hvm_irq->gsi_assert_count)) )
            hvm_irq->callback_via_type = HVMIRQ_callback_none;
        else if ( hvm_irq->callback_via_asserted &&
                  (hvm_irq->gsi_assert_count[gsi]++ == 0) )
        {
            vioapic_irq_positive_edge(d, gsi);
            if ( gsi <= 15 )
                vpic_irq_positive_edge(d, gsi);
        }
        break;
    case HVMIRQ_callback_pci_intx:
        pdev  = hvm_irq->callback_via.pci.dev  = (uint8_t)(via >> 11) & 31;
        pintx = hvm_irq->callback_via.pci.intx = (uint8_t)via & 3;
        if ( hvm_irq->callback_via_asserted )
            __hvm_pci_intx_assert(d, pdev, pintx);
        break;
    default:
        break;
    }

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    dprintk(XENLOG_G_INFO, "Dom%u callback via changed to ", d->domain_id);
    switch ( via_type )
    {
    case HVMIRQ_callback_gsi:
        printk("GSI %u\n", gsi);
        break;
    case HVMIRQ_callback_pci_intx:
        printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
        break;
    default:
        printk("None\n");
        break;
    }
}
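
/*
 * Pending-interrupt sources are polled in strict priority order: NMI
 * first, then the vPIC (only when the local APIC will accept a PIC
 * interrupt, which in practice limits it to vcpu0), then the local APIC.
 */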

struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
{
    struct hvm_domain *plat = &v->domain->arch.hvm_domain;
    int vector;

    if ( unlikely(v->nmi_pending) )
        return hvm_intack_nmi;

    if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
        return hvm_intack_pic(0);

    vector = vlapic_has_pending_irq(v);
    if ( vector != -1 )
        return hvm_intack_lapic(vector);

    return hvm_intack_none;
}

struct hvm_intack hvm_vcpu_ack_pending_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    int vector;

    switch ( intack.source )
    {
    case hvm_intsrc_nmi:
        if ( !test_and_clear_bool(v->nmi_pending) )
            intack = hvm_intack_none;
        break;
    case hvm_intsrc_pic:
        ASSERT(v->vcpu_id == 0);
        if ( (vector = vpic_ack_pending_irq(v)) == -1 )
            intack = hvm_intack_none;
        else
            intack.vector = (uint8_t)vector;
        break;
    case hvm_intsrc_lapic:
        if ( !vlapic_ack_pending_irq(v, intack.vector) )
            intack = hvm_intack_none;
        break;
    default:
        intack = hvm_intack_none;
        break;
    }

    return intack;
}

int hvm_local_events_need_delivery(struct vcpu *v)
{
    struct hvm_intack intack = hvm_vcpu_has_pending_irq(v);

    if ( likely(intack.source == hvm_intsrc_none) )
        return 0;

    return !hvm_interrupt_blocked(v, intack);
}

#if 0 /* Keep for debugging */
static void irq_dump(struct domain *d)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int i;
    printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64
           " ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n",
           hvm_irq->pci_intx.pad[0], hvm_irq->pci_intx.pad[1],
           (uint32_t) hvm_irq->isa_irq.pad[0],
           hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
           hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
    for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 )
        printk("GSI %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
               " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
               hvm_irq->gsi_assert_count[i+0],
               hvm_irq->gsi_assert_count[i+1],
               hvm_irq->gsi_assert_count[i+2],
               hvm_irq->gsi_assert_count[i+3],
               hvm_irq->gsi_assert_count[i+4],
               hvm_irq->gsi_assert_count[i+5],
               hvm_irq->gsi_assert_count[i+6],
               hvm_irq->gsi_assert_count[i+7]);
    printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
           hvm_irq->pci_link_assert_count[0],
           hvm_irq->pci_link_assert_count[1],
           hvm_irq->pci_link_assert_count[2],
           hvm_irq->pci_link_assert_count[3]);
    printk("Callback via %i:0x%"PRIx32",%s asserted\n",
           hvm_irq->callback_via_type, hvm_irq->callback_via.gsi,
           hvm_irq->callback_via_asserted ? "" : " not");
}
#endif
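
/*
 * Save/restore handlers: only the raw line state (pci_intx, isa_irq and
 * the pci_link routing) is written to the save record.  The derived
 * gsi_assert_count[] and pci_link_assert_count[] arrays are recomputed
 * on load, which is why the load handlers below rely on running in
 * PCI, ISA, LINK order.
 */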

static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    unsigned int asserted, pdev, pintx;
    int rc;

    spin_lock(&d->arch.hvm_domain.irq_lock);

    pdev  = hvm_irq->callback_via.pci.dev;
    pintx = hvm_irq->callback_via.pci.intx;
    asserted = (hvm_irq->callback_via_asserted &&
                (hvm_irq->callback_via_type == HVMIRQ_callback_pci_intx));

    /*
     * Deassert the virtual interrupt via the PCI INTx line. The virtual
     * interrupt status is not saved/restored, so the INTx line must be
     * deasserted in the restore context.
     */
    if ( asserted )
        __hvm_pci_intx_deassert(d, pdev, pintx);

    /* Save PCI IRQ lines */
    rc = hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx);

    if ( asserted )
        __hvm_pci_intx_assert(d, pdev, pintx);

    spin_unlock(&d->arch.hvm_domain.irq_lock);

    return rc;
}

static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    /* Save ISA IRQ lines */
    return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
}

static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;

    /* Save PCI-ISA link state */
    return ( hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link) );
}

static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int link, dev, intx, gsi;

    /* Load the PCI IRQ lines */
    if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 )
        return -EINVAL;

    /* Clear the PCI link assert counts */
    for ( link = 0; link < 4; link++ )
        hvm_irq->pci_link_assert_count[link] = 0;

    /* Clear the GSI link assert counts */
    for ( gsi = 0; gsi < VIOAPIC_NUM_PINS; gsi++ )
        hvm_irq->gsi_assert_count[gsi] = 0;

    /* Recalculate the counts from the IRQ line state */
    for ( dev = 0; dev < 32; dev++ )
        for ( intx = 0; intx < 4; intx++ )
            if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) )
            {
                /* Direct GSI assert */
                gsi = hvm_pci_intx_gsi(dev, intx);
                hvm_irq->gsi_assert_count[gsi]++;
                /* PCI-ISA bridge assert */
                link = hvm_pci_intx_link(dev, intx);
                hvm_irq->pci_link_assert_count[link]++;
            }

    return 0;
}

static int irq_load_isa(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int irq;

    /* Load the ISA IRQ lines */
    if ( hvm_load_entry(ISA_IRQ, h, &hvm_irq->isa_irq) != 0 )
        return -EINVAL;

    /* Adjust the GSI assert counts for the ISA IRQ line state.
     * This relies on the PCI IRQ state being loaded first. */
    for ( irq = 0; irq < 16; irq++ )
        if ( test_bit(irq, &hvm_irq->isa_irq.i) )
            hvm_irq->gsi_assert_count[hvm_isa_irq_to_gsi(irq)]++;

    return 0;
}

static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    int link, gsi;

    /* Load the PCI-ISA IRQ link routing table */
    if ( hvm_load_entry(PCI_LINK, h, &hvm_irq->pci_link) != 0 )
        return -EINVAL;

    /* Sanity check */
    for ( link = 0; link < 4; link++ )
        if ( hvm_irq->pci_link.route[link] > 15 )
        {
            gdprintk(XENLOG_ERR,
                     "HVM restore: PCI-ISA link %u out of range (%u)\n",
                     link, hvm_irq->pci_link.route[link]);
            return -EINVAL;
        }

    /* Adjust the GSI assert counts for the link outputs.
     * This relies on the PCI and ISA IRQ state being loaded first. */
    for ( link = 0; link < 4; link++ )
    {
        if ( hvm_irq->pci_link_assert_count[link] != 0 )
        {
            gsi = hvm_irq->pci_link.route[link];
            if ( gsi != 0 )
                hvm_irq->gsi_assert_count[gsi]++;
        }
    }

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa,
                          1, HVMSR_PER_DOM);
HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
                          1, HVMSR_PER_DOM);