debuggers.hg

view xen/drivers/passthrough/io.c @ 20901:58b45bb15137

x86: Directly clear all pending EOIs once MSI info changed

For unmaskable MSIs, the deferred-EOI policy exists only to avoid
IRQ storms. It is therefore safe to clear pending EOIs early when
guest IRQ migration occurs: the next interrupt's EOI write is still
deferred, so storms remain prevented. (See the excerpt below the
changeset metadata.)

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jan 26 15:53:01 2010 +0000 (2010-01-26)
parents cb0375fcec23
children 95f5a4ce8f24
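
The heart of this changeset is the early EOI in the MSI re-bind path of
pt_irq_create_bind_vtd(): when a pirq that is already bound as a virtual MSI
is re-bound with a different guest vector or flags, any pending deferred EOI
is flushed before the new routing information takes effect. A trimmed excerpt
of that path, as it appears in the source below:

    /* If pirq is already mapped as a vMSI, update the guest data/addr. */
    if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec ||
         hvm_irq_dpci->mirq[pirq].gmsi.gflags != pt_irq_bind->u.msi.gflags) {
        /* Directly clear pending EOIs before enabling new MSI info. */
        pirq_guest_eoi(d, pirq);

        hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
        hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
    }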
line source
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
 */

#include <xen/event.h>
#include <xen/iommu.h>
#include <asm/hvm/irq.h>
#include <asm/hvm/iommu.h>
#include <asm/hvm/support.h>
#include <xen/hvm/irq.h>

static void hvm_dirq_assist(unsigned long _d);

/*
 * The EOI timeout timer is only needed for legacy INTx passthrough,
 * not for guest MSI or translated (MSI -> INTx) interrupts.
 */
static int pt_irq_need_timer(uint32_t flags)
{
    return !(flags & (HVM_IRQ_DPCI_GUEST_MSI | HVM_IRQ_DPCI_TRANSLATE));
}

/*
 * Timeout handler: the guest did not EOI the interrupt in time, so
 * deassert its INTx lines and EOI the machine GSIs to unblock the
 * physical interrupt lines.
 */
static void pt_irq_time_out(void *data)
{
    struct hvm_mirq_dpci_mapping *irq_map = data;
    unsigned int guest_gsi, machine_gsi = 0;
    struct hvm_irq_dpci *dpci = NULL;
    struct dev_intx_gsi_link *digl;
    struct hvm_girq_dpci_mapping *girq;
    uint32_t device, intx;
    unsigned int nr_pirqs = irq_map->dom->nr_pirqs;
    DECLARE_BITMAP(machine_gsi_map, nr_pirqs);

    bitmap_zero(machine_gsi_map, nr_pirqs);

    spin_lock(&irq_map->dom->event_lock);

    dpci = domain_get_irq_dpci(irq_map->dom);
    ASSERT(dpci);
    list_for_each_entry ( digl, &irq_map->digl_list, list )
    {
        guest_gsi = digl->gsi;
        list_for_each_entry ( girq, &dpci->girq[guest_gsi], list )
        {
            machine_gsi = girq->machine_gsi;
            set_bit(machine_gsi, machine_gsi_map);
        }
        device = digl->device;
        intx = digl->intx;
        hvm_pci_intx_deassert(irq_map->dom, device, intx);
    }

    for ( machine_gsi = find_first_bit(machine_gsi_map, nr_pirqs);
          machine_gsi < nr_pirqs;
          machine_gsi = find_next_bit(machine_gsi_map, nr_pirqs,
                                      machine_gsi + 1) )
    {
        clear_bit(machine_gsi, dpci->dirq_mask);
        dpci->mirq[machine_gsi].pending = 0;
    }

    spin_unlock(&irq_map->dom->event_lock);

    for ( machine_gsi = find_first_bit(machine_gsi_map, nr_pirqs);
          machine_gsi < nr_pirqs;
          machine_gsi = find_next_bit(machine_gsi_map, nr_pirqs,
                                      machine_gsi + 1) )
    {
        pirq_guest_eoi(irq_map->dom, machine_gsi);
    }
}

void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci)
{
    xfree(dpci->mirq);
    xfree(dpci->dirq_mask);
    xfree(dpci->mapping);
    xfree(dpci->hvm_timer);
    xfree(dpci);
}

int pt_irq_create_bind_vtd(
    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
    uint32_t machine_gsi, guest_gsi;
    uint32_t device, intx, link;
    struct dev_intx_gsi_link *digl;
    struct hvm_girq_dpci_mapping *girq;
    int rc, pirq = pt_irq_bind->machine_irq;

    if ( pirq < 0 || pirq >= d->nr_pirqs )
        return -EINVAL;

    spin_lock(&d->event_lock);

    hvm_irq_dpci = domain_get_irq_dpci(d);
    if ( hvm_irq_dpci == NULL )
    {
        hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
        if ( hvm_irq_dpci == NULL )
        {
            spin_unlock(&d->event_lock);
            return -ENOMEM;
        }
        memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
        tasklet_init(&hvm_irq_dpci->dirq_tasklet,
                     hvm_dirq_assist, (unsigned long)d);
        hvm_irq_dpci->mirq = xmalloc_array(struct hvm_mirq_dpci_mapping,
                                           d->nr_pirqs);
        hvm_irq_dpci->dirq_mask = xmalloc_array(unsigned long,
                                                BITS_TO_LONGS(d->nr_pirqs));
        hvm_irq_dpci->mapping = xmalloc_array(unsigned long,
                                              BITS_TO_LONGS(d->nr_pirqs));
        hvm_irq_dpci->hvm_timer = xmalloc_array(struct timer, nr_irqs);
        if ( !hvm_irq_dpci->mirq ||
             !hvm_irq_dpci->dirq_mask ||
             !hvm_irq_dpci->mapping ||
             !hvm_irq_dpci->hvm_timer )
        {
            spin_unlock(&d->event_lock);
            free_hvm_irq_dpci(hvm_irq_dpci);
            return -ENOMEM;
        }
        memset(hvm_irq_dpci->mirq, 0,
               d->nr_pirqs * sizeof(*hvm_irq_dpci->mirq));
        bitmap_zero(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
        bitmap_zero(hvm_irq_dpci->mapping, d->nr_pirqs);
        memset(hvm_irq_dpci->hvm_timer, 0,
               nr_irqs * sizeof(*hvm_irq_dpci->hvm_timer));
        for ( int i = 0; i < d->nr_pirqs; i++ ) {
            INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
            hvm_irq_dpci->mirq[i].gmsi.dest_vcpu_id = -1;
        }
        for ( int i = 0; i < NR_HVM_IRQS; i++ )
            INIT_LIST_HEAD(&hvm_irq_dpci->girq[i]);

        if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
        {
            spin_unlock(&d->event_lock);
            free_hvm_irq_dpci(hvm_irq_dpci);
            return -EINVAL;
        }
    }

    if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
    {
        uint8_t dest, dest_mode;
        int dest_vcpu_id;

        if ( !test_and_set_bit(pirq, hvm_irq_dpci->mapping) )
        {
            hvm_irq_dpci->mirq[pirq].flags = HVM_IRQ_DPCI_MACH_MSI |
                                             HVM_IRQ_DPCI_GUEST_MSI;
            hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
            hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
            /* Bind after hvm_irq_dpci is set up to avoid a race with the irq handler. */
            rc = pirq_guest_bind(d->vcpu[0], pirq, 0);
            if ( rc == 0 && pt_irq_bind->u.msi.gtable )
            {
                rc = msixtbl_pt_register(d, pirq, pt_irq_bind->u.msi.gtable);
                if ( unlikely(rc) )
                    pirq_guest_unbind(d, pirq);
            }
            if ( unlikely(rc) )
            {
                hvm_irq_dpci->mirq[pirq].gmsi.gflags = 0;
                hvm_irq_dpci->mirq[pirq].gmsi.gvec = 0;
                hvm_irq_dpci->mirq[pirq].flags = 0;
                clear_bit(pirq, hvm_irq_dpci->mapping);
                spin_unlock(&d->event_lock);
                return rc;
            }
        }
        else
        {
            uint32_t mask = HVM_IRQ_DPCI_MACH_MSI | HVM_IRQ_DPCI_GUEST_MSI;

            if ( (hvm_irq_dpci->mirq[pirq].flags & mask) != mask )
            {
                spin_unlock(&d->event_lock);
                return -EBUSY;
            }

            /* If pirq is already mapped as a vMSI, update the guest data/addr. */
            if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec ||
                 hvm_irq_dpci->mirq[pirq].gmsi.gflags != pt_irq_bind->u.msi.gflags) {
                /* Directly clear pending EOIs before enabling new MSI info. */
                pirq_guest_eoi(d, pirq);

                hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
                hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
            }
        }
        /* Calculate dest_vcpu_id for MSI-type pirq migration. */
        dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
        dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
        dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
        hvm_irq_dpci->mirq[pirq].gmsi.dest_vcpu_id = dest_vcpu_id;
        spin_unlock(&d->event_lock);
        if ( dest_vcpu_id >= 0 )
            hvm_migrate_pirqs(d->vcpu[dest_vcpu_id]);
    }
    else
    {
        machine_gsi = pt_irq_bind->machine_irq;
        device = pt_irq_bind->u.pci.device;
        intx = pt_irq_bind->u.pci.intx;
        guest_gsi = hvm_pci_intx_gsi(device, intx);
        link = hvm_pci_intx_link(device, intx);
        hvm_irq_dpci->link_cnt[link]++;

        digl = xmalloc(struct dev_intx_gsi_link);
        if ( !digl )
        {
            spin_unlock(&d->event_lock);
            return -ENOMEM;
        }

        girq = xmalloc(struct hvm_girq_dpci_mapping);
        if ( !girq )
        {
            xfree(digl);
            spin_unlock(&d->event_lock);
            return -ENOMEM;
        }

        digl->device = device;
        digl->intx = intx;
        digl->gsi = guest_gsi;
        digl->link = link;
        list_add_tail(&digl->list,
                      &hvm_irq_dpci->mirq[machine_gsi].digl_list);

        girq->device = device;
        girq->intx = intx;
        girq->machine_gsi = machine_gsi;
        list_add_tail(&girq->list, &hvm_irq_dpci->girq[guest_gsi]);

        /* Bind the same mirq only once per domain. */
        if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping) )
        {
            unsigned int irq = domain_pirq_to_irq(d, machine_gsi);
            unsigned int share;

            hvm_irq_dpci->mirq[machine_gsi].dom = d;
            if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI_TRANSLATE )
            {
                hvm_irq_dpci->mirq[machine_gsi].flags = HVM_IRQ_DPCI_MACH_MSI |
                                                        HVM_IRQ_DPCI_GUEST_PCI |
                                                        HVM_IRQ_DPCI_TRANSLATE;
                share = 0;
            }
            else /* PT_IRQ_TYPE_PCI */
            {
                hvm_irq_dpci->mirq[machine_gsi].flags = HVM_IRQ_DPCI_MACH_PCI |
                                                        HVM_IRQ_DPCI_GUEST_PCI;
                share = BIND_PIRQ__WILL_SHARE;
            }

            /* Init timer before binding. */
            if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
                init_timer(&hvm_irq_dpci->hvm_timer[irq],
                           pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
            /* Deal with gsi for legacy devices. */
            rc = pirq_guest_bind(d->vcpu[0], machine_gsi, share);
            if ( unlikely(rc) )
            {
                if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
                    kill_timer(&hvm_irq_dpci->hvm_timer[irq]);
                hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
                clear_bit(machine_gsi, hvm_irq_dpci->mapping);
                list_del(&girq->list);
                xfree(girq);
                list_del(&digl->list);
                hvm_irq_dpci->link_cnt[link]--;
                spin_unlock(&d->event_lock);
                xfree(digl);
                return rc;
            }
        }

        gdprintk(XENLOG_INFO VTDPREFIX,
                 "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
                 machine_gsi, device, intx);
        spin_unlock(&d->event_lock);
    }
    return 0;
}

int pt_irq_destroy_bind_vtd(
    struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
    struct hvm_irq_dpci *hvm_irq_dpci = NULL;
    uint32_t machine_gsi, guest_gsi;
    uint32_t device, intx, link;
    struct list_head *digl_list, *tmp;
    struct dev_intx_gsi_link *digl;
    struct hvm_girq_dpci_mapping *girq;

    machine_gsi = pt_irq_bind->machine_irq;
    device = pt_irq_bind->u.pci.device;
    intx = pt_irq_bind->u.pci.intx;
    guest_gsi = hvm_pci_intx_gsi(device, intx);
    link = hvm_pci_intx_link(device, intx);

    gdprintk(XENLOG_INFO,
             "pt_irq_destroy_bind_vtd: machine_gsi=%d "
             "guest_gsi=%d, device=%d, intx=%d.\n",
             machine_gsi, guest_gsi, device, intx);
    spin_lock(&d->event_lock);

    hvm_irq_dpci = domain_get_irq_dpci(d);

    if ( hvm_irq_dpci == NULL )
    {
        spin_unlock(&d->event_lock);
        return -EINVAL;
    }

    hvm_irq_dpci->link_cnt[link]--;

    list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
    {
        if ( girq->machine_gsi == machine_gsi )
        {
            list_del(&girq->list);
            xfree(girq);
            break;
        }
    }

    /* clear the mirq info */
    if ( test_bit(machine_gsi, hvm_irq_dpci->mapping) )
    {
        list_for_each_safe ( digl_list, tmp,
                             &hvm_irq_dpci->mirq[machine_gsi].digl_list )
        {
            digl = list_entry(digl_list,
                              struct dev_intx_gsi_link, list);
            if ( digl->device == device &&
                 digl->intx == intx &&
                 digl->link == link &&
                 digl->gsi == guest_gsi )
            {
                list_del(&digl->list);
                xfree(digl);
            }
        }

        if ( list_empty(&hvm_irq_dpci->mirq[machine_gsi].digl_list) )
        {
            pirq_guest_unbind(d, machine_gsi);
            msixtbl_pt_unregister(d, machine_gsi);
            if ( pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
                kill_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, machine_gsi)]);
            hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
            hvm_irq_dpci->mirq[machine_gsi].flags = 0;
            clear_bit(machine_gsi, hvm_irq_dpci->mapping);
        }
    }
    spin_unlock(&d->event_lock);
    gdprintk(XENLOG_INFO,
             "XEN_DOMCTL_irq_unmapping: m_irq = 0x%x device = 0x%x intx = 0x%x\n",
             machine_gsi, device, intx);

    return 0;
}

/*
 * Called from the physical IRQ path: mark the pirq pending and defer
 * injection into the guest to the dirq tasklet.
 */
int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
{
    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);

    ASSERT(spin_is_locked(&irq_desc[domain_pirq_to_irq(d, mirq)].lock));
    if ( !iommu_enabled || (d == dom0) || !dpci ||
         !test_bit(mirq, dpci->mapping) )
        return 0;

    set_bit(mirq, dpci->dirq_mask);
    tasklet_schedule(&dpci->dirq_tasklet);
    return 1;
}

#ifdef SUPPORT_MSI_REMAPPING
/* called with d->event_lock held */
static void __msi_pirq_eoi(struct domain *d, int pirq)
{
    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    irq_desc_t *desc;

    if ( ( pirq >= 0 ) && ( pirq < d->nr_pirqs ) &&
         test_bit(pirq, hvm_irq_dpci->mapping) &&
         ( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI ) )
    {
        BUG_ON(!local_irq_is_enabled());
        desc = domain_spin_lock_irq_desc(d, pirq, NULL);
        if ( !desc )
            return;

        desc->status &= ~IRQ_INPROGRESS;
        spin_unlock_irq(&desc->lock);

        pirq_guest_eoi(d, pirq);
    }
}

/*
 * Guest LAPIC EOI of 'vector': find the bound MSI pirq matching this
 * vector and destination, and EOI it.
 */
void hvm_dpci_msi_eoi(struct domain *d, int vector)
{
    int pirq, dest, dest_mode;
    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;

    if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
        return;

    spin_lock(&d->event_lock);
    for ( pirq = find_first_bit(hvm_irq_dpci->mapping, d->nr_pirqs);
          pirq < d->nr_pirqs;
          pirq = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, pirq + 1) )
    {
        if ( (!(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI)) ||
             (hvm_irq_dpci->mirq[pirq].gmsi.gvec != vector) )
            continue;

        dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
        dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
        if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest, dest_mode) )
            break;
    }
    if ( pirq < d->nr_pirqs )
        __msi_pirq_eoi(d, pirq);
    spin_unlock(&d->event_lock);
}

extern int vmsi_deliver(struct domain *d, int pirq);
static int hvm_pci_msi_assert(struct domain *d, int pirq)
{
    return vmsi_deliver(d, pirq);
}
#endif

/* Tasklet body: inject pending passthrough interrupts (MSI or INTx) into the guest. */
static void hvm_dirq_assist(unsigned long _d)
{
    unsigned int pirq;
    uint32_t device, intx;
    struct domain *d = (struct domain *)_d;
    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    struct dev_intx_gsi_link *digl;

    ASSERT(hvm_irq_dpci);

    for ( pirq = find_first_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs);
          pirq < d->nr_pirqs;
          pirq = find_next_bit(hvm_irq_dpci->dirq_mask, d->nr_pirqs, pirq + 1) )
    {
        if ( !test_and_clear_bit(pirq, hvm_irq_dpci->dirq_mask) )
            continue;

        spin_lock(&d->event_lock);
#ifdef SUPPORT_MSI_REMAPPING
        if ( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI )
        {
            hvm_pci_msi_assert(d, pirq);
            spin_unlock(&d->event_lock);
            continue;
        }
#endif
        list_for_each_entry ( digl, &hvm_irq_dpci->mirq[pirq].digl_list, list )
        {
            device = digl->device;
            intx = digl->intx;
            hvm_pci_intx_assert(d, device, intx);
            hvm_irq_dpci->mirq[pirq].pending++;

#ifdef SUPPORT_MSI_REMAPPING
            if ( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_TRANSLATE )
            {
                /* For MSI translated to an INTx interrupt, EOI as early as possible. */
                __msi_pirq_eoi(d, pirq);
            }
#endif
        }

        /*
         * Set a timer to see if the guest can finish the interrupt or not. For
         * example, the guest OS may unmask the PIC during boot, before the
         * guest driver is loaded. hvm_pci_intx_assert() may succeed, but the
         * guest will never deal with the irq, then the physical interrupt line
         * will never be deasserted.
         */
        if ( pt_irq_need_timer(hvm_irq_dpci->mirq[pirq].flags) )
            set_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, pirq)],
                      NOW() + PT_IRQ_TIME_OUT);
        spin_unlock(&d->event_lock);
    }
}

static void __hvm_dpci_eoi(struct domain *d,
                           struct hvm_irq_dpci *hvm_irq_dpci,
                           struct hvm_girq_dpci_mapping *girq,
                           union vioapic_redir_entry *ent)
{
    uint32_t device, intx, machine_gsi;

    device = girq->device;
    intx = girq->intx;
    hvm_pci_intx_deassert(d, device, intx);

    machine_gsi = girq->machine_gsi;

    /*
     * No need to get vector lock for timer
     * since interrupt is still not EOIed
     */
    if ( --hvm_irq_dpci->mirq[machine_gsi].pending ||
         ( ent && ent->fields.mask ) ||
         ! pt_irq_need_timer(hvm_irq_dpci->mirq[machine_gsi].flags) )
        return;

    stop_timer(&hvm_irq_dpci->hvm_timer[domain_pirq_to_irq(d, machine_gsi)]);
    pirq_guest_eoi(d, machine_gsi);
}

void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
                  union vioapic_redir_entry *ent)
{
    struct hvm_irq_dpci *hvm_irq_dpci;
    struct hvm_girq_dpci_mapping *girq;

    if ( !iommu_enabled )
        return;

    if ( guest_gsi < NR_ISAIRQS )
    {
        hvm_dpci_isairq_eoi(d, guest_gsi);
        return;
    }

    spin_lock(&d->event_lock);
    hvm_irq_dpci = domain_get_irq_dpci(d);

    if ( !hvm_irq_dpci )
        goto unlock;

    list_for_each_entry ( girq, &hvm_irq_dpci->girq[guest_gsi], list )
        __hvm_dpci_eoi(d, hvm_irq_dpci, girq, ent);

unlock:
    spin_unlock(&d->event_lock);
}