debuggers.hg

view xen/arch/x86/msi.c @ 0:7d21f7218375

Exact replica of xen-unstable as of 2008-05-19 (051908), plus README-this
author Mukesh Rathor
date Mon May 19 15:34:57 2008 -0700 (2008-05-19)
parents
children 5c0bf00e371d
line source
1 /*
2 * File: msi.c
3 * Purpose: PCI Message Signaled Interrupt (MSI)
4 *
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
7 */
9 #include <xen/config.h>
10 #include <xen/lib.h>
11 #include <xen/init.h>
12 #include <xen/irq.h>
13 #include <xen/delay.h>
14 #include <xen/sched.h>
15 #include <xen/acpi.h>
16 #include <xen/errno.h>
17 #include <xen/pci.h>
18 #include <xen/pci_regs.h>
19 #include <xen/keyhandler.h>
20 #include <asm/io.h>
21 #include <asm/smp.h>
22 #include <asm/desc.h>
23 #include <asm/msi.h>
24 #include <asm/fixmap.h>
25 #include <mach_apic.h>
26 #include <io_ports.h>
27 #include <public/physdev.h>
29 extern int msi_irq_enable;
31 /* PCI-dev list with MSI/MSIX capabilities */
32 DEFINE_SPINLOCK(msi_pdev_lock);
33 struct list_head msi_pdev_list;
35 struct pci_dev *get_msi_pdev(u8 bus, u8 devfn)
36 {
37 struct pci_dev *pdev = NULL;
39 list_for_each_entry(pdev, &msi_pdev_list, msi_dev_list)
40 if ( pdev->bus == bus && pdev->devfn == devfn )
41 return pdev;
43 return NULL;
44 }
46 /* bitmap indicate which fixed map is free */
47 DEFINE_SPINLOCK(msix_fixmap_lock);
48 DECLARE_BITMAP(msix_fixmap_pages, MAX_MSIX_PAGES);
50 static int msix_fixmap_alloc(void)
51 {
52 int i;
53 int rc = -1;
55 spin_lock(&msix_fixmap_lock);
56 for ( i = 0; i < MAX_MSIX_PAGES; i++ )
57 if ( !test_bit(i, &msix_fixmap_pages) )
58 break;
59 if ( i == MAX_MSIX_PAGES )
60 goto out;
61 rc = FIX_MSIX_IO_RESERV_BASE + i;
62 set_bit(i, &msix_fixmap_pages);
64 out:
65 spin_unlock(&msix_fixmap_lock);
66 return rc;
67 }
69 static void msix_fixmap_free(int idx)
70 {
71 if ( idx < FIX_MSIX_IO_RESERV_BASE )
72 return;
74 spin_lock(&msix_fixmap_lock);
75 clear_bit(idx - FIX_MSIX_IO_RESERV_BASE, &msix_fixmap_pages);
76 spin_unlock(&msix_fixmap_lock);
77 }
79 /*
80 * MSI message composition
81 */
82 static void msi_compose_msg(struct pci_dev *pdev, int vector,
83 struct msi_msg *msg)
84 {
85 unsigned dest;
86 cpumask_t tmp;
88 tmp = TARGET_CPUS;
89 if ( vector )
90 {
91 dest = cpu_mask_to_apicid(tmp);
93 msg->address_hi = MSI_ADDR_BASE_HI;
94 msg->address_lo =
95 MSI_ADDR_BASE_LO |
96 ((INT_DEST_MODE == 0) ?
97 MSI_ADDR_DESTMODE_PHYS:
98 MSI_ADDR_DESTMODE_LOGIC) |
99 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
100 MSI_ADDR_REDIRECTION_CPU:
101 MSI_ADDR_REDIRECTION_LOWPRI) |
102 MSI_ADDR_DEST_ID(dest);
104 msg->data =
105 MSI_DATA_TRIGGER_EDGE |
106 MSI_DATA_LEVEL_ASSERT |
107 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
108 MSI_DATA_DELIVERY_FIXED:
109 MSI_DATA_DELIVERY_LOWPRI) |
110 MSI_DATA_VECTOR(vector);
111 }
112 }
114 void read_msi_msg(unsigned int irq, struct msi_msg *msg)
115 {
116 struct msi_desc *entry = irq_desc[irq].msi_desc;
118 switch ( entry->msi_attrib.type )
119 {
120 case PCI_CAP_ID_MSI:
121 {
122 struct pci_dev *dev = entry->dev;
123 int pos = entry->msi_attrib.pos;
124 u16 data;
125 u8 bus = dev->bus;
126 u8 slot = PCI_SLOT(dev->devfn);
127 u8 func = PCI_FUNC(dev->devfn);
129 msg->address_lo = pci_conf_read32(bus, slot, func,
130 msi_lower_address_reg(pos));
131 if ( entry->msi_attrib.is_64 )
132 {
133 msg->address_hi = pci_conf_read32(bus, slot, func,
134 msi_upper_address_reg(pos));
135 data = pci_conf_read16(bus, slot, func, msi_data_reg(pos, 1));
136 }
137 else
138 {
139 msg->address_hi = 0;
140 data = pci_conf_read16(bus, slot, func, msi_data_reg(pos, 0));
141 }
142 msg->data = data;
143 break;
144 }
145 case PCI_CAP_ID_MSIX:
146 {
147 void __iomem *base;
148 base = entry->mask_base +
149 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
151 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
152 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
153 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
154 break;
155 }
156 default:
157 BUG();
158 }
159 }
161 static int set_vector_msi(struct msi_desc *entry)
162 {
163 irq_desc_t *desc;
164 unsigned long flags;
166 if ( entry->vector >= NR_VECTORS )
167 {
168 dprintk(XENLOG_ERR, "Trying to install msi data for Vector %d\n",
169 entry->vector);
170 return -EINVAL;
171 }
173 desc = &irq_desc[entry->vector];
174 spin_lock_irqsave(&desc->lock, flags);
175 desc->msi_desc = entry;
176 spin_unlock_irqrestore(&desc->lock, flags);
178 return 0;
179 }
181 static int unset_vector_msi(int vector)
182 {
183 irq_desc_t *desc;
184 unsigned long flags;
186 if ( vector >= NR_VECTORS )
187 {
188 dprintk(XENLOG_ERR, "Trying to uninstall msi data for Vector %d\n",
189 vector);
190 return -EINVAL;
191 }
193 desc = &irq_desc[vector];
194 spin_lock_irqsave(&desc->lock, flags);
195 desc->msi_desc = NULL;
196 spin_unlock_irqrestore(&desc->lock, flags);
198 return 0;
199 }
201 void write_msi_msg(unsigned int irq, struct msi_msg *msg)
202 {
203 struct msi_desc *entry = irq_desc[irq].msi_desc;
205 switch ( entry->msi_attrib.type )
206 {
207 case PCI_CAP_ID_MSI:
208 {
209 struct pci_dev *dev = entry->dev;
210 int pos = entry->msi_attrib.pos;
211 u8 bus = dev->bus;
212 u8 slot = PCI_SLOT(dev->devfn);
213 u8 func = PCI_FUNC(dev->devfn);
215 pci_conf_write32(bus, slot, func, msi_lower_address_reg(pos),
216 msg->address_lo);
217 if ( entry->msi_attrib.is_64 )
218 {
219 pci_conf_write32(bus, slot, func, msi_upper_address_reg(pos),
220 msg->address_hi);
221 pci_conf_write16(bus, slot, func, msi_data_reg(pos, 1),
222 msg->data);
223 }
224 else
225 pci_conf_write16(bus, slot, func, msi_data_reg(pos, 0),
226 msg->data);
227 break;
228 }
229 case PCI_CAP_ID_MSIX:
230 {
231 void __iomem *base;
232 base = entry->mask_base +
233 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
235 writel(msg->address_lo,
236 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
237 writel(msg->address_hi,
238 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
239 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
240 break;
241 }
242 default:
243 BUG();
244 }
245 entry->msg = *msg;
246 }
248 void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
249 {
250 struct msi_msg msg;
251 unsigned int dest;
253 memset(&msg, 0, sizeof(msg));
254 cpus_and(mask, mask, cpu_online_map);
255 if ( cpus_empty(mask) )
256 mask = TARGET_CPUS;
257 dest = cpu_mask_to_apicid(mask);
259 read_msi_msg(irq, &msg);
261 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
262 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
264 write_msi_msg(irq, &msg);
265 }
267 static void msi_set_enable(struct pci_dev *dev, int enable)
268 {
269 int pos;
270 u16 control;
271 u8 bus = dev->bus;
272 u8 slot = PCI_SLOT(dev->devfn);
273 u8 func = PCI_FUNC(dev->devfn);
275 pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
276 if ( pos )
277 {
278 control = pci_conf_read16(bus, slot, func, pos + PCI_MSI_FLAGS);
279 control &= ~PCI_MSI_FLAGS_ENABLE;
280 if ( enable )
281 control |= PCI_MSI_FLAGS_ENABLE;
282 pci_conf_write16(bus, slot, func, pos + PCI_MSI_FLAGS, control);
283 }
284 }
286 void msix_set_enable(struct pci_dev *dev, int enable)
287 {
288 int pos;
289 u16 control;
290 u8 bus = dev->bus;
291 u8 slot = PCI_SLOT(dev->devfn);
292 u8 func = PCI_FUNC(dev->devfn);
294 pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
295 if ( pos )
296 {
297 control = pci_conf_read16(bus, slot, func, pos + PCI_MSIX_FLAGS);
298 control &= ~PCI_MSIX_FLAGS_ENABLE;
299 if ( enable )
300 control |= PCI_MSIX_FLAGS_ENABLE;
301 pci_conf_write16(bus, slot, func, pos + PCI_MSIX_FLAGS, control);
302 }
303 }
305 static void msix_flush_writes(unsigned int irq)
306 {
307 struct msi_desc *entry = irq_desc[irq].msi_desc;
309 BUG_ON(!entry || !entry->dev);
310 switch (entry->msi_attrib.type) {
311 case PCI_CAP_ID_MSI:
312 /* nothing to do */
313 break;
314 case PCI_CAP_ID_MSIX:
315 {
316 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
317 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
318 readl(entry->mask_base + offset);
319 break;
320 }
321 default:
322 BUG();
323 break;
324 }
325 }
/*
 * Mask (flag != 0) or unmask (flag == 0) the interrupt source behind
 * @irq and record the new state in the descriptor.
 *
 * MSI with per-vector masking: toggle bit 0 of the mask-bits register
 * in config space.  MSI without masking: emulate by toggling the MSI
 * enable bit.  MSI-X: write the per-entry vector-control word and read
 * it back to flush the posted write.
 */
static void msi_set_mask_bit(unsigned int irq, int flag)
{
    struct msi_desc *entry = irq_desc[irq].msi_desc;

    BUG_ON(!entry || !entry->dev);
    switch (entry->msi_attrib.type) {
    case PCI_CAP_ID_MSI:
        if (entry->msi_attrib.maskbit) {
            int pos;
            u32 mask_bits;
            u8 bus = entry->dev->bus;
            u8 slot = PCI_SLOT(entry->dev->devfn);
            u8 func = PCI_FUNC(entry->dev->devfn);

            /* For maskable MSI, mask_base stores the config-space
             * offset of the mask-bits register, not a pointer
             * (see msi_capability_init). */
            pos = (long)entry->mask_base;
            mask_bits = pci_conf_read32(bus, slot, func, pos);
            mask_bits &= ~(1);          /* bit 0 masks vector/entry 0 */
            mask_bits |= flag;
            pci_conf_write32(bus, slot, func, pos, mask_bits);
        } else {
            /* No mask capability: disabling MSI is the only way to mask. */
            msi_set_enable(entry->dev, !flag);
        }
        break;
    case PCI_CAP_ID_MSIX:
    {
        int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
            PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
        writel(flag, entry->mask_base + offset);
        /* Read back to flush the posted MMIO write. */
        readl(entry->mask_base + offset);
        break;
    }
    default:
        BUG();
        break;
    }
    /* Cache the effective mask state for later queries. */
    entry->msi_attrib.masked = !!flag;
}
/* Mask @irq at its MSI/MSI-X source, flushing MSI-X table writes. */
void mask_msi_irq(unsigned int irq)
{
    msi_set_mask_bit(irq, 1);
    msix_flush_writes(irq);
}
/* Unmask @irq at its MSI/MSI-X source, flushing MSI-X table writes. */
void unmask_msi_irq(unsigned int irq)
{
    msi_set_mask_bit(irq, 0);
    msix_flush_writes(irq);
}
377 static struct msi_desc* alloc_msi_entry(void)
378 {
379 struct msi_desc *entry;
381 entry = xmalloc(struct msi_desc);
382 if ( !entry )
383 return NULL;
385 INIT_LIST_HEAD(&entry->list);
386 entry->dev = NULL;
388 return entry;
389 }
391 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
392 {
393 struct msi_msg msg;
395 msi_compose_msg(dev, desc->vector, &msg);
396 set_vector_msi(desc);
397 write_msi_msg(desc->vector, &msg);
399 return 0;
400 }
/* Detach the MSI descriptor bound to @vector, if any. */
static void teardown_msi_vector(int vector)
{
    unset_vector_msi(vector);
}
/*
 * Tear down the MSI/MSI-X binding for @vector and release its
 * descriptor.  For MSI-X the table entry is masked first, then the
 * fixmap mapping covering the table page is destroyed.
 */
static void msi_free_vector(int vector)
{
    struct msi_desc *entry;

    /* NOTE(review): assumes irq_desc[vector].msi_desc is non-NULL;
     * callers must only pass MSI-backed vectors — confirm. */
    entry = irq_desc[vector].msi_desc;

    teardown_msi_vector(vector);

    if ( entry->msi_attrib.type == PCI_CAP_ID_MSIX )
    {
        unsigned long start;

        /* Mask the entry before its mapping goes away. */
        writel(1, entry->mask_base + entry->msi_attrib.entry_nr
               * PCI_MSIX_ENTRY_SIZE
               + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

        /* mask_base points into a fixmap page set up by
         * msix_capability_init(); release that page mapping. */
        start = (unsigned long)entry->mask_base & ~(PAGE_SIZE - 1);
        msix_fixmap_free(virt_to_fix(start));
        destroy_xen_mappings(start, start + PAGE_SIZE);
    }
    list_del(&entry->list);
    xfree(entry);
}
431 void msi_free_vectors(struct pci_dev* dev)
432 {
433 struct msi_desc *entry, *tmp;
435 list_for_each_entry_safe( entry, tmp, &dev->msi_list, list )
436 msi_free_vector(entry->vector);
437 }
439 static struct msi_desc *find_msi_entry(struct pci_dev *dev,
440 int vector, int cap_id)
441 {
442 struct msi_desc *entry;
444 list_for_each_entry( entry, &dev->msi_list, list )
445 {
446 if ( entry->msi_attrib.type == cap_id &&
447 (vector == -1 || entry->vector == vector) )
448 return entry;
449 }
451 return NULL;
452 }
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @vector: interrupt vector to bind to the (single) MSI entry
 *
 * Setup the MSI capability structure of device function with a single
 * MSI irq, regardless of device function is capable of handling
 * multiple messages. A return of zero indicates the successful setup
 * of an entry zero with the new MSI irq or non-zero for otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev, int vector)
{
    struct msi_desc *entry;
    int pos, ret;
    u16 control;
    u8 bus = dev->bus;
    u8 slot = PCI_SLOT(dev->devfn);
    u8 func = PCI_FUNC(dev->devfn);

    /* NOTE(review): pos is not checked for 0 — callers are presumed to
     * have verified the MSI capability exists; confirm. */
    pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
    control = pci_conf_read16(bus, slot, func, msi_control_reg(pos));
    /* MSI Entry Initialization */
    msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */

    entry = alloc_msi_entry();
    if ( !entry )
        return -ENOMEM;

    entry->msi_attrib.type = PCI_CAP_ID_MSI;
    entry->msi_attrib.is_64 = is_64bit_address(control);
    entry->msi_attrib.entry_nr = 0;
    entry->msi_attrib.maskbit = is_mask_bit_support(control);
    entry->msi_attrib.masked = 1;
    entry->msi_attrib.pos = pos;
    entry->vector = vector;
    if ( is_mask_bit_support(control) )
        /* For MSI, mask_base carries the config-space offset of the
         * mask-bits register (cast to a pointer-sized value), not a
         * mapped address — see msi_set_mask_bit(). */
        entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
                is_64bit_address(control));
    entry->dev = dev;
    if ( entry->msi_attrib.maskbit )
    {
        unsigned int maskbits, temp;
        /* All MSIs are unmasked by default, Mask them all */
        maskbits = pci_conf_read32(bus, slot, func,
                                   msi_mask_bits_reg(pos, is_64bit_address(control)));
        /* temp = bitmask covering all 2^multi implemented vectors. */
        temp = (1 << multi_msi_capable(control));
        temp = ((temp - 1) & ~temp);
        maskbits |= temp;
        pci_conf_write32(bus, slot, func,
                         msi_mask_bits_reg(pos, is_64bit_address(control)),
                         maskbits);
    }
    list_add_tail(&entry->list, &dev->msi_list);

    /* Configure MSI capability structure */
    ret = setup_msi_irq(dev, entry);
    if ( ret )
    {
        /* Undo the binding; this also unlinks and frees 'entry'. */
        msi_free_vector(vector);
        return ret;
    }

    /* Restore the original MSI enabled bits */
    pci_conf_write16(bus, slot, func, msi_control_reg(pos), control);

    return 0;
}
521 static u64 pci_resource_start(struct pci_dev *dev, u8 bar_index)
522 {
523 u64 bar_base;
524 u32 reg_val;
525 u8 bus = dev->bus;
526 u8 slot = PCI_SLOT(dev->devfn);
527 u8 func = PCI_FUNC(dev->devfn);
529 reg_val = pci_conf_read32(bus, slot, func,
530 PCI_BASE_ADDRESS_0 + 4 * bar_index);
531 bar_base = reg_val & PCI_BASE_ADDRESS_MEM_MASK;
532 if ( ( reg_val & PCI_BASE_ADDRESS_MEM_TYPE_MASK ) ==
533 PCI_BASE_ADDRESS_MEM_TYPE_64 )
534 {
535 reg_val = pci_conf_read32(bus, slot, func,
536 PCI_BASE_ADDRESS_0 + 4 * (bar_index + 1));
537 bar_base |= ((u64)reg_val) << 32;
538 }
540 return bar_base;
541 }
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @vector: interrupt vector to bind to the MSI-X entry
 * @entry_nr: index of the MSI-X table entry to program
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev, int vector, int entry_nr)
{
    struct msi_desc *entry;
    int pos;
    u16 control;
    unsigned long phys_addr;
    u32 table_offset;
    u8 bir;
    void __iomem *base;
    int idx;
    u8 bus = dev->bus;
    u8 slot = PCI_SLOT(dev->devfn);
    u8 func = PCI_FUNC(dev->devfn);

    pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
    control = pci_conf_read16(bus, slot, func, msix_control_reg(pos));
    msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */

    /* MSI-X Table Initialization */
    entry = alloc_msi_entry();
    if ( !entry )
        return -ENOMEM;

    /* Request & Map MSI-X table region: the table lives at
     * table_offset into the BAR selected by the BIR field. */
    table_offset = pci_conf_read32(bus, slot, func, msix_table_offset_reg(pos));
    bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
    table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
    phys_addr = pci_resource_start(dev, bir) + table_offset;
    idx = msix_fixmap_alloc();
    if ( idx < 0 )
    {
        xfree(entry);
        return -ENOMEM;
    }
    set_fixmap_nocache(idx, phys_addr);
    /* Keep the sub-page offset of the table within the fixmap page. */
    base = (void *)(fix_to_virt(idx) + (phys_addr & ((1UL << PAGE_SHIFT) - 1)));

    entry->msi_attrib.type = PCI_CAP_ID_MSIX;
    entry->msi_attrib.is_64 = 1;
    entry->msi_attrib.entry_nr = entry_nr;
    entry->msi_attrib.maskbit = 1;
    entry->msi_attrib.masked = 1;
    entry->msi_attrib.pos = pos;
    entry->vector = vector;
    entry->dev = dev;
    /* For MSI-X, mask_base is a real mapped address of the table. */
    entry->mask_base = base;

    list_add_tail(&entry->list, &dev->msi_list);

    /* NOTE(review): setup_msi_irq()'s return value is ignored here,
     * unlike in msi_capability_init() — confirm whether failure should
     * be propagated. */
    setup_msi_irq(dev, entry);

    /* Set MSI-X enabled bits */
    pci_conf_write16(bus, slot, func, msix_control_reg(pos), control);

    return 0;
}
610 /**
611 * pci_enable_msi - configure device's MSI capability structure
612 * @dev: pointer to the pci_dev data structure of MSI device function
613 *
614 * Setup the MSI capability structure of device function with
615 * a single MSI irq upon its software driver call to request for
616 * MSI mode enabled on its hardware device function. A return of zero
617 * indicates the successful setup of an entry zero with the new MSI
618 * irq or non-zero for otherwise.
619 **/
620 static int __pci_enable_msi(u8 bus, u8 devfn, int vector)
621 {
622 int status;
623 struct pci_dev *dev;
625 dev = get_msi_pdev(bus, devfn);
626 if ( !dev )
627 {
628 dev = xmalloc(struct pci_dev);
629 if ( !dev )
630 return -ENOMEM;
631 dev->bus = bus;
632 dev->devfn = devfn;
633 INIT_LIST_HEAD(&dev->msi_list);
634 }
636 if ( find_msi_entry(dev, vector, PCI_CAP_ID_MSI) )
637 {
638 dprintk(XENLOG_WARNING, "vector %d has already mapped to MSI on device \
639 %02x:%02x.%01x.\n", vector, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
640 return 0;
641 }
643 status = msi_capability_init(dev, vector);
645 if ( dev != get_msi_pdev(bus, devfn) )
646 {
647 spin_lock(&msi_pdev_lock);
648 list_add_tail(&dev->msi_dev_list, &msi_pdev_list);
649 spin_unlock(&msi_pdev_lock);
650 }
652 return status;
653 }
/*
 * Disable plain MSI for the device owning @vector and free that
 * vector's descriptor.
 */
static void __pci_disable_msi(int vector)
{
    struct msi_desc *entry;
    struct pci_dev *dev;
    int pos;
    u16 control;
    u8 bus, slot, func;

    /* NOTE(review): assumes irq_desc[vector].msi_desc and its dev are
     * non-NULL — callers must guarantee this; confirm. */
    entry = irq_desc[vector].msi_desc;
    dev = entry->dev;
    bus = dev->bus;
    slot = PCI_SLOT(dev->devfn);
    func = PCI_FUNC(dev->devfn);

    pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
    control = pci_conf_read16(bus, slot, func, msi_control_reg(pos));
    msi_set_enable(dev, 0);

    BUG_ON(list_empty(&dev->msi_list));

    msi_free_vector(vector);

    /* NOTE(review): 'control' was captured before the disable, so this
     * write restores the original enable bit and may re-enable MSI —
     * confirm whether that is intended for the disable path. */
    pci_conf_write16(bus, slot, func, msi_control_reg(pos), control);
}
680 /**
681 * pci_enable_msix - configure device's MSI-X capability structure
682 * @dev: pointer to the pci_dev data structure of MSI-X device function
683 * @entries: pointer to an array of MSI-X entries
684 * @nvec: number of MSI-X irqs requested for allocation by device driver
685 *
686 * Setup the MSI-X capability structure of device function with the number
687 * of requested irqs upon its software driver call to request for
688 * MSI-X mode enabled on its hardware device function. A return of zero
689 * indicates the successful configuration of MSI-X capability structure
690 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
691 * Or a return of > 0 indicates that driver request is exceeding the number
692 * of irqs available. Driver should use the returned value to re-send
693 * its request.
694 **/
695 static int __pci_enable_msix(u8 bus, u8 devfn, int vector, int entry_nr)
696 {
697 int status, pos, nr_entries;
698 struct pci_dev *dev;
699 u16 control;
700 u8 slot = PCI_SLOT(devfn);
701 u8 func = PCI_FUNC(devfn);
703 pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
704 control = pci_conf_read16(bus, slot, func, msi_control_reg(pos));
705 nr_entries = multi_msix_capable(control);
706 if (entry_nr > nr_entries)
707 return -EINVAL;
709 /* Check whether driver already requested for MSI-X irqs */
710 dev = get_msi_pdev(bus, devfn);
712 if ( !dev )
713 {
714 dev = xmalloc(struct pci_dev);
715 if ( !dev )
716 return -ENOMEM;
717 dev->bus = bus;
718 dev->devfn = devfn;
719 INIT_LIST_HEAD(&dev->msi_list);
720 }
722 if ( find_msi_entry(dev, vector, PCI_CAP_ID_MSIX) )
723 {
724 dprintk(XENLOG_WARNING, "vector %d has already mapped to MSIX on \
725 device %02x:%02x.%01x.\n", vector, bus,
726 PCI_SLOT(devfn), PCI_FUNC(devfn));
727 return 0;
728 }
730 status = msix_capability_init(dev, vector, entry_nr);
732 if ( dev != get_msi_pdev(bus, devfn) )
733 {
734 spin_lock(&msi_pdev_lock);
735 list_add_tail(&dev->msi_dev_list, &msi_pdev_list);
736 spin_unlock(&msi_pdev_lock);
737 }
739 return status;
740 }
742 static void __pci_disable_msix(int vector)
743 {
744 struct msi_desc *entry;
745 struct pci_dev *dev;
746 int pos;
747 u16 control;
748 u8 bus, slot, func;
750 entry = irq_desc[vector].msi_desc;
751 dev = entry->dev;
752 bus = dev->bus;
753 slot = PCI_SLOT(dev->devfn);
754 func = PCI_FUNC(dev->devfn);
756 pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
757 control = pci_conf_read16(bus, slot, func, msix_control_reg(pos));
758 msi_set_enable(dev, 0);
760 BUG_ON(list_empty(&dev->msi_list));
762 msi_free_vector(vector);
764 pci_conf_write16(bus, slot, func, msix_control_reg(pos), control);
765 }
767 int pci_enable_msi(u8 bus, u8 devfn, int vector, int entry_nr, int msi)
768 {
769 if ( msi )
770 return __pci_enable_msi(bus, devfn, vector);
771 else
772 return __pci_enable_msix(bus, devfn, vector, entry_nr);
773 }
775 void pci_disable_msi(int vector)
776 {
777 irq_desc_t *desc;
779 desc = &irq_desc[vector];
780 if ( desc->msi_desc->msi_attrib.type == PCI_CAP_ID_MSI )
781 __pci_disable_msi(vector);
782 else if ( desc->msi_desc->msi_attrib.type == PCI_CAP_ID_MSIX )
783 __pci_disable_msix(vector);
784 }
786 void pci_cleanup_msi(u8 bus, u8 devfn)
787 {
788 struct pci_dev *dev = get_msi_pdev(bus, devfn);
790 if ( !dev )
791 return;
792 msi_free_vectors(dev);
794 /* Disable MSI and/or MSI-X */
795 msi_set_enable(dev, 0);
796 msix_set_enable(dev, 0);
797 }