debuggers.hg
changeset 18974:738513b106fa
Change the pcidevs_lock from rw_lock to spin_lock
As pcidevs_lock has grown from protecting only the alldevs_list to
protecting more than that, it no longer benefits much from being an
rw_lock. Moreover, the previous patch 18906:2941b1a97c60 wrongly used
read_lock to protect some sensitive data (thanks to Espen for
pointing that out).

This patch also contains two minor fixes:
a) deassign_device would deadlock trying to take the pcidevs_lock
   when called from pci_release_devices; move the locking to the
   caller.
b) iommu_domain_teardown should not ASSERT that the pcidevs_lock is
   held, because it only updates the domain's VT-d mapping.

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
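
To make fix (a) concrete, below is a minimal, self-contained model of the
deadlock and of the caller-holds-the-lock convention this patch adopts. It is
a sketch, not the Xen code: a pthread mutex plus a flag stands in for Xen's
non-recursive spinlock and spin_is_locked(), and all names other than
deassign_device and pcidevs_lock are illustrative.

#include <assert.h>
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t pcidevs_lock = PTHREAD_MUTEX_INITIALIZER;
static int pcidevs_locked;          /* stand-in for spin_is_locked() */

struct domain { int id; };

/* Caller should hold the pcidevs_lock (mirrors the new deassign_device). */
static int deassign_device(struct domain *d, uint8_t bus, uint8_t devfn)
{
    assert(pcidevs_locked);   /* ASSERT(spin_is_locked(&pcidevs_lock)) */
    /* ... look up the pdev and hand it back to dom0 ... */
    (void)d; (void)bus; (void)devfn;
    return 0;
}

/* Hypercall-style entry point (mirrors XEN_DOMCTL_deassign_device):
 * it does not hold the lock yet, so it takes it around the call. */
static long domctl_deassign(struct domain *d, uint8_t bus, uint8_t devfn)
{
    long ret;
    pthread_mutex_lock(&pcidevs_lock);
    pcidevs_locked = 1;
    ret = deassign_device(d, bus, devfn);
    pcidevs_locked = 0;
    pthread_mutex_unlock(&pcidevs_lock);
    return ret;
}

/* Teardown path (mirrors pci_release_devices): it already holds the
 * lock while walking the domain's device list, so a deassign_device
 * that took the lock itself would deadlock right here. */
static void release_devices(struct domain *d)
{
    pthread_mutex_lock(&pcidevs_lock);
    pcidevs_locked = 1;
    deassign_device(d, 0, 0);       /* once per device owned by d */
    pcidevs_locked = 0;
    pthread_mutex_unlock(&pcidevs_lock);
}

int main(void)
{
    struct domain d = { 1 };
    domctl_deassign(&d, 3, 0);
    release_devices(&d);
    return 0;
}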
author    Keir Fraser <keir.fraser@citrix.com>
date      Fri Dec 19 14:52:32 2008 +0000 (2008-12-19)
parents   2312cc25232b
children  2dffa6ceb0af
files     xen/arch/x86/domctl.c xen/arch/x86/irq.c xen/arch/x86/msi.c xen/arch/x86/physdev.c xen/drivers/passthrough/amd/pci_amd_iommu.c xen/drivers/passthrough/iommu.c xen/drivers/passthrough/pci.c xen/drivers/passthrough/vtd/iommu.c xen/include/xen/pci.h
line diff
--- a/xen/arch/x86/domctl.c	Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/arch/x86/domctl.c	Fri Dec 19 14:52:32 2008 +0000
@@ -708,7 +708,9 @@ long arch_do_domctl(
             break;
         }
         ret = 0;
+        spin_lock(&pcidevs_lock);
         ret = deassign_device(d, bus, devfn);
+        spin_unlock(&pcidevs_lock);
         gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x:%x\n",
                  bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
--- a/xen/arch/x86/irq.c	Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/arch/x86/irq.c	Fri Dec 19 14:52:32 2008 +0000
@@ -850,7 +850,7 @@ int map_domain_pirq(
     struct msi_desc *msi_desc;
     struct pci_dev *pdev = NULL;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     ASSERT(spin_is_locked(&d->event_lock));
 
     if ( !IS_PRIV(current->domain) )
@@ -930,7 +930,7 @@ int unmap_domain_pirq(struct domain *d,
     if ( !IS_PRIV(current->domain) )
         return -EINVAL;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     ASSERT(spin_is_locked(&d->event_lock));
 
     vector = d->arch.pirq_vector[pirq];
@@ -993,7 +993,7 @@ void free_domain_pirqs(struct domain *d)
 {
     int i;
 
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     spin_lock(&d->event_lock);
 
     for ( i = 0; i < NR_IRQS; i++ )
@@ -1001,7 +1001,7 @@ void free_domain_pirqs(struct domain *d)
             unmap_domain_pirq(d, i);
 
     spin_unlock(&d->event_lock);
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 
 extern void dump_ioapic_irq_info(void);
--- a/xen/arch/x86/msi.c	Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/arch/x86/msi.c	Fri Dec 19 14:52:32 2008 +0000
@@ -440,7 +440,7 @@ static int msi_capability_init(struct pc
     u8 slot = PCI_SLOT(dev->devfn);
     u8 func = PCI_FUNC(dev->devfn);
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
     control = pci_conf_read16(bus, slot, func, msi_control_reg(pos));
     /* MSI Entry Initialization */
@@ -509,7 +509,7 @@ static int msix_capability_init(struct p
     u8 slot = PCI_SLOT(dev->devfn);
     u8 func = PCI_FUNC(dev->devfn);
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     ASSERT(desc);
 
     pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
@@ -574,7 +574,7 @@ static int __pci_enable_msi(struct msi_i
     int status;
     struct pci_dev *pdev;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pdev = pci_get_pdev(msi->bus, msi->devfn);
     if ( !pdev )
         return -ENODEV;
@@ -634,7 +634,7 @@ static int __pci_enable_msix(struct msi_
     u8 slot = PCI_SLOT(msi->devfn);
     u8 func = PCI_FUNC(msi->devfn);
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pdev = pci_get_pdev(msi->bus, msi->devfn);
     if ( !pdev )
         return -ENODEV;
@@ -688,7 +688,7 @@ static void __pci_disable_msix(struct ms
  */
 int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc)
 {
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
 
     return msi->table_base ? __pci_enable_msix(msi, desc) :
         __pci_enable_msi(msi, desc);
--- a/xen/arch/x86/physdev.c	Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/arch/x86/physdev.c	Fri Dec 19 14:52:32 2008 +0000
@@ -100,7 +100,7 @@ static int physdev_map_pirq(struct physd
         goto free_domain;
     }
 
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     /* Verify or get pirq. */
     spin_lock(&d->event_lock);
     if ( map->pirq < 0 )
@@ -148,7 +148,7 @@ static int physdev_map_pirq(struct physd
 
 done:
     spin_unlock(&d->event_lock);
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
         free_irq_vector(vector);
 free_domain:
@@ -172,11 +172,11 @@ static int physdev_unmap_pirq(struct phy
     if ( d == NULL )
         return -ESRCH;
 
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     spin_lock(&d->event_lock);
     ret = unmap_domain_pirq(d, unmap->pirq);
     spin_unlock(&d->event_lock);
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 
     rcu_unlock_domain(d);
 
@@ -345,12 +345,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
 
         irq_op.vector = assign_irq_vector(irq);
 
-        read_lock(&pcidevs_lock);
+        spin_lock(&pcidevs_lock);
         spin_lock(&dom0->event_lock);
         ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
                               MAP_PIRQ_TYPE_GSI, NULL);
         spin_unlock(&dom0->event_lock);
-        read_unlock(&pcidevs_lock);
+        spin_unlock(&pcidevs_lock);
 
         if ( copy_to_guest(arg, &irq_op, 1) != 0 )
             ret = -EFAULT;
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Dec 19 14:52:32 2008 +0000
@@ -126,7 +126,7 @@ static void amd_iommu_setup_dom0_devices
     u32 l;
     int bdf;
 
-    write_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     for ( bus = 0; bus < 256; bus++ )
     {
         for ( dev = 0; dev < 32; dev++ )
@@ -153,7 +153,7 @@ static void amd_iommu_setup_dom0_devices
             }
         }
     }
-    write_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 
 int amd_iov_detect(void)
@@ -282,11 +282,11 @@ static int reassign_device( struct domai
     struct amd_iommu *iommu;
     int bdf;
 
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     pdev = pci_get_pdev_by_domain(source, bus, devfn);
     if ( !pdev )
     {
-        read_unlock(&pcidevs_lock);
+        spin_unlock(&pcidevs_lock);
         return -ENODEV;
     }
 
@@ -297,7 +297,7 @@ static int reassign_device( struct domai
 
     if ( !iommu )
     {
-        read_unlock(&pcidevs_lock);
+        spin_unlock(&pcidevs_lock);
         amd_iov_error("Fail to find iommu."
                       " %x:%x.%x cannot be assigned to domain %d\n",
                       bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id);
@@ -314,7 +314,7 @@ static int reassign_device( struct domai
             bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
             source->domain_id, target->domain_id);
 
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     return 0;
 }
--- a/xen/drivers/passthrough/iommu.c	Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/drivers/passthrough/iommu.c	Fri Dec 19 14:52:32 2008 +0000
@@ -87,7 +87,7 @@ int iommu_add_device(struct pci_dev *pde
     if ( !pdev->domain )
         return -EINVAL;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
 
     hd = domain_hvm_iommu(pdev->domain);
     if ( !iommu_enabled || !hd->platform_ops )
@@ -117,7 +117,7 @@ int assign_device(struct domain *d, u8 b
     if ( !iommu_enabled || !hd->platform_ops )
         return 0;
 
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
         goto done;
 
@@ -128,7 +128,7 @@ int assign_device(struct domain *d, u8 b
         goto done;
     }
 done:
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     return rc;
 }
 
@@ -211,7 +211,8 @@ int iommu_unmap_page(struct domain *d, u
     return hd->platform_ops->unmap_page(d, gfn);
 }
 
-int deassign_device(struct domain *d, u8 bus, u8 devfn)
+/* caller should hold the pcidevs_lock */
+int deassign_device(struct domain *d, u8 bus, u8 devfn)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(d);
     struct pci_dev *pdev = NULL;
@@ -219,20 +220,16 @@ int deassign_device(struct domain *d, u
     if ( !iommu_enabled || !hd->platform_ops )
         return -EINVAL;
 
-    read_lock(&pcidevs_lock);
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pdev = pci_get_pdev(bus, devfn);
     if (!pdev)
-    {
-        read_unlock(&pcidevs_lock);
         return -ENODEV;
-    }
 
     if (pdev->domain != d)
     {
-        read_unlock(&pcidevs_lock);
         gdprintk(XENLOG_ERR VTDPREFIX,
                  "IOMMU: deassign a device not owned\n");
-       return -EINVAL;
+        return -EINVAL;
     }
 
     hd->platform_ops->reassign_device(d, dom0, bus, devfn);
@@ -243,8 +240,6 @@ int deassign_device(struct domain *d, u
         hd->platform_ops->teardown(d);
     }
 
-    read_unlock(&pcidevs_lock);
-
     return 0;
 }
 
@@ -288,7 +283,7 @@ int iommu_get_device_group(struct domain
 
     group_id = ops->get_device_group_id(bus, devfn);
 
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     for_each_pdev( d, pdev )
     {
         if ( (pdev->bus == bus) && (pdev->devfn == devfn) )
@@ -302,13 +297,13 @@ int iommu_get_device_group(struct domain
             bdf |= (pdev->devfn & 0xff) << 8;
             if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
             {
-                read_unlock(&pcidevs_lock);
+                spin_unlock(&pcidevs_lock);
                 return -1;
            }
            i++;
        }
    }
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 
     return i;
 }
--- a/xen/drivers/passthrough/pci.c	Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/drivers/passthrough/pci.c	Fri Dec 19 14:52:32 2008 +0000
@@ -28,7 +28,7 @@
 
 
 LIST_HEAD(alldevs_list);
-rwlock_t pcidevs_lock = RW_LOCK_UNLOCKED;
+spinlock_t pcidevs_lock = SPIN_LOCK_UNLOCKED;
 
 struct pci_dev *alloc_pdev(u8 bus, u8 devfn)
 {
@@ -62,7 +62,7 @@ struct pci_dev *pci_get_pdev(int bus, in
 {
     struct pci_dev *pdev = NULL;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
 
     list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
         if ( (pdev->bus == bus || bus == -1) &&
@@ -78,7 +78,7 @@ struct pci_dev *pci_get_pdev_by_domain(s
 {
     struct pci_dev *pdev = NULL;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
 
     list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
         if ( (pdev->bus == bus || bus == -1) &&
@@ -96,7 +96,7 @@ int pci_add_device(u8 bus, u8 devfn)
     struct pci_dev *pdev;
     int ret = -ENOMEM;
 
-    write_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     pdev = alloc_pdev(bus, devfn);
     if ( !pdev )
         goto out;
@@ -113,7 +113,7 @@ int pci_add_device(u8 bus, u8 devfn)
     }
 
 out:
-    write_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     printk(XENLOG_DEBUG "PCI add device %02x:%02x.%x\n", bus,
            PCI_SLOT(devfn), PCI_FUNC(devfn));
     return ret;
@@ -124,7 +124,7 @@ int pci_remove_device(u8 bus, u8 devfn)
     struct pci_dev *pdev;
     int ret = -ENODEV;;
 
-    write_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
         if ( pdev->bus == bus && pdev->devfn == devfn )
         {
@@ -138,7 +138,7 @@ int pci_remove_device(u8 bus, u8 devfn)
             break;
         }
 
-    write_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     return ret;
 }
 
@@ -187,7 +187,7 @@ void pci_release_devices(struct domain *
     struct pci_dev *pdev;
     u8 bus, devfn;
 
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     pci_clean_dpci_irqs(d);
     while ( (pdev = pci_get_pdev_by_domain(d, -1, -1)) )
     {
@@ -195,7 +195,7 @@ void pci_release_devices(struct domain *
         bus = pdev->bus; devfn = pdev->devfn;
         deassign_device(d, bus, devfn);
     }
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 
 #ifdef SUPPORT_MSI_REMAPPING
@@ -205,7 +205,7 @@ static void dump_pci_devices(unsigned ch
     struct msi_desc *msi;
 
     printk("==== PCI devices ====\n");
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
 
     list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
     {
@@ -217,7 +217,7 @@ static void dump_pci_devices(unsigned ch
         printk(">\n");
     }
 
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 
 static int __init setup_dump_pcidevs(void)
--- a/xen/drivers/passthrough/vtd/iommu.c	Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/drivers/passthrough/vtd/iommu.c	Fri Dec 19 14:52:32 2008 +0000
@@ -1037,7 +1037,7 @@ static int domain_context_mapping_one(
     struct pci_dev *pdev = NULL;
     int agaw;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     spin_lock(&iommu->lock);
     maddr = bus_to_context_maddr(iommu, bus);
     context_entries = (struct context_entry *)map_vtd_domain_page(maddr);
@@ -1215,7 +1215,7 @@ static int domain_context_mapping(struct
     if ( !drhd )
         return -ENODEV;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
 
     type = pdev_type(bus, devfn);
     switch ( type )
@@ -1297,7 +1297,7 @@ static int domain_context_unmap_one(
     struct context_entry *context, *context_entries;
     u64 maddr;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     spin_lock(&iommu->lock);
 
     maddr = bus_to_context_maddr(iommu, bus);
@@ -1399,7 +1399,7 @@ static int reassign_device_ownership(
     struct iommu *pdev_iommu;
     int ret, found = 0;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pdev = pci_get_pdev_by_domain(source, bus, devfn);
 
     if (!pdev)
@@ -1439,7 +1439,6 @@ void iommu_domain_teardown(struct domain
     if ( list_empty(&acpi_drhd_units) )
         return;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
     spin_lock(&hd->mapping_lock);
     iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
     hd->pgd_maddr = 0;
@@ -1529,7 +1528,7 @@ static int iommu_prepare_rmrr_dev(struct
     u64 base, end;
     unsigned long base_pfn, end_pfn;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     ASSERT(rmrr->base_address < rmrr->end_address);
 
     base = rmrr->base_address & PAGE_MASK_4K;
@@ -1554,7 +1553,7 @@ static int intel_iommu_add_device(struct
     u16 bdf;
     int ret, i;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
 
     if ( !pdev->domain )
         return -EINVAL;
@@ -1617,7 +1616,7 @@ static void setup_dom0_devices(struct do
 
     hd = domain_hvm_iommu(d);
 
-    write_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     for ( bus = 0; bus < 256; bus++ )
     {
         for ( dev = 0; dev < 32; dev++ )
@@ -1637,7 +1636,7 @@ static void setup_dom0_devices(struct do
             }
         }
     }
-    write_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 
 void clear_fault_bits(struct iommu *iommu)
@@ -1710,7 +1709,7 @@ static void setup_dom0_rmrr(struct domai
     u16 bdf;
     int ret, i;
 
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     for_each_rmrr_device ( rmrr, bdf, i )
     {
         ret = iommu_prepare_rmrr_dev(d, rmrr, PCI_BUS(bdf), PCI_DEVFN2(bdf));
@@ -1718,7 +1717,7 @@ static void setup_dom0_rmrr(struct domai
             gdprintk(XENLOG_ERR VTDPREFIX,
                      "IOMMU: mapping reserved region failed\n");
     }
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
 }
 
 int intel_vtd_setup(void)
@@ -1771,15 +1770,15 @@ int device_assigned(u8 bus, u8 devfn)
 {
     struct pci_dev *pdev;
 
-    read_lock(&pcidevs_lock);
+    spin_lock(&pcidevs_lock);
     pdev = pci_get_pdev_by_domain(dom0, bus, devfn);
     if (!pdev)
     {
-        read_unlock(&pcidevs_lock);
+        spin_unlock(&pcidevs_lock);
         return -1;
     }
 
-    read_unlock(&pcidevs_lock);
+    spin_unlock(&pcidevs_lock);
     return 0;
 }
 
@@ -1793,7 +1792,7 @@ int intel_iommu_assign_device(struct dom
     if ( list_empty(&acpi_drhd_units) )
         return -ENODEV;
 
-    ASSERT(rw_is_locked(&pcidevs_lock));
+    ASSERT(spin_is_locked(&pcidevs_lock));
     pdev = pci_get_pdev(bus, devfn);
     if (!pdev)
         return -ENODEV;
--- a/xen/include/xen/pci.h	Fri Dec 19 14:44:40 2008 +0000
+++ b/xen/include/xen/pci.h	Fri Dec 19 14:52:32 2008 +0000
@@ -42,13 +42,12 @@ struct pci_dev {
     list_for_each_entry(pdev, &(domain->arch.pdev_list), domain_list)
 
 /*
- * The pcidevs_lock write-lock must be held when doing alloc_pdev() or
- * free_pdev(). Never de-reference pdev without holding pdev->lock or
- * pcidevs_lock. Always aquire pcidevs_lock before pdev->lock when
- * doing free_pdev().
+ * The pcidevs_lock protect alldevs_list, and the assignment for the
+ * devices, it also sync the access to the msi capability that is not
+ * interrupt handling related (the mask bit register).
  */
 
-extern rwlock_t pcidevs_lock;
+extern spinlock_t pcidevs_lock;
 
 struct pci_dev *alloc_pdev(u8 bus, u8 devfn);
 void free_pdev(struct pci_dev *pdev);
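
As a closing illustration of the contract documented by the new pci.h
comment, here is a hedged, self-contained model of how device-list lookups
are meant to be used under the single spinlock. It is a sketch only: a
pthread mutex stands in for Xen's spinlock, and the struct fields, list,
and the reassign() helper are simplified stand-ins, not Xen code.

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

static pthread_mutex_t pcidevs_lock = PTHREAD_MUTEX_INITIALIZER;

struct pci_dev {
    struct pci_dev *next;        /* stand-in for alldevs_list */
    uint8_t bus, devfn;
    int owner;                   /* stand-in for pdev->domain */
};

static struct pci_dev *alldevs;

/* Valid only with pcidevs_lock held; the returned pointer may be
 * dereferenced only inside the same critical section, since the
 * single lock is now what keeps the pdev from going away (there is
 * no per-device pdev->lock any more). */
static struct pci_dev *pci_get_pdev(uint8_t bus, uint8_t devfn)
{
    struct pci_dev *pdev;
    for ( pdev = alldevs; pdev; pdev = pdev->next )
        if ( pdev->bus == bus && pdev->devfn == devfn )
            return pdev;
    return NULL;
}

/* Typical caller: one lock acquisition covers both lookup and use. */
static int reassign(int new_owner, uint8_t bus, uint8_t devfn)
{
    int rc = -1;
    struct pci_dev *pdev;
    pthread_mutex_lock(&pcidevs_lock);
    pdev = pci_get_pdev(bus, devfn);
    if ( pdev )
    {
        pdev->owner = new_owner;  /* safe: still under the lock */
        rc = 0;
    }
    pthread_mutex_unlock(&pcidevs_lock);
    return rc;
}

int main(void)
{
    static struct pci_dev dev = { NULL, 3, 0, 0 };
    alldevs = &dev;
    return reassign(1, 3, 0);
}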