debuggers.hg
changeset 18953:e767f80d4bcc
Clean up use of spin_is_locked() and introduce rw_is_locked().
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Sat Dec 13 15:28:10 2008 +0000 (2008-12-13) |
parents | db0c6d297d00 |
children | 6a3c2b4459ad |
files | xen/arch/x86/irq.c xen/arch/x86/msi.c xen/common/spinlock.c xen/drivers/passthrough/iommu.c xen/drivers/passthrough/pci.c xen/drivers/passthrough/vtd/iommu.c xen/include/asm-ia64/linux-xen/asm/spinlock.h xen/include/asm-x86/spinlock.h xen/include/xen/spinlock.h |
line diff
1.1 --- a/xen/arch/x86/irq.c Sat Dec 13 15:04:53 2008 +0000 1.2 +++ b/xen/arch/x86/irq.c Sat Dec 13 15:28:10 2008 +0000 1.3 @@ -850,7 +850,7 @@ int map_domain_pirq( 1.4 struct msi_desc *msi_desc; 1.5 struct pci_dev *pdev = NULL; 1.6 1.7 - ASSERT(spin_is_locked(&pcidevs_lock)); 1.8 + ASSERT(rw_is_locked(&pcidevs_lock)); 1.9 ASSERT(spin_is_locked(&d->event_lock)); 1.10 1.11 if ( !IS_PRIV(current->domain) ) 1.12 @@ -930,7 +930,7 @@ int unmap_domain_pirq(struct domain *d, 1.13 if ( !IS_PRIV(current->domain) ) 1.14 return -EINVAL; 1.15 1.16 - ASSERT(spin_is_locked(&pcidevs_lock)); 1.17 + ASSERT(rw_is_locked(&pcidevs_lock)); 1.18 ASSERT(spin_is_locked(&d->event_lock)); 1.19 1.20 vector = d->arch.pirq_vector[pirq];
2.1 --- a/xen/arch/x86/msi.c Sat Dec 13 15:04:53 2008 +0000 2.2 +++ b/xen/arch/x86/msi.c Sat Dec 13 15:28:10 2008 +0000 2.3 @@ -440,7 +440,7 @@ static int msi_capability_init(struct pc 2.4 u8 slot = PCI_SLOT(dev->devfn); 2.5 u8 func = PCI_FUNC(dev->devfn); 2.6 2.7 - ASSERT(spin_is_locked(&pcidevs_lock)); 2.8 + ASSERT(rw_is_locked(&pcidevs_lock)); 2.9 pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI); 2.10 control = pci_conf_read16(bus, slot, func, msi_control_reg(pos)); 2.11 /* MSI Entry Initialization */ 2.12 @@ -509,7 +509,7 @@ static int msix_capability_init(struct p 2.13 u8 slot = PCI_SLOT(dev->devfn); 2.14 u8 func = PCI_FUNC(dev->devfn); 2.15 2.16 - ASSERT(spin_is_locked(&pcidevs_lock)); 2.17 + ASSERT(rw_is_locked(&pcidevs_lock)); 2.18 ASSERT(desc); 2.19 2.20 pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX); 2.21 @@ -574,7 +574,7 @@ static int __pci_enable_msi(struct msi_i 2.22 int status; 2.23 struct pci_dev *pdev; 2.24 2.25 - ASSERT(spin_is_locked(&pcidevs_lock)); 2.26 + ASSERT(rw_is_locked(&pcidevs_lock)); 2.27 pdev = pci_get_pdev(msi->bus, msi->devfn); 2.28 if ( !pdev ) 2.29 return -ENODEV; 2.30 @@ -634,7 +634,7 @@ static int __pci_enable_msix(struct msi_ 2.31 u8 slot = PCI_SLOT(msi->devfn); 2.32 u8 func = PCI_FUNC(msi->devfn); 2.33 2.34 - ASSERT(spin_is_locked(&pcidevs_lock)); 2.35 + ASSERT(rw_is_locked(&pcidevs_lock)); 2.36 pdev = pci_get_pdev(msi->bus, msi->devfn); 2.37 if ( !pdev ) 2.38 return -ENODEV; 2.39 @@ -688,7 +688,7 @@ static void __pci_disable_msix(struct ms 2.40 */ 2.41 int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc) 2.42 { 2.43 - ASSERT(spin_is_locked(&pcidevs_lock)); 2.44 + ASSERT(rw_is_locked(&pcidevs_lock)); 2.45 2.46 return msi->table_base ? __pci_enable_msix(msi, desc) : 2.47 __pci_enable_msi(msi, desc);
3.1 --- a/xen/common/spinlock.c Sat Dec 13 15:04:53 2008 +0000 3.2 +++ b/xen/common/spinlock.c Sat Dec 13 15:28:10 2008 +0000 3.3 @@ -215,3 +215,9 @@ void _write_unlock_irqrestore(rwlock_t * 3.4 _raw_write_unlock(&lock->raw); 3.5 local_irq_restore(flags); 3.6 } 3.7 + 3.8 +int _rw_is_locked(rwlock_t *lock) 3.9 +{ 3.10 + check_lock(&lock->debug); 3.11 + return _raw_rw_is_locked(&lock->raw); 3.12 +}
4.1 --- a/xen/drivers/passthrough/iommu.c Sat Dec 13 15:04:53 2008 +0000 4.2 +++ b/xen/drivers/passthrough/iommu.c Sat Dec 13 15:28:10 2008 +0000 4.3 @@ -87,7 +87,7 @@ int iommu_add_device(struct pci_dev *pde 4.4 if ( !pdev->domain ) 4.5 return -EINVAL; 4.6 4.7 - ASSERT(spin_is_locked(&pcidevs_lock)); 4.8 + ASSERT(rw_is_locked(&pcidevs_lock)); 4.9 4.10 hd = domain_hvm_iommu(pdev->domain); 4.11 if ( !iommu_enabled || !hd->platform_ops )
5.1 --- a/xen/drivers/passthrough/pci.c Sat Dec 13 15:04:53 2008 +0000 5.2 +++ b/xen/drivers/passthrough/pci.c Sat Dec 13 15:28:10 2008 +0000 5.3 @@ -62,7 +62,7 @@ struct pci_dev *pci_get_pdev(int bus, in 5.4 { 5.5 struct pci_dev *pdev = NULL; 5.6 5.7 - ASSERT(spin_is_locked(&pcidevs_lock)); 5.8 + ASSERT(rw_is_locked(&pcidevs_lock)); 5.9 5.10 list_for_each_entry ( pdev, &alldevs_list, alldevs_list ) 5.11 if ( (pdev->bus == bus || bus == -1) && 5.12 @@ -78,7 +78,7 @@ struct pci_dev *pci_get_pdev_by_domain(s 5.13 { 5.14 struct pci_dev *pdev = NULL; 5.15 5.16 - ASSERT(spin_is_locked(&pcidevs_lock)); 5.17 + ASSERT(rw_is_locked(&pcidevs_lock)); 5.18 5.19 list_for_each_entry ( pdev, &alldevs_list, alldevs_list ) 5.20 if ( (pdev->bus == bus || bus == -1) &&
6.1 --- a/xen/drivers/passthrough/vtd/iommu.c Sat Dec 13 15:04:53 2008 +0000 6.2 +++ b/xen/drivers/passthrough/vtd/iommu.c Sat Dec 13 15:28:10 2008 +0000 6.3 @@ -1037,7 +1037,7 @@ static int domain_context_mapping_one( 6.4 struct pci_dev *pdev = NULL; 6.5 int agaw; 6.6 6.7 - ASSERT(spin_is_locked(&pcidevs_lock)); 6.8 + ASSERT(rw_is_locked(&pcidevs_lock)); 6.9 spin_lock(&iommu->lock); 6.10 maddr = bus_to_context_maddr(iommu, bus); 6.11 context_entries = (struct context_entry *)map_vtd_domain_page(maddr); 6.12 @@ -1214,7 +1214,7 @@ static int domain_context_mapping(struct 6.13 if ( !drhd ) 6.14 return -ENODEV; 6.15 6.16 - ASSERT(spin_is_locked(&pcidevs_lock)); 6.17 + ASSERT(rw_is_locked(&pcidevs_lock)); 6.18 6.19 type = pdev_type(bus, devfn); 6.20 switch ( type ) 6.21 @@ -1298,7 +1298,7 @@ static int domain_context_unmap_one( 6.22 struct context_entry *context, *context_entries; 6.23 u64 maddr; 6.24 6.25 - ASSERT(spin_is_locked(&pcidevs_lock)); 6.26 + ASSERT(rw_is_locked(&pcidevs_lock)); 6.27 spin_lock(&iommu->lock); 6.28 6.29 maddr = bus_to_context_maddr(iommu, bus); 6.30 @@ -1388,7 +1388,7 @@ static int reassign_device_ownership( 6.31 struct iommu *pdev_iommu; 6.32 int ret, found = 0; 6.33 6.34 - ASSERT(spin_is_locked(&pcidevs_lock)); 6.35 + ASSERT(rw_is_locked(&pcidevs_lock)); 6.36 pdev = pci_get_pdev_by_domain(source, bus, devfn); 6.37 6.38 if (!pdev) 6.39 @@ -1428,7 +1428,7 @@ void iommu_domain_teardown(struct domain 6.40 if ( list_empty(&acpi_drhd_units) ) 6.41 return; 6.42 6.43 - ASSERT(spin_is_locked(&pcidevs_lock)); 6.44 + ASSERT(rw_is_locked(&pcidevs_lock)); 6.45 spin_lock(&hd->mapping_lock); 6.46 iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw)); 6.47 hd->pgd_maddr = 0; 6.48 @@ -1518,7 +1518,7 @@ static int iommu_prepare_rmrr_dev(struct 6.49 u64 base, end; 6.50 unsigned long base_pfn, end_pfn; 6.51 6.52 - ASSERT(spin_is_locked(&pcidevs_lock)); 6.53 + ASSERT(rw_is_locked(&pcidevs_lock)); 6.54 ASSERT(rmrr->base_address < rmrr->end_address); 6.55 
6.56 base = rmrr->base_address & PAGE_MASK_4K; 6.57 @@ -1543,7 +1543,7 @@ static int intel_iommu_add_device(struct 6.58 u16 bdf; 6.59 int ret, i; 6.60 6.61 - ASSERT(spin_is_locked(&pcidevs_lock)); 6.62 + ASSERT(rw_is_locked(&pcidevs_lock)); 6.63 6.64 if ( !pdev->domain ) 6.65 return -EINVAL; 6.66 @@ -1782,7 +1782,7 @@ int intel_iommu_assign_device(struct dom 6.67 if ( list_empty(&acpi_drhd_units) ) 6.68 return -ENODEV; 6.69 6.70 - ASSERT(spin_is_locked(&pcidevs_lock)); 6.71 + ASSERT(rw_is_locked(&pcidevs_lock)); 6.72 pdev = pci_get_pdev(bus, devfn); 6.73 if (!pdev) 6.74 return -ENODEV;
7.1 --- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h Sat Dec 13 15:04:53 2008 +0000 7.2 +++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h Sat Dec 13 15:28:10 2008 +0000 7.3 @@ -216,4 +216,6 @@ do { \ 7.4 clear_bit(31, (x)); \ 7.5 }) 7.6 7.7 +#define _raw_rw_is_locked(x) (*(int *)(x) != 0) 7.8 + 7.9 #endif /* _ASM_IA64_SPINLOCK_H */
8.1 --- a/xen/include/asm-x86/spinlock.h Sat Dec 13 15:04:53 2008 +0000 8.2 +++ b/xen/include/asm-x86/spinlock.h Sat Dec 13 15:28:10 2008 +0000 8.3 @@ -12,8 +12,7 @@ typedef struct { 8.4 8.5 #define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 1 } 8.6 8.7 -#define _raw_spin_is_locked(x) \ 8.8 - (*(volatile char *)(&(x)->lock) <= 0) 8.9 +#define _raw_spin_is_locked(x) ((x)->lock <= 0) 8.10 8.11 static always_inline void _raw_spin_lock(raw_spinlock_t *lock) 8.12 { 8.13 @@ -75,4 +74,6 @@ static always_inline void _raw_write_loc 8.14 "lock ; addl $" RW_LOCK_BIAS_STR ",%0" : \ 8.15 "=m" ((rw)->lock) : : "memory" ) 8.16 8.17 +#define _raw_rw_is_locked(x) ((x)->lock < RW_LOCK_BIAS) 8.18 + 8.19 #endif /* __ASM_SPINLOCK_H */
9.1 --- a/xen/include/xen/spinlock.h Sat Dec 13 15:04:53 2008 +0000 9.2 +++ b/xen/include/xen/spinlock.h Sat Dec 13 15:28:10 2008 +0000 9.3 @@ -72,6 +72,8 @@ void _write_unlock(rwlock_t *lock); 9.4 void _write_unlock_irq(rwlock_t *lock); 9.5 void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags); 9.6 9.7 +int _rw_is_locked(rwlock_t *lock); 9.8 + 9.9 #define spin_lock(l) _spin_lock(l) 9.10 #define spin_lock_irq(l) _spin_lock_irq(l) 9.11 #define spin_lock_irqsave(l, f) ((f) = _spin_lock_irqsave(l)) 9.12 @@ -80,7 +82,7 @@ void _write_unlock_irqrestore(rwlock_t * 9.13 #define spin_unlock_irq(l) _spin_unlock_irq(l) 9.14 #define spin_unlock_irqrestore(l, f) _spin_unlock_irqrestore(l, f) 9.15 9.16 -#define spin_is_locked(l) _raw_spin_is_locked(&(l)->raw) 9.17 +#define spin_is_locked(l) _spin_is_locked(l) 9.18 #define spin_trylock(l) _spin_trylock(l) 9.19 9.20 /* Ensure a lock is quiescent between two critical operations. */ 9.21 @@ -113,4 +115,6 @@ void _write_unlock_irqrestore(rwlock_t * 9.22 #define write_unlock_irq(l) _write_unlock_irq(l) 9.23 #define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f) 9.24 9.25 +#define rw_is_locked(l) _rw_is_locked(l) 9.26 + 9.27 #endif /* __SPINLOCK_H__ */