debuggers.hg

view linux-2.6-xen-sparse/include/asm-xen/asm-i386/pci.h @ 6681:d647c3d381d2

The attached patch fixes two problems I ran into with the swiotlb code
in unstable (changeset aeaa3c83f6e5). Tested on a dual Opteron x86-64
machine with 4GB of memory and a tg3 modified to only DMA below 2GB.

- swiotlb_dma_supported() checked that the device DMA mask was equal
to or bigger than 4GB, when in fact all that's needed is that the device
be able to DMA into the swiotlb aperture (1GB on my AMD x86-64
machine). Some devices are actually only 31-bit DMA capable, so this
would've tripped them.

- On some platforms, PCI unmaps and syncs are nops, so there's no need to
keep track of the dma_addr they need after the initial mapping. The
DMA API supports this via the DECLARE_PCI_UNMAP_ADDR macros (see
Documentation/DMA-mapping.txt). Since the swiotlb code does make use of
the dma_addr for swiotlb_unmap_xxx and (more importantly)
swiotlb_dma_sync_xxx, we need to define them to something
meaningful.

Signed-Off-By: Muli Ben-Yehuda <mulix@mulix.org>
author kaf24@firebug.cl.cam.ac.uk
date Tue Sep 06 18:25:00 2005 +0000 (2005-09-06)
parents dd668f7527cb
children 8db9c5873b9b
line source
1 #ifndef __i386_PCI_H
2 #define __i386_PCI_H
4 #include <linux/config.h>
6 #ifdef __KERNEL__
7 #include <linux/mm.h> /* for struct page */
9 /* Can be used to override the logic in pci_scan_bus for skipping
10 already-configured bus numbers - to be used for buggy BIOSes
11 or architectures with incomplete PCI setup by the loader */
13 #ifdef CONFIG_PCI
14 extern unsigned int pcibios_assign_all_busses(void);
15 #else
16 #define pcibios_assign_all_busses() 0
17 #endif
/* Default: do not scan every function of each slot. This is #undef'ed
 * and redefined to 1 at the bottom of this header for Xen. */
18 #define pcibios_scan_all_fns(a, b) 0
/* pci_mem_start is used as the lower bound for PCI MMIO allocation
 * via PCIBIOS_MIN_MEM below. */
20 extern unsigned long pci_mem_start;
21 #define PCIBIOS_MIN_IO 0x1000
22 #define PCIBIOS_MIN_MEM (pci_mem_start)
24 #define PCIBIOS_MIN_CARDBUS_IO 0x4000
/* Arch hooks called by the generic PCI layer during bus setup. */
26 void pcibios_config_init(void);
27 struct pci_bus * pcibios_scan_root(int bus);
29 void pcibios_set_master(struct pci_dev *dev);
30 void pcibios_penalize_isa_irq(int irq);
31 struct irq_routing_table *pcibios_get_irq_routing_table(void);
32 int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
34 /* Dynamic DMA mapping stuff.
35 * i386 has everything mapped statically.
36 */
38 #include <linux/types.h>
39 #include <linux/slab.h>
40 #include <asm/scatterlist.h>
41 #include <linux/string.h>
42 #include <asm/io.h>
44 struct pci_dev;
46 #ifdef CONFIG_SWIOTLB
49 /* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
50 #define PCI_DMA_BUS_IS_PHYS (0)
/* With SWIOTLB the bus address and length passed to pci_unmap_*() and
 * the sync routines are really needed (see the changelog above), so the
 * DECLARE_PCI_UNMAP_* macros must declare real struct fields instead of
 * the usual i386 no-ops. */
52 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
53 dma_addr_t ADDR_NAME;
54 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
55 __u32 LEN_NAME;
56 #define pci_unmap_addr(PTR, ADDR_NAME) \
57 ((PTR)->ADDR_NAME)
58 #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
59 (((PTR)->ADDR_NAME) = (VAL))
60 #define pci_unmap_len(PTR, LEN_NAME) \
61 ((PTR)->LEN_NAME)
62 #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
63 (((PTR)->LEN_NAME) = (VAL))
65 #else
67 /* The PCI address space does equal the physical memory
68 * address space. The networking and block device layers use
69 * this boolean for bounce buffer decisions.
70 */
71 #define PCI_DMA_BUS_IS_PHYS (1)
73 /* pci_unmap_{page,single} is a nop so... */
74 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
75 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
76 #define pci_unmap_addr(PTR, ADDR_NAME) (0)
77 #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
78 #define pci_unmap_len(PTR, LEN_NAME) (0)
79 #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
81 #endif
83 /* This is always fine. */
84 #define pci_dac_dma_supported(pci_dev, mask) (1)
86 static inline dma64_addr_t
87 pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
88 {
89 return ((dma64_addr_t) page_to_phys(page) +
90 (dma64_addr_t) offset);
91 }
93 static inline struct page *
94 pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
95 {
96 return pfn_to_page(dma_addr >> PAGE_SHIFT);
97 }
99 static inline unsigned long
100 pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
101 {
102 return (dma_addr & ~PAGE_MASK);
103 }
105 static inline void
106 pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
107 {
108 }
110 static inline void
111 pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
112 {
113 flush_write_buffers();
114 }
/* This arch supports mmap of PCI resources; pci_mmap_page_range()
 * (defined elsewhere) does the actual mapping. */
116 #define HAVE_PCI_MMAP
117 extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
118 enum pci_mmap_state mmap_state, int write_combine);
/* Hook for arch-specific per-device entries; nothing to add here. */
static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
	/* no platform-specific entries on this architecture */
}
125 #endif /* __KERNEL__ */
127 /* implement the pci_ DMA API in terms of the generic device dma_ one */
128 #include <asm-generic/pci-dma-compat.h>
130 /* generic pci stuff */
131 #include <asm-generic/pci.h>
133 /* On Xen we have to scan all functions since Xen hides bridges from
134 * us. If a bridge is at fn=0 and that slot has a multifunction
135 * device, we won't find the additional devices without scanning all
136 * functions. */
/* Replaces the default 0 definition from the top of this header. */
137 #undef pcibios_scan_all_fns
138 #define pcibios_scan_all_fns(a, b) 1
140 #endif /* __i386_PCI_H */