debuggers.hg: linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c @ 6681:d647c3d381d2

The attached patch fixes two problems I ran into with the swiotlb code
in unstable (changeset aeaa3c83f6e5). Tested on a dual Opteron x86-64
machine with 4GB of memory and a tg3 modified to only DMA below 2GB.

- swiotlb_dma_supported() checked that the device DMA mask was equal
to or bigger than 4GB, when in fact all that's needed is that the device
be able to DMA into the swiotlb aperture (1GB on my AMD x86-64
machine). Some devices are actually only 31-bit DMA capable, so this
would've tripped them.
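
For illustration, the effective change (sketched from the description above,
not the literal diff hunks):

    /* before: insist on a mask covering a full 4GB */
    return (mask >= 0xffffffffUL);

    /* after: only insist that the mask covers the swiotlb aperture */
    return (mask >= (iotlb_bus_end - 1));

A 31-bit-capable device now passes as long as the bounce buffers sit within
its reach.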

- On some platforms, PCI unmaps and syncs are nops, so there's no need to
keep track of the dma_addr they need after the initial mapping. The
DMA API supports this via the DECLARE_PCI_UNMAP_ADDR macros (see
Documentation/DMA-mapping.txt). Since the swiotlb code does make use of
the dma_addr for swiotlb_unmap_xxx and (more importantly)
swiotlb_dma_sync_xxx, we need to define them to something
meaningful.
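
To illustrate the pattern from Documentation/DMA-mapping.txt (the structure
and variable names below are hypothetical, not taken from this patch): a
driver keeps the unmap information next to its per-buffer state via the
DECLARE_PCI_UNMAP_* macros, which expand to nothing on platforms where
unmap/sync are nops:

    struct ring_entry {
            struct sk_buff *skb;
            DECLARE_PCI_UNMAP_ADDR(mapping)
            DECLARE_PCI_UNMAP_LEN(len)
    };

    /* at map time */
    pci_unmap_addr_set(entry, mapping, bus_addr);
    pci_unmap_len_set(entry, len, size);

    /* at unmap time */
    pci_unmap_single(pdev, pci_unmap_addr(entry, mapping),
                     pci_unmap_len(entry, len), PCI_DMA_TODEVICE);

Because the Xen swiotlb really does consult the dma_addr in its unmap and
sync paths, these macros must expand to real storage here rather than to
nothing.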

Signed-off-by: Muli Ben-Yehuda <mulix@mulix.org>
author kaf24@firebug.cl.cam.ac.uk
date Tue Sep 06 18:25:00 2005 +0000 (2005-09-06)
parents 1f460d0fd6c6
children 28a10ec0fd6b

/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
 */

#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <asm/io.h>
#include <asm/pci.h>
#include <asm/dma.h>

#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))

#define SG_ENT_PHYS_ADDRESS(sg) (page_to_phys((sg)->page) + (sg)->offset)

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2. What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE 128

/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11

int swiotlb_force;
static char *iotlb_virt_start;
static unsigned long iotlb_nslabs;

/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static dma_addr_t iotlb_bus_start, iotlb_bus_end, iotlb_bus_mask;

/* Does the given dma address reside within the swiotlb aperture? */
#define in_swiotlb_aperture(a) (!(((a) ^ iotlb_bus_start) & iotlb_bus_mask))

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static struct phys_addr {
        struct page *page;
        unsigned int offset;
} *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
        /* Unlike ia64, the size is aperture in megabytes, not 'slabs'! */
        if (isdigit(*str)) {
                iotlb_nslabs = simple_strtoul(str, &str, 0) <<
                        (20 - IO_TLB_SHIFT);
                iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
                /* Round up to power of two (xen_create_contiguous_region). */
                while (iotlb_nslabs & (iotlb_nslabs-1))
                        iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
        }
        if (*str == ',')
                ++str;
        /*
         * NB. 'force' enables the swiotlb, but doesn't force its use for
         * every DMA like it does on native Linux.
         */
        if (!strcmp(str, "force"))
                swiotlb_force = 1;
        return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
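
/*
 * Illustrative example only: with the parser above, a dom0 command line such
 * as
 *
 *     swiotlb=64,force
 *
 * requests a 64MB aperture (64 << (20 - IO_TLB_SHIFT) = 32768 slabs, which
 * is already segment-aligned and a power of two) and enables the swiotlb
 * even on machines where swiotlb_init() would not turn it on by itself.
 * The right size depends on the workload; 64 here is just an example.
 */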

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the PCI DMA API.
 */
void
swiotlb_init_with_default_size (size_t default_size)
{
        unsigned long i, bytes;

        if (!iotlb_nslabs) {
                iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
                iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
                /* Round up to power of two (xen_create_contiguous_region). */
                while (iotlb_nslabs & (iotlb_nslabs-1))
                        iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
        }

        bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);

        /*
         * Get IO TLB memory from the low pages
         */
        iotlb_virt_start = alloc_bootmem_low_pages(bytes);
        if (!iotlb_virt_start)
                panic("Cannot allocate SWIOTLB buffer!\n"
                      "Use dom0_mem Xen boot parameter to reserve\n"
                      "some DMA memory (e.g., dom0_mem=-128M).\n");

        xen_create_contiguous_region(
                (unsigned long)iotlb_virt_start, get_order(bytes));

        /*
         * Allocate and initialize the free list array. This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
         */
        io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
        for (i = 0; i < iotlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
        io_tlb_orig_addr = alloc_bootmem(
                iotlb_nslabs * sizeof(*io_tlb_orig_addr));

        /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);

        iotlb_bus_start = virt_to_bus(iotlb_virt_start);
        iotlb_bus_end   = iotlb_bus_start + bytes;
        iotlb_bus_mask  = ~(dma_addr_t)(bytes - 1);

        printk(KERN_INFO "Software IO TLB enabled:\n"
               " Aperture:     %lu megabytes\n"
               " Bus range:    0x%016lx - 0x%016lx\n"
               " Kernel range: 0x%016lx - 0x%016lx\n",
               bytes >> 20,
               (unsigned long)iotlb_bus_start,
               (unsigned long)iotlb_bus_end,
               (unsigned long)iotlb_virt_start,
               (unsigned long)iotlb_virt_start + bytes);
}

void
swiotlb_init(void)
{
        /* The user can forcibly enable swiotlb. */
        if (swiotlb_force)
                swiotlb = 1;

        /*
         * Otherwise, enable for domain 0 if the machine has 'lots of memory',
         * which we take to mean more than 2GB.
         */
        if (xen_start_info->flags & SIF_INITDOMAIN) {
                dom0_op_t op;
                op.cmd = DOM0_PHYSINFO;
                if ((HYPERVISOR_dom0_op(&op) == 0) &&
                    (op.u.physinfo.total_pages > 0x7ffff))
                        swiotlb = 1;
        }

        if (swiotlb)
                swiotlb_init_with_default_size(64 * (1<<20));

        printk(KERN_INFO "swiotlb is %sabled%s\n",
               (swiotlb ? "en" : "dis"),
               (swiotlb_force ? " (forced)" : ""));
}
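
/*
 * Copy 'size' bytes between the original buffer described by 'buffer' and
 * the bounce buffer at 'dma_addr', in the direction given by 'dir'.
 * Highmem pages are copied one page at a time through a temporary atomic
 * kmap; lowmem pages are copied directly via their pseudo-physical mapping.
 */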

static void
__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
{
        if (PageHighMem(buffer.page)) {
                size_t len, bytes;
                char *dev, *host, *kmp;
                len = size;
                while (len != 0) {
                        if (((bytes = len) + buffer.offset) > PAGE_SIZE)
                                bytes = PAGE_SIZE - buffer.offset;
                        kmp = kmap_atomic(buffer.page, KM_SWIOTLB);
                        dev = dma_addr + size - len;
                        host = kmp + buffer.offset;
                        memcpy((dir == DMA_FROM_DEVICE) ? host : dev,
                               (dir == DMA_FROM_DEVICE) ? dev : host,
                               bytes);
                        kunmap_atomic(kmp, KM_SWIOTLB);
                        len -= bytes;
                        buffer.page++;
                        buffer.offset = 0;
                }
        } else {
                char *host = (char *)phys_to_virt(
                        page_to_pseudophys(buffer.page)) + buffer.offset;
                if (dir == DMA_FROM_DEVICE)
                        memcpy(host, dma_addr, size);
                else if (dir == DMA_TO_DEVICE)
                        memcpy(dma_addr, host, size);
        }
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
{
        unsigned long flags;
        char *dma_addr;
        unsigned int nslots, stride, index, wrap;
        int i;

        /*
         * For mappings greater than a page, we limit the stride (and
         * hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;

        BUG_ON(!nslots);

        /*
         * Find suitable number of IO TLB entries size that will fit this
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                wrap = index = ALIGN(io_tlb_index, stride);

                if (index >= iotlb_nslabs)
                        wrap = index = 0;

                do {
                        /*
                         * If we find a slot that indicates we have 'nslots'
                         * number of contiguous buffers, we allocate the
                         * buffers from that slot and mark the entries as '0'
                         * indicating unavailable.
                         */
                        if (io_tlb_list[index] >= nslots) {
                                int count = 0;

                                for (i = index; i < (int)(index + nslots); i++)
                                        io_tlb_list[i] = 0;
                                for (i = index - 1;
                                     (OFFSET(i, IO_TLB_SEGSIZE) !=
                                      IO_TLB_SEGSIZE - 1) && io_tlb_list[i];
                                     i--)
                                        io_tlb_list[i] = ++count;
                                dma_addr = iotlb_virt_start +
                                        (index << IO_TLB_SHIFT);

                                /*
                                 * Update the indices to avoid searching in
                                 * the next round.
                                 */
                                io_tlb_index =
                                        ((index + nslots) < iotlb_nslabs
                                         ? (index + nslots) : 0);

                                goto found;
                        }
                        index += stride;
                        if (index >= iotlb_nslabs)
                                index = 0;
                } while (index != wrap);

                spin_unlock_irqrestore(&io_tlb_lock, flags);
                return NULL;
        }
 found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory. Then we sync the buffer if
         * needed.
         */
        io_tlb_orig_addr[index] = buffer;
        if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
                __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);

        return dma_addr;
}
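
/*
 * Worked example (illustrative only, nothing built from it): with
 * IO_TLB_SEGSIZE = 128, a freshly initialised segment has
 *
 *     io_tlb_list[] = { 128, 127, 126, ..., 2, 1 }
 *
 * i.e. each entry records how many contiguous free slabs start at that
 * index, never counting past a segment boundary. A request for nslots = 3
 * at index 0 zeroes entries 0..2, leaving { 0, 0, 0, 125, 124, ... }, so
 * the next search can tell at a glance that 125 contiguous slabs are still
 * free starting at index 3.
 */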

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
        struct phys_addr buffer = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry
         */
        if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
                __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                         io_tlb_list[index + nslots] : 0);
                /*
                 * Step 1: return the slots to the free list, merging the
                 * slots with succeeding slots
                 */
                for (i = index + nslots - 1; i >= index; i--)
                        io_tlb_list[i] = ++count;
                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non zero)
                 */
                for (i = index - 1;
                     (OFFSET(i, IO_TLB_SEGSIZE) !=
                      IO_TLB_SEGSIZE - 1) && io_tlb_list[i];
                     i--)
                        io_tlb_list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
        struct phys_addr buffer = io_tlb_orig_addr[index];
        BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
        __sync_single(buffer, dma_addr, size, dir);
}

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly
         * unless they check for pci_dma_mapping_error (most don't).
         * When the mapping is small enough return a static buffer to limit
         * the damage, or panic when the transfer is too big.
         */
        printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
               "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");

        if (size > io_tlb_overflow && do_panic) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Random memory would be DMAed\n");
        }
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * PCI address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
        dma_addr_t dev_addr = virt_to_bus(ptr);
        void *map;
        struct phys_addr buffer;

        BUG_ON(dir == DMA_NONE);

        /*
         * If the pointer passed in happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (!range_straddles_page_boundary(ptr, size) &&
            !address_needs_mapping(hwdev, dev_addr))
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        buffer.page = virt_to_page(ptr);
        buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
        map = map_single(hwdev, buffer, size, dir);
        if (!map) {
                swiotlb_full(hwdev, size, dir, 1);
                map = io_tlb_overflow_buffer;
        }

        dev_addr = virt_to_bus(map);
        return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_single call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
                     int dir)
{
        BUG_ON(dir == DMA_NONE);
        if (in_swiotlb_aperture(dev_addr))
                unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
}
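
/*
 * Illustrative sketch only (not built): how a streaming mapping made through
 * this layer is typically used. Real drivers go through the generic
 * dma_map_single()/pci_map_single() wrappers rather than calling the
 * swiotlb_* functions directly; example_tx(), 'mydev' and 'buf' are
 * hypothetical names.
 */
#if 0
static int example_tx(struct device *mydev, void *buf, size_t len)
{
        dma_addr_t bus;

        bus = swiotlb_map_single(mydev, buf, len, DMA_TO_DEVICE);
        if (swiotlb_dma_mapping_error(bus))
                return -ENOMEM; /* mapping fell back to the overflow buffer */

        /* ... hand 'bus' to the hardware and wait for the transfer ... */

        swiotlb_unmap_single(mydev, bus, len, DMA_TO_DEVICE);
        return 0;
}
#endif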

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
 * call this function before doing so. At the next point you give the PCI dma
 * address back to the card, you must first perform a
 * swiotlb_sync_single_for_device, and then the device again owns the buffer.
 */
void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, int dir)
{
        BUG_ON(dir == DMA_NONE);
        if (in_swiotlb_aperture(dev_addr))
                sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
}

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, int dir)
{
        BUG_ON(dir == DMA_NONE);
        if (in_swiotlb_aperture(dev_addr))
                sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
}
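
/*
 * Illustrative sketch only (not built): the ownership hand-off described
 * above, for a receive buffer that is inspected by the CPU and then handed
 * back to the device for reuse. example_rx_poll(), 'mydev', 'bus', 'buf'
 * and 'len' are hypothetical names.
 */
#if 0
static void example_rx_poll(struct device *mydev, dma_addr_t bus,
                            void *buf, size_t len)
{
        /* Give the CPU a coherent view of what the device wrote. */
        swiotlb_sync_single_for_cpu(mydev, bus, len, DMA_FROM_DEVICE);

        /* ... inspect or copy the data in 'buf' ... */

        /* Hand the (still mapped) buffer back to the device. */
        swiotlb_sync_single_for_device(mydev, bus, len, DMA_FROM_DEVICE);
}
#endif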

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_single
 * interface. Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the
 * same here.
 */
int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
               int dir)
{
        struct phys_addr buffer;
        dma_addr_t dev_addr;
        char *map;
        int i;

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nelems; i++, sg++) {
                dev_addr = SG_ENT_PHYS_ADDRESS(sg);
                if (address_needs_mapping(hwdev, dev_addr)) {
                        buffer.page = sg->page;
                        buffer.offset = sg->offset;
                        map = map_single(hwdev, buffer, sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
                                swiotlb_unmap_sg(hwdev, sg - i, i, dir);
                                sg[0].dma_length = 0;
                                return 0;
                        }
                        sg->dma_address = (dma_addr_t)virt_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
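
/*
 * Illustrative sketch only (not built): consuming the tagged scatterlist.
 * After a successful swiotlb_map_sg() each element's bus address and length
 * are read with sg_dma_address()/sg_dma_length(). example_map_sg(),
 * setup_hw_descriptor(), 'mydev' and 'nents' are hypothetical names.
 */
#if 0
static int example_map_sg(struct device *mydev, struct scatterlist *sg,
                          int nents)
{
        int i, mapped;

        mapped = swiotlb_map_sg(mydev, sg, nents, DMA_TO_DEVICE);
        if (mapped == 0)
                return -ENOMEM; /* map_sg users must handle failure */

        for (i = 0; i < mapped; i++)
                /* program one hardware descriptor per mapped element */
                setup_hw_descriptor(sg_dma_address(&sg[i]),
                                    sg_dma_length(&sg[i]));

        /* ... run the transfer, then swiotlb_unmap_sg(mydev, sg, nents, dir) */
        return 0;
}
#endif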

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
                 int dir)
{
        int i;

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        unmap_single(hwdev,
                                     (void *)bus_to_virt(sg->dma_address),
                                     sg->dma_length, dir);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                        int nelems, int dir)
{
        int i;

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        sync_single(hwdev,
                                    (void *)bus_to_virt(sg->dma_address),
                                    sg->dma_length, dir);
}

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                           int nelems, int dir)
{
        int i;

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        sync_single(hwdev,
                                    (void *)bus_to_virt(sg->dma_address),
                                    sg->dma_length, dir);
}

dma_addr_t
swiotlb_map_page(struct device *hwdev, struct page *page,
                 unsigned long offset, size_t size,
                 enum dma_data_direction direction)
{
        struct phys_addr buffer;
        dma_addr_t dev_addr;
        char *map;

        dev_addr = page_to_phys(page) + offset;
        if (address_needs_mapping(hwdev, dev_addr)) {
                buffer.page = page;
                buffer.offset = offset;
                map = map_single(hwdev, buffer, size, direction);
                if (!map) {
                        swiotlb_full(hwdev, size, direction, 1);
                        map = io_tlb_overflow_buffer;
                }
                dev_addr = (dma_addr_t)virt_to_bus(map);
        }

        return dev_addr;
}

void
swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
                   size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        if (in_swiotlb_aperture(dma_address))
                unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
}

int
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
        return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}

/*
 * Return whether the given PCI device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported (struct device *hwdev, u64 mask)
{
        return (mask >= (iotlb_bus_end - 1));
}

EXPORT_SYMBOL(swiotlb_init);
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_map_page);
EXPORT_SYMBOL(swiotlb_unmap_page);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_dma_supported);

/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */