debuggers.hg

xen/arch/ia64/patch/linux-2.6.11/swiotlb.c @ 4615:58efb3448933

bitkeeper revision 1.1327.1.1 (426536d2PUqtjTi2v06bzD10RFwarg)

Merge bk://xen.bkbits.net/xeno-unstable.bk
into bkbits.net:/repos/x/xen-ia64/xeno-unstable-ia64.bk
author xen-ia64.adm@bkbits.net
date Tue Apr 19 16:50:26 2005 +0000 (2005-04-19)
parents f1c946e1226a
children 5b9e241131fb
 swiotlb.c |   21 +++++++++++++--------
 1 files changed, 13 insertions(+), 8 deletions(-)

Index: linux-2.6.11-xendiffs/arch/ia64/lib/swiotlb.c
===================================================================
--- linux-2.6.11-xendiffs.orig/arch/ia64/lib/swiotlb.c	2005-04-08 12:13:54.040202667 -0500
+++ linux-2.6.11-xendiffs/arch/ia64/lib/swiotlb.c	2005-04-08 12:19:09.170367318 -0500
@@ -124,8 +124,11 @@ swiotlb_init_with_default_size (size_t d
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
-					       (1 << IO_TLB_SHIFT));
+	/* FIXME: Do we really need swiotlb in HV? If all memory chunks
+	 * presented to the guest as <4G are actually <4G in machine range,
+	 * there is no DMA intervention from HV...
+	 */
+	io_tlb_start = alloc_xenheap_pages(get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
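
The change above swaps interfaces, not just allocators: alloc_bootmem_low_pages() takes a size in bytes, while Xen's alloc_xenheap_pages() takes a power-of-two page order, which is why the byte count is now wrapped in get_order(). As a rough standalone sketch of what that conversion computes (demo_get_order is an illustrative reimplementation, assuming 16KB pages as commonly configured on ia64; the real helper lives in the kernel headers):

    #include <stdio.h>

    #define PAGE_SHIFT 14                   /* 16KB pages, a common ia64 config */

    /* Smallest order such that (PAGE_SIZE << order) >= size. */
    static unsigned int demo_get_order(unsigned long size)
    {
        unsigned int order = 0;

        size = (size - 1) >> PAGE_SHIFT;    /* round up to whole pages */
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        /* 1024 slabs of 1 << IO_TLB_SHIFT (2KB) = 2MB -> order 7 */
        printf("order = %u\n", demo_get_order(1024UL * (1 << 11)));
        return 0;
    }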
@@ -135,16 +138,16 @@ swiotlb_init_with_default_size (size_t d
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	io_tlb_list = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(int)));
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
+	io_tlb_orig_addr = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(char *)));

 	/*
 	 * Get the overflow emergency buffer
 	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	io_tlb_overflow_buffer = alloc_xenheap_pages(get_order(io_tlb_overflow));
 	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
 	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
 }
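
The loop kept intact by this hunk is the heart of swiotlb's free-slot accounting: io_tlb_list[i] records how many contiguous free slabs extend from slot i to the end of its IO_TLB_SEGSIZE-aligned segment, so multi-slab mappings never straddle a segment boundary. A minimal standalone sketch of that initialization (IO_TLB_SEGSIZE and the OFFSET() macro as in 2.6.11's swiotlb.c; the slab count is an arbitrary demo value):

    #include <stdio.h>

    #define IO_TLB_SEGSIZE 128      /* slabs per segment, as in swiotlb.c */
    #define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

    int main(void)
    {
        enum { NSLABS = 4 * IO_TLB_SEGSIZE };   /* arbitrary demo size */
        int io_tlb_list[NSLABS];
        int i;

        for (i = 0; i < NSLABS; i++)
            io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);

        /* Counts ramp down 128..1 within each segment, then reset. */
        printf("[0]=%d [1]=%d [127]=%d [128]=%d\n",
               io_tlb_list[0], io_tlb_list[1],
               io_tlb_list[127], io_tlb_list[128]);
        return 0;
    }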
@@ -328,13 +331,13 @@ swiotlb_alloc_coherent(struct device *hw
 	 */
 	flags |= GFP_DMA;

-	ret = (void *)__get_free_pages(flags, order);
+	ret = (void *)alloc_xenheap_pages(get_order(size));
 	if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
 		 */
-		free_pages((unsigned long) ret, order);
+		free_xenheap_pages((unsigned long) ret, order);
 		ret = NULL;
 	}
 	if (!ret) {
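
Both the bootmem and xenheap versions of swiotlb_alloc_coherent() follow the same optimistic pattern: allocate ordinary pages first, test whether the device's DMA mask can actually reach the resulting physical address, and only fall back to the bounce buffer if not. A schematic sketch of that control flow (the alloc/free/bounce functions are illustrative stubs standing in for the real allocator and swiotlb_map_single(); the mask test mirrors 2.6.11's address_needs_mapping()):

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative stubs; not the Linux or Xen APIs. */
    static void *heap_alloc(size_t size)        { return malloc(size); }
    static void  heap_free(void *p)             { free(p); }
    static void *bounce_map(size_t size)        { return malloc(size); }
    static uint64_t virt_to_phys_stub(void *v)  { return (uint64_t)(uintptr_t)v; }

    /* True if addr falls outside the device's DMA mask. */
    static int address_needs_mapping(uint64_t dma_mask, uint64_t addr)
    {
        return (addr & ~dma_mask) != 0;
    }

    static void *alloc_coherent_sketch(uint64_t dma_mask, size_t size)
    {
        void *ret = heap_alloc(size);

        if (ret && address_needs_mapping(dma_mask, virt_to_phys_stub(ret))) {
            /* Device can't reach this memory; retry via the bounce buffer. */
            heap_free(ret);
            ret = NULL;
        }
        if (!ret)
            ret = bounce_map(size);
        return ret;
    }

    int main(void)
    {
        void *buf = alloc_coherent_sketch((1ULL << 32) - 1, 4096);
        free(buf);  /* both stub paths used malloc() */
        return 0;
    }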
@@ -372,7 +375,7 @@ swiotlb_free_coherent(struct device *hwd
 {
 	if (!(vaddr >= (void *)io_tlb_start
 	      && vaddr < (void *)io_tlb_end))
-		free_pages((unsigned long) vaddr, get_order(size));
+		free_xenheap_pages((unsigned long) vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
 		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
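
The free side has to undo whichever branch the allocation took, and it decides by address alone: a pointer inside [io_tlb_start, io_tlb_end) came from the bounce buffer and is unmapped, while anything else goes back to the page allocator with the order recomputed from size, matching what the allocation used. A condensed sketch of that dispatch (the aperture bounds and release functions are illustrative placeholders):

    #include <stddef.h>

    /* Illustrative aperture bounds, set once at init in the real code. */
    static char *io_tlb_start, *io_tlb_end;

    static void release_heap_pages(void *vaddr) { (void)vaddr; /* to page allocator */ }
    static void bounce_unmap(void *vaddr)       { (void)vaddr; /* free bounce slabs */ }

    static void free_coherent_sketch(void *vaddr)
    {
        char *p = vaddr;

        if (!(p >= io_tlb_start && p < io_tlb_end))
            release_heap_pages(vaddr);  /* ordinary heap pages */
        else
            bounce_unmap(vaddr);        /* was bounced: unmap instead */
    }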
@@ -388,8 +391,10 @@ swiotlb_full(struct device *dev, size_t
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */
+#ifndef XEN
 	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
 	       "device %s\n", size, dev ? dev->bus_id : "?");
+#endif

 	if (size > io_tlb_overflow && do_panic) {
 		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
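
The final hunk sidesteps a porting problem rather than solving one: struct device and dev->bus_id are Linux driver-model notions with no counterpart in the hypervisor, so the diagnostic printk is compiled out under #ifndef XEN while the overflow/panic logic below it is kept for both builds. The same gating pattern in miniature (XEN here stands for whatever symbol the hypervisor build defines on its command line):

    #include <stdio.h>

    static void report_swiotlb_full(unsigned long size)
    {
    #ifndef XEN
        /* Host kernel build: emit the full diagnostic. */
        printf("PCI-DMA: out of SW-IOMMU space for %lu bytes\n", size);
    #endif
        /* The overflow/panic handling that follows runs in both builds. */
    }

    int main(void)
    {
        report_swiotlb_full(4096);  /* silent when compiled with -DXEN */
        return 0;
    }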