debuggers.hg

linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/pci-dma.c @ 3289:a169836882cb

bitkeeper revision 1.1159.170.59 (41b4c2fdJ2gj_BWy27Vj3ptayZp_yg)

summary  sync w/ head.
author   cl349@arcadians.cl.cam.ac.uk
date     Mon Dec 06 20:37:17 2004 +0000 (2004-12-06)
parents  13728122c78d
/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */
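
/*
 * Under Xen, the kernel's pseudo-physical pages are not guaranteed to be
 * contiguous in machine memory, so multi-page buffers handed to devices
 * are first exchanged for a contiguous machine extent; see
 * xen_contig_memory() below.
 */
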
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <asm/io.h>
#include <asm-xen/balloon.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define pte_offset_kernel pte_offset
#endif
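
/*
 * Book-keeping for a per-device coherent memory pool declared with
 * dma_declare_coherent_memory(): 'size' is the pool size in pages and
 * 'bitmap' tracks which of those pages are currently handed out.
 */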
struct dma_coherent_mem {
        void            *virt_base;
        u32             device_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};
static void
xen_contig_memory(unsigned long vstart, unsigned int order)
{
        /*
         * Ensure multi-page extents are contiguous in machine memory.
         * This code could be cleaned up some, and the number of
         * hypercalls reduced.
         */
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long pfn, i, flags;

        scrub_pages(vstart, 1 << order);

        balloon_lock(flags);

        /* 1. Zap current PTEs, giving away the underlying pages. */
        for (i = 0; i < (1<<order); i++) {
                pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
                pmd = pmd_offset(pgd, vstart + (i*PAGE_SIZE));
                pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
                pfn = pte->pte_low >> PAGE_SHIFT;
                queue_l1_entry_update(pte, 0);
                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
                        INVALID_P2M_ENTRY;
                flush_page_update_queue();
                if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
                                          &pfn, 1, 0) != 1) BUG();
        }
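
        /*
         * At this point all (1<<order) pseudo-physical frames have no
         * machine backing; 'pfn' holds the last machine frame handed back
         * and is reused below to receive the base frame of the new
         * contiguous extent.
         */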
        /* 2. Get a new contiguous memory extent. */
        if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
                                  &pfn, 1, order) != 1) BUG();
        /* 3. Map the new extent in place of old pages. */
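        /*
         * For each page: point the kernel PTE at the new machine frame,
         * queue an update to Xen's machine-to-physical table, and record
         * the new frame in the kernel's phys_to_machine_mapping array.
         */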
        for (i = 0; i < (1<<order); i++) {
                pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
                pmd = pmd_offset(pgd, vstart + (i*PAGE_SIZE));
                pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
                queue_l1_entry_update(
                        pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
                queue_machphys_update(
                        pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
                        pfn+i;
        }
        /* Flush updates through and flush the TLB. */
        xen_tlb_flush();

        balloon_unlock(flags);
}
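
/*
 * Consistent (coherent) DMA buffer allocation.  On pre-2.6 kernels this
 * builds as pci_alloc_consistent(); on 2.6 it provides dma_alloc_coherent().
 * Buffers taken from the normal page allocator are made contiguous in
 * machine memory via xen_contig_memory() before their bus address is
 * returned.
 */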
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                           dma_addr_t *dma_handle)
#else
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, int gfp)
#endif
{
        void *ret;
        unsigned int order = get_order(size);
        unsigned long vstart;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        int gfp = GFP_ATOMIC;

        if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
#else
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
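
        /*
         * If the device has declared its own coherent memory region
         * (dev->dma_mem), try to satisfy the request from that pool
         * before falling back to the page allocator.
         */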
        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(ret, 0, size);
                        return ret;
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        return NULL;
        }
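
        /*
         * Devices that cannot address the full 32-bit space get their
         * buffer from the low ISA DMA zone.
         */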
        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
#endif

        vstart = __get_free_pages(gfp, order);
        ret = (void *)vstart;
        if (ret == NULL)
                return ret;

        xen_contig_memory(vstart, order);

        memset(ret, 0, size);
        *dma_handle = virt_to_bus(ret);

        return ret;
}
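
/*
 * Typical use (illustrative; 'dev', 'ring_size' and 'ring_dma' are
 * placeholder names, not defined in this file):
 *
 *      void *ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *      if (ring == NULL)
 *              return -ENOMEM;
 *      ... program the device with ring_dma, access 'ring' from the CPU ...
 *      dma_free_coherent(dev, ring_size, ring, ring_dma);
 */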
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        free_pages((unsigned long)vaddr, get_order(size));
}
#else

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem && vaddr >= mem->virt_base &&
            vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
        } else
                free_pages((unsigned long)vaddr, order);
}
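
/*
 * Per-device coherent memory: a driver or platform code can hand a
 * bus-addressable region to a device here, and subsequent
 * dma_alloc_coherent() calls for that device are carved out of this
 * region (the dev->dma_mem path above).
 */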
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base;
        int pages = size >> PAGE_SHIFT;
        /* bytes needed for a bitmap of 'pages' bits */
        int bitmap_size = ((pages + BITS_PER_LONG - 1) / BITS_PER_LONG) *
                          sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
        dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;
        memset(dev->dma_mem->bitmap, 0, bitmap_size);

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        /* bitmap allocation failed: release the descriptor itself */
        kfree(dev->dma_mem);
        dev->dma_mem = NULL;
 out:
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
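
/*
 * Mark part of a declared region as already in use: the matching bits in
 * the pool bitmap are set so that dma_alloc_coherent() will not hand the
 * range out again, and the corresponding kernel virtual address is
 * returned.
 */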
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1)
                    >> PAGE_SHIFT;
        int pos, err;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) */