debuggers.hg: linux-2.6.10-rc2-xen-sparse/arch/xen/i386/mm/ioremap.c @ 3289:a169836882cb

bitkeeper revision 1.1159.170.59 (41b4c2fdJ2gj_BWy27Vj3ptayZp_yg)

commit message: sync w/ head.

author    cl349@arcadians.cl.cam.ac.uk
date      Mon Dec 06 20:37:17 2004 +0000 (2004-12-06)
parents   f65b65977b19
children  d455acded006
/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

#ifndef CONFIG_XEN_PHYSDEV_ACCESS

void * __ioremap(unsigned long phys_addr, unsigned long size,
                 unsigned long flags)
{
        return NULL;
}

void *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        return NULL;
}

void iounmap(volatile void __iomem *addr)
{
}

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
        return NULL;
}

void __init bt_iounmap(void *addr, unsigned long size)
{
}

#else

/*
 * Does @address reside within a non-highmem page that is local to this virtual
 * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
 * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
 * why this works.
 */
static inline int is_local_lowmem(unsigned long address)
{
        extern unsigned long max_low_pfn;
        unsigned long mfn = address >> PAGE_SHIFT;
        unsigned long pfn = mfn_to_pfn(mfn);
        return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
}
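
/*
 * Illustrative sketch (not part of the original file): for one of this
 * VM's own lowmem frames the mfn -> pfn -> mfn round trip is stable,
 * while for an I/O page or a frame owned by another domain the
 * machine-to-physical entry does not map back, so the test fails:
 *
 *      unsigned long mfn = addr >> PAGE_SHIFT;   // addr is hypothetical
 *      unsigned long pfn = mfn_to_pfn(mfn);
 *      // local RAM:     pfn < max_low_pfn && pfn_to_mfn(pfn) == mfn
 *      // foreign frame: pfn_to_mfn(pfn) != mfn, so not "local lowmem"
 */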

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void __iomem *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0x0 && last_addr < 0x100000)
                return isa_bus_to_virt(phys_addr);
#endif

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (is_local_lowmem(phys_addr)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = bus_to_virt(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *) area->addr;
        if (direct_remap_area_pages(&init_mm, (unsigned long) addr,
                                    phys_addr, size,
                                    __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                             _PAGE_DIRTY | _PAGE_ACCESSED | flags),
                                    DOMID_IO)) {
                vunmap((void __force *) addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char __iomem *)addr);
}
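
/*
 * Usage sketch (not part of the original file; the address is made up):
 * map a 4-byte device register at a non-page-aligned high physical
 * address. As the NOTE above says, __ioremap() page-aligns the mapping
 * itself and folds the sub-page offset back into the returned pointer.
 *
 *      void __iomem *reg = __ioremap(0xfebff004UL, 4, 0);
 *      if (reg) {
 *              unsigned int v = readl(reg);  // MMIO read via the mapping
 *              iounmap(reg);
 *      }
 */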

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        unsigned long last_addr;
        void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
        if (!p)
                return p;

        /* Guaranteed to be > phys_addr, as per __ioremap() */
        last_addr = phys_addr + size - 1;

        if (is_local_lowmem(last_addr)) {
                struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
                unsigned long npages;

                phys_addr &= PAGE_MASK;

                /* This might overflow and become zero.. */
                last_addr = PAGE_ALIGN(last_addr);

                /* .. but that's ok, because modulo-2**n arithmetic will make
                 * the page-aligned "last - first" come out right.
                 */
                npages = (last_addr - phys_addr) >> PAGE_SHIFT;

                if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
                        iounmap(p);
                        p = NULL;
                }
                global_flush_tlb();
        }

        return p;
}
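
/*
 * Usage sketch (not part of the original file): the usual driver pattern
 * for an uncached MMIO region; pdev and CTRL_REG are hypothetical.
 *
 *      void __iomem *base = ioremap_nocache(pci_resource_start(pdev, 0),
 *                                           pci_resource_len(pdev, 0));
 *      if (base) {
 *              writel(1, base + CTRL_REG);   // uncached register write
 *              iounmap(base);                // required, per the doc above
 *      }
 */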

void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p;
        if ((void __force *) addr <= high_memory)
                return;
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
                return;
#endif
        p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                return;
        }

        if (p->flags && is_local_lowmem(p->phys_addr)) {
                change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
                                 p->size >> PAGE_SHIFT,
                                 PAGE_KERNEL);
                global_flush_tlb();
        }
        kfree(p);
}

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0x0 && last_addr < 0x100000)
                return isa_bus_to_virt(phys_addr);
#endif

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS)
                return NULL;

        /*
         * Ok, go for it..
         */
        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
                set_fixmap_ma(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        return (void *) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}

void __init bt_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
                return;
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
                return;
#endif
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
                clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
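
/*
 * Usage sketch (not part of the original file; the address is made up):
 * bt_ioremap() serves early boot code, before get_vm_area() and the rest
 * of the vmalloc machinery are available, by borrowing the FIX_BTMAP
 * fixmap slots. Each mapping must be released with bt_iounmap().
 *
 *      void __init *tbl = bt_ioremap(0x000e0000UL, 0x1000);
 *      if (tbl) {
 *              // ... inspect the firmware table ...
 *              bt_iounmap(tbl, 0x1000);
 *      }
 */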

#endif /* CONFIG_XEN_PHYSDEV_ACCESS */

/* These hacky macros avoid phys->machine translations. */
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) \
        __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#define direct_mk_pte_phys(physpage, pgprot) \
        __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
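
/*
 * Expansion sketch (not part of the original file; the address is made
 * up): for a pseudo-physical address 0x12345000,
 *
 *      direct_mk_pte_phys(0x12345000, PAGE_KERNEL)
 *
 * builds the raw PTE value ((0x12345000 >> PAGE_SHIFT) << PAGE_SHIFT) |
 * pgprot_val(PAGE_KERNEL) as-is, whereas the normal mk_pte_phys() would
 * first translate the pseudo-physical frame to a machine frame.
 */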

static inline void direct_remap_area_pte(pte_t *pte,
                                         unsigned long address,
                                         unsigned long size,
                                         mmu_update_t **v)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();

        do {
                (*v)->ptr = virt_to_machine(pte);
                (*v)++;
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}

static inline int direct_remap_area_pmd(struct mm_struct *mm,
                                        pmd_t *pmd,
                                        unsigned long address,
                                        unsigned long size,
                                        mmu_update_t **v)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        if (address >= end)
                BUG();
        do {
                pte_t *pte = (mm == &init_mm) ?
                        pte_alloc_kernel(mm, pmd, address) :
                        pte_alloc_map(mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                direct_remap_area_pte(pte, address, end - address, v);
                pte_unmap(pte);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

int __direct_remap_area_pages(struct mm_struct *mm,
                              unsigned long address,
                              unsigned long size,
                              mmu_update_t *v)
{
        pgd_t *dir;
        unsigned long end = address + size;
        int error;

        dir = pgd_offset(mm, address);
        if (address >= end)
                BUG();
        spin_lock(&mm->page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(mm, dir, address);
                if (!pmd) {
                        /* Don't leak the page-table lock on failure. */
                        spin_unlock(&mm->page_table_lock);
                        return -ENOMEM;
                }
                error = direct_remap_area_pmd(mm, pmd, address, end - address, &v);
                if (error) {
                        spin_unlock(&mm->page_table_lock);
                        return error;
                }
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        spin_unlock(&mm->page_table_lock);
        return 0;
}

int direct_remap_area_pages(struct mm_struct *mm,
                            unsigned long address,
                            unsigned long machine_addr,
                            unsigned long size,
                            pgprot_t prot,
                            domid_t domid)
{
        int i;
        unsigned long start_address;
#define MAX_DIRECTMAP_MMU_QUEUE 130
        mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;

        u[0].ptr = MMU_EXTENDED_COMMAND;
        u[0].val = MMUEXT_SET_FOREIGNDOM;
        u[0].val |= (unsigned long)domid << 16;
        v = w = &u[1];

        start_address = address;

        flush_cache_all();

        for (i = 0; i < size; i += PAGE_SIZE) {
                if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
                        /* Fill in the PTE pointers. */
                        __direct_remap_area_pages(mm,
                                                  start_address,
                                                  address - start_address,
                                                  w);

                        if (HYPERVISOR_mmu_update(u, v - u, NULL) < 0)
                                return -EFAULT;
                        v = w;
                        start_address = address;
                }

                /*
                 * Fill in the machine address: PTE ptr is done later by
                 * __direct_remap_area_pages().
                 */
                v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);

                machine_addr += PAGE_SIZE;
                address += PAGE_SIZE;
                v++;
        }

        if (v != w) {
                /* Get the PTE pointers filled in. */
                __direct_remap_area_pages(mm,
                                          start_address,
                                          address - start_address,
                                          w);
                if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0))
                        return -EFAULT;
        }

        flush_tlb_all();

        return 0;
}
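
/*
 * Usage sketch (not part of the original file; vaddr and maddr are
 * hypothetical page-aligned addresses): this is the call that __ioremap()
 * above issues, batching one mmu_update_t per page and tagging the
 * updates with MMUEXT_SET_FOREIGNDOM so the hypervisor accounts the
 * frames to DOMID_IO.
 *
 *      rc = direct_remap_area_pages(&init_mm, vaddr, maddr, PAGE_SIZE,
 *                                   __pgprot(_PAGE_PRESENT | _PAGE_RW |
 *                                            _PAGE_DIRTY | _PAGE_ACCESSED),
 *                                   DOMID_IO);
 */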

EXPORT_SYMBOL(direct_remap_area_pages);