
view xenolinux-2.4.21-sparse/arch/xeno/mm/ioremap.c @ 648:cda951fc1bef

bitkeeper revision 1.341 (3f1120a2WW6KGE81TArq_p654xy38Q)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/anfs/scratch/labyrinth/iap10/xeno-clone/xeno.bk
author iap10@labyrinth.cl.cam.ac.uk
date Sun Jul 13 09:04:34 2003 +0000 (2003-07-13)
parents 6879a4610638 9339f3942f4e
children 2302a65a8f7a
/*
 * arch/xeno/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Modifications for Xenolinux (c) 2003 Keir Fraser
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/mmu.h>

#if defined(CONFIG_XENO_PRIV)

#define direct_set_pte(pteptr, pteval) \
    queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) \
    __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#define direct_mk_pte_phys(physpage, pgprot) \
    __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
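
/*
 * Illustrative sketch (editorial, not part of the original file): how the
 * macros above compose.  direct_mk_pte_phys() builds a PTE value directly
 * from a machine address, and direct_set_pte() queues an L1 update with
 * PGREQ_UNCHECKED_UPDATE set, telling Xen to skip its normal pagetable
 * validation -- hence the SIF_PRIVILEGED check in __ioremap() below.
 */
#if 0 /* example only; hypothetical helper */
static void example_install_io_pte(pte_t *pte, unsigned long machine_addr,
                                   pgprot_t prot)
{
    /* Build a PTE referring directly to a machine frame... */
    pte_t val = pte_mkio(direct_mk_pte_phys(machine_addr, prot));
    /* ...then queue it for Xen to install without validation. */
    direct_set_pte(pte, val);
}
#endif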
/*
 * Fill one page of PTEs: map virtual [address, address+size) within a
 * single PMD entry onto successive machine pages starting at machine_addr.
 */
static inline void direct_remap_area_pte(pte_t *pte,
                                         unsigned long address,
                                         unsigned long size,
                                         unsigned long machine_addr,
                                         pgprot_t prot)
{
    unsigned long end;

    address &= ~PMD_MASK;
    end = address + size;
    if (end > PMD_SIZE)
        end = PMD_SIZE;
    if (address >= end)
        BUG();
    do {
        if (!pte_none(*pte)) {
            printk("direct_remap_area_pte: page already exists\n");
            BUG();
        }
        direct_set_pte(pte, pte_mkio(direct_mk_pte_phys(machine_addr, prot)));
        address += PAGE_SIZE;
        machine_addr += PAGE_SIZE;
        pte++;
    } while (address && (address < end));
}

/*
 * Allocate and fill PTE pages for [address, address+size) within a
 * single PGD entry.
 */
static inline int direct_remap_area_pmd(struct mm_struct *mm,
                                        pmd_t *pmd,
                                        unsigned long address,
                                        unsigned long size,
                                        unsigned long machine_addr,
                                        pgprot_t prot)
{
    unsigned long end;

    address &= ~PGDIR_MASK;
    end = address + size;
    if (end > PGDIR_SIZE)
        end = PGDIR_SIZE;
    machine_addr -= address;
    if (address >= end)
        BUG();
    do {
        pte_t * pte = pte_alloc(mm, pmd, address);
        if (!pte)
            return -ENOMEM;
        direct_remap_area_pte(pte, address, end - address,
                              address + machine_addr, prot);
        address = (address + PMD_SIZE) & PMD_MASK;
        pmd++;
    } while (address && (address < end));
    return 0;
}

/*
 * Top-level walker: map 'size' bytes of the machine address space,
 * starting at 'machine_addr', into 'mm' at virtual 'address'.
 */
int direct_remap_area_pages(struct mm_struct *mm,
                            unsigned long address,
                            unsigned long machine_addr,
                            unsigned long size,
                            pgprot_t prot)
{
    int error = 0;
    pgd_t * dir;
    unsigned long end = address + size;

    machine_addr -= address;
    dir = pgd_offset(mm, address);
    flush_cache_all();
    if (address >= end)
        BUG();
    spin_lock(&mm->page_table_lock);
    do {
        pmd_t *pmd = pmd_alloc(mm, dir, address);
        error = -ENOMEM;
        if (!pmd)
            break;
        error = direct_remap_area_pmd(mm, pmd, address, end - address,
                                      machine_addr + address, prot);
        if (error)
            break;
        address = (address + PGDIR_SIZE) & PGDIR_MASK;
        dir++;
    } while (address && (address < end));
    spin_unlock(&mm->page_table_lock);
    flush_tlb_all();
    return error;
}
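
/*
 * Note on the arithmetic above (editorial, not in the original):
 * machine_addr is pre-biased by subtracting the starting virtual address,
 * so that "machine_addr + address" at each level recovers the machine
 * address matching the current virtual address even as 'address' advances
 * in PGDIR/PMD-sized steps.  E.g. mapping machine 0x10000000 at virtual
 * 0xc8000000: the bias is 0x10000000 - 0xc8000000 (mod 2^32), and when
 * the walk reaches virtual 0xc8400000 the pte level computes
 * bias + 0xc8400000 == 0x10400000, as required.
 */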
#endif /* CONFIG_XENO_PRIV */

/*
 * Remap an arbitrary machine address space into the kernel virtual
 * address space. Needed when a privileged instance of Xenolinux wants
 * to access space outside its world directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long machine_addr,
                 unsigned long size,
                 unsigned long flags)
{
#if defined(CONFIG_XENO_PRIV)
    void * addr;
    struct vm_struct * area;
    unsigned long offset, last_addr;
    pgprot_t prot;

    /* Only privileged Xenolinux can make unchecked pagetable updates. */
    if ( !(start_info.flags & SIF_PRIVILEGED) )
        return NULL;

    /* Don't allow wraparound or zero size. */
    last_addr = machine_addr + size - 1;
    if (!size || last_addr < machine_addr)
        return NULL;

    /* Mappings have to be page-aligned. Align on last_addr+1, not
       last_addr, so a range whose final byte begins a new page still
       gets that page mapped. */
    offset = machine_addr & ~PAGE_MASK;
    machine_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr + 1) - machine_addr;

    /* Ok, go for it. */
    area = get_vm_area(size, VM_IOREMAP);
    if (!area)
        return NULL;
    addr = area->addr;
    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
                    _PAGE_ACCESSED | flags);
    if (direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(addr),
                                machine_addr, size, prot)) {
        vfree(addr);
        return NULL;
    }
    return (void *) (offset + (char *)addr);
#else
    return NULL;
#endif
}
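
/*
 * Usage sketch (illustrative only; the machine address is made up): a
 * privileged guest mapping a device's MMIO registers.  Any sub-page
 * offset of the original address is carried over in the returned pointer.
 */
#if 0 /* example only */
static void example_ioremap_usage(void)
{
    void *regs = __ioremap(0xfebf0000UL, PAGE_SIZE, 0);
    if (regs != NULL) {
        unsigned int id = readl(regs);  /* MMIO read via the new mapping */
        printk("device id: %08x\n", id);
        iounmap(regs);
    }
}
#endif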
void iounmap(void *addr)
{
    vfree((void *)((unsigned long)addr & PAGE_MASK));
}

void __init *bt_ioremap(unsigned long machine_addr, unsigned long size)
{
    unsigned long offset, last_addr;
    unsigned int nrpages;
    enum fixed_addresses idx;

    /* Don't allow wraparound or zero size. */
    last_addr = machine_addr + size - 1;
    if (!size || last_addr < machine_addr)
        return NULL;

    /* Mappings have to be page-aligned (same last_addr+1 fix as above). */
    offset = machine_addr & ~PAGE_MASK;
    machine_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr + 1) - machine_addr;

    /* Mappings have to fit in the FIX_BTMAP area. */
    nrpages = size >> PAGE_SHIFT;
    if (nrpages > NR_FIX_BTMAPS)
        return NULL;

    /* Ok, go for it.. */
    idx = FIX_BTMAP_BEGIN;
    while (nrpages > 0) {
        set_fixmap(idx, machine_addr);
        //unsigned long address = __fix_to_virt(idx);
        //direct_set_pte(address, direct_mk_pte_phys(machine_addr, PAGE_KERNEL_NOCACHE));
        machine_addr += PAGE_SIZE;
        --idx;
        --nrpages;
    }

    flush_tlb_all();

    return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
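
/*
 * Usage sketch (illustrative; the address and signature check are just an
 * example, and memcmp would need <linux/string.h>): bt_ioremap() serves
 * boot-time code that runs before get_vm_area()/vmalloc are usable; it
 * can map at most NR_FIX_BTMAPS pages via the fixmap slots.
 */
#if 0 /* example only */
static int __init example_bt_ioremap_usage(void)
{
    char *p = (char *)bt_ioremap(0xf0000UL, PAGE_SIZE);
    if (p != NULL && memcmp(p, "RSD PTR ", 8) == 0)
        printk("found an ACPI RSDP signature\n");
    return 0;
}
#endif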
#if 0 /* We don't support these functions. They shouldn't be required. */
void __init *bt_ioremap(unsigned long machine_addr, unsigned long size) {}
void __init bt_iounmap(void *addr, unsigned long size) {}
#endif