debuggers.hg

view xenolinux-2.4.21-sparse/arch/xeno/mm/ioremap.c @ 649:2302a65a8f7a

bitkeeper revision 1.342 (3f1153a1RLTmwr7Jo9U1gUChA4e0Jw)

cleanup
author iap10@labyrinth.cl.cam.ac.uk
date Sun Jul 13 12:42:09 2003 +0000 (2003-07-13)
parents cda951fc1bef
children
line source
1 /*
2 * arch/xeno/mm/ioremap.c
3 *
4 * Re-map IO memory to kernel address space so that we can access it.
5 *
6 * (C) Copyright 1995 1996 Linus Torvalds
7 *
8 * Modifications for Xenolinux (c) 2003 Keir Fraser
9 */
11 #include <linux/slab.h>
12 #include <linux/mm.h>
13 #include <linux/mman.h>
14 #include <linux/vmalloc.h>
15 #include <asm/io.h>
16 #include <asm/pgalloc.h>
17 #include <asm/uaccess.h>
18 #include <asm/tlb.h>
19 #include <asm/mmu.h>
21 #if defined(CONFIG_XENO_PRIV)
/*
 * Helpers for building page-table entries that refer directly to machine
 * frames (not pseudo-physical ones).  direct_set_pte() queues the update
 * to Xen with PGREQ_UNCHECKED_UPDATE, i.e. bypassing the hypervisor's
 * normal page-table validation -- which is why __ioremap() below refuses
 * to run in non-privileged domains.
 */
23 #define direct_set_pte(pteptr, pteval) \
24 queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
25 #define __direct_pte(x) ((pte_t) { (x) } )
26 #define __direct_mk_pte(page_nr,pgprot) \
27 __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
/* Build a pte for a machine address: convert to a frame number, then OR
   in the protection bits. */
28 #define direct_mk_pte_phys(physpage, pgprot) \
29 __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
/*
 * Populate one PTE page with direct machine-address mappings.
 * 'address' is reduced to its offset within the PMD, so at most PMD_SIZE
 * bytes are handled per call; the _pmd level splits larger regions.
 * Every target entry must be empty beforehand (BUG otherwise).
 */
32 static inline void direct_remap_area_pte(pte_t *pte,
33 unsigned long address,
34 unsigned long size,
35 unsigned long machine_addr,
36 pgprot_t prot)
37 {
38 unsigned long end;
40 address &= ~PMD_MASK;
41 end = address + size;
42 if (end > PMD_SIZE)
43 end = PMD_SIZE;
44 if (address >= end)
45 BUG();
46 do {
/* Refuse to silently overwrite an existing mapping. */
47 if (!pte_none(*pte)) {
48 printk("direct_remap_area_pte: page already exists\n");
49 BUG();
50 }
/* pte_mkio marks the entry as an I/O mapping; the write is queued to
   Xen as an unchecked update via direct_set_pte(). */
51 direct_set_pte(pte, pte_mkio(direct_mk_pte_phys(machine_addr, prot)));
52 address += PAGE_SIZE;
53 machine_addr += PAGE_SIZE;
54 pte++;
/* The 'address' test guards against wrap to zero at the address-space top. */
55 } while (address && (address < end));
56 }
/*
 * Map 'size' bytes at 'address' across one PGD entry's worth of PMDs,
 * allocating PTE pages as needed.  Returns 0 on success or -ENOMEM if a
 * PTE page could not be allocated.
 */
58 static inline int direct_remap_area_pmd(struct mm_struct *mm,
59 pmd_t *pmd,
60 unsigned long address,
61 unsigned long size,
62 unsigned long machine_addr,
63 pgprot_t prot)
64 {
65 unsigned long end;
67 address &= ~PGDIR_MASK;
68 end = address + size;
69 if (end > PGDIR_SIZE)
70 end = PGDIR_SIZE;
/* Bias machine_addr so that 'address + machine_addr' below yields the
   machine address corresponding to each virtual address. */
71 machine_addr -= address;
72 if (address >= end)
73 BUG();
74 do {
75 pte_t * pte = pte_alloc(mm, pmd, address);
76 if (!pte)
77 return -ENOMEM;
78 direct_remap_area_pte(pte, address, end - address,
79 address + machine_addr, prot);
/* Advance to the start of the next PMD. */
80 address = (address + PMD_SIZE) & PMD_MASK;
81 pmd++;
82 } while (address && (address < end));
83 return 0;
84 }
/*
 * Install direct machine-address mappings for [address, address+size)
 * in 'mm', walking the page directory and allocating PMD/PTE levels as
 * needed.  Returns 0 on success or -ENOMEM.  Side effects: takes
 * mm->page_table_lock for the walk and flushes the cache before and the
 * TLB after the update.
 */
86 int direct_remap_area_pages(struct mm_struct *mm,
87 unsigned long address,
88 unsigned long machine_addr,
89 unsigned long size,
90 pgprot_t prot)
91 {
92 int error = 0;
93 pgd_t * dir;
94 unsigned long end = address + size;
/* Same bias trick as direct_remap_area_pmd: callee gets machine_addr+address. */
96 machine_addr -= address;
97 dir = pgd_offset(mm, address);
98 flush_cache_all();
99 if (address >= end)
100 BUG();
101 spin_lock(&mm->page_table_lock);
102 do {
103 pmd_t *pmd = pmd_alloc(mm, dir, address);
/* Assume allocation failure; overwritten on success below. */
104 error = -ENOMEM;
105 if (!pmd)
106 break;
107 error = direct_remap_area_pmd(mm, pmd, address, end - address,
108 machine_addr + address, prot);
109 if (error)
110 break;
111 address = (address + PGDIR_SIZE) & PGDIR_MASK;
112 dir++;
113 } while (address && (address < end));
114 spin_unlock(&mm->page_table_lock);
115 flush_tlb_all();
116 return error;
117 }
119 #endif /* CONFIG_XENO_PRIV */
122 /*
123 * Remap an arbitrary machine address space into the kernel virtual
124 * address space. Needed when a privileged instance of Xenolinux wants
125 * to access space outside its world directly.
126 *
127 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
128 * have to convert them into an offset in a page-aligned mapping, but the
129 * caller shouldn't need to know that small detail.
130 */
/*
 * __ioremap: map 'size' bytes of machine address space into the kernel's
 * vmalloc region with the given extra protection flags.  Returns the
 * kernel virtual address (with the caller's sub-page offset preserved),
 * or NULL on failure or when running in a non-privileged domain.
 * Undo with iounmap().
 */
131 void * __ioremap(unsigned long machine_addr,
132 unsigned long size,
133 unsigned long flags)
134 {
135 #if defined(CONFIG_XENO_PRIV)
136 void * addr;
137 struct vm_struct * area;
138 unsigned long offset, last_addr;
139 pgprot_t prot;
141 /* Only privileged Xenolinux can make unchecked pagetable updates. */
142 if ( !(start_info.flags & SIF_PRIVILEGED) )
143 return NULL;
145 /* Don't allow wraparound or zero size */
146 last_addr = machine_addr + size - 1;
147 if (!size || last_addr < machine_addr)
148 return NULL;
150 /* Mappings have to be page-aligned */
151 offset = machine_addr & ~PAGE_MASK;
152 machine_addr &= PAGE_MASK;
153 size = PAGE_ALIGN(last_addr) - machine_addr;
155 /* Ok, go for it */
156 area = get_vm_area(size, VM_IOREMAP);
157 if (!area)
158 return NULL;
159 addr = area->addr;
160 prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
161 _PAGE_ACCESSED | flags);
162 if (direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(addr),
163 machine_addr, size, prot)) {
/* On failure, release the vm area (and any partial mappings with it). */
164 vfree(addr);
165 return NULL;
166 }
/* Re-apply the sub-page offset stripped during alignment. */
167 return (void *) (offset + (char *)addr);
168 #else
/* Non-privileged builds cannot create direct machine mappings. */
169 return NULL;
170 #endif
171 }
/*
 * iounmap: release a mapping created by __ioremap().  The address is
 * rounded down to a page boundary to undo the sub-page offset that
 * __ioremap() added to its return value before handing it to vfree().
 */
173 void iounmap(void *addr)
174 {
175 vfree((void *)((unsigned long)addr & PAGE_MASK));
176 }
178 /* Implementation of boot-time ioremap for the purpose of providing access
179 to the VGA console for privileged domains. Unlike boot-time ioremap on
180 other architectures, ours is permanent and not reclaimed when the vmalloc
181 infrastructure is started. */
/*
 * bt_ioremap: boot-time ioremap using the FIX_BTMAP fixmap slots, usable
 * before the vmalloc infrastructure is up (e.g. to reach the VGA console
 * from a privileged domain).  The mappings are permanent -- there is no
 * working unmap (see the #if 0 stub below).  Returns NULL if size is
 * zero, the range wraps, or it needs more than NR_FIX_BTMAPS pages.
 */
183 void __init *bt_ioremap(unsigned long machine_addr, unsigned long size)
184 {
185 unsigned long offset, last_addr;
186 unsigned int nrpages;
187 enum fixed_addresses idx;
189 /* Don't allow wraparound or zero size */
190 last_addr = machine_addr + size - 1;
191 if (!size || last_addr < machine_addr)
192 return NULL;
194 /*
195 * Mappings have to be page-aligned
196 */
197 offset = machine_addr & ~PAGE_MASK;
198 machine_addr &= PAGE_MASK;
199 size = PAGE_ALIGN(last_addr) - machine_addr;
201 /*
202 * Mappings have to fit in the FIX_BTMAP area.
203 */
204 nrpages = size >> PAGE_SHIFT;
205 if (nrpages > NR_FIX_BTMAPS)
206 return NULL;
208 /*
209 * Ok, go for it..
210 */
211 idx = FIX_BTMAP_BEGIN;
212 while (nrpages > 0) {
213 set_fixmap(idx, machine_addr);
214 machine_addr += PAGE_SIZE;
/* NOTE(review): decrementing idx presumably walks upward in virtual
   address space (standard i386 fixmap layout, where fix_to_virt falls
   as the index rises) -- confirm against this tree's fixmap.h. */
215 --idx;
216 --nrpages;
217 }
219 flush_tlb_all();
/* The first page mapped is at fix_to_virt(FIX_BTMAP_BEGIN); re-apply
   the sub-page offset stripped above. */
221 return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
222 }
/* bt_iounmap is deliberately compiled out: boot-time mappings made by
   bt_ioremap() above are permanent and never reclaimed. */
225 #if 0 /* We don't support these functions. They shouldn't be required. */
226 void __init bt_iounmap(void *addr, unsigned long size) {}
227 #endif