debuggers.hg
changeset 601:0f2825ead641
bitkeeper revision 1.259.4.1 (3f0bed4cJB9LOOhEpc2nIhwKM2NwAA)
ioremap.c:
new file
mkbuildtree, Makefile, dom0_memory.c:
ioremap support in Xenolinux.
.del-io.h~441bb6eed5f4a3f5:
Delete: xenolinux-2.4.21-sparse/include/asm-xeno/io.h
author    kaf24@scramble.cl.cam.ac.uk
date      Wed Jul 09 10:24:12 2003 +0000 (2003-07-09)
parents   e381bd125a43
children  7dd26f39dacb
files     .rootkeys
          xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c
          xenolinux-2.4.21-sparse/arch/xeno/mm/Makefile
          xenolinux-2.4.21-sparse/arch/xeno/mm/ioremap.c
          xenolinux-2.4.21-sparse/include/asm-xeno/io.h
          xenolinux-2.4.21-sparse/mkbuildtree
line diff
--- a/.rootkeys	Tue Jul 08 14:40:18 2003 +0000
+++ b/.rootkeys	Wed Jul 09 10:24:12 2003 +0000
@@ -504,6 +504,7 @@ 3e5a4e66l8Q5Tv-6B3lQIRmaVbFPzg xenolinux
 3e5a4e66TyNNUEXkr5RxqvQhXK1MQA xenolinux-2.4.21-sparse/arch/xeno/mm/get_unmapped_area.c
 3e5a4e668SE9rixq4ahho9rNhLUUFQ xenolinux-2.4.21-sparse/arch/xeno/mm/hypervisor.c
 3e5a4e661gLzzff25pJooKIIWe7IWg xenolinux-2.4.21-sparse/arch/xeno/mm/init.c
+3f0bed43UUdQichXAiVNrjV-y2Kzcg xenolinux-2.4.21-sparse/arch/xeno/mm/ioremap.c
 3e5a4e66qRlSTcjafidMB6ulECADvg xenolinux-2.4.21-sparse/arch/xeno/vmlinux.lds
 3ea53c6em6uzVHSiGqrbbAVofyRY_g xenolinux-2.4.21-sparse/drivers/block/genhd.c
 3e5a4e66mrtlmV75L1tjKDg8RaM5gA xenolinux-2.4.21-sparse/drivers/block/ll_rw_blk.c
@@ -515,7 +516,6 @@ 3e5a4e66SYp_UpAVcF8Lc1wa3Qtgzw xenolinux
 3e5a4e67w_DWgjIJ17Tlossu1LGujQ xenolinux-2.4.21-sparse/include/asm-xeno/highmem.h
 3e5a4e67YtcyDLQsShhCfQwPSELfvA xenolinux-2.4.21-sparse/include/asm-xeno/hw_irq.h
 3e5a4e677VBavzM1UZIEcH1B-RlXMA xenolinux-2.4.21-sparse/include/asm-xeno/hypervisor.h
-3e5a4e67Ulv-Ll8Zp4j2GwMwQ8aAXQ xenolinux-2.4.21-sparse/include/asm-xeno/io.h
 3e5a4e673p7PEOyHFm3nHkYX6HQYBg xenolinux-2.4.21-sparse/include/asm-xeno/irq.h
 3ead095db_LRUXnxaqs0dA1DWhPoQQ xenolinux-2.4.21-sparse/include/asm-xeno/keyboard.h
 3e5a4e67zoNch27qYhEBpr2k6SABOg xenolinux-2.4.21-sparse/include/asm-xeno/mmu.h
--- a/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c	Tue Jul 08 14:40:18 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c	Wed Jul 09 10:24:12 2003 +0000
@@ -20,114 +20,22 @@
 #define MAP_DISCONT 1
 
 extern struct list_head * find_direct(struct list_head *, unsigned long);
-
-/*
- * bd240: functions below perform direct mapping to the real physical pages
- * needed for mapping various hypervisor specific structures needed in dom0
- * userspace by various management applications such as domain builder etc.
- */
-
-#define direct_set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
-
-#define direct_pte_clear(pteptr) queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, 0)
-
-#define __direct_pte(x) ((pte_t) { (x) } )
-#define __direct_mk_pte(page_nr,pgprot) __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-#define direct_mk_pte_phys(physpage, pgprot) __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-static inline void forget_pte(pte_t page)
-{
-    if (!pte_none(page)) {
-        printk("forget_pte: old mapping existed!\n");
-        BUG();
-    }
-}
-
-static inline void direct_remappte_range(pte_t * pte, unsigned long address, unsigned long size,
-                                         unsigned long phys_addr, pgprot_t prot)
-{
-    unsigned long end;
-
-    address &= ~PMD_MASK;
-    end = address + size;
-    if (end > PMD_SIZE)
-        end = PMD_SIZE;
-    do {
-        pte_t oldpage;
-        oldpage = ptep_get_and_clear(pte);
-
-        direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
-
-        forget_pte(oldpage);
-        address += PAGE_SIZE;
-        phys_addr += PAGE_SIZE;
-        pte++;
-    } while (address && (address < end));
-
-}
-
-static inline int direct_remappmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-                                        unsigned long phys_addr, pgprot_t prot)
-{
-    unsigned long end;
-
-    address &= ~PGDIR_MASK;
-    end = address + size;
-    if (end > PGDIR_SIZE)
-        end = PGDIR_SIZE;
-    phys_addr -= address;
-    do {
-        pte_t * pte = pte_alloc(mm, pmd, address);
-        if (!pte)
-            return -ENOMEM;
-        direct_remappte_range(pte, address, end - address, address + phys_addr, prot);
-        address = (address + PMD_SIZE) & PMD_MASK;
-        pmd++;
-    } while (address && (address < end));
-    return 0;
-}
-
-/* Note: this is only safe if the mm semaphore is held when called. */
-int direct_remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
-{
-    int error = 0;
-    pgd_t * dir;
-    unsigned long beg = from;
-    unsigned long end = from + size;
-    struct mm_struct *mm = current->mm;
-
-    phys_addr -= from;
-    dir = pgd_offset(mm, from);
-    flush_cache_range(mm, beg, end);
-    if (from >= end)
-        BUG();
-
-    spin_lock(&mm->page_table_lock);
-    do {
-        pmd_t *pmd = pmd_alloc(mm, dir, from);
-        error = -ENOMEM;
-        if (!pmd)
-            break;
-        error = direct_remappmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
-        if (error)
-            break;
-        from = (from + PGDIR_SIZE) & PGDIR_MASK;
-        dir++;
-    } while (from && (from < end));
-    spin_unlock(&mm->page_table_lock);
-    flush_tlb_range(mm, beg, end);
-    return error;
-}
+extern int direct_remap_area_pages(struct mm_struct *, unsigned long,
+                                   unsigned long, unsigned long, pgprot_t);
+extern void direct_zap_page_range(struct mm_struct *, unsigned long,
+                                  unsigned long);
 
 /*
  * used for remapping discontiguous bits of domain's memory, pages to map are
  * found from frame table beginning at the given first_pg index
  */
 int direct_remap_disc_page_range(unsigned long from,
-                                 unsigned long first_pg, int tot_pages, pgprot_t prot)
+                                 unsigned long first_pg,
+                                 int tot_pages,
+                                 pgprot_t prot)
 {
     dom0_op_t dom0_op;
-    unsigned long *pfns = get_free_page(GFP_KERNEL);
+    unsigned long *pfns = (unsigned long *)get_free_page(GFP_KERNEL);
     unsigned long start = from;
     int pages, i;
 
@@ -145,7 +53,8 @@ int direct_remap_disc_page_range(unsigne
 
     for ( i = 0; i < pages; i++ )
     {
-        if(direct_remap_page_range(start, pfns[i] << PAGE_SHIFT,
+        if(direct_remap_area_pages(current->mm,
+                                   start, pfns[i] << PAGE_SHIFT,
                                    PAGE_SIZE, prot))
             goto out;
@@ -154,15 +63,10 @@ int direct_remap_disc_page_range(unsigne
         start += PAGE_SIZE;
     }
 
 out:
-    free_page(pfns);
+    free_page((unsigned long)pfns);
     return tot_pages;
 }
 
-/* below functions replace standard sys_mmap and sys_munmap which are absolutely useless
- * for direct memory mapping. direct_zap* functions are minor ammendments to the
- * original versions in mm/memory.c. the changes are to enable unmapping of real physical
- * addresses.
- */
 
 unsigned long direct_mmap(unsigned long phys_addr, unsigned long size,
                           pgprot_t prot, int flag, int tot_pages)
@@ -202,7 +106,8 @@ unsigned long direct_mmap(unsigned long
         ret = direct_remap_disc_page_range(addr, phys_addr >> PAGE_SHIFT,
                                            tot_pages, prot);
     } else {
-        ret = direct_remap_page_range(addr, phys_addr, size, prot);
+        ret = direct_remap_area_pages(current->mm,
+                                      addr, phys_addr, size, prot);
     }
 
     if(ret == 0)
@@ -212,104 +117,6 @@ unsigned long direct_mmap(unsigned long
     return ret;
 }
 
-/* most of the checks, refcnt updates, cache stuff have been thrown out as they are not
- * needed
- */
-static inline int direct_zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address,
-                                       unsigned long size)
-{
-    unsigned long offset;
-    pte_t * ptep;
-    int freed = 0;
-
-    if (pmd_none(*pmd))
-        return 0;
-    if (pmd_bad(*pmd)) {
-        pmd_ERROR(*pmd);
-        pmd_clear(pmd);
-        return 0;
-    }
-    ptep = pte_offset(pmd, address);
-    offset = address & ~PMD_MASK;
-    if (offset + size > PMD_SIZE)
-        size = PMD_SIZE - offset;
-    size &= PAGE_MASK;
-    for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
-        pte_t pte = *ptep;
-        if (pte_none(pte))
-            continue;
-        freed ++;
-        direct_pte_clear(ptep);
-    }
-
-    return freed;
-}
-
-static inline int direct_zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir,
-                                       unsigned long address, unsigned long size)
-{
-    pmd_t * pmd;
-    unsigned long end;
-    int freed;
-
-    if (pgd_none(*dir))
-        return 0;
-    if (pgd_bad(*dir)) {
-        pgd_ERROR(*dir);
-        pgd_clear(dir);
-        return 0;
-    }
-    pmd = pmd_offset(dir, address);
-    end = address + size;
-    if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
-        end = ((address + PGDIR_SIZE) & PGDIR_MASK);
-    freed = 0;
-    do {
-        freed += direct_zap_pte_range(tlb, pmd, address, end - address);
-        address = (address + PMD_SIZE) & PMD_MASK;
-        pmd++;
-    } while (address < end);
-    return freed;
-}
-
-/*
- * remove user pages in a given range.
- */
-void direct_zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
-{
-    mmu_gather_t *tlb;
-    pgd_t * dir;
-    unsigned long start = address, end = address + size;
-    int freed = 0;
-
-    dir = pgd_offset(mm, address);
-
-    /*
-     * This is a long-lived spinlock. That's fine.
-     * There's no contention, because the page table
-     * lock only protects against kswapd anyway, and
-     * even if kswapd happened to be looking at this
-     * process we _want_ it to get stuck.
-     */
-    if (address >= end)
-        BUG();
-    spin_lock(&mm->page_table_lock);
-    flush_cache_range(mm, address, end);
-    tlb = tlb_gather_mmu(mm);
-
-    do {
-        freed += direct_zap_pmd_range(tlb, dir, address, end - address);
-        address = (address + PGDIR_SIZE) & PGDIR_MASK;
-        dir++;
-    } while (address && (address < end));
-
-    /* this will flush any remaining tlb entries */
-    tlb_finish_mmu(tlb, start, end);
-
-    /* decrementing rss removed */
-    spin_unlock(&mm->page_table_lock);
-}
-
 
 int direct_unmap(struct mm_struct *mm, unsigned long addr, unsigned long size)
 {
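The net effect of this hunk is that dom0_memory.c no longer carries its own page-table walkers: direct_remap_page_range(), which always operated on current->mm, is gone, and callers name the target mm explicitly when calling direct_remap_area_pages() in ioremap.c. A minimal sketch of the new call shape, with a hypothetical helper and made-up argument names (the prototype is the extern declared above):

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /* Prototype as declared extern in dom0_memory.c above. */
    extern int direct_remap_area_pages(struct mm_struct *, unsigned long,
                                       unsigned long, unsigned long, pgprot_t);

    /* Hypothetical helper: map one machine frame 'mfn' at user address
     * 'addr' in the calling process, mirroring the per-page loop in
     * direct_remap_disc_page_range(). */
    static int map_one_frame(unsigned long addr, unsigned long mfn)
    {
        return direct_remap_area_pages(current->mm, addr,
                                       mfn << PAGE_SHIFT,
                                       PAGE_SIZE, PAGE_SHARED);
    }

Passing the mm explicitly is what lets __ioremap() below reuse the same walker against init_mm for kernel-side mappings.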
--- a/xenolinux-2.4.21-sparse/arch/xeno/mm/Makefile	Tue Jul 08 14:40:18 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/mm/Makefile	Wed Jul 09 10:24:12 2003 +0000
@@ -9,7 +9,7 @@
 
 O_TARGET := mm.o
 
-obj-y := init.o fault.o extable.o pageattr.o hypervisor.o get_unmapped_area.o
+obj-y := init.o fault.o extable.o pageattr.o hypervisor.o get_unmapped_area.o ioremap.o
 
 export-objs := pageattr.o
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/mm/ioremap.c	Wed Jul 09 10:24:12 2003 +0000
@@ -0,0 +1,303 @@
+/*
+ * arch/xeno/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ *
+ * Modifications for Xenolinux (c) 2003 Keir Fraser
+ */
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <asm/mmu.h>
+
+#define direct_set_pte(pteptr, pteval) \
+    queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
+#define direct_pte_clear(pteptr) \
+    queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, 0)
+#define __direct_pte(x) ((pte_t) { (x) } )
+#define __direct_mk_pte(page_nr,pgprot) \
+    __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define direct_mk_pte_phys(physpage, pgprot) \
+    __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
+
+
+/******************* Mapping a page range directly ************************/
+
+static inline void direct_remap_area_pte(pte_t *pte,
+                                         unsigned long address,
+                                         unsigned long size,
+                                         unsigned long machine_addr,
+                                         pgprot_t prot)
+{
+    unsigned long end;
+
+    address &= ~PMD_MASK;
+    end = address + size;
+    if (end > PMD_SIZE)
+        end = PMD_SIZE;
+    if (address >= end)
+        BUG();
+    do {
+        if (!pte_none(*pte)) {
+            printk("direct_remap_area_pte: page already exists\n");
+            BUG();
+        }
+        direct_set_pte(pte, direct_mk_pte_phys(machine_addr, prot));
+        address += PAGE_SIZE;
+        machine_addr += PAGE_SIZE;
+        pte++;
+    } while (address && (address < end));
+}
+
+static inline int direct_remap_area_pmd(struct mm_struct *mm,
+                                        pmd_t *pmd,
+                                        unsigned long address,
+                                        unsigned long size,
+                                        unsigned long machine_addr,
+                                        pgprot_t prot)
+{
+    unsigned long end;
+
+    address &= ~PGDIR_MASK;
+    end = address + size;
+    if (end > PGDIR_SIZE)
+        end = PGDIR_SIZE;
+    machine_addr -= address;
+    if (address >= end)
+        BUG();
+    do {
+        pte_t * pte = pte_alloc(mm, pmd, address);
+        if (!pte)
+            return -ENOMEM;
+        direct_remap_area_pte(pte, address, end - address,
+                              address + machine_addr, prot);
+        address = (address + PMD_SIZE) & PMD_MASK;
+        pmd++;
+    } while (address && (address < end));
+    return 0;
+}
+
+int direct_remap_area_pages(struct mm_struct *mm,
+                            unsigned long address,
+                            unsigned long machine_addr,
+                            unsigned long size,
+                            pgprot_t prot)
+{
+    int error = 0;
+    pgd_t * dir;
+    unsigned long end = address + size;
+
+    machine_addr -= address;
+    dir = pgd_offset(mm, address);
+    flush_cache_all();
+    if (address >= end)
+        BUG();
+    spin_lock(&mm->page_table_lock);
+    do {
+        pmd_t *pmd = pmd_alloc(mm, dir, address);
+        error = -ENOMEM;
+        if (!pmd)
+            break;
+        error = direct_remap_area_pmd(mm, pmd, address, end - address,
+                                      machine_addr + address, prot);
+        if (error)
+            break;
+        address = (address + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+    } while (address && (address < end));
+    spin_unlock(&mm->page_table_lock);
+    flush_tlb_all();
+    return error;
+}
+
+
+
+/************************ Zapping a page range directly *******************/
+
+static inline int direct_zap_pte_range(mmu_gather_t *tlb,
+                                       pmd_t * pmd,
+                                       unsigned long address,
+                                       unsigned long size)
+{
+    unsigned long offset;
+    pte_t * ptep;
+    int freed = 0;
+
+    if (pmd_none(*pmd))
+        return 0;
+    if (pmd_bad(*pmd)) {
+        pmd_ERROR(*pmd);
+        pmd_clear(pmd);
+        return 0;
+    }
+    ptep = pte_offset(pmd, address);
+    offset = address & ~PMD_MASK;
+    if (offset + size > PMD_SIZE)
+        size = PMD_SIZE - offset;
+    size &= PAGE_MASK;
+    for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
+        pte_t pte = *ptep;
+        if (pte_none(pte))
+            continue;
+        freed++;
+        direct_pte_clear(ptep);
+    }
+
+    return freed;
+}
+
+static inline int direct_zap_pmd_range(mmu_gather_t *tlb,
+                                       pgd_t * dir,
+                                       unsigned long address,
+                                       unsigned long size)
+{
+    pmd_t * pmd;
+    unsigned long end;
+    int freed;
+
+    if (pgd_none(*dir))
+        return 0;
+    if (pgd_bad(*dir)) {
+        pgd_ERROR(*dir);
+        pgd_clear(dir);
+        return 0;
+    }
+    pmd = pmd_offset(dir, address);
+    end = address + size;
+    if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+        end = ((address + PGDIR_SIZE) & PGDIR_MASK);
+    freed = 0;
+    do {
+        freed += direct_zap_pte_range(tlb, pmd, address, end - address);
+        address = (address + PMD_SIZE) & PMD_MASK;
+        pmd++;
+    } while (address < end);
+    return freed;
+}
+
+void direct_zap_page_range(struct mm_struct *mm,
+                           unsigned long address,
+                           unsigned long size)
+{
+    mmu_gather_t *tlb;
+    pgd_t * dir;
+    unsigned long start = address, end = address + size;
+    int freed = 0;
+
+    dir = pgd_offset(mm, address);
+
+    if (address >= end)
+        BUG();
+    spin_lock(&mm->page_table_lock);
+    flush_cache_range(mm, address, end);
+    tlb = tlb_gather_mmu(mm);
+
+    do {
+        freed += direct_zap_pmd_range(tlb, dir, address, end - address);
+        address = (address + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+    } while (address && (address < end));
+
+    /* this will flush any remaining tlb entries */
+    tlb_finish_mmu(tlb, start, end);
+
+    /* decrementing rss removed */
+    spin_unlock(&mm->page_table_lock);
+}
+
+
+
+/****************** Generic public functions ****************************/
+
+/*
+ * Remap an arbitrary machine address space into the kernel virtual
+ * address space. Needed when a privileged instance of Xenolinux wants
+ * to access space outside its world directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void * __ioremap(unsigned long machine_addr,
+                 unsigned long size,
+                 unsigned long flags)
+{
+    void * addr;
+    struct vm_struct * area;
+    unsigned long offset, last_addr;
+    pgprot_t prot;
+
+    /* Only privileged Xenolinux can make unchecked pagetable updates. */
+    if ( !(start_info.flags & SIF_PRIVILEGED) )
+        return NULL;
+
+    /* Don't allow wraparound or zero size */
+    last_addr = machine_addr + size - 1;
+    if (!size || last_addr < machine_addr)
+        return NULL;
+
+    /* Mappings have to be page-aligned */
+    offset = machine_addr & ~PAGE_MASK;
+    machine_addr &= PAGE_MASK;
+    size = PAGE_ALIGN(last_addr) - machine_addr;
+
+    /* Ok, go for it */
+    area = get_vm_area(size, VM_IOREMAP);
+    if (!area)
+        return NULL;
+    addr = area->addr;
+    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED |
+                    flags);
+    if (direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(addr),
+                                machine_addr, size, prot)) {
+        vfree(addr);
+        return NULL;
+    }
+    return (void *) (offset + (char *)addr);
+}
+
+/*
+ * 'vfree' is basically inlined here. This is because we use a different
+ * function to zap the associated page range.
+ */
+void iounmap(void *addr)
+{
+    struct vm_struct **p, *tmp;
+
+    addr = (void *)((unsigned long)addr & PAGE_MASK);
+
+    if (addr == NULL)
+        return;
+
+    write_lock(&vmlist_lock);
+
+    for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+        if (tmp->addr == addr) {
+            *p = tmp->next;
+            direct_zap_page_range(&init_mm,
+                                  VMALLOC_VMADDR(tmp->addr),
+                                  tmp->size);
+            write_unlock(&vmlist_lock);
+            kfree(tmp);
+            return;
+        }
+    }
+
+    write_unlock(&vmlist_lock);
+    printk(KERN_ERR "Trying to iounmap() nonexistent vm area (%p)\n", addr);
+}
+
+
+#if 0 /* We don't support these functions. They shouldn't be required. */
+void __init *bt_ioremap(unsigned long machine_addr, unsigned long size) {}
+void __init bt_iounmap(void *addr, unsigned long size) {}
+#endif
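For context: a consumer in a privileged domain reaches this code through the usual ioremap()/iounmap() pair, since the stock asm-i386/io.h that mkbuildtree links in below wraps __ioremap() with flags 0. A sketch of typical use, with a made-up register-block address (DEMO_MMIO_BASE is hypothetical):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <asm/io.h>

    #define DEMO_MMIO_BASE 0xfebf0000UL  /* hypothetical device registers */
    #define DEMO_MMIO_SIZE 0x1000UL

    static int __init demo_init(void)
    {
        /* __ioremap() returns NULL in a non-privileged domain (no
         * SIF_PRIVILEGED in start_info.flags), so checking the result
         * is not optional here. */
        void *regs = ioremap(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);
        if (regs == NULL)
            return -ENOMEM;

        /* readl/writel operate on the returned virtual address as usual. */
        printk(KERN_INFO "demo: id register = %08x\n", readl(regs));

        iounmap(regs);  /* tears the PTEs down via direct_zap_page_range() */
        return 0;
    }

    module_init(demo_init);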
--- a/xenolinux-2.4.21-sparse/include/asm-xeno/io.h	Tue Jul 08 14:40:18 2003 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,376 +0,0 @@
-#ifndef _ASM_IO_H
-#define _ASM_IO_H
-
-#include <linux/config.h>
-#include <asm/hypervisor.h>
-/*
- * This file contains the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated
- * to (a) handle it all in a way that makes gcc able to optimize it
- * as well as possible and (b) trying to avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- */
-
-/*
- * Thanks to James van Artsdalen for a better timing-fix than
- * the two short jumps: using outb's to a nonexistent port seems
- * to guarantee better timings even on fast machines.
- *
- * On the other hand, I'd like to be sure of a non-existent port:
- * I feel a bit unsafe about using 0x80 (should be safe, though)
- *
- * Linus
- */
-
- /*
-  * Bit simplified and optimized by Jan Hubicka
-  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
-  *
-  * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
-  * isa_read[wl] and isa_write[wl] fixed
-  * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-  */
-
-#define IO_SPACE_LIMIT 0xffff
-
-#define XQUAD_PORTIO_BASE 0xfe400000
-#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
-#define XQUAD_PORTIO_LEN  0x80000  /* Only remapping first 2 quads */
-
-#ifdef __KERNEL__
-
-#include <linux/vmalloc.h>
-
-/*
- * Temporary debugging check to catch old code using
- * unmapped ISA addresses. Will be removed in 2.4.
- */
-#if CONFIG_DEBUG_IOVIRT
-  extern void *__io_virt_debug(unsigned long x, const char *file, int line);
-  extern unsigned long __io_phys_debug(unsigned long x, const char *file, int line);
-  #define __io_virt(x) __io_virt_debug((unsigned long)(x), __FILE__, __LINE__)
-//#define __io_phys(x) __io_phys_debug((unsigned long)(x), __FILE__, __LINE__)
-#else
-  #define __io_virt(x) ((void *)(x))
-//#define __io_phys(x) __pa(x)
-#endif
-
-/**
- * virt_to_phys - map virtual addresses to physical
- * @address: address to remap
- *
- * The returned physical address is the physical (CPU) mapping for
- * the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
- *
- * This function does not give bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline unsigned long virt_to_phys(volatile void * address)
-{
-    return __pa(address);
-}
-
-/**
- * phys_to_virt - map physical address to virtual
- * @address: address to remap
- *
- * The returned virtual address is a current CPU mapping for
- * the memory address given. It is only valid to use this function on
- * addresses that have a kernel mapping
- *
- * This function does not handle bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline void * phys_to_virt(unsigned long address)
-{
-    return __va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#ifdef CONFIG_HIGHMEM64G
-#define page_to_phys(page) ((u64)(page - mem_map) << PAGE_SHIFT)
-#else
-#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
-#endif
-
-/*
- * IO bus memory addresses are also 1:1 with the physical address
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-#define page_to_bus page_to_phys
-
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
- */
-
-#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
-#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
-#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-
-#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
-#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
-#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-#define memset_io(a,b,c) __memset(__io_virt(a),(b),(c))
-#define memcpy_fromio(a,b,c) __memcpy((a),__io_virt(b),(c))
-#define memcpy_toio(a,b,c) __memcpy(__io_virt(a),(b),(c))
-
-/*
- * ISA space is 'always mapped' on a typical x86 system, no need to
- * explicitly ioremap() it. The fact that the ISA IO space is mapped
- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
- * are physical addresses. The following constant pointer can be
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
- */
-#define __ISA_IO_base ((char *)(PAGE_OFFSET))
-
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
-/*
- * Again, i386 does not require mem IO specific function.
- */
-
-#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d))
-
-/**
- * check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the mmio address io_addr. This
- * address should have been obtained by ioremap.
- * Returns 1 on a match.
- */
-
-static inline int check_signature(unsigned long io_addr,
-                                  const unsigned char *signature, int length)
-{
-    int retval = 0;
-    do {
-        if (readb(io_addr) != *signature)
-            goto out;
-        io_addr++;
-        signature++;
-        length--;
-    } while (length);
-    retval = 1;
-out:
-    return retval;
-}
-
-/**
- * isa_check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the ISA mmio address io_addr.
- * Returns 1 on a match.
- *
- * This function is deprecated. New drivers should use ioremap and
- * check_signature.
- */
-
-
-static inline int isa_check_signature(unsigned long io_addr,
-                                      const unsigned char *signature, int length)
-{
-    int retval = 0;
-    do {
-        if (isa_readb(io_addr) != *signature)
-            goto out;
-        io_addr++;
-        signature++;
-        length--;
-    } while (length);
-    retval = 1;
-out:
-    return retval;
-}
-
-/*
- * Cache management
- *
- * This needed for two cases
- * 1. Out of order aware processors
- * 2. Accidentally out of order processors (PPro errata #51)
- */
-
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
-
-static inline void flush_write_buffers(void)
-{
-    __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
-}
-
-#define dma_cache_inv(_start,_size) flush_write_buffers()
-#define dma_cache_wback(_start,_size) flush_write_buffers()
-#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
-
-#else
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-#define flush_write_buffers()
-
-#endif
-
-#endif /* __KERNEL__ */
-
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
-#else
-#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-#endif
-
-#ifdef REALLY_SLOW_IO
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-#else
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
-
-#ifdef CONFIG_MULTIQUAD
-extern void *xquad_portio;  /* Where the IO area was mapped */
-#endif /* CONFIG_MULTIQUAD */
-
-/*
- * Talk about misusing macros..
- */
-#define __OUT1(s,x) \
-static inline void out##s(unsigned x value, unsigned short port) {
-
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
-
-#if defined (CONFIG_MULTIQUAD) && !defined(STANDALONE)
-#define __OUTQ(s,ss,x)  /* Do the equivalent of the portio op on quads */ \
-static inline void out##ss(unsigned x value, unsigned short port) { \
-    if (xquad_portio) \
-        write##s(value, (unsigned long) xquad_portio + port); \
-    else  /* We're still in early boot, running on quad 0 */ \
-        out##ss##_local(value, port); \
-} \
-static inline void out##ss##_quad(unsigned x value, unsigned short port, int quad) { \
-    if (xquad_portio) \
-        write##s(value, (unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
-            + port); \
-}
-
-#define __INQ(s,ss)  /* Do the equivalent of the portio op on quads */ \
-static inline RETURN_TYPE in##ss(unsigned short port) { \
-    if (xquad_portio) \
-        return read##s((unsigned long) xquad_portio + port); \
-    else  /* We're still in early boot, running on quad 0 */ \
-        return in##ss##_local(port); \
-} \
-static inline RETURN_TYPE in##ss##_quad(unsigned short port, int quad) { \
-    if (xquad_portio) \
-        return read##s((unsigned long) xquad_portio + (XQUAD_PORTIO_QUAD*quad)\
-            + port); \
-    else\
-        return 0;\
-}
-#endif /* CONFIG_MULTIQUAD && !STANDALONE */
-
-#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));}
-#else
-/* Make the default portio routines operate on quad 0 */
-#define __OUT(s,s1,x) \
-__OUT1(s##_local,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p_local,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
-__OUTQ(s,s,x) \
-__OUTQ(s,s##_p,x)
-#endif /* !CONFIG_MULTIQUAD || STANDALONE */
-
-#define __IN1(s) \
-static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
-
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
-
-#if !defined(CONFIG_MULTIQUAD) || defined(STANDALONE)
-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; }
-#else
-/* Make the default portio routines operate on quad 0 */
-#define __IN(s,s1,i...) \
-__IN1(s##_local) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p_local) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__INQ(s,s) \
-__INQ(s,s##_p)
-#endif /* !CONFIG_MULTIQUAD || STANDALONE */
-
-#define __INS(s) \
-static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define __OUTS(s) \
-static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define RETURN_TYPE unsigned char
-__IN(b,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned short
-__IN(w,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned int
-__IN(l,"")
-#undef RETURN_TYPE
-
-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
-
-__INS(b)
-__INS(w)
-__INS(l)
-
-__OUTS(b)
-__OUTS(w)
-__OUTS(l)
-
-#endif
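The macro machinery at the end of this deleted header ("Talk about misusing macros..") is easier to follow expanded. In the non-MULTIQUAD configuration, __IN(b,"") and __OUT(b,"b",char) generate approximately the following (a hand expansion for readability, not compiler output):

    /* What __IN(b,"") produces once RETURN_TYPE is unsigned char: */
    static inline unsigned char inb(unsigned short port)
    {
        unsigned char _v;
        __asm__ __volatile__ ("inb %w1,%0" : "=a" (_v) : "Nd" (port));
        return _v;
    }

    /* What __OUT(b,"b",char) produces: */
    static inline void outb(unsigned char value, unsigned short port)
    {
        __asm__ __volatile__ ("outb %b0,%w1" : : "a" (value), "Nd" (port));
    }

The inb_p/outb_p variants are identical except for the appended __FULL_SLOW_DOWN_IO delay sequence. Since this file was a byte-for-byte copy of the stock i386 header, the deletion loses nothing: the next chunk makes mkbuildtree symlink asm-i386/io.h into the sparse tree instead.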
--- a/xenolinux-2.4.21-sparse/mkbuildtree	Tue Jul 08 14:40:18 2003 +0000
+++ b/xenolinux-2.4.21-sparse/mkbuildtree	Wed Jul 09 10:24:12 2003 +0000
@@ -111,6 +111,7 @@ ln -sf ../asm-i386/hdreg.h
 ln -sf ../asm-i386/i387.h
 ln -sf ../asm-i386/ide.h
 ln -sf ../asm-i386/init.h
+ln -sf ../asm-i386/io.h
 ln -sf ../asm-i386/io_apic.h
 ln -sf ../asm-i386/ioctl.h
 ln -sf ../asm-i386/ioctls.h