debuggers.hg
changeset 604:c013c04b116d
bitkeeper revision 1.310 (3f0bf553kbldKc8vu6lujXO6BTBWpg)
Misc. tidy-ups. Continue moving bits of the /proc interface
from stupidly overloaded calls to read and write to using
a slightly less awful ioctl interface.
| author | sos22@labyrinth.cl.cam.ac.uk |
|---|---|
| date | Wed Jul 09 10:58:27 2003 +0000 (2003-07-09) |
| parents | 83633bb4e30d |
| children | f11254826a31 |
| files | tools/internal/mem_defs.h tools/internal/xi_build.c xen/include/hypervisor-ifs/dom0_ops.h xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_core.c xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c |
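
As context for the diff below, here is a minimal userspace sketch of the ioctl-style command path this changeset moves towards, modelled on the new map_dom_mem() in tools/internal/xi_build.c. It is a sketch only: the include path and error handling are assumptions, and the dom_mem_t bookkeeping around the call is omitted.

```c
/* Sketch only: mirrors the new ioctl path in xi_build.c below.
 * Assumes dom0_ops.h (the IOCTL_DOM0_* numbers and argument structs)
 * is on the include path, as it is for the build tools. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "dom0_ops.h"

static long map_domain_memory(unsigned int dom, unsigned long start_pfn,
                              unsigned long tot_pages)
{
    struct dom0_mapdommem_args argbuf;
    long vaddr;
    int fd;

    /* dom0 commands now go through a single ioctl-capable proc node... */
    fd = open("/proc/xeno/dom0_cmd", O_RDWR);
    if (fd < 0) {
        perror("open /proc/xeno/dom0_cmd");
        return -1;
    }

    /* ...taking a typed argument struct rather than a dom0_op_t write(). */
    argbuf.domain    = dom;
    argbuf.start_pfn = start_pfn;
    argbuf.tot_pages = tot_pages;
    vaddr = ioctl(fd, IOCTL_DOM0_MAPDOMMEM, &argbuf);
    if (vaddr == -1)
        perror("mapping domain memory");

    close(fd);
    return vaddr;   /* virtual address of the mapping, or -1 on failure */
}
```

The matching kernel-side handler is handle_dom0_cmd_mapdommem() in dom0_core.c; the unmap path uses IOCTL_DOM0_UNMAPDOMMEM in the same way.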
line diff
--- a/tools/internal/mem_defs.h    Wed Jul 09 08:29:19 2003 +0000
+++ b/tools/internal/mem_defs.h    Wed Jul 09 10:58:27 2003 +0000
@@ -42,4 +42,4 @@ typedef struct { unsigned long l2_lo; }
 
 /* local definitions */
 
-#define nr_2_page(x) (x << PAGE_SHIFT)
+#define nr_2_page(x) ((x) << PAGE_SHIFT)
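
The mem_defs.h change above is routine macro hygiene: `<<` binds tighter than `&`, the comparisons, and `?:`, so an unparenthesised macro argument can be split apart by the expansion. A small hypothetical example (not from the changeset) shows the failure mode the extra parentheses prevent:

```c
#include <stdio.h>

#define PAGE_SHIFT 12

/* Old form: the argument is spliced in unparenthesised. */
#define nr_2_page_old(x) (x << PAGE_SHIFT)
/* New form from this changeset. */
#define nr_2_page(x)     ((x) << PAGE_SHIFT)

int main(void)
{
    unsigned long nr = 0x123;

    /* Expands to nr & (0xff << PAGE_SHIFT): the mask gets shifted, not the result. */
    printf("old: %#lx\n", nr_2_page_old(nr & 0xff));
    /* Expands to (nr & 0xff) << PAGE_SHIFT, as intended. */
    printf("new: %#lx\n", nr_2_page(nr & 0xff));
    return 0;
}
```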
--- a/tools/internal/xi_build.c    Wed Jul 09 08:29:19 2003 +0000
+++ b/tools/internal/xi_build.c    Wed Jul 09 10:58:27 2003 +0000
@@ -8,13 +8,11 @@
 #include <stdio.h>
 #include <errno.h>
 #include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/types.h>
 #include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <string.h>
 #include <stdlib.h>
-#include <sys/ioctl.h>
 
-#include "asm-i386/types.h"
 #include "hypervisor-ifs/hypervisor-if.h"
 #include "dom0_ops.h"
 #include "dom0_defs.h"
@@ -51,7 +49,7 @@ static void dom_mem_cleanup(dom_mem_t *
 {
     int fd;
     struct dom0_unmapdommem_args argbuf;
-
+
     fd = open("/proc/xeno/dom0_cmd", O_WRONLY);
     if(fd < 0){
         perror(PERR_STRING);
@@ -68,85 +66,33 @@ static void dom_mem_cleanup(dom_mem_t *
     close(fd);
 }
 
-/* ask dom0 to export domains memory through /proc */
-static int setup_dom_memmap(unsigned long pfn, int pages, int dom)
-{
-    char cmd_path[MAX_PATH];
-    dom0_op_t dop;
-    int cmd_fd;
-
-    dop.cmd = MAP_DOM_MEM;
-    dop.u.dommem.start_pfn = pfn;
-    dop.u.dommem.tot_pages = pages;
-    dop.u.dommem.domain = dom;
-
-    /* open the /proc command interface */
-    sprintf(cmd_path, "%s%s%s%s", "/proc/", PROC_XENO_ROOT, "/", PROC_CMD);
-    cmd_fd = open(cmd_path, O_WRONLY);
-    if ( cmd_fd < 0 )
-    {
-        perror(PERR_STRING);
-        return -1;
-    }
-
-    write(cmd_fd, &dop, sizeof(dom0_op_t));
-    close(cmd_fd);
-
-    return 0;
-}
-
-/* request the actual mapping from dom0 */
-static unsigned long get_vaddr(unsigned int dom)
-{
-    char mem_path[MAX_PATH];
-    unsigned long addr;
-    int mem_fd;
-
-    /* open the domain's /proc mem interface */
-    sprintf(mem_path, "%s%s%s%s%d%s%s", "/proc/", PROC_XENO_ROOT, "/",
-            PROC_DOM_PREFIX, dom, "/", PROC_DOM_MEM);
-
-    mem_fd = open(mem_path, O_RDONLY);
-    if(mem_fd < 0){
-        perror(PERR_STRING);
-        return 0;
-    }
-
-    /* get virtual address of mapped region */
-    read(mem_fd, &addr, sizeof(addr));
-
-    close(mem_fd);
-
-    return addr;
-}
-
 static int map_dom_mem(unsigned long pfn, int pages, int dom,
                        dom_mem_t * dom_mem)
 {
-    struct dom0_mapdommem_args argbuf;
-    int fd;
+    struct dom0_mapdommem_args argbuf;
+    int fd;
 
-    argbuf.domain = dom;
-    argbuf.start_pfn = pfn;
-    argbuf.tot_pages = pages;
+    argbuf.domain = dom;
+    argbuf.start_pfn = pfn;
+    argbuf.tot_pages = pages;
 
-    fd = open("/proc/xeno/dom0_cmd", O_RDWR);
-    if (fd < 0) {
-        perror("openning /proc/xeno/dom0_cmd");
-        return -1;
-    }
+    fd = open("/proc/xeno/dom0_cmd", O_RDWR);
+    if (fd < 0) {
+        perror("openning /proc/xeno/dom0_cmd");
+        return -1;
+    }
 
-    dom_mem->domain = dom;
-    dom_mem->start_pfn = pfn;
-    dom_mem->tot_pages = pages;
-    dom_mem->vaddr = ioctl(fd, IOCTL_DOM0_MAPDOMMEM, &argbuf);
+    dom_mem->domain = dom;
+    dom_mem->start_pfn = pfn;
+    dom_mem->tot_pages = pages;
+    dom_mem->vaddr = ioctl(fd, IOCTL_DOM0_MAPDOMMEM, &argbuf);
+
+    if (dom_mem->vaddr == -1) {
+        perror("mapping domain memory");
+        return -1;
+    }
 
-    if (dom_mem->vaddr == -1) {
-        perror("mapping domain memory");
-        return -1;
-    }
-
-    return 0;
+    return 0;
 }
 
 /* open kernel image and do some sanity checks */
@@ -215,14 +161,13 @@ static dom_meminfo_t *setup_guestos(int
     dom_meminfo_t *meminfo;
     unsigned long *page_array;
     page_update_request_t *pgt_updates;
-    dom_mem_t mem_map;
     dom_meminfo_t *ret = NULL;
     int alloc_index, num_pt_pages;
     unsigned long l2tab;
     unsigned long l1tab = 0;
     unsigned long num_pgt_updates = 0;
     unsigned long count, pt_start;
-    dom0_op_t pgupdate_req;
+    struct dom0_dopgupdates_args pgupdate_req;
     char cmd_path[MAX_PATH];
     int cmd_fd;
 
@@ -324,10 +269,9 @@ static dom_meminfo_t *setup_guestos(int
      */
     sprintf(cmd_path, "%s%s%s%s", "/proc/", PROC_XENO_ROOT, "/", PROC_CMD);
     if ( (cmd_fd = open(cmd_path, O_WRONLY)) < 0 ) goto out;
-    pgupdate_req.cmd = DO_PGUPDATES;
-    pgupdate_req.u.pgupdate.pgt_update_arr = (unsigned long)dom_mem->vaddr;
-    pgupdate_req.u.pgupdate.num_pgt_updates = num_pgt_updates;
-    write(cmd_fd, &pgupdate_req, sizeof(dom0_op_t));
+    pgupdate_req.pgt_update_arr = (unsigned long)dom_mem->vaddr;
+    pgupdate_req.num_pgt_updates = num_pgt_updates;
+    if (ioctl(cmd_fd, IOCTL_DOM0_DOPGUPDATES, &pgupdate_req) < 0) goto out;
     close(cmd_fd);
 
     /* Load the guest OS image. */
@@ -435,11 +379,9 @@ int main(int argc, char **argv)
 {
 
     dom_mem_t dom_os_image;
-    dom_mem_t dom_pgt;
     dom_meminfo_t * meminfo;
     size_t ksize;
     unsigned long load_addr;
-    char status[1024];
     int kernel_fd, initrd_fd = 0;
     int count;
     int cmd_len;
@@ -450,8 +392,6 @@ int main(int argc, char **argv)
     int pg_head;
     int tot_pages;
 
-    unsigned long addr;
-
     /**** this argument parsing code is really _gross_. rewrite me! ****/
 
     if(argc < 4) {
--- a/xen/include/hypervisor-ifs/dom0_ops.h    Wed Jul 09 08:29:19 2003 +0000
+++ b/xen/include/hypervisor-ifs/dom0_ops.h    Wed Jul 09 10:58:27 2003 +0000
@@ -118,25 +118,32 @@ typedef struct dom0_op_st
 #define IOCTL_DOM0_CREATEDOMAIN _IOC(_IOC_READ, 'x', 0, sizeof(struct dom0_createdomain_args))
 #define IOCTL_DOM0_MAPDOMMEM _IOC(_IOC_READ, 'x', 1, sizeof(struct dom0_mapdommem_args))
 #define IOCTL_DOM0_UNMAPDOMMEM _IOC(_IOC_READ, 'x', 2, sizeof(struct dom0_unmapdommem_args))
+#define IOCTL_DOM0_DOPGUPDATES _IOC(_IOC_READ, 'x', 3, sizeof(struct dom0_dopgupdates_args))
 
 struct dom0_createdomain_args
 {
-    unsigned int kb_mem;
-    const char *name;
+    unsigned int kb_mem;
+    const char *name;
 };
 
 struct dom0_mapdommem_args
 {
-    unsigned int domain;
-    unsigned start_pfn;
-    unsigned tot_pages;
+    unsigned int domain;
+    unsigned start_pfn;
+    unsigned tot_pages;
 };
 
 struct dom0_unmapdommem_args
 {
-    unsigned long vaddr;
-    unsigned long start_pfn;
-    unsigned long tot_pages;
+    unsigned long vaddr;
+    unsigned long start_pfn;
+    unsigned long tot_pages;
+};
+
+struct dom0_dopgupdates_args
+{
+    unsigned long pgt_update_arr;
+    unsigned long num_pgt_updates;
 };
 
 #endif
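
The dom0_ops.h hunk above adds IOCTL_DOM0_DOPGUPDATES and its argument struct, which xi_build.c now uses in place of the old DO_PGUPDATES write. A hedged userspace sketch of a caller follows; it assumes dom0_ops.h is on the include path, and the update array is expected to sit in memory dom0 has direct-mapped (e.g. obtained through IOCTL_DOM0_MAPDOMMEM), as the handler comment in dom0_core.c below notes.

```c
/* Sketch only: submits a batch of page-table updates through the new
 * ioctl rather than writing a dom0_op_t to the proc file.  `updates`
 * must point at memory that dom0 has direct-mapped. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "dom0_ops.h"

static int do_pgupdates(void *updates, unsigned long num_updates)
{
    struct dom0_dopgupdates_args argbuf;
    int fd, ret;

    fd = open("/proc/xeno/dom0_cmd", O_WRONLY);
    if (fd < 0) {
        perror("open /proc/xeno/dom0_cmd");
        return -1;
    }

    argbuf.pgt_update_arr  = (unsigned long)updates;
    argbuf.num_pgt_updates = num_updates;
    ret = ioctl(fd, IOCTL_DOM0_DOPGUPDATES, &argbuf);

    close(fd);
    return ret;
}
```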
--- a/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_core.c    Wed Jul 09 08:29:19 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_core.c    Wed Jul 09 10:58:27 2003 +0000
@@ -162,8 +162,8 @@ static int dom0_cmd_write(struct file *f
     }
     else if ( op.cmd == DO_PGUPDATES )
     {
-        ret = HYPERVISOR_pt_update((void *)op.u.pgupdate.pgt_update_arr,
-                                   op.u.pgupdate.num_pgt_updates);
+        /* Now an ioctl. */
+        ret = -EOPNOTSUPP;
     }
     else if (op.cmd == DOM0_CREATEDOMAIN)
     {
@@ -274,6 +274,8 @@ static struct file_operations proc_xeno_
     release: seq_release,
 };
 
+/* END OF /proc/xeno/domains */
+
 static int handle_dom0_cmd_createdomain(unsigned long data)
 {
     struct dom0_createdomain_args argbuf;
@@ -344,6 +346,18 @@ static int handle_dom0_cmd_unmapdommem(u
                        argbuf.tot_pages << PAGE_SHIFT);
 }
 
+static int handle_dom0_cmd_dopgupdates(unsigned long data)
+{
+    struct dom0_dopgupdates_args argbuf;
+
+    if (copy_from_user(&argbuf, (void *)data, sizeof(argbuf)))
+        return -EFAULT;
+
+    /* argbuf.pgt_update_arr had better be direct mapped... */
+    return HYPERVISOR_pt_update(argbuf.pgt_update_arr,
+                                argbuf.num_pgt_updates);
+}
+
 static int dom0_cmd_ioctl(struct inode *inode, struct file *file,
                           unsigned int cmd, unsigned long data)
 {
@@ -355,6 +369,8 @@ static int dom0_cmd_ioctl(struct inode *
         return handle_dom0_cmd_mapdommem(data);
     case IOCTL_DOM0_UNMAPDOMMEM:
         return handle_dom0_cmd_unmapdommem(data);
+    case IOCTL_DOM0_DOPGUPDATES:
+        return handle_dom0_cmd_dopgupdates(data);
     default:
         printk("Unknown dom0_cmd ioctl!\n");
         return -EINVAL;
--- a/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c    Wed Jul 09 08:29:19 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c    Wed Jul 09 10:58:27 2003 +0000
@@ -32,88 +32,43 @@ extern struct list_head * find_direct(st
 #define __direct_mk_pte(page_nr,pgprot) __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
 #define direct_mk_pte_phys(physpage, pgprot) __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
 
-static inline void forget_pte(pte_t page)
+/* Note: this is only safe if the mm semaphore is held when called. */
+
+static int direct_remap_page(unsigned long from, unsigned long phys_addr, pgprot_t prot)
 {
-    if (!pte_none(page)) {
-        printk("forget_pte: old mapping existed!\n");
+    struct mm_struct *mm = current->mm;
+    pgd_t * dir;
+    pmd_t *pmd;
+    pte_t *pte;
+
+    pte_t oldpage;
+
+    dir = pgd_offset(mm, from);
+    flush_cache_range(mm, from, from + PAGE_SIZE);
+
+    spin_lock(&mm->page_table_lock);
+    pmd = pmd_alloc(mm, dir, from);
+    if (!pmd)
+        return -ENOMEM;
+    pte = pte_alloc(mm, pmd, address);
+    if (!pte) {
+        /* XXX free pmd? */
+        return -ENOMEM;
+    }
+
+    /* Sanity check */
+    oldpage = ptep_get_and_clear(pte);
+    if (!pte_none(oldpage)) {
+        printk("Page already in use!\n");
         BUG();
     }
-}
-
-static inline void direct_remappte_range(pte_t * pte, unsigned long address, unsigned long size,
-        unsigned long phys_addr, pgprot_t prot)
-{
-    unsigned long end;
-
-    address &= ~PMD_MASK;
-    end = address + size;
-    if (end > PMD_SIZE)
-        end = PMD_SIZE;
-    do {
-        pte_t oldpage;
-        oldpage = ptep_get_and_clear(pte);
+    direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
 
-        direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
-
-        forget_pte(oldpage);
-        address += PAGE_SIZE;
-        phys_addr += PAGE_SIZE;
-        pte++;
-    } while (address && (address < end));
-
-}
-
-static inline int direct_remappmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-        unsigned long phys_addr, pgprot_t prot)
-{
-    unsigned long end;
+    spin_unlock(&mm->page_table_lock);
 
-    address &= ~PGDIR_MASK;
-    end = address + size;
-    if (end > PGDIR_SIZE)
-        end = PGDIR_SIZE;
-    phys_addr -= address;
-    do {
-        pte_t * pte = pte_alloc(mm, pmd, address);
-        if (!pte)
-            return -ENOMEM;
-        direct_remappte_range(pte, address, end - address, address + phys_addr, prot);
-        address = (address + PMD_SIZE) & PMD_MASK;
-        pmd++;
-    } while (address && (address < end));
-    return 0;
-}
+    flush_tlb_range(mm, from, from + PAGE_SIZE);
 
-/* Note: this is only safe if the mm semaphore is held when called. */
-static int direct_remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
-{
-    int error = 0;
-    pgd_t * dir;
-    unsigned long beg = from;
-    unsigned long end = from + size;
-    struct mm_struct *mm = current->mm;
-
-    phys_addr -= from;
-    dir = pgd_offset(mm, from);
-    flush_cache_range(mm, beg, end);
-    if (from >= end)
-        BUG();
-
-    spin_lock(&mm->page_table_lock);
-    do {
-        pmd_t *pmd = pmd_alloc(mm, dir, from);
-        error = -ENOMEM;
-        if (!pmd)
-            break;
-        error = direct_remappmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
-        if (error)
-            break;
-        from = (from + PGDIR_SIZE) & PGDIR_MASK;
-        dir++;
-    } while (from && (from < end));
-    spin_unlock(&mm->page_table_lock);
-    flush_tlb_range(mm, beg, end);
-    return error;
+    return 0;
 }
 
 /*
@@ -142,8 +97,8 @@ static int direct_remap_disc_page_range(
 
     for ( i = 0; i < pages; i++ )
     {
-        if(direct_remap_page_range(start, pfns[i] << PAGE_SHIFT,
-                                   PAGE_SIZE, prot))
+        if(direct_remap_page(start, pfns[i] << PAGE_SHIFT,
+                             prot))
             goto out;
         start += PAGE_SIZE;
         tot_pages--;
@@ -155,10 +110,10 @@ static int direct_remap_disc_page_range(
     return tot_pages;
 }
 
-/* below functions replace standard sys_mmap and sys_munmap which are absolutely useless
- * for direct memory mapping. direct_zap* functions are minor ammendments to the
- * original versions in mm/memory.c. the changes are to enable unmapping of real physical
- * addresses.
+/* below functions replace standard sys_mmap and sys_munmap which are
+ * absolutely useless for direct memory mapping. direct_zap* functions
+ * are minor ammendments to the original versions in mm/memory.c. the
+ * changes are to enable unmapping of real physical addresses.
  */
 
 unsigned long direct_mmap(unsigned long phys_addr, unsigned long size,
@@ -169,6 +124,9 @@ unsigned long direct_mmap(unsigned long
     unsigned long addr;
     int ret = 0;
 
+    if(!(size & ~PAGE_MASK))
+        return -EINVAL;
+
     if(!capable(CAP_SYS_ADMIN))
         return -EPERM;
 
@@ -185,16 +143,16 @@ unsigned long direct_mmap(unsigned long
     dmmap->vm_end = addr + size;
     entry = find_direct(&current->mm->context.direct_list, addr);
     if(entry != &current->mm->context.direct_list){
-        list_add_tail(&dmmap->list, entry);
+        list_add_tail(&dmmap->list, entry);
     } else {
-        list_add_tail(&dmmap->list, &current->mm->context.direct_list);
+        list_add_tail(&dmmap->list, &current->mm->context.direct_list);
     }
 
-    /* XXX kfree(dmmap)? */
-
+    /* Acquire mm sem? */
     /* and perform the mapping */
     ret = direct_remap_disc_page_range(addr, phys_addr >> PAGE_SHIFT,
                                        tot_pages, prot);
+    /* Drop mm sem? */
 
     if(ret == 0)
         return addr;
@@ -202,75 +160,19 @@ unsigned long direct_mmap(unsigned long
     return ret;
 }
 
-/* most of the checks, refcnt updates, cache stuff have been thrown out as they are not
- * needed
+/*
+ * remove a user page
+ *
+ * There used to be a function here which could remove a whole range
+ * of pages, but it was only ever called with that range equal to a
+ * single page, so I simplified it a bit -- sos22.
  */
-static inline int direct_zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address,
-                                unsigned long size)
-{
-    unsigned long offset;
-    pte_t * ptep;
-    int freed = 0;
-
-    if (pmd_none(*pmd))
-        return 0;
-    if (pmd_bad(*pmd)) {
-        pmd_ERROR(*pmd);
-        pmd_clear(pmd);
-        return 0;
-    }
-    ptep = pte_offset(pmd, address);
-    offset = address & ~PMD_MASK;
-    if (offset + size > PMD_SIZE)
-        size = PMD_SIZE - offset;
-    size &= PAGE_MASK;
-    for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
-        pte_t pte = *ptep;
-        if (pte_none(pte))
-            continue;
-        freed ++;
-        direct_pte_clear(ptep);
-    }
-
-    return freed;
-}
-
-static inline int direct_zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir,
-                                unsigned long address, unsigned long size)
-{
-    pmd_t * pmd;
-    unsigned long end;
-    int freed;
-
-    if (pgd_none(*dir))
-        return 0;
-    if (pgd_bad(*dir)) {
-        pgd_ERROR(*dir);
-        pgd_clear(dir);
-        return 0;
-    }
-    pmd = pmd_offset(dir, address);
-    end = address + size;
-    if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
-        end = ((address + PGDIR_SIZE) & PGDIR_MASK);
-    freed = 0;
-    do {
-        freed += direct_zap_pte_range(tlb, pmd, address, end - address);
-        address = (address + PMD_SIZE) & PMD_MASK;
-        pmd++;
-    } while (address < end);
-    return freed;
-}
-
-/*
- * remove user pages in a given range.
- */
-static void direct_zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
+static void direct_zap_page(struct mm_struct *mm, unsigned long address)
 {
     mmu_gather_t *tlb;
     pgd_t * dir;
-    unsigned long start = address, end = address + size;
-    int freed = 0;
+    pmd_t * pmd;
+    pte_t * pte;
 
     dir = pgd_offset(mm, address);
 
@@ -281,20 +183,14 @@ static void direct_zap_page_range(struct
      * even if kswapd happened to be looking at this
      * process we _want_ it to get stuck.
      */
-    if (address >= end)
-        BUG();
     spin_lock(&mm->page_table_lock);
-    flush_cache_range(mm, address, end);
+    flush_cache_range(mm, address, address + PAGE_SIZE);
+
     tlb = tlb_gather_mmu(mm);
-
-    do {
-        freed += direct_zap_pmd_range(tlb, dir, address, end - address);
-        address = (address + PGDIR_SIZE) & PGDIR_MASK;
-        dir++;
-    } while (address && (address < end));
-
-    /* this will flush any remaining tlb entries */
-    tlb_finish_mmu(tlb, start, end);
+    pmd = pmd_offset(dir, address);
+    pte = pte_offset(pmd, address);
+    direct_pte_clear(pte);
+    tlb_finish_mmu(tlb, address, address + PAGE_SIZE);
 
     /* decrementing rss removed */
     spin_unlock(&mm->page_table_lock);
@@ -303,16 +199,16 @@ static void direct_zap_page_range(struct
 
 int direct_unmap(struct mm_struct *mm, unsigned long addr, unsigned long size)
 {
-    int count = 0, tot_pages = (size+PAGE_SIZE-1) >> PAGE_SHIFT;
     direct_mmap_node_t * node;
     struct list_head * curr;
     struct list_head * direct_list = &mm->context.direct_list;
+    unsigned long end;
 
     curr = direct_list->next;
     while ( curr != direct_list )
     {
         node = list_entry(curr, direct_mmap_node_t, list);
-        if ( node->vm_start == addr )
+        if ( node->vm_start == addr && node->vm_end == addr + size)
             break;
         curr = curr->next;
     }
@@ -323,11 +219,17 @@ int direct_unmap(struct mm_struct *mm, u
     list_del(&node->list);
     kfree(node);
 
-    while ( count < tot_pages )
+    if (size & ~PAGE_MASK) {
+        printk("Managed to map something which isn\'t a multiple of a page size...\n");
+        BUG();
+        return -EINVAL;
+    }
+
+    end = addr + size;
+    while ( addr < end )
     {
-        direct_zap_page_range(mm, addr, PAGE_SIZE);
+        direct_zap_page(mm, addr);
         addr += PAGE_SIZE;
-        count++;
     }
 
     return 0;