debuggers.hg
changeset 583:5a8f903eccbd
bitkeeper revision 1.259.2.3 (3f0ad1f5bs1q4bh_4dbAVVvBLeoAlw)
Many files:
Fix auto destruction of direct-mapped vm areas.
.del-mmu_context.c~74789121d58c5b63:
Delete: xenolinux-2.4.21-sparse/arch/xeno/mm/mmu_context.c
author | kaf24@scramble.cl.cam.ac.uk
date | Tue Jul 08 14:15:17 2003 +0000 (2003-07-08)
parents | 970fdf86f98e
children | 6361e72ebf4c e381bd125a43
files | .rootkeys xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_core.c xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c xenolinux-2.4.21-sparse/arch/xeno/kernel/process.c xenolinux-2.4.21-sparse/arch/xeno/mm/Makefile xenolinux-2.4.21-sparse/arch/xeno/mm/get_unmapped_area.c xenolinux-2.4.21-sparse/arch/xeno/mm/mmu_context.c xenolinux-2.4.21-sparse/include/asm-xeno/mmu_context.h
line diff
--- a/.rootkeys	Sat Jul 05 14:52:49 2003 +0000
+++ b/.rootkeys	Tue Jul 08 14:15:17 2003 +0000
@@ -504,7 +504,6 @@ 3e5a4e66l8Q5Tv-6B3lQIRmaVbFPzg xenolinux
 3e5a4e66TyNNUEXkr5RxqvQhXK1MQA xenolinux-2.4.21-sparse/arch/xeno/mm/get_unmapped_area.c
 3e5a4e668SE9rixq4ahho9rNhLUUFQ xenolinux-2.4.21-sparse/arch/xeno/mm/hypervisor.c
 3e5a4e661gLzzff25pJooKIIWe7IWg xenolinux-2.4.21-sparse/arch/xeno/mm/init.c
-3e5a4e66U45cAIoHmxg0y1e1XhzVCA xenolinux-2.4.21-sparse/arch/xeno/mm/mmu_context.c
 3e5a4e66qRlSTcjafidMB6ulECADvg xenolinux-2.4.21-sparse/arch/xeno/vmlinux.lds
 3ea53c6em6uzVHSiGqrbbAVofyRY_g xenolinux-2.4.21-sparse/drivers/block/genhd.c
 3e5a4e66mrtlmV75L1tjKDg8RaM5gA xenolinux-2.4.21-sparse/drivers/block/ll_rw_blk.c
--- a/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_core.c	Sat Jul 05 14:52:49 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_core.c	Tue Jul 08 14:15:17 2003 +0000
@@ -52,7 +52,6 @@ static struct proc_dir_entry *dom_list_i
 
 unsigned long direct_mmap(unsigned long, unsigned long, pgprot_t, int, int);
 int direct_unmap(unsigned long, unsigned long);
-int direct_disc_unmap(unsigned long, unsigned long, int);
 
 static unsigned char readbuf[1204];
 
@@ -161,8 +160,8 @@ static ssize_t dom_mem_write(struct file
 
     copy_from_user(&mem_data, (dom_mem_t *)buff, sizeof(dom_mem_t));
 
-    if(direct_disc_unmap(mem_data.vaddr, mem_data.start_pfn,
-                         mem_data.tot_pages) == 0){
+    if ( direct_unmap(mem_data.vaddr,
+                      mem_data.tot_pages << PAGE_SHIFT) == 0 ) {
         return sizeof(sizeof(dom_mem_t));
     } else {
         return -1;
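The substance of this hunk is the caller-side switch from a page count to a byte length: dom_mem_write() used to hand direct_disc_unmap() a starting pfn and a number of pages, whereas the unified direct_unmap() takes the virtual address plus a size in bytes, hence the tot_pages << PAGE_SHIFT conversion. A minimal userspace sketch of that conversion (the dom_mem_t layout here is a hypothetical stand-in, and PAGE_SHIFT is assumed to be the i386 value of 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assumed i386 value: 4 KB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Hypothetical stand-in for the dom_mem_t fields used above. */
    typedef struct {
        unsigned long vaddr;      /* start of the direct mapping           */
        unsigned long start_pfn;  /* no longer needed by direct_unmap()    */
        int           tot_pages;  /* old interface passed a count of pages */
    } dom_mem_t;

    int main(void)
    {
        dom_mem_t m = { 0xc0000000UL, 0x1000, 3 };

        /* Old call: direct_disc_unmap(m.vaddr, m.start_pfn, m.tot_pages)
         * New call: direct_unmap(m.vaddr, m.tot_pages << PAGE_SHIFT)     */
        unsigned long bytes = (unsigned long)m.tot_pages << PAGE_SHIFT;

        printf("%d pages -> %lu bytes\n", m.tot_pages, bytes); /* 3 -> 12288 */
        return 0;
    }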
--- a/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c	Sat Jul 05 14:52:49 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/drivers/dom0/dom0_memory.c	Tue Jul 08 14:15:17 2003 +0000
@@ -44,79 +44,79 @@ static inline void forget_pte(pte_t page
 }
 
 static inline void direct_remappte_range(pte_t * pte, unsigned long address, unsigned long size,
-                        unsigned long phys_addr, pgprot_t prot)
+                                         unsigned long phys_addr, pgprot_t prot)
 {
-        unsigned long end;
+    unsigned long end;
 
-        address &= ~PMD_MASK;
-        end = address + size;
-        if (end > PMD_SIZE)
-                end = PMD_SIZE;
-        do {
-                pte_t oldpage;
-                oldpage = ptep_get_and_clear(pte);
+    address &= ~PMD_MASK;
+    end = address + size;
+    if (end > PMD_SIZE)
+        end = PMD_SIZE;
+    do {
+        pte_t oldpage;
+        oldpage = ptep_get_and_clear(pte);
 
-                direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
+        direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
 
-                forget_pte(oldpage);
-                address += PAGE_SIZE;
-                phys_addr += PAGE_SIZE;
-                pte++;
-        } while (address && (address < end));
+        forget_pte(oldpage);
+        address += PAGE_SIZE;
+        phys_addr += PAGE_SIZE;
+        pte++;
+    } while (address && (address < end));
 
 }
 
 static inline int direct_remappmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
-                        unsigned long phys_addr, pgprot_t prot)
+                                        unsigned long phys_addr, pgprot_t prot)
 {
-        unsigned long end;
+    unsigned long end;
 
-        address &= ~PGDIR_MASK;
-        end = address + size;
-        if (end > PGDIR_SIZE)
-                end = PGDIR_SIZE;
-        phys_addr -= address;
-        do {
-                pte_t * pte = pte_alloc(mm, pmd, address);
-                if (!pte)
-                        return -ENOMEM;
-                direct_remappte_range(pte, address, end - address, address + phys_addr, prot);
-                address = (address + PMD_SIZE) & PMD_MASK;
-                pmd++;
-        } while (address && (address < end));
-        return 0;
+    address &= ~PGDIR_MASK;
+    end = address + size;
+    if (end > PGDIR_SIZE)
+        end = PGDIR_SIZE;
+    phys_addr -= address;
+    do {
+        pte_t * pte = pte_alloc(mm, pmd, address);
+        if (!pte)
+            return -ENOMEM;
+        direct_remappte_range(pte, address, end - address, address + phys_addr, prot);
+        address = (address + PMD_SIZE) & PMD_MASK;
+        pmd++;
+    } while (address && (address < end));
+    return 0;
 }
 
 /* Note: this is only safe if the mm semaphore is held when called. */
 int direct_remap_page_range(unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
-        int error = 0;
-        pgd_t * dir;
-        unsigned long beg = from;
-        unsigned long end = from + size;
-        struct mm_struct *mm = current->mm;
+    int error = 0;
+    pgd_t * dir;
+    unsigned long beg = from;
+    unsigned long end = from + size;
+    struct mm_struct *mm = current->mm;
 
-        phys_addr -= from;
-        dir = pgd_offset(mm, from);
-        flush_cache_range(mm, beg, end);
-        if (from >= end)
-                BUG();
+    phys_addr -= from;
+    dir = pgd_offset(mm, from);
+    flush_cache_range(mm, beg, end);
+    if (from >= end)
+        BUG();
 
-        spin_lock(&mm->page_table_lock);
-        do {
-                pmd_t *pmd = pmd_alloc(mm, dir, from);
-                error = -ENOMEM;
-                if (!pmd)
-                        break;
-                error = direct_remappmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
-                if (error)
-                        break;
-                from = (from + PGDIR_SIZE) & PGDIR_MASK;
-                dir++;
-        } while (from && (from < end));
-        spin_unlock(&mm->page_table_lock);
-        flush_tlb_range(mm, beg, end);
-        return error;
+    spin_lock(&mm->page_table_lock);
+    do {
+        pmd_t *pmd = pmd_alloc(mm, dir, from);
+        error = -ENOMEM;
+        if (!pmd)
+            break;
+        error = direct_remappmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
+        if (error)
+            break;
+        from = (from + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+    } while (from && (from < end));
+    spin_unlock(&mm->page_table_lock);
+    flush_tlb_range(mm, beg, end);
+    return error;
 }
 
 /*
@@ -124,7 +124,7 @@ int direct_remap_page_range(unsigned lon
  * found from frame table beginning at the given first_pg index
  */
 int direct_remap_disc_page_range(unsigned long from,
-                unsigned long first_pg, int tot_pages, pgprot_t prot)
+                                 unsigned long first_pg, int tot_pages, pgprot_t prot)
 {
     dom0_op_t dom0_op;
     unsigned long *pfns = get_free_page(GFP_KERNEL);
@@ -153,7 +153,7 @@ int direct_remap_disc_page_range(unsigne
         }
     }
 
-out:
+ out:
     free_page(pfns);
     return tot_pages;
 }
@@ -165,7 +165,7 @@ out:
  */
 
 unsigned long direct_mmap(unsigned long phys_addr, unsigned long size,
-                pgprot_t prot, int flag, int tot_pages)
+                          pgprot_t prot, int flag, int tot_pages)
 {
     direct_mmap_node_t * dmmap;
     struct list_head * entry;
@@ -190,17 +190,17 @@ unsigned long direct_mmap(unsigned long
     dmmap = (direct_mmap_node_t *)kmalloc(sizeof(direct_mmap_node_t), GFP_KERNEL);
     dmmap->vm_start = addr;
     dmmap->vm_end = addr + size;
-    entry = find_direct(&current->mm->context.direct_list, addr);
-    if(entry != &current->mm->context.direct_list){
-        list_add_tail(&dmmap->list, entry);
-    } else {
+    entry = find_direct(&current->mm->context.direct_list, addr);
+    if(entry != &current->mm->context.direct_list){
+        list_add_tail(&dmmap->list, entry);
+    } else {
         list_add_tail(&dmmap->list, &current->mm->context.direct_list);
-    }
+    }
 
     /* and perform the mapping */
     if(flag == MAP_DISCONT){
         ret = direct_remap_disc_page_range(addr, phys_addr >> PAGE_SHIFT,
-                        tot_pages, prot);
+                                           tot_pages, prot);
     } else {
         ret = direct_remap_page_range(addr, phys_addr, size, prot);
     }
@@ -208,7 +208,7 @@ unsigned long direct_mmap(unsigned long
     if(ret == 0)
         ret = addr;
 
-out:
+ out:
     return ret;
 }
 
@@ -216,60 +216,60 @@ out:
  * needed
  */
 static inline int direct_zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address,
-                        unsigned long size)
+                                       unsigned long size)
 {
-        unsigned long offset;
-        pte_t * ptep;
-        int freed = 0;
+    unsigned long offset;
+    pte_t * ptep;
+    int freed = 0;
 
-        if (pmd_none(*pmd))
-                return 0;
-        if (pmd_bad(*pmd)) {
-                pmd_ERROR(*pmd);
-                pmd_clear(pmd);
-                return 0;
-        }
-        ptep = pte_offset(pmd, address);
-        offset = address & ~PMD_MASK;
-        if (offset + size > PMD_SIZE)
-                size = PMD_SIZE - offset;
-        size &= PAGE_MASK;
-        for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
-                pte_t pte = *ptep;
-                if (pte_none(pte))
-                        continue;
-                freed ++;
-                direct_pte_clear(ptep);
-        }
+    if (pmd_none(*pmd))
+        return 0;
+    if (pmd_bad(*pmd)) {
+        pmd_ERROR(*pmd);
+        pmd_clear(pmd);
+        return 0;
+    }
+    ptep = pte_offset(pmd, address);
+    offset = address & ~PMD_MASK;
+    if (offset + size > PMD_SIZE)
+        size = PMD_SIZE - offset;
+    size &= PAGE_MASK;
+    for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
+        pte_t pte = *ptep;
+        if (pte_none(pte))
+            continue;
+        freed ++;
+        direct_pte_clear(ptep);
+    }
 
-        return freed;
+    return freed;
 }
 
 static inline int direct_zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir,
-                        unsigned long address, unsigned long size)
+                                       unsigned long address, unsigned long size)
 {
-        pmd_t * pmd;
-        unsigned long end;
-        int freed;
+    pmd_t * pmd;
+    unsigned long end;
+    int freed;
 
-        if (pgd_none(*dir))
-                return 0;
-        if (pgd_bad(*dir)) {
-                pgd_ERROR(*dir);
-                pgd_clear(dir);
-                return 0;
-        }
-        pmd = pmd_offset(dir, address);
-        end = address + size;
-        if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
-                end = ((address + PGDIR_SIZE) & PGDIR_MASK);
-        freed = 0;
-        do {
-                freed += direct_zap_pte_range(tlb, pmd, address, end - address);
-                address = (address + PMD_SIZE) & PMD_MASK;
-                pmd++;
-        } while (address < end);
-        return freed;
+    if (pgd_none(*dir))
+        return 0;
+    if (pgd_bad(*dir)) {
+        pgd_ERROR(*dir);
+        pgd_clear(dir);
+        return 0;
+    }
+    pmd = pmd_offset(dir, address);
+    end = address + size;
+    if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+        end = ((address + PGDIR_SIZE) & PGDIR_MASK);
+    freed = 0;
+    do {
+        freed += direct_zap_pte_range(tlb, pmd, address, end - address);
+        address = (address + PMD_SIZE) & PMD_MASK;
+        pmd++;
+    } while (address < end);
+    return freed;
 }
 
 /*
@@ -277,91 +277,67 @@ static inline int direct_zap_pmd_range(m
  */
 void direct_zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
 {
-        mmu_gather_t *tlb;
-        pgd_t * dir;
-        unsigned long start = address, end = address + size;
-        int freed = 0;
+    mmu_gather_t *tlb;
+    pgd_t * dir;
+    unsigned long start = address, end = address + size;
+    int freed = 0;
 
-        dir = pgd_offset(mm, address);
+    dir = pgd_offset(mm, address);
 
-        /*
-         * This is a long-lived spinlock. That's fine.
-         * There's no contention, because the page table
-         * lock only protects against kswapd anyway, and
-         * even if kswapd happened to be looking at this
-         * process we _want_ it to get stuck.
-         */
-        if (address >= end)
-                BUG();
-        spin_lock(&mm->page_table_lock);
-        flush_cache_range(mm, address, end);
-        tlb = tlb_gather_mmu(mm);
+    /*
+     * This is a long-lived spinlock. That's fine.
+     * There's no contention, because the page table
+     * lock only protects against kswapd anyway, and
+     * even if kswapd happened to be looking at this
+     * process we _want_ it to get stuck.
+     */
+    if (address >= end)
+        BUG();
+    spin_lock(&mm->page_table_lock);
+    flush_cache_range(mm, address, end);
+    tlb = tlb_gather_mmu(mm);
 
-        do {
-                freed += direct_zap_pmd_range(tlb, dir, address, end - address);
-                address = (address + PGDIR_SIZE) & PGDIR_MASK;
-                dir++;
-        } while (address && (address < end));
+    do {
+        freed += direct_zap_pmd_range(tlb, dir, address, end - address);
+        address = (address + PGDIR_SIZE) & PGDIR_MASK;
+        dir++;
+    } while (address && (address < end));
 
-        /* this will flush any remaining tlb entries */
-        tlb_finish_mmu(tlb, start, end);
+    /* this will flush any remaining tlb entries */
+    tlb_finish_mmu(tlb, start, end);
 
     /* decrementing rss removed */
+    spin_unlock(&mm->page_table_lock);
+}
 
-        spin_unlock(&mm->page_table_lock);
-}
 
 int direct_unmap(unsigned long addr, unsigned long size)
 {
+    int count = 0, tot_pages = (size+PAGE_SIZE-1) >> PAGE_SHIFT;
     direct_mmap_node_t * node;
    struct list_head * curr;
    struct list_head * direct_list = &current->mm->context.direct_list;
 
    curr = direct_list->next;
-    while(curr != direct_list){
+    while ( curr != direct_list )
+    {
        node = list_entry(curr, direct_mmap_node_t, list);
-        if(node->vm_start == addr)
+        if ( node->vm_start == addr )
            break;
        curr = curr->next;
    }
 
-    if(curr == direct_list)
+    if ( curr == direct_list )
        return -1;
 
    list_del(&node->list);
    kfree(node);
 
-    direct_zap_page_range(current->mm, addr, size);
-
-    return 0;
-}
-
-int direct_disc_unmap(unsigned long from, unsigned long first_pg, int tot_pages)
-{
-    int count = 0;
-    direct_mmap_node_t * node;
-    struct list_head * curr;
-    struct list_head * direct_list = &current->mm->context.direct_list;
-
-    curr = direct_list->next;
-    while(curr != direct_list){
-        node = list_entry(curr, direct_mmap_node_t, list);
-
-        if(node->vm_start == from)
-            break;
-        curr = curr->next;
-    }
-
-    if(curr == direct_list)
-        return -1;
-
-    list_del(&node->list);
-    kfree(node);
-
-    while(count < tot_pages){
-        direct_zap_page_range(current->mm, from, PAGE_SIZE);
-        from += PAGE_SIZE;
-        count++;
+    while ( count < tot_pages )
+    {
+        direct_zap_page_range(current->mm, addr, PAGE_SIZE);
+        addr += PAGE_SIZE;
+        count++;
     }
 
     return 0;
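Taken together, the dom0_memory.c changes are mostly a reindentation plus one real change: direct_disc_unmap() is deleted, and direct_unmap() absorbs its job by rounding the byte length up to whole pages and zapping one page at a time, which works whether or not the machine frames behind the region are contiguous. A small userspace sketch of that unified control flow, with the list bookkeeping and direct_zap_page_range() stubbed out (the harness is hypothetical; the loop mirrors the new kernel code):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Stub standing in for direct_zap_page_range(current->mm, addr, PAGE_SIZE). */
    static void zap_one_page(unsigned long addr)
    {
        printf("zap page at %#lx\n", addr);
    }

    /* Mirrors the unified direct_unmap() loop: round the byte length up to
     * whole pages, then zap page by page, which is safe even when the
     * physical frames behind the region are discontiguous. */
    static int unmap_range(unsigned long addr, unsigned long size)
    {
        int count = 0, tot_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        while (count < tot_pages) {
            zap_one_page(addr);
            addr += PAGE_SIZE;
            count++;
        }
        return 0;
    }

    int main(void)
    {
        return unmap_range(0x10000000UL, 3 * PAGE_SIZE + 1); /* rounds up to 4 pages */
    }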
--- a/xenolinux-2.4.21-sparse/arch/xeno/kernel/process.c	Sat Jul 05 14:52:49 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/kernel/process.c	Tue Jul 08 14:15:17 2003 +0000
@@ -145,6 +145,9 @@ void release_segments(struct mm_struct *
         flush_page_update_queue();
         vfree(ldt);
     }
+
+    /* YUK! We do this here because destroy_context() is too late. */
+    destroy_direct_list(mm);
 }
 
 /*
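The comment says why the hook lives here: in this 2.4-based tree, release_segments() runs while the process page tables are still intact, whereas destroy_context() fires only after they have been torn down, too late to zap the direct mappings. A userspace-only sketch of that ordering constraint (every body here is a hypothetical stand-in, not the real teardown path):

    #include <stdio.h>

    /* Hypothetical sketch of the mm teardown ordering this hunk relies on. */
    struct mm { int direct_nodes; };

    static void destroy_direct_list(struct mm *mm)
    {
        printf("unmapping %d direct areas (page tables still live)\n",
               mm->direct_nodes);
        mm->direct_nodes = 0;
    }

    static void release_segments(struct mm *mm)
    {
        /* ... free the LDT ... */
        destroy_direct_list(mm);          /* new home of the hook */
    }

    static void free_page_tables(struct mm *mm) { (void)mm; }
    static void destroy_context(struct mm *mm)  { (void)mm; /* now a no-op */ }

    int main(void)
    {
        struct mm mm = { 2 };
        release_segments(&mm);   /* early enough to zap safely         */
        free_page_tables(&mm);
        destroy_context(&mm);    /* too late to touch the mappings now */
        return 0;
    }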
--- a/xenolinux-2.4.21-sparse/arch/xeno/mm/Makefile	Sat Jul 05 14:52:49 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/mm/Makefile	Tue Jul 08 14:15:17 2003 +0000
@@ -9,7 +9,7 @@
 
 O_TARGET := mm.o
 
-obj-y := init.o fault.o extable.o pageattr.o hypervisor.o get_unmapped_area.o mmu_context.o
+obj-y := init.o fault.o extable.o pageattr.o hypervisor.o get_unmapped_area.o
 
 export-objs := pageattr.o
 
--- a/xenolinux-2.4.21-sparse/arch/xeno/mm/get_unmapped_area.c	Sat Jul 05 14:52:49 2003 +0000
+++ b/xenolinux-2.4.21-sparse/arch/xeno/mm/get_unmapped_area.c	Tue Jul 08 14:15:17 2003 +0000
@@ -14,6 +14,28 @@
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
+extern int direct_unmap(unsigned long, unsigned long);
+
+
+int init_direct_list(struct mm_struct *mm)
+{
+    INIT_LIST_HEAD(&mm->context.direct_list);
+    return 0;
+}
+
+
+void destroy_direct_list(struct mm_struct *mm)
+{
+    struct list_head *curr, *direct_list = &mm->context.direct_list;
+    while ( (curr = direct_list->next) != direct_list )
+    {
+        direct_mmap_node_t *node = list_entry(curr, direct_mmap_node_t, list);
+        if ( direct_unmap(node->vm_start, node->vm_end - node->vm_start) != 0 )
+            BUG();
+    }
+}
+
+
 struct list_head *find_direct(struct list_head *list, unsigned long addr)
 {
     struct list_head * curr;
@@ -29,6 +51,7 @@ struct list_head *find_direct(struct lis
     return curr;
 }
 
+
 unsigned long arch_get_unmapped_area(struct file *filp,
                                      unsigned long addr,
                                      unsigned long len,
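Note the loop shape in the new destroy_direct_list(): it re-reads direct_list->next on every iteration rather than walking a saved cursor, because each direct_unmap() call list_del()s and kfree()s the node it just found, invalidating any held pointer. A self-contained sketch of that pop-the-head-until-empty pattern, with miniature stand-ins for the kernel's list_head primitives (the harness and addresses are made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    /* Miniature stand-ins for the kernel's circular-list primitives. */
    struct list_head { struct list_head *next, *prev; };

    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
    }

    typedef struct {
        unsigned long vm_start, vm_end;
        struct list_head list;
    } direct_mmap_node_t;

    /* Unmap-and-free one node, as direct_unmap() does for a matching vm_start. */
    static void unmap_node(direct_mmap_node_t *node)
    {
        printf("unmap [%#lx, %#lx)\n", node->vm_start, node->vm_end);
        list_del(&node->list);
        free(node);
    }

    int main(void)
    {
        struct list_head direct_list = { &direct_list, &direct_list };
        struct list_head *curr;
        unsigned long i;

        for (i = 0; i < 3; i++) {
            direct_mmap_node_t *n = malloc(sizeof(*n));
            n->vm_start = 0x10000000UL + (i << 12);
            n->vm_end   = n->vm_start + (1UL << 12);
            list_add_tail(&n->list, &direct_list);
        }

        /* Same shape as destroy_direct_list(): always take the current head,
         * because unmapping deletes it and any saved 'next' dies with it. */
        while ((curr = direct_list.next) != &direct_list)
            unmap_node(list_entry(curr, direct_mmap_node_t, list));

        return 0;
    }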
--- a/xenolinux-2.4.21-sparse/arch/xeno/mm/mmu_context.c	Sat Jul 05 14:52:49 2003 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-
-#include <linux/slab.h>
-#include <linux/list.h>
-
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-    INIT_LIST_HEAD(&mm->context.direct_list);
-    return 0;
-}
-
-/* just free all elements of list identifying directly mapped areas */
-void destroy_context(struct mm_struct *mm)
-{
-    direct_mmap_node_t * node;
-    struct list_head * curr;
-    struct list_head * direct_list = &mm->context.direct_list;
-
-    curr = direct_list->next;
-    while(curr != direct_list){
-        node = list_entry(curr, direct_mmap_node_t, list);
-        curr = curr->next;
-        list_del(&node->list);
-        kfree(node);
-    }
-
-}
--- a/xenolinux-2.4.21-sparse/include/asm-xeno/mmu_context.h	Sat Jul 05 14:52:49 2003 +0000
+++ b/xenolinux-2.4.21-sparse/include/asm-xeno/mmu_context.h	Tue Jul 08 14:15:17 2003 +0000
@@ -7,15 +7,13 @@
 #include <asm/pgalloc.h>
 #include <asm/multicall.h>
 
-/*
- * possibly do the LDT unload here?
- */
+/* Hooked directly from 'init_new_context'. */
+extern int init_direct_list(struct mm_struct *);
+/* Called from 'release_segments'. */
+extern void destroy_direct_list(struct mm_struct *);
 
-extern int init_new_context(struct task_struct *tsk, struct mm_struct *);
-extern void destroy_context(struct mm_struct *);
-
-//#define destroy_context(mm) do { } while(0)
-//#define init_new_context(tsk,mm) 0
+#define destroy_context(mm) do { } while(0)
+#define init_new_context(tsk,mm) init_direct_list(mm)
 
 #ifdef CONFIG_SMP
 
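The header now routes the generic mm hooks onto the direct-list helpers: init_new_context() expands to init_direct_list(mm), and destroy_context() compiles away to nothing, since release_segments() has already run destroy_direct_list() by the time it would fire. A compile-time sketch of that macro indirection (the stub types and bodies are hypothetical):

    #include <stdio.h>

    struct task_struct;
    struct mm_struct { int initialised; };

    static int init_direct_list(struct mm_struct *mm)
    {
        mm->initialised = 1;    /* INIT_LIST_HEAD(...direct_list) in the real code */
        return 0;
    }

    /* The two macros from mmu_context.h after this change. */
    #define destroy_context(mm)      do { } while (0)
    #define init_new_context(tsk,mm) init_direct_list(mm)

    int main(void)
    {
        struct mm_struct mm = { 0 };
        init_new_context(tsk_unused, &mm);  /* expands to init_direct_list(&mm) */
        destroy_context(&mm);               /* expands to nothing               */
        printf("initialised=%d\n", mm.initialised);
        return 0;
    }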