debuggers.hg
changeset 9855:4ed269e73e95
[IA64] cleanup CONFIG_VIRTUAL_MEM_MAP.
Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author | awilliam@xenbuild.aw
---|---
date | Mon Apr 17 08:47:36 2006 -0600 (2006-04-17)
parents | bdb08c9ef3d1
children | 41823e46d6ac
files | xen/arch/ia64/xen/mm_init.c xen/include/asm-ia64/mm.h
line diff
--- a/xen/arch/ia64/xen/mm_init.c	Mon Apr 17 08:46:52 2006 -0600
+++ b/xen/arch/ia64/xen/mm_init.c	Mon Apr 17 08:47:36 2006 -0600
@@ -8,287 +8,11 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
-#ifdef XEN
 #include <xen/sched.h>
-#endif
-#include <linux/bootmem.h>
-#include <linux/efi.h>
-#include <linux/elf.h>
-#include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <linux/module.h>
-#ifndef XEN
-#include <linux/personality.h>
-#endif
-#include <linux/reboot.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#ifndef XEN
-#include <linux/proc_fs.h>
-#endif
-
-#ifndef XEN
-#include <asm/a.out.h>
-#endif
-#include <asm/bitops.h>
-#include <asm/dma.h>
-#ifndef XEN
-#include <asm/ia32.h>
-#endif
-#include <asm/io.h>
-#include <asm/machvec.h>
-#include <asm/numa.h>
-#include <asm/patch.h>
-#include <asm/pgalloc.h>
-#include <asm/sal.h>
-#include <asm/sections.h>
-#include <asm/system.h>
-#include <asm/tlb.h>
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
-#include <asm/mca.h>
 #include <asm/vhpt.h>
 
-#ifndef XEN
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-#endif
-
 extern void ia64_tlb_init (void);
 
-unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
-struct page_info *vmem_map;
-EXPORT_SYMBOL(vmem_map);
-#endif
-
-// static int pgt_cache_water[2] = { 25, 50 };
-
-#ifndef XEN
-struct page_info *zero_page_memmap_ptr;  /* map entry for zero page */
-EXPORT_SYMBOL(zero_page_memmap_ptr);
-
-void *high_memory;
-EXPORT_SYMBOL(high_memory);
-
-/////////////////////////////////////////////
-// following from linux-2.6.7/mm/mmap.c
-/* description of effects of mapping type and prot in current implementation.
- * this is due to the limited x86 page protection hardware.  The expected
- * behavior is in parens:
- *
- * map_type     prot
- *              PROT_NONE    PROT_READ     PROT_WRITE     PROT_EXEC
- * MAP_SHARED   r: (no) no   r: (yes) yes  r: (no) yes    r: (no) yes
- *              w: (no) no   w: (no) no    w: (yes) yes   w: (no) no
- *              x: (no) no   x: (no) yes   x: (no) yes    x: (yes) yes
- *
- * MAP_PRIVATE  r: (no) no   r: (yes) yes  r: (no) yes    r: (no) yes
- *              w: (no) no   w: (no) no    w: (copy) copy w: (no) no
- *              x: (no) no   x: (no) yes   x: (no) yes    x: (yes) yes
- *
- */
-pgprot_t protection_map[16] = {
-    __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
-    __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
-};
-
-void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
-{
-    printf("insert_vm_struct: called, not implemented yet\n");
-}
-
-/////////////////////////////////////////////
-//following from linux/mm/memory.c
-
-#ifndef __ARCH_HAS_4LEVEL_HACK
-/*
- * Allocate page upper directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
- *
- * On a two-level or three-level page table, this ends up actually being
- * entirely optimized away.
- */
-pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-{
-    pud_t *new;
-
-    spin_unlock(&mm->page_table_lock);
-    new = pud_alloc_one(mm, address);
-    spin_lock(&mm->page_table_lock);
-    if (!new)
-        return NULL;
-
-    /*
-     * Because we dropped the lock, we should re-check the
-     * entry, as somebody else could have populated it..
-     */
-    if (pgd_present(*pgd)) {
-        pud_free(new);
-        goto out;
-    }
-    pgd_populate(mm, pgd, new);
- out:
-    return pud_offset(pgd, address);
-}
-
-/*
- * Allocate page middle directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
- *
- * On a two-level page table, this ends up actually being entirely
- * optimized away.
- */
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-{
-    pmd_t *new;
-
-    spin_unlock(&mm->page_table_lock);
-    new = pmd_alloc_one(mm, address);
-    spin_lock(&mm->page_table_lock);
-    if (!new)
-        return NULL;
-
-    /*
-     * Because we dropped the lock, we should re-check the
-     * entry, as somebody else could have populated it..
-     */
-    if (pud_present(*pud)) {
-        pmd_free(new);
-        goto out;
-    }
-    pud_populate(mm, pud, new);
- out:
-    return pmd_offset(pud, address);
-}
-#endif
-
-pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
-{
-    if (!pmd_present(*pmd)) {
-        struct page_info *new;
-
-        spin_unlock(&mm->page_table_lock);
-        new = pte_alloc_one(mm, address);
-        spin_lock(&mm->page_table_lock);
-        if (!new)
-            return NULL;
-
-        /*
-         * Because we dropped the lock, we should re-check the
-         * entry, as somebody else could have populated it..
-         */
-        if (pmd_present(*pmd)) {
-            pte_free(new);
-            goto out;
-        }
-        inc_page_state(nr_page_table_pages);
-        pmd_populate(mm, pmd, new);
-    }
-out:
-    return pte_offset_map(pmd, address);
-}
-/////////////////////////////////////////////
-#endif /* XEN */
-
-#if 0
-void
-update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
-{
-    unsigned long addr;
-    struct page_info *page;
-
-    if (!pte_exec(pte))
-        return;                /* not an executable page... */
-
-    page = pte_page(pte);
-    /* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
-    addr = (unsigned long) page_address(page);
-
-    if (test_bit(PG_arch_1, &page->flags))
-        return;                /* i-cache is already coherent with d-cache */
-
-    flush_icache_range(addr, addr + PAGE_SIZE);
-    set_bit(PG_arch_1, &page->flags);    /* mark page as clean */
-}
-#endif
-
-#if 0
-inline void
-ia64_set_rbs_bot (void)
-{
-#ifdef XEN
-    unsigned long stack_size = MAX_USER_STACK_SIZE;
-#else
-    unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
-#endif
-
-    if (stack_size > MAX_USER_STACK_SIZE)
-        stack_size = MAX_USER_STACK_SIZE;
-    current->arch._thread.rbs_bot = STACK_TOP - stack_size;
-}
-#endif
-
-/*
- * This performs some platform-dependent address space initialization.
- * On IA-64, we want to setup the VM area for the register backing
- * store (which grows upwards) and install the gateway page which is
- * used for signal trampolines, etc.
- */
-#if 0
-void
-ia64_init_addr_space (void)
-{
-#ifdef XEN
-printf("ia64_init_addr_space: called, not implemented\n");
-#else
-    struct vm_area_struct *vma;
-
-    ia64_set_rbs_bot();
-
-    /*
-     * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
-     * the problem.  When the process attempts to write to the register backing store
-     * for the first time, it will get a SEGFAULT in this case.
-     */
-    vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-    if (vma) {
-        memset(vma, 0, sizeof(*vma));
-        vma->vm_mm = current->mm;
-        vma->vm_start = current->arch._thread.rbs_bot & PAGE_MASK;
-        vma->vm_end = vma->vm_start + PAGE_SIZE;
-        vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
-        vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
-        insert_vm_struct(current->mm, vma);
-    }
-
-    /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
-    if (!(current->personality & MMAP_PAGE_ZERO)) {
-        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-        if (vma) {
-            memset(vma, 0, sizeof(*vma));
-            vma->vm_mm = current->mm;
-            vma->vm_end = PAGE_SIZE;
-            vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
-            vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
-            insert_vm_struct(current->mm, vma);
-        }
-    }
-#endif
-}
-#endif
-
-void setup_gate (void)
-{
-    printk("setup_gate not-implemented.\n");
-}
-
 void __devinit
 ia64_mmu_init (void *my_cpu_data)
 {
@@ -384,166 +108,6 @@ ia64_mmu_init (void *my_cpu_data)
 #endif
 }
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-
-int
-create_mem_map_page_table (u64 start, u64 end, void *arg)
-{
-    unsigned long address, start_page, end_page;
-    struct page_info *map_start, *map_end;
-    int node;
-    pgd_t *pgd;
-    pmd_t *pmd;
-    pte_t *pte;
-
-    map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
-    map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);
-
-    start_page = (unsigned long) map_start & PAGE_MASK;
-    end_page   = PAGE_ALIGN((unsigned long) map_end);
-    node = paddr_to_nid(__pa(start));
-
-    for (address = start_page; address < end_page; address += PAGE_SIZE) {
-        pgd = pgd_offset_k(address);
-        if (pgd_none(*pgd))
-            pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
-        pmd = pmd_offset(pgd, address);
-
-        if (pmd_none(*pmd))
-            pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
-        pte = pte_offset_kernel(pmd, address);
-
-        if (pte_none(*pte))
-            set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
-                                 PAGE_KERNEL));
-    }
-    return 0;
-}
-
-struct memmap_init_callback_data {
-    struct page_info *start;
-    struct page_info *end;
-    int nid;
-    unsigned long zone;
-};
-
-static int
-virtual_memmap_init (u64 start, u64 end, void *arg)
-{
-    struct memmap_init_callback_data *args;
-    struct page_info *map_start, *map_end;
-
-    args = (struct memmap_init_callback_data *) arg;
-
-    map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
-    map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);
-
-    if (map_start < args->start)
-        map_start = args->start;
-    if (map_end > args->end)
-        map_end = args->end;
-
-    /*
-     * We have to initialize "out of bounds" struct page_info elements that fit completely
-     * on the same pages that were allocated for the "in bounds" elements because they
-     * may be referenced later (and found to be "reserved").
-     */
-    map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page_info);
-    map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
-                / sizeof(struct page_info));
-
-    if (map_start < map_end)
-        memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
-                         args->nid, args->zone, page_to_mfn(map_start));
-    return 0;
-}
-
-void
-memmap_init (struct page_info *start, unsigned long size, int nid,
-             unsigned long zone, unsigned long start_pfn)
-{
-    if (!vmem_map)
-        memmap_init_zone(start, size, nid, zone, start_pfn);
-    else {
-        struct memmap_init_callback_data args;
-
-        args.start = start;
-        args.end = start + size;
-        args.nid = nid;
-        args.zone = zone;
-
-        efi_memmap_walk(virtual_memmap_init, &args);
-    }
-}
-
-int
-ia64_mfn_valid (unsigned long pfn)
-{
-    char byte;
-    struct page_info *pg = mfn_to_page(pfn);
-
-    return     (__get_user(byte, (char *) pg) == 0)
-        && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
-            ||  (__get_user(byte, (char *) (pg + 1) - 1) == 0));
-}
-EXPORT_SYMBOL(ia64_mfn_valid);
-
-int
-find_largest_hole (u64 start, u64 end, void *arg)
-{
-    u64 *max_gap = arg;
-
-    static u64 last_end = PAGE_OFFSET;
-
-    /* NOTE: this algorithm assumes efi memmap table is ordered */
-
-#ifdef XEN
-//printf("find_largest_hole: start=%lx,end=%lx,max_gap=%lx\n",start,end,*(unsigned long *)arg);
-#endif
-    if (*max_gap < (start - last_end))
-        *max_gap = start - last_end;
-    last_end = end;
-#ifdef XEN
-//printf("find_largest_hole2: max_gap=%lx,last_end=%lx\n",*max_gap,last_end);
-#endif
-    return 0;
-}
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
-
-#ifndef XEN
-static int
-count_reserved_pages (u64 start, u64 end, void *arg)
-{
-    unsigned long num_reserved = 0;
-    unsigned long *count = arg;
-
-    for (; start < end; start += PAGE_SIZE)
-        if (PageReserved(virt_to_page(start)))
-            ++num_reserved;
-    *count += num_reserved;
-    return 0;
-}
-#endif
-
-/*
- * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
- * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
- * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
- * useful for performance testing, but conceivably could also come in handy for debugging
- * purposes.
- */
-
-static int nolwsys;
-
-static int __init
-nolwsys_setup (char *s)
-{
-    nolwsys = 1;
-    return 1;
-}
-
-__setup("nolwsys", nolwsys_setup);
-
 void
 mem_init (void)
 {
--- a/xen/include/asm-ia64/mm.h	Mon Apr 17 08:46:52 2006 -0600
+++ b/xen/include/asm-ia64/mm.h	Mon Apr 17 08:47:36 2006 -0600
@@ -139,11 +139,7 @@ extern spinlock_t free_list_lock;
 extern unsigned int free_pfns;
 extern unsigned long max_page;
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-void __init init_frametable(void *frametable_vstart, unsigned long nr_pages);
-#else
 extern void __init init_frametable(void);
-#endif
 void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
 
 extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
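
For readers skimming the mm.h hunk above, the net effect on the init_frametable() prototype can be sketched as follows. This is an illustrative reconstruction, not part of the changeset; the before/after framing and the comments are editorial, and only the declarations themselves come from the hunk.

/* Before this changeset the declaration depended on CONFIG_VIRTUAL_MEM_MAP
 * (taken from the hunk above):
 *
 *     #ifdef CONFIG_VIRTUAL_MEM_MAP
 *     void __init init_frametable(void *frametable_vstart, unsigned long nr_pages);
 *     #else
 *     extern void __init init_frametable(void);
 *     #endif
 *
 * After the cleanup, callers of init_frametable() see only the single
 * parameterless prototype: */
extern void __init init_frametable(void);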