debuggers.hg: xen/arch/ia64/mm_init.c @ 4615:58efb3448933

bitkeeper revision 1.1327.1.1 (426536d2PUqtjTi2v06bzD10RFwarg)

Merge bk://xen.bkbits.net/xeno-unstable.bk
into bkbits.net:/repos/x/xen-ia64/xeno-unstable-ia64.bk

author    xen-ia64.adm@bkbits.net
date      Tue Apr 19 16:50:26 2005 +0000 (2005-04-19)
parents   445b12a7221a f1c946e1226a
children  5b9e241131fb
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#ifdef XEN
#include <xen/sched.h>
#endif
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#ifndef XEN
#include <linux/personality.h>
#endif
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#ifndef XEN
#include <linux/proc_fs.h>
#endif

#ifndef XEN
#include <asm/a.out.h>
#endif
#include <asm/bitops.h>
#include <asm/dma.h>
#ifndef XEN
#include <asm/ia32.h>
#endif
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

#ifndef XEN
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
#endif

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

static int pgt_cache_water[2] = { 25, 50 };

struct page *zero_page_memmap_ptr;      /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

#ifdef XEN
void *high_memory;
EXPORT_SYMBOL(high_memory);

/////////////////////////////////////////////
// following from linux-2.6.7/mm/mmap.c
/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
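
/*
 * Illustrative note: the table above is indexed by the low four VM flag
 * bits (bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC,
 * bit 3 = VM_SHARED), so a lookup is simply
 *
 *      pgprot_t prot = protection_map[vm_flags & 0x0f];
 *
 * The __Pxxx entries cover private mappings, the __Sxxx entries shared
 * ones; ia64_init_addr_space() below uses the same idiom with
 * VM_DATA_DEFAULT_FLAGS & 0x7 for a private mapping.
 */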

void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
{
        printf("insert_vm_struct: called, not implemented yet\n");
}

/////////////////////////////////////////////
// following from linux/mm/memory.c

#ifndef __ARCH_HAS_4LEVEL_HACK
/*
 * Allocate page upper directory.
 *
 * We've already handled the fast-path in-line, and we own the
 * page table lock.
 *
 * On a two-level or three-level page table, this ends up actually being
 * entirely optimized away.
 */
pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
        pud_t *new;

        spin_unlock(&mm->page_table_lock);
        new = pud_alloc_one(mm, address);
        spin_lock(&mm->page_table_lock);
        if (!new)
                return NULL;

        /*
         * Because we dropped the lock, we should re-check the
         * entry, as somebody else could have populated it..
         */
        if (pgd_present(*pgd)) {
                pud_free(new);
                goto out;
        }
        pgd_populate(mm, pgd, new);
out:
        return pud_offset(pgd, address);
}

/*
 * Allocate page middle directory.
 *
 * We've already handled the fast-path in-line, and we own the
 * page table lock.
 *
 * On a two-level page table, this ends up actually being entirely
 * optimized away.
 */
pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
        pmd_t *new;

        spin_unlock(&mm->page_table_lock);
        new = pmd_alloc_one(mm, address);
        spin_lock(&mm->page_table_lock);
        if (!new)
                return NULL;

        /*
         * Because we dropped the lock, we should re-check the
         * entry, as somebody else could have populated it..
         */
        if (pud_present(*pud)) {
                pmd_free(new);
                goto out;
        }
        pud_populate(mm, pud, new);
out:
        return pmd_offset(pud, address);
}
#endif

pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
        if (!pmd_present(*pmd)) {
                struct page *new;

                spin_unlock(&mm->page_table_lock);
                new = pte_alloc_one(mm, address);
                spin_lock(&mm->page_table_lock);
                if (!new)
                        return NULL;

                /*
                 * Because we dropped the lock, we should re-check the
                 * entry, as somebody else could have populated it..
                 */
                if (pmd_present(*pmd)) {
                        pte_free(new);
                        goto out;
                }
                inc_page_state(nr_page_table_pages);
                pmd_populate(mm, pmd, new);
        }
out:
        return pte_offset_map(pmd, address);
}
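
/*
 * Illustrative note: the three allocators above share one convention:
 * the caller holds mm->page_table_lock, the allocator may drop and
 * re-take it around a possibly sleeping allocation, and must therefore
 * re-check the entry afterwards in case another CPU populated it in
 * the meantime.  A typical caller walks the levels top-down, roughly
 * (hypothetical sketch):
 *
 *      spin_lock(&mm->page_table_lock);
 *      pud = pud_alloc(mm, pgd, addr);
 *      pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
 *      pte = pmd ? pte_alloc_map(mm, pmd, addr) : NULL;
 *      ...
 *      spin_unlock(&mm->page_table_lock);
 */
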
/////////////////////////////////////////////
#endif /* XEN */

void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
        unsigned long addr;
        struct page *page;

        if (!pte_exec(pte))
                return;                         /* not an executable page... */

        page = pte_page(pte);
        /* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + PAGE_SIZE);
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}
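
/*
 * Note on PG_arch_1: on ia64 the bit records that the page's i-cache
 * view is coherent with the d-cache.  It is set here after the flush;
 * generic code is expected to clear it again (flush_dcache_page() does
 * so on ia64) whenever the page contents may have changed, which forces
 * a fresh flush the next time the page is mapped executable.
 */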

inline void
ia64_set_rbs_bot (void)
{
#ifdef XEN
        unsigned stack_size = MAX_USER_STACK_SIZE;
#else
        unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
#endif

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = STACK_TOP - stack_size;
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
#ifdef XEN
        printf("ia64_init_addr_space: called, not implemented\n");
#else
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (vma) {
                memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
                vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
                insert_vm_struct(current->mm, vma);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
                if (vma) {
                        memset(vma, 0, sizeof(*vma));
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
                        insert_vm_struct(current->mm, vma);
                }
        }
#endif
}

void
setup_gate (void)
{
        printk("setup_gate not-implemented.\n");
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
        unsigned long psr, pta, impl_va_bits;
        extern void __devinit tlb_init (void);
        int cpu;

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

        /* Pin mapping for percpu area into TLB */
        psr = ia64_clear_ic();
        ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
                 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
                 PERCPU_PAGE_SHIFT);

        ia64_set_psr(psr);
        ia64_srlz_i();

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
         * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
         * problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         * --davidm 00/12/06
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))
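
        /*
         * Worked example, assuming 16KB pages (PAGE_SHIFT = 14) and
         * 8-byte PTEs (pte_bits = 3):
         *
         *      mapped_space_bits = 3*(14 - 3) + 14 = 47   (128TB per region)
         *      impl_va_bits      = 51                     (minimum accepted below)
         *      vmlpt_bits        = 51 - 14 + 3 = 40       (1TB of VMLPT)
         *
         * i.e. the virtually mapped linear page table needs 2^40 bytes at
         * the top of the region to cover a 2^51-byte implemented space.
         */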

        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

#ifdef XEN
        vhpt_init();
#endif
#if 0
        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        if (POW2(mapped_space_bits) >= pta)
                panic("mm/init: overlap between virtually mapped linear page table and "
                      "mapped kernel space!");
        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
#endif
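
        /*
         * Worked example for the disabled block above: with vmlpt_bits = 40
         * it would program
         *
         *      pta = (POW2(61) - POW2(40))     // VMLPT base at top of region
         *            | (0 << 8)                // short format
         *            | (40 << 2)               // table size
         *            | VHPT_ENABLE_BIT;        // walker enable
         *
         * Xen sets up its own VHPT in vhpt_init() instead, which is
         * presumably why this block is compiled out here.
         */
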
        ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif

        cpu = smp_processor_id();

#ifndef XEN
        /* mca handler uses cr.lid as key to pick the right entry */
        ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);

        /* insert this percpu data information into our list for MCA recovery purposes */
        ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
        /* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
        ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
        ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
        ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
        ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
        ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP

int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pgd, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
        }
        return 0;
}
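
/*
 * Note: the walker above builds the kernel page tables for the vmem_map
 * region itself: missing pgd/pmd levels are backed by fresh bootmem
 * pages on the proper node, and the leaf PTEs map zero-filled bootmem
 * pages that will hold the struct page descriptors for [start, end).
 */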

struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};

static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize "out of bounds" struct page elements that fit completely
         * on the same pages that were allocated for the "in bounds" elements because they
         * may be referenced later (and found to be "reserved").
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start));
        return 0;
}
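
/*
 * Note on the two adjustments above: map_start is rounded down and
 * map_end rounded up to whole struct page descriptors that share a
 * vmem_map page with the clipped range, so every descriptor on a page
 * that gets touched at all is initialized, even if it nominally lies
 * outside [start, end).  Such "out of bounds" descriptors may still be
 * examined later (e.g. found to be reserved), so they must not be left
 * as uninitialized garbage.
 */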

void
memmap_init (struct page *start, unsigned long size, int nid,
             unsigned long zone, unsigned long start_pfn)
{
        if (!vmem_map)
                memmap_init_zone(start, size, nid, zone, start_pfn);
        else {
                struct memmap_init_callback_data args;

                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}

int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return     (__get_user(byte, (char *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                        || (__get_user(byte, (char *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);
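
/*
 * Note: with a virtual mem_map a pfn is only valid if its struct page
 * descriptor is actually mapped, so the __get_user() probes above test
 * the first byte of the descriptor and, when the descriptor straddles a
 * page boundary, its last byte as well; the equality test on the two
 * PAGE_MASKed addresses short-circuits the second probe when both ends
 * fall on the same page.
 */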

int
find_largest_hole (u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes efi memmap table is ordered */

#ifdef XEN
        //printf("find_largest_hole: start=%lx,end=%lx,max_gap=%lx\n",start,end,*(unsigned long *)arg);
#endif
        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
#ifdef XEN
        //printf("find_largest_hole2: max_gap=%lx,last_end=%lx\n",*max_gap,last_end);
#endif
        return 0;
}
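
/*
 * Illustrative use (hypothetical caller): this callback is driven once
 * per EFI memory descriptor, in order, e.g.
 *
 *      u64 max_gap = 0;
 *      efi_memmap_walk(find_largest_hole, &max_gap);
 *
 * after which max_gap holds the largest gap between consecutive usable
 * ranges; a caller can use that to decide whether a virtual mem_map is
 * worth the trouble.
 */
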
#endif /* CONFIG_VIRTUAL_MEM_MAP */

static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
        unsigned long num_reserved = 0;
        unsigned long *count = arg;

        for (; start < end; start += PAGE_SIZE)
                if (PageReserved(virt_to_page(start)))
                        ++num_reserved;
        *count += num_reserved;
        return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);
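
/*
 * Usage note: booting with "nolwsys" on the command line sets the flag
 * above.  In the full Linux version of this file, mem_init() consults
 * nolwsys when patching the fsyscall table so that every fsyscall falls
 * back to the normal heavy-weight syscall path; in the trimmed copy
 * below the flag is currently not consumed.
 */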

void
mem_init (void)
{
#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed but _before_
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
        platform_dma_init();
#endif

}