debuggers.hg: view of linux-2.6.10-rc2-xen-sparse/arch/xen/i386/mm/init.c @ 3289:a169836882cb

bitkeeper revision 1.1159.170.59 (41b4c2fdJ2gj_BWy27Vj3ptayZp_yg)

summary:  sync w/ head.
author:   cl349@arcadians.cl.cam.ac.uk
date:     Mon Dec 06 20:37:17 2004 +0000
parents:  f65b65977b19
children: (none)

source:

/*
 * linux/arch/i386/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm-xen/hypervisor.h>
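
/* Reserve 128MB (128 << 20 bytes) of address space for vmalloc mappings. */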
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
        set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
        if (pmd_table != pmd_offset(pgd, 0))
                BUG();
#else
        pmd_table = pmd_offset(pgd, 0);
#endif

        return pmd_table;
}
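
/*
 * Note: in the non-PAE (folded) case, pmd_offset(pgd, 0) simply
 * reinterprets the pgd entry as the pmd, so no allocation is needed;
 * presumably that is why only the PAE branch touches the bootmem
 * allocator above.
 */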

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                make_page_readonly(page_table);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                if (page_table != pte_offset_kernel(pmd, 0))
                        BUG();

                return page_table;
        }

        return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD_NO_HV) && (vaddr != end); pgd++, pgd_idx++) {
                if (pgd_none(*pgd))
                        one_md_table_init(pgd);

                pmd = pmd_offset(pgd, vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
                        if (pmd_none(*pmd))
                                one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_stext && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx, pte_ofs;

        unsigned long max_ram_pfn = xen_start_info.nr_pages;
        if (max_ram_pfn > max_low_pfn)
                max_ram_pfn = max_low_pfn;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;
        pmd_idx = pmd_index(PAGE_OFFSET);
        pte_ofs = pte_index(PAGE_OFFSET);

        for (; pgd_idx < PTRS_PER_PGD_NO_HV; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;
                pmd += pmd_idx;
                for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
                        unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /* Map with big pages if possible, otherwise create normal page tables. */
                        if (cpu_has_pse) {
                                unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(address) || is_kernel_text(address2))
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
                                pfn += PTRS_PER_PTE;
                        } else {
                                pte = one_page_table_init(pmd);

                                pte += pte_ofs;
                                /* XEN: Only map initial RAM allocation. */
                                for (; pte_ofs < PTRS_PER_PTE && pfn < max_ram_pfn; pte++, pfn++, pte_ofs++) {
                                        if (pte_present(*pte))
                                                continue;
                                        if (is_kernel_text(address))
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                                        else
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
                                }
                                pte_ofs = 0;
                        }
                        flush_page_update_queue();
                }
                pmd_idx = 0;
        }
}
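
/*
 * On flawed Pentium Pro steppings, RAM in the physical range
 * 0x70000000-0x7003ffff (pfns 0x70000-0x7003f) is unsafe to use;
 * presumably this is the erratum that ppro_with_ram_bug() detects.
 * Such pages are kept reserved in one_highpage_init() below.
 */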
static inline int page_kills_ppro(unsigned long pagenr)
{
        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
                return 1;
        return 0;
}

extern int is_available_memory(efi_memory_desc_t *);
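
/*
 * page_is_ram(): decide whether a given pfn is usable RAM. Consult the
 * EFI memory map when booted via EFI, otherwise the BIOS e820 map.
 */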
static inline int page_is_ram(unsigned long pagenr)
{
        int i;
        unsigned long addr, end;

        if (efi_enabled) {
                efi_memory_desc_t *md;

                for (i = 0; i < memmap.nr_map; i++) {
                        md = &memmap.map[i];
                        if (!is_available_memory(md))
                                continue;
                        addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                        end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

                        if ((pagenr >= addr) && (pagenr < end))
                                return 1;
                }
                return 0;
        }

        for (i = 0; i < e820.nr_map; i++) {

                if (e820.map[i].type != E820_RAM)       /* not usable memory */
                        continue;
                /*
                 * !!!FIXME!!! Some BIOSen report areas as RAM that
                 * are not. Notably the 640->1Mb area. We need a sanity
                 * check here.
                 */
                addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);
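
/*
 * Walk the kernel page tables (pgd -> pmd -> pte) for a given virtual
 * address; used below to cache the pte backing the first kmap slot.
 */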
#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pmd = pmd_offset(pgd, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
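
/*
 * XEN: only pfns below xen_start_info.nr_pages are backed by machine
 * memory in the domain's initial allocation, so only those highmem pages
 * are actually handed to the page allocator; higher pfns are initialised
 * but not freed.
 */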
void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                set_bit(PG_highmem, &page->flags);
                set_page_count(page, 1);
                if (pfn < xen_start_info.nr_pages)
                        __free_page(page);
                totalhigh_pages++;
        } else
                SetPageReserved(page);
}

#ifndef CONFIG_DISCONTIGMEM
void __init set_highmem_pages_init(int bad_ppro)
{
        int pfn;
        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
                one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        totalram_pages += totalhigh_pages;
}
#else
extern void set_highmem_pages_init(int);
#endif /* !CONFIG_DISCONTIGMEM */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */
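
/*
 * Variables rather than constants so that _PAGE_GLOBAL can be OR'd in
 * at boot once PGE support has been detected (see pagetable_init()).
 */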
unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifndef CONFIG_DISCONTIGMEM
#define remap_numa_kva() do {} while (0)
#else
extern void __init remap_numa_kva(void);
#endif

static void __init pagetable_init (void)
{
        unsigned long vaddr;
        pgd_t *old_pgd = (pgd_t *)xen_start_info.pt_base;
        pgd_t *new_pgd = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
        int i;
        /* Init entries of the first-level page table to the zero page */
        for (i = 0; i < PTRS_PER_PGD; i++)
                set_pgd(new_pgd + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

        /* Enable PSE if available */
        if (cpu_has_pse) {
                set_in_cr4(X86_CR4_PSE);
        }

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        /*
         * Switch to the proper mm_init page directory. Initialise it from
         * the current page directory, write-protect the new page directory,
         * then switch to it. We clean up by write-enabling and then freeing
         * the old page directory.
         */
        memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
        make_page_readonly(new_pgd);
        queue_pgd_pin(__pa(new_pgd));
        load_cr3(new_pgd);
        queue_pgd_unpin(__pa(old_pgd));
        __flush_tlb_all(); /* implicit flush */
        make_page_writable(old_pgd);
        flush_page_update_queue();
        free_bootmem(__pa(old_pgd), PAGE_SIZE);

        kernel_physical_mapping_init(new_pgd);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        page_table_range_init(vaddr, 0, new_pgd);

        permanent_kmaps_init(new_pgd);

#ifdef CONFIG_X86_PAE
        /*
         * Add low memory identity-mappings - SMP needs it when
         * starting up on an AP from real-mode. In the non-PAE
         * case we already have these mappings through head.S.
         * All user-space mappings are explicitly cleared after
         * SMP startup.
         */
        new_pgd[0] = new_pgd[USER_PTRS_PER_PGD];
#endif
}

#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings (void)
{
        int i;

        save_pg_dir();

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        flush_tlb_all();
}

#ifndef CONFIG_DISCONTIGMEM
void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned int /*max_dma,*/ high, low;

        /*
         * XEN: Our notion of "DMA memory" is fake when running over Xen.
         * We simply put all RAM in the DMA zone so that those drivers which
         * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
         * Those drivers that *do* require lowmem are screwed anyway when
         * running over Xen!
         */
        /*max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;*/
        low = max_low_pfn;
        high = highend_pfn;

        /*if (low < max_dma)*/
                zones_size[ZONE_DMA] = low;
        /*else*/ {
                /*zones_size[ZONE_DMA] = max_dma;*/
                /*zones_size[ZONE_NORMAL] = low - max_dma;*/
#ifdef CONFIG_HIGHMEM
                zones_size[ZONE_HIGHMEM] = high - low;
#endif
        }
        free_area_init(zones_size);
}
#else
extern void zone_sizes_init(void);
#endif /* !CONFIG_DISCONTIGMEM */

static int disable_nx __initdata = 0;
u64 __supported_pte_mask = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!strncmp(str, "on", 2) && cpu_has_nx) {
                __supported_pte_mask |= _PAGE_NX;
                disable_nx = 0;
        } else if (!strncmp(str, "off", 3)) {
                disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        }
        return 1;
}

__setup("noexec=", noexec_setup);

int nx_enabled = 0;
#ifdef CONFIG_X86_PAE
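
/*
 * CPUID leaf 0x80000001, EDX bit 20 advertises NX (Execute Disable).
 * When present and not disabled via "noexec=off", enable it through the
 * EFER MSR and permit _PAGE_NX in the supported pte mask.
 */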
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
        pte_t *pte;
        int ret = 1;

        if (!nx_enabled)
                goto out;

        pte = lookup_address(vaddr);
        BUG_ON(!pte);

        if (!pte_exec_kernel(*pte))
                ret = 0;

        if (enable)
                pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
        else
                pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
        __flush_tlb_all();
out:
        return ret;
}

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_XEN_PHYSDEV_ACCESS
        int i;
#endif

#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk("NX (Execute Disable) protection: active\n");
#endif

        pagetable_init();

#ifdef CONFIG_X86_PAE
        /*
         * We will bail out later - printk doesn't work right now so
         * the user would just see a hanging kernel.
         */
        if (cpu_has_pae)
                set_in_cr4(X86_CR4_PAE);
#endif
        __flush_tlb_all();

        kmap_init();
        zone_sizes_init();

        /* Switch to the real shared_info page, and clear the dummy page. */
        flush_page_update_queue();
        set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
        HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
        memset(empty_zero_page, 0, sizeof(empty_zero_page));

#ifdef CONFIG_XEN_PHYSDEV_ACCESS
        /* Set up mapping of the lower 1MB of ISA space. */
        for (i = 0; i < NR_FIX_ISAMAPS; i++)
                if (xen_start_info.flags & SIF_PRIVILEGED)
                        set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
                else
                        set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
                                         virt_to_machine(empty_zero_page));
#endif
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

void __init test_wp_bit(void)
{
        printk("Checking if this processor honours the WP bit even in supervisor mode... ");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk("Ok.\n");
        }
}

#ifndef CONFIG_DISCONTIGMEM
static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
        highmem_start_page = pfn_to_page(highstart_pfn);
        max_mapnr = num_physpages = highend_pfn;
#else
        max_mapnr = num_physpages = max_low_pfn;
#endif
}
#define __free_all_bootmem() free_all_bootmem()
#else
#define __free_all_bootmem() free_all_bootmem_node(NODE_DATA(0))
extern void set_max_mapnr_init(void);
#endif /* !CONFIG_DISCONTIGMEM */

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        extern int ppro_with_ram_bug(void);
        int codesize, reservedpages, datasize, initsize;
        int tmp;
        int bad_ppro;

#ifndef CONFIG_DISCONTIGMEM
        if (!mem_map)
                BUG();
#endif

        bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                        PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE);
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
#endif

        /* this will put all low memory onto the freelists */
        totalram_pages += __free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init(bad_ppro);

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
                );

#ifdef CONFIG_X86_PAE
        if (!cpu_has_pae)
                panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        /*
         * Subtle. SMP is doing its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}

kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;
kmem_cache_t *pte_cache;
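
/*
 * Slab caches for the page-table levels: each object is a full table
 * (PTRS_PER_xxx entries), aligned to its own size so a table never
 * straddles a page boundary. The pmd cache is only needed when the pmd
 * level is not folded, i.e. under PAE.
 */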
void __init pgtable_cache_init(void)
{
        pte_cache = kmem_cache_create("pte",
                                PTRS_PER_PTE*sizeof(pte_t),
                                PTRS_PER_PTE*sizeof(pte_t),
                                0,
                                pte_ctor,
                                pte_dtor);
        if (!pte_cache)
                panic("pgtable_cache_init(): Cannot create pte cache");
        if (PTRS_PER_PMD > 1) {
                pmd_cache = kmem_cache_create("pmd",
                                        PTRS_PER_PMD*sizeof(pmd_t),
                                        PTRS_PER_PMD*sizeof(pmd_t),
                                        0,
                                        pmd_ctor,
                                        NULL);
                if (!pmd_cache)
                        panic("pgtable_cache_init(): cannot create pmd cache");
        }
        pgd_cache = kmem_cache_create("pgd",
                                PTRS_PER_PGD*sizeof(pgd_t),
                                PTRS_PER_PGD*sizeof(pgd_t),
                                0,
                                pgd_ctor,
                                pgd_dtor);
        if (!pgd_cache)
                panic("pgtable_cache_init(): Cannot create pgd cache");
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
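
/*
 * How the test works: flag starts at 1; the write at label 1 targets the
 * read-only FIX_WP_TEST page. If WP is honoured in supervisor mode, the
 * write faults and the exception table entry resumes at label 2 with flag
 * still 1. If the write silently succeeds, the xorl clears flag to 0.
 */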
static int noinline do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0,%1      \n"
                "1:     movb %1,%0      \n"
                "       xorl %2,%2      \n"
                "2:                     \n"
                ".section __ex_table,\"a\"\n"
                "       .align 4        \n"
                "       .long 1b,2b     \n"
                ".previous              \n"
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}
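
/*
 * Release the pages holding __init code and data back to the allocator.
 * Each page is poisoned with 0xcc (the int3 opcode) first, so a stale
 * jump into freed init code hits a breakpoint trap instead of executing
 * garbage.
 */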
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                memset((void *)addr, 0xcc, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif