debuggers.hg: linux-2.4.28-xen-sparse/arch/xen/mm/init.c @ 3289:a169836882cb

bitkeeper revision 1.1159.170.59 (41b4c2fdJ2gj_BWy27Vj3ptayZp_yg)

sync w/ head.

author   cl349@arcadians.cl.cam.ac.uk
date     Mon Dec 06 20:37:17 2004 +0000 (2004-12-06)
parents  f65b65977b19 6f0846972a4c
children fd0d4d8e6193 0cbf74c61595
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/apic.h>
#include <asm/tlb.h>

mmu_gather_t mmu_gathers[NR_CPUS];
unsigned long highstart_pfn, highend_pfn;
static unsigned long totalram_pages;
static unsigned long totalhigh_pages;
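
/*
 * Trim the page-table quicklists: once the cache grows past 'high'
 * entries, free pgd/pte pages back to the allocator until it drops
 * to 'low'.  Returns the number of pages freed.
 */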
int do_check_pgt_cache(int low, int high)
{
        int freed = 0;
        if (pgtable_cache_size > high) {
                do {
                        if (!QUICKLIST_EMPTY(pgd_quicklist)) {
                                free_pgd_slow(get_pgd_fast());
                                freed++;
                        }
                        if (!QUICKLIST_EMPTY(pte_quicklist)) {
                                pte_free_slow(pte_alloc_one_fast(NULL, 0));
                                freed++;
                        }
                } while (pgtable_cache_size > low);
        }
        return freed;
}

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

#if CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr) \
        pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

void show_mem(void)
{
        int i, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
                if (PageHighMem(mem_map+i))
                        highmem++;
                if (PageReserved(mem_map+i))
                        reserved++;
                else if (PageSwapCache(mem_map+i))
                        cached++;
                else if (page_count(mem_map+i))
                        shared += page_count(mem_map+i) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n", highmem);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
        printk("%ld pages in page table cache\n", pgtable_cache_size);
        show_buffers();
}

/* References to section boundaries */
extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;
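
/*
 * Set a single kernel pte.  Under Xen the update goes through the
 * hypervisor's L1 update queue rather than a direct write, since the
 * guest's page tables are validated by Xen and mapped read-only.
 */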
static inline void set_pte_phys (unsigned long vaddr,
                                 unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = init_mm.pgd + __pgd_offset(vaddr);
        if (pgd_none(*pgd)) {
                printk("PAE BUG #00!\n");
                return;
        }
        pmd = pmd_offset(pgd, vaddr);
        if (pmd_none(*pmd)) {
                printk("PAE BUG #01!\n");
                return;
        }
        pte = pte_offset(pmd, vaddr);

        queue_l1_entry_update(pte, phys | pgprot_val(prot));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys,
                  pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, flags);
}

void clear_fixmap(enum fixed_addresses idx)
{
        set_pte_phys(__fix_to_virt(idx), 0, __pgprot(0));
}
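
/*
 * Build the page-table structure (pgds/pmds/ptes) covering the range
 * [start, end).  Each newly allocated pte page is write-protected via
 * the hypervisor update queue before being hooked into the tree: Xen
 * requires that pages used as page tables are not writably mapped.
 */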
static void __init fixrange_init (unsigned long start,
                                  unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd, *kpgd;
        pmd_t *pmd, *kpmd;
        pte_t *pte, *kpte;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
#if CONFIG_X86_PAE
                if (pgd_none(*pgd)) {
                        pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                        set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
                        if (pmd != pmd_offset(pgd, 0))
                                printk("PAE BUG #02!\n");
                }
                pmd = pmd_offset(pgd, vaddr);
#else
                pmd = (pmd_t *)pgd;
#endif
                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                clear_page(pte);
                                kpgd = pgd_offset_k((unsigned long)pte);
                                kpmd = pmd_offset(kpgd, (unsigned long)pte);
                                kpte = pte_offset(kpmd, (unsigned long)pte);
                                queue_l1_entry_update(kpte,
                                        (*(unsigned long *)kpte)&~_PAGE_RW);

                                set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
                        }
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }

        XEN_flush_page_update_queue();
}
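
/*
 * Build the kernel's linear mapping of pseudo-physical memory from
 * PAGE_OFFSET up to min(nr_pages given by Xen, max_low_pfn), skipping
 * any pmd entries already filled in for us (by the domain builder),
 * then create the fixmap and, with HIGHMEM, permanent-kmap pagetables.
 */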
static void __init pagetable_init (void)
{
        unsigned long vaddr, end, ram_end;
        pgd_t *kpgd, *pgd, *pgd_base;
        int i, j, k;
        pmd_t *kpmd, *pmd;
        pte_t *kpte, *pte, *pte_base;

        end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
        ram_end = (unsigned long)__va(xen_start_info.nr_pages * PAGE_SIZE);
        if ( ram_end > end )
                ram_end = end;

        pgd_base = init_mm.pgd;
        i = __pgd_offset(PAGE_OFFSET);
        pgd = pgd_base + i;

        for (; i < PTRS_PER_PGD; pgd++, i++) {
                vaddr = i*PGDIR_SIZE;
                if (vaddr >= end)
                        break;
                pmd = (pmd_t *)pgd;
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
                        vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
                        if (vaddr >= end)
                                break;

                        /* Filled in for us already? */
                        if ( pmd_val(*pmd) & _PAGE_PRESENT )
                                continue;

                        pte_base = pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                        clear_page(pte_base);

                        for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
                                vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
                                if (vaddr >= ram_end)
                                        break;
                                *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
                        }
                        kpgd = pgd_offset_k((unsigned long)pte_base);
                        kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
                        kpte = pte_offset(kpmd, (unsigned long)pte_base);
                        queue_l1_entry_update(kpte,
                                (*(unsigned long *)kpte)&~_PAGE_RW);
                        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
                        XEN_flush_page_update_queue();
                }
        }

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);

#if CONFIG_HIGHMEM
        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, init_mm.pgd);

        pgd = init_mm.pgd + __pgd_offset(vaddr);
        pmd = pmd_offset(pgd, vaddr);
        pte = pte_offset(pmd, vaddr);
        pkmap_page_table = pte;
#endif
}
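
/*
 * Carve low memory into ZONE_DMA (below MAX_DMA_ADDRESS) and
 * ZONE_NORMAL; anything above max_low_pfn becomes ZONE_HIGHMEM.
 */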
static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned int max_dma, high, low;

        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        low = max_low_pfn;
        high = highend_pfn;

        if (low < max_dma)
                zones_size[ZONE_DMA] = low;
        else {
                zones_size[ZONE_DMA] = max_dma;
                zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
                zones_size[ZONE_HIGHMEM] = high - low;
#endif
        }
        free_area_init(zones_size);
}

void __init paging_init(void)
{
        pagetable_init();

        zone_sizes_init();
        /* Switch to the real shared_info page, and clear the dummy page. */
        set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
        HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
        memset(empty_zero_page, 0, sizeof(empty_zero_page));

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
}
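
/*
 * Every page in the domain's pseudo-physical address space is RAM, so
 * there are no BIOS/ISA holes to skip as on native i386.
 */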
static inline int page_is_ram (unsigned long pagenr)
{
        return 1;
}

#ifdef CONFIG_HIGHMEM
void __init one_highpage_init(struct page *page, int free_page)
{
        ClearPageReserved(page);
        set_bit(PG_highmem, &page->flags);
        atomic_set(&page->count, 1);
        if ( free_page )
                __free_page(page);
        totalhigh_pages++;
}
#endif /* CONFIG_HIGHMEM */

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
        highmem_start_page = mem_map + highstart_pfn;
        max_mapnr = num_physpages = highend_pfn;
        num_mappedpages = max_low_pfn;
#else
        max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
#endif
}
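
/*
 * Release boot memory to the page allocator.  Only pages actually
 * populated by Xen are freed: highmem pages beyond nr_pages appear in
 * the mem_map but are initialized without being placed on a freelist.
 */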
static int __init free_pages_init(void)
{
#ifdef CONFIG_HIGHMEM
        int bad_ppro = 0;
#endif
        int reservedpages, pfn;

        /*
         * Add only boot_pfn pages of low memory to the free list:
         * max_low_pfn may be sized for pages yet to be allocated from
         * the hypervisor, or it may be set to override the
         * xen_start_info amount of memory.
         */
        int boot_pfn = min(xen_start_info.nr_pages, max_low_pfn);

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (pfn = 0; pfn < boot_pfn; pfn++) {
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
                        reservedpages++;
        }
#ifdef CONFIG_HIGHMEM
        for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
                one_highpage_init((struct page *) (mem_map + pfn),
                                  (pfn < xen_start_info.nr_pages));
        totalram_pages += totalhigh_pages;
#endif
        return reservedpages;
}

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;

        if (!mem_map)
                BUG();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        set_max_mapnr_init();

        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = free_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               max_mapnr << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        boot_cpu_data.wp_works_ok = 1;
}
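
/*
 * Reclaim the memory occupied by __init code and data once boot is
 * complete; these pages were reserved at boot, so give each a count
 * of one before freeing it.
 */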
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
        printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n",
                (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk (KERN_INFO "Freeing initrd memory: %ldk freed\n",
                        (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif

void si_meminfo(struct sysinfo *val)
{
        val->totalram = max_pfn;
        val->sharedram = 0;
        val->freeram = nr_free_pages();
        val->bufferram = atomic_read(&buffermem_pages);
        val->totalhigh = max_pfn-max_low_pfn;
        val->freehigh = nr_free_highpages();
        val->mem_unit = PAGE_SIZE;
        return;
}

#if defined(CONFIG_X86_PAE)
struct kmem_cache_s *pae_pgd_cachep;
void __init pgtable_cache_init(void)
{
        /*
         * PAE pgds must be 16-byte aligned:
         */
        pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
                SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
        if (!pae_pgd_cachep)
                panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
}
#endif /* CONFIG_X86_PAE */