debuggers.hg — view of extras/mini-os/arch/x86/mm.c @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this

author    Mukesh Rathor
date      Mon May 19 15:34:57 2008 -0700
children  5c0bf00e371d
/*
 ****************************************************************************
 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: mm.c
 *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
 *     Changes: Grzegorz Milos
 *
 *        Date: Aug 2003, changes Aug 2005
 *
 * Environment: Xen Minimal OS
 * Description: memory management related functions
 *              contains buddy page allocator from Xen.
 *
 ****************************************************************************
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <os.h>
#include <hypervisor.h>
#include <mm.h>
#include <types.h>
#include <lib.h>
#include <xmalloc.h>
#include <xen/memory.h>

#ifdef MM_DEBUG
#define DEBUG(_f, _a...) \
    printk("MINI_OS(file=mm.c, line=%d) " _f "\n", __LINE__, ## _a)
#else
#define DEBUG(_f, _a...)    ((void)0)
#endif

unsigned long *phys_to_machine_mapping;
unsigned long mfn_zero;
extern char stack[];
extern void page_walk(unsigned long virt_addr);
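
/*
 * Turn the frame at *pt_pfn into a page-table page of the given level:
 * clear it, remap it read-only, pin it with the matching
 * MMUEXT_PIN_Lx_TABLE command, and hook it into the parent table
 * (prev_l_mfn) at the given offset.  *pt_pfn is advanced past the
 * consumed frame.
 */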
void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
                  unsigned long offset, unsigned long level)
{
    pgentry_t *tab = (pgentry_t *)start_info.pt_base;
    unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
    unsigned long prot_e, prot_t, pincmd;
    mmu_update_t mmu_updates[1];
    struct mmuext_op pin_request;

    prot_e = prot_t = pincmd = 0;
    DEBUG("Allocating new L%lu pt frame for pt_pfn=%lx, "
          "prev_l_mfn=%lx, offset=%lx",
          level, *pt_pfn, prev_l_mfn, offset);

    /* We need to clear the page, otherwise we might fail to map it
       as a page table page */
    memset((unsigned long*)pfn_to_virt(*pt_pfn), 0, PAGE_SIZE);

    switch ( level )
    {
    case L1_FRAME:
        prot_e = L1_PROT;
        prot_t = L2_PROT;
        pincmd = MMUEXT_PIN_L1_TABLE;
        break;
    case L2_FRAME:
        prot_e = L2_PROT;
        prot_t = L3_PROT;
        pincmd = MMUEXT_PIN_L2_TABLE;
        break;
#if defined(__x86_64__)
    case L3_FRAME:
        prot_e = L3_PROT;
        prot_t = L4_PROT;
        pincmd = MMUEXT_PIN_L3_TABLE;
        break;
#endif
    default:
        printk("new_pt_frame() called with invalid level number %lu\n", level);
        do_exit();
        break;
    }

    /* Update the entry */
#if defined(__x86_64__)
    tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
#endif
    tab = pte_to_virt(tab[l3_table_offset(pt_page)]);

    mmu_updates[0].ptr = ((pgentry_t)tab[l2_table_offset(pt_page)] & PAGE_MASK) +
                         sizeof(pgentry_t) * l1_table_offset(pt_page);
    mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
                         (prot_e & ~_PAGE_RW);
    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
    {
        printk("PTE for new page table page could not be updated\n");
        do_exit();
    }

    /* Pin the page to provide correct protection */
    pin_request.cmd = pincmd;
    pin_request.arg1.mfn = pfn_to_mfn(*pt_pfn);
    if(HYPERVISOR_mmuext_op(&pin_request, 1, NULL, DOMID_SELF) < 0)
    {
        printk("ERROR: pinning failed\n");
        do_exit();
    }

    /* Now fill the new page table page with entries.
       Update the page directory as well. */
    mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
    mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
    {
        printk("ERROR: mmu_update failed\n");
        do_exit();
    }

    *pt_pfn += 1;
}
/* Checks whether a pagetable frame is needed at 'level' (i.e. whether it
   wasn't already allocated by Xen to map the hypervisor) */
static int need_pt_frame(unsigned long virt_address, int level)
{
    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
#if defined(__x86_64__)
    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
#else
    unsigned long hyp_virt_end = 0xffffffff;
#endif

    /* In general frames will _not_ be needed if they were already
       allocated to map the hypervisor into our VA space */
#if defined(__x86_64__)
    if(level == L3_FRAME)
    {
        if(l4_table_offset(virt_address) >=
           l4_table_offset(hyp_virt_start) &&
           l4_table_offset(virt_address) <=
           l4_table_offset(hyp_virt_end))
            return 0;
        return 1;
    } else
#endif

    if(level == L2_FRAME)
    {
#if defined(__x86_64__)
        if(l4_table_offset(virt_address) >=
           l4_table_offset(hyp_virt_start) &&
           l4_table_offset(virt_address) <=
           l4_table_offset(hyp_virt_end))
#endif
            if(l3_table_offset(virt_address) >=
               l3_table_offset(hyp_virt_start) &&
               l3_table_offset(virt_address) <=
               l3_table_offset(hyp_virt_end))
                return 0;

        return 1;
    } else

    /* Always need l1 frames */
    if(level == L1_FRAME)
        return 1;

    printk("ERROR: Unknown frame level %d, hypervisor %lx,%lx\n",
           level, hyp_virt_start, hyp_virt_end);
    return -1;
}
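
/*
 * Map all physical memory from the end of the Xen-provided mappings up to
 * *max_pfn, allocating new page-table frames starting at *start_pfn as
 * needed.  Entries are batched in mmu_updates[] and flushed through
 * HYPERVISOR_mmu_update().  On return, *start_pfn points past the last
 * frame consumed for page tables.
 */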
void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
{
    unsigned long start_address, end_address;
    unsigned long pfn_to_map, pt_pfn = *start_pfn;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;

    pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;

    if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
    {
        printk("WARNING: Mini-OS trying to use Xen virtual space. "
               "Truncating memory from %ldMB to ",
               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
        *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
        printk("%ldMB\n",
               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
    }

    start_address = (unsigned long)pfn_to_virt(pfn_to_map);
    end_address = (unsigned long)pfn_to_virt(*max_pfn);

    /* We worked out the virtual memory range to map, now mapping loop */
    printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);

    while(start_address < end_address)
    {
        tab = (pgentry_t *)start_info.pt_base;
        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

#if defined(__x86_64__)
        offset = l4_table_offset(start_address);
        /* Need new L3 pt frame */
        if(!(start_address & L3_MASK))
            if(need_pt_frame(start_address, L3_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
        offset = l3_table_offset(start_address);
        /* Need new L2 pt frame */
        if(!(start_address & L2_MASK))
            if(need_pt_frame(start_address, L2_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
        offset = l2_table_offset(start_address);
        /* Need new L1 pt frame */
        if(!(start_address & L1_MASK))
            if(need_pt_frame(start_address, L1_FRAME))
                new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);

        page = tab[offset];
        mfn = pte_to_mfn(page);
        offset = l1_table_offset(start_address);

        mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
        mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
        count++;
        if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
        {
            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
            {
                printk("PTE could not be updated\n");
                do_exit();
            }
            count = 0;
        }
        start_address += PAGE_SIZE;
    }

    *start_pfn = pt_pfn;
}
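
/*
 * Clear _PAGE_RW on every page between 'text' and 'etext', skipping the
 * shared_info page, then flush the TLB.  Used to write-protect the kernel
 * text and read-only data.
 */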
extern void shared_info;
static void set_readonly(void *text, void *etext)
{
    unsigned long start_address = ((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
    unsigned long end_address = (unsigned long) etext;
    static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
    pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
    unsigned long offset;
    int count = 0;

    printk("setting %p-%p readonly\n", text, etext);

    while (start_address + PAGE_SIZE <= end_address) {
        tab = (pgentry_t *)start_info.pt_base;
        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

#if defined(__x86_64__)
        offset = l4_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
        offset = l3_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
        offset = l2_table_offset(start_address);
        page = tab[offset];
        mfn = pte_to_mfn(page);
        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);

        offset = l1_table_offset(start_address);

        if (start_address != (unsigned long)&shared_info) {
            mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
            mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
            count++;
        } else
            printk("skipped %lx\n", start_address);

        start_address += PAGE_SIZE;

        if (count == L1_PAGETABLE_ENTRIES || start_address + PAGE_SIZE > end_address)
        {
            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
            {
                printk("PTE could not be updated\n");
                do_exit();
            }
            count = 0;
        }
    }

    {
        mmuext_op_t op = {
            .cmd = MMUEXT_TLB_FLUSH_ALL,
        };
        int count;
        HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
    }
}
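
/*
 * Simple read-after-write memory test: write a pattern derived from each
 * word's own address into [start_add, end_add), then read everything back
 * and report any mismatches.
 */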
void mem_test(unsigned long *start_add, unsigned long *end_add)
{
    unsigned long mask = 0x10000;
    unsigned long *pointer;

    for(pointer = start_add; pointer < end_add; pointer++)
    {
        if(!(((unsigned long)pointer) & 0xfffff))
        {
            printk("Writing to %lx\n", (unsigned long)pointer);
            page_walk((unsigned long)pointer);
        }
        *pointer = (unsigned long)pointer & ~mask;
    }

    for(pointer = start_add; pointer < end_add; pointer++)
    {
        if(((unsigned long)pointer & ~mask) != *pointer)
            printk("Read error at 0x%lx. Read: 0x%lx, should read 0x%lx\n",
                   (unsigned long)pointer,
                   *pointer,
                   ((unsigned long)pointer & ~mask));
    }
}
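
/*
 * Walk the page tables for 'addr' and return a pointer to its L1 entry,
 * or NULL if any intermediate table is not present.
 */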
static pgentry_t *get_pgt(unsigned long addr)
{
    unsigned long mfn;
    pgentry_t *tab;
    unsigned offset;

    tab = (pgentry_t *)start_info.pt_base;
    mfn = virt_to_mfn(start_info.pt_base);

#if defined(__x86_64__)
    offset = l4_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
#endif
    offset = l3_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
    offset = l2_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT))
        return NULL;
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
    offset = l1_table_offset(addr);
    return &tab[offset];
}
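
/*
 * Like get_pgt(), but allocate and hook in any missing intermediate
 * page-table frames (via alloc_page()/new_pt_frame()) so that a valid
 * L1 entry pointer can always be returned.
 */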
static pgentry_t *need_pgt(unsigned long addr)
{
    unsigned long mfn;
    pgentry_t *tab;
    unsigned long pt_pfn;
    unsigned offset;

    tab = (pgentry_t *)start_info.pt_base;
    mfn = virt_to_mfn(start_info.pt_base);

#if defined(__x86_64__)
    offset = l4_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
#endif
    offset = l3_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);
    offset = l2_table_offset(addr);
    if (!(tab[offset] & _PAGE_PRESENT)) {
        pt_pfn = virt_to_pfn(alloc_page());
        new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
    }
    ASSERT(tab[offset] & _PAGE_PRESENT);
    mfn = pte_to_mfn(tab[offset]);
    tab = mfn_to_virt(mfn);

    offset = l1_table_offset(addr);
    return &tab[offset];
}
static unsigned long demand_map_area_start;
#ifdef __x86_64__
#define DEMAND_MAP_PAGES ((128ULL << 30) / PAGE_SIZE)
#else
#define DEMAND_MAP_PAGES ((2ULL << 30) / PAGE_SIZE)
#endif

#ifdef HAVE_LIBC
unsigned long heap, brk, heap_mapped, heap_end;
#ifdef __x86_64__
#define HEAP_PAGES ((128ULL << 30) / PAGE_SIZE)
#else
#define HEAP_PAGES ((1ULL << 30) / PAGE_SIZE)
#endif
#endif
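
/*
 * Reserve (but do not yet map) the demand-mapping virtual address range
 * just above the given pfn and, when built with HAVE_LIBC, a heap range
 * after it.
 */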
void arch_init_demand_mapping_area(unsigned long cur_pfn)
{
    cur_pfn++;

    demand_map_area_start = (unsigned long) pfn_to_virt(cur_pfn);
    cur_pfn += DEMAND_MAP_PAGES;
    printk("Demand map pfns at %lx-%lx.\n", demand_map_area_start,
           (unsigned long) pfn_to_virt(cur_pfn));

#ifdef HAVE_LIBC
    cur_pfn++;
    heap_mapped = brk = heap = (unsigned long) pfn_to_virt(cur_pfn);
    cur_pfn += HEAP_PAGES;
    heap_end = (unsigned long) pfn_to_virt(cur_pfn);
    printk("Heap resides at %lx-%lx.\n", brk, heap_end);
#endif
}
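
/*
 * Map 'n' frames described by f[] (indexed by 'stride', offset by
 * 'increment') at virtual address 'addr' with the given protection, for
 * domain 'id'.  Updates are issued in batches sized so the on-stack
 * mmu_update_t array stays within half the stack (MAP_BATCH).  With
 * 'may_fail' set, frames are mapped one at a time and failures are
 * marked in f[] instead of aborting.
 */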
#define MAP_BATCH ((STACK_SIZE / 2) / sizeof(mmu_update_t))
void do_map_frames(unsigned long addr,
                   unsigned long *f, unsigned long n, unsigned long stride,
                   unsigned long increment, domid_t id, int may_fail, unsigned long prot)
{
    pgentry_t *pgt = NULL;
    unsigned long done = 0;
    unsigned long i;
    int rc;

    while (done < n) {
        unsigned long todo;

        if (may_fail)
            todo = 1;
        else
            todo = n - done;

        if (todo > MAP_BATCH)
            todo = MAP_BATCH;

        {
            mmu_update_t mmu_updates[todo];

            for (i = 0; i < todo; i++, addr += PAGE_SIZE, pgt++) {
                if (!pgt || !(addr & L1_MASK))
                    pgt = need_pgt(addr);
                mmu_updates[i].ptr = virt_to_mach(pgt);
                mmu_updates[i].val = ((f[(done + i) * stride] + (done + i) * increment) << PAGE_SHIFT) | prot;
            }

            rc = HYPERVISOR_mmu_update(mmu_updates, todo, NULL, id);
            if (rc < 0) {
                if (may_fail)
                    f[done * stride] |= 0xF0000000;
                else {
                    printk("Map %ld (%lx, ...) at %lx failed: %d.\n",
                           todo, f[done * stride] + done * increment, addr, rc);
                    do_exit();
                }
            }
        }

        done += todo;
    }
}
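
/*
 * Find a free, suitably aligned run of n pages in the demand-map area,
 * map the given frames there with do_map_frames(), and return the chosen
 * virtual address (or NULL if no free run was found).
 */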
void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
                    unsigned long increment, unsigned long alignment, domid_t id,
                    int may_fail, unsigned long prot)
{
    unsigned long x;
    unsigned long y = 0;

    /* Find a properly aligned run of n contiguous frames */
    for (x = 0; x <= DEMAND_MAP_PAGES - n; x = (x + y + 1 + alignment - 1) & ~(alignment - 1)) {
        unsigned long addr = demand_map_area_start + x * PAGE_SIZE;
        pgentry_t *pgt = get_pgt(addr);
        for (y = 0; y < n; y++, addr += PAGE_SIZE) {
            if (!(addr & L1_MASK))
                pgt = get_pgt(addr);
            if (pgt) {
                if (*pgt & _PAGE_PRESENT)
                    break;
                pgt++;
            }
        }
        if (y == n)
            break;
    }
    if (y != n) {
        printk("Failed to find %ld frames!\n", n);
        return NULL;
    }

    /* Found it at x. Map it in. */
    do_map_frames(demand_map_area_start + x * PAGE_SIZE, f, n, stride, increment, id, may_fail, prot);

    return (void *)(unsigned long)(demand_map_area_start + x * PAGE_SIZE);
}
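
/*
 * Release the bootstrap shared_info frame via free_physical_pages() and
 * turn the first text page into the CoW zero page: clear it, record its
 * mfn in mfn_zero, and remove its virtual mapping.
 */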
static void clear_bootstrap(void)
{
    xen_pfn_t mfns[] = { virt_to_mfn(&shared_info) };
    int n = sizeof(mfns)/sizeof(*mfns);
    pte_t nullpte = { };

    /* Use first page as the CoW zero page */
    memset(&_text, 0, PAGE_SIZE);
    mfn_zero = pfn_to_mfn((unsigned long) &_text);
    if (HYPERVISOR_update_va_mapping((unsigned long) &_text, nullpte, UVMF_INVLPG))
        printk("Unable to unmap first page\n");

    if (free_physical_pages(mfns, n) != n)
        printk("Unable to free bootstrap pages\n");
}
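
/*
 * Build the 3-level pfn-to-mfn frame list (512 entries per level) and
 * publish its root frame and max_pfn in the shared info page, so the
 * hypervisor and tools can translate this domain's pfns.
 */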
void arch_init_p2m(unsigned long max_pfn)
{
#define L1_P2M_SHIFT    9
#define L2_P2M_SHIFT    18
#define L3_P2M_SHIFT    27
#define L1_P2M_ENTRIES  (1 << L1_P2M_SHIFT)
#define L2_P2M_ENTRIES  (1 << (L2_P2M_SHIFT - L1_P2M_SHIFT))
#define L3_P2M_ENTRIES  (1 << (L3_P2M_SHIFT - L2_P2M_SHIFT))
#define L1_P2M_MASK     (L1_P2M_ENTRIES - 1)
#define L2_P2M_MASK     (L2_P2M_ENTRIES - 1)
#define L3_P2M_MASK     (L3_P2M_ENTRIES - 1)

    unsigned long *l1_list, *l2_list, *l3_list;
    unsigned long pfn;

    l3_list = (unsigned long *)alloc_page();
    for(pfn=0; pfn<max_pfn; pfn++)
    {
        if(!(pfn % (L1_P2M_ENTRIES * L2_P2M_ENTRIES)))
        {
            l2_list = (unsigned long*)alloc_page();
            if((pfn >> L3_P2M_SHIFT) > 0)
            {
                printk("Error: Too many pfns.\n");
                do_exit();
            }
            l3_list[(pfn >> L2_P2M_SHIFT)] = virt_to_mfn(l2_list);
        }
        if(!(pfn % (L1_P2M_ENTRIES)))
        {
            l1_list = (unsigned long*)alloc_page();
            l2_list[(pfn >> L1_P2M_SHIFT) & L2_P2M_MASK] =
                virt_to_mfn(l1_list);
        }

        l1_list[pfn & L1_P2M_MASK] = pfn_to_mfn(pfn);
    }
    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
        virt_to_mfn(l3_list);
    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
}
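
/*
 * Top-level memory setup: report the kernel layout, compute the first
 * usable pfn (after the Xen-provided page tables plus 3 special pages),
 * build the page tables, release the bootstrap pages, and write-protect
 * the text/rodata region.  Returns the usable pfn range via the pointers.
 */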
void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p)
{
    unsigned long start_pfn, max_pfn;

    printk(" _text:       %p\n", &_text);
    printk(" _etext:      %p\n", &_etext);
    printk(" _erodata:    %p\n", &_erodata);
    printk(" _edata:      %p\n", &_edata);
    printk(" stack start: %p\n", stack);
    printk(" _end:        %p\n", &_end);

    /* First page follows page table pages and 3 more pages (store page etc) */
    start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
                start_info.nr_pt_frames + 3;
    max_pfn = start_info.nr_pages;

    printk(" start_pfn:   %lx\n", start_pfn);
    printk(" max_pfn:     %lx\n", max_pfn);

    build_pagetable(&start_pfn, &max_pfn);
    clear_bootstrap();
    set_readonly(&_text, &_erodata);

    *start_pfn_p = start_pfn;
    *max_pfn_p = max_pfn;
}