debuggers.hg

view xen/arch/x86/domain_build.c @ 4629:6375127fdf23

bitkeeper revision 1.1311.1.1 (426641eeBv97w6sl983zxeR4Dc3Utg)

Cleanup page table handling. Add macros to access page table
entries, fixup plenty of places in the code to use the page
table types instead of "unsigned long".

Signed-off-by: Gerd Knorr <kraxel@bytesex.org>
Signed-off-by: michael.fetterman@cl.cam.ac.uk
author mafetter@fleming.research
date Wed Apr 20 11:50:06 2005 +0000 (2005-04-20)
parents 6bbac0aca316
children 1803018b3b05
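The macros referred to in the commit message wrap each page-table entry in a small struct and manipulate it through l1e_*/l2e_* style constructors and accessors rather than casting raw "unsigned long" values around. As a rough illustration of that pattern, here is a minimal self-contained sketch; the names echo the macros used in the file below, but the definitions are simplified stand-ins, not the actual Xen ones:

/* Illustrative sketch only -- a simplified imitation of the typed
 * page-table-entry pattern; these definitions are hypothetical, not the
 * real Xen ones. */
#include <stdio.h>

#define PAGE_SHIFT    12
#define _PAGE_PRESENT 0x001UL
#define _PAGE_RW      0x002UL

/* Wrapping the raw word in a struct keeps it from being silently mixed up
 * with ordinary "unsigned long" values such as virtual addresses. */
typedef struct { unsigned long l1; } l1_pgentry_t;

#define l1e_create_pfn(pfn, flags) \
    ((l1_pgentry_t) { ((unsigned long)(pfn) << PAGE_SHIFT) | (flags) })
#define l1e_get_pfn(e)   ((e).l1 >> PAGE_SHIFT)
#define l1e_get_flags(e) ((e).l1 & ((1UL << PAGE_SHIFT) - 1))

int main(void)
{
    l1_pgentry_t e = l1e_create_pfn(0x1234, _PAGE_PRESENT | _PAGE_RW);
    printf("pfn=%#lx flags=%#lx\n", l1e_get_pfn(e), l1e_get_flags(e));
    return 0;
}

The benefit shows throughout the listing below: an l1_pgentry_t can no longer be assigned to or from a plain address or frame number by accident, which is what the "fixup plenty of places" part of the change enforces.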
line source
/******************************************************************************
 * domain_build.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/elf.h>
#include <xen/kernel.h>
#include <asm/regs.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/shadow.h>

/* opt_dom0_mem: Kilobytes of memory allocated to domain 0. */
static unsigned int opt_dom0_mem = 0;
integer_unit_param("dom0_mem", opt_dom0_mem);

static unsigned int opt_dom0_shadow = 0;
boolean_param("dom0_shadow", opt_dom0_shadow);

static unsigned int opt_dom0_translate = 0;
boolean_param("dom0_translate", opt_dom0_translate);

#if defined(__i386__)
/* No ring-3 access in initial leaf page tables. */
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#elif defined(__x86_64__)
/* Allow ring-3 access in long mode as guest cannot use ring 1. */
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
#endif
/* Don't change these: Linux expects just these bits to be set. */
/* (And that includes the bogus _PAGE_DIRTY!) */
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)

#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
#define round_pgdown(_p) ((_p)&PAGE_MASK)
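
/*
 * Allocate the largest chunk of domain-heap memory available to @d: start
 * at the order that covers @max pages (rounded down so the chunk never
 * exceeds @max) and fall back to smaller orders until an allocation
 * succeeds, returning NULL if even a single page cannot be had.
 */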
static struct pfn_info *alloc_largest(struct domain *d, unsigned long max)
{
    struct pfn_info *page;
    unsigned int order = get_order(max * PAGE_SIZE);
    if ( (max & (max-1)) != 0 )
        order--;
    while ( (page = alloc_domheap_pages(d, order)) == NULL )
        if ( order-- == 0 )
            break;
    return page;
}

int construct_dom0(struct domain *d,
                   unsigned long _image_start, unsigned long image_len,
                   unsigned long _initrd_start, unsigned long initrd_len,
                   char *cmdline)
{
    char *dst;
    int i, rc;
    unsigned long pfn, mfn;
    unsigned long nr_pages;
    unsigned long nr_pt_pages;
    unsigned long alloc_start;
    unsigned long alloc_end;
    unsigned long count;
    struct pfn_info *page = NULL;
    start_info_t *si;
    struct exec_domain *ed = d->exec_domain[0];
#if defined(__i386__)
    char *image_start = (char *)_image_start; /* use lowmem mappings */
    char *initrd_start = (char *)_initrd_start; /* use lowmem mappings */
#elif defined(__x86_64__)
    char *image_start = __va(_image_start);
    char *initrd_start = __va(_initrd_start);
    l4_pgentry_t *l4tab = NULL, *l4start = NULL;
    l3_pgentry_t *l3tab = NULL, *l3start = NULL;
#endif
    l2_pgentry_t *l2tab = NULL, *l2start = NULL;
    l1_pgentry_t *l1tab = NULL, *l1start = NULL;

    /*
     * This fully describes the memory layout of the initial domain. All
     * *_start addresses are page-aligned, except v_start (and v_end) which
     * are superpage-aligned.
     */
    struct domain_setup_info dsi;
    unsigned long vinitrd_start;
    unsigned long vinitrd_end;
    unsigned long vphysmap_start;
    unsigned long vphysmap_end;
    unsigned long vstartinfo_start;
    unsigned long vstartinfo_end;
    unsigned long vstack_start;
    unsigned long vstack_end;
    unsigned long vpt_start;
    unsigned long vpt_end;
    unsigned long v_end;

    /* Machine address of next candidate page-table page. */
    unsigned long mpt_alloc;

    extern void physdev_init_dom0(struct domain *);
    extern void translate_l2pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn);

    /* Sanity! */
    if ( d->id != 0 )
        BUG();
    if ( test_bit(DF_CONSTRUCTED, &d->d_flags) )
        BUG();

    memset(&dsi, 0, sizeof(struct domain_setup_info));
    dsi.image_addr = (unsigned long)image_start;
    dsi.image_len = image_len;

    printk("*** LOADING DOMAIN 0 ***\n");

    /* By default DOM0 is allocated all available memory. */
    d->max_pages = ~0U;
    if ( (nr_pages = opt_dom0_mem >> (PAGE_SHIFT - 10)) == 0 )
        nr_pages = avail_domheap_pages() +
            ((initrd_len + PAGE_SIZE - 1) >> PAGE_SHIFT) +
            ((image_len + PAGE_SIZE - 1) >> PAGE_SHIFT);
    if ( (page = alloc_largest(d, nr_pages)) == NULL )
        panic("Not enough RAM for DOM0 reservation.\n");
    alloc_start = page_to_phys(page);
    alloc_end = alloc_start + (d->tot_pages << PAGE_SHIFT);

    if ( (rc = parseelfimage(&dsi)) != 0 )
        return rc;

    /* Align load address to 4MB boundary. */
    dsi.v_start &= ~((1UL<<22)-1);

    /*
     * Why do we need this? The number of page-table frames depends on the
     * size of the bootstrap address space. But the size of the address space
     * depends on the number of page-table frames (since each one is mapped
     * read-only). We have a pair of simultaneous equations in two unknowns,
     * which we solve by exhaustive search.
     */
    vinitrd_start = round_pgup(dsi.v_end);
    vinitrd_end = vinitrd_start + initrd_len;
    vphysmap_start = round_pgup(vinitrd_end);
    vphysmap_end = vphysmap_start + (nr_pages * sizeof(u32));
    vpt_start = round_pgup(vphysmap_end);
    for ( nr_pt_pages = 2; ; nr_pt_pages++ )
    {
        vpt_end = vpt_start + (nr_pt_pages * PAGE_SIZE);
        vstartinfo_start = vpt_end;
        vstartinfo_end = vstartinfo_start + PAGE_SIZE;
        vstack_start = vstartinfo_end;
        vstack_end = vstack_start + PAGE_SIZE;
        v_end = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
        if ( (v_end - vstack_end) < (512UL << 10) )
            v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
#if defined(__i386__)
        if ( (((v_end - dsi.v_start + ((1UL<<L2_PAGETABLE_SHIFT)-1)) >>
               L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
            break;
#elif defined(__x86_64__)
#define NR(_l,_h,_s) \
    (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
     ((_l) & ~((1UL<<(_s))-1))) >> (_s))
        if ( (1 + /* # L4 */
              NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
              NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
              NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
             <= nr_pt_pages )
            break;
#endif
    }

    if ( (v_end - dsi.v_start) > (alloc_end - alloc_start) )
        panic("Insufficient contiguous RAM to build kernel image.\n");

    printk("PHYSICAL MEMORY ARRANGEMENT:\n"
           " Dom0 alloc.: %p->%p",
           alloc_start, alloc_end);
    if ( d->tot_pages < nr_pages )
        printk(" (%d pages to be allocated)",
               nr_pages - d->tot_pages);
    printk("\nVIRTUAL MEMORY ARRANGEMENT:\n"
           " Loaded kernel: %p->%p\n"
           " Init. ramdisk: %p->%p\n"
           " Phys-Mach map: %p->%p\n"
           " Page tables: %p->%p\n"
           " Start info: %p->%p\n"
           " Boot stack: %p->%p\n"
           " TOTAL: %p->%p\n",
           dsi.v_kernstart, dsi.v_kernend,
           vinitrd_start, vinitrd_end,
           vphysmap_start, vphysmap_end,
           vpt_start, vpt_end,
           vstartinfo_start, vstartinfo_end,
           vstack_start, vstack_end,
           dsi.v_start, v_end);
    printk(" ENTRY ADDRESS: %p\n", dsi.v_kernentry);

    if ( (v_end - dsi.v_start) > (nr_pages * PAGE_SIZE) )
    {
        printk("Initial guest OS requires too much space\n"
               "(%luMB is greater than %luMB limit)\n",
               (v_end-dsi.v_start)>>20, (nr_pages<<PAGE_SHIFT)>>20);
        return -ENOMEM;
    }

    mpt_alloc = (vpt_start - dsi.v_start) + alloc_start;

    SET_GDT_ENTRIES(ed, DEFAULT_GDT_ENTRIES);
    SET_GDT_ADDRESS(ed, DEFAULT_GDT_ADDRESS);

    /*
     * We're basically forcing default RPLs to 1, so that our "what privilege
     * level are we returning to?" logic works.
     */
    ed->arch.failsafe_selector = FLAT_KERNEL_CS;
    ed->arch.event_selector = FLAT_KERNEL_CS;
    ed->arch.kernel_ss = FLAT_KERNEL_SS;
    for ( i = 0; i < 256; i++ )
        ed->arch.traps[i].cs = FLAT_KERNEL_CS;

#if defined(__i386__)

    /*
     * Protect the lowest 1GB of memory. We use a temporary mapping there
     * from which we copy the kernel and ramdisk images.
     */
    if ( dsi.v_start < (1UL<<30) )
    {
        printk("Initial loading isn't allowed to lowest 1GB of memory.\n");
        return -EINVAL;
    }

    /* WARNING: The new domain must have its 'processor' field filled in! */
    l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
    memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
    l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        l2e_create_phys((unsigned long)l2start, __PAGE_HYPERVISOR);
    l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
        l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
    ed->arch.guest_table = mk_pagetable((unsigned long)l2start);

    l2tab += l2_table_offset(dsi.v_start);
    mfn = alloc_start >> PAGE_SHIFT;
    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
    {
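        /* Start a new L1 page table whenever the current one fills up:
         * l1tab is page-aligned both initially (NULL) and each time it
         * steps onto a fresh page. */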
        if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
        {
            l1start = l1tab = (l1_pgentry_t *)mpt_alloc;
            mpt_alloc += PAGE_SIZE;
            *l2tab = l2e_create_phys((unsigned long)l1start, L2_PROT);
            l2tab++;
            clear_page(l1tab);
            if ( count == 0 )
                l1tab += l1_table_offset(dsi.v_start);
        }
        *l1tab = l1e_create_pfn(mfn, L1_PROT);
        l1tab++;

        page = &frame_table[mfn];
        if ( !get_page_and_type(page, d, PGT_writable_page) )
            BUG();

        mfn++;
    }

    /* Pages that are part of page tables must be read only. */
    l2tab = l2start + l2_table_offset(vpt_start);
    l1start = l1tab = (l1_pgentry_t *)l2e_get_phys(*l2tab);
    l1tab += l1_table_offset(vpt_start);
    for ( count = 0; count < nr_pt_pages; count++ )
    {
        page = &frame_table[l1e_get_pfn(*l1tab)];
        if ( !opt_dom0_shadow )
            l1e_remove_flags(l1tab, _PAGE_RW);
        else
            if ( !get_page_type(page, PGT_writable_page) )
                BUG();

        if ( count == 0 )
        {
            page->u.inuse.type_info &= ~PGT_type_mask;
            page->u.inuse.type_info |= PGT_l2_page_table;

            /*
             * No longer writable: decrement the type_count.
             * Installed as CR3: increment both the ref_count and type_count.
             * Net: just increment the ref_count.
             */
            get_page(page, d); /* an extra ref because of readable mapping */

            /* Get another ref to L2 page so that it can be pinned. */
            if ( !get_page_and_type(page, d, PGT_l2_page_table) )
                BUG();
            set_bit(_PGT_pinned, &page->u.inuse.type_info);
        }
        else
        {
            page->u.inuse.type_info &= ~PGT_type_mask;
            page->u.inuse.type_info |= PGT_l1_page_table;
            page->u.inuse.type_info |=
                ((dsi.v_start>>L2_PAGETABLE_SHIFT)+(count-1))<<PGT_va_shift;

            /*
             * No longer writable: decrement the type_count.
             * This is an L1 page, installed in a validated L2 page:
             * increment both the ref_count and type_count.
             * Net: just increment the ref_count.
             */
            get_page(page, d); /* an extra ref because of readable mapping */
        }
        if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
            l1start = l1tab = (l1_pgentry_t *)l2e_get_phys(*++l2tab);
    }

#elif defined(__x86_64__)

    /* Overlap with Xen protected area? */
    if ( (dsi.v_start < HYPERVISOR_VIRT_END) &&
         (v_end > HYPERVISOR_VIRT_START) )
    {
        printk("DOM0 image overlaps with Xen private area.\n");
        return -EINVAL;
    }

    /* WARNING: The new domain must have its 'processor' field filled in! */
    phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
    l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
    memcpy(l4tab, &idle_pg_table[0], PAGE_SIZE);
    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
        l4e_create_phys(__pa(l4start), __PAGE_HYPERVISOR);
    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
        l4e_create_phys(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
    ed->arch.guest_table = mk_pagetable(__pa(l4start));

    l4tab += l4_table_offset(dsi.v_start);
    mfn = alloc_start >> PAGE_SHIFT;
    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
    {
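        /* Same on-demand scheme as the i386 code above, carried through the
         * extra levels: fresh L1, L2 and L3 tables are allocated whenever
         * the current table of that level has been filled. */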
        if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
        {
            phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
            l1start = l1tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
            clear_page(l1tab);
            if ( count == 0 )
                l1tab += l1_table_offset(dsi.v_start);
            if ( !((unsigned long)l2tab & (PAGE_SIZE-1)) )
            {
                phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
                l2start = l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
                clear_page(l2tab);
                if ( count == 0 )
                    l2tab += l2_table_offset(dsi.v_start);
                if ( !((unsigned long)l3tab & (PAGE_SIZE-1)) )
                {
                    phys_to_page(mpt_alloc)->u.inuse.type_info =
                        PGT_l3_page_table;
                    l3start = l3tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
                    clear_page(l3tab);
                    if ( count == 0 )
                        l3tab += l3_table_offset(dsi.v_start);
                    *l4tab = l4e_create_phys(__pa(l3start), L4_PROT);
                    l4tab++;
                }
                *l3tab = l3e_create_phys(__pa(l2start), L3_PROT);
                l3tab++;
            }
            *l2tab = l2e_create_phys(__pa(l1start), L2_PROT);
            l2tab++;
        }
        *l1tab = l1e_create_pfn(mfn, L1_PROT);
        l1tab++;

        page = &frame_table[mfn];
        if ( (page->u.inuse.type_info == 0) &&
             !get_page_and_type(page, d, PGT_writable_page) )
            BUG();

        mfn++;
    }

    /* Pages that are part of page tables must be read only. */
    l4tab = l4start + l4_table_offset(vpt_start);
    l3start = l3tab = l4e_to_l3e(*l4tab);
    l3tab += l3_table_offset(vpt_start);
    l2start = l2tab = l3e_to_l2e(*l3tab);
    l2tab += l2_table_offset(vpt_start);
    l1start = l1tab = l2e_to_l1e(*l2tab);
    l1tab += l1_table_offset(vpt_start);
    for ( count = 0; count < nr_pt_pages; count++ )
    {
        l1e_remove_flags(l1tab, _PAGE_RW);
        page = &frame_table[l1e_get_pfn(*l1tab)];

        /* Read-only mapping + PGC_allocated + page-table page. */
        page->count_info = PGC_allocated | 3;
        page->u.inuse.type_info |= PGT_validated | 1;

        /* Top-level p.t. is pinned. */
        if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_l4_page_table )
        {
            page->count_info += 1;
            page->u.inuse.type_info += 1 | PGT_pinned;
        }

        /* Iterate. */
        if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
        {
            if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
            {
                if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
                    l3start = l3tab = l4e_to_l3e(*++l4tab);
                l2start = l2tab = l3e_to_l2e(*l3tab);
            }
            l1start = l1tab = l2e_to_l1e(*l2tab);
        }
    }

#endif /* __x86_64__ */

    /* Mask all upcalls... */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
    d->shared_info->n_vcpu = smp_num_cpus;

    /* Set up monitor table */
    update_pagetables(ed);

    /* Install the new page tables. */
    local_irq_disable();
    write_ptbase(ed);

    /* Copy the OS image and free temporary buffer. */
    (void)loadelfimage(&dsi);

    init_domheap_pages(
        _image_start, (_image_start+image_len+PAGE_SIZE-1) & PAGE_MASK);

    /* Copy the initial ramdisk and free temporary buffer. */
    if ( initrd_len != 0 )
    {
        memcpy((void *)vinitrd_start, initrd_start, initrd_len);
        init_domheap_pages(
            _initrd_start, (_initrd_start+initrd_len+PAGE_SIZE-1) & PAGE_MASK);
    }

    d->next_io_page = max_page;

    /* Set up start info area. */
    si = (start_info_t *)vstartinfo_start;
    memset(si, 0, PAGE_SIZE);
    si->nr_pages = nr_pages;

    if ( opt_dom0_translate )
    {
        si->shared_info = d->next_io_page << PAGE_SHIFT;
        set_machinetophys(virt_to_phys(d->shared_info) >> PAGE_SHIFT,
                          d->next_io_page);
        d->next_io_page++;
    }
    else
        si->shared_info = virt_to_phys(d->shared_info);

    si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
    si->pt_base = vpt_start;
    si->nr_pt_frames = nr_pt_pages;
    si->mfn_list = vphysmap_start;

    /* Write the phys->machine and machine->phys table entries. */
    for ( pfn = 0; pfn < d->tot_pages; pfn++ )
    {
        mfn = pfn + (alloc_start>>PAGE_SHIFT);
#ifndef NDEBUG
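        /* Debug builds: hand out the frames beyond the bootstrap image in
         * reverse order, presumably so that guests which wrongly assume a
         * monotonic pfn->mfn mapping trip over it early. */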
#define REVERSE_START ((v_end - dsi.v_start) >> PAGE_SHIFT)
        if ( !opt_dom0_translate && (pfn > REVERSE_START) )
            mfn = (alloc_end>>PAGE_SHIFT) - (pfn - REVERSE_START);
#endif
        ((u32 *)vphysmap_start)[pfn] = mfn;
        machine_to_phys_mapping[mfn] = pfn;
    }
    while ( pfn < nr_pages )
    {
        if ( (page = alloc_largest(d, nr_pages - d->tot_pages)) == NULL )
            panic("Not enough RAM for DOM0 reservation.\n");
        while ( pfn < d->tot_pages )
        {
            mfn = page_to_pfn(page);
#ifndef NDEBUG
#define pfn (nr_pages - 1 - (pfn - ((alloc_end - alloc_start) >> PAGE_SHIFT)))
#endif
            ((u32 *)vphysmap_start)[pfn] = mfn;
            machine_to_phys_mapping[mfn] = pfn;
#undef pfn
            page++; pfn++;
        }
    }

    if ( initrd_len != 0 )
    {
        si->mod_start = vinitrd_start;
        si->mod_len = initrd_len;
        printk("Initrd len 0x%lx, start at 0x%p\n",
               si->mod_len, si->mod_start);
    }

    dst = (char *)si->cmd_line;
    if ( cmdline != NULL )
    {
        for ( i = 0; i < 255; i++ )
        {
            if ( cmdline[i] == '\0' )
                break;
            *dst++ = cmdline[i];
        }
    }
    *dst = '\0';

    /* Reinstate the caller's page tables. */
    write_ptbase(current);
    local_irq_enable();

#if defined(__i386__)
    /* Destroy low mappings - they were only for our convenience. */
    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
        if ( l2e_get_flags(l2start[i]) & _PAGE_PSE )
            l2start[i] = l2e_empty();
    zap_low_mappings(); /* Do the same for the idle page tables. */
#endif

    /* DOM0 gets access to everything. */
    physdev_init_dom0(d);

    set_bit(DF_CONSTRUCTED, &d->d_flags);

    new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start);

    if ( opt_dom0_shadow || opt_dom0_translate )
    {
        shadow_mode_enable(d, (opt_dom0_translate
                               ? SHM_enable | SHM_translate
                               : SHM_enable));
        if ( opt_dom0_translate )
        {
            /* Hmm, what does this do?
               Looks like it isn't portable across 32/64 bit and pae/non-pae ...
               -- kraxel */

            /* mafetter: This code is mostly a hack in order to be able to
             * test with dom0's which are running with shadow translate.
             * I expect we'll rip this out once we have a stable set of
             * domU clients which use the various shadow modes, but it's
             * useful to leave this here for now...
             */

            // map this domain's p2m table into current page table,
            // so that we can easily access it.
            //
            ASSERT( root_get_value(idle_pg_table[1]) == 0 );
            ASSERT( pagetable_val(d->arch.phys_table) );
            idle_pg_table[1] = root_create_phys(pagetable_val(d->arch.phys_table),
                                                __PAGE_HYPERVISOR);
            translate_l2pgtable(d, (l1_pgentry_t *)(1u << L2_PAGETABLE_SHIFT),
                                pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT);
            idle_pg_table[1] = root_empty();
            local_flush_tlb();
        }

        update_pagetables(ed); /* XXX SMP */
    }

    return 0;
}

int elf_sanity_check(Elf_Ehdr *ehdr)
{
    if ( !IS_ELF(*ehdr) ||
#if defined(__i386__)
         (ehdr->e_ident[EI_CLASS] != ELFCLASS32) ||
         (ehdr->e_machine != EM_386) ||
#elif defined(__x86_64__)
         (ehdr->e_ident[EI_CLASS] != ELFCLASS64) ||
         (ehdr->e_machine != EM_X86_64) ||
#endif
         (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) ||
         (ehdr->e_type != ET_EXEC) )
    {
        printk("DOM0 image is not a Xen-compatible Elf image.\n");
        return 0;
    }

    return 1;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */