debuggers.hg

tools/libxc/xc_netbsd_build.c @ 2626:1e99cd1cb3a3

bitkeeper revision 1.1159.1.199 (415eb976aT_IbGUyZQZwMzprjVdnPA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-nbsd

author  cl349@freefall.cl.cam.ac.uk
date    Sat Oct 02 14:21:42 2004 +0000 (2004-10-02)
parents aed97013f9fe 23bc5e8a9321

/******************************************************************************
 * xc_netbsd_build.c
 */

#include "xc_private.h"
#define ELFSIZE 32 /* XXX */
#include "xc_elf.h"
#include <zlib.h>

#ifdef DEBUG
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif
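
/*
 * DPRINTF takes a single parenthesised argument list so that the macro
 * can expand to a plain printf call when DEBUG is defined, e.g.
 * DPRINTF(("copied %d bytes\n", c)); the call sites below all follow
 * this double-parenthesis convention.
 */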

static int loadelfimage(gzFile, int, u32, unsigned long *, unsigned long,
                        unsigned long *, unsigned long *,
                        unsigned long *, unsigned long *);

#define ELFROUND (ELFSIZE / 8)

#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
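
/*
 * PTE flag combinations for the 32-bit (non-PAE) x86 page tables built
 * below: L1 (leaf) entries are marked present, writable and accessed;
 * L2 (page-directory) entries additionally carry the dirty and user bits.
 */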

static long get_tot_pages(int xc_handle, u32 domid)
{
    dom0_op_t op;
    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = (domid_t)domid;
    op.u.getdomaininfo.ctxt = NULL;
    return (do_dom0_op(xc_handle, &op) < 0) ?
        -1 : op.u.getdomaininfo.tot_pages;
}

static int get_pfn_list(int xc_handle,
                        u32 domid,
                        unsigned long *pfn_buf,
                        unsigned long max_pfns)
{
    dom0_op_t op;
    int ret;
    op.cmd = DOM0_GETMEMLIST;
    op.u.getmemlist.domain = (domid_t)domid;
    op.u.getmemlist.max_pfns = max_pfns;
    op.u.getmemlist.buffer = pfn_buf;
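
    /*
     * The hypervisor fills pfn_buf while the dom0 op is in flight, so
     * the buffer must stay resident in physical memory for the duration
     * of the call; hence the mlock/munlock pair around it.
     */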
    if ( mlock(pfn_buf, max_pfns * sizeof(unsigned long)) != 0 )
        return -1;

    ret = do_dom0_op(xc_handle, &op);

    (void)munlock(pfn_buf, max_pfns * sizeof(unsigned long));

    return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
}

static int setup_guestos(int xc_handle,
                         u32 dom,
                         gzFile kernel_gfd,
                         unsigned long tot_pages,
                         unsigned long *virt_startinfo_addr,
                         unsigned long *virt_load_addr,
                         full_execution_context_t *ctxt,
                         const char *cmdline,
                         unsigned long shared_info_frame,
                         unsigned int control_evtchn)
{
    l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
    unsigned long *page_array = NULL;
    int alloc_index, num_pt_pages;
    unsigned long l2tab;
    unsigned long l1tab;
    unsigned long count, pt_start;
    unsigned long symtab_addr = 0, symtab_len = 0;
    start_info_t *start_info;
    shared_info_t *shared_info;
    unsigned long ksize;
    mmu_t *mmu = NULL;
    int i;

    if ( (page_array = malloc(tot_pages * sizeof(unsigned long))) == NULL )
    {
        PERROR("Could not allocate memory");
        goto error_out;
    }

    if ( get_pfn_list(xc_handle, dom, page_array, tot_pages) != tot_pages )
    {
        PERROR("Could not get the page frame list");
        goto error_out;
    }

    if ( loadelfimage(kernel_gfd, xc_handle, dom, page_array, tot_pages,
                      virt_load_addr, &ksize, &symtab_addr, &symtab_len) )
        goto error_out;

    /* ksize is the kernel-image size in pages, rounded up. */

    alloc_index = tot_pages - 1;

    /* Count bottom-level PTs, rounding up. */
    num_pt_pages =
        (l1_table_offset(*virt_load_addr) + tot_pages + 1023) / 1024;

    /* We must also count the page directory. */
    num_pt_pages++;
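
    /*
     * Worked example (illustrative numbers only): loading at 0xc0100000
     * gives l1_table_offset(*virt_load_addr) == 0x100 == 256, so a 16MB
     * (4096-page) domain needs (256 + 4096 + 1023) / 1024 == 5 bottom-level
     * tables, plus the page directory: num_pt_pages == 6.
     */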

    /* Index of first PT page. */
    pt_start = tot_pages - num_pt_pages;

    /*
     * First allocate page for page dir. Allocation goes backwards from the end
     * of the allocated physical address space.
     */
    l2tab = page_array[alloc_index] << PAGE_SHIFT;
    alloc_index--;
    ctxt->pt_base = l2tab;

    if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
        goto error_out;

    /* Initialise the page tables. */
    if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                        PROT_READ|PROT_WRITE,
                                        l2tab >> PAGE_SHIFT)) == NULL )
        goto error_out;
    memset(vl2tab, 0, PAGE_SIZE);
    vl2e = &vl2tab[l2_table_offset(*virt_load_addr)];
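
    /*
     * Map every page of the domain. A fresh L1 table is taken from the
     * top of memory (moving downwards) each time vl1e crosses a page
     * boundary; pages at or above pt_start hold the page tables
     * themselves and so are mapped read-only. Each iteration also queues
     * a machine-to-physical (MMU_MACHPHYS_UPDATE) entry for the page.
     */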
    for ( count = 0; count < tot_pages; count++ )
    {
        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
        {
            l1tab = page_array[alloc_index--] << PAGE_SHIFT;
            if ( vl1tab != NULL )
                munmap(vl1tab, PAGE_SIZE);
            if ( (vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                                PROT_READ|PROT_WRITE,
                                                l1tab >> PAGE_SHIFT)) == NULL )
            {
                munmap(vl2tab, PAGE_SIZE);
                goto error_out;
            }
            memset(vl1tab, 0, PAGE_SIZE);
            vl1e = &vl1tab[l1_table_offset(*virt_load_addr +
                                           (count << PAGE_SHIFT))];
            *vl2e++ = l1tab | L2_PROT;
        }

        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
        if ( count >= pt_start )
            *vl1e &= ~_PAGE_RW;
        vl1e++;

        if ( add_mmu_update(xc_handle, mmu,
                            (page_array[count] << PAGE_SHIFT) |
                            MMU_MACHPHYS_UPDATE, count) )
        {
            munmap(vl1tab, PAGE_SIZE);
            munmap(vl2tab, PAGE_SIZE);
            goto error_out;
        }
    }

    munmap(vl1tab, PAGE_SIZE);
    munmap(vl2tab, PAGE_SIZE);

    /*
     * Pin down l2tab addr as page dir page - causes hypervisor to provide
     * correct protection for the page.
     */
    if ( add_mmu_update(xc_handle, mmu,
                        l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) )
        goto error_out;

    *virt_startinfo_addr =
        *virt_load_addr + ((alloc_index-1) << PAGE_SHIFT);
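
    /*
     * The start_info page is placed just below the page-table pages,
     * near the top of the domain's pseudo-physical memory; the initial
     * stack pointer is later set to this same address and grows down
     * from it.
     */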
    start_info = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_WRITE, page_array[alloc_index-1]);
    memset(start_info, 0, sizeof(*start_info));
    start_info->pt_base = *virt_load_addr + ((tot_pages-1) << PAGE_SHIFT);
    start_info->mod_start = symtab_addr;
    start_info->mod_len = symtab_len;
    start_info->nr_pages = tot_pages;
    start_info->shared_info = shared_info_frame << PAGE_SHIFT;
    start_info->flags = 0;
    start_info->domain_controller_evtchn = control_evtchn;
    strncpy(start_info->cmd_line, cmdline, MAX_CMDLINE);
    start_info->cmd_line[MAX_CMDLINE-1] = '\0';
    munmap(start_info, PAGE_SIZE);

    /* shared_info page starts its life empty. */
    shared_info = xc_map_foreign_range(
        xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
    memset(shared_info, 0, PAGE_SIZE);
    /* Mask all upcalls... */
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
    munmap(shared_info, PAGE_SIZE);

    /* Send the page update requests down to the hypervisor. */
    if ( finish_mmu_updates(xc_handle, mmu) )
        goto error_out;

    free(mmu);
    free(page_array);
    return 0;

 error_out:
    if ( mmu != NULL )
        free(mmu);
    if ( page_array != NULL )
        free(page_array);
    return -1;
}

int xc_netbsd_build(int xc_handle,
                    u32 domid,
                    const char *image_name,
                    const char *cmdline,
                    unsigned int control_evtchn)
{
    dom0_op_t launch_op, op;
    unsigned long load_addr;
    long tot_pages;
    int kernel_fd = -1;
    gzFile kernel_gfd = NULL;
    int rc, i;
    full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
    unsigned long virt_startinfo_addr;

    if ( (tot_pages = get_tot_pages(xc_handle, domid)) < 0 )
    {
        PERROR("Could not find total pages for domain");
        return 1;
    }

    kernel_fd = open(image_name, O_RDONLY);
    if ( kernel_fd < 0 )
    {
        PERROR("Could not open kernel image");
        return 1;
    }

    if ( (kernel_gfd = gzdopen(kernel_fd, "rb")) == NULL )
    {
        PERROR("Could not allocate decompression state for kernel image");
        close(kernel_fd);
        return 1;
    }

    if ( mlock(&st_ctxt, sizeof(st_ctxt)) )
    {
        PERROR("Unable to mlock ctxt");
        return 1;
    }

    op.cmd = DOM0_GETDOMAININFO;
    op.u.getdomaininfo.domain = (domid_t)domid;
    op.u.getdomaininfo.ctxt = ctxt;
    if ( (do_dom0_op(xc_handle, &op) < 0) ||
         ((u16)op.u.getdomaininfo.domain != domid) )
    {
        PERROR("Could not get info on domain");
        goto error_out;
    }
    if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
         (op.u.getdomaininfo.ctxt->pt_base != 0) )
    {
        ERROR("Domain is already constructed");
        goto error_out;
    }

    if ( setup_guestos(xc_handle, domid, kernel_gfd, tot_pages,
                       &virt_startinfo_addr,
                       &load_addr, &st_ctxt, cmdline,
                       op.u.getdomaininfo.shared_info_frame,
                       control_evtchn) < 0 )
    {
        ERROR("Error constructing guest OS");
        goto error_out;
    }

    if ( kernel_fd >= 0 )
        close(kernel_fd);
    if ( kernel_gfd )
        gzclose(kernel_gfd);

    ctxt->flags = 0;

    /*
     * Initial register values:
     *  DS,ES,FS,GS = FLAT_GUESTOS_DS
     *  CS:EIP      = FLAT_GUESTOS_CS:start_pc
     *  SS:ESP      = FLAT_GUESTOS_DS:start_stack
     *  ESI         = start_info
     *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
     *  EFLAGS      = IF | 2 (bit 1 is reserved and should always be 1)
     */
    ctxt->cpu_ctxt.ds = FLAT_GUESTOS_DS;
    ctxt->cpu_ctxt.es = FLAT_GUESTOS_DS;
    ctxt->cpu_ctxt.fs = FLAT_GUESTOS_DS;
    ctxt->cpu_ctxt.gs = FLAT_GUESTOS_DS;
    ctxt->cpu_ctxt.ss = FLAT_GUESTOS_DS;
    ctxt->cpu_ctxt.cs = FLAT_GUESTOS_CS;
    ctxt->cpu_ctxt.eip = load_addr;
    ctxt->cpu_ctxt.esp = virt_startinfo_addr;
    ctxt->cpu_ctxt.esi = virt_startinfo_addr;
    ctxt->cpu_ctxt.eflags = (1<<9) | 2; /* IF set; EFLAGS bit 1 is reserved, must be 1 */

    /* FPU is set up to default initial state. */
    memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

    /* Virtual IDT is empty at start-of-day. */
    for ( i = 0; i < 256; i++ )
    {
        ctxt->trap_ctxt[i].vector = i;
        ctxt->trap_ctxt[i].cs = FLAT_GUESTOS_CS;
    }
    ctxt->fast_trap_idx = 0;

    /* No LDT. */
    ctxt->ldt_ents = 0;

    /* Use the default Xen-provided GDT. */
    ctxt->gdt_ents = 0;

    /* Ring 1 stack is the initial stack. */
    ctxt->guestos_ss = FLAT_GUESTOS_DS;
    ctxt->guestos_esp = virt_startinfo_addr;

    /* No debugging. */
    memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));

    /* No callback handlers. */
    ctxt->event_callback_cs = FLAT_GUESTOS_CS;
    ctxt->event_callback_eip = 0;
    ctxt->failsafe_callback_cs = FLAT_GUESTOS_CS;
    ctxt->failsafe_callback_eip = 0;

    memset(&launch_op, 0, sizeof(launch_op));

    launch_op.u.builddomain.domain = (domid_t)domid;
    launch_op.u.builddomain.ctxt = ctxt;
    launch_op.cmd = DOM0_BUILDDOMAIN;
    rc = do_dom0_op(xc_handle, &launch_op);

    return rc;

 error_out:
    if ( kernel_fd >= 0 )
        close(kernel_fd);
    if ( kernel_gfd )
        gzclose(kernel_gfd);

    return -1;
}

#define MYSEEK_BUFSIZE 1024
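
/*
 * Emulate a forward seek on a gzFile by reading and discarding data.
 * Note the quirks: the whence argument is ignored, the offset is always
 * treated as relative to the current position (the callers below pass a
 * delta from their own running 'curpos' together with SEEK_SET), and
 * seeking backwards is not supported.
 */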
static off_t
myseek(gzFile gfd, off_t offset, int whence)
{
    unsigned char tmp[MYSEEK_BUFSIZE];
    int c;

    if ( offset < 0 )
    {
        ERROR("seek back not supported");
        return -1;
    }

    while ( offset != 0 )
    {
        c = offset;
        if ( c > MYSEEK_BUFSIZE )
            c = MYSEEK_BUFSIZE;
        if ( gzread(gfd, tmp, c) != c )
        {
            PERROR("Error seeking in image.");
            return -1;
        }
        offset -= c;
    }

    return 0; /* XXX */
}

/*
 * NetBSD memory layout:
 *
 * ----------------  *virt_load_addr = ehdr.e_entry (0xc0100000)
 * | kernel text  |
 * |              |
 * ----------------
 * | kernel data  |
 * |              |
 * ----------------
 * | kernel bss   |
 * |              |
 * ----------------  *symtab_addr
 * | symtab size  |  = *symtab_len
 * ----------------
 * | elf header   |  offsets to symbol sections mangled to be relative
 * |              |  to the header's location
 * ----------------
 * | sym section  |
 * | headers      |
 * ----------------
 * | sym sections |
 * |              |
 * ----------------  *symtab_addr + *symtab_len
 * | padding      |
 * ----------------  ehdr.e_entry + (*ksize << PAGE_SHIFT)
 */

#define IS_TEXT(p) (p.p_flags & PF_X)
#define IS_DATA(p) (p.p_flags & PF_W)
#define IS_BSS(p)  (p.p_filesz < p.p_memsz)
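
/*
 * Segment-classification heuristics: a loadable segment counts as text
 * if it is executable, as data if it is writable, and as having a BSS
 * tail if its in-memory size exceeds its on-disk size.
 */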

static int
loadelfimage(gzFile kernel_gfd, int xch, u32 dom, unsigned long *page_array,
             unsigned long tot_pages, unsigned long *virt_load_addr,
             unsigned long *ksize, unsigned long *symtab_addr,
             unsigned long *symtab_len)
{
    Elf_Ehdr ehdr;
    Elf_Phdr *phdr;
    Elf_Shdr *shdr;
    void *vaddr;
    char page[PAGE_SIZE], *p;
    unsigned long iva, maxva, symva;
    int c, curpos, h, i, ret, s;

    ret = -1;
    phdr = NULL;
    p = NULL;
    maxva = 0;

    if ( gzread(kernel_gfd, &ehdr, sizeof(Elf_Ehdr)) != sizeof(Elf_Ehdr) )
    {
        PERROR("Error reading kernel image ELF header.");
        goto out;
    }
    curpos = sizeof(Elf_Ehdr);

    if ( !IS_ELF(ehdr) )
    {
        PERROR("Image does not have an ELF header.");
        goto out;
    }

    *virt_load_addr = ehdr.e_entry;

    if ( (*virt_load_addr & (PAGE_SIZE-1)) != 0 )
    {
        ERROR("We can only deal with page-aligned load addresses");
        goto out;
    }

    if ( (*virt_load_addr + (tot_pages << PAGE_SHIFT)) >
         HYPERVISOR_VIRT_START )
    {
        ERROR("Cannot map all domain memory without hitting Xen space");
        goto out;
    }

    phdr = malloc(ehdr.e_phnum * sizeof(Elf_Phdr));
    if ( phdr == NULL )
    {
        ERROR("Cannot allocate memory for Elf_Phdrs");
        goto out;
    }

    if ( myseek(kernel_gfd, ehdr.e_phoff - curpos, SEEK_SET) == -1 )
    {
        ERROR("Seek to program header failed");
        goto out;
    }
    curpos = ehdr.e_phoff;

    if ( gzread(kernel_gfd, phdr, ehdr.e_phnum * sizeof(Elf_Phdr)) !=
         ehdr.e_phnum * sizeof(Elf_Phdr) )
    {
        PERROR("Error reading kernel image ELF program header.");
        goto out;
    }
    curpos += ehdr.e_phnum * sizeof(Elf_Phdr);

    /* Copy run-time 'load' segments that are writeable and/or executable. */
    for ( h = 0; h < ehdr.e_phnum; h++ )
    {
        if ( (phdr[h].p_type != PT_LOAD) ||
             ((phdr[h].p_flags & (PF_W|PF_X)) == 0) )
            continue;

        if ( IS_TEXT(phdr[h]) || IS_DATA(phdr[h]) )
        {
            if ( myseek(kernel_gfd, phdr[h].p_offset - curpos,
                        SEEK_SET) == -1 )
            {
                ERROR("Seek to segment failed");
                goto out;
            }
            curpos = phdr[h].p_offset;
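
            /*
             * Copy the segment into the domain one chunk at a time:
             * each chunk c extends at most to the next page boundary of
             * iva, so a single xc_map_foreign_range() mapping suffices
             * per chunk.
             */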
            for ( iva = phdr[h].p_vaddr;
                  iva < phdr[h].p_vaddr + phdr[h].p_filesz;
                  iva += c )
            {
                c = PAGE_SIZE - (iva & (PAGE_SIZE - 1));
                if ( iva + c > phdr[h].p_vaddr + phdr[h].p_filesz )
                    c = phdr[h].p_vaddr + phdr[h].p_filesz - iva;
                if ( gzread(kernel_gfd, page, c) != c )
                {
                    PERROR("Error reading kernel image page.");
                    goto out;
                }
                curpos += c;
                vaddr = xc_map_foreign_range(
                    xch, dom, PAGE_SIZE, PROT_WRITE,
                    page_array[(iva - *virt_load_addr) >> PAGE_SHIFT]);
                if ( vaddr == NULL )
                {
                    ERROR("Couldn't map guest memory");
                    goto out;
                }
                DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)iva,
                         vaddr + (iva & (PAGE_SIZE - 1)), c));
                memcpy(vaddr + (iva & (PAGE_SIZE - 1)), page, c);
                munmap(vaddr, PAGE_SIZE);
            }

            if ( phdr[h].p_vaddr + phdr[h].p_filesz > maxva )
                maxva = phdr[h].p_vaddr + phdr[h].p_filesz;
        }

        if ( IS_BSS(phdr[h]) )
        {
            /* XXX maybe clear phdr[h].p_memsz bytes from
               phdr[h].p_vaddr + phdr[h].p_filesz ??? */
            if ( phdr[h].p_vaddr + phdr[h].p_memsz > maxva )
                maxva = phdr[h].p_vaddr + phdr[h].p_memsz;
            DPRINTF(("bss from %p to %p, maxva %p\n",
                     (void *)(phdr[h].p_vaddr + phdr[h].p_filesz),
                     (void *)(phdr[h].p_vaddr + phdr[h].p_memsz),
                     (void *)maxva));
        }
    }

    p = malloc(sizeof(int) + sizeof(Elf_Ehdr) +
               ehdr.e_shnum * sizeof(Elf_Shdr));
    if ( p == NULL )
    {
        ERROR("Cannot allocate memory for Elf_Shdrs");
        goto out;
    }

    shdr = (Elf_Shdr *)(p + sizeof(int) + sizeof(Elf_Ehdr));

    if ( myseek(kernel_gfd, ehdr.e_shoff - curpos, SEEK_SET) == -1 )
    {
        ERROR("Seek to section header table failed");
        goto out;
    }
    curpos = ehdr.e_shoff;

    if ( gzread(kernel_gfd, shdr, ehdr.e_shnum * sizeof(Elf_Shdr)) !=
         ehdr.e_shnum * sizeof(Elf_Shdr) )
    {
        PERROR("Error reading kernel image ELF section header table.");
        goto out;
    }
    curpos += ehdr.e_shnum * sizeof(Elf_Shdr);

    maxva = (maxva + ELFROUND - 1) & ~(ELFROUND - 1);
    symva = maxva;
    maxva += sizeof(int);
    *symtab_addr = maxva;
    *symtab_len = 0;
    maxva += sizeof(Elf_Ehdr) + ehdr.e_shnum * sizeof(Elf_Shdr);
    maxva = (maxva + ELFROUND - 1) & ~(ELFROUND - 1);
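
    /*
     * Reserve room above the kernel for the symbol-table blob shown in
     * the layout diagram above: a length word (the "symtab size" int),
     * a copy of the ELF header, the section header table, and then the
     * symbol/string sections themselves.
     */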

    /* Copy kernel string / symbol tables into physical memory. */
    for ( h = 0; h < ehdr.e_shnum; h++ )
    {
        if ( shdr[h].sh_type == SHT_STRTAB )
        {
            /* Look for a symtab @i linked to strtab @h. */
            for ( i = 0; i < ehdr.e_shnum; i++ )
                if ( (shdr[i].sh_type == SHT_SYMTAB) &&
                     (shdr[i].sh_link == h) )
                    break;
            /* Skip strtab @h if we found no symtab @i referencing it. */
            if ( i == ehdr.e_shnum )
            {
                shdr[h].sh_offset = 0;
                continue;
            }
        }

        if ( (shdr[h].sh_type == SHT_STRTAB) ||
             (shdr[h].sh_type == SHT_SYMTAB) )
        {
            if ( myseek(kernel_gfd, shdr[h].sh_offset - curpos,
                        SEEK_SET) == -1 )
            {
                ERROR("Seek to symbol section failed");
                goto out;
            }
            curpos = shdr[h].sh_offset;

            /* Mangled to be based on ELF header location. */
            shdr[h].sh_offset = maxva - *symtab_addr;

            DPRINTF(("copy section %d, size 0x%x\n", h, shdr[h].sh_size));
            for ( i = 0; i < shdr[h].sh_size; i += c, maxva += c )
            {
                c = PAGE_SIZE - (maxva & (PAGE_SIZE - 1));
                if ( c > (shdr[h].sh_size - i) )
                    c = shdr[h].sh_size - i;
                if ( gzread(kernel_gfd, page, c) != c )
                {
                    PERROR("Error reading kernel image page.");
                    goto out;
                }
                curpos += c;

                vaddr = xc_map_foreign_range(
                    xch, dom, PAGE_SIZE, PROT_WRITE,
                    page_array[(maxva - *virt_load_addr) >> PAGE_SHIFT]);
                if ( vaddr == NULL )
                {
                    ERROR("Couldn't map guest memory");
                    goto out;
                }
                DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)maxva,
                         vaddr + (maxva & (PAGE_SIZE - 1)), c));
                memcpy(vaddr + (maxva & (PAGE_SIZE - 1)), page, c);
                munmap(vaddr, PAGE_SIZE);
            }

            *symtab_len += shdr[h].sh_size;
            maxva = (maxva + ELFROUND - 1) & ~(ELFROUND - 1);
        }
        shdr[h].sh_name = 0; /* Name is NULL. */
    }

    if ( *symtab_len == 0 )
    {
        DPRINTF(("no symbol table\n"));
        *symtab_addr = 0;
        ret = 0;
        goto out;
    }

    DPRINTF(("sym header va %p from %p/%p size %x/%x\n", (void *)symva,
             shdr, p, ehdr.e_shnum * sizeof(Elf_Shdr),
             ehdr.e_shnum * sizeof(Elf_Shdr) + sizeof(Elf_Ehdr)));
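
    /*
     * Craft a minimal in-memory ELF header for the symbol blob: it
     * carries no program headers, its section header table sits
     * immediately after it, and the string-table index is left
     * undefined since section names are discarded.
     */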
    ehdr.e_phoff = 0;
    ehdr.e_shoff = sizeof(Elf_Ehdr);
    ehdr.e_phentsize = 0;
    ehdr.e_phnum = 0;
    ehdr.e_shstrndx = SHN_UNDEF;
    memcpy(p + sizeof(int), &ehdr, sizeof(Elf_Ehdr));
    *(int *)p = maxva - *symtab_addr;

    /* Copy the total length, crafted ELF header and section header table. */
    s = sizeof(int) + sizeof(Elf_Ehdr) + ehdr.e_shnum * sizeof(Elf_Shdr);
    for ( i = 0; i < s; i += c, symva += c )
    {
        c = PAGE_SIZE - (symva & (PAGE_SIZE - 1));
        if ( c > s - i )
            c = s - i;
        vaddr = xc_map_foreign_range(
            xch, dom, PAGE_SIZE, PROT_WRITE,
            page_array[(symva - *virt_load_addr) >> PAGE_SHIFT]);
        if ( vaddr == NULL )
        {
            ERROR("Couldn't map guest memory");
            goto out;
        }
        DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)symva,
                 vaddr + (symva & (PAGE_SIZE - 1)), c));
        memcpy(vaddr + (symva & (PAGE_SIZE - 1)), p + i, c);
        munmap(vaddr, PAGE_SIZE);
    }

    *symtab_len = maxva - *symtab_addr;

    ret = 0;

 out:
    if ( ret == 0 )
    {
        maxva = (maxva + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
        *ksize = (maxva - *virt_load_addr) >> PAGE_SHIFT;

        DPRINTF(("virt_addr %p, kpages 0x%lx, symtab_addr %p, symtab_len %p\n",
                 (void *)*virt_load_addr, *ksize, (void *)*symtab_addr,
                 (void *)*symtab_len));
    }

    if ( phdr != NULL )
        free(phdr);
    if ( p != NULL )
        free(p);
    return ret;
}