debuggers.hg

view tools/libxc/xc_ptrace_core.c @ 19826:2f9e1348aa98

x86_64: allow more vCPU-s per guest

Since the shared info layout is fixed, guests are required to use
VCPUOP_register_vcpu_info prior to booting any vCPU beyond the
traditional limit of 32.

MAX_VIRT_CPUS, being an implementation detail of the hypervisor, is no
longer being exposed in the public headers.

The tools changes are clearly incomplete (and done only so things
would
build again), and the current state of the tools (using scalar
variables all over the place to represent vCPU bitmaps) very likely
doesn't permit booting DomU-s with more than the traditional number of
vCPU-s. Testing of the extended functionality was done with Dom0 (96
vCPU-s, as well as 128 vCPU-s out of which the kernel elected - by way
of a simple kernel side patch - to use only some, resulting in a
sparse
bitmap).

ia64 changes only to make things build, and build-tested only (and the
tools part only as far as the build would go without encountering
unrelated problems in the blktap code).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 18 10:14:16 2009 +0100 (2009-06-18)
parents 75fa5c00a100
children 3ffdb094c2c0
line source
1 /*
2 * New elf format support.
3 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
4 * VA Linux Systems Japan K.K.
5 */
7 #include <sys/ptrace.h>
8 #include <sys/wait.h>
9 #include "xc_private.h"
10 #include "xg_private.h"
11 #include "xc_ptrace.h"
12 #include <time.h>
13 #include <inttypes.h>
/* Number of vCPU slots currently allocated in the arrays below;
 * grown on demand by ptrace_core_get_vcpu_ctxt(). */
static unsigned int max_nr_vcpus;
/* Per-vCPU page-table-walk cache: the guest cr3 value, plus the
 * physical address and mmap'ed virtual address of the most recently
 * used cr3 / page-directory-entry / page-table frame. */
static unsigned long *cr3;
static unsigned long *cr3_phys;
static unsigned long **cr3_virt;
static unsigned long *pde_phys;
static unsigned long **pde_virt;
static unsigned long *page_phys;
static unsigned long **page_virt;
24 static vcpu_guest_context_t *
25 ptrace_core_get_vcpu_ctxt(unsigned int nr_vcpus)
26 {
27 if (nr_vcpus > max_nr_vcpus) {
28 void *new;
30 #define REALLOC(what) \
31 new = realloc(what, nr_vcpus * sizeof(*what)); \
32 if (!new) \
33 return NULL; \
34 memset(what + max_nr_vcpus, 0, \
35 (nr_vcpus - max_nr_vcpus) * sizeof(*what)); \
36 what = new
38 REALLOC(cr3);
39 REALLOC(cr3_phys);
40 REALLOC(cr3_virt);
41 REALLOC(pde_phys);
42 REALLOC(pde_virt);
43 REALLOC(page_phys);
44 REALLOC(page_virt);
46 #undef REALLOC
47 max_nr_vcpus = nr_vcpus;
48 }
50 return &xc_ptrace_get_vcpu_ctxt(nr_vcpus)->c;
51 }
53 /* Leave the code for the old format as is. */
54 /* --- compatible layer for old format ------------------------------------- */
55 /* XXX application state */
/* Old-format (non-ELF) core file state, filled in once by
 * xc_waitdomain_core_compat(). */
static int current_is_hvm_compat = 0;          /* magic was XC_CORE_MAGIC_HVM? */
static long nr_pages_compat = 0;               /* page count; != 0 => initialized */
static unsigned long *p2m_array_compat = NULL; /* pfn -> gmfn table from the dump */
static unsigned long *m2p_array_compat = NULL; /* inverse map, 1M (1<<20) entries */
static unsigned long pages_offset_compat;      /* file offset of the page data */
63 /* --------------------- */
65 static unsigned long
66 map_mtop_offset_compat(unsigned long ma)
67 {
68 return pages_offset_compat + (m2p_array_compat[ma >> PAGE_SHIFT] << PAGE_SHIFT);
69 return 0;
70 }
73 static void *
74 map_domain_va_core_compat(unsigned long domfd, int cpu, void *guest_va)
75 {
76 unsigned long pde, page;
77 unsigned long va = (unsigned long)guest_va;
78 void *v;
80 if (cr3[cpu] != cr3_phys[cpu])
81 {
82 cr3_phys[cpu] = cr3[cpu];
83 if (cr3_virt[cpu])
84 munmap(cr3_virt[cpu], PAGE_SIZE);
85 v = mmap(
86 NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
87 map_mtop_offset_compat(xen_cr3_to_pfn(cr3_phys[cpu])));
88 if (v == MAP_FAILED)
89 {
90 perror("mmap failed");
91 return NULL;
92 }
93 cr3_virt[cpu] = v;
94 }
95 if ((pde = cr3_virt[cpu][l2_table_offset_i386(va)]) == 0) /* logical address */
96 return NULL;
97 if (current_is_hvm_compat)
98 pde = p2m_array_compat[pde >> PAGE_SHIFT] << PAGE_SHIFT;
99 if (pde != pde_phys[cpu])
100 {
101 pde_phys[cpu] = pde;
102 if (pde_virt[cpu])
103 munmap(pde_virt[cpu], PAGE_SIZE);
104 v = mmap(
105 NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
106 map_mtop_offset_compat(pde_phys[cpu]));
107 if (v == MAP_FAILED)
108 return NULL;
109 pde_virt[cpu] = v;
110 }
111 if ((page = pde_virt[cpu][l1_table_offset_i386(va)]) == 0) /* logical address */
112 return NULL;
113 if (current_is_hvm_compat)
114 page = p2m_array_compat[page >> PAGE_SHIFT] << PAGE_SHIFT;
115 if (page != page_phys[cpu])
116 {
117 page_phys[cpu] = page;
118 if (page_virt[cpu])
119 munmap(page_virt[cpu], PAGE_SIZE);
120 v = mmap(
121 NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
122 map_mtop_offset_compat(page_phys[cpu]));
123 if (v == MAP_FAILED)
124 {
125 IPRINTF("cr3 %lx pde %lx page %lx pti %lx\n", cr3[cpu], pde, page, l1_table_offset_i386(va));
126 page_phys[cpu] = 0;
127 return NULL;
128 }
129 page_virt[cpu] = v;
130 }
131 return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
132 }
134 static int
135 xc_waitdomain_core_compat(
136 int xc_handle,
137 int domfd,
138 int *status,
139 int options)
140 {
141 int nr_vcpus;
142 int i;
143 vcpu_guest_context_t *ctxt;
144 xc_core_header_t header;
146 if ( nr_pages_compat == 0 )
147 {
148 if (read(domfd, &header, sizeof(header)) != sizeof(header))
149 return -1;
151 current_is_hvm_compat = (header.xch_magic == XC_CORE_MAGIC_HVM);
152 if ( !current_is_hvm_compat && (header.xch_magic != XC_CORE_MAGIC) )
153 {
154 IPRINTF("Magic number missmatch: 0x%08x (file) != "
155 " 0x%08x (code)\n", header.xch_magic,
156 XC_CORE_MAGIC);
157 return -1;
158 }
160 nr_pages_compat = header.xch_nr_pages;
161 nr_vcpus = header.xch_nr_vcpus;
162 pages_offset_compat = header.xch_pages_offset;
164 if ((ctxt = ptrace_core_get_vcpu_ctxt(nr_vcpus)) == NULL)
165 {
166 IPRINTF("Could not allocate vcpu context array\n");
167 return -1;
168 }
170 if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) !=
171 sizeof(vcpu_guest_context_t)*nr_vcpus)
172 return -1;
174 for (i = 0; i < nr_vcpus; i++)
175 cr3[i] = ctxt[i].ctrlreg[3];
177 if ((p2m_array_compat = malloc(nr_pages_compat * sizeof(unsigned long))) == NULL)
178 {
179 IPRINTF("Could not allocate p2m_array\n");
180 return -1;
181 }
183 if (read(domfd, p2m_array_compat, sizeof(unsigned long)*nr_pages_compat) !=
184 sizeof(unsigned long)*nr_pages_compat)
185 return -1;
187 if ((m2p_array_compat = malloc((1<<20) * sizeof(unsigned long))) == NULL)
188 {
189 IPRINTF("Could not allocate m2p array\n");
190 return -1;
191 }
192 memset(m2p_array_compat, 0, sizeof(unsigned long)* 1 << 20);
194 for (i = 0; i < nr_pages_compat; i++)
195 m2p_array_compat[p2m_array_compat[i]] = i;
196 }
197 return 0;
198 }
201 /* --- new format based on ELF -------------------------------------------- */
202 #include "xc_core.h"
/*
 * Read exactly 'size' bytes from 'fd' starting at byte 'offset',
 * retrying on EINTR and short reads.
 *
 * Returns 0 on success; -1 on seek failure, read error, or EOF
 * before 'size' bytes were obtained.
 */
static int
pread_exact(int fd, void* buffer, size_t size, off_t offset)
{
    unsigned char *p = buffer;
    size_t remain = size;
    off_t pos = lseek(fd, offset, SEEK_SET);

    if (pos < 0 || pos != offset)
        return -1;

    while (remain > 0) {
        ssize_t n = read(fd, p, remain);

        if (n > 0) {
            p += n;
            remain -= n;
        } else if (n == -1 && errno == EINTR) {
            continue;   /* interrupted - retry the same read */
        } else {
            return -1;  /* hard error or premature EOF */
        }
    }
    return 0;
}
/* Parsed view of an ELF core file: the ELF header plus the raw
 * section header table, the section-name string table, and the
 * .note.Xen contents (each malloc'ed; released by elf_core_free()). */
struct elf_core
{
    int domfd;                /* fd of the core file */
    Elf64_Ehdr ehdr;          /* ELF file header */

    char* shdr;               /* section header table, e_shnum entries */

    char* shstrtab;           /* section-name string table */
    uint64_t shstrtab_size;

    char* note_sec;           /* raw .note.Xen section */
    uint64_t note_sec_size;
};
/* Forward declarations: elf_core_init() must read the shstrtab via
 * the by-index variant before by-name lookups become possible. */
static int
elf_core_alloc_read_sec_by_index(struct elf_core* ecore, uint16_t index,
                                 char** buf, uint64_t* size);
static int
elf_core_alloc_read_sec_by_name(struct elf_core* ecore, const char* name,
                                char** buf, uint64_t* size);
248 static void
249 elf_core_free(struct elf_core* ecore)
250 {
251 if (ecore->shdr != NULL) {
252 free(ecore->shdr);
253 ecore->shdr = NULL;
254 }
255 if (ecore->shstrtab != NULL) {
256 free(ecore->shstrtab);
257 ecore->shstrtab = NULL;
258 }
259 if (ecore->note_sec != NULL) {
260 free(ecore->note_sec);
261 ecore->note_sec = NULL;
262 }
263 }
265 static int
266 elf_core_init(struct elf_core* ecore, int domfd)
267 {
268 uint64_t sh_size;
269 ecore->domfd = domfd;
270 ecore->shdr = NULL;
271 ecore->shstrtab = NULL;
272 ecore->note_sec = NULL;
274 if (pread_exact(ecore->domfd, &ecore->ehdr, sizeof(ecore->ehdr), 0) < 0)
275 goto out;
277 /* check elf header */
278 if (!IS_ELF(ecore->ehdr) || ecore->ehdr.e_type != ET_CORE)
279 goto out;
280 if (ecore->ehdr.e_ident[EI_CLASS] != ELFCLASS64)
281 goto out;
282 /* check elf header more: EI_DATA, EI_VERSION, e_machine... */
284 /* read section headers */
285 sh_size = ecore->ehdr.e_shentsize * ecore->ehdr.e_shnum;
286 ecore->shdr = malloc(sh_size);
287 if (ecore->shdr == NULL)
288 goto out;
289 if (pread_exact(ecore->domfd, ecore->shdr, sh_size,
290 ecore->ehdr.e_shoff) < 0)
291 goto out;
293 /* read shstrtab */
294 if (elf_core_alloc_read_sec_by_index(ecore, ecore->ehdr.e_shstrndx,
295 &ecore->shstrtab,
296 &ecore->shstrtab_size) < 0)
297 goto out;
299 /* read .note.Xen section */
300 if (elf_core_alloc_read_sec_by_name(ecore, XEN_DUMPCORE_SEC_NOTE,
301 &ecore->note_sec,
302 &ecore->note_sec_size) < 0)
303 goto out;
305 return 0;
306 out:
307 elf_core_free(ecore);
308 return -1;
309 }
/*
 * Scan the .note.Xen section for an ELF note with the given owner
 * 'name' and 'type'.  On success *elfnotep points directly into
 * ecore->note_sec (no copy is made) and 0 is returned; -1 when no
 * matching note exists.
 *
 * NOTE(review): the cursor advances by sizeof(*elfnote) + descsz
 * with no 4-byte padding of descsz; this assumes Xen's dump writer
 * emits a fixed-size name field and unpadded descriptors - confirm
 * against struct elfnote in xc_core.h.
 */
static int
elf_core_search_note(struct elf_core* ecore, const char* name, uint32_t type,
                     void** elfnotep)
{
    const char* note_sec_end = ecore->note_sec + ecore->note_sec_size;
    const char* n;

    n = ecore->note_sec;
    while (n < note_sec_end) {
        const struct elfnote *elfnote = (const struct elfnote *)n;
        /* Match on exact owner-name length, owner name, and type. */
        if (elfnote->namesz == strlen(name) + 1 &&
            strncmp(elfnote->name, name, elfnote->namesz) == 0 &&
            elfnote->type == type) {
            *elfnotep = (void*)elfnote;
            return 0;
        }

        n += sizeof(*elfnote) + elfnote->descsz;
    }
    return -1;
}
333 static int
334 elf_core_alloc_read_sec(struct elf_core* ecore, const Elf64_Shdr* shdr,
335 char** buf)
336 {
337 int ret;
338 *buf = malloc(shdr->sh_size);
339 if (*buf == NULL)
340 return -1;
341 ret = pread_exact(ecore->domfd, *buf, shdr->sh_size, shdr->sh_offset);
342 if (ret < 0) {
343 free(*buf);
344 *buf = NULL;
345 }
346 return ret;
347 }
349 static Elf64_Shdr*
350 elf_core_shdr_by_index(struct elf_core* ecore, uint16_t index)
351 {
352 if (index >= ecore->ehdr.e_shnum)
353 return NULL;
354 return (Elf64_Shdr*)(ecore->shdr + ecore->ehdr.e_shentsize * index);
355 }
357 static int
358 elf_core_alloc_read_sec_by_index(struct elf_core* ecore, uint16_t index,
359 char** buf, uint64_t* size)
360 {
361 Elf64_Shdr* shdr = elf_core_shdr_by_index(ecore, index);
362 if (shdr == NULL)
363 return -1;
364 if (size != NULL)
365 *size = shdr->sh_size;
366 return elf_core_alloc_read_sec(ecore, shdr, buf);
367 }
369 static Elf64_Shdr*
370 elf_core_shdr_by_name(struct elf_core* ecore, const char* name)
371 {
372 const char* s;
373 for (s = ecore->shdr;
374 s < ecore->shdr + ecore->ehdr.e_shentsize * ecore->ehdr.e_shnum;
375 s += ecore->ehdr.e_shentsize) {
376 Elf64_Shdr* shdr = (Elf64_Shdr*)s;
378 if (strncmp(ecore->shstrtab + shdr->sh_name, name, strlen(name)) == 0)
379 return shdr;
380 }
382 return NULL;
383 }
385 static int
386 elf_core_read_sec_by_name(struct elf_core* ecore, const char* name, char* buf)
387 {
388 Elf64_Shdr* shdr = elf_core_shdr_by_name(ecore, name);
389 return pread_exact(ecore->domfd, buf, shdr->sh_size, shdr->sh_offset);
391 }
393 static int
394 elf_core_alloc_read_sec_by_name(struct elf_core* ecore, const char* name,
395 char** buf, uint64_t* size)
396 {
397 Elf64_Shdr* shdr = elf_core_shdr_by_name(ecore, name);
398 if (shdr == NULL)
399 return -1;
400 if (size != NULL)
401 *size = shdr->sh_size;
402 return elf_core_alloc_read_sec(ecore, shdr, buf);
403 }
/* XXX application state */
/* ELF-format core file state, filled in by xc_waitdomain_core_elf().
 * Auto-translated (HVM) guests use pfn_array; PV guests use
 * p2m_array. */
static int current_is_auto_translated_physmap = 0;
static struct xen_dumpcore_p2m* p2m_array = NULL; /* for non auto translated physmap mode */
static uint64_t p2m_array_size = 0;
static uint64_t* pfn_array = NULL; /* for auto translated physmap mode */
static uint64_t pfn_array_size = 0;
static long nr_pages = 0;          /* page count from the dump header */
static uint64_t pages_offset;      /* file offset of the .xen_pages data */

/* Dump format versions this reader fully understands; unknown
 * versions produce a warning but are still attempted. */
static const struct xen_dumpcore_elfnote_format_version_desc
known_format_version[] =
{
    {XEN_DUMPCORE_FORMAT_VERSION((uint64_t)0, (uint64_t)1)},
};
#define KNOWN_FORMAT_VERSION_NR \
    (sizeof(known_format_version)/sizeof(known_format_version[0]))
422 static unsigned long
423 map_gmfn_to_offset_elf(unsigned long gmfn)
424 {
425 /*
426 * linear search
427 */
428 unsigned long i;
429 if (current_is_auto_translated_physmap) {
430 if (pfn_array == NULL)
431 return 0;
432 for (i = 0; i < pfn_array_size; i++) {
433 if (pfn_array[i] == gmfn) {
434 return pages_offset + (i << PAGE_SHIFT);
435 }
436 }
437 } else {
438 if (p2m_array == NULL)
439 return 0;
440 for (i = 0; i < p2m_array_size; i++) {
441 if (p2m_array[i].gmfn == gmfn) {
442 return pages_offset + (i << PAGE_SHIFT);
443 }
444 }
445 }
446 return 0;
447 }
/*
 * ELF-format core: map guest virtual address 'guest_va' of vCPU
 * 'cpu' to a pointer into the core file, walking the dumped 2-level
 * i386 page tables.  The cr3/pde/page frame mappings are cached per
 * vCPU; a stale entry is unmapped and cleared *before* the
 * replacement mmap, so a failure cannot leave a dangling pointer
 * behind.
 *
 * Returns NULL when the address is not mapped in the dump or on
 * mmap failure.
 */
static void *
map_domain_va_core_elf(unsigned long domfd, int cpu, void *guest_va)
{
    unsigned long pde, page;
    unsigned long va = (unsigned long)guest_va;
    unsigned long offset;
    void *v;

    /* Level 2: (re)map the page directory referenced by cr3. */
    if (cr3[cpu] != cr3_phys[cpu])
    {
        if (cr3_virt[cpu])
        {
            munmap(cr3_virt[cpu], PAGE_SIZE);
            cr3_virt[cpu] = NULL;
            cr3_phys[cpu] = 0;
        }
        offset = map_gmfn_to_offset_elf(xen_cr3_to_pfn(cr3[cpu]));
        if (offset == 0)
            return NULL;
        v = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd, offset);
        if (v == MAP_FAILED)
        {
            perror("mmap failed");
            return NULL;
        }
        /* Only commit the cache entry after a successful mmap. */
        cr3_phys[cpu] = cr3[cpu];
        cr3_virt[cpu] = v;
    }
    if ((pde = cr3_virt[cpu][l2_table_offset_i386(va)]) == 0) /* logical address */
        return NULL;
    /* Level 1: (re)map the page table referenced by the PDE. */
    if (pde != pde_phys[cpu])
    {
        if (pde_virt[cpu])
        {
            munmap(pde_virt[cpu], PAGE_SIZE);
            pde_virt[cpu] = NULL;
            pde_phys[cpu] = 0;
        }
        offset = map_gmfn_to_offset_elf(pde >> PAGE_SHIFT);
        if (offset == 0)
            return NULL;
        v = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd, offset);
        if (v == MAP_FAILED)
            return NULL;
        pde_phys[cpu] = pde;
        pde_virt[cpu] = v;
    }
    if ((page = pde_virt[cpu][l1_table_offset_i386(va)]) == 0) /* logical address */
        return NULL;
    /* Leaf: (re)map the data page itself. */
    if (page != page_phys[cpu])
    {
        if (page_virt[cpu])
        {
            munmap(page_virt[cpu], PAGE_SIZE);
            page_virt[cpu] = NULL;
            page_phys[cpu] = 0;
        }
        offset = map_gmfn_to_offset_elf(page >> PAGE_SHIFT);
        if (offset == 0)
            return NULL;
        v = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd, offset);
        if (v == MAP_FAILED)
        {
            IPRINTF("cr3 %lx pde %lx page %lx pti %lx\n",
                    cr3[cpu], pde, page, l1_table_offset_i386(va));
            return NULL;
        }
        page_phys[cpu] = page;
        page_virt[cpu] = v;
    }
    /* Combine the mapped frame with the in-page offset. */
    return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
}
522 static int
523 xc_waitdomain_core_elf(
524 int xc_handle,
525 int domfd,
526 int *status,
527 int options)
528 {
529 int i;
530 vcpu_guest_context_t *ctxt;
531 struct elf_core ecore;
533 struct xen_dumpcore_elfnote_none *none;
534 struct xen_dumpcore_elfnote_header *header;
535 struct xen_dumpcore_elfnote_xen_version *xen_version;
536 struct xen_dumpcore_elfnote_format_version *format_version;
538 Elf64_Shdr* table_shdr;
539 Elf64_Shdr* pages_shdr;
541 if (elf_core_init(&ecore, domfd) < 0)
542 goto out;
544 /* .note.Xen: none */
545 if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
546 XEN_ELFNOTE_DUMPCORE_NONE, (void**)&none) < 0)
547 goto out;
549 /* .note.Xen: header */
550 if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
551 XEN_ELFNOTE_DUMPCORE_HEADER, (void**)&header) < 0)
552 goto out;
553 if ((header->header.xch_magic != XC_CORE_MAGIC &&
554 header->header.xch_magic != XC_CORE_MAGIC_HVM) ||
555 header->header.xch_nr_vcpus == 0 ||
556 header->header.xch_nr_pages == 0 ||
557 header->header.xch_page_size != PAGE_SIZE)
558 goto out;
559 current_is_auto_translated_physmap =
560 (header->header.xch_magic == XC_CORE_MAGIC_HVM);
561 nr_pages = header->header.xch_nr_pages;
563 /* .note.Xen: xen_version */
564 if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
565 XEN_ELFNOTE_DUMPCORE_XEN_VERSION,
566 (void**)&xen_version) < 0)
567 goto out;
568 /* shifted case covers 32 bit FV guest core file created on 64 bit Dom0 */
569 if (xen_version->xen_version.pagesize != PAGE_SIZE &&
570 (xen_version->xen_version.pagesize >> 32) != PAGE_SIZE)
571 goto out;
573 /* .note.Xen: format_version */
574 if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
575 XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION,
576 (void**)&format_version) < 0)
577 goto out;
578 for (i = 0; i < KNOWN_FORMAT_VERSION_NR; i++) {
579 if (format_version->format_version.version ==
580 known_format_version[i].version)
581 break;
582 }
583 if (i == KNOWN_FORMAT_VERSION_NR) {
584 /* complain if unknown format */
585 IPRINTF("warning:unknown format version. %"PRIx64"\n",
586 format_version->format_version.version);
587 }
589 if ((ctxt = ptrace_core_get_vcpu_ctxt(header->header.xch_nr_vcpus)) == NULL)
590 goto out;
592 /* .xen_prstatus: read vcpu_guest_context_t*/
593 if (elf_core_read_sec_by_name(&ecore, XEN_DUMPCORE_SEC_PRSTATUS,
594 (char*)ctxt) < 0)
595 goto out;
596 for (i = 0; i < header->header.xch_nr_vcpus; i++)
597 cr3[i] = ctxt[i].ctrlreg[3];
599 /* read .xen_p2m or .xen_pfn */
600 if (current_is_auto_translated_physmap) {
601 table_shdr = elf_core_shdr_by_name(&ecore, XEN_DUMPCORE_SEC_PFN);
602 if (table_shdr == NULL)
603 goto out;
604 pfn_array_size = table_shdr->sh_size / table_shdr->sh_entsize;
605 if (pfn_array != NULL)
606 free(pfn_array);
607 if (elf_core_alloc_read_sec(&ecore, table_shdr,
608 (char**)&pfn_array) < 0)
609 goto out;
610 if (table_shdr->sh_entsize != sizeof(pfn_array[0]))
611 goto out;
612 } else {
613 table_shdr = elf_core_shdr_by_name(&ecore, XEN_DUMPCORE_SEC_P2M);
614 if (table_shdr == NULL)
615 goto out;
616 p2m_array_size = table_shdr->sh_size / table_shdr->sh_entsize;
617 if (p2m_array != NULL)
618 free(p2m_array);
619 if (elf_core_alloc_read_sec(&ecore, table_shdr,
620 (char**)&p2m_array) < 0)
621 goto out;
622 if (table_shdr->sh_entsize != sizeof(p2m_array[0]))
623 goto out;
624 }
625 if (table_shdr->sh_size / table_shdr->sh_entsize != nr_pages)
626 goto out;
628 /* pages_offset and check the file size */
629 pages_shdr = elf_core_shdr_by_name(&ecore, XEN_DUMPCORE_SEC_PAGES);
630 if (pages_shdr == NULL)
631 goto out;
632 pages_offset = pages_shdr->sh_offset;
633 if ((pages_shdr->sh_size / pages_shdr->sh_entsize) != nr_pages ||
634 pages_shdr->sh_entsize != PAGE_SIZE ||
635 (pages_shdr->sh_addralign % PAGE_SIZE) != 0 ||
636 (pages_offset % PAGE_SIZE) != 0)
637 goto out;
639 elf_core_free(&ecore);
640 return 0;
642 out:
643 elf_core_free(&ecore);
644 return -1;
645 }
647 /* --- interface ----------------------------------------------------------- */
/* Per-format operations: parse/validate a core file, and translate a
 * guest virtual address to a pointer into it. */
typedef int (*xc_waitdomain_core_t)(int xc_handle,
                                    int domfd,
                                    int *status,
                                    int options);
typedef void *(*map_domain_va_core_t)(unsigned long domfd,
                                      int cpu,
                                      void *guest_va);
/* A supported core file format is a (parser, VA-mapper) pair. */
struct xc_core_format_type {
    xc_waitdomain_core_t waitdomain_core;
    map_domain_va_core_t map_domain_va_core;
};

/* Formats tried in order: the new ELF layout first, then the old
 * binary layout as fallback. */
static const struct xc_core_format_type format_type[] = {
    {xc_waitdomain_core_elf, map_domain_va_core_elf},
    {xc_waitdomain_core_compat, map_domain_va_core_compat},
};
#define NR_FORMAT_TYPE (sizeof(format_type)/sizeof(format_type[0]))

/* XXX application state */
/* Format selected by the last successful xc_waitdomain_core();
 * NULL until a core file has been recognized. */
static const struct xc_core_format_type* current_format_type = NULL;
670 void *
671 map_domain_va_core(unsigned long domfd, int cpu, void *guest_va)
672 {
673 if (current_format_type == NULL)
674 return NULL;
675 return (current_format_type->map_domain_va_core)(domfd, cpu, guest_va);
676 }
678 int
679 xc_waitdomain_core(int xc_handle, int domfd, int *status, int options)
680 {
681 int ret;
682 int i;
684 for (i = 0; i < NR_FORMAT_TYPE; i++) {
685 ret = (format_type[i].waitdomain_core)(xc_handle, domfd, status,
686 options);
687 if (ret == 0) {
688 current_format_type = &format_type[i];
689 break;
690 }
691 }
692 return ret;
693 }
695 /*
696 * Local variables:
697 * mode: C
698 * c-set-style: "BSD"
699 * c-basic-offset: 4
700 * tab-width: 4
701 * indent-tabs-mode: nil
702 * End:
703 */