debuggers.hg

view tools/libxc/xc_ptrace_core.c @ 16715:c5deb251b9dc

Update version to 3.2.0-rc4
author Keir Fraser <keir.fraser@citrix.com>
date Sat Dec 29 17:57:37 2007 +0000 (2007-12-29)
parents ca7cd6752cc2
children d3a87899985d
line source
1 /*
2 * New elf format support.
3 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
4 * VA Linux Systems Japan K.K.
5 */
7 #include <sys/ptrace.h>
8 #include <sys/wait.h>
9 #include "xc_private.h"
10 #include "xg_private.h"
11 #include "xc_ptrace.h"
12 #include <time.h>
13 #include <inttypes.h>
15 /* Leave the code for the old format as is. */
16 /* --- compatible layer for old format ------------------------------------- */
17 /* XXX application state */
/* Cached state parsed from the old-format core file; filled in once by
 * xc_waitdomain_core_compat() and then read by the mapping helpers. */
static int current_is_hvm_compat = 0;           /* non-zero if xch_magic == XC_CORE_MAGIC_HVM */
static long nr_pages_compat = 0;                /* page count; 0 means "not yet initialized" */
static unsigned long *p2m_array_compat = NULL;  /* pfn -> mfn table read from the file */
static unsigned long *m2p_array_compat = NULL;  /* reverse (mfn -> pfn) table built in memory */
static unsigned long pages_offset_compat;       /* file offset where the page data starts */
static unsigned long cr3_compat[MAX_VIRT_CPUS]; /* per-vcpu CR3 values from the saved contexts */
26 /* --------------------- */
28 static unsigned long
29 map_mtop_offset_compat(unsigned long ma)
30 {
31 return pages_offset_compat + (m2p_array_compat[ma >> PAGE_SHIFT] << PAGE_SHIFT);
32 return 0;
33 }
/*
 * Map the page backing guest virtual address `guest_va` for vcpu `cpu`
 * out of the old-format core file `domfd`, by walking the guest's
 * 2-level i386 page table stored in the dump.
 *
 * Returns a pointer into a PAGE_SIZE mmap()ing of the core file, or NULL
 * if any level of the walk is unmapped or the mmap fails.  The mappings
 * for the CR3 page, the page-directory page and the leaf page are cached
 * per-vcpu in static arrays and reused while the physical address at each
 * level stays the same.
 *
 * NOTE(review): `ctxt` is unused here; the CR3 values come from the
 * file-scope cr3_compat[] filled in by xc_waitdomain_core_compat().
 * NOTE(review): cr3_phys[cpu] is updated before the mmap, so a failed
 * mmap leaves a stale cache entry pointing at an unmapped page; the ELF
 * variant below invalidates first — confirm whether that matters here.
 */
static void *
map_domain_va_core_compat(unsigned long domfd, int cpu, void *guest_va,
                          vcpu_guest_context_t *ctxt)
{
    unsigned long pde, page;
    unsigned long va = (unsigned long)guest_va;
    void *v;

    /* Per-vcpu cache of the most recently mapped page at each level. */
    static unsigned long cr3_phys[MAX_VIRT_CPUS];
    static unsigned long *cr3_virt[MAX_VIRT_CPUS];
    static unsigned long pde_phys[MAX_VIRT_CPUS];
    static unsigned long *pde_virt[MAX_VIRT_CPUS];
    static unsigned long page_phys[MAX_VIRT_CPUS];
    static unsigned long *page_virt[MAX_VIRT_CPUS];

    /* (Re)map the page-directory page if this vcpu's CR3 changed. */
    if (cr3_compat[cpu] != cr3_phys[cpu])
    {
        cr3_phys[cpu] = cr3_compat[cpu];
        if (cr3_virt[cpu])
            munmap(cr3_virt[cpu], PAGE_SIZE);
        v = mmap(
            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
            map_mtop_offset_compat(xen_cr3_to_pfn(cr3_phys[cpu])));
        if (v == MAP_FAILED)
        {
            perror("mmap failed");
            return NULL;
        }
        cr3_virt[cpu] = v;
    }
    /* Level-2 lookup: a zero PDE means the address is not mapped. */
    if ((pde = cr3_virt[cpu][l2_table_offset_i386(va)]) == 0) /* logical address */
        return NULL;
    /* HVM dumps store guest frame numbers: translate through the p2m. */
    if (current_is_hvm_compat)
        pde = p2m_array_compat[pde >> PAGE_SHIFT] << PAGE_SHIFT;
    if (pde != pde_phys[cpu])
    {
        pde_phys[cpu] = pde;
        if (pde_virt[cpu])
            munmap(pde_virt[cpu], PAGE_SIZE);
        v = mmap(
            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
            map_mtop_offset_compat(pde_phys[cpu]));
        if (v == MAP_FAILED)
            return NULL;
        pde_virt[cpu] = v;
    }
    /* Level-1 lookup: a zero PTE means the page is not present. */
    if ((page = pde_virt[cpu][l1_table_offset_i386(va)]) == 0) /* logical address */
        return NULL;
    if (current_is_hvm_compat)
        page = p2m_array_compat[page >> PAGE_SHIFT] << PAGE_SHIFT;
    if (page != page_phys[cpu])
    {
        page_phys[cpu] = page;
        if (page_virt[cpu])
            munmap(page_virt[cpu], PAGE_SIZE);
        v = mmap(
            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
            map_mtop_offset_compat(page_phys[cpu]));
        if (v == MAP_FAILED)
        {
            IPRINTF("cr3 %lx pde %lx page %lx pti %lx\n", cr3_compat[cpu], pde, page, l1_table_offset_i386(va));
            page_phys[cpu] = 0;
            return NULL;
        }
        page_virt[cpu] = v;
    }
    /* Combine the mapped page with the low-order offset bits of va. */
    return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
}
105 static int
106 xc_waitdomain_core_compat(
107 int xc_handle,
108 int domfd,
109 int *status,
110 int options,
111 vcpu_guest_context_t *ctxt)
112 {
113 int nr_vcpus;
114 int i;
115 xc_core_header_t header;
117 if ( nr_pages_compat == 0 )
118 {
119 if (read(domfd, &header, sizeof(header)) != sizeof(header))
120 return -1;
122 current_is_hvm_compat = (header.xch_magic == XC_CORE_MAGIC_HVM);
123 if ( !current_is_hvm_compat && (header.xch_magic != XC_CORE_MAGIC) )
124 {
125 IPRINTF("Magic number missmatch: 0x%08x (file) != "
126 " 0x%08x (code)\n", header.xch_magic,
127 XC_CORE_MAGIC);
128 return -1;
129 }
131 nr_pages_compat = header.xch_nr_pages;
132 nr_vcpus = header.xch_nr_vcpus;
133 pages_offset_compat = header.xch_pages_offset;
135 if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) !=
136 sizeof(vcpu_guest_context_t)*nr_vcpus)
137 return -1;
139 for (i = 0; i < nr_vcpus; i++)
140 cr3_compat[i] = ctxt[i].ctrlreg[3];
142 if ((p2m_array_compat = malloc(nr_pages_compat * sizeof(unsigned long))) == NULL)
143 {
144 IPRINTF("Could not allocate p2m_array\n");
145 return -1;
146 }
148 if (read(domfd, p2m_array_compat, sizeof(unsigned long)*nr_pages_compat) !=
149 sizeof(unsigned long)*nr_pages_compat)
150 return -1;
152 if ((m2p_array_compat = malloc((1<<20) * sizeof(unsigned long))) == NULL)
153 {
154 IPRINTF("Could not allocate m2p array\n");
155 return -1;
156 }
157 bzero(m2p_array_compat, sizeof(unsigned long)* 1 << 20);
159 for (i = 0; i < nr_pages_compat; i++)
160 m2p_array_compat[p2m_array_compat[i]] = i;
161 }
162 return 0;
163 }
166 /* --- new format based on ELF -------------------------------------------- */
167 #include "xc_core.h"
/*
 * Read exactly `size` bytes from `fd` starting at absolute offset
 * `offset`.  Short reads are retried and EINTR is transparently handled.
 * Returns 0 on success, -1 on seek failure, hard read error, or EOF
 * before `size` bytes were obtained.
 */
static int
pread_exact(int fd, void* buffer, size_t size, off_t offset)
{
    unsigned char *dst = buffer;
    size_t remain = size;
    off_t pos;

    pos = lseek(fd, offset, SEEK_SET);
    if (pos < 0 || pos != offset)
        return -1;

    while (remain > 0) {
        ssize_t got = read(fd, dst, remain);
        if (got == -1 && errno == EINTR)
            continue;           /* interrupted by a signal: retry */
        if (got <= 0)
            return -1;          /* hard error or premature EOF */
        remain -= got;
        dst += got;
    }
    return 0;
}
/* Parsed view of an ELF-format xen core dump.  All three buffers are
 * heap-allocated by elf_core_init() and released by elf_core_free(). */
struct elf_core
{
    int domfd;              /* fd of the core file */
    Elf64_Ehdr ehdr;        /* ELF file header */

    char* shdr;             /* raw section header table (e_shnum entries
                             * of e_shentsize bytes each) */

    char* shstrtab;         /* section-name string table */
    uint64_t shstrtab_size;

    char* note_sec;         /* contents of the .note.Xen section */
    uint64_t note_sec_size;
};
/* Forward declarations: read a section's contents into a freshly
 * malloc()ed buffer (caller owns *buf); needed by elf_core_init(). */
static int
elf_core_alloc_read_sec_by_index(struct elf_core* ecore, uint16_t index,
                                 char** buf, uint64_t* size);
static int
elf_core_alloc_read_sec_by_name(struct elf_core* ecore, const char* name,
                                char** buf, uint64_t* size);
213 static void
214 elf_core_free(struct elf_core* ecore)
215 {
216 if (ecore->shdr != NULL) {
217 free(ecore->shdr);
218 ecore->shdr = NULL;
219 }
220 if (ecore->shstrtab != NULL) {
221 free(ecore->shstrtab);
222 ecore->shstrtab = NULL;
223 }
224 if (ecore->note_sec != NULL) {
225 free(ecore->note_sec);
226 ecore->note_sec = NULL;
227 }
228 }
230 static int
231 elf_core_init(struct elf_core* ecore, int domfd)
232 {
233 uint64_t sh_size;
234 ecore->domfd = domfd;
235 ecore->shdr = NULL;
236 ecore->shstrtab = NULL;
237 ecore->note_sec = NULL;
239 if (pread_exact(ecore->domfd, &ecore->ehdr, sizeof(ecore->ehdr), 0) < 0)
240 goto out;
242 /* check elf header */
243 if (!IS_ELF(ecore->ehdr) || ecore->ehdr.e_type != ET_CORE)
244 goto out;
245 if (ecore->ehdr.e_ident[EI_CLASS] != ELFCLASS64)
246 goto out;
247 /* check elf header more: EI_DATA, EI_VERSION, e_machine... */
249 /* read section headers */
250 sh_size = ecore->ehdr.e_shentsize * ecore->ehdr.e_shnum;
251 ecore->shdr = malloc(sh_size);
252 if (ecore->shdr == NULL)
253 goto out;
254 if (pread_exact(ecore->domfd, ecore->shdr, sh_size,
255 ecore->ehdr.e_shoff) < 0)
256 goto out;
258 /* read shstrtab */
259 if (elf_core_alloc_read_sec_by_index(ecore, ecore->ehdr.e_shstrndx,
260 &ecore->shstrtab,
261 &ecore->shstrtab_size) < 0)
262 goto out;
264 /* read .note.Xen section */
265 if (elf_core_alloc_read_sec_by_name(ecore, XEN_DUMPCORE_SEC_NOTE,
266 &ecore->note_sec,
267 &ecore->note_sec_size) < 0)
268 goto out;
270 return 0;
271 out:
272 elf_core_free(ecore);
273 return -1;
274 }
/*
 * Scan the cached .note.Xen section for a note whose name and type both
 * match.  On success *elfnotep points at the note header inside
 * ecore->note_sec (no copy is made; the pointer is only valid until
 * elf_core_free()).  Returns 0 on success, -1 if no such note exists.
 *
 * NOTE(review): the walk advances by sizeof(*elfnote) + descsz with no
 * 4-byte alignment padding; this assumes Xen writes struct elfnote
 * entries with pre-aligned name/desc sizes — confirm against xc_core.h.
 */
static int
elf_core_search_note(struct elf_core* ecore, const char* name, uint32_t type,
                     void** elfnotep)
{
    const char* note_sec_end = ecore->note_sec + ecore->note_sec_size;
    const char* n;

    n = ecore->note_sec;
    while (n < note_sec_end) {
        const struct elfnote *elfnote = (const struct elfnote *)n;
        /* Match the exact name (including its terminating NUL) and type. */
        if (elfnote->namesz == strlen(name) + 1 &&
            strncmp(elfnote->name, name, elfnote->namesz) == 0 &&
            elfnote->type == type) {
            *elfnotep = (void*)elfnote;
            return 0;
        }

        n += sizeof(*elfnote) + elfnote->descsz;
    }
    return -1;
}
298 static int
299 elf_core_alloc_read_sec(struct elf_core* ecore, const Elf64_Shdr* shdr,
300 char** buf)
301 {
302 int ret;
303 *buf = malloc(shdr->sh_size);
304 if (*buf == NULL)
305 return -1;
306 ret = pread_exact(ecore->domfd, *buf, shdr->sh_size, shdr->sh_offset);
307 if (ret < 0) {
308 free(*buf);
309 *buf = NULL;
310 }
311 return ret;
312 }
314 static Elf64_Shdr*
315 elf_core_shdr_by_index(struct elf_core* ecore, uint16_t index)
316 {
317 if (index >= ecore->ehdr.e_shnum)
318 return NULL;
319 return (Elf64_Shdr*)(ecore->shdr + ecore->ehdr.e_shentsize * index);
320 }
322 static int
323 elf_core_alloc_read_sec_by_index(struct elf_core* ecore, uint16_t index,
324 char** buf, uint64_t* size)
325 {
326 Elf64_Shdr* shdr = elf_core_shdr_by_index(ecore, index);
327 if (shdr == NULL)
328 return -1;
329 if (size != NULL)
330 *size = shdr->sh_size;
331 return elf_core_alloc_read_sec(ecore, shdr, buf);
332 }
334 static Elf64_Shdr*
335 elf_core_shdr_by_name(struct elf_core* ecore, const char* name)
336 {
337 const char* s;
338 for (s = ecore->shdr;
339 s < ecore->shdr + ecore->ehdr.e_shentsize * ecore->ehdr.e_shnum;
340 s += ecore->ehdr.e_shentsize) {
341 Elf64_Shdr* shdr = (Elf64_Shdr*)s;
343 if (strncmp(ecore->shstrtab + shdr->sh_name, name, strlen(name)) == 0)
344 return shdr;
345 }
347 return NULL;
348 }
350 static int
351 elf_core_read_sec_by_name(struct elf_core* ecore, const char* name, char* buf)
352 {
353 Elf64_Shdr* shdr = elf_core_shdr_by_name(ecore, name);
354 return pread_exact(ecore->domfd, buf, shdr->sh_size, shdr->sh_offset);
356 }
358 static int
359 elf_core_alloc_read_sec_by_name(struct elf_core* ecore, const char* name,
360 char** buf, uint64_t* size)
361 {
362 Elf64_Shdr* shdr = elf_core_shdr_by_name(ecore, name);
363 if (shdr == NULL)
364 return -1;
365 if (size != NULL)
366 *size = shdr->sh_size;
367 return elf_core_alloc_read_sec(ecore, shdr, buf);
368 }
/* XXX application state */
/* State parsed from the ELF core file by xc_waitdomain_core_elf() and
 * consumed by map_gmfn_to_offset_elf()/map_domain_va_core_elf(). */
static int current_is_auto_translated_physmap = 0;
static struct xen_dumpcore_p2m* p2m_array = NULL; /* for non auto translated physmap mode */
static uint64_t p2m_array_size = 0;
static uint64_t* pfn_array = NULL; /* for auto translated physmap mode */
static uint64_t pfn_array_size = 0;
static long nr_pages = 0;                /* page count from the dump header note */
static uint64_t pages_offset;            /* file offset of the pages section */
static unsigned long cr3[MAX_VIRT_CPUS]; /* per-vcpu CR3 from the prstatus section */
/* Dump format versions this reader understands; an unknown version only
 * produces a warning in xc_waitdomain_core_elf(), it is not rejected. */
static const struct xen_dumpcore_elfnote_format_version_desc
known_format_version[] =
{
    {XEN_DUMPCORE_FORMAT_VERSION((uint64_t)0, (uint64_t)1)},
};
/* Number of entries in known_format_version[]. */
#define KNOWN_FORMAT_VERSION_NR \
    (sizeof(known_format_version)/sizeof(known_format_version[0]))
388 static unsigned long
389 map_gmfn_to_offset_elf(unsigned long gmfn)
390 {
391 /*
392 * linear search
393 */
394 unsigned long i;
395 if (current_is_auto_translated_physmap) {
396 if (pfn_array == NULL)
397 return 0;
398 for (i = 0; i < pfn_array_size; i++) {
399 if (pfn_array[i] == gmfn) {
400 return pages_offset + (i << PAGE_SHIFT);
401 }
402 }
403 } else {
404 if (p2m_array == NULL)
405 return 0;
406 for (i = 0; i < p2m_array_size; i++) {
407 if (p2m_array[i].gmfn == gmfn) {
408 return pages_offset + (i << PAGE_SHIFT);
409 }
410 }
411 }
412 return 0;
413 }
/*
 * Map the page backing guest virtual address `guest_va` for vcpu `cpu`
 * out of the ELF-format core file `domfd`, walking the guest's 2-level
 * i386 page table stored in the dump.
 *
 * Returns a pointer into a PAGE_SIZE mmap()ing of the file, or NULL when
 * a page-table level is unmapped, a frame is absent from the dump
 * (map_gmfn_to_offset_elf() returned 0) or mmap() fails.  Per-vcpu
 * static caches hold the most recent mapping at each level; the cache
 * entry is invalidated *before* remapping so a failed mmap never leaves
 * a stale entry (unlike the compat variant above).
 *
 * NOTE(review): `ctxt` is unused; CR3 values come from the file-scope
 * cr3[] filled in by xc_waitdomain_core_elf().
 */
static void *
map_domain_va_core_elf(unsigned long domfd, int cpu, void *guest_va,
                       vcpu_guest_context_t *ctxt)
{
    unsigned long pde, page;
    unsigned long va = (unsigned long)guest_va;
    unsigned long offset;
    void *v;

    /* Per-vcpu cache of the most recently mapped page at each level. */
    static unsigned long cr3_phys[MAX_VIRT_CPUS];
    static unsigned long *cr3_virt[MAX_VIRT_CPUS];
    static unsigned long pde_phys[MAX_VIRT_CPUS];
    static unsigned long *pde_virt[MAX_VIRT_CPUS];
    static unsigned long page_phys[MAX_VIRT_CPUS];
    static unsigned long *page_virt[MAX_VIRT_CPUS];

    if (cr3[cpu] != cr3_phys[cpu])
    {
        /* Drop the stale mapping first so failure leaves a clean cache. */
        if (cr3_virt[cpu])
        {
            munmap(cr3_virt[cpu], PAGE_SIZE);
            cr3_virt[cpu] = NULL;
            cr3_phys[cpu] = 0;
        }
        offset = map_gmfn_to_offset_elf(xen_cr3_to_pfn(cr3[cpu]));
        if (offset == 0)
            return NULL;
        v = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd, offset);
        if (v == MAP_FAILED)
        {
            perror("mmap failed");
            return NULL;
        }
        cr3_phys[cpu] = cr3[cpu];
        cr3_virt[cpu] = v;
    }
    /* Level-2 lookup: a zero PDE means the address is not mapped. */
    if ((pde = cr3_virt[cpu][l2_table_offset_i386(va)]) == 0) /* logical address */
        return NULL;
    if (pde != pde_phys[cpu])
    {
        if (pde_virt[cpu])
        {
            munmap(pde_virt[cpu], PAGE_SIZE);
            pde_virt[cpu] = NULL;
            pde_phys[cpu] = 0;
        }
        offset = map_gmfn_to_offset_elf(pde >> PAGE_SHIFT);
        if (offset == 0)
            return NULL;
        v = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd, offset);
        if (v == MAP_FAILED)
            return NULL;
        pde_phys[cpu] = pde;
        pde_virt[cpu] = v;
    }
    /* Level-1 lookup: a zero PTE means the page is not present. */
    if ((page = pde_virt[cpu][l1_table_offset_i386(va)]) == 0) /* logical address */
        return NULL;
    if (page != page_phys[cpu])
    {
        if (page_virt[cpu])
        {
            munmap(page_virt[cpu], PAGE_SIZE);
            page_virt[cpu] = NULL;
            page_phys[cpu] = 0;
        }
        offset = map_gmfn_to_offset_elf(page >> PAGE_SHIFT);
        if (offset == 0)
            return NULL;
        v = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd, offset);
        if (v == MAP_FAILED)
        {
            IPRINTF("cr3 %lx pde %lx page %lx pti %lx\n",
                    cr3[cpu], pde, page, l1_table_offset_i386(va));
            return NULL;
        }
        page_phys[cpu] = page;
        page_virt[cpu] = v;
    }
    /* Combine the mapped page with the low-order offset bits of va. */
    return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
}
496 static int
497 xc_waitdomain_core_elf(
498 int xc_handle,
499 int domfd,
500 int *status,
501 int options,
502 vcpu_guest_context_t *ctxt)
503 {
504 int i;
505 struct elf_core ecore;
507 struct xen_dumpcore_elfnote_none *none;
508 struct xen_dumpcore_elfnote_header *header;
509 struct xen_dumpcore_elfnote_xen_version *xen_version;
510 struct xen_dumpcore_elfnote_format_version *format_version;
512 Elf64_Shdr* table_shdr;
513 Elf64_Shdr* pages_shdr;
515 if (elf_core_init(&ecore, domfd) < 0)
516 goto out;
518 /* .note.Xen: none */
519 if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
520 XEN_ELFNOTE_DUMPCORE_NONE, (void**)&none) < 0)
521 goto out;
523 /* .note.Xen: header */
524 if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
525 XEN_ELFNOTE_DUMPCORE_HEADER, (void**)&header) < 0)
526 goto out;
527 if ((header->header.xch_magic != XC_CORE_MAGIC &&
528 header->header.xch_magic != XC_CORE_MAGIC_HVM) ||
529 header->header.xch_nr_vcpus == 0 ||
530 header->header.xch_nr_vcpus >= MAX_VIRT_CPUS ||
531 header->header.xch_nr_pages == 0 ||
532 header->header.xch_page_size != PAGE_SIZE)
533 goto out;
534 current_is_auto_translated_physmap =
535 (header->header.xch_magic == XC_CORE_MAGIC_HVM);
536 nr_pages = header->header.xch_nr_pages;
538 /* .note.Xen: xen_version */
539 if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
540 XEN_ELFNOTE_DUMPCORE_XEN_VERSION,
541 (void**)&xen_version) < 0)
542 goto out;
543 if (xen_version->xen_version.pagesize != PAGE_SIZE)
544 goto out;
546 /* .note.Xen: format_version */
547 if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
548 XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION,
549 (void**)&format_version) < 0)
550 goto out;
551 for (i = 0; i < KNOWN_FORMAT_VERSION_NR; i++) {
552 if (format_version->format_version.version ==
553 known_format_version[i].version)
554 break;
555 }
556 if (i == KNOWN_FORMAT_VERSION_NR) {
557 /* complain if unknown format */
558 IPRINTF("warning:unknown format version. %"PRIx64"\n",
559 format_version->format_version.version);
560 }
562 /* .xen_prstatus: read vcpu_guest_context_t*/
563 if (elf_core_read_sec_by_name(&ecore, XEN_DUMPCORE_SEC_PRSTATUS,
564 (char*)ctxt) < 0)
565 goto out;
566 for (i = 0; i < header->header.xch_nr_vcpus; i++)
567 cr3[i] = ctxt[i].ctrlreg[3];
569 /* read .xen_p2m or .xen_pfn */
570 if (current_is_auto_translated_physmap) {
571 table_shdr = elf_core_shdr_by_name(&ecore, XEN_DUMPCORE_SEC_PFN);
572 if (table_shdr == NULL)
573 goto out;
574 pfn_array_size = table_shdr->sh_size / table_shdr->sh_entsize;
575 if (pfn_array != NULL)
576 free(pfn_array);
577 if (elf_core_alloc_read_sec(&ecore, table_shdr,
578 (char**)&pfn_array) < 0)
579 goto out;
580 if (table_shdr->sh_entsize != sizeof(pfn_array[0]))
581 goto out;
582 } else {
583 table_shdr = elf_core_shdr_by_name(&ecore, XEN_DUMPCORE_SEC_P2M);
584 if (table_shdr == NULL)
585 goto out;
586 p2m_array_size = table_shdr->sh_size / table_shdr->sh_entsize;
587 if (p2m_array != NULL)
588 free(p2m_array);
589 if (elf_core_alloc_read_sec(&ecore, table_shdr,
590 (char**)&p2m_array) < 0)
591 goto out;
592 if (table_shdr->sh_entsize != sizeof(p2m_array[0]))
593 goto out;
594 }
595 if (table_shdr->sh_size / table_shdr->sh_entsize != nr_pages)
596 goto out;
598 /* pages_offset and check the file size */
599 pages_shdr = elf_core_shdr_by_name(&ecore, XEN_DUMPCORE_SEC_PAGES);
600 if (pages_shdr == NULL)
601 goto out;
602 pages_offset = pages_shdr->sh_offset;
603 if ((pages_shdr->sh_size / pages_shdr->sh_entsize) != nr_pages ||
604 pages_shdr->sh_entsize != PAGE_SIZE ||
605 (pages_shdr->sh_addralign % PAGE_SIZE) != 0 ||
606 (pages_offset % PAGE_SIZE) != 0)
607 goto out;
609 elf_core_free(&ecore);
610 return 0;
612 out:
613 elf_core_free(&ecore);
614 return -1;
615 }
/* --- interface ----------------------------------------------------------- */

/* Per-format operations: parse/validate a core file, and map a guest
 * virtual address out of it. */
typedef int (*xc_waitdomain_core_t)(int xc_handle,
                                    int domfd,
                                    int *status,
                                    int options,
                                    vcpu_guest_context_t *ctxt);
typedef void *(*map_domain_va_core_t)(unsigned long domfd,
                                      int cpu,
                                      void *guest_va,
                                      vcpu_guest_context_t *ctxt);
struct xc_core_format_type {
    xc_waitdomain_core_t waitdomain_core;
    map_domain_va_core_t map_domain_va_core;
};

/* Probe order used by xc_waitdomain_core(): new ELF format first, then
 * the old pre-ELF layout. */
static const struct xc_core_format_type format_type[] = {
    {xc_waitdomain_core_elf, map_domain_va_core_elf},
    {xc_waitdomain_core_compat, map_domain_va_core_compat},
};
/* Number of entries in format_type[]. */
#define NR_FORMAT_TYPE (sizeof(format_type)/sizeof(format_type[0]))
639 /* XXX application state */
640 static const struct xc_core_format_type* current_format_type = NULL;
642 void *
643 map_domain_va_core(unsigned long domfd, int cpu, void *guest_va,
644 vcpu_guest_context_t *ctxt)
645 {
646 if (current_format_type == NULL)
647 return NULL;
648 return (current_format_type->map_domain_va_core)(domfd, cpu, guest_va,
649 ctxt);
650 }
652 int
653 xc_waitdomain_core(int xc_handle, int domfd, int *status, int options,
654 vcpu_guest_context_t *ctxt)
655 {
656 int ret;
657 int i;
659 for (i = 0; i < NR_FORMAT_TYPE; i++) {
660 ret = (format_type[i].waitdomain_core)(xc_handle, domfd, status,
661 options, ctxt);
662 if (ret == 0) {
663 current_format_type = &format_type[i];
664 break;
665 }
666 }
667 return ret;
668 }
670 /*
671 * Local variables:
672 * mode: C
673 * c-set-style: "BSD"
674 * c-basic-offset: 4
675 * tab-width: 4
676 * indent-tabs-mode: nil
677 * End:
678 */