debuggers.hg

view tools/xentrace/xentrace.c @ 21067:b4a1832a916f

Update Xen version to 4.0.0-rc6
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 09 18:18:05 2010 +0000 (2010-03-09)
parents 0e453e4d932d
children e2ec5cd8b396
line source
1 /******************************************************************************
2 * tools/xentrace/xentrace.c
3 *
4 * Tool for collecting trace buffer data from Xen.
5 *
6 * Copyright (C) 2004 by Intel Research Cambridge
7 *
8 * Author: Mark Williamson, mark.a.williamson@intel.com
9 * Date: February 2004
10 */
12 #include <time.h>
13 #include <stdlib.h>
14 #include <stdio.h>
15 #include <sys/mman.h>
16 #include <sys/stat.h>
17 #include <sys/types.h>
18 #include <fcntl.h>
19 #include <unistd.h>
20 #include <errno.h>
21 #include <signal.h>
22 #include <inttypes.h>
23 #include <string.h>
24 #include <getopt.h>
25 #include <assert.h>
26 #include <sys/poll.h>
27 #include <sys/statvfs.h>
29 #include <xen/xen.h>
30 #include <xen/trace.h>
32 #include <xenctrl.h>
/*
 * PERROR(_m, _a...) - print "ERROR: <msg> (errno = strerror)" to stderr.
 * errno is saved across the fprintf() call and restored afterwards so the
 * caller can still inspect the original error code.
 */
#define PERROR(_m, _a...) \
do { \
    int __saved_errno = errno; \
    fprintf(stderr, "ERROR: " _m " (%d = %s)\n" , ## _a , \
            __saved_errno, strerror(__saved_errno)); \
    errno = __saved_errno; \
} while (0)
/***** Compile time configuration of defaults ********************************/

/* sleep for this long (milliseconds) between checking the trace buffers */
#define POLL_SLEEP_MILLIS 100

/* default trace buffer size, in pages, used by get_tbufs() when the user
 * gives no -S/--trace-buf-size option */
#define DEFAULT_TBUF_SIZE 20

/***** The code **************************************************************/
/* Run-time options; filled in by parse_args() and read throughout. */
typedef struct settings_st {
    char *outfile;               /* output file name (NULL => write to outfd as-is) */
    unsigned long poll_sleep;    /* milliseconds to sleep between polls */
    uint32_t evt_mask;           /* event mask handed to the hypervisor */
    uint32_t cpu_mask;           /* cpu mask handed to the hypervisor */
    unsigned long tbuf_size;     /* requested per-cpu trace buffer size (pages) */
    unsigned long disk_rsvd;     /* MB of free disk space to preserve (-r) */
    unsigned long timeout;       /* seconds before alarm() interrupts us (-T) */
    unsigned long memory_buffer; /* bytes of in-memory buffering, 0 = none (-M) */
    uint8_t discard:1,           /* discard records already in the buffers (-D) */
        disable_tracing:1;       /* disable tracing in Xen when exiting */
} settings_t;
/* Handles onto the mapped Xen trace buffers, returned by map_tbufs(). */
struct t_struct {
    struct t_info *t_info;  /* Structure with information about individual buffers */
    struct t_buf **meta;    /* Pointers to trace buffer metadata */
    unsigned char **data;   /* Pointers to trace buffer data areas */
};
settings_t opts;            /* global options, set once in parse_args() */

/* set by close_handler() on SIGHUP/SIGTERM/SIGINT/SIGALRM (see main()) */
int interrupted = 0;

static int xc_handle = -1;  /* handle onto the Xen control interface */
static int event_fd = -1;   /* event-channel driver fd from xc_evtchn_open() */
static int virq_port = -1;  /* local port bound to VIRQ_TBUF */
static int outfd = 1;       /* output fd; stdout unless opts.outfile is opened */
/* Signal handler: flag the main loop to drain the buffers and exit. */
static void close_handler(int signal)
{
    interrupted = 1;
}
/*
 * Circular in-memory buffer used when -M/--memory-buffer is given.
 * prod/cons are free-running byte indices into buf (reduced by
 * MEMBUF_PROD_SET once they pass MEMBUF_INDEX_RESET_THRESHOLD);
 * pending_prod/pending_size track a window reserved by
 * membuf_reserve_window() but not yet fully written.
 */
static struct {
    char * buf;
    unsigned long prod, cons, size;
    unsigned long pending_size, pending_prod;
} membuf = { 0 };
/* Reduce the free-running indices once they exceed this, to avoid overflow. */
#define MEMBUF_INDEX_RESET_THRESHOLD (1<<29)

/* Translate a free-running index into a pointer inside membuf.buf. */
/* FIXME -- make a power of 2 so we can mask instead. */
#define MEMBUF_POINTER(_i) (membuf.buf + ((_i) % membuf.size))

/* Advance the consumer index by _n bytes. */
#define MEMBUF_CONS_INCREMENT(_n) \
do { \
    membuf.cons += (_n); \
} while(0)

/*
 * Publish a new producer index.  The index must never move backwards;
 * once it passes the reset threshold both indices are reduced modulo
 * the buffer size, keeping prod >= cons so (prod - cons) stays valid.
 */
#define MEMBUF_PROD_SET(_x) \
do { \
    if ( (_x) < membuf.prod ) { \
        fprintf(stderr, "%s: INTERNAL_ERROR: prod %lu, trying to set to %lu!\n", \
                __func__, membuf.prod, (unsigned long)(_x)); \
        exit(1); \
    } \
    membuf.prod = (_x); \
    if ( (_x) > MEMBUF_INDEX_RESET_THRESHOLD ) \
    { \
        membuf.prod %= membuf.size; \
        membuf.cons %= membuf.size; \
        if( membuf.prod < membuf.cons ) \
            membuf.prod += membuf.size; \
    } \
} while(0)
/*
 * Record emitted ahead of each buffer "window" in the output stream,
 * identifying the source CPU and the number of data bytes that follow.
 */
struct cpu_change_record {
    uint32_t header;           /* CPU_CHANGE_HEADER */
    struct {
        int cpu;               /* source CPU id */
        unsigned window_size;  /* bytes of trace data following this record */
    } data;
};

/* TRC_TRACE_CPU_CHANGE with the record's extra-word count (payload words
 * beyond the header) encoded in the TRACE_EXTRA field. */
#define CPU_CHANGE_HEADER \
    (TRC_TRACE_CPU_CHANGE \
     | (((sizeof(struct cpu_change_record)/sizeof(uint32_t)) - 1) \
        << TRACE_EXTRA_SHIFT) )
128 void membuf_alloc(unsigned long size)
129 {
130 membuf.buf = malloc(size);
132 if(!membuf.buf)
133 {
134 fprintf(stderr, "%s: Couldn't malloc %lu bytes!\n",
135 __func__, size);
136 exit(1);
137 }
139 membuf.prod = membuf.cons = 0;
140 membuf.size = size;
141 }
/*
 * Reserve a new window in the buffer. Move the 'consumer' forward size
 * bytes, re-adjusting the cpu window sizes as necessary, and insert a
 * cpu_change record.
 */
void membuf_reserve_window(unsigned cpu, unsigned long window_size)
{
    struct cpu_change_record *rec;
    long need_to_consume, free, freed;

    /* Only one window may be pending at a time. */
    if ( membuf.pending_size > 0 )
    {
        fprintf(stderr, "%s: INTERNAL_ERROR: pending_size %lu\n",
                __func__, membuf.pending_size);
        exit(1);
    }

    /* Total space this window occupies: data plus its header record. */
    need_to_consume = window_size + sizeof(*rec);

    if ( window_size > membuf.size )
    {
        fprintf(stderr, "%s: reserve size %lu larger than buffer size %lu!\n",
                __func__, window_size, membuf.size);
        exit(1);
    }

    /* Subtract free space already in buffer. */
    free = membuf.size - (membuf.prod - membuf.cons);
    if( need_to_consume < free)
        goto start_window;

    need_to_consume -= free;

    /*
     * "Free" up full windows until we have enough for this window.
     * It's a bit wasteful to throw away partial buffers, but the only
     * other option is to scan through the buffer headers.  Since the
     * common case is that it's going to be thrown away next anyway, I
     * think minimizing the overall impact is more important.
     */
    do {
        /* The record at the consumer must be a cpu_change header;
         * anything else means the ring structure is corrupt. */
        rec = (struct cpu_change_record *)MEMBUF_POINTER(membuf.cons);
        if( rec->header != CPU_CHANGE_HEADER )
        {
            fprintf(stderr, "%s: INTERNAL ERROR: no cpu_change record at consumer!\n",
                    __func__);
            exit(EXIT_FAILURE);
        }

        /* Discard that whole window: header record plus its data. */
        freed = sizeof(*rec) + rec->data.window_size;

        if ( need_to_consume > 0 )
        {
            MEMBUF_CONS_INCREMENT(freed);
            need_to_consume -= freed;
        }
    } while( need_to_consume > 0 );

start_window:
    /*
     * Start writing "pending" data.  Update prod once all this data is
     * written (see membuf_write()).
     */
    membuf.pending_prod = membuf.prod;
    membuf.pending_size = window_size;

    rec = (struct cpu_change_record *)MEMBUF_POINTER(membuf.pending_prod);

    rec->header = CPU_CHANGE_HEADER;
    rec->data.cpu = cpu;
    rec->data.window_size = window_size;

    membuf.pending_prod += sizeof(*rec);
}
/*
 * Copy 'size' bytes at 'start' into the pending window, wrapping at the
 * end of the circular buffer.  Once the whole pending window has been
 * written, publish it by advancing the producer index.
 */
void membuf_write(void *start, unsigned long size) {
    char * p;
    unsigned long wsize;

    /* Must fit in the space membuf_reserve_window() guaranteed. */
    if( (membuf.size - (membuf.prod - membuf.cons)) < size )
    {
        fprintf(stderr, "%s: INTERNAL ERROR: need %lu bytes, only have %lu!\n",
                __func__, size, membuf.prod - membuf.cons);
        exit(1);
    }

    /* Cannot write more than was reserved for this window. */
    if( size > membuf.pending_size )
    {
        fprintf(stderr, "%s: INTERNAL ERROR: size %lu, pending %lu!\n",
                __func__, size, membuf.pending_size);
        exit(1);
    }

    wsize = size;
    p = MEMBUF_POINTER(membuf.pending_prod);

    /* If the buffer overlaps the "wrap", do an extra write */
    if ( p + size > membuf.buf + membuf.size )
    {
        int usize = ( membuf.buf + membuf.size ) - p;

        memcpy(p, start, usize);

        start += usize;
        wsize -= usize;
        p = membuf.buf;
    }

    memcpy(p, start, wsize);

    membuf.pending_prod += size;
    membuf.pending_size -= size;

    if ( membuf.pending_size == 0 )
    {
        /* Whole window written; make it visible to membuf_dump(). */
        MEMBUF_PROD_SET(membuf.pending_prod);
    }
}
262 void membuf_dump(void) {
263 /* Dump circular memory buffer */
264 int cons, prod, wsize, written;
265 char * wstart;
267 fprintf(stderr, "Dumping memory buffer.\n");
269 cons = membuf.cons % membuf.size;
270 prod = membuf.prod % membuf.size;
272 if(prod > cons)
273 {
274 /* Write in one go */
275 wstart = membuf.buf + cons;
276 wsize = prod - cons;
278 written = write(outfd, wstart, wsize);
279 if ( written != wsize )
280 goto fail;
281 }
282 else
283 {
284 /* Write in two pieces: cons->end, beginning->prod. */
285 wstart = membuf.buf + cons;
286 wsize = membuf.size - cons;
288 written = write(outfd, wstart, wsize);
289 if ( written != wsize )
290 {
291 fprintf(stderr, "Write failed! (size %d, returned %d)\n",
292 wsize, written);
293 goto fail;
294 }
296 wstart = membuf.buf;
297 wsize = prod;
299 written = write(outfd, wstart, wsize);
300 if ( written != wsize )
301 {
302 fprintf(stderr, "Write failed! (size %d, returned %d)\n",
303 wsize, written);
304 goto fail;
305 }
306 }
308 membuf.cons = membuf.prod = 0;
310 return;
311 fail:
312 exit(1);
313 return;
314 }
/**
 * write_buffer - write a section of the trace buffer
 * @cpu - source buffer CPU ID
 * @start
 * @size - size of write (may be less than total window size)
 * @total_size - total size of the window (0 on 2nd write of wrapped windows)
 *
 * Outputs the trace buffer to a filestream, prepending the CPU and size
 * of the buffer write.  Exits on write failure or when the disk-space
 * reservation (-r) would be violated.
 */
static void write_buffer(unsigned int cpu, unsigned char *start, int size,
                         int total_size)
{
    struct statvfs stat;
    size_t written = 0;

    /* Disk-space check only applies when writing straight to a file. */
    if ( opts.memory_buffer == 0 && opts.disk_rsvd != 0 )
    {
        unsigned long long freespace;

        /* Check that filesystem has enough space. */
        if ( fstatvfs (outfd, &stat) )
        {
            fprintf(stderr, "Statfs failed!\n");
            goto fail;
        }

        freespace = stat.f_frsize * (unsigned long long)stat.f_bfree;

        /* Account for the bytes this call is about to write. */
        if ( total_size )
            freespace -= total_size;
        else
            freespace -= size;

        freespace >>= 20; /* Convert to MB */

        if ( freespace <= opts.disk_rsvd )
        {
            fprintf(stderr, "Disk space limit reached (free space: %lluMB, limit: %luMB).\n", freespace, opts.disk_rsvd);
            exit (EXIT_FAILURE);
        }
    }

    /* Write a CPU_BUF record on each buffer "window" written.  Wrapped
     * windows may involve two writes, so only write the record on the
     * first write. */
    if ( total_size != 0 )
    {
        if ( opts.memory_buffer )
        {
            membuf_reserve_window(cpu, total_size);
        }
        else
        {
            struct cpu_change_record rec;

            rec.header = CPU_CHANGE_HEADER;
            rec.data.cpu = cpu;
            rec.data.window_size = total_size;

            written = write(outfd, &rec, sizeof(rec));
            if ( written != sizeof(rec) )
            {
                fprintf(stderr, "Cannot write cpu change (write returned %zd)\n",
                        written);
                goto fail;
            }
        }
    }

    /* Now the window data itself. */
    if ( opts.memory_buffer )
    {
        membuf_write(start, size);
    }
    else
    {
        written = write(outfd, start, size);
        if ( written != size )
        {
            fprintf(stderr, "Write failed! (size %d, returned %zd)\n",
                    size, written);
            goto fail;
        }
    }

    return;

fail:
    PERROR("Failed to write trace data");
    exit(EXIT_FAILURE);
}
/*
 * Open a private xc handle and ask Xen to disable the trace buffers.
 * Failures are reported but non-fatal (best-effort shutdown path).
 */
static void disable_tbufs(void)
{
    /* Use a private handle: the shared one may already be closed on
     * the shutdown path. */
    int xc_handle = xc_interface_open();
    int ret;

    if ( xc_handle < 0 )
    {
        /* Previously fell through to xc_interface_close() on this
         * invalid (negative) handle; just bail out instead. */
        perror("Couldn't open xc handle to disable tbufs.");
        return;
    }

    ret = xc_tbuf_disable(xc_handle);

    if ( ret != 0 )
    {
        perror("Couldn't disable trace buffers");
    }

    xc_interface_close(xc_handle);
}
/**
 * get_tbufs - enable Xen's trace buffers and retrieve their location
 * @mfn:  filled in by xc_tbuf_enable (machine frame of the t_info area
 *        — presumably; confirm against xc_tbuf_enable docs)
 * @size: filled in by xc_tbuf_enable (size of the t_info area)
 *
 * Applies DEFAULT_TBUF_SIZE if the user gave no -S option.  Exits on
 * failure.
 */
static void get_tbufs(unsigned long *mfn, unsigned long *size)
{
    int ret;

    if(!opts.tbuf_size)
        opts.tbuf_size = DEFAULT_TBUF_SIZE;

    ret = xc_tbuf_enable(xc_handle, opts.tbuf_size, mfn, size);

    if ( ret != 0 )
    {
        perror("Couldn't enable trace buffers");
        exit(1);
    }
}
/**
 * map_tbufs - memory map Xen trace buffers into user space
 * @tbufs_mfn: mfn of the trace buffers
 * @num: number of trace buffers to map
 * @tinfo_size: size of the t_info metadata map
 *
 * Maps the Xen trace buffers into process address space: first the
 * t_info metadata page(s), then each per-cpu buffer listed there.
 * Exits on any mapping or allocation failure.  Returns a pointer to
 * static storage (not reentrant).
 */
static struct t_struct *map_tbufs(unsigned long tbufs_mfn, unsigned int num,
                                  unsigned long tinfo_size)
{
    static struct t_struct tbufs = { 0 };
    int i;

    /* Map t_info metadata structure */
    tbufs.t_info = xc_map_foreign_range(xc_handle, DOMID_XEN,
                                        tinfo_size, PROT_READ | PROT_WRITE,
                                        tbufs_mfn);

    if ( tbufs.t_info == 0 )
    {
        PERROR("Failed to mmap trace buffers");
        exit(EXIT_FAILURE);
    }

    /* A zero per-buffer size would make the VLA below empty. */
    if ( tbufs.t_info->tbuf_size == 0 )
    {
        fprintf(stderr, "%s: tbuf_size 0!\n", __func__);
        exit(EXIT_FAILURE);
    }

    /* Map per-cpu buffers */
    tbufs.meta = (struct t_buf **)calloc(num, sizeof(struct t_buf *));
    tbufs.data = (unsigned char **)calloc(num, sizeof(unsigned char *));
    if ( tbufs.meta == NULL || tbufs.data == NULL )
    {
        PERROR( "Failed to allocate memory for buffer pointers\n");
        exit(EXIT_FAILURE);
    }

    for(i=0; i<num; i++)
    {
        /* Per-cpu mfn list lives inside the t_info map, at an offset
         * recorded in t_info->mfn_offset[i]. */
        uint32_t *mfn_list = ((uint32_t *)tbufs.t_info) + tbufs.t_info->mfn_offset[i];
        int j;
        /* Widen the 32-bit mfns to xen_pfn_t for the batch-map call. */
        xen_pfn_t pfn_list[tbufs.t_info->tbuf_size];

        for ( j=0; j<tbufs.t_info->tbuf_size; j++)
            pfn_list[j] = (xen_pfn_t)mfn_list[j];

        tbufs.meta[i] = xc_map_foreign_batch(xc_handle, DOMID_XEN,
                                             PROT_READ | PROT_WRITE,
                                             pfn_list,
                                             tbufs.t_info->tbuf_size);
        if ( tbufs.meta[i] == NULL )
        {
            PERROR("Failed to map cpu buffer!");
            exit(EXIT_FAILURE);
        }
        /* Data records start immediately after the t_buf header. */
        tbufs.data[i] = (unsigned char *)(tbufs.meta[i]+1);
    }

    return &tbufs;
}
512 /**
513 * set_mask - set the cpu/event mask in HV
514 * @mask: the new mask
515 * @type: the new mask type,0-event mask, 1-cpu mask
516 *
517 */
518 static void set_mask(uint32_t mask, int type)
519 {
520 int ret = 0;
522 if (type == 1) {
523 ret = xc_tbuf_set_cpu_mask(xc_handle, mask);
524 fprintf(stderr, "change cpumask to 0x%x\n", mask);
525 } else if (type == 0) {
526 ret = xc_tbuf_set_evt_mask(xc_handle, mask);
527 fprintf(stderr, "change evtmask to 0x%x\n", mask);
528 }
530 if ( ret != 0 )
531 {
532 PERROR("Failure to get trace buffer pointer from Xen and set the new mask");
533 exit(EXIT_FAILURE);
534 }
535 }
/**
 * get_num_cpus - get the number of logical CPUs
 *
 * Queries xc_physinfo(); exits on failure.
 */
static unsigned int get_num_cpus(void)
{
    xc_physinfo_t physinfo = { 0 };
    int ret;

    ret = xc_physinfo(xc_handle, &physinfo);

    if ( ret != 0 )
    {
        PERROR("Failure to get logical CPU count from Xen");
        exit(EXIT_FAILURE);
    }

    return physinfo.nr_cpus;
}
/**
 * event_init - setup to receive the VIRQ_TBUF event
 *
 * Opens the event-channel driver (fd saved in event_fd) and binds
 * VIRQ_TBUF (local port saved in virq_port).  Exits on failure.
 */
static void event_init(void)
{
    int rc;

    rc = xc_evtchn_open();
    if (rc < 0) {
        perror(xc_get_last_error()->message);
        exit(EXIT_FAILURE);
    }
    event_fd = rc;

    rc = xc_evtchn_bind_virq(event_fd, VIRQ_TBUF);
    if (rc == -1) {
        PERROR("failed to bind to VIRQ port");
        exit(EXIT_FAILURE);
    }
    virq_port = rc;
}
/**
 * wait_for_event_or_timeout - sleep for the specified number of milliseconds,
 * or until an VIRQ_TBUF event occurs
 *
 * On wakeup-by-event, verifies the pending port is the one we bound and
 * unmasks it so future notifications are delivered.  EINTR during poll()
 * (e.g. our signal handler firing) returns early; other errors are fatal.
 */
static void wait_for_event_or_timeout(unsigned long milliseconds)
{
    int rc;
    struct pollfd fd = { .fd = event_fd,
                         .events = POLLIN | POLLERR };
    int port;

    rc = poll(&fd, 1, milliseconds);
    if (rc == -1) {
        if (errno == EINTR)
            return;
        PERROR("poll exitted with an error");
        exit(EXIT_FAILURE);
    }

    if (rc == 1) {
        /* Fetch the pending port and sanity-check it is our VIRQ. */
        port = xc_evtchn_pending(event_fd);
        if (port == -1) {
            PERROR("failed to read port from evtchn");
            exit(EXIT_FAILURE);
        }
        if (port != virq_port) {
            fprintf(stderr,
                    "unexpected port returned from evtchn (got %d vs expected %d)\n",
                    port, virq_port);
            exit(EXIT_FAILURE);
        }
        /* Re-enable delivery of the next notification. */
        rc = xc_evtchn_unmask(event_fd, port);
        if (rc == -1) {
            PERROR("failed to write port to evtchn");
            exit(EXIT_FAILURE);
        }
    }
}
/**
 * monitor_tbufs - monitor the contents of tbufs and output to a file
 *
 * Main loop: map the trace buffers, then repeatedly scan every CPU's
 * buffer and emit each new window of records via write_buffer(),
 * waiting on VIRQ_TBUF (or the poll timeout) between scans.  When
 * 'interrupted' is set, performs one final drain pass (optionally
 * disabling tracing first) before returning.
 */
static int monitor_tbufs(void)
{
    int i;

    struct t_struct *tbufs;      /* Pointer to hypervisor maps */
    struct t_buf **meta;         /* pointers to the trace buffer metadata */
    unsigned char **data;        /* pointers to the trace buffer data areas
                                  * where they are mapped into user space. */
    unsigned long tbufs_mfn;     /* mfn of the tbufs */
    unsigned int  num;           /* number of trace buffers / logical CPUS */
    unsigned long tinfo_size;    /* size of t_info metadata map */
    unsigned long size;          /* size of a single trace buffer */

    unsigned long data_size;     /* trace data bytes per buffer (size - header) */

    int last_read = 1;           /* allow one final pass after interruption */

    /* prepare to listen for VIRQ_TBUF */
    event_init();

    /* get number of logical CPUs (and therefore number of trace buffers) */
    num = get_num_cpus();

    /* setup access to trace buffers */
    get_tbufs(&tbufs_mfn, &tinfo_size);
    tbufs = map_tbufs(tbufs_mfn, num, tinfo_size);

    size = tbufs->t_info->tbuf_size * XC_PAGE_SIZE;

    data_size = size - sizeof(struct t_buf);

    meta = tbufs->meta;
    data = tbufs->data;

    /* -D: skip everything already in the buffers. */
    if ( opts.discard )
        for ( i = 0; i < num; i++ )
            meta[i]->cons = meta[i]->prod;

    /* now, scan buffers for events */
    while ( 1 )
    {
        for ( i = 0; i < num; i++ )
        {
            unsigned long start_offset, end_offset, window_size, cons, prod;

            /* Read window information only once. */
            cons = meta[i]->cons;
            prod = meta[i]->prod;
            xen_rmb(); /* read prod, then read item. */

            if ( cons == prod )
                continue;

            assert(cons < 2*data_size);
            assert(prod < 2*data_size);

            // NB: if (prod<cons), then (prod-cons)%data_size will not yield
            // the correct answer because data_size is not a power of 2.
            if ( prod < cons )
                window_size = (prod + 2*data_size) - cons;
            else
                window_size = prod - cons;
            assert(window_size > 0);
            assert(window_size <= data_size);

            start_offset = cons % data_size;
            end_offset = prod % data_size;

            if ( end_offset > start_offset )
            {
                /* If window does not wrap, write in one big chunk */
                write_buffer(i, data[i]+start_offset,
                             window_size,
                             window_size);
            }
            else
            {
                /* If wrapped, write in two chunks:
                 * - first, start to the end of the buffer
                 * - second, start of buffer to end of window
                 */
                write_buffer(i, data[i] + start_offset,
                             data_size - start_offset,
                             window_size);
                write_buffer(i, data[i],
                             end_offset,
                             0);
            }

            xen_mb(); /* read buffer, then update cons. */
            meta[i]->cons = prod;
        }

        if ( interrupted )
        {
            if ( last_read )
            {
                /* Disable tracing, then read through all the buffers one last time */
                if ( opts.disable_tracing )
                    disable_tbufs();
                last_read = 0;
                continue;
            }
            else
                break;
        }

        wait_for_event_or_timeout(opts.poll_sleep);
    }

    if ( opts.memory_buffer )
        membuf_dump();

    /* cleanup */
    free(meta);
    free(data);
    /* don't need to munmap - cleanup is automatic */
    close(outfd);

    return 0;
}
/******************************************************************************
 * Command line handling
 *****************************************************************************/

/* Two-level stringification: xstr() expands its macro argument first. */
#define xstr(x) str(x)
#define str(x) #x

const char *program_version = "xentrace v1.2";
const char *program_bug_address = "<mark.a.williamson@intel.com>";
/* Print usage information to stdout and exit with EXIT_FAILURE. */
static void usage(void)
{
#define USAGE_STR \
"Usage: xentrace [OPTION...] [output file]\n" \
"Tool to capture Xen trace buffer data\n" \
"\n" \
"  -c, --cpu-mask=c        Set cpu-mask\n" \
"  -e, --evt-mask=e        Set evt-mask\n" \
"  -s, --poll-sleep=p      Set sleep time, p, in milliseconds between\n" \
"                          polling the trace buffer for new data\n" \
"                          (default " xstr(POLL_SLEEP_MILLIS) ").\n" \
"  -S, --trace-buf-size=N  Set trace buffer size in pages (default " \
xstr(DEFAULT_TBUF_SIZE) ").\n" \
"                          N.B. that the trace buffer cannot be resized.\n" \
"                          if it has already been set this boot cycle,\n" \
"                          this argument will be ignored.\n" \
"  -D  --discard-buffers   Discard all records currently in the trace\n" \
"                          buffers before beginning.\n" \
"  -x  --dont-disable-tracing\n" \
"                          By default, xentrace will disable tracing when\n" \
"                          it exits. Selecting this option will tell it to\n" \
"                          keep tracing on.  Traces will be collected in\n" \
"                          Xen's trace buffers until they become full.\n" \
"  -T  --time-interval=s   Run xentrace for s seconds and quit.\n" \
"  -?, --help              Show this message\n" \
"  -V, --version           Print program version\n" \
"  -M, --memory-buffer=b   Copy trace records to a circular memory buffer.\n" \
"                          Dump to file on exit.\n" \
"\n" \
"This tool is used to capture trace buffer data from Xen. The\n" \
"data is output in a binary format, in the following order:\n" \
"\n" \
"  CPU(uint) TSC(uint64_t) EVENT(uint32_t) D1 D2 D3 D4 D5 (all uint32_t)\n" \
"\n" \
"The output should be parsed using the tool xentrace_format,\n" \
"which can produce human-readable output in ASCII format.\n"

    printf(USAGE_STR);
    printf("\nReport bugs to %s\n", program_bug_address);

    exit(EXIT_FAILURE);
}
/* convert the argument string pointed to by arg to a long int representation,
 * including suffixes such as 'M' and 'k'. */
#define MB (1024*1024)
#define KB (1024)
long sargtol(const char *restrict arg, int base)
{
    char *endp;
    long val;

    errno = 0;
    val = strtol(arg, &endp, base);

    if ( errno != 0 )
    {
        fprintf(stderr, "Invalid option argument: %s\n", arg);
        fprintf(stderr, "Error: %s\n\n", strerror(errno));
        usage();
    }
    else if (endp == arg)
    {
        /* No digits consumed at all. */
        goto invalid;
    }

    /* Apply an optional size suffix. */
    switch(*endp)
    {
    case '\0':
        break;
    case 'M':
        val *= MB;
        break;
    case 'K':
    case 'k':
        val *= KB;
        break;
    default:
        fprintf(stderr, "Unknown suffix %c\n", *endp);
        exit(1);
    }

    return val;

invalid:
    /*
     * The original placed "return 0;" before this diagnostic, which made
     * the message and the usage() call unreachable and silently treated
     * a non-numeric argument as 0.  Report the error as evidently
     * intended; usage() exits.
     */
    fprintf(stderr, "Invalid option argument: %s\n\n", arg);
    usage();
    return 0; /* not reached */
}
/* convert the argument string pointed to by arg to a long int representation */
static long argtol(const char *restrict arg, int base)
{
    char *end;
    long value;

    errno = 0;
    value = strtol(arg, &end, base);

    /* strtol reported a range/conversion error. */
    if ( errno != 0 )
    {
        fprintf(stderr, "Invalid option argument: %s\n", arg);
        fprintf(stderr, "Error: %s\n\n", strerror(errno));
        usage();
    }

    /* No digits, or trailing junk after the number. */
    if ( end == arg || *end != '\0' )
    {
        fprintf(stderr, "Invalid option argument: %s\n\n", arg);
        usage();
    }

    return value;
}
867 static int parse_evtmask(char *arg)
868 {
869 /* search filtering class */
870 if (strcmp(arg, "gen") == 0){
871 opts.evt_mask |= TRC_GEN;
872 } else if(strcmp(arg, "sched") == 0){
873 opts.evt_mask |= TRC_SCHED;
874 } else if(strcmp(arg, "dom0op") == 0){
875 opts.evt_mask |= TRC_DOM0OP;
876 } else if(strcmp(arg, "hvm") == 0){
877 opts.evt_mask |= TRC_HVM;
878 } else if(strcmp(arg, "all") == 0){
879 opts.evt_mask |= TRC_ALL;
880 } else {
881 opts.evt_mask = argtol(arg, 0);
882 }
884 return 0;
885 }
/* parse command line arguments */
static void parse_args(int argc, char **argv)
{
    int option;
    static struct option long_options[] = {
        /* NOTE(review): "log-thresh"/'t' appears here and in the optstring
         * below but has no case in the switch, so it falls through to
         * usage() — looks like a leftover option; confirm intent. */
        { "log-thresh",     required_argument, 0, 't' },
        { "poll-sleep",     required_argument, 0, 's' },
        { "cpu-mask",       required_argument, 0, 'c' },
        { "evt-mask",       required_argument, 0, 'e' },
        { "trace-buf-size", required_argument, 0, 'S' },
        { "reserve-disk-space", required_argument, 0, 'r' },
        { "time-interval",  required_argument, 0, 'T' },
        { "memory-buffer",  required_argument, 0, 'M' },
        { "discard-buffers", no_argument,      0, 'D' },
        { "dont-disable-tracing", no_argument, 0, 'x' },
        { "help",           no_argument,       0, '?' },
        { "version",        no_argument,       0, 'V' },
        { 0, 0, 0, 0 }
    };

    while ( (option = getopt_long(argc, argv, "t:s:c:e:S:r:T:M:Dx?V",
                                  long_options, NULL)) != -1)
    {
        switch ( option )
        {
        case 's': /* set sleep time (given in milliseconds) */
            opts.poll_sleep = argtol(optarg, 0);
            break;

        case 'c': /* set new cpu mask for filtering*/
            opts.cpu_mask = argtol(optarg, 0);
            break;

        case 'e': /* set new event mask for filtering*/
            parse_evtmask(optarg);
            break;

        case 'S': /* set tbuf size (given in pages) */
            opts.tbuf_size = argtol(optarg, 0);
            break;

        case 'V': /* print program version */
            printf("%s\n", program_version);
            exit(EXIT_SUCCESS);
            break;

        case 'D': /* Discard traces currently in buffer */
            opts.discard = 1;
            break;

        case 'r': /* Disk-space reservation */
            opts.disk_rsvd = argtol(optarg, 0);
            break;

        case 'x': /* Don't disable tracing */
            opts.disable_tracing = 0;
            break;

        case 'T': /* Time interval in seconds (drives alarm() in main) */
            opts.timeout = argtol(optarg, 0);
            break;

        case 'M': /* Memory buffer size; accepts K/M suffixes via sargtol */
            opts.memory_buffer = sargtol(optarg, 0);
            break;

        default:
            usage();
        }
    }

    /* get outfile (required last argument) */
    if (optind != (argc-1))
        usage();

    opts.outfile = argv[optind];
}
/* *BSD has no O_LARGEFILE; define it away so the open() flags compile. */
#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif
970 int main(int argc, char **argv)
971 {
972 int ret;
973 struct sigaction act;
975 opts.outfile = 0;
976 opts.poll_sleep = POLL_SLEEP_MILLIS;
977 opts.evt_mask = 0;
978 opts.cpu_mask = 0;
979 opts.disk_rsvd = 0;
980 opts.disable_tracing = 1;
981 opts.timeout = 0;
983 parse_args(argc, argv);
985 xc_handle = xc_interface_open();
986 if ( xc_handle < 0 )
987 {
988 perror(xc_get_last_error()->message);
989 exit(EXIT_FAILURE);
990 }
992 if ( opts.evt_mask != 0 )
993 set_mask(opts.evt_mask, 0);
995 if ( opts.cpu_mask != 0 )
996 set_mask(opts.cpu_mask, 1);
998 if ( opts.timeout != 0 )
999 alarm(opts.timeout);
1001 if ( opts.outfile )
1002 outfd = open(opts.outfile,
1003 O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE,
1004 0644);
1006 if ( outfd < 0 )
1008 perror("Could not open output file");
1009 exit(EXIT_FAILURE);
1012 if ( isatty(outfd) )
1014 fprintf(stderr, "Cannot output to a TTY, specify a log file.\n");
1015 exit(EXIT_FAILURE);
1018 if ( opts.memory_buffer > 0 )
1019 membuf_alloc(opts.memory_buffer);
1021 /* ensure that if we get a signal, we'll do cleanup, then exit */
1022 act.sa_handler = close_handler;
1023 act.sa_flags = 0;
1024 sigemptyset(&act.sa_mask);
1025 sigaction(SIGHUP, &act, NULL);
1026 sigaction(SIGTERM, &act, NULL);
1027 sigaction(SIGINT, &act, NULL);
1028 sigaction(SIGALRM, &act, NULL);
1030 ret = monitor_tbufs();
1032 return ret;
1034 /*
1035 * Local variables:
1036 * mode: C
1037 * c-set-style: "BSD"
1038 * c-basic-offset: 4
1039 * tab-width: 4
1040 * indent-tabs-mode: nil
1041 * End:
1042 */