debuggers.hg

xen/common/xenoprof.c @ 22906:700ac6445812

description: Now add KDB to the non-kdb tree
author:      Mukesh Rathor
date:        Thu Feb 03 15:42:41 2011 -0800 (2011-02-03)
parents:     a904221a56c2

/*
 * Copyright (C) 2005 Hewlett-Packard Co.
 * written by Aravind Menon & Jose Renato Santos
 * (email: xenoprof@groups.hp.com)
 *
 * arch generic xenoprof and IA64 support.
 * dynamic map/unmap xenoprof buffer support.
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */
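
/*
 * Overview (summary added for readability): xenoprof is the hypervisor
 * side of system-wide profiling (the in-guest consumer is typically
 * OProfile).  Xen allocates per-vCPU sample buffers that are shared with
 * the profiling domain(s), fills them from PMU samples via
 * xenoprof_log_event(), and uses VIRQ_XENOPROF to tell guests to drain
 * their buffers (see the flush on XENOPROF_stop below).
 */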

#ifndef COMPAT
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <public/xenoprof.h>
#include <xen/paging.h>
#include <xsm/xsm.h>
#include <xen/hypercall.h>

/* Limit amount of pages used for shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32

/* Lock protecting the following global state */
static DEFINE_SPINLOCK(xenoprof_lock);

static DEFINE_SPINLOCK(pmu_owner_lock);
int pmu_owner = 0;
int pmu_hvm_refcount = 0;

static struct domain *active_domains[MAX_OPROF_DOMAINS];
static int active_ready[MAX_OPROF_DOMAINS];
static unsigned int adomains;

static struct domain *passive_domains[MAX_OPROF_DOMAINS];
static unsigned int pdomains;

static unsigned int activated;
static struct domain *xenoprof_primary_profiler;
static int xenoprof_state = XENOPROF_IDLE;
static unsigned long backtrace_depth;

static u64 total_samples;
static u64 invalid_buffer_samples;
static u64 corrupted_buffer_samples;
static u64 lost_samples;
static u64 active_samples;
static u64 passive_samples;
static u64 idle_samples;
static u64 others_samples;
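
/*
 * PMU ownership arbitration: the hardware performance counters can be
 * claimed either by xenoprof (PMU_OWNER_XENOPROF) or by HVM vPMU code
 * (PMU_OWNER_HVM).  HVM ownership is reference-counted so it may be
 * acquired multiple times; xenoprof ownership is exclusive.
 * acquire_pmu_ownership() returns 1 on success and 0 if the PMU is
 * already held by a different owner.
 */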
int acquire_pmu_ownership(int pmu_ownship)
{
    spin_lock(&pmu_owner_lock);
    if ( pmu_owner == PMU_OWNER_NONE )
    {
        pmu_owner = pmu_ownship;
        goto out;
    }

    if ( pmu_owner == pmu_ownship )
        goto out;

    spin_unlock(&pmu_owner_lock);
    return 0;
 out:
    if ( pmu_owner == PMU_OWNER_HVM )
        pmu_hvm_refcount++;
    spin_unlock(&pmu_owner_lock);
    return 1;
}

void release_pmu_ownship(int pmu_ownship)
{
    spin_lock(&pmu_owner_lock);
    if ( pmu_ownship == PMU_OWNER_HVM )
        pmu_hvm_refcount--;
    if ( !pmu_hvm_refcount )
        pmu_owner = PMU_OWNER_NONE;
    spin_unlock(&pmu_owner_lock);
}

int is_active(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}

int is_passive(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
}

static int is_profiled(struct domain *d)
{
    return (is_active(d) || is_passive(d));
}

static void xenoprof_reset_stat(void)
{
    total_samples = 0;
    invalid_buffer_samples = 0;
    corrupted_buffer_samples = 0;
    lost_samples = 0;
    active_samples = 0;
    passive_samples = 0;
    idle_samples = 0;
    others_samples = 0;
}

static void xenoprof_reset_buf(struct domain *d)
{
    int j;
    xenoprof_buf_t *buf;

    if ( d->xenoprof == NULL )
    {
        printk("xenoprof_reset_buf: ERROR - Unexpected "
               "Xenoprof NULL pointer \n");
        return;
    }

    for ( j = 0; j < d->max_vcpus; j++ )
    {
        buf = d->xenoprof->vcpu[j].buffer;
        if ( buf != NULL )
        {
            xenoprof_buf(d, buf, event_head) = 0;
            xenoprof_buf(d, buf, event_tail) = 0;
        }
    }
}
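
/*
 * The profiling buffers live in Xen heap pages.  Before handing them to a
 * guest we verify that any previous owner has dropped all references
 * (count_info must show neither PGC_allocated nor outstanding refs), then
 * share each page writably with the target domain.
 */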
static int
share_xenoprof_page_with_guest(struct domain *d, unsigned long mfn, int npages)
{
    int i;

    /* Check if previous page owner has released the page. */
    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
        {
            gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%lx\n",
                     mfn + i, (unsigned long)page->count_info);
            return -EBUSY;
        }
        page_set_owner(page, NULL);
    }

    for ( i = 0; i < npages; i++ )
        share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);

    return 0;
}

static void
unshare_xenoprof_page_with_guest(struct xenoprof *x)
{
    int i, npages = x->npages;
    unsigned long mfn = virt_to_mfn(x->rawbuf);

    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        BUG_ON(page_get_owner(page) != current->domain);
        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);
    }
}

static void
xenoprof_shared_gmfn_with_guest(
    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
{
    int i;

    for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE )
    {
        BUG_ON(page_get_owner(maddr_to_page(maddr)) != d);
        xenoprof_shared_gmfn(d, gmaddr, maddr);
    }
}
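
/*
 * Buffer sizing: each vCPU gets one xenoprof_buf_t, which already contains
 * one event_log slot; (max_samples - 1) further slots are appended.
 * max_samples is clamped so that all per-vCPU buffers together fit in
 * MAX_OPROF_SHARED_PAGES pages.
 *
 * Illustrative example (assuming 4 KiB pages and 2 vCPUs):
 *   max_bufsize     = (32 * 4096) / 2 = 65536 bytes per vCPU
 *   max_max_samples = ((65536 - sizeof(struct xenoprof_buf))
 *                      / sizeof(struct event_log)) + 1
 * and a larger request from the guest is truncated to that value.
 */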
static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);

    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    d->xenoprof->vcpu = xmalloc_array(struct xenoprof_vcpu, d->max_vcpus);
    if ( d->xenoprof->vcpu == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        printk("alloc_xenoprof_struct(): vcpu array allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof->vcpu, 0, d->max_vcpus * sizeof(*d->xenoprof->vcpu));

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    bufsize = sizeof(struct xenoprof_buf);
    i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
    d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? dom0 : d);
    if ( XENOPROF_COMPAT(d->xenoprof) )
    {
        bufsize = sizeof(struct compat_oprof_buf);
        i = sizeof(struct compat_event_log);
    }
#endif

    /* reduce max_samples if necessary to limit pages allocated */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = ( (max_bufsize - bufsize) / i ) + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize += (max_samples - 1) * i;
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        xenoprof_buf_t *buf = (xenoprof_buf_t *)
            &d->xenoprof->rawbuf[i * bufsize];

        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer = buf;
        xenoprof_buf(d, buf, event_size) = max_samples;
        xenoprof_buf(d, buf, vcpu_id) = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}

void free_xenoprof_pages(struct domain *d)
{
    struct xenoprof *x;
    int order;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    if ( x->rawbuf != NULL )
    {
        order = get_order_from_pages(x->npages);
        free_xenheap_pages(x->rawbuf, order);
    }

    xfree(x);
    d->xenoprof = NULL;
}

static int active_index(struct domain *d)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_domains[i] == d )
            return i;

    return -1;
}

static int set_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 1;
    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    active_ready[ind] = 1;
    activated++;

    return 0;
}

static int reset_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 0;
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    active_ready[ind] = 0;
    active_domains[ind] = NULL;
    activated--;
    put_domain(d);

    if ( activated <= 0 )
        adomains = 0;

    return 0;
}

static void reset_passive(struct domain *d)
{
    struct xenoprof *x;

    if ( d == NULL )
        return;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    unshare_xenoprof_page_with_guest(x);
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
}

static void reset_active_list(void)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_ready[i] )
            reset_active(active_domains[i]);

    adomains = 0;
    activated = 0;
}

static void reset_passive_list(void)
{
    int i;

    for ( i = 0; i < pdomains; i++ )
    {
        reset_passive(passive_domains[i]);
        put_domain(passive_domains[i]);
        passive_domains[i] = NULL;
    }

    pdomains = 0;
}
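
/*
 * Two kinds of profiled domains are tracked:
 *  - active domains run a xenoprof agent themselves, are signalled via
 *    VIRQ_XENOPROF, and are registered through add_active_list();
 *  - passive domains are profiled on their behalf: add_passive_list()
 *    maps their sample buffers into the primary profiler's domain
 *    (current->domain) so the tools there can drain them.
 * Both paths take a domain reference that is dropped again on reset.
 */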
static int add_active_list(domid_t domid)
{
    struct domain *d;

    if ( adomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    d = get_domain_by_id(domid);
    if ( d == NULL )
        return -EINVAL;

    active_domains[adomains] = d;
    active_ready[adomains] = 0;
    adomains++;

    return 0;
}

static int add_passive_list(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_passive passive;
    struct domain *d;
    int ret = 0;

    if ( pdomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    if ( copy_from_guest(&passive, arg, 1) )
        return -EFAULT;

    d = get_domain_by_id(passive.domain_id);
    if ( d == NULL )
        return -EINVAL;

    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, passive.max_samples, 1);
        if ( ret < 0 )
        {
            put_domain(d);
            return -ENOMEM;
        }
    }

    ret = share_xenoprof_page_with_guest(
        current->domain, virt_to_mfn(d->xenoprof->rawbuf),
        d->xenoprof->npages);
    if ( ret < 0 )
    {
        put_domain(d);
        return ret;
    }

    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    passive.nbuf = d->xenoprof->nbuf;
    passive.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(current->domain) )
        passive.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            current->domain, __pa(d->xenoprof->rawbuf),
            passive.buf_gmaddr, d->xenoprof->npages);

    if ( copy_to_guest(arg, &passive, 1) )
    {
        put_domain(d);
        return -EFAULT;
    }

    passive_domains[pdomains] = d;
    pdomains++;

    return ret;
}
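
/*
 * The shared sample buffer is a single-producer ring: event_head is the
 * next slot Xen writes, event_tail the next slot the guest reads, and one
 * slot is always left empty to distinguish a full buffer from an empty one.
 *
 * Worked example (illustrative): size = 8, head = 6, tail = 2
 *   tail <= head, so space = 8 + 2 - 6 - 1 = 3 free slots
 * (slots 2..5 hold unread samples, one further slot stays unused).
 */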
/* Get space in the buffer */
static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t * buf, int size)
{
    int head, tail;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);

    return ((tail > head) ? 0 : size) + tail - head - 1;
}

/* Check for space and add a sample. Return 1 if successful, 0 otherwise. */
static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf,
                               unsigned long eip, int mode, int event)
{
    int head, tail, size;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);
    size = xenoprof_buf(d, buf, event_size);

    /* make sure indexes in shared buffer are sane */
    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    {
        corrupted_buffer_samples++;
        return 0;
    }

    if ( xenoprof_buf_space(d, buf, size) > 0 )
    {
        xenoprof_buf(d, buf, event_log[head].eip) = eip;
        xenoprof_buf(d, buf, event_log[head].mode) = mode;
        xenoprof_buf(d, buf, event_log[head].event) = event;
        head++;
        if ( head >= size )
            head = 0;

        xenoprof_buf(d, buf, event_head) = head;
    }
    else
    {
        xenoprof_buf(d, buf, lost_samples)++;
        lost_samples++;
        return 0;
    }

    return 1;
}

int xenoprof_add_trace(struct domain *d, struct vcpu *vcpu,
                       unsigned long eip, int mode)
{
    xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer;

    /* Do not accidentally write an escape code due to a broken frame. */
    if ( eip == XENOPROF_ESCAPE_CODE )
    {
        invalid_buffer_samples++;
        return 0;
    }

    return xenoprof_add_sample(d, buf, eip, mode, 0);
}
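
/*
 * Per-sample accounting.  Samples for unmonitored domains are counted in
 * others_samples and dropped; samples for a vCPU without a mapped buffer
 * bump invalid_buffer_samples.  When backtrace_depth is non-zero, an
 * escape record (XENOPROF_ESCAPE_CODE / XENOPROF_TRACE_BEGIN) is emitted
 * ahead of the sample, which requires at least two free slots, and the
 * trace itself is appended afterwards via xenoprof_backtrace().
 */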
void xenoprof_log_event(struct vcpu *vcpu,
                        struct cpu_user_regs * regs, unsigned long eip,
                        int mode, int event)
{
    struct domain *d = vcpu->domain;
    struct xenoprof_vcpu *v;
    xenoprof_buf_t *buf;

    total_samples++;

    /* Ignore samples of un-monitored domains. */
    if ( !is_profiled(d) )
    {
        others_samples++;
        return;
    }

    v = &d->xenoprof->vcpu[vcpu->vcpu_id];
    if ( v->buffer == NULL )
    {
        invalid_buffer_samples++;
        return;
    }

    buf = v->buffer;

    /* Provide backtrace if requested. */
    if ( backtrace_depth > 0 )
    {
        if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) ||
             !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode,
                                  XENOPROF_TRACE_BEGIN) )
        {
            xenoprof_buf(d, buf, lost_samples)++;
            lost_samples++;
            return;
        }
    }

    if ( xenoprof_add_sample(d, buf, eip, mode, event) )
    {
        if ( is_active(vcpu->domain) )
            active_samples++;
        else
            passive_samples++;
        if ( mode == 0 )
            xenoprof_buf(d, buf, user_samples)++;
        else if ( mode == 1 )
            xenoprof_buf(d, buf, kernel_samples)++;
        else
            xenoprof_buf(d, buf, xen_samples)++;
    }

    if ( backtrace_depth > 0 )
        xenoprof_backtrace(d, vcpu, regs, backtrace_depth, mode);
}

static int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct xenoprof_init xenoprof_init;
    int ret;

    if ( copy_from_guest(&xenoprof_init, arg, 1) )
        return -EFAULT;

    if ( (ret = xenoprof_arch_init(&xenoprof_init.num_events,
                                   xenoprof_init.cpu_type)) )
        return ret;

    xenoprof_init.is_primary =
        ((xenoprof_primary_profiler == d) ||
         ((xenoprof_primary_profiler == NULL) && (d->domain_id == 0)));
    if ( xenoprof_init.is_primary )
        xenoprof_primary_profiler = current->domain;

    return (copy_to_guest(arg, &xenoprof_init, 1) ? -EFAULT : 0);
}

#endif /* !COMPAT */
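
/*
 * Note (added for readability): everything below this point appears to be
 * compiled a second time for the 32-on-64 compat layer via the include of
 * "compat/xenoprof.c" at the bottom of this file, with COMPAT defined and
 * the guest-handle types swapped for their compat variants.
 */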

static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_get_buffer xenoprof_get_buffer;
    struct domain *d = current->domain;
    int ret;

    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
        return -EFAULT;

    /*
     * We allocate xenoprof struct and buffers only at first time
     * get_buffer is called. Memory is then kept until domain is destroyed.
     */
    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0);
        if ( ret < 0 )
            return ret;
    }

    ret = share_xenoprof_page_with_guest(
        d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
    if ( ret < 0 )
        return ret;

    xenoprof_reset_buf(d);

    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain);

    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(d) )
        xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr,
            d->xenoprof->npages);

    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
        return -EFAULT;

    return 0;
}

#define NONPRIV_OP(op) ( (op == XENOPROF_init)         \
                      || (op == XENOPROF_enable_virq)  \
                      || (op == XENOPROF_disable_virq) \
                      || (op == XENOPROF_get_buffer))
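
/*
 * Hypercall entry point.  Only XENOPROF_init, XENOPROF_enable_virq,
 * XENOPROF_disable_virq and XENOPROF_get_buffer are allowed for domains
 * other than the primary profiler (see NONPRIV_OP above).  The profiler
 * drives a simple state machine:
 *
 *   IDLE -> INITIALIZED        (XENOPROF_init by the primary profiler)
 *        -> COUNTERS_RESERVED  (XENOPROF_reserve_counters)
 *        -> READY              (XENOPROF_setup_events)
 *        -> PROFILING          (XENOPROF_start)
 *   PROFILING -> READY         (XENOPROF_stop)
 *   COUNTERS_RESERVED/READY -> INITIALIZED (XENOPROF_release_counters)
 */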
int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    int ret = 0;

    if ( (op < 0) || (op > XENOPROF_last_op) )
    {
        printk("xenoprof: invalid operation %d for domain %d\n",
               op, current->domain->domain_id);
        return -EINVAL;
    }

    if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
    {
        printk("xenoprof: dom %d denied privileged operation %d\n",
               current->domain->domain_id, op);
        return -EPERM;
    }

    ret = xsm_profile(current->domain, op);
    if ( ret )
        return ret;

    spin_lock(&xenoprof_lock);

    switch ( op )
    {
    case XENOPROF_init:
        ret = xenoprof_op_init(arg);
        if ( (ret == 0) &&
             (current->domain == xenoprof_primary_profiler) )
            xenoprof_state = XENOPROF_INITIALIZED;
        break;

    case XENOPROF_get_buffer:
        if ( !acquire_pmu_ownership(PMU_OWNER_XENOPROF) )
        {
            ret = -EBUSY;
            break;
        }
        ret = xenoprof_op_get_buffer(arg);
        break;

    case XENOPROF_reset_active_list:
        reset_active_list();
        ret = 0;
        break;

    case XENOPROF_reset_passive_list:
        reset_passive_list();
        ret = 0;
        break;

    case XENOPROF_set_active:
    {
        domid_t domid;
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        if ( copy_from_guest(&domid, arg, 1) )
        {
            ret = -EFAULT;
            break;
        }
        ret = add_active_list(domid);
        break;
    }

    case XENOPROF_set_passive:
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        ret = add_passive_list(arg);
        break;

    case XENOPROF_reserve_counters:
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_reserve_counters();
        if ( !ret )
            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
        break;

    case XENOPROF_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_counter(arg);
        break;

    case XENOPROF_setup_events:
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_setup_events();
        if ( !ret )
            xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_enable_virq:
    {
        int i;

        if ( current->domain == xenoprof_primary_profiler )
        {
            if ( xenoprof_state != XENOPROF_READY )
            {
                ret = -EPERM;
                break;
            }
            xenoprof_arch_enable_virq();
            xenoprof_reset_stat();
            for ( i = 0; i < pdomains; i++ )
                xenoprof_reset_buf(passive_domains[i]);
        }
        xenoprof_reset_buf(current->domain);
        ret = set_active(current->domain);
        break;
    }

    case XENOPROF_start:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_READY) &&
             (activated == adomains) )
            ret = xenoprof_arch_start();
        if ( ret == 0 )
            xenoprof_state = XENOPROF_PROFILING;
        break;

    case XENOPROF_stop:
    {
        struct domain *d;
        struct vcpu *v;
        int i;

        if ( xenoprof_state != XENOPROF_PROFILING )
        {
            ret = -EPERM;
            break;
        }
        xenoprof_arch_stop();

        /* Flush remaining samples. */
        for ( i = 0; i < adomains; i++ )
        {
            if ( !active_ready[i] )
                continue;
            d = active_domains[i];
            for_each_vcpu(d, v)
                send_guest_vcpu_virq(v, VIRQ_XENOPROF);
        }
        xenoprof_state = XENOPROF_READY;
        break;
    }

    case XENOPROF_disable_virq:
    {
        struct xenoprof *x;
        if ( (xenoprof_state == XENOPROF_PROFILING) &&
             (is_active(current->domain)) )
        {
            ret = -EPERM;
            break;
        }
        if ( (ret = reset_active(current->domain)) != 0 )
            break;
        x = current->domain->xenoprof;
        unshare_xenoprof_page_with_guest(x);
        release_pmu_ownship(PMU_OWNER_XENOPROF);
        break;
    }

    case XENOPROF_release_counters:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
             (xenoprof_state == XENOPROF_READY) )
        {
            xenoprof_state = XENOPROF_INITIALIZED;
            xenoprof_arch_release_counters();
            xenoprof_arch_disable_virq();
            reset_passive_list();
            ret = 0;
        }
        break;

    case XENOPROF_shutdown:
        ret = -EPERM;
        if ( xenoprof_state == XENOPROF_INITIALIZED )
        {
            activated = 0;
            adomains = 0;
            xenoprof_primary_profiler = NULL;
            backtrace_depth = 0;
            ret = 0;
        }
        break;

    case XENOPROF_set_backtrace:
        ret = 0;
        if ( !xenoprof_backtrace_supported() )
            ret = -EINVAL;
        else if ( copy_from_guest(&backtrace_depth, arg, 1) )
            ret = -EFAULT;
        break;

    case XENOPROF_ibs_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_ibs_counter(arg);
        break;

    case XENOPROF_get_ibs_caps:
        ret = ibs_caps;
        break;

    default:
        ret = -ENOSYS;
    }

    spin_unlock(&xenoprof_lock);

    if ( ret < 0 )
        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
               op, current->domain->domain_id, ret);

    return ret;
}

#if defined(CONFIG_COMPAT) && !defined(COMPAT)
#include "compat/xenoprof.c"
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */