debuggers.hg

view xen/common/xenoprof.c @ 19826:2f9e1348aa98

x86_64: allow more vCPU-s per guest

Since the shared info layout is fixed, guests are required to use
VCPUOP_register_vcpu_info prior to booting any vCPU beyond the
traditional limit of 32.

MAX_VIRT_CPUS, being an implementation detail of the hypervisor, is no
longer exposed in the public headers.
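
For illustration, a minimal guest-side sketch of that registration - hedged:
it assumes a Linux-style HYPERVISOR_vcpu_op() wrapper and a pre-allocated
per-vCPU page for the vcpu_info; the helper name and its caller are
placeholders, not part of this patch:

    #include <xen/interface/vcpu.h>   /* VCPUOP_register_vcpu_info */

    /*
     * Point Xen at an alternate vcpu_info location for 'cpu'.  This must
     * be done before the vCPU is brought online, and is mandatory for
     * vCPUs beyond the legacy shared_info limit of 32.
     */
    static int register_vcpu_info(int cpu, unsigned long pfn,
                                  unsigned int offset)
    {
        struct vcpu_register_vcpu_info info = {
            .mfn    = pfn_to_mfn(pfn),  /* machine frame holding the info */
            .offset = offset,           /* byte offset within that frame */
        };

        return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
    }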

The tools changes are clearly incomplete (and done only so things would
build again), and the current state of the tools (using scalar variables
all over the place to represent vCPU bitmaps) very likely doesn't permit
booting DomU-s with more than the traditional number of vCPU-s. Testing
of the extended functionality was done with Dom0 (96 vCPU-s, as well as
128 vCPU-s out of which the kernel elected - by way of a simple kernel
side patch - to use only some, resulting in a sparse bitmap).

The ia64 changes are only there to make things build, and were
build-tested only (the tools part only as far as the build would go
before hitting unrelated problems in the blktap code).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 18 10:14:16 2009 +0100 (2009-06-18)
parents 71af89e70fee
children 30bfa1d8895d
line source
/*
 * Copyright (C) 2005 Hewlett-Packard Co.
 * written by Aravind Menon & Jose Renato Santos
 *            (email: xenoprof@groups.hp.com)
 *
 * arch generic xenoprof and IA64 support.
 * dynamic map/unmap xenoprof buffer support.
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */

#ifndef COMPAT
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <public/xenoprof.h>
#include <xen/paging.h>
#include <xsm/xsm.h>

/* Limit amount of pages used for shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32

/* Lock protecting the following global state */
static DEFINE_SPINLOCK(xenoprof_lock);

static DEFINE_SPINLOCK(pmu_owner_lock);
int pmu_owner = 0;
int pmu_hvm_refcount = 0;

static struct domain *active_domains[MAX_OPROF_DOMAINS];
static int active_ready[MAX_OPROF_DOMAINS];
static unsigned int adomains;

static struct domain *passive_domains[MAX_OPROF_DOMAINS];
static unsigned int pdomains;

static unsigned int activated;
static struct domain *xenoprof_primary_profiler;
static int xenoprof_state = XENOPROF_IDLE;
static unsigned long backtrace_depth;

static u64 total_samples;
static u64 invalid_buffer_samples;
static u64 corrupted_buffer_samples;
static u64 lost_samples;
static u64 active_samples;
static u64 passive_samples;
static u64 idle_samples;
static u64 others_samples;

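/*
 * Arbitrate use of the hardware performance counters between xenoprof and
 * other would-be users (e.g. an HVM guest's vPMU).  acquire_pmu_ownership()
 * returns 1 on success and 0 if the PMU is already owned by someone else;
 * HVM ownership is reference counted, and release_pmu_ownship() clears the
 * owner once no references remain.
 */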
int acquire_pmu_ownership(int pmu_ownship)
{
    spin_lock(&pmu_owner_lock);
    if ( pmu_owner == PMU_OWNER_NONE )
    {
        pmu_owner = pmu_ownship;
        goto out;
    }

    if ( pmu_owner == pmu_ownship )
        goto out;

    spin_unlock(&pmu_owner_lock);
    return 0;
 out:
    if ( pmu_owner == PMU_OWNER_HVM )
        pmu_hvm_refcount++;
    spin_unlock(&pmu_owner_lock);
    return 1;
}

void release_pmu_ownship(int pmu_ownship)
{
    spin_lock(&pmu_owner_lock);
    if ( pmu_ownship == PMU_OWNER_HVM )
        pmu_hvm_refcount--;
    if ( !pmu_hvm_refcount )
        pmu_owner = PMU_OWNER_NONE;
    spin_unlock(&pmu_owner_lock);
}

int is_active(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}

int is_passive(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
}

static int is_profiled(struct domain *d)
{
    return (is_active(d) || is_passive(d));
}

static void xenoprof_reset_stat(void)
{
    total_samples = 0;
    invalid_buffer_samples = 0;
    corrupted_buffer_samples = 0;
    lost_samples = 0;
    active_samples = 0;
    passive_samples = 0;
    idle_samples = 0;
    others_samples = 0;
}

static void xenoprof_reset_buf(struct domain *d)
{
    int j;
    xenoprof_buf_t *buf;

    if ( d->xenoprof == NULL )
    {
        printk("xenoprof_reset_buf: ERROR - Unexpected "
               "Xenoprof NULL pointer \n");
        return;
    }

    for ( j = 0; j < d->max_vcpus; j++ )
    {
        buf = d->xenoprof->vcpu[j].buffer;
        if ( buf != NULL )
        {
            xenoprof_buf(d, buf, event_head) = 0;
            xenoprof_buf(d, buf, event_tail) = 0;
        }
    }
}

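/*
 * Hand the xenoprof sample buffer pages over to the profiling domain:
 * each page must be free of foreign references before it can be re-shared
 * writably with the guest.
 */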
static int
share_xenoprof_page_with_guest(struct domain *d, unsigned long mfn, int npages)
{
    int i;

    /* Check if previous page owner has released the page. */
    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
        {
            gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%lx\n",
                     mfn + i, (unsigned long)page->count_info);
            return -EBUSY;
        }
        page_set_owner(page, NULL);
    }

    for ( i = 0; i < npages; i++ )
        share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);

    return 0;
}

static void
unshare_xenoprof_page_with_guest(struct xenoprof *x)
{
    int i, npages = x->npages;
    unsigned long mfn = virt_to_mfn(x->rawbuf);

    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        BUG_ON(page_get_owner(page) != current->domain);
        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);
    }
}

static void
xenoprof_shared_gmfn_with_guest(
    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
{
    int i;

    for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE )
    {
        BUG_ON(page_get_owner(maddr_to_page(maddr)) != d);
        xenoprof_shared_gmfn(d, gmaddr, maddr);
    }
}

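/*
 * Allocate the per-domain xenoprof state and one sample buffer per vCPU,
 * clamping max_samples so that the whole allocation stays within
 * MAX_OPROF_SHARED_PAGES xenheap pages.
 */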
static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);

    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    d->xenoprof->vcpu = xmalloc_array(struct xenoprof_vcpu, d->max_vcpus);
    if ( d->xenoprof->vcpu == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        printk("alloc_xenoprof_struct(): vcpu array allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof->vcpu, 0, d->max_vcpus * sizeof(*d->xenoprof->vcpu));

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    bufsize = sizeof(struct xenoprof_buf);
    i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
    d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? dom0 : d);
    if ( XENOPROF_COMPAT(d->xenoprof) )
    {
        bufsize = sizeof(struct compat_oprof_buf);
        i = sizeof(struct compat_event_log);
    }
#endif

    /* reduce max_samples if necessary to limit pages allocated */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = ( (max_bufsize - bufsize) / i ) + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize += (max_samples - 1) * i;
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus */
    i = 0;
    for_each_vcpu ( d, v )
    {
        xenoprof_buf_t *buf = (xenoprof_buf_t *)
            &d->xenoprof->rawbuf[i * bufsize];

        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer = buf;
        xenoprof_buf(d, buf, event_size) = max_samples;
        xenoprof_buf(d, buf, vcpu_id) = v->vcpu_id;

        i++;
        /* in the unlikely case that the number of active vcpus changes */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}

void free_xenoprof_pages(struct domain *d)
{
    struct xenoprof *x;
    int order;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    if ( x->rawbuf != NULL )
    {
        order = get_order_from_pages(x->npages);
        free_xenheap_pages(x->rawbuf, order);
    }

    xfree(x);
    d->xenoprof = NULL;
}

static int active_index(struct domain *d)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_domains[i] == d )
            return i;

    return -1;
}

static int set_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 1;
    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    active_ready[ind] = 1;
    activated++;

    return 0;
}

static int reset_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 0;
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    active_ready[ind] = 0;
    active_domains[ind] = NULL;
    activated--;
    put_domain(d);

    if ( activated <= 0 )
        adomains = 0;

    return 0;
}

static void reset_passive(struct domain *d)
{
    struct xenoprof *x;

    if ( d == NULL )
        return;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    unshare_xenoprof_page_with_guest(x);
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
}

static void reset_active_list(void)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_ready[i] )
            reset_active(active_domains[i]);

    adomains = 0;
    activated = 0;
}

static void reset_passive_list(void)
{
    int i;

    for ( i = 0; i < pdomains; i++ )
    {
        reset_passive(passive_domains[i]);
        put_domain(passive_domains[i]);
        passive_domains[i] = NULL;
    }

    pdomains = 0;
}

static int add_active_list(domid_t domid)
{
    struct domain *d;

    if ( adomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    d = get_domain_by_id(domid);
    if ( d == NULL )
        return -EINVAL;

    active_domains[adomains] = d;
    active_ready[adomains] = 0;
    adomains++;

    return 0;
}

static int add_passive_list(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_passive passive;
    struct domain *d;
    int ret = 0;

    if ( pdomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    if ( copy_from_guest(&passive, arg, 1) )
        return -EFAULT;

    d = get_domain_by_id(passive.domain_id);
    if ( d == NULL )
        return -EINVAL;

    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, passive.max_samples, 1);
        if ( ret < 0 )
        {
            put_domain(d);
            return -ENOMEM;
        }
    }

    ret = share_xenoprof_page_with_guest(
        current->domain, virt_to_mfn(d->xenoprof->rawbuf),
        d->xenoprof->npages);
    if ( ret < 0 )
    {
        put_domain(d);
        return ret;
    }

    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    passive.nbuf = d->xenoprof->nbuf;
    passive.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(current->domain) )
        passive.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            current->domain, __pa(d->xenoprof->rawbuf),
            passive.buf_gmaddr, d->xenoprof->npages);

    if ( copy_to_guest(arg, &passive, 1) )
    {
        put_domain(d);
        return -EFAULT;
    }

    passive_domains[pdomains] = d;
    pdomains++;

    return ret;
}

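/*
 * The shared sample buffer is a single-producer/single-consumer ring:
 * Xen advances event_head as it logs samples, the profiling guest advances
 * event_tail as it consumes them, and one slot is always left empty so
 * that head == tail unambiguously means "empty".
 */
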
/* Get space in the buffer */
static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t * buf, int size)
{
    int head, tail;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);

    return ((tail > head) ? 0 : size) + tail - head - 1;
}

/* Check for space and add a sample. Return 1 if successful, 0 otherwise. */
static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf,
                               unsigned long eip, int mode, int event)
{
    int head, tail, size;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);
    size = xenoprof_buf(d, buf, event_size);

    /* make sure indexes in shared buffer are sane */
    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    {
        corrupted_buffer_samples++;
        return 0;
    }

    if ( xenoprof_buf_space(d, buf, size) > 0 )
    {
        xenoprof_buf(d, buf, event_log[head].eip) = eip;
        xenoprof_buf(d, buf, event_log[head].mode) = mode;
        xenoprof_buf(d, buf, event_log[head].event) = event;
        head++;
        if ( head >= size )
            head = 0;

        xenoprof_buf(d, buf, event_head) = head;
    }
    else
    {
        xenoprof_buf(d, buf, lost_samples)++;
        lost_samples++;
        return 0;
    }

    return 1;
}

int xenoprof_add_trace(struct domain *d, struct vcpu *vcpu,
                       unsigned long eip, int mode)
{
    xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer;

    /* Do not accidentally write an escape code due to a broken frame. */
    if ( eip == XENOPROF_ESCAPE_CODE )
    {
        invalid_buffer_samples++;
        return 0;
    }

    return xenoprof_add_sample(d, buf, eip, mode, 0);
}

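/*
 * Account a PMU sample and append it to the per-vCPU buffer of the profiled
 * domain, optionally preceded by an escape record when a backtrace was
 * requested via XENOPROF_set_backtrace.
 */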
void xenoprof_log_event(struct vcpu *vcpu,
                        struct cpu_user_regs * regs, unsigned long eip,
                        int mode, int event)
{
    struct domain *d = vcpu->domain;
    struct xenoprof_vcpu *v;
    xenoprof_buf_t *buf;

    total_samples++;

    /* Ignore samples of un-monitored domains. */
    if ( !is_profiled(d) )
    {
        others_samples++;
        return;
    }

    v = &d->xenoprof->vcpu[vcpu->vcpu_id];
    if ( v->buffer == NULL )
    {
        invalid_buffer_samples++;
        return;
    }

    buf = v->buffer;

    /* Provide backtrace if requested. */
    if ( backtrace_depth > 0 )
    {
        if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) ||
             !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode,
                                  XENOPROF_TRACE_BEGIN) )
        {
            xenoprof_buf(d, buf, lost_samples)++;
            lost_samples++;
            return;
        }
    }

    if ( xenoprof_add_sample(d, buf, eip, mode, event) )
    {
        if ( is_active(vcpu->domain) )
            active_samples++;
        else
            passive_samples++;
        if ( mode == 0 )
            xenoprof_buf(d, buf, user_samples)++;
        else if ( mode == 1 )
            xenoprof_buf(d, buf, kernel_samples)++;
        else
            xenoprof_buf(d, buf, xen_samples)++;
    }

    if ( backtrace_depth > 0 )
        xenoprof_backtrace(d, vcpu, regs, backtrace_depth, mode);
}

static int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct xenoprof_init xenoprof_init;
    int ret;

    if ( copy_from_guest(&xenoprof_init, arg, 1) )
        return -EFAULT;

    if ( (ret = xenoprof_arch_init(&xenoprof_init.num_events,
                                   xenoprof_init.cpu_type)) )
        return ret;

    xenoprof_init.is_primary =
        ((xenoprof_primary_profiler == d) ||
         ((xenoprof_primary_profiler == NULL) && (d->domain_id == 0)));
    if ( xenoprof_init.is_primary )
        xenoprof_primary_profiler = current->domain;

    return (copy_to_guest(arg, &xenoprof_init, 1) ? -EFAULT : 0);
}

#endif /* !COMPAT */

static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_get_buffer xenoprof_get_buffer;
    struct domain *d = current->domain;
    int ret;

    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
        return -EFAULT;

    /*
     * We allocate xenoprof struct and buffers only at first time
     * get_buffer is called. Memory is then kept until domain is destroyed.
     */
    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0);
        if ( ret < 0 )
            return ret;
    }

    ret = share_xenoprof_page_with_guest(
        d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
    if ( ret < 0 )
        return ret;

    xenoprof_reset_buf(d);

    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain);

    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(d) )
        xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr,
            d->xenoprof->npages);

    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
        return -EFAULT;

    return 0;
}

#define NONPRIV_OP(op) ( (op == XENOPROF_init)          \
                      || (op == XENOPROF_enable_virq)   \
                      || (op == XENOPROF_disable_virq)  \
                      || (op == XENOPROF_get_buffer))

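/*
 * Top-level XENOPROF hypercall dispatcher.  Unprivileged domains may only
 * invoke the operations covered by NONPRIV_OP(); everything else is reserved
 * for the primary profiler.  All state transitions are serialised by
 * xenoprof_lock.
 */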
int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    int ret = 0;

    if ( (op < 0) || (op > XENOPROF_last_op) )
    {
        printk("xenoprof: invalid operation %d for domain %d\n",
               op, current->domain->domain_id);
        return -EINVAL;
    }

    if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
    {
        printk("xenoprof: dom %d denied privileged operation %d\n",
               current->domain->domain_id, op);
        return -EPERM;
    }

    ret = xsm_profile(current->domain, op);
    if ( ret )
        return ret;

    spin_lock(&xenoprof_lock);

    switch ( op )
    {
    case XENOPROF_init:
        ret = xenoprof_op_init(arg);
        if ( !ret )
            xenoprof_state = XENOPROF_INITIALIZED;
        break;

    case XENOPROF_get_buffer:
        if ( !acquire_pmu_ownership(PMU_OWNER_XENOPROF) )
        {
            ret = -EBUSY;
            break;
        }
        ret = xenoprof_op_get_buffer(arg);
        break;

    case XENOPROF_reset_active_list:
        reset_active_list();
        ret = 0;
        break;

    case XENOPROF_reset_passive_list:
        reset_passive_list();
        ret = 0;
        break;

    case XENOPROF_set_active:
    {
        domid_t domid;
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        if ( copy_from_guest(&domid, arg, 1) )
        {
            ret = -EFAULT;
            break;
        }
        ret = add_active_list(domid);
        break;
    }

    case XENOPROF_set_passive:
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        ret = add_passive_list(arg);
        break;

    case XENOPROF_reserve_counters:
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_reserve_counters();
        if ( !ret )
            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
        break;

    case XENOPROF_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_counter(arg);
        break;

    case XENOPROF_setup_events:
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_setup_events();
        if ( !ret )
            xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_enable_virq:
    {
        int i;

        if ( current->domain == xenoprof_primary_profiler )
        {
            if ( xenoprof_state != XENOPROF_READY )
            {
                ret = -EPERM;
                break;
            }
            xenoprof_arch_enable_virq();
            xenoprof_reset_stat();
            for ( i = 0; i < pdomains; i++ )
                xenoprof_reset_buf(passive_domains[i]);
        }
        xenoprof_reset_buf(current->domain);
        ret = set_active(current->domain);
        break;
    }

    case XENOPROF_start:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_READY) &&
             (activated == adomains) )
            ret = xenoprof_arch_start();
        if ( ret == 0 )
            xenoprof_state = XENOPROF_PROFILING;
        break;

    case XENOPROF_stop:
    {
        struct domain *d;
        struct vcpu *v;
        int i;

        if ( xenoprof_state != XENOPROF_PROFILING )
        {
            ret = -EPERM;
            break;
        }
        xenoprof_arch_stop();

        /* Flush remaining samples. */
        for ( i = 0; i < adomains; i++ )
        {
            if ( !active_ready[i] )
                continue;
            d = active_domains[i];
            for_each_vcpu(d, v)
                send_guest_vcpu_virq(v, VIRQ_XENOPROF);
        }
        xenoprof_state = XENOPROF_READY;
        break;
    }

    case XENOPROF_disable_virq:
    {
        struct xenoprof *x;
        if ( (xenoprof_state == XENOPROF_PROFILING) &&
             (is_active(current->domain)) )
        {
            ret = -EPERM;
            break;
        }
        if ( (ret = reset_active(current->domain)) != 0 )
            break;
        x = current->domain->xenoprof;
        unshare_xenoprof_page_with_guest(x);
        release_pmu_ownship(PMU_OWNER_XENOPROF);
        break;
    }

    case XENOPROF_release_counters:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
             (xenoprof_state == XENOPROF_READY) )
        {
            xenoprof_state = XENOPROF_INITIALIZED;
            xenoprof_arch_release_counters();
            xenoprof_arch_disable_virq();
            reset_passive_list();
            ret = 0;
        }
        break;

    case XENOPROF_shutdown:
        ret = -EPERM;
        if ( xenoprof_state == XENOPROF_INITIALIZED )
        {
            activated = 0;
            adomains = 0;
            xenoprof_primary_profiler = NULL;
            backtrace_depth = 0;
            ret = 0;
        }
        break;

    case XENOPROF_set_backtrace:
        ret = 0;
        if ( !xenoprof_backtrace_supported() )
            ret = -EINVAL;
        else if ( copy_from_guest(&backtrace_depth, arg, 1) )
            ret = -EFAULT;
        break;

    default:
        ret = -ENOSYS;
    }

    spin_unlock(&xenoprof_lock);

    if ( ret < 0 )
        printk("xenoprof: operation %d failed for dom %d (status : %d)\n",
               op, current->domain->domain_id, ret);

    return ret;
}

#if defined(CONFIG_COMPAT) && !defined(COMPAT)
#include "compat/xenoprof.c"
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */