/root/src/xen/xen/common/xenoprof.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (C) 2005 Hewlett-Packard Co. |
3 | | * written by Aravind Menon & Jose Renato Santos |
4 | | * (email: xenoprof@groups.hp.com) |
5 | | * |
6 | | * arch generic xenoprof and IA64 support. |
7 | | * dynamic map/unmap xenoprof buffer support. |
8 | | * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> |
9 | | * VA Linux Systems Japan K.K. |
10 | | */ |
11 | | |
12 | | #ifndef COMPAT |
13 | | #include <xen/guest_access.h> |
14 | | #include <xen/sched.h> |
15 | | #include <xen/event.h> |
16 | | #include <xen/xenoprof.h> |
17 | | #include <public/xenoprof.h> |
18 | | #include <xen/paging.h> |
19 | | #include <xsm/xsm.h> |
20 | | #include <xen/hypercall.h> |
21 | | |
22 | | /* Override macros from asm/page.h to make them work with mfn_t */ |
23 | | #undef virt_to_mfn |
24 | 0 | #define virt_to_mfn(va) _mfn(__virt_to_mfn(va)) |
25 | | #undef mfn_to_page |
26 | 0 | #define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn)) |
27 | | |
28 | | /* Limit amount of pages used for shared buffer (per domain) */ |
29 | 0 | #define MAX_OPROF_SHARED_PAGES 32 |
30 | | |
31 | | /* Lock protecting the following global state */ |
32 | | static DEFINE_SPINLOCK(xenoprof_lock); |
33 | | |
34 | | static DEFINE_SPINLOCK(pmu_owner_lock); |
35 | | int pmu_owner = 0; |
36 | | int pmu_hvm_refcount = 0; |
37 | | |
38 | | static struct domain *active_domains[MAX_OPROF_DOMAINS]; |
39 | | static int active_ready[MAX_OPROF_DOMAINS]; |
40 | | static unsigned int adomains; |
41 | | |
42 | | static struct domain *passive_domains[MAX_OPROF_DOMAINS]; |
43 | | static unsigned int pdomains; |
44 | | |
45 | | static unsigned int activated; |
46 | | static struct domain *xenoprof_primary_profiler; |
47 | | static int xenoprof_state = XENOPROF_IDLE; |
48 | | static unsigned long backtrace_depth; |
49 | | |
50 | | static u64 total_samples; |
51 | | static u64 invalid_buffer_samples; |
52 | | static u64 corrupted_buffer_samples; |
53 | | static u64 lost_samples; |
54 | | static u64 active_samples; |
55 | | static u64 passive_samples; |
56 | | static u64 idle_samples; |
57 | | static u64 others_samples; |
58 | | |
59 | | int acquire_pmu_ownership(int pmu_ownership) |
60 | 0 | { |
61 | 0 | spin_lock(&pmu_owner_lock); |
62 | 0 | if ( pmu_owner == PMU_OWNER_NONE ) |
63 | 0 | { |
64 | 0 | pmu_owner = pmu_ownership; |
65 | 0 | goto out; |
66 | 0 | } |
67 | 0 |
68 | 0 | if ( pmu_owner == pmu_ownership ) |
69 | 0 | goto out; |
70 | 0 |
71 | 0 | spin_unlock(&pmu_owner_lock); |
72 | 0 | return 0; |
73 | 0 | out: |
74 | 0 | if ( pmu_owner == PMU_OWNER_HVM ) |
75 | 0 | pmu_hvm_refcount++; |
76 | 0 | spin_unlock(&pmu_owner_lock); |
77 | 0 | return 1; |
78 | 0 | } |
79 | | |
80 | | void release_pmu_ownership(int pmu_ownership) |
81 | 0 | { |
82 | 0 | spin_lock(&pmu_owner_lock); |
83 | 0 | if ( pmu_ownership == PMU_OWNER_HVM ) |
84 | 0 | pmu_hvm_refcount--; |
85 | 0 | if ( !pmu_hvm_refcount ) |
86 | 0 | pmu_owner = PMU_OWNER_NONE; |
87 | 0 | spin_unlock(&pmu_owner_lock); |
88 | 0 | } |
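Editor's note: the two helpers above implement a simple mutual-exclusion protocol for the PMU (xenoprof vs. the HVM vPMU path, with refcounting only for the HVM owner). A minimal usage sketch follows; the function name is hypothetical, and PMU_OWNER_XENOPROF is assumed from xen/xenoprof.h, matching how do_xenoprof_op() uses it further down.

/* Sketch only -- not part of xenoprof.c. */
static int example_use_pmu(void)
{
    if ( !acquire_pmu_ownership(PMU_OWNER_XENOPROF) )
        return -EBUSY;   /* another owner (e.g. the HVM vPMU) holds the PMU */

    /* ... reserve counters, program events, start sampling ... */

    release_pmu_ownership(PMU_OWNER_XENOPROF);
    return 0;
}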
89 | | |
90 | | int is_active(struct domain *d) |
91 | 0 | { |
92 | 0 | struct xenoprof *x = d->xenoprof; |
93 | 0 | return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE)); |
94 | 0 | } |
95 | | |
96 | | int is_passive(struct domain *d) |
97 | 0 | { |
98 | 0 | struct xenoprof *x = d->xenoprof; |
99 | 0 | return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE)); |
100 | 0 | } |
101 | | |
102 | | static int is_profiled(struct domain *d) |
103 | 0 | { |
104 | 0 | return (is_active(d) || is_passive(d)); |
105 | 0 | } |
106 | | |
107 | | static void xenoprof_reset_stat(void) |
108 | 0 | { |
109 | 0 | total_samples = 0; |
110 | 0 | invalid_buffer_samples = 0; |
111 | 0 | corrupted_buffer_samples = 0; |
112 | 0 | lost_samples = 0; |
113 | 0 | active_samples = 0; |
114 | 0 | passive_samples = 0; |
115 | 0 | idle_samples = 0; |
116 | 0 | others_samples = 0; |
117 | 0 | } |
118 | | |
119 | | static void xenoprof_reset_buf(struct domain *d) |
120 | 0 | { |
121 | 0 | int j; |
122 | 0 | xenoprof_buf_t *buf; |
123 | 0 |
124 | 0 | if ( d->xenoprof == NULL ) |
125 | 0 | { |
126 | 0 | printk("xenoprof_reset_buf: ERROR - Unexpected " |
127 | 0 | "Xenoprof NULL pointer \n"); |
128 | 0 | return; |
129 | 0 | } |
130 | 0 |
131 | 0 | for ( j = 0; j < d->max_vcpus; j++ ) |
132 | 0 | { |
133 | 0 | buf = d->xenoprof->vcpu[j].buffer; |
134 | 0 | if ( buf != NULL ) |
135 | 0 | { |
136 | 0 | xenoprof_buf(d, buf, event_head) = 0; |
137 | 0 | xenoprof_buf(d, buf, event_tail) = 0; |
138 | 0 | } |
139 | 0 | } |
140 | 0 | } |
141 | | |
142 | | static int |
143 | | share_xenoprof_page_with_guest(struct domain *d, mfn_t mfn, int npages) |
144 | 0 | { |
145 | 0 | int i; |
146 | 0 |
147 | 0 | /* Check if previous page owner has released the page. */ |
148 | 0 | for ( i = 0; i < npages; i++ ) |
149 | 0 | { |
150 | 0 | struct page_info *page = mfn_to_page(mfn_add(mfn, i)); |
151 | 0 |
152 | 0 | if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 ) |
153 | 0 | { |
154 | 0 | printk(XENLOG_G_INFO "dom%d mfn %#lx page->count_info %#lx\n", |
155 | 0 | d->domain_id, mfn_x(mfn_add(mfn, i)), page->count_info); |
156 | 0 | return -EBUSY; |
157 | 0 | } |
158 | 0 | page_set_owner(page, NULL); |
159 | 0 | } |
160 | 0 |
161 | 0 | for ( i = 0; i < npages; i++ ) |
162 | 0 | share_xen_page_with_guest(mfn_to_page(mfn_add(mfn, i)), |
163 | 0 | d, XENSHARE_writable); |
164 | 0 |
165 | 0 | return 0; |
166 | 0 | } |
167 | | |
168 | | static void |
169 | | unshare_xenoprof_page_with_guest(struct xenoprof *x) |
170 | 0 | { |
171 | 0 | int i, npages = x->npages; |
172 | 0 | mfn_t mfn = virt_to_mfn(x->rawbuf); |
173 | 0 |
174 | 0 | for ( i = 0; i < npages; i++ ) |
175 | 0 | { |
176 | 0 | struct page_info *page = mfn_to_page(mfn_add(mfn, i)); |
177 | 0 |
178 | 0 | BUG_ON(page_get_owner(page) != current->domain); |
179 | 0 | if ( test_and_clear_bit(_PGC_allocated, &page->count_info) ) |
180 | 0 | put_page(page); |
181 | 0 | } |
182 | 0 | } |
183 | | |
184 | | static void |
185 | | xenoprof_shared_gmfn_with_guest( |
186 | | struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages) |
187 | 0 | { |
188 | 0 | int i; |
189 | 0 |
190 | 0 | for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE ) |
191 | 0 | { |
192 | 0 | BUG_ON(page_get_owner(maddr_to_page(maddr)) != d); |
193 | 0 | if ( i == 0 ) |
194 | 0 | gdprintk(XENLOG_WARNING, |
195 | 0 | "xenoprof unsupported with autotranslated guests\n"); |
196 | 0 |
197 | 0 | } |
198 | 0 | } |
199 | | |
200 | | static int alloc_xenoprof_struct( |
201 | | struct domain *d, int max_samples, int is_passive) |
202 | 0 | { |
203 | 0 | struct vcpu *v; |
204 | 0 | int nvcpu, npages, bufsize, max_bufsize; |
205 | 0 | unsigned max_max_samples; |
206 | 0 | int i; |
207 | 0 |
208 | 0 | nvcpu = 0; |
209 | 0 | for_each_vcpu ( d, v ) |
210 | 0 | nvcpu++; |
211 | 0 |
212 | 0 | if ( !nvcpu ) |
213 | 0 | return -EINVAL; |
214 | 0 |
215 | 0 | d->xenoprof = xzalloc(struct xenoprof); |
216 | 0 | if ( d->xenoprof == NULL ) |
217 | 0 | { |
218 | 0 | printk("alloc_xenoprof_struct(): memory allocation failed\n"); |
219 | 0 | return -ENOMEM; |
220 | 0 | } |
221 | 0 |
222 | 0 | d->xenoprof->vcpu = xzalloc_array(struct xenoprof_vcpu, d->max_vcpus); |
223 | 0 | if ( d->xenoprof->vcpu == NULL ) |
224 | 0 | { |
225 | 0 | xfree(d->xenoprof); |
226 | 0 | d->xenoprof = NULL; |
227 | 0 | printk("alloc_xenoprof_struct(): vcpu array allocation failed\n"); |
228 | 0 | return -ENOMEM; |
229 | 0 | } |
230 | 0 |
231 | 0 | bufsize = sizeof(struct xenoprof_buf); |
232 | 0 | i = sizeof(struct event_log); |
233 | 0 | #ifdef CONFIG_COMPAT |
234 | 0 | d->xenoprof->is_compat = is_pv_32bit_domain(is_passive ? hardware_domain : d); |
235 | 0 | if ( XENOPROF_COMPAT(d->xenoprof) ) |
236 | 0 | { |
237 | 0 | bufsize = sizeof(struct compat_oprof_buf); |
238 | 0 | i = sizeof(struct compat_event_log); |
239 | 0 | } |
240 | 0 | #endif |
241 | 0 |
242 | 0 | /* reduce max_samples if necessary to limit pages allocated */ |
243 | 0 | max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu; |
244 | 0 | max_max_samples = ( (max_bufsize - bufsize) / i ) + 1; |
245 | 0 | if ( (unsigned)max_samples > max_max_samples ) |
246 | 0 | max_samples = max_max_samples; |
247 | 0 |
248 | 0 | bufsize += (max_samples - 1) * i; |
249 | 0 | npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1; |
250 | 0 |
251 | 0 | d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0); |
252 | 0 | if ( d->xenoprof->rawbuf == NULL ) |
253 | 0 | { |
254 | 0 | xfree(d->xenoprof->vcpu); |
255 | 0 | xfree(d->xenoprof); |
256 | 0 | d->xenoprof = NULL; |
257 | 0 | return -ENOMEM; |
258 | 0 | } |
259 | 0 |
260 | 0 | d->xenoprof->npages = npages; |
261 | 0 | d->xenoprof->nbuf = nvcpu; |
262 | 0 | d->xenoprof->bufsize = bufsize; |
263 | 0 | d->xenoprof->domain_ready = 0; |
264 | 0 | d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED; |
265 | 0 |
266 | 0 | /* Update buffer pointers for active vcpus */ |
267 | 0 | i = 0; |
268 | 0 | for_each_vcpu ( d, v ) |
269 | 0 | { |
270 | 0 | xenoprof_buf_t *buf = (xenoprof_buf_t *) |
271 | 0 | &d->xenoprof->rawbuf[i * bufsize]; |
272 | 0 |
273 | 0 | d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples; |
274 | 0 | d->xenoprof->vcpu[v->vcpu_id].buffer = buf; |
275 | 0 | xenoprof_buf(d, buf, event_size) = max_samples; |
276 | 0 | xenoprof_buf(d, buf, vcpu_id) = v->vcpu_id; |
277 | 0 |
278 | 0 | i++; |
279 | 0 | /* in the unlikely case that the number of active vcpus changes */ |
280 | 0 | if ( i >= nvcpu ) |
281 | 0 | break; |
282 | 0 | } |
283 | 0 | |
284 | 0 | return 0; |
285 | 0 | } |
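Editor's note: a worked instance of the sizing arithmetic in alloc_xenoprof_struct() may help; the numbers below are illustrative, not taken from the source.

/*
 * Illustration (assumed values): nvcpu = 4, PAGE_SIZE = 4096, non-compat
 * layout.  The per-vCPU budget is max_bufsize = (32 * 4096) / 4 = 32768
 * bytes, so max_samples is clamped to
 *     (32768 - sizeof(struct xenoprof_buf)) / sizeof(struct event_log) + 1.
 * The allocation then is
 *     bufsize = sizeof(struct xenoprof_buf)
 *               + (max_samples - 1) * sizeof(struct event_log);
 *     npages  = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;   (ceiling division)
 * i.e. one contiguous xenheap region holding nbuf == nvcpu ring buffers.
 */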
286 | | |
287 | | void free_xenoprof_pages(struct domain *d) |
288 | 0 | { |
289 | 0 | struct xenoprof *x; |
290 | 0 | int order; |
291 | 0 |
292 | 0 | x = d->xenoprof; |
293 | 0 | if ( x == NULL ) |
294 | 0 | return; |
295 | 0 |
296 | 0 | if ( x->rawbuf != NULL ) |
297 | 0 | { |
298 | 0 | order = get_order_from_pages(x->npages); |
299 | 0 | free_xenheap_pages(x->rawbuf, order); |
300 | 0 | } |
301 | 0 |
302 | 0 | xfree(x->vcpu); |
303 | 0 | xfree(x); |
304 | 0 | d->xenoprof = NULL; |
305 | 0 | } |
306 | | |
307 | | static int active_index(struct domain *d) |
308 | 0 | { |
309 | 0 | int i; |
310 | 0 |
311 | 0 | for ( i = 0; i < adomains; i++ ) |
312 | 0 | if ( active_domains[i] == d ) |
313 | 0 | return i; |
314 | 0 |
315 | 0 | return -1; |
316 | 0 | } |
317 | | |
318 | | static int set_active(struct domain *d) |
319 | 0 | { |
320 | 0 | int ind; |
321 | 0 | struct xenoprof *x; |
322 | 0 |
323 | 0 | ind = active_index(d); |
324 | 0 | if ( ind < 0 ) |
325 | 0 | return -EPERM; |
326 | 0 |
327 | 0 | x = d->xenoprof; |
328 | 0 | if ( x == NULL ) |
329 | 0 | return -EPERM; |
330 | 0 |
331 | 0 | x->domain_ready = 1; |
332 | 0 | x->domain_type = XENOPROF_DOMAIN_ACTIVE; |
333 | 0 | active_ready[ind] = 1; |
334 | 0 | activated++; |
335 | 0 |
336 | 0 | return 0; |
337 | 0 | } |
338 | | |
339 | | static int reset_active(struct domain *d) |
340 | 0 | { |
341 | 0 | int ind; |
342 | 0 | struct xenoprof *x; |
343 | 0 |
344 | 0 | ind = active_index(d); |
345 | 0 | if ( ind < 0 ) |
346 | 0 | return -EPERM; |
347 | 0 |
348 | 0 | x = d->xenoprof; |
349 | 0 | if ( x == NULL ) |
350 | 0 | return -EPERM; |
351 | 0 |
352 | 0 | x->domain_ready = 0; |
353 | 0 | x->domain_type = XENOPROF_DOMAIN_IGNORED; |
354 | 0 | active_ready[ind] = 0; |
355 | 0 | active_domains[ind] = NULL; |
356 | 0 | activated--; |
357 | 0 | put_domain(d); |
358 | 0 |
359 | 0 | if ( activated <= 0 ) |
360 | 0 | adomains = 0; |
361 | 0 |
362 | 0 | return 0; |
363 | 0 | } |
364 | | |
365 | | static void reset_passive(struct domain *d) |
366 | 0 | { |
367 | 0 | struct xenoprof *x; |
368 | 0 |
369 | 0 | if ( d == NULL ) |
370 | 0 | return; |
371 | 0 |
372 | 0 | x = d->xenoprof; |
373 | 0 | if ( x == NULL ) |
374 | 0 | return; |
375 | 0 |
376 | 0 | unshare_xenoprof_page_with_guest(x); |
377 | 0 | x->domain_type = XENOPROF_DOMAIN_IGNORED; |
378 | 0 | } |
379 | | |
380 | | static void reset_active_list(void) |
381 | 0 | { |
382 | 0 | int i; |
383 | 0 |
384 | 0 | for ( i = 0; i < adomains; i++ ) |
385 | 0 | if ( active_ready[i] ) |
386 | 0 | reset_active(active_domains[i]); |
387 | 0 |
388 | 0 | adomains = 0; |
389 | 0 | activated = 0; |
390 | 0 | } |
391 | | |
392 | | static void reset_passive_list(void) |
393 | 0 | { |
394 | 0 | int i; |
395 | 0 |
396 | 0 | for ( i = 0; i < pdomains; i++ ) |
397 | 0 | { |
398 | 0 | reset_passive(passive_domains[i]); |
399 | 0 | put_domain(passive_domains[i]); |
400 | 0 | passive_domains[i] = NULL; |
401 | 0 | } |
402 | 0 |
403 | 0 | pdomains = 0; |
404 | 0 | } |
405 | | |
406 | | static int add_active_list(domid_t domid) |
407 | 0 | { |
408 | 0 | struct domain *d; |
409 | 0 |
410 | 0 | if ( adomains >= MAX_OPROF_DOMAINS ) |
411 | 0 | return -E2BIG; |
412 | 0 |
413 | 0 | d = get_domain_by_id(domid); |
414 | 0 | if ( d == NULL ) |
415 | 0 | return -EINVAL; |
416 | 0 |
417 | 0 | active_domains[adomains] = d; |
418 | 0 | active_ready[adomains] = 0; |
419 | 0 | adomains++; |
420 | 0 |
421 | 0 | return 0; |
422 | 0 | } |
423 | | |
424 | | static int add_passive_list(XEN_GUEST_HANDLE_PARAM(void) arg) |
425 | 0 | { |
426 | 0 | struct xenoprof_passive passive; |
427 | 0 | struct domain *d; |
428 | 0 | int ret = 0; |
429 | 0 |
430 | 0 | if ( pdomains >= MAX_OPROF_DOMAINS ) |
431 | 0 | return -E2BIG; |
432 | 0 |
433 | 0 | if ( copy_from_guest(&passive, arg, 1) ) |
434 | 0 | return -EFAULT; |
435 | 0 |
436 | 0 | d = get_domain_by_id(passive.domain_id); |
437 | 0 | if ( d == NULL ) |
438 | 0 | return -EINVAL; |
439 | 0 |
440 | 0 | if ( d->xenoprof == NULL ) |
441 | 0 | { |
442 | 0 | ret = alloc_xenoprof_struct(d, passive.max_samples, 1); |
443 | 0 | if ( ret < 0 ) |
444 | 0 | { |
445 | 0 | put_domain(d); |
446 | 0 | return -ENOMEM; |
447 | 0 | } |
448 | 0 | } |
449 | 0 |
450 | 0 | ret = share_xenoprof_page_with_guest( |
451 | 0 | current->domain, virt_to_mfn(d->xenoprof->rawbuf), |
452 | 0 | d->xenoprof->npages); |
453 | 0 | if ( ret < 0 ) |
454 | 0 | { |
455 | 0 | put_domain(d); |
456 | 0 | return ret; |
457 | 0 | } |
458 | 0 |
459 | 0 | d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE; |
460 | 0 | passive.nbuf = d->xenoprof->nbuf; |
461 | 0 | passive.bufsize = d->xenoprof->bufsize; |
462 | 0 | if ( !paging_mode_translate(current->domain) ) |
463 | 0 | passive.buf_gmaddr = __pa(d->xenoprof->rawbuf); |
464 | 0 | else |
465 | 0 | xenoprof_shared_gmfn_with_guest( |
466 | 0 | current->domain, __pa(d->xenoprof->rawbuf), |
467 | 0 | passive.buf_gmaddr, d->xenoprof->npages); |
468 | 0 |
469 | 0 | if ( __copy_to_guest(arg, &passive, 1) ) |
470 | 0 | { |
471 | 0 | put_domain(d); |
472 | 0 | return -EFAULT; |
473 | 0 | } |
474 | 0 | |
475 | 0 | passive_domains[pdomains] = d; |
476 | 0 | pdomains++; |
477 | 0 |
478 | 0 | return ret; |
479 | 0 | } |
480 | | |
481 | | |
482 | | /* Get space in the buffer */ |
483 | | static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t * buf, int size) |
484 | 0 | { |
485 | 0 | int head, tail; |
486 | 0 |
487 | 0 | head = xenoprof_buf(d, buf, event_head); |
488 | 0 | tail = xenoprof_buf(d, buf, event_tail); |
489 | 0 |
490 | 0 | return ((tail > head) ? 0 : size) + tail - head - 1; |
491 | 0 | } |
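Editor's note: the return expression above is the classic one-slot-empty ring-buffer computation; a short worked example (values are illustrative) follows.

/*
 * Illustration with size = 8 (assumed values, not from the source):
 *   head = 6, tail = 2  ->  8 + 2 - 6 - 1 = 3 free slots (slots 2..5 in use)
 *   head = 2, tail = 6  ->      6 - 2 - 1 = 3 free slots (slots 6,7,0,1 in use)
 * A result of 0 means "full"; one slot is always kept unused so that
 * head == tail unambiguously means "empty".
 */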
492 | | |
493 | | /* Check for space and add a sample. Return 1 if successful, 0 otherwise. */ |
494 | | static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf, |
495 | | uint64_t eip, int mode, int event) |
496 | 0 | { |
497 | 0 | int head, tail, size; |
498 | 0 |
499 | 0 | head = xenoprof_buf(d, buf, event_head); |
500 | 0 | tail = xenoprof_buf(d, buf, event_tail); |
501 | 0 | size = xenoprof_buf(d, buf, event_size); |
502 | 0 | |
503 | 0 | /* make sure indexes in shared buffer are sane */ |
504 | 0 | if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) ) |
505 | 0 | { |
506 | 0 | corrupted_buffer_samples++; |
507 | 0 | return 0; |
508 | 0 | } |
509 | 0 |
510 | 0 | if ( xenoprof_buf_space(d, buf, size) > 0 ) |
511 | 0 | { |
512 | 0 | xenoprof_buf(d, buf, event_log[head].eip) = eip; |
513 | 0 | xenoprof_buf(d, buf, event_log[head].mode) = mode; |
514 | 0 | xenoprof_buf(d, buf, event_log[head].event) = event; |
515 | 0 | head++; |
516 | 0 | if ( head >= size ) |
517 | 0 | head = 0; |
518 | 0 | |
519 | 0 | xenoprof_buf(d, buf, event_head) = head; |
520 | 0 | } |
521 | 0 | else |
522 | 0 | { |
523 | 0 | xenoprof_buf(d, buf, lost_samples)++; |
524 | 0 | lost_samples++; |
525 | 0 | return 0; |
526 | 0 | } |
527 | 0 |
528 | 0 | return 1; |
529 | 0 | } |
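Editor's note: the producer above only ever advances event_head; the guest's profiling driver is the consumer that advances event_tail. A hedged sketch of that consumer side is shown below (it lives in the guest, not in this file, and the function name is made up).

/* Sketch of the guest-side consumer -- illustrative only. */
static void example_drain_buffer(struct xenoprof_buf *buf)
{
    uint32_t tail = buf->event_tail;

    while ( tail != buf->event_head )
    {
        /* consume buf->event_log[tail]: eip, mode, event */
        if ( ++tail >= buf->event_size )
            tail = 0;
    }
    buf->event_tail = tail;
}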
530 | | |
531 | | int xenoprof_add_trace(struct vcpu *vcpu, uint64_t pc, int mode) |
532 | 0 | { |
533 | 0 | struct domain *d = vcpu->domain; |
534 | 0 | xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer; |
535 | 0 |
536 | 0 | /* Do not accidentally write an escape code due to a broken frame. */ |
537 | 0 | if ( pc == XENOPROF_ESCAPE_CODE ) |
538 | 0 | { |
539 | 0 | invalid_buffer_samples++; |
540 | 0 | return 0; |
541 | 0 | } |
542 | 0 |
543 | 0 | return xenoprof_add_sample(d, buf, pc, mode, 0); |
544 | 0 | } |
545 | | |
546 | | void xenoprof_log_event(struct vcpu *vcpu, const struct cpu_user_regs *regs, |
547 | | uint64_t pc, int mode, int event) |
548 | 0 | { |
549 | 0 | struct domain *d = vcpu->domain; |
550 | 0 | struct xenoprof_vcpu *v; |
551 | 0 | xenoprof_buf_t *buf; |
552 | 0 |
553 | 0 | total_samples++; |
554 | 0 |
555 | 0 | /* Ignore samples of un-monitored domains. */ |
556 | 0 | if ( !is_profiled(d) ) |
557 | 0 | { |
558 | 0 | others_samples++; |
559 | 0 | return; |
560 | 0 | } |
561 | 0 |
562 | 0 | v = &d->xenoprof->vcpu[vcpu->vcpu_id]; |
563 | 0 | if ( v->buffer == NULL ) |
564 | 0 | { |
565 | 0 | invalid_buffer_samples++; |
566 | 0 | return; |
567 | 0 | } |
568 | 0 | |
569 | 0 | buf = v->buffer; |
570 | 0 |
571 | 0 | /* Provide backtrace if requested. */ |
572 | 0 | if ( backtrace_depth > 0 ) |
573 | 0 | { |
574 | 0 | if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) || |
575 | 0 | !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode, |
576 | 0 | XENOPROF_TRACE_BEGIN) ) |
577 | 0 | { |
578 | 0 | xenoprof_buf(d, buf, lost_samples)++; |
579 | 0 | lost_samples++; |
580 | 0 | return; |
581 | 0 | } |
582 | 0 | } |
583 | 0 |
584 | 0 | if ( xenoprof_add_sample(d, buf, pc, mode, event) ) |
585 | 0 | { |
586 | 0 | if ( is_active(vcpu->domain) ) |
587 | 0 | active_samples++; |
588 | 0 | else |
589 | 0 | passive_samples++; |
590 | 0 | if ( mode == 0 ) |
591 | 0 | xenoprof_buf(d, buf, user_samples)++; |
592 | 0 | else if ( mode == 1 ) |
593 | 0 | xenoprof_buf(d, buf, kernel_samples)++; |
594 | 0 | else |
595 | 0 | xenoprof_buf(d, buf, xen_samples)++; |
596 | 0 | |
597 | 0 | } |
598 | 0 |
599 | 0 | if ( backtrace_depth > 0 ) |
600 | 0 | xenoprof_backtrace(vcpu, regs, backtrace_depth, mode); |
601 | 0 | } |
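Editor's note: when backtraces are enabled, xenoprof_log_event() emits a small record sequence rather than a single sample; the layout sketched below is inferred from this file and is illustrative.

/*
 * Record stream with backtrace_depth > 0 (illustrative):
 *   { XENOPROF_ESCAPE_CODE, mode, XENOPROF_TRACE_BEGIN }   <- escape marker
 *   { pc,                   mode, event }                  <- the real sample
 *   ... up to backtrace_depth frames appended by xenoprof_backtrace()
 *       via xenoprof_add_trace() ...
 * This is why at least two free slots are required before the escape record
 * is written; otherwise the sample is dropped and counted in lost_samples.
 */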
602 | | |
603 | | |
604 | | |
605 | | static int xenoprof_op_init(XEN_GUEST_HANDLE_PARAM(void) arg) |
606 | 0 | { |
607 | 0 | struct domain *d = current->domain; |
608 | 0 | struct xenoprof_init xenoprof_init; |
609 | 0 | int ret; |
610 | 0 |
611 | 0 | if ( copy_from_guest(&xenoprof_init, arg, 1) ) |
612 | 0 | return -EFAULT; |
613 | 0 |
614 | 0 | if ( (ret = xenoprof_arch_init(&xenoprof_init.num_events, |
615 | 0 | xenoprof_init.cpu_type)) ) |
616 | 0 | return ret; |
617 | 0 |
618 | 0 | /* Only the hardware domain may become the primary profiler here because |
619 | 0 | * there is currently no cleanup of xenoprof_primary_profiler or associated |
620 | 0 | * profiling state when the primary profiling domain is shut down or |
621 | 0 | * crashes. Once a better cleanup method is present, it will be possible to |
622 | 0 | * allow another domain to be the primary profiler. |
623 | 0 | */ |
624 | 0 | xenoprof_init.is_primary = |
625 | 0 | ((xenoprof_primary_profiler == d) || |
626 | 0 | ((xenoprof_primary_profiler == NULL) && is_hardware_domain(d))); |
627 | 0 | if ( xenoprof_init.is_primary ) |
628 | 0 | xenoprof_primary_profiler = current->domain; |
629 | 0 |
630 | 0 | return __copy_to_guest(arg, &xenoprof_init, 1) ? -EFAULT : 0; |
631 | 0 | } |
632 | | |
633 | | #define ret_t long |
634 | | |
635 | | #endif /* !COMPAT */ |
636 | | |
637 | | static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE_PARAM(void) arg) |
638 | 0 | { |
639 | 0 | struct xenoprof_get_buffer xenoprof_get_buffer; |
640 | 0 | struct domain *d = current->domain; |
641 | 0 | int ret; |
642 | 0 |
|
643 | 0 | if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) ) |
644 | 0 | return -EFAULT; |
645 | 0 |
|
646 | 0 | /* |
647 | 0 | * We allocate xenoprof struct and buffers only at first time |
648 | 0 | * get_buffer is called. Memory is then kept until domain is destroyed. |
649 | 0 | */ |
650 | 0 | if ( d->xenoprof == NULL ) |
651 | 0 | { |
652 | 0 | ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0); |
653 | 0 | if ( ret < 0 ) |
654 | 0 | return ret; |
655 | 0 | } |
656 | 0 |
657 | 0 | ret = share_xenoprof_page_with_guest( |
658 | 0 | d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages); |
659 | 0 | if ( ret < 0 ) |
660 | 0 | return ret; |
661 | 0 |
662 | 0 | xenoprof_reset_buf(d); |
663 | 0 |
664 | 0 | d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED; |
665 | 0 | d->xenoprof->domain_ready = 0; |
666 | 0 | d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain); |
667 | 0 | |
668 | 0 | xenoprof_get_buffer.nbuf = d->xenoprof->nbuf; |
669 | 0 | xenoprof_get_buffer.bufsize = d->xenoprof->bufsize; |
670 | 0 | if ( !paging_mode_translate(d) ) |
671 | 0 | xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf); |
672 | 0 | else |
673 | 0 | xenoprof_shared_gmfn_with_guest( |
674 | 0 | d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr, |
675 | 0 | d->xenoprof->npages); |
676 | 0 |
677 | 0 | return __copy_to_guest(arg, &xenoprof_get_buffer, 1) ? -EFAULT : 0; |
678 | 0 | } |
Unexecuted instantiation: xenoprof.c:xenoprof_op_get_buffer
Unexecuted instantiation: xenoprof.c:compat_oprof_op_get_buffer
679 | | |
680 | 0 | #define NONPRIV_OP(op) ( (op == XENOPROF_init) \ |
681 | 0 | || (op == XENOPROF_enable_virq) \ |
682 | 0 | || (op == XENOPROF_disable_virq) \ |
683 | 0 | || (op == XENOPROF_get_buffer)) |
684 | | |
685 | | ret_t do_xenoprof_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) |
686 | 0 | { |
687 | 0 | int ret = 0; |
688 | 0 | |
689 | 0 | if ( (op < 0) || (op > XENOPROF_last_op) ) |
690 | 0 | { |
691 | 0 | gdprintk(XENLOG_DEBUG, "invalid operation %d\n", op); |
692 | 0 | return -EINVAL; |
693 | 0 | } |
694 | 0 |
695 | 0 | if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) ) |
696 | 0 | { |
697 | 0 | gdprintk(XENLOG_DEBUG, "denied privileged operation %d\n", op); |
698 | 0 | return -EPERM; |
699 | 0 | } |
700 | 0 |
701 | 0 | ret = xsm_profile(XSM_HOOK, current->domain, op); |
702 | 0 | if ( ret ) |
703 | 0 | return ret; |
704 | 0 |
705 | 0 | spin_lock(&xenoprof_lock); |
706 | 0 | |
707 | 0 | switch ( op ) |
708 | 0 | { |
709 | 0 | case XENOPROF_init: |
710 | 0 | ret = xenoprof_op_init(arg); |
711 | 0 | if ( (ret == 0) && |
712 | 0 | (current->domain == xenoprof_primary_profiler) ) |
713 | 0 | xenoprof_state = XENOPROF_INITIALIZED; |
714 | 0 | break; |
715 | 0 |
716 | 0 | case XENOPROF_get_buffer: |
717 | 0 | if ( !acquire_pmu_ownership(PMU_OWNER_XENOPROF) ) |
718 | 0 | { |
719 | 0 | ret = -EBUSY; |
720 | 0 | break; |
721 | 0 | } |
722 | 0 | ret = xenoprof_op_get_buffer(arg); |
723 | 0 | break; |
724 | 0 |
725 | 0 | case XENOPROF_reset_active_list: |
726 | 0 | reset_active_list(); |
727 | 0 | ret = 0; |
728 | 0 | break; |
729 | 0 |
730 | 0 | case XENOPROF_reset_passive_list: |
731 | 0 | reset_passive_list(); |
732 | 0 | ret = 0; |
733 | 0 | break; |
734 | 0 |
735 | 0 | case XENOPROF_set_active: |
736 | 0 | { |
737 | 0 | domid_t domid; |
738 | 0 | if ( xenoprof_state != XENOPROF_INITIALIZED ) |
739 | 0 | { |
740 | 0 | ret = -EPERM; |
741 | 0 | break; |
742 | 0 | } |
743 | 0 | if ( copy_from_guest(&domid, arg, 1) ) |
744 | 0 | { |
745 | 0 | ret = -EFAULT; |
746 | 0 | break; |
747 | 0 | } |
748 | 0 | ret = add_active_list(domid); |
749 | 0 | break; |
750 | 0 | } |
751 | 0 |
752 | 0 | case XENOPROF_set_passive: |
753 | 0 | if ( xenoprof_state != XENOPROF_INITIALIZED ) |
754 | 0 | { |
755 | 0 | ret = -EPERM; |
756 | 0 | break; |
757 | 0 | } |
758 | 0 | ret = add_passive_list(arg); |
759 | 0 | break; |
760 | 0 |
761 | 0 | case XENOPROF_reserve_counters: |
762 | 0 | if ( xenoprof_state != XENOPROF_INITIALIZED ) |
763 | 0 | { |
764 | 0 | ret = -EPERM; |
765 | 0 | break; |
766 | 0 | } |
767 | 0 | ret = xenoprof_arch_reserve_counters(); |
768 | 0 | if ( !ret ) |
769 | 0 | xenoprof_state = XENOPROF_COUNTERS_RESERVED; |
770 | 0 | break; |
771 | 0 |
772 | 0 | case XENOPROF_counter: |
773 | 0 | if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) || |
774 | 0 | (adomains == 0) ) |
775 | 0 | { |
776 | 0 | ret = -EPERM; |
777 | 0 | break; |
778 | 0 | } |
779 | 0 | ret = xenoprof_arch_counter(arg); |
780 | 0 | break; |
781 | 0 |
782 | 0 | case XENOPROF_setup_events: |
783 | 0 | if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED ) |
784 | 0 | { |
785 | 0 | ret = -EPERM; |
786 | 0 | break; |
787 | 0 | } |
788 | 0 | ret = xenoprof_arch_setup_events(); |
789 | 0 | if ( !ret ) |
790 | 0 | xenoprof_state = XENOPROF_READY; |
791 | 0 | break; |
792 | 0 |
793 | 0 | case XENOPROF_enable_virq: |
794 | 0 | { |
795 | 0 | int i; |
796 | 0 |
797 | 0 | if ( current->domain == xenoprof_primary_profiler ) |
798 | 0 | { |
799 | 0 | if ( xenoprof_state != XENOPROF_READY ) |
800 | 0 | { |
801 | 0 | ret = -EPERM; |
802 | 0 | break; |
803 | 0 | } |
804 | 0 | xenoprof_arch_enable_virq(); |
805 | 0 | xenoprof_reset_stat(); |
806 | 0 | for ( i = 0; i < pdomains; i++ ) |
807 | 0 | xenoprof_reset_buf(passive_domains[i]); |
808 | 0 | } |
809 | 0 | xenoprof_reset_buf(current->domain); |
810 | 0 | ret = set_active(current->domain); |
811 | 0 | break; |
812 | 0 | } |
813 | 0 |
814 | 0 | case XENOPROF_start: |
815 | 0 | ret = -EPERM; |
816 | 0 | if ( (xenoprof_state == XENOPROF_READY) && |
817 | 0 | (activated == adomains) ) |
818 | 0 | ret = xenoprof_arch_start(); |
819 | 0 | if ( ret == 0 ) |
820 | 0 | xenoprof_state = XENOPROF_PROFILING; |
821 | 0 | break; |
822 | 0 |
823 | 0 | case XENOPROF_stop: |
824 | 0 | { |
825 | 0 | struct domain *d; |
826 | 0 | struct vcpu *v; |
827 | 0 | int i; |
828 | 0 |
|
829 | 0 | if ( xenoprof_state != XENOPROF_PROFILING ) |
830 | 0 | { |
831 | 0 | ret = -EPERM; |
832 | 0 | break; |
833 | 0 | } |
834 | 0 | xenoprof_arch_stop(); |
835 | 0 |
836 | 0 | /* Flush remaining samples. */ |
837 | 0 | for ( i = 0; i < adomains; i++ ) |
838 | 0 | { |
839 | 0 | if ( !active_ready[i] ) |
840 | 0 | continue; |
841 | 0 | d = active_domains[i]; |
842 | 0 | for_each_vcpu(d, v) |
843 | 0 | send_guest_vcpu_virq(v, VIRQ_XENOPROF); |
844 | 0 | } |
845 | 0 | xenoprof_state = XENOPROF_READY; |
846 | 0 | break; |
847 | 0 | } |
848 | 0 |
849 | 0 | case XENOPROF_disable_virq: |
850 | 0 | { |
851 | 0 | struct xenoprof *x; |
852 | 0 | if ( (xenoprof_state == XENOPROF_PROFILING) && |
853 | 0 | (is_active(current->domain)) ) |
854 | 0 | { |
855 | 0 | ret = -EPERM; |
856 | 0 | break; |
857 | 0 | } |
858 | 0 | if ( (ret = reset_active(current->domain)) != 0 ) |
859 | 0 | break; |
860 | 0 | x = current->domain->xenoprof; |
861 | 0 | unshare_xenoprof_page_with_guest(x); |
862 | 0 | release_pmu_ownership(PMU_OWNER_XENOPROF); |
863 | 0 | break; |
864 | 0 | } |
865 | 0 |
866 | 0 | case XENOPROF_release_counters: |
867 | 0 | ret = -EPERM; |
868 | 0 | if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) || |
869 | 0 | (xenoprof_state == XENOPROF_READY) ) |
870 | 0 | { |
871 | 0 | xenoprof_state = XENOPROF_INITIALIZED; |
872 | 0 | xenoprof_arch_release_counters(); |
873 | 0 | xenoprof_arch_disable_virq(); |
874 | 0 | reset_passive_list(); |
875 | 0 | ret = 0; |
876 | 0 | } |
877 | 0 | break; |
878 | 0 |
879 | 0 | case XENOPROF_shutdown: |
880 | 0 | ret = -EPERM; |
881 | 0 | if ( xenoprof_state == XENOPROF_INITIALIZED ) |
882 | 0 | { |
883 | 0 | activated = 0; |
884 | 0 | adomains=0; |
885 | 0 | xenoprof_primary_profiler = NULL; |
886 | 0 | backtrace_depth=0; |
887 | 0 | ret = 0; |
888 | 0 | } |
889 | 0 | break; |
890 | 0 | |
891 | 0 | case XENOPROF_set_backtrace: |
892 | 0 | ret = 0; |
893 | 0 | if ( !xenoprof_backtrace_supported() ) |
894 | 0 | ret = -EINVAL; |
895 | 0 | else if ( copy_from_guest(&backtrace_depth, arg, 1) ) |
896 | 0 | ret = -EFAULT; |
897 | 0 | break; |
898 | 0 |
899 | 0 | case XENOPROF_ibs_counter: |
900 | 0 | if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) || |
901 | 0 | (adomains == 0) ) |
902 | 0 | { |
903 | 0 | ret = -EPERM; |
904 | 0 | break; |
905 | 0 | } |
906 | 0 | ret = xenoprof_arch_ibs_counter(arg); |
907 | 0 | break; |
908 | 0 |
909 | 0 | case XENOPROF_get_ibs_caps: |
910 | 0 | ret = ibs_caps; |
911 | 0 | break; |
912 | 0 |
913 | 0 | default: |
914 | 0 | ret = -ENOSYS; |
915 | 0 | } |
916 | 0 |
917 | 0 | spin_unlock(&xenoprof_lock); |
918 | 0 |
919 | 0 | if ( ret < 0 ) |
920 | 0 | gdprintk(XENLOG_DEBUG, "operation %d failed: %d\n", op, ret); |
921 | 0 |
922 | 0 | return ret; |
923 | 0 | } |
Unexecuted instantiation: do_xenoprof_op
Unexecuted instantiation: compat_xenoprof_op
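Editor's note: a hedged sketch of the hypercall sequence the state machine in do_xenoprof_op() expects from the primary profiler. This is driver-side (guest) code, not part of this file; HYPERVISOR_xenoprof_op is assumed to be the guest's hypercall wrapper, as in the classic Linux/mini-os ports.

/*
 * Typical primary-profiler session (sketch; arguments elided):
 *   XENOPROF_init              IDLE -> INITIALIZED, learns is_primary
 *   XENOPROF_get_buffer        allocate/map the shared sample buffers
 *   XENOPROF_set_active /      register domains to profile
 *   XENOPROF_set_passive       (INITIALIZED state required)
 *   XENOPROF_reserve_counters  INITIALIZED -> COUNTERS_RESERVED
 *   XENOPROF_counter           once per hardware counter
 *   XENOPROF_setup_events      COUNTERS_RESERVED -> READY
 *   XENOPROF_enable_virq       bind VIRQ_XENOPROF, mark the domain active
 *   XENOPROF_start             READY -> PROFILING
 *   ... consume samples delivered via VIRQ_XENOPROF ...
 *   XENOPROF_stop              PROFILING -> READY
 *   XENOPROF_disable_virq, XENOPROF_release_counters, XENOPROF_shutdown
 * e.g. HYPERVISOR_xenoprof_op(XENOPROF_init, &init) from the guest driver.
 */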
924 | | |
925 | | #if defined(CONFIG_COMPAT) && !defined(COMPAT) |
926 | | #undef ret_t |
927 | | #include "compat/xenoprof.c" |
928 | | #endif |
929 | | |
930 | | /* |
931 | | * Local variables: |
932 | | * mode: C |
933 | | * c-file-style: "BSD" |
934 | | * c-basic-offset: 4 |
935 | | * tab-width: 4 |
936 | | * indent-tabs-mode: nil |
937 | | * End: |
938 | | */ |