/*
 * Copyright (C) 2005 Hewlett-Packard Co.
 * written by Aravind Menon & Jose Renato Santos
 * (email: xenoprof@groups.hp.com)
 *
 * arch generic xenoprof and IA64 support.
 * dynamic map/unmap xenoprof buffer support.
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 */

#ifndef COMPAT
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <public/xenoprof.h>
#include <xen/paging.h>
#include <xsm/xsm.h>
#include <xen/hypercall.h>

/* Limit the number of pages used for the shared buffer (per domain). */
#define MAX_OPROF_SHARED_PAGES 32

/* Lock protecting the following global state. */
static DEFINE_SPINLOCK(xenoprof_lock);

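/*
 * Arbitration of PMU ownership between xenoprof and HVM users.  HVM
 * ownership is reference-counted, so several HVM users may share the PMU.
 */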
static DEFINE_SPINLOCK(pmu_owner_lock);
int pmu_owner = 0;
int pmu_hvm_refcount = 0;

static struct domain *active_domains[MAX_OPROF_DOMAINS];
static int active_ready[MAX_OPROF_DOMAINS];
static unsigned int adomains;

static struct domain *passive_domains[MAX_OPROF_DOMAINS];
static unsigned int pdomains;

static unsigned int activated;
static struct domain *xenoprof_primary_profiler;
static int xenoprof_state = XENOPROF_IDLE;
static unsigned long backtrace_depth;

static u64 total_samples;
static u64 invalid_buffer_samples;
static u64 corrupted_buffer_samples;
static u64 lost_samples;
static u64 active_samples;
static u64 passive_samples;
static u64 idle_samples;
static u64 others_samples;

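/*
 * Claim the PMU for @pmu_ownship.  Succeeds (returning 1) if the PMU is
 * free or already held by the same owner type; returns 0 otherwise.
 */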
int acquire_pmu_ownership(int pmu_ownship)
{
    spin_lock(&pmu_owner_lock);
    if ( pmu_owner == PMU_OWNER_NONE )
    {
        pmu_owner = pmu_ownship;
        goto out;
    }

    if ( pmu_owner == pmu_ownship )
        goto out;

    spin_unlock(&pmu_owner_lock);
    return 0;
 out:
    if ( pmu_owner == PMU_OWNER_HVM )
        pmu_hvm_refcount++;
    spin_unlock(&pmu_owner_lock);
    return 1;
}

void release_pmu_ownship(int pmu_ownship)
{
    spin_lock(&pmu_owner_lock);
    if ( pmu_ownship == PMU_OWNER_HVM )
        pmu_hvm_refcount--;
    if ( !pmu_hvm_refcount )
        pmu_owner = PMU_OWNER_NONE;
    spin_unlock(&pmu_owner_lock);
}

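/* Predicates on the profiling role currently assigned to a domain. */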
int is_active(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}

int is_passive(struct domain *d)
{
    struct xenoprof *x = d->xenoprof;
    return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
}

static int is_profiled(struct domain *d)
{
    return (is_active(d) || is_passive(d));
}

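/* Zero the global sample counters. */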
static void xenoprof_reset_stat(void)
{
    total_samples = 0;
    invalid_buffer_samples = 0;
    corrupted_buffer_samples = 0;
    lost_samples = 0;
    active_samples = 0;
    passive_samples = 0;
    idle_samples = 0;
    others_samples = 0;
}

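/* Reset the head/tail indexes of each vcpu's shared sample buffer. */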
static void xenoprof_reset_buf(struct domain *d)
{
    int j;
    xenoprof_buf_t *buf;

    if ( d->xenoprof == NULL )
    {
        printk("xenoprof_reset_buf: ERROR - unexpected xenoprof NULL pointer\n");
        return;
    }

    for ( j = 0; j < d->max_vcpus; j++ )
    {
        buf = d->xenoprof->vcpu[j].buffer;
        if ( buf != NULL )
        {
            xenoprof_buf(d, buf, event_head) = 0;
            xenoprof_buf(d, buf, event_tail) = 0;
        }
    }
}

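/*
 * Hand the buffer pages over to @d as writable shared pages, failing with
 * -EBUSY while a previous owner still holds references on any of them.
 */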
static int
share_xenoprof_page_with_guest(struct domain *d, unsigned long mfn, int npages)
{
    int i;

    /* Check if previous page owner has released the page. */
    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
        {
            gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%lx\n",
                     mfn + i, (unsigned long)page->count_info);
            return -EBUSY;
        }
        page_set_owner(page, NULL);
    }

    for ( i = 0; i < npages; i++ )
        share_xen_page_with_guest(mfn_to_page(mfn + i), d, XENSHARE_writable);

    return 0;
}

static void
unshare_xenoprof_page_with_guest(struct xenoprof *x)
{
    int i, npages = x->npages;
    unsigned long mfn = virt_to_mfn(x->rawbuf);

    for ( i = 0; i < npages; i++ )
    {
        struct page_info *page = mfn_to_page(mfn + i);
        BUG_ON(page_get_owner(page) != current->domain);
        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);
    }
}

static void
xenoprof_shared_gmfn_with_guest(
    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
{
    int i;

    for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE )
    {
        BUG_ON(page_get_owner(maddr_to_page(maddr)) != d);
        xenoprof_shared_gmfn(d, gmaddr, maddr);
    }
}

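/*
 * Allocate the per-domain xenoprof state plus a contiguous xenheap buffer
 * that is carved into one sample buffer per vcpu, clamping max_samples so
 * the allocation never exceeds MAX_OPROF_SHARED_PAGES.
 */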
static int alloc_xenoprof_struct(
    struct domain *d, int max_samples, int is_passive)
{
    struct vcpu *v;
    int nvcpu, npages, bufsize, max_bufsize;
    unsigned max_max_samples;
    int i;

    d->xenoprof = xmalloc(struct xenoprof);
    if ( d->xenoprof == NULL )
    {
        printk("alloc_xenoprof_struct(): memory allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof, 0, sizeof(*d->xenoprof));

    d->xenoprof->vcpu = xmalloc_array(struct xenoprof_vcpu, d->max_vcpus);
    if ( d->xenoprof->vcpu == NULL )
    {
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        printk("alloc_xenoprof_struct(): vcpu array allocation failed\n");
        return -ENOMEM;
    }

    memset(d->xenoprof->vcpu, 0, d->max_vcpus * sizeof(*d->xenoprof->vcpu));

    nvcpu = 0;
    for_each_vcpu ( d, v )
        nvcpu++;

    bufsize = sizeof(struct xenoprof_buf);
    i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
    d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? dom0 : d);
    if ( XENOPROF_COMPAT(d->xenoprof) )
    {
        bufsize = sizeof(struct compat_oprof_buf);
        i = sizeof(struct compat_event_log);
    }
#endif

    /* Reduce max_samples if necessary to limit pages allocated. */
    max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
    max_max_samples = ((max_bufsize - bufsize) / i) + 1;
    if ( (unsigned)max_samples > max_max_samples )
        max_samples = max_max_samples;

    bufsize += (max_samples - 1) * i;
    npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;

    d->xenoprof->rawbuf = alloc_xenheap_pages(get_order_from_pages(npages), 0);
    if ( d->xenoprof->rawbuf == NULL )
    {
        xfree(d->xenoprof->vcpu);
        xfree(d->xenoprof);
        d->xenoprof = NULL;
        return -ENOMEM;
    }

    d->xenoprof->npages = npages;
    d->xenoprof->nbuf = nvcpu;
    d->xenoprof->bufsize = bufsize;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;

    /* Update buffer pointers for active vcpus. */
    i = 0;
    for_each_vcpu ( d, v )
    {
        xenoprof_buf_t *buf = (xenoprof_buf_t *)
            &d->xenoprof->rawbuf[i * bufsize];

        d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
        d->xenoprof->vcpu[v->vcpu_id].buffer = buf;
        xenoprof_buf(d, buf, event_size) = max_samples;
        xenoprof_buf(d, buf, vcpu_id) = v->vcpu_id;

        i++;
        /* In the unlikely case that the number of active vcpus changes. */
        if ( i >= nvcpu )
            break;
    }

    return 0;
}

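/* Release the sample buffers and per-domain xenoprof state, if any. */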
void free_xenoprof_pages(struct domain *d)
{
    struct xenoprof *x;
    int order;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    if ( x->rawbuf != NULL )
    {
        order = get_order_from_pages(x->npages);
        free_xenheap_pages(x->rawbuf, order);
    }

    /* Free the per-vcpu buffer descriptors along with the container. */
    xfree(x->vcpu);
    xfree(x);
    d->xenoprof = NULL;
}

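/* Index of @d in the active-domains list, or -1 if not registered. */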
static int active_index(struct domain *d)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_domains[i] == d )
            return i;

    return -1;
}

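/* Flag a registered active domain as ready to receive samples. */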
static int set_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 1;
    x->domain_type = XENOPROF_DOMAIN_ACTIVE;
    active_ready[ind] = 1;
    activated++;

    return 0;
}

static int reset_active(struct domain *d)
{
    int ind;
    struct xenoprof *x;

    ind = active_index(d);
    if ( ind < 0 )
        return -EPERM;

    x = d->xenoprof;
    if ( x == NULL )
        return -EPERM;

    x->domain_ready = 0;
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
    active_ready[ind] = 0;
    active_domains[ind] = NULL;
    activated--;
    put_domain(d);

    if ( activated <= 0 )
        adomains = 0;

    return 0;
}

static void reset_passive(struct domain *d)
{
    struct xenoprof *x;

    if ( d == NULL )
        return;

    x = d->xenoprof;
    if ( x == NULL )
        return;

    unshare_xenoprof_page_with_guest(x);
    x->domain_type = XENOPROF_DOMAIN_IGNORED;
}

static void reset_active_list(void)
{
    int i;

    for ( i = 0; i < adomains; i++ )
        if ( active_ready[i] )
            reset_active(active_domains[i]);

    adomains = 0;
    activated = 0;
}

static void reset_passive_list(void)
{
    int i;

    for ( i = 0; i < pdomains; i++ )
    {
        reset_passive(passive_domains[i]);
        put_domain(passive_domains[i]);
        passive_domains[i] = NULL;
    }

    pdomains = 0;
}

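/*
 * Register a domain for active profiling.  Takes a domain reference that
 * is dropped again by reset_active().
 */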
static int add_active_list(domid_t domid)
{
    struct domain *d;

    if ( adomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    d = get_domain_by_id(domid);
    if ( d == NULL )
        return -EINVAL;

    active_domains[adomains] = d;
    active_ready[adomains] = 0;
    adomains++;

    return 0;
}

static int add_passive_list(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_passive passive;
    struct domain *d;
    int ret = 0;

    if ( pdomains >= MAX_OPROF_DOMAINS )
        return -E2BIG;

    if ( copy_from_guest(&passive, arg, 1) )
        return -EFAULT;

    d = get_domain_by_id(passive.domain_id);
    if ( d == NULL )
        return -EINVAL;

    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, passive.max_samples, 1);
        if ( ret < 0 )
        {
            put_domain(d);
            return -ENOMEM;
        }
    }

    ret = share_xenoprof_page_with_guest(
        current->domain, virt_to_mfn(d->xenoprof->rawbuf),
        d->xenoprof->npages);
    if ( ret < 0 )
    {
        put_domain(d);
        return ret;
    }

    d->xenoprof->domain_type = XENOPROF_DOMAIN_PASSIVE;
    passive.nbuf = d->xenoprof->nbuf;
    passive.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(current->domain) )
        passive.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            current->domain, __pa(d->xenoprof->rawbuf),
            passive.buf_gmaddr, d->xenoprof->npages);

    if ( copy_to_guest(arg, &passive, 1) )
    {
        put_domain(d);
        return -EFAULT;
    }

    passive_domains[pdomains] = d;
    pdomains++;

    return ret;
}

/* Get space in the buffer. */
static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t *buf, int size)
{
    int head, tail;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);

    return ((tail > head) ? 0 : size) + tail - head - 1;
}

/* Check for space and add a sample.  Return 1 if successful, 0 otherwise. */
static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf,
                               unsigned long eip, int mode, int event)
{
    int head, tail, size;

    head = xenoprof_buf(d, buf, event_head);
    tail = xenoprof_buf(d, buf, event_tail);
    size = xenoprof_buf(d, buf, event_size);

    /* Make sure indexes in shared buffer are sane. */
    if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
    {
        corrupted_buffer_samples++;
        return 0;
    }

    if ( xenoprof_buf_space(d, buf, size) > 0 )
    {
        xenoprof_buf(d, buf, event_log[head].eip) = eip;
        xenoprof_buf(d, buf, event_log[head].mode) = mode;
        xenoprof_buf(d, buf, event_log[head].event) = event;
        head++;
        if ( head >= size )
            head = 0;

        xenoprof_buf(d, buf, event_head) = head;
    }
    else
    {
        xenoprof_buf(d, buf, lost_samples)++;
        lost_samples++;
        return 0;
    }

    return 1;
}

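/*
 * Append one backtrace entry for @vcpu, rejecting values that would
 * collide with the escape code delimiting trace records.
 */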
int xenoprof_add_trace(struct domain *d, struct vcpu *vcpu,
                       unsigned long eip, int mode)
{
    xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer;

    /* Do not accidentally write an escape code due to a broken frame. */
    if ( eip == XENOPROF_ESCAPE_CODE )
    {
        invalid_buffer_samples++;
        return 0;
    }

    return xenoprof_add_sample(d, buf, eip, mode, 0);
}

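/*
 * Record a sample for @vcpu, preceded by a XENOPROF_TRACE_BEGIN escape
 * record (and followed by a backtrace) when backtraces were requested.
 */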
void xenoprof_log_event(struct vcpu *vcpu,
                        struct cpu_user_regs *regs, unsigned long eip,
                        int mode, int event)
{
    struct domain *d = vcpu->domain;
    struct xenoprof_vcpu *v;
    xenoprof_buf_t *buf;

    total_samples++;

    /* Ignore samples of un-monitored domains. */
    if ( !is_profiled(d) )
    {
        others_samples++;
        return;
    }

    v = &d->xenoprof->vcpu[vcpu->vcpu_id];
    if ( v->buffer == NULL )
    {
        invalid_buffer_samples++;
        return;
    }

    buf = v->buffer;

    /* Provide backtrace if requested. */
    if ( backtrace_depth > 0 )
    {
        if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) ||
             !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode,
                                  XENOPROF_TRACE_BEGIN) )
        {
            xenoprof_buf(d, buf, lost_samples)++;
            lost_samples++;
            return;
        }
    }

    if ( xenoprof_add_sample(d, buf, eip, mode, event) )
    {
        if ( is_active(vcpu->domain) )
            active_samples++;
        else
            passive_samples++;
        if ( mode == 0 )
            xenoprof_buf(d, buf, user_samples)++;
        else if ( mode == 1 )
            xenoprof_buf(d, buf, kernel_samples)++;
        else
            xenoprof_buf(d, buf, xen_samples)++;
    }

    if ( backtrace_depth > 0 )
        xenoprof_backtrace(d, vcpu, regs, backtrace_depth, mode);
}

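/*
 * XENOPROF_init: report the event counters available to the caller and
 * whether it is (or becomes) the primary profiler.
 */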
static int xenoprof_op_init(XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct xenoprof_init xenoprof_init;
    int ret;

    if ( copy_from_guest(&xenoprof_init, arg, 1) )
        return -EFAULT;

    if ( (ret = xenoprof_arch_init(&xenoprof_init.num_events,
                                   xenoprof_init.cpu_type)) )
        return ret;

    xenoprof_init.is_primary =
        ((xenoprof_primary_profiler == d) ||
         ((xenoprof_primary_profiler == NULL) && (d->domain_id == 0)));
    if ( xenoprof_init.is_primary )
        xenoprof_primary_profiler = current->domain;

    return (copy_to_guest(arg, &xenoprof_init, 1) ? -EFAULT : 0);
}

#endif /* !COMPAT */

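/*
 * XENOPROF_get_buffer: allocate (on first use) the caller's own sample
 * buffers, share them with the caller and report their location.
 */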
static int xenoprof_op_get_buffer(XEN_GUEST_HANDLE(void) arg)
{
    struct xenoprof_get_buffer xenoprof_get_buffer;
    struct domain *d = current->domain;
    int ret;

    if ( copy_from_guest(&xenoprof_get_buffer, arg, 1) )
        return -EFAULT;

    /*
     * We allocate the xenoprof struct and buffers only the first time
     * get_buffer is called.  Memory is then kept until domain is destroyed.
     */
    if ( d->xenoprof == NULL )
    {
        ret = alloc_xenoprof_struct(d, xenoprof_get_buffer.max_samples, 0);
        if ( ret < 0 )
            return ret;
    }

    ret = share_xenoprof_page_with_guest(
        d, virt_to_mfn(d->xenoprof->rawbuf), d->xenoprof->npages);
    if ( ret < 0 )
        return ret;

    xenoprof_reset_buf(d);

    d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
    d->xenoprof->domain_ready = 0;
    d->xenoprof->is_primary = (xenoprof_primary_profiler == current->domain);

    xenoprof_get_buffer.nbuf = d->xenoprof->nbuf;
    xenoprof_get_buffer.bufsize = d->xenoprof->bufsize;
    if ( !paging_mode_translate(d) )
        xenoprof_get_buffer.buf_gmaddr = __pa(d->xenoprof->rawbuf);
    else
        xenoprof_shared_gmfn_with_guest(
            d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr,
            d->xenoprof->npages);

    if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
        return -EFAULT;

    return 0;
}

#define NONPRIV_OP(op) ( (op == XENOPROF_init)          \
                      || (op == XENOPROF_enable_virq)   \
                      || (op == XENOPROF_disable_virq)  \
                      || (op == XENOPROF_get_buffer))

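/*
 * Top-level xenoprof hypercall dispatcher.  Operations outside the
 * NONPRIV_OP set are restricted to the primary profiler; all operations
 * run under xenoprof_lock.
 */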
int do_xenoprof_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    int ret = 0;

    if ( (op < 0) || (op > XENOPROF_last_op) )
    {
        printk("xenoprof: invalid operation %d for domain %d\n",
               op, current->domain->domain_id);
        return -EINVAL;
    }

    if ( !NONPRIV_OP(op) && (current->domain != xenoprof_primary_profiler) )
    {
        printk("xenoprof: dom %d denied privileged operation %d\n",
               current->domain->domain_id, op);
        return -EPERM;
    }

    ret = xsm_profile(current->domain, op);
    if ( ret )
        return ret;

    spin_lock(&xenoprof_lock);

    switch ( op )
    {
    case XENOPROF_init:
        ret = xenoprof_op_init(arg);
        if ( (ret == 0) &&
             (current->domain == xenoprof_primary_profiler) )
            xenoprof_state = XENOPROF_INITIALIZED;
        break;

    case XENOPROF_get_buffer:
        if ( !acquire_pmu_ownership(PMU_OWNER_XENOPROF) )
        {
            ret = -EBUSY;
            break;
        }
        ret = xenoprof_op_get_buffer(arg);
        break;

    case XENOPROF_reset_active_list:
        reset_active_list();
        ret = 0;
        break;

    case XENOPROF_reset_passive_list:
        reset_passive_list();
        ret = 0;
        break;

    case XENOPROF_set_active:
    {
        domid_t domid;
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        if ( copy_from_guest(&domid, arg, 1) )
        {
            ret = -EFAULT;
            break;
        }
        ret = add_active_list(domid);
        break;
    }

    case XENOPROF_set_passive:
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        ret = add_passive_list(arg);
        break;

    case XENOPROF_reserve_counters:
        if ( xenoprof_state != XENOPROF_INITIALIZED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_reserve_counters();
        if ( !ret )
            xenoprof_state = XENOPROF_COUNTERS_RESERVED;
        break;

    case XENOPROF_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_counter(arg);
        break;

    case XENOPROF_setup_events:
        if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_setup_events();
        if ( !ret )
            xenoprof_state = XENOPROF_READY;
        break;

    case XENOPROF_enable_virq:
    {
        int i;

        if ( current->domain == xenoprof_primary_profiler )
        {
            if ( xenoprof_state != XENOPROF_READY )
            {
                ret = -EPERM;
                break;
            }
            xenoprof_arch_enable_virq();
            xenoprof_reset_stat();
            for ( i = 0; i < pdomains; i++ )
                xenoprof_reset_buf(passive_domains[i]);
        }
        xenoprof_reset_buf(current->domain);
        ret = set_active(current->domain);
        break;
    }

    case XENOPROF_start:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_READY) &&
             (activated == adomains) )
            ret = xenoprof_arch_start();
        if ( ret == 0 )
            xenoprof_state = XENOPROF_PROFILING;
        break;

    case XENOPROF_stop:
    {
        struct domain *d;
        struct vcpu *v;
        int i;

        if ( xenoprof_state != XENOPROF_PROFILING )
        {
            ret = -EPERM;
            break;
        }
        xenoprof_arch_stop();

        /* Flush remaining samples. */
        for ( i = 0; i < adomains; i++ )
        {
            if ( !active_ready[i] )
                continue;
            d = active_domains[i];
            for_each_vcpu ( d, v )
                send_guest_vcpu_virq(v, VIRQ_XENOPROF);
        }
        xenoprof_state = XENOPROF_READY;
        break;
    }

    case XENOPROF_disable_virq:
    {
        struct xenoprof *x;
        if ( (xenoprof_state == XENOPROF_PROFILING) &&
             (is_active(current->domain)) )
        {
            ret = -EPERM;
            break;
        }
        if ( (ret = reset_active(current->domain)) != 0 )
            break;
        x = current->domain->xenoprof;
        unshare_xenoprof_page_with_guest(x);
        release_pmu_ownship(PMU_OWNER_XENOPROF);
        break;
    }

    case XENOPROF_release_counters:
        ret = -EPERM;
        if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
             (xenoprof_state == XENOPROF_READY) )
        {
            xenoprof_state = XENOPROF_INITIALIZED;
            xenoprof_arch_release_counters();
            xenoprof_arch_disable_virq();
            reset_passive_list();
            ret = 0;
        }
        break;

    case XENOPROF_shutdown:
        ret = -EPERM;
        if ( xenoprof_state == XENOPROF_INITIALIZED )
        {
            activated = 0;
            adomains = 0;
            xenoprof_primary_profiler = NULL;
            backtrace_depth = 0;
            ret = 0;
        }
        break;

    case XENOPROF_set_backtrace:
        ret = 0;
        if ( !xenoprof_backtrace_supported() )
            ret = -EINVAL;
        else if ( copy_from_guest(&backtrace_depth, arg, 1) )
            ret = -EFAULT;
        break;

    case XENOPROF_ibs_counter:
        if ( (xenoprof_state != XENOPROF_COUNTERS_RESERVED) ||
             (adomains == 0) )
        {
            ret = -EPERM;
            break;
        }
        ret = xenoprof_arch_ibs_counter(arg);
        break;

    case XENOPROF_get_ibs_caps:
        ret = ibs_caps;
        break;

    default:
        ret = -ENOSYS;
    }

    spin_unlock(&xenoprof_lock);

    if ( ret < 0 )
        printk("xenoprof: operation %d failed for dom %d (status: %d)\n",
               op, current->domain->domain_id, ret);

    return ret;
}

#if defined(CONFIG_COMPAT) && !defined(COMPAT)
#include "compat/xenoprof.c"
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
|