/root/src/xen/xen/common/tmem_control.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved. |
3 | | * |
4 | | */ |
5 | | |
6 | | #include <xen/init.h> |
7 | | #include <xen/list.h> |
8 | | #include <xen/radix-tree.h> |
9 | | #include <xen/rbtree.h> |
10 | | #include <xen/rwlock.h> |
11 | | #include <xen/tmem_control.h> |
12 | | #include <xen/tmem.h> |
13 | | #include <xen/tmem_xen.h> |
14 | | #include <public/sysctl.h> |
15 | | |
16 | | /************ TMEM CONTROL OPERATIONS ************************************/ |
17 | | |
18 | | /* Freeze/thaw all pools belonging to client cli_id (all domains if -1). */ |
19 | | static int tmemc_freeze_pools(domid_t cli_id, int arg) |
20 | 0 | { |
21 | 0 | struct client *client; |
22 | 0 | bool freeze = arg == XEN_SYSCTL_TMEM_OP_FREEZE; |
23 | 0 | bool destroy = arg == XEN_SYSCTL_TMEM_OP_DESTROY; |
24 | 0 | char *s; |
25 | 0 |
26 | 0 | s = destroy ? "destroyed" : ( freeze ? "frozen" : "thawed" ); |
27 | 0 | if ( cli_id == TMEM_CLI_ID_NULL ) |
28 | 0 | { |
29 | 0 | list_for_each_entry(client,&tmem_global.client_list,client_list) |
30 | 0 | client->info.flags.u.frozen = freeze; |
31 | 0 | tmem_client_info("tmem: all pools %s for all %ss\n", s, tmem_client_str); |
32 | 0 | } |
33 | 0 | else |
34 | 0 | { |
35 | 0 | if ( (client = tmem_client_from_cli_id(cli_id)) == NULL) |
36 | 0 | return -1; |
37 | 0 | client->info.flags.u.frozen = freeze; |
38 | 0 | tmem_client_info("tmem: all pools %s for %s=%d\n", |
39 | 0 | s, tmem_cli_id_str, cli_id); |
40 | 0 | } |
41 | 0 | return 0; |
42 | 0 | } |
43 | | |
44 | | static unsigned long tmem_flush_npages(unsigned long n) |
45 | 0 | { |
46 | 0 | unsigned long avail_pages = 0; |
47 | 0 |
48 | 0 | while ( (avail_pages = tmem_page_list_pages) < n ) |
49 | 0 | { |
50 | 0 | if ( !tmem_evict() ) |
51 | 0 | break; |
52 | 0 | } |
53 | 0 | if ( avail_pages ) |
54 | 0 | { |
55 | 0 | spin_lock(&tmem_page_list_lock); |
56 | 0 | while ( !page_list_empty(&tmem_page_list) ) |
57 | 0 | { |
58 | 0 | struct page_info *pg = page_list_remove_head(&tmem_page_list); |
59 | 0 | scrub_one_page(pg); |
60 | 0 | tmem_page_list_pages--; |
61 | 0 | free_domheap_page(pg); |
62 | 0 | } |
63 | 0 | ASSERT(tmem_page_list_pages == 0); |
64 | 0 | INIT_PAGE_LIST_HEAD(&tmem_page_list); |
65 | 0 | spin_unlock(&tmem_page_list_lock); |
66 | 0 | } |
67 | 0 | return avail_pages; |
68 | 0 | } |
69 | | |
70 | | static int tmemc_flush_mem(domid_t cli_id, uint32_t kb) |
71 | 0 | { |
72 | 0 | uint32_t npages, flushed_pages, flushed_kb; |
73 | 0 |
74 | 0 | if ( cli_id != TMEM_CLI_ID_NULL ) |
75 | 0 | { |
76 | 0 | tmem_client_warn("tmem: %s-specific flush not supported yet, use --all\n", |
77 | 0 | tmem_client_str); |
78 | 0 | return -1; |
79 | 0 | } |
80 | 0 | /* Convert kb to pages, rounding up if necessary. */ |
81 | 0 | npages = (kb + ((1 << (PAGE_SHIFT-10))-1)) >> (PAGE_SHIFT-10); |
82 | 0 | flushed_pages = tmem_flush_npages(npages); |
83 | 0 | flushed_kb = flushed_pages << (PAGE_SHIFT-10); |
84 | 0 | return flushed_kb; |
85 | 0 | } |
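For illustration, the conversion in tmemc_flush_mem() above rounds a KiB count up to whole pages. A minimal, self-contained sketch of the same arithmetic (assuming the common PAGE_SHIFT of 12, i.e. 4 KiB pages; this test program is not part of the Xen tree):

    /* Round a KiB count up to whole pages, as tmemc_flush_mem() does.
     * PAGE_SHIFT == 12 is an assumption (4 KiB pages). */
    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    static uint32_t kb_to_pages_round_up(uint32_t kb)
    {
        /* 1 page == (1 << (PAGE_SHIFT - 10)) KiB == 4 KiB here. */
        return (kb + ((1u << (PAGE_SHIFT - 10)) - 1)) >> (PAGE_SHIFT - 10);
    }

    int main(void)
    {
        assert(kb_to_pages_round_up(0) == 0);
        assert(kb_to_pages_round_up(1) == 1); /* partial page rounds up */
        assert(kb_to_pages_round_up(4) == 1);
        assert(kb_to_pages_round_up(5) == 2);
        return 0;
    }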
86 | | |
87 | | /* |
88 | | * These tmemc_list* routines output lots of stats in a format that is |
89 | | * intended to be program-parseable, not human-readable. Further, by |
90 | | * tying each group of stats to a line format indicator (e.g. G= for |
91 | | * global stats) and each individual stat to a two-letter specifier |
92 | | * (e.g. Ec:nnnnn in the G= line says there are nnnnn pages in the |
93 | | * global ephemeral pool), it should allow the stats reported to be |
94 | | * forward and backwards compatible as tmem evolves. |
95 | | */ |
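For illustration, tmemc_list_global() below emits lines such as a hypothetical "G=Tt:1000,Te:2,Cf:0,Af:0" (the key names Tt/Te/Cf/Af come from its format string; the values here are made up). Because every field is a short key, a ':' and a number, a consumer can skip keys it does not recognise. A minimal stand-alone parsing sketch, not part of the Xen tree:

    /* Parse one program-parseable tmem stats line of the form
     * "G=Tt:123,Te:4,...". Purely illustrative; the input is a made-up example. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char line[] = "G=Tt:1000,Te:2,Cf:0,Af:0";
        char *body = strchr(line, '=');

        if ( body == NULL )
            return 1;
        for ( char *tok = strtok(body + 1, ","); tok != NULL; tok = strtok(NULL, ",") )
        {
            char *sep = strchr(tok, ':');

            if ( sep == NULL )
                continue;
            *sep = '\0';
            printf("%s -> %lu\n", tok, strtoul(sep + 1, NULL, 10));
        }
        return 0;
    }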
96 | 0 | #define BSIZE 1024 |
97 | | |
98 | | static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf, |
99 | | int off, uint32_t len, bool use_long) |
100 | 0 | { |
101 | 0 | char info[BSIZE]; |
102 | 0 | int i, n = 0, sum = 0; |
103 | 0 | struct tmem_pool *p; |
104 | 0 | bool s; |
105 | 0 |
106 | 0 | n = scnprintf(info,BSIZE,"C=CI:%d,ww:%d,co:%d,fr:%d," |
107 | 0 | "Tc:%"PRIu64",Ge:%ld,Pp:%ld,Gp:%ld%c", |
108 | 0 | c->cli_id, c->info.weight, c->info.flags.u.compress, c->info.flags.u.frozen, |
109 | 0 | c->total_cycles, c->succ_eph_gets, c->succ_pers_puts, c->succ_pers_gets, |
110 | 0 | use_long ? ',' : '\n'); |
111 | 0 | if (use_long) |
112 | 0 | n += scnprintf(info+n,BSIZE-n, |
113 | 0 | "Ec:%ld,Em:%ld,cp:%ld,cb:%"PRId64",cn:%ld,cm:%ld\n", |
114 | 0 | c->eph_count, c->eph_count_max, |
115 | 0 | c->compressed_pages, c->compressed_sum_size, |
116 | 0 | c->compress_poor, c->compress_nomem); |
117 | 0 | if ( !copy_to_guest_offset(buf, off + sum, info, n + 1) ) |
118 | 0 | sum += n; |
119 | 0 | for ( i = 0; i < MAX_POOLS_PER_DOMAIN; i++ ) |
120 | 0 | { |
121 | 0 | if ( (p = c->pools[i]) == NULL ) |
122 | 0 | continue; |
123 | 0 | s = is_shared(p); |
124 | 0 | n = scnprintf(info,BSIZE,"P=CI:%d,PI:%d," |
125 | 0 | "PT:%c%c,U0:%"PRIx64",U1:%"PRIx64"%c", |
126 | 0 | c->cli_id, p->pool_id, |
127 | 0 | is_persistent(p) ? 'P' : 'E', s ? 'S' : 'P', |
128 | 0 | (uint64_t)(s ? p->uuid[0] : 0), |
129 | 0 | (uint64_t)(s ? p->uuid[1] : 0LL), |
130 | 0 | use_long ? ',' : '\n'); |
131 | 0 | if (use_long) |
132 | 0 | n += scnprintf(info+n,BSIZE-n, |
133 | 0 | "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," |
134 | 0 | "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," |
135 | 0 | "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", |
136 | 0 | _atomic_read(p->pgp_count), p->pgp_count_max, |
137 | 0 | p->obj_count, p->obj_count_max, |
138 | 0 | p->objnode_count, p->objnode_count_max, |
139 | 0 | p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced, |
140 | 0 | p->no_mem_puts, |
141 | 0 | p->found_gets, p->gets, |
142 | 0 | p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); |
143 | 0 | if ( sum + n >= len ) |
144 | 0 | return sum; |
145 | 0 | if ( !copy_to_guest_offset(buf, off + sum, info, n + 1) ) |
146 | 0 | sum += n; |
147 | 0 | } |
148 | 0 | return sum; |
149 | 0 | } |
150 | | |
151 | | static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len, |
152 | | bool use_long) |
153 | 0 | { |
154 | 0 | char info[BSIZE]; |
155 | 0 | int i, n = 0, sum = 0; |
156 | 0 | struct tmem_pool *p; |
157 | 0 | struct share_list *sl; |
158 | 0 |
159 | 0 | for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ ) |
160 | 0 | { |
161 | 0 | if ( (p = tmem_global.shared_pools[i]) == NULL ) |
162 | 0 | continue; |
163 | 0 | n = scnprintf(info+n,BSIZE-n,"S=SI:%d,PT:%c%c,U0:%"PRIx64",U1:%"PRIx64, |
164 | 0 | i, is_persistent(p) ? 'P' : 'E', |
165 | 0 | is_shared(p) ? 'S' : 'P', |
166 | 0 | p->uuid[0], p->uuid[1]); |
167 | 0 | list_for_each_entry(sl,&p->share_list, share_list) |
168 | 0 | n += scnprintf(info+n,BSIZE-n,",SC:%d",sl->client->cli_id); |
169 | 0 | n += scnprintf(info+n,BSIZE-n,"%c", use_long ? ',' : '\n'); |
170 | 0 | if (use_long) |
171 | 0 | n += scnprintf(info+n,BSIZE-n, |
172 | 0 | "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," |
173 | 0 | "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," |
174 | 0 | "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", |
175 | 0 | _atomic_read(p->pgp_count), p->pgp_count_max, |
176 | 0 | p->obj_count, p->obj_count_max, |
177 | 0 | p->objnode_count, p->objnode_count_max, |
178 | 0 | p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced, |
179 | 0 | p->no_mem_puts, |
180 | 0 | p->found_gets, p->gets, |
181 | 0 | p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); |
182 | 0 | if ( sum + n >= len ) |
183 | 0 | return sum; |
184 | 0 | if ( !copy_to_guest_offset(buf, off + sum, info, n + 1) ) |
185 | 0 | sum += n; |
186 | 0 | } |
187 | 0 | return sum; |
188 | 0 | } |
189 | | |
190 | | static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off, |
191 | | uint32_t len, bool use_long) |
192 | 0 | { |
193 | 0 | char info[BSIZE]; |
194 | 0 | int n = 0, sum = 0; |
195 | 0 |
196 | 0 | n = scnprintf(info+n,BSIZE-n,"T="); |
197 | 0 | n--; /* Overwrite trailing comma. */ |
198 | 0 | n += scnprintf(info+n,BSIZE-n,"\n"); |
199 | 0 | if ( sum + n >= len ) |
200 | 0 | return sum; |
201 | 0 | if ( !copy_to_guest_offset(buf, off + sum, info, n + 1) ) |
202 | 0 | sum += n; |
203 | 0 | return sum; |
204 | 0 | } |
205 | | |
206 | | static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len, |
207 | | bool use_long) |
208 | 0 | { |
209 | 0 | char info[BSIZE]; |
210 | 0 | int n = 0, sum = off; |
211 | 0 |
212 | 0 | n += scnprintf(info,BSIZE,"G=" |
213 | 0 | "Tt:%lu,Te:%lu,Cf:%lu,Af:%lu,Pf:%lu,Ta:%lu," |
214 | 0 | "Lm:%lu,Et:%lu,Ea:%lu,Rt:%lu,Ra:%lu,Rx:%lu,Fp:%lu%c", |
215 | 0 | tmem_stats.total_tmem_ops, tmem_stats.errored_tmem_ops, tmem_stats.failed_copies, |
216 | 0 | tmem_stats.alloc_failed, tmem_stats.alloc_page_failed, tmem_page_list_pages, |
217 | 0 | tmem_stats.low_on_memory, tmem_stats.evicted_pgs, |
218 | 0 | tmem_stats.evict_attempts, tmem_stats.relinq_pgs, tmem_stats.relinq_attempts, |
219 | 0 | tmem_stats.max_evicts_per_relinq, |
220 | 0 | tmem_stats.total_flush_pool, use_long ? ',' : '\n'); |
221 | 0 | if (use_long) |
222 | 0 | n += scnprintf(info+n,BSIZE-n, |
223 | 0 | "Ec:%ld,Em:%ld,Oc:%d,Om:%d,Nc:%d,Nm:%d,Pc:%d,Pm:%d," |
224 | 0 | "Fc:%d,Fm:%d,Sc:%d,Sm:%d,Ep:%lu,Gd:%lu,Zt:%lu,Gz:%lu\n", |
225 | 0 | tmem_global.eph_count, tmem_stats.global_eph_count_max, |
226 | 0 | _atomic_read(tmem_stats.global_obj_count), tmem_stats.global_obj_count_max, |
227 | 0 | _atomic_read(tmem_stats.global_rtree_node_count), tmem_stats.global_rtree_node_count_max, |
228 | 0 | _atomic_read(tmem_stats.global_pgp_count), tmem_stats.global_pgp_count_max, |
229 | 0 | _atomic_read(tmem_stats.global_page_count), tmem_stats.global_page_count_max, |
230 | 0 | _atomic_read(tmem_stats.global_pcd_count), tmem_stats.global_pcd_count_max, |
231 | 0 | tmem_stats.tot_good_eph_puts,tmem_stats.deduped_puts,tmem_stats.pcd_tot_tze_size, |
232 | 0 | tmem_stats.pcd_tot_csize); |
233 | 0 | if ( sum + n >= len ) |
234 | 0 | return sum; |
235 | 0 | if ( !copy_to_guest_offset(buf, off + sum, info, n + 1) ) |
236 | 0 | sum += n; |
237 | 0 | return sum; |
238 | 0 | } |
239 | | |
240 | | static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len, |
241 | | bool use_long) |
242 | 0 | { |
243 | 0 | struct client *client; |
244 | 0 | int off = 0; |
245 | 0 |
246 | 0 | if ( cli_id == TMEM_CLI_ID_NULL ) { |
247 | 0 | off = tmemc_list_global(buf,0,len,use_long); |
248 | 0 | off += tmemc_list_shared(buf,off,len-off,use_long); |
249 | 0 | list_for_each_entry(client,&tmem_global.client_list,client_list) |
250 | 0 | off += tmemc_list_client(client, buf, off, len-off, use_long); |
251 | 0 | off += tmemc_list_global_perf(buf,off,len-off,use_long); |
252 | 0 | } |
253 | 0 | else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL) |
254 | 0 | return -1; |
255 | 0 | else |
256 | 0 | off = tmemc_list_client(client, buf, 0, len, use_long); |
257 | 0 |
258 | 0 | return 0; |
259 | 0 | } |
260 | | |
261 | | static int __tmemc_set_client_info(struct client *client, |
262 | | XEN_GUEST_HANDLE(xen_tmem_client_t) buf) |
263 | 0 | { |
264 | 0 | domid_t cli_id; |
265 | 0 | uint32_t old_weight; |
266 | 0 | xen_tmem_client_t info = { }; |
267 | 0 |
268 | 0 | ASSERT(client); |
269 | 0 |
270 | 0 | if ( copy_from_guest(&info, buf, 1) ) |
271 | 0 | return -EFAULT; |
272 | 0 |
273 | 0 | if ( info.version != TMEM_SPEC_VERSION ) |
274 | 0 | return -EOPNOTSUPP; |
275 | 0 |
276 | 0 | if ( info.maxpools > MAX_POOLS_PER_DOMAIN ) |
277 | 0 | return -ERANGE; |
278 | 0 |
279 | 0 | /* Ignore info.nr_pools. */ |
280 | 0 | cli_id = client->cli_id; |
281 | 0 |
282 | 0 | if ( info.weight != client->info.weight ) |
283 | 0 | { |
284 | 0 | old_weight = client->info.weight; |
285 | 0 | client->info.weight = info.weight; |
286 | 0 | tmem_client_info("tmem: weight set to %d for %s=%d\n", |
287 | 0 | info.weight, tmem_cli_id_str, cli_id); |
288 | 0 | atomic_sub(old_weight,&tmem_global.client_weight_total); |
289 | 0 | atomic_add(client->info.weight,&tmem_global.client_weight_total); |
290 | 0 | } |
291 | 0 |
292 | 0 |
293 | 0 | if ( info.flags.u.compress != client->info.flags.u.compress ) |
294 | 0 | { |
295 | 0 | client->info.flags.u.compress = info.flags.u.compress; |
296 | 0 | tmem_client_info("tmem: compression %s for %s=%d\n", |
297 | 0 | info.flags.u.compress ? "enabled" : "disabled", |
298 | 0 | tmem_cli_id_str,cli_id); |
299 | 0 | } |
300 | 0 | return 0; |
301 | 0 | } |
302 | | |
303 | | static int tmemc_set_client_info(domid_t cli_id, |
304 | | XEN_GUEST_HANDLE(xen_tmem_client_t) info) |
305 | 0 | { |
306 | 0 | struct client *client; |
307 | 0 | int ret = -ENOENT; |
308 | 0 |
309 | 0 | if ( cli_id == TMEM_CLI_ID_NULL ) |
310 | 0 | { |
311 | 0 | list_for_each_entry(client,&tmem_global.client_list,client_list) |
312 | 0 | { |
313 | 0 | ret = __tmemc_set_client_info(client, info); |
314 | 0 | if (ret) |
315 | 0 | break; |
316 | 0 | } |
317 | 0 | } |
318 | 0 | else |
319 | 0 | { |
320 | 0 | client = tmem_client_from_cli_id(cli_id); |
321 | 0 | if ( client ) |
322 | 0 | ret = __tmemc_set_client_info(client, info); |
323 | 0 | } |
324 | 0 | return ret; |
325 | 0 | } |
326 | | |
327 | | static int tmemc_get_client_info(int cli_id, |
328 | | XEN_GUEST_HANDLE(xen_tmem_client_t) info) |
329 | 0 | { |
330 | 0 | struct client *client = tmem_client_from_cli_id(cli_id); |
331 | 0 |
332 | 0 | if ( client ) |
333 | 0 | { |
334 | 0 | if ( copy_to_guest(info, &client->info, 1) ) |
335 | 0 | return -EFAULT; |
336 | 0 | } |
337 | 0 | else |
338 | 0 | { |
339 | 0 | static const xen_tmem_client_t generic = { |
340 | 0 | .version = TMEM_SPEC_VERSION, |
341 | 0 | .maxpools = MAX_POOLS_PER_DOMAIN |
342 | 0 | }; |
343 | 0 |
344 | 0 | if ( copy_to_guest(info, &generic, 1) ) |
345 | 0 | return -EFAULT; |
346 | 0 | } |
347 | 0 |
348 | 0 | return 0; |
349 | 0 | } |
350 | | |
351 | | static int tmemc_get_pool(int cli_id, |
352 | | XEN_GUEST_HANDLE(xen_tmem_pool_info_t) pools, |
353 | | uint32_t len) |
354 | 0 | { |
355 | 0 | struct client *client = tmem_client_from_cli_id(cli_id); |
356 | 0 | unsigned int i, idx; |
357 | 0 | int rc = 0; |
358 | 0 | unsigned int nr = len / sizeof(xen_tmem_pool_info_t); |
359 | 0 |
360 | 0 | if ( len % sizeof(xen_tmem_pool_info_t) ) |
361 | 0 | return -EINVAL; |
362 | 0 |
363 | 0 | if ( nr > MAX_POOLS_PER_DOMAIN ) |
364 | 0 | return -E2BIG; |
365 | 0 |
366 | 0 | if ( !guest_handle_okay(pools, nr) ) |
367 | 0 | return -EINVAL; |
368 | 0 |
369 | 0 | if ( !client ) |
370 | 0 | return -EINVAL; |
371 | 0 |
372 | 0 | for ( idx = 0, i = 0; i < MAX_POOLS_PER_DOMAIN; i++ ) |
373 | 0 | { |
374 | 0 | struct tmem_pool *pool = client->pools[i]; |
375 | 0 | xen_tmem_pool_info_t out; |
376 | 0 |
377 | 0 | if ( pool == NULL ) |
378 | 0 | continue; |
379 | 0 |
380 | 0 | out.flags.raw = (pool->persistent ? TMEM_POOL_PERSIST : 0) | |
381 | 0 | (pool->shared ? TMEM_POOL_SHARED : 0) | |
382 | 0 | (POOL_PAGESHIFT << TMEM_POOL_PAGESIZE_SHIFT) | |
383 | 0 | (TMEM_SPEC_VERSION << TMEM_POOL_VERSION_SHIFT); |
384 | 0 | out.n_pages = _atomic_read(pool->pgp_count); |
385 | 0 | out.uuid[0] = pool->uuid[0]; |
386 | 0 | out.uuid[1] = pool->uuid[1]; |
387 | 0 | out.id = i; |
388 | 0 |
389 | 0 | /* N.B. 'idx' != 'i'. */ |
390 | 0 | if ( __copy_to_guest_offset(pools, idx, &out, 1) ) |
391 | 0 | { |
392 | 0 | rc = -EFAULT; |
393 | 0 | break; |
394 | 0 | } |
395 | 0 | idx++; |
396 | 0 | /* Don't try to put more than what was requested. */ |
397 | 0 | if ( idx >= nr ) |
398 | 0 | break; |
399 | 0 | } |
400 | 0 |
401 | 0 | /* And how many we have processed. */ |
402 | 0 | return rc ? : idx; |
403 | 0 | } |
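For illustration, tmemc_get_pool() above packs the pool attributes into out.flags.raw as bit fields using TMEM_POOL_PERSIST, TMEM_POOL_SHARED, TMEM_POOL_PAGESIZE_SHIFT and TMEM_POOL_VERSION_SHIFT. A minimal stand-alone sketch that unpacks such a word is below; the numeric shift/mask values are assumptions meant to mirror what xen/include/public/tmem.h defines and should be checked against the real header, and the raw value is made up:

    /* Unpack a tmem pool flags word of the kind built by tmemc_get_pool().
     * The constants below are assumed values; verify against public/tmem.h. */
    #include <stdint.h>
    #include <stdio.h>

    #define TMEM_POOL_PERSIST         1
    #define TMEM_POOL_SHARED          2
    #define TMEM_POOL_PAGESIZE_SHIFT  4
    #define TMEM_POOL_PAGESIZE_MASK   0xf
    #define TMEM_POOL_VERSION_SHIFT   24
    #define TMEM_POOL_VERSION_MASK    0xff

    int main(void)
    {
        /* Hypothetical value: persistent, page-size field 0, spec version 1. */
        uint32_t raw = TMEM_POOL_PERSIST | (0u << TMEM_POOL_PAGESIZE_SHIFT) |
                       (1u << TMEM_POOL_VERSION_SHIFT);

        printf("persistent: %d\n", !!(raw & TMEM_POOL_PERSIST));
        printf("shared:     %d\n", !!(raw & TMEM_POOL_SHARED));
        printf("pageshift:  %u\n", (raw >> TMEM_POOL_PAGESIZE_SHIFT) & TMEM_POOL_PAGESIZE_MASK);
        printf("version:    %u\n", (raw >> TMEM_POOL_VERSION_SHIFT) & TMEM_POOL_VERSION_MASK);
        return 0;
    }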
404 | | |
405 | | static int tmemc_set_pools(int cli_id, |
406 | | XEN_GUEST_HANDLE(xen_tmem_pool_info_t) pools, |
407 | | uint32_t len) |
408 | 0 | { |
409 | 0 | unsigned int i; |
410 | 0 | int rc = 0; |
411 | 0 | unsigned int nr = len / sizeof(xen_tmem_pool_info_t); |
412 | 0 | struct client *client = tmem_client_from_cli_id(cli_id); |
413 | 0 |
414 | 0 | if ( len % sizeof(xen_tmem_pool_info_t) ) |
415 | 0 | return -EINVAL; |
416 | 0 |
417 | 0 | if ( nr > MAX_POOLS_PER_DOMAIN ) |
418 | 0 | return -E2BIG; |
419 | 0 |
420 | 0 | if ( !guest_handle_okay(pools, nr) ) |
421 | 0 | return -EINVAL; |
422 | 0 |
423 | 0 | if ( !client ) |
424 | 0 | { |
425 | 0 | client = client_create(cli_id); |
426 | 0 | if ( !client ) |
427 | 0 | return -ENOMEM; |
428 | 0 | } |
429 | 0 | for ( i = 0; i < nr; i++ ) |
430 | 0 | { |
431 | 0 | xen_tmem_pool_info_t pool; |
432 | 0 |
433 | 0 | if ( __copy_from_guest_offset(&pool, pools, i, 1 ) ) |
434 | 0 | return -EFAULT; |
435 | 0 |
436 | 0 | if ( pool.n_pages ) |
437 | 0 | return -EINVAL; |
438 | 0 |
439 | 0 | rc = do_tmem_new_pool(cli_id, pool.id, pool.flags.raw, |
440 | 0 | pool.uuid[0], pool.uuid[1]); |
441 | 0 | if ( rc < 0 ) |
442 | 0 | break; |
443 | 0 |
444 | 0 | pool.id = rc; |
445 | 0 | if ( __copy_to_guest_offset(pools, i, &pool, 1) ) |
446 | 0 | return -EFAULT; |
447 | 0 | } |
448 | 0 |
449 | 0 | /* And how many we have processed. */ |
450 | 0 | return rc ? : i; |
451 | 0 | } |
452 | | |
453 | | static int tmemc_auth_pools(int cli_id, |
454 | | XEN_GUEST_HANDLE(xen_tmem_pool_info_t) pools, |
455 | | uint32_t len) |
456 | 0 | { |
457 | 0 | unsigned int i; |
458 | 0 | int rc = 0; |
459 | 0 | unsigned int nr = len / sizeof(xen_tmem_pool_info_t); |
460 | 0 | struct client *client = tmem_client_from_cli_id(cli_id); |
461 | 0 |
462 | 0 | if ( len % sizeof(xen_tmem_pool_info_t) ) |
463 | 0 | return -EINVAL; |
464 | 0 |
465 | 0 | if ( nr > MAX_POOLS_PER_DOMAIN ) |
466 | 0 | return -E2BIG; |
467 | 0 |
468 | 0 | if ( !guest_handle_okay(pools, nr) ) |
469 | 0 | return -EINVAL; |
470 | 0 |
471 | 0 | if ( !client ) |
472 | 0 | { |
473 | 0 | client = client_create(cli_id); |
474 | 0 | if ( !client ) |
475 | 0 | return -ENOMEM; |
476 | 0 | } |
477 | 0 |
478 | 0 | for ( i = 0; i < nr; i++ ) |
479 | 0 | { |
480 | 0 | xen_tmem_pool_info_t pool; |
481 | 0 |
482 | 0 | if ( __copy_from_guest_offset(&pool, pools, i, 1 ) ) |
483 | 0 | return -EFAULT; |
484 | 0 |
485 | 0 | if ( pool.n_pages ) |
486 | 0 | return -EINVAL; |
487 | 0 |
488 | 0 | rc = tmemc_shared_pool_auth(cli_id, pool.uuid[0], pool.uuid[1], |
489 | 0 | pool.flags.u.auth); |
490 | 0 |
491 | 0 | if ( rc < 0 ) |
492 | 0 | break; |
493 | 0 |
494 | 0 | } |
495 | 0 |
496 | 0 | /* And how many we have processed. */ |
497 | 0 | return rc ? : i; |
498 | 0 | } |
499 | | |
500 | | int tmem_control(struct xen_sysctl_tmem_op *op) |
501 | 0 | { |
502 | 0 | int ret; |
503 | 0 | uint32_t cmd = op->cmd; |
504 | 0 |
505 | 0 | if ( op->pad != 0 ) |
506 | 0 | return -EINVAL; |
507 | 0 |
508 | 0 | write_lock(&tmem_rwlock); |
509 | 0 |
510 | 0 | switch (cmd) |
511 | 0 | { |
512 | 0 | case XEN_SYSCTL_TMEM_OP_THAW: |
513 | 0 | case XEN_SYSCTL_TMEM_OP_FREEZE: |
514 | 0 | case XEN_SYSCTL_TMEM_OP_DESTROY: |
515 | 0 | ret = tmemc_freeze_pools(op->cli_id, cmd); |
516 | 0 | break; |
517 | 0 | case XEN_SYSCTL_TMEM_OP_FLUSH: |
518 | 0 | ret = tmemc_flush_mem(op->cli_id, op->arg); |
519 | 0 | break; |
520 | 0 | case XEN_SYSCTL_TMEM_OP_LIST: |
521 | 0 | ret = tmemc_list(op->cli_id, |
522 | 0 | guest_handle_cast(op->u.buf, char), op->len, op->arg); |
523 | 0 | break; |
524 | 0 | case XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO: |
525 | 0 | ret = tmemc_set_client_info(op->cli_id, op->u.client); |
526 | 0 | break; |
527 | 0 | case XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB: |
528 | 0 | ret = tmem_freeable_pages() >> (20 - PAGE_SHIFT); |
529 | 0 | break; |
530 | 0 | case XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO: |
531 | 0 | ret = tmemc_get_client_info(op->cli_id, op->u.client); |
532 | 0 | break; |
533 | 0 | case XEN_SYSCTL_TMEM_OP_GET_POOLS: |
534 | 0 | ret = tmemc_get_pool(op->cli_id, op->u.pool, op->len); |
535 | 0 | break; |
536 | 0 | case XEN_SYSCTL_TMEM_OP_SET_POOLS: /* TMEM_RESTORE_NEW */ |
537 | 0 | ret = tmemc_set_pools(op->cli_id, op->u.pool, op->len); |
538 | 0 | break; |
539 | 0 | case XEN_SYSCTL_TMEM_OP_SET_AUTH: /* TMEM_AUTH */ |
540 | 0 | ret = tmemc_auth_pools(op->cli_id, op->u.pool, op->len); |
541 | 0 | break; |
542 | 0 | default: |
543 | 0 | ret = do_tmem_control(op); |
544 | 0 | break; |
545 | 0 | } |
546 | 0 |
547 | 0 | write_unlock(&tmem_rwlock); |
548 | 0 |
549 | 0 | return ret; |
550 | 0 | } |
551 | | |
552 | | /* |
553 | | * Local variables: |
554 | | * mode: C |
555 | | * c-file-style: "BSD" |
556 | | * c-basic-offset: 4 |
557 | | * tab-width: 4 |
558 | | * indent-tabs-mode: nil |
559 | | * End: |
560 | | */ |