Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/common/sysctl.c
Line
Count
Source (jump to first uncovered line)
1
/******************************************************************************
2
 * sysctl.c
3
 * 
4
 * System management operations. For use by node control stack.
5
 * 
6
 * Copyright (c) 2002-2006, K Fraser
7
 */
8
9
#include <xen/types.h>
10
#include <xen/lib.h>
11
#include <xen/mm.h>
12
#include <xen/sched.h>
13
#include <xen/domain.h>
14
#include <xen/event.h>
15
#include <xen/domain_page.h>
16
#include <xen/tmem.h>
17
#include <xen/trace.h>
18
#include <xen/console.h>
19
#include <xen/iocap.h>
20
#include <xen/guest_access.h>
21
#include <xen/keyhandler.h>
22
#include <asm/current.h>
23
#include <xen/hypercall.h>
24
#include <public/sysctl.h>
25
#include <asm/numa.h>
26
#include <xen/nodemask.h>
27
#include <xsm/xsm.h>
28
#include <xen/pmstat.h>
29
#include <xen/livepatch.h>
30
#include <xen/gcov.h>
31
32
long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
33
6
{
34
6
    long ret = 0;
35
6
    int copyback = -1;
36
6
    struct xen_sysctl curop, *op = &curop;
37
6
    static DEFINE_SPINLOCK(sysctl_lock);
38
6
39
6
    if ( copy_from_guest(op, u_sysctl, 1) )
40
0
        return -EFAULT;
41
6
42
6
    if ( op->interface_version != XEN_SYSCTL_INTERFACE_VERSION )
43
0
        return -EACCES;
44
6
45
6
    ret = xsm_sysctl(XSM_PRIV, op->cmd);
46
6
    if ( ret )
47
0
        return ret;
48
6
49
6
    /*
50
6
     * Trylock here avoids deadlock with an existing sysctl critical section
51
6
     * which might (for some current or future reason) want to synchronise
52
6
     * with this vcpu.
53
6
     */
54
6
    while ( !spin_trylock(&sysctl_lock) )
55
0
        if ( hypercall_preempt_check() )
56
0
            return hypercall_create_continuation(
57
0
                __HYPERVISOR_sysctl, "h", u_sysctl);
58
6
59
6
    switch ( op->cmd )
60
6
    {
61
0
    case XEN_SYSCTL_readconsole:
62
0
        ret = xsm_readconsole(XSM_HOOK, op->u.readconsole.clear);
63
0
        if ( ret )
64
0
            break;
65
0
66
0
        ret = read_console_ring(&op->u.readconsole);
67
0
        break;
68
0
69
0
    case XEN_SYSCTL_tbuf_op:
70
0
        ret = tb_control(&op->u.tbuf_op);
71
0
        break;
72
0
73
0
    case XEN_SYSCTL_sched_id:
74
0
        op->u.sched_id.sched_id = sched_id();
75
0
        break;
76
0
77
4
    case XEN_SYSCTL_getdomaininfolist:
78
4
    { 
79
4
        struct domain *d;
80
4
        struct xen_domctl_getdomaininfo info = { 0 };
81
4
        u32 num_domains = 0;
82
4
83
4
        rcu_read_lock(&domlist_read_lock);
84
4
85
4
        for_each_domain ( d )
86
4
        {
87
4
            if ( d->domain_id < op->u.getdomaininfolist.first_domain )
88
0
                continue;
89
4
            if ( num_domains == op->u.getdomaininfolist.max_domains )
90
0
                break;
91
4
92
4
            ret = xsm_getdomaininfo(XSM_HOOK, d);
93
4
            if ( ret )
94
0
                continue;
95
4
96
4
            getdomaininfo(d, &info);
97
4
98
4
            if ( copy_to_guest_offset(op->u.getdomaininfolist.buffer,
99
4
                                      num_domains, &info, 1) )
100
0
            {
101
0
                ret = -EFAULT;
102
0
                break;
103
0
            }
104
4
            
105
4
            num_domains++;
106
4
        }
107
4
        
108
4
        rcu_read_unlock(&domlist_read_lock);
109
4
        
110
4
        if ( ret != 0 )
111
0
            break;
112
4
        
113
4
        op->u.getdomaininfolist.num_domains = num_domains;
114
4
    }
115
4
    break;
116
4
117
4
#ifdef CONFIG_PERF_COUNTERS
118
    case XEN_SYSCTL_perfc_op:
119
        ret = perfc_control(&op->u.perfc_op);
120
        break;
121
#endif
122
4
123
4
#ifdef CONFIG_LOCK_PROFILE
124
    case XEN_SYSCTL_lockprof_op:
125
        ret = spinlock_profile_control(&op->u.lockprof_op);
126
        break;
127
#endif
128
0
    case XEN_SYSCTL_debug_keys:
129
0
    {
130
0
        char c;
131
0
        uint32_t i;
132
0
133
0
        ret = -EFAULT;
134
0
        for ( i = 0; i < op->u.debug_keys.nr_keys; i++ )
135
0
        {
136
0
            if ( copy_from_guest_offset(&c, op->u.debug_keys.keys, i, 1) )
137
0
                goto out;
138
0
            handle_keypress(c, guest_cpu_user_regs());
139
0
        }
140
0
        ret = 0;
141
0
        copyback = 0;
142
0
    }
143
0
    break;
144
0
145
0
    case XEN_SYSCTL_getcpuinfo:
146
0
    {
147
0
        uint32_t i, nr_cpus;
148
0
        struct xen_sysctl_cpuinfo cpuinfo = { 0 };
149
0
150
0
        nr_cpus = min(op->u.getcpuinfo.max_cpus, nr_cpu_ids);
151
0
152
0
        ret = -EFAULT;
153
0
        for ( i = 0; i < nr_cpus; i++ )
154
0
        {
155
0
            cpuinfo.idletime = get_cpu_idle_time(i);
156
0
157
0
            if ( copy_to_guest_offset(op->u.getcpuinfo.info, i, &cpuinfo, 1) )
158
0
                goto out;
159
0
        }
160
0
161
0
        op->u.getcpuinfo.nr_cpus = i;
162
0
        ret = 0;
163
0
    }
164
0
    break;
165
0
166
0
    case XEN_SYSCTL_availheap:
167
0
        op->u.availheap.avail_bytes = avail_domheap_pages_region(
168
0
            op->u.availheap.node,
169
0
            op->u.availheap.min_bitwidth,
170
0
            op->u.availheap.max_bitwidth);
171
0
        op->u.availheap.avail_bytes <<= PAGE_SHIFT;
172
0
        break;
173
0
174
0
#if defined (CONFIG_ACPI) && defined (CONFIG_HAS_CPUFREQ)
175
0
    case XEN_SYSCTL_get_pmstat:
176
0
        ret = do_get_pm_info(&op->u.get_pmstat);
177
0
        break;
178
0
179
0
    case XEN_SYSCTL_pm_op:
180
0
        ret = do_pm_op(&op->u.pm_op);
181
0
        if ( ret == -EAGAIN )
182
0
            copyback = 1;
183
0
        break;
184
0
#endif
185
0
186
0
    case XEN_SYSCTL_page_offline_op:
187
0
    {
188
0
        uint32_t *status, *ptr;
189
0
        unsigned long pfn;
190
0
191
0
        ret = xsm_page_offline(XSM_HOOK, op->u.page_offline.cmd);
192
0
        if ( ret )
193
0
            break;
194
0
195
0
        ptr = status = xmalloc_bytes( sizeof(uint32_t) *
196
0
                                (op->u.page_offline.end -
197
0
                                  op->u.page_offline.start + 1));
198
0
        if ( !status )
199
0
        {
200
0
            dprintk(XENLOG_WARNING, "Out of memory for page offline op\n");
201
0
            ret = -ENOMEM;
202
0
            break;
203
0
        }
204
0
205
0
        memset(status, PG_OFFLINE_INVALID, sizeof(uint32_t) *
206
0
                      (op->u.page_offline.end - op->u.page_offline.start + 1));
207
0
208
0
        for ( pfn = op->u.page_offline.start;
209
0
              pfn <= op->u.page_offline.end;
210
0
              pfn ++ )
211
0
        {
212
0
            switch ( op->u.page_offline.cmd )
213
0
            {
214
0
                /* Shall revert here if failed, or leave caller do it? */
215
0
                case sysctl_page_offline:
216
0
                    ret = offline_page(pfn, 0, ptr++);
217
0
                    break;
218
0
                case sysctl_page_online:
219
0
                    ret = online_page(pfn, ptr++);
220
0
                    break;
221
0
                case sysctl_query_page_offline:
222
0
                    ret = query_page_offline(pfn, ptr++);
223
0
                    break;
224
0
                default:
225
0
                    ret = -EINVAL;
226
0
                    break;
227
0
            }
228
0
229
0
            if (ret)
230
0
                break;
231
0
        }
232
0
233
0
        if ( copy_to_guest(
234
0
                 op->u.page_offline.status, status,
235
0
                 op->u.page_offline.end - op->u.page_offline.start + 1) )
236
0
            ret = -EFAULT;
237
0
238
0
        xfree(status);
239
0
        copyback = 0;
240
0
    }
241
0
    break;
242
0
243
0
    case XEN_SYSCTL_cpupool_op:
244
0
        ret = cpupool_do_sysctl(&op->u.cpupool_op);
245
0
        break;
246
0
247
0
    case XEN_SYSCTL_scheduler_op:
248
0
        ret = sched_adjust_global(&op->u.scheduler_op);
249
0
        break;
250
0
251
0
    case XEN_SYSCTL_physinfo:
252
0
    {
253
0
        struct xen_sysctl_physinfo *pi = &op->u.physinfo;
254
0
255
0
        memset(pi, 0, sizeof(*pi));
256
0
        pi->threads_per_core =
257
0
            cpumask_weight(per_cpu(cpu_sibling_mask, 0));
258
0
        pi->cores_per_socket =
259
0
            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
260
0
        pi->nr_cpus = num_online_cpus();
261
0
        pi->nr_nodes = num_online_nodes();
262
0
        pi->max_node_id = MAX_NUMNODES-1;
263
0
        pi->max_cpu_id = nr_cpu_ids - 1;
264
0
        pi->total_pages = total_pages;
265
0
        /* Protected by lock */
266
0
        get_outstanding_claims(&pi->free_pages, &pi->outstanding_pages);
267
0
        pi->scrub_pages = 0;
268
0
        pi->cpu_khz = cpu_khz;
269
0
        pi->max_mfn = get_upper_mfn_bound();
270
0
        arch_do_physinfo(pi);
271
0
272
0
        if ( copy_to_guest(u_sysctl, op, 1) )
273
0
            ret = -EFAULT;
274
0
    }
275
0
    break;
276
0
277
0
    case XEN_SYSCTL_numainfo:
278
0
    {
279
0
        unsigned int i, j, num_nodes;
280
0
        struct xen_sysctl_numainfo *ni = &op->u.numainfo;
281
0
        bool_t do_meminfo = !guest_handle_is_null(ni->meminfo);
282
0
        bool_t do_distance = !guest_handle_is_null(ni->distance);
283
0
284
0
        num_nodes = last_node(node_online_map) + 1;
285
0
286
0
        if ( do_meminfo || do_distance )
287
0
        {
288
0
            struct xen_sysctl_meminfo meminfo = { };
289
0
290
0
            if ( num_nodes > ni->num_nodes )
291
0
                num_nodes = ni->num_nodes;
292
0
            for ( i = 0; i < num_nodes; ++i )
293
0
            {
294
0
                static uint32_t distance[MAX_NUMNODES];
295
0
296
0
                if ( do_meminfo )
297
0
                {
298
0
                    if ( node_online(i) )
299
0
                    {
300
0
                        meminfo.memsize = node_spanned_pages(i) << PAGE_SHIFT;
301
0
                        meminfo.memfree = avail_node_heap_pages(i) << PAGE_SHIFT;
302
0
                    }
303
0
                    else
304
0
                        meminfo.memsize = meminfo.memfree = XEN_INVALID_MEM_SZ;
305
0
306
0
                    if ( copy_to_guest_offset(ni->meminfo, i, &meminfo, 1) )
307
0
                    {
308
0
                        ret = -EFAULT;
309
0
                        break;
310
0
                    }
311
0
                }
312
0
313
0
                if ( do_distance )
314
0
                {
315
0
                    for ( j = 0; j < num_nodes; j++ )
316
0
                    {
317
0
                        distance[j] = __node_distance(i, j);
318
0
                        if ( distance[j] == NUMA_NO_DISTANCE )
319
0
                            distance[j] = XEN_INVALID_NODE_DIST;
320
0
                    }
321
0
322
0
                    if ( copy_to_guest_offset(ni->distance, i * num_nodes,
323
0
                                              distance, num_nodes) )
324
0
                    {
325
0
                        ret = -EFAULT;
326
0
                        break;
327
0
                    }
328
0
                }
329
0
            }
330
0
        }
331
0
        else
332
0
            i = num_nodes;
333
0
334
0
        if ( !ret && (ni->num_nodes != i) )
335
0
        {
336
0
            ni->num_nodes = i;
337
0
            if ( __copy_field_to_guest(u_sysctl, op,
338
0
                                       u.numainfo.num_nodes) )
339
0
            {
340
0
                ret = -EFAULT;
341
0
                break;
342
0
            }
343
0
        }
344
0
    }
345
0
    break;
346
0
347
0
    case XEN_SYSCTL_cputopoinfo:
348
0
    {
349
0
        unsigned int i, num_cpus;
350
0
        struct xen_sysctl_cputopoinfo *ti = &op->u.cputopoinfo;
351
0
352
0
        num_cpus = cpumask_last(&cpu_online_map) + 1;
353
0
        if ( !guest_handle_is_null(ti->cputopo) )
354
0
        {
355
0
            struct xen_sysctl_cputopo cputopo = { };
356
0
357
0
            if ( num_cpus > ti->num_cpus )
358
0
                num_cpus = ti->num_cpus;
359
0
            for ( i = 0; i < num_cpus; ++i )
360
0
            {
361
0
                if ( cpu_present(i) )
362
0
                {
363
0
                    cputopo.core = cpu_to_core(i);
364
0
                    cputopo.socket = cpu_to_socket(i);
365
0
                    cputopo.node = cpu_to_node(i);
366
0
                    if ( cputopo.node == NUMA_NO_NODE )
367
0
                        cputopo.node = XEN_INVALID_NODE_ID;
368
0
                }
369
0
                else
370
0
                {
371
0
                    cputopo.core = XEN_INVALID_CORE_ID;
372
0
                    cputopo.socket = XEN_INVALID_SOCKET_ID;
373
0
                    cputopo.node = XEN_INVALID_NODE_ID;
374
0
                }
375
0
376
0
                if ( copy_to_guest_offset(ti->cputopo, i, &cputopo, 1) )
377
0
                {
378
0
                    ret = -EFAULT;
379
0
                    break;
380
0
                }
381
0
            }
382
0
        }
383
0
        else
384
0
            i = num_cpus;
385
0
386
0
        if ( !ret && (ti->num_cpus != i) )
387
0
        {
388
0
            ti->num_cpus = i;
389
0
            if ( __copy_field_to_guest(u_sysctl, op,
390
0
                                       u.cputopoinfo.num_cpus) )
391
0
            {
392
0
                ret = -EFAULT;
393
0
                break;
394
0
            }
395
0
        }
396
0
    }
397
0
    break;
398
0
399
0
#ifdef CONFIG_GCOV
400
2
    case XEN_SYSCTL_gcov_op:
401
2
        ret = sysctl_gcov_op(&op->u.gcov_op);
402
2
        copyback = 1;
403
2
        break;
404
0
#endif
405
0
406
0
#ifdef CONFIG_HAS_PCI
407
0
    case XEN_SYSCTL_pcitopoinfo:
408
0
    {
409
0
        struct xen_sysctl_pcitopoinfo *ti = &op->u.pcitopoinfo;
410
0
        unsigned int i = 0;
411
0
412
0
        if ( guest_handle_is_null(ti->devs) ||
413
0
             guest_handle_is_null(ti->nodes) )
414
0
        {
415
0
            ret = -EINVAL;
416
0
            break;
417
0
        }
418
0
419
0
        while ( i < ti->num_devs )
420
0
        {
421
0
            physdev_pci_device_t dev;
422
0
            uint32_t node;
423
0
            const struct pci_dev *pdev;
424
0
425
0
            if ( copy_from_guest_offset(&dev, ti->devs, i, 1) )
426
0
            {
427
0
                ret = -EFAULT;
428
0
                break;
429
0
            }
430
0
431
0
            pcidevs_lock();
432
0
            pdev = pci_get_pdev(dev.seg, dev.bus, dev.devfn);
433
0
            if ( !pdev )
434
0
                node = XEN_INVALID_DEV;
435
0
            else if ( pdev->node == NUMA_NO_NODE )
436
0
                node = XEN_INVALID_NODE_ID;
437
0
            else
438
0
                node = pdev->node;
439
0
            pcidevs_unlock();
440
0
441
0
            if ( copy_to_guest_offset(ti->nodes, i, &node, 1) )
442
0
            {
443
0
                ret = -EFAULT;
444
0
                break;
445
0
            }
446
0
447
0
            if ( (++i > 0x3f) && hypercall_preempt_check() )
448
0
                break;
449
0
        }
450
0
451
0
        if ( !ret && (ti->num_devs != i) )
452
0
        {
453
0
            ti->num_devs = i;
454
0
            if ( __copy_field_to_guest(u_sysctl, op, u.pcitopoinfo.num_devs) )
455
0
                ret = -EFAULT;
456
0
        }
457
0
        break;
458
0
    }
459
0
#endif
460
0
461
0
    case XEN_SYSCTL_tmem_op:
462
0
        ret = tmem_control(&op->u.tmem_op);
463
0
        break;
464
0
465
0
    case XEN_SYSCTL_livepatch_op:
466
0
        ret = livepatch_op(&op->u.livepatch);
467
0
        if ( ret != -ENOSYS && ret != -EOPNOTSUPP )
468
0
            copyback = 1;
469
0
        break;
470
0
471
0
    case XEN_SYSCTL_set_parameter:
472
0
    {
473
0
#define XEN_SET_PARAMETER_MAX_SIZE 1023
474
0
        char *params;
475
0
476
0
        if ( op->u.set_parameter.pad[0] || op->u.set_parameter.pad[1] ||
477
0
             op->u.set_parameter.pad[2] )
478
0
        {
479
0
            ret = -EINVAL;
480
0
            break;
481
0
        }
482
0
        if ( op->u.set_parameter.size > XEN_SET_PARAMETER_MAX_SIZE )
483
0
        {
484
0
            ret = -E2BIG;
485
0
            break;
486
0
        }
487
0
        params = xmalloc_bytes(op->u.set_parameter.size + 1);
488
0
        if ( !params )
489
0
        {
490
0
            ret = -ENOMEM;
491
0
            break;
492
0
        }
493
0
        if ( copy_from_guest(params, op->u.set_parameter.params,
494
0
                             op->u.set_parameter.size) )
495
0
            ret = -EFAULT;
496
0
        else
497
0
        {
498
0
            params[op->u.set_parameter.size] = 0;
499
0
            ret = runtime_parse(params);
500
0
        }
501
0
502
0
        xfree(params);
503
0
504
0
        break;
505
0
    }
506
0
507
0
    default:
508
0
        ret = arch_do_sysctl(op, u_sysctl);
509
0
        copyback = 0;
510
0
        break;
511
6
    }
512
6
513
5
 out:
514
5
    spin_unlock(&sysctl_lock);
515
5
516
5
    if ( copyback && (!ret || copyback > 0) &&
517
5
         __copy_to_guest(u_sysctl, op, 1) )
518
0
        ret = -EFAULT;
519
5
520
5
    return ret;
521
6
}
522
523
/*
524
 * Local variables:
525
 * mode: C
526
 * c-file-style: "BSD"
527
 * c-basic-offset: 4
528
 * tab-width: 4
529
 * indent-tabs-mode: nil
530
 * End:
531
 */