Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/drivers/acpi/pmstat.c
Every executable line below has an execution count of 0: nothing in this file was exercised during the measured run.
/*****************************************************************************
#  pmstat.c - Power Management statistic information (Px/Cx/Tx, etc.)
#
#  Copyright (c) 2008, Liu Jinsong <jinsong.liu@intel.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; If not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in the
# file called LICENSE.
#
*****************************************************************************/

#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <asm/current.h>
#include <public/xen.h>
#include <xen/cpumask.h>
#include <asm/processor.h>
#include <xen/percpu.h>
#include <xen/domain.h>
#include <xen/acpi.h>

#include <public/sysctl.h>
#include <acpi/cpufreq/cpufreq.h>
#include <xen/pmstat.h>

DEFINE_PER_CPU_READ_MOSTLY(struct pm_px *, cpufreq_statistic_data);

/*
 * Get PM statistic info
 */
int do_get_pm_info(struct xen_sysctl_get_pmstat *op)
{
    int ret = 0;
    const struct processor_pminfo *pmpt;

    if ( !op || (op->cpuid >= nr_cpu_ids) || !cpu_online(op->cpuid) )
        return -EINVAL;
    pmpt = processor_pminfo[op->cpuid];

    switch ( op->type & PMSTAT_CATEGORY_MASK )
    {
    case PMSTAT_CX:
        if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_CX) )
            return -ENODEV;
        break;
    case PMSTAT_PX:
        if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) )
            return -ENODEV;
        if ( !cpufreq_driver )
            return -ENODEV;
        if ( !pmpt || !(pmpt->perf.init & XEN_PX_INIT) )
            return -EINVAL;
        break;
    default:
        return -ENODEV;
    }

    switch ( op->type )
    {
    case PMSTAT_get_max_px:
    {
        op->u.getpx.total = pmpt->perf.state_count;
        break;
    }

    case PMSTAT_get_pxstat:
    {
        uint32_t ct;
        struct pm_px *pxpt;
        spinlock_t *cpufreq_statistic_lock =
                   &per_cpu(cpufreq_statistic_lock, op->cpuid);

        spin_lock(cpufreq_statistic_lock);

        pxpt = per_cpu(cpufreq_statistic_data, op->cpuid);
        if ( !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt )
        {
            spin_unlock(cpufreq_statistic_lock);
            return -ENODATA;
        }

        pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.platform_limit;

        cpufreq_residency_update(op->cpuid, pxpt->u.cur);

        ct = pmpt->perf.state_count;
        if ( copy_to_guest(op->u.getpx.trans_pt, pxpt->u.trans_pt, ct * ct) )
        {
            spin_unlock(cpufreq_statistic_lock);
            ret = -EFAULT;
            break;
        }

        if ( copy_to_guest(op->u.getpx.pt, pxpt->u.pt, ct) )
        {
            spin_unlock(cpufreq_statistic_lock);
            ret = -EFAULT;
            break;
        }

        op->u.getpx.total = pxpt->u.total;
        op->u.getpx.usable = pxpt->u.usable;
        op->u.getpx.last = pxpt->u.last;
        op->u.getpx.cur = pxpt->u.cur;

        spin_unlock(cpufreq_statistic_lock);

        break;
    }

    case PMSTAT_reset_pxstat:
    {
        cpufreq_statistic_reset(op->cpuid);
        break;
    }

    case PMSTAT_get_max_cx:
    {
        op->u.getcx.nr = pmstat_get_cx_nr(op->cpuid);
        ret = 0;
        break;
    }

    case PMSTAT_get_cxstat:
    {
        ret = pmstat_get_cx_stat(op->cpuid, &op->u.getcx);
        break;
    }

    case PMSTAT_reset_cxstat:
    {
        ret = pmstat_reset_cx_stat(op->cpuid);
        break;
    }

    default:
        printk("undefined sub-hypercall @ do_get_pm_info\n");
        ret = -ENOSYS;
        break;
    }

    return ret;
}
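
For orientation, these PMSTAT_* sub-ops are reached from dom0 through the XEN_SYSCTL_get_pmstat sysctl. Below is a minimal caller-side sketch, assuming a hypothetical get_pmstat_sysctl() wrapper (and a cpu variable) in place of the real hypercall plumbing; the real toolstack uses libxc helpers:

    struct xen_sysctl_get_pmstat op = { .cpuid = cpu };
    uint32_t total;

    op.type = PMSTAT_get_max_px;
    if ( get_pmstat_sysctl(&op) == 0 )      /* hypothetical hypercall wrapper */
    {
        total = op.u.getpx.total;           /* == pmpt->perf.state_count */

        /* PMSTAT_get_pxstat copies 'total' entries through u.getpx.pt and
         * total * total transition counts through u.getpx.trans_pt, so both
         * guest handles must be backed by buffers at least that large. */
        op.type = PMSTAT_get_pxstat;
        /* ... set up op.u.getpx.pt / op.u.getpx.trans_pt, then call again. */
    }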

/*
 * 1. Get PM parameter
 * 2. Provide user PM control
 */

/* Build a space-separated list of the registered governor names in the
 * caller-supplied buffer. */
static int read_scaling_available_governors(char *scaling_available_governors,
                                            unsigned int size)
{
    unsigned int i = 0;
    struct cpufreq_governor *t;

    if ( !scaling_available_governors )
        return -EINVAL;

    list_for_each_entry(t, &cpufreq_governor_list, governor_list)
    {
        i += scnprintf(&scaling_available_governors[i],
                       CPUFREQ_NAME_LEN, "%s ", t->name);
        if ( i > size )
            return -EINVAL;
    }
    scaling_available_governors[i-1] = '\0';

    return 0;
}
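
With, say, the ondemand, userspace and performance governors registered, the buffer above ends up holding "ondemand userspace performance": each name is appended with a trailing space, and the final space is overwritten by the terminating NUL at index i-1.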

static int get_cpufreq_para(struct xen_sysctl_pm_op *op)
{
    uint32_t ret = 0;
    const struct processor_pminfo *pmpt;
    struct cpufreq_policy *policy;
    uint32_t gov_num = 0;
    uint32_t *affected_cpus;
    uint32_t *scaling_available_frequencies;
    char     *scaling_available_governors;
    struct list_head *pos;
    uint32_t cpu, i, j = 0;

    pmpt = processor_pminfo[op->cpuid];
    policy = per_cpu(cpufreq_cpu_policy, op->cpuid);

    if ( !pmpt || !pmpt->perf.states ||
         !policy || !policy->governor )
        return -EINVAL;

    list_for_each(pos, &cpufreq_governor_list)
        gov_num++;

    /* If the caller's buffer sizes do not match, report the sizes actually
     * needed and have it retry (-EAGAIN). */
    if ( (op->u.get_para.cpu_num  != cpumask_weight(policy->cpus)) ||
         (op->u.get_para.freq_num != pmpt->perf.state_count)       ||
         (op->u.get_para.gov_num  != gov_num) )
    {
        op->u.get_para.cpu_num  = cpumask_weight(policy->cpus);
        op->u.get_para.freq_num = pmpt->perf.state_count;
        op->u.get_para.gov_num  = gov_num;
        return -EAGAIN;
    }

    if ( !(affected_cpus = xzalloc_array(uint32_t, op->u.get_para.cpu_num)) )
        return -ENOMEM;
    for_each_cpu(cpu, policy->cpus)
        affected_cpus[j++] = cpu;
    ret = copy_to_guest(op->u.get_para.affected_cpus,
                        affected_cpus, op->u.get_para.cpu_num);
    xfree(affected_cpus);
    if ( ret )
        return ret;

    if ( !(scaling_available_frequencies =
           xzalloc_array(uint32_t, op->u.get_para.freq_num)) )
        return -ENOMEM;
    for ( i = 0; i < op->u.get_para.freq_num; i++ )
        scaling_available_frequencies[i] =
                        pmpt->perf.states[i].core_frequency * 1000;
    ret = copy_to_guest(op->u.get_para.scaling_available_frequencies,
                        scaling_available_frequencies,
                        op->u.get_para.freq_num);
    xfree(scaling_available_frequencies);
    if ( ret )
        return ret;

    if ( !(scaling_available_governors =
           xzalloc_array(char, gov_num * CPUFREQ_NAME_LEN)) )
        return -ENOMEM;
    if ( (ret = read_scaling_available_governors(scaling_available_governors,
                gov_num * CPUFREQ_NAME_LEN * sizeof(char))) )
    {
        xfree(scaling_available_governors);
        return ret;
    }
    ret = copy_to_guest(op->u.get_para.scaling_available_governors,
                        scaling_available_governors,
                        gov_num * CPUFREQ_NAME_LEN);
    xfree(scaling_available_governors);
    if ( ret )
        return ret;

    op->u.get_para.cpuinfo_cur_freq =
        cpufreq_driver->get ? cpufreq_driver->get(op->cpuid) : policy->cur;
    op->u.get_para.cpuinfo_max_freq = policy->cpuinfo.max_freq;
    op->u.get_para.cpuinfo_min_freq = policy->cpuinfo.min_freq;
    op->u.get_para.scaling_cur_freq = policy->cur;
    op->u.get_para.scaling_max_freq = policy->max;
    op->u.get_para.scaling_min_freq = policy->min;

    if ( cpufreq_driver->name[0] )
        strlcpy(op->u.get_para.scaling_driver,
                cpufreq_driver->name, CPUFREQ_NAME_LEN);
    else
        strlcpy(op->u.get_para.scaling_driver, "Unknown", CPUFREQ_NAME_LEN);

    if ( policy->governor->name[0] )
        strlcpy(op->u.get_para.scaling_governor,
                policy->governor->name, CPUFREQ_NAME_LEN);
    else
        strlcpy(op->u.get_para.scaling_governor, "Unknown", CPUFREQ_NAME_LEN);

    /* governor specific para */
    if ( !strnicmp(op->u.get_para.scaling_governor,
                   "userspace", CPUFREQ_NAME_LEN) )
    {
        op->u.get_para.u.userspace.scaling_setspeed = policy->cur;
    }

    if ( !strnicmp(op->u.get_para.scaling_governor,
                   "ondemand", CPUFREQ_NAME_LEN) )
    {
        ret = get_cpufreq_ondemand_para(
            &op->u.get_para.u.ondemand.sampling_rate_max,
            &op->u.get_para.u.ondemand.sampling_rate_min,
            &op->u.get_para.u.ondemand.sampling_rate,
            &op->u.get_para.u.ondemand.up_threshold);
    }
    op->u.get_para.turbo_enabled = cpufreq_get_turbo_status(op->cpuid);

    return ret;
}
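
The size check at the top of get_cpufreq_para() is a two-step handshake: the caller proposes cpu_num/freq_num/gov_num, and on mismatch the hypervisor writes back the sizes it actually needs and returns -EAGAIN. A caller-side sketch of the retry loop, with hypothetical resize_buffers() and do_pm_op_sysctl() helpers standing in for the real toolstack code:

    struct xen_sysctl_pm_op op = { .cmd = GET_CPUFREQ_PARA, .cpuid = cpu };
    int rc;

    do {
        /* (Re)allocate guest buffers to the sizes currently advertised in
         * op.u.get_para: cpu_num uint32_t entries for affected_cpus,
         * freq_num for scaling_available_frequencies, and
         * gov_num * CPUFREQ_NAME_LEN chars for the governor list. */
        resize_buffers(&op.u.get_para);     /* hypothetical helper */
        rc = do_pm_op_sysctl(&op);          /* hypothetical wrapper */
    } while ( rc == -EAGAIN );              /* sizes were updated; retry */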

static int set_cpufreq_gov(struct xen_sysctl_pm_op *op)
{
    struct cpufreq_policy new_policy, *old_policy;

    old_policy = per_cpu(cpufreq_cpu_policy, op->cpuid);
    if ( !old_policy )
        return -EINVAL;

    memcpy(&new_policy, old_policy, sizeof(struct cpufreq_policy));

    new_policy.governor = __find_governor(op->u.set_gov.scaling_governor);
    if ( new_policy.governor == NULL )
        return -EINVAL;

    return __cpufreq_set_policy(old_policy, &new_policy);
}
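
A governor switch from dom0 is then a single call through this path. A sketch, reusing the hypothetical do_pm_op_sysctl() wrapper from above:

    struct xen_sysctl_pm_op op = { .cmd = SET_CPUFREQ_GOV, .cpuid = cpu };
    int rc;

    /* The name must match a governor on cpufreq_governor_list, or
     * __find_governor() fails and set_cpufreq_gov() returns -EINVAL. */
    strlcpy(op.u.set_gov.scaling_governor, "ondemand", CPUFREQ_NAME_LEN);
    rc = do_pm_op_sysctl(&op);              /* hypothetical wrapper */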

static int set_cpufreq_para(struct xen_sysctl_pm_op *op)
{
    int ret = 0;
    struct cpufreq_policy *policy;

    policy = per_cpu(cpufreq_cpu_policy, op->cpuid);

    if ( !policy || !policy->governor )
        return -EINVAL;

    switch ( op->u.set_para.ctrl_type )
    {
    case SCALING_MAX_FREQ:
    {
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
        new_policy.max = op->u.set_para.ctrl_value;
        ret = __cpufreq_set_policy(policy, &new_policy);

        break;
    }

    case SCALING_MIN_FREQ:
    {
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
        new_policy.min = op->u.set_para.ctrl_value;
        ret = __cpufreq_set_policy(policy, &new_policy);

        break;
    }

    case SCALING_SETSPEED:
    {
        unsigned int freq = op->u.set_para.ctrl_value;

        if ( !strnicmp(policy->governor->name,
                       "userspace", CPUFREQ_NAME_LEN) )
            ret = write_userspace_scaling_setspeed(op->cpuid, freq);
        else
            ret = -EINVAL;

        break;
    }

    case SAMPLING_RATE:
    {
        unsigned int sampling_rate = op->u.set_para.ctrl_value;

        if ( !strnicmp(policy->governor->name,
                       "ondemand", CPUFREQ_NAME_LEN) )
            ret = write_ondemand_sampling_rate(sampling_rate);
        else
            ret = -EINVAL;

        break;
    }

    case UP_THRESHOLD:
    {
        unsigned int up_threshold = op->u.set_para.ctrl_value;

        if ( !strnicmp(policy->governor->name,
                       "ondemand", CPUFREQ_NAME_LEN) )
            ret = write_ondemand_up_threshold(up_threshold);
        else
            ret = -EINVAL;

        break;
    }

    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}
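
Capping a CPU's frequency, for example, goes through the SCALING_MAX_FREQ arm above. A sketch with the same hypothetical wrapper; the value is in kHz, consistent with the core_frequency * 1000 scaling in get_cpufreq_para():

    struct xen_sysctl_pm_op op = { .cmd = SET_CPUFREQ_PARA, .cpuid = cpu };
    int rc;

    op.u.set_para.ctrl_type  = SCALING_MAX_FREQ;
    op.u.set_para.ctrl_value = 1800000;     /* 1.8 GHz, expressed in kHz */
    rc = do_pm_op_sysctl(&op);              /* hypothetical wrapper */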

int do_pm_op(struct xen_sysctl_pm_op *op)
{
    int ret = 0;
    const struct processor_pminfo *pmpt;

    if ( !op || op->cpuid >= nr_cpu_ids || !cpu_online(op->cpuid) )
        return -EINVAL;
    pmpt = processor_pminfo[op->cpuid];

    switch ( op->cmd & PM_PARA_CATEGORY_MASK )
    {
    case CPUFREQ_PARA:
        if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) )
            return -ENODEV;
        if ( !pmpt || !(pmpt->perf.init & XEN_PX_INIT) )
            return -EINVAL;
        break;
    }

    switch ( op->cmd )
    {
    case GET_CPUFREQ_PARA:
    {
        ret = get_cpufreq_para(op);
        break;
    }

    case SET_CPUFREQ_GOV:
    {
        ret = set_cpufreq_gov(op);
        break;
    }

    case SET_CPUFREQ_PARA:
    {
        ret = set_cpufreq_para(op);
        break;
    }

    case GET_CPUFREQ_AVGFREQ:
    {
        op->u.get_avgfreq = cpufreq_driver_getavg(op->cpuid, USR_GETAVG);
        break;
    }

    case XEN_SYSCTL_pm_op_set_sched_opt_smt:
    {
        uint32_t saved_value;

        saved_value = sched_smt_power_savings;
        sched_smt_power_savings = !!op->u.set_sched_opt_smt;
        op->u.set_sched_opt_smt = saved_value;

        break;
    }

    case XEN_SYSCTL_pm_op_set_vcpu_migration_delay:
    {
        set_vcpu_migration_delay(op->u.set_vcpu_migration_delay);
        break;
    }

    case XEN_SYSCTL_pm_op_get_vcpu_migration_delay:
    {
        op->u.get_vcpu_migration_delay = get_vcpu_migration_delay();
        break;
    }

    case XEN_SYSCTL_pm_op_get_max_cstate:
    {
        op->u.get_max_cstate = acpi_get_cstate_limit();
        break;
    }

    case XEN_SYSCTL_pm_op_set_max_cstate:
    {
        acpi_set_cstate_limit(op->u.set_max_cstate);
        break;
    }

    case XEN_SYSCTL_pm_op_enable_turbo:
    {
        ret = cpufreq_update_turbo(op->cpuid, CPUFREQ_TURBO_ENABLED);
        break;
    }

    case XEN_SYSCTL_pm_op_disable_turbo:
    {
        ret = cpufreq_update_turbo(op->cpuid, CPUFREQ_TURBO_DISABLED);
        break;
    }

    default:
        printk("undefined sub-hypercall @ do_pm_op\n");
        ret = -ENOSYS;
        break;
    }

    return ret;
}

int acpi_set_pdc_bits(u32 acpi_id, XEN_GUEST_HANDLE_PARAM(uint32) pdc)
{
    u32 bits[3];
    int ret;

    if ( copy_from_guest(bits, pdc, 2) )
        ret = -EFAULT;
    else if ( bits[0] != ACPI_PDC_REVISION_ID || !bits[1] )
        ret = -EINVAL;
    else if ( copy_from_guest_offset(bits + 2, pdc, 2, 1) )
        ret = -EFAULT;
    else
    {
        u32 mask = 0;

        if ( xen_processor_pmbits & XEN_PROCESSOR_PM_CX )
            mask |= ACPI_PDC_C_MASK | ACPI_PDC_SMP_C1PT;
        if ( xen_processor_pmbits & XEN_PROCESSOR_PM_PX )
            mask |= ACPI_PDC_P_MASK | ACPI_PDC_SMP_C1PT;
        if ( xen_processor_pmbits & XEN_PROCESSOR_PM_TX )
            mask |= ACPI_PDC_T_MASK | ACPI_PDC_SMP_C1PT;
        bits[2] &= (ACPI_PDC_C_MASK | ACPI_PDC_P_MASK | ACPI_PDC_T_MASK |
                    ACPI_PDC_SMP_C1PT) & ~mask;
        ret = arch_acpi_set_pdc_bits(acpi_id, bits, mask);
    }
    if ( !ret && __copy_to_guest_offset(pdc, 2, bits + 2, 1) )
        ret = -EFAULT;

    return ret;
}
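
The guest handle here carries the first three dwords of an ACPI _PDC buffer: revision, dword count, and the first capability dword. A sketch of what a dom0 kernel would hand in, per the checks above (caps stands for whatever capability bits the kernel advertises; Xen filters the dword against the PM features it owns, per the mask logic above, and copies the filtered value back through the handle):

    uint32_t caps = /* capability bits the dom0 kernel supports */ 0;
    uint32_t pdc[3] = {
        ACPI_PDC_REVISION_ID,   /* bits[0]: wrong revision => -EINVAL */
        1,                      /* bits[1]: dword count, must be non-zero */
        caps,                   /* bits[2]: filtered, passed to
                                 * arch_acpi_set_pdc_bits(), written back */
    };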