Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/common/cpupool.c
Line | Count | Source
1
/******************************************************************************
2
 * cpupool.c
3
 * 
4
 * Generic cpupool-handling functions.
5
 *
6
 * Cpupools are a feature to have configurable scheduling domains. Each
7
 * cpupool runs its own scheduler on a dedicated set of physical cpus.
8
 * A domain is bound to one cpupool at any time, but it can be moved to
9
 * another cpupool.
10
 *
11
 * (C) 2009, Juergen Gross, Fujitsu Technology Solutions
12
 */
13
14
#include <xen/lib.h>
15
#include <xen/init.h>
16
#include <xen/cpumask.h>
17
#include <xen/percpu.h>
18
#include <xen/sched.h>
19
#include <xen/sched-if.h>
20
#include <xen/keyhandler.h>
21
#include <xen/cpu.h>
22
23
#define for_each_cpupool(ptr)    \
24
2
    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
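The for_each_cpupool() macro above walks the pool list through a pointer-to-pointer, so the loop variable ends up holding the address of the link that points at the current pool rather than the pool itself. A minimal, self-contained sketch of that idiom (the struct and function names here are illustrative, not part of this file):

    /* Illustrative stand-in for struct cpupool and its sorted list. */
    struct node { int id; struct node *next; };

    static struct node *head;

    static void insert_sorted(struct node *n)
    {
        struct node **q;

        /* Stop at the first link whose target has an id >= n->id,
         * or at the terminating NULL link. */
        for ( q = &head; *q != NULL; q = &(*q)->next )
            if ( (*q)->id >= n->id )
                break;

        n->next = *q;   /* splice in front of the element found ... */
        *q = n;         /* ... by rewriting the link we stopped at. */
    }

Because q addresses the link rather than the element, insertion (as in cpupool_create()) and removal (*q = c->next in cpupool_destroy()) need no special case for the list head.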
25
26
struct cpupool *cpupool0;                /* Initial cpupool with Dom0 */
27
cpumask_t cpupool_free_cpus;             /* cpus not in any cpupool */
28
29
static struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
30
31
static int cpupool_moving_cpu = -1;
32
static struct cpupool *cpupool_cpu_moving = NULL;
33
static cpumask_t cpupool_locked_cpus;
34
35
static DEFINE_SPINLOCK(cpupool_lock);
36
37
DEFINE_PER_CPU(struct cpupool *, cpupool);
38
39
3
#define cpupool_dprintk(x...) ((void)0)
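In this build the debug helper compiles to a no-op, which is why none of the cpupool_dprintk() call sites below produce output. A debug variant would presumably just forward to printk(); a sketch, assuming a CPUPOOL_DEBUG-style switch that is not visible in this report:

    /* Assumed debug variant -- not shown by the report above. */
    #ifdef CPUPOOL_DEBUG
    #define cpupool_dprintk(x...) printk(x)
    #else
    #define cpupool_dprintk(x...) ((void)0)
    #endif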
40
41
static struct cpupool *alloc_cpupool_struct(void)
42
1
{
43
1
    struct cpupool *c = xzalloc(struct cpupool);
44
1
45
1
    if ( !c || !zalloc_cpumask_var(&c->cpu_valid) )
46
0
    {
47
0
        xfree(c);
48
0
        c = NULL;
49
0
    }
50
1
    else if ( !zalloc_cpumask_var(&c->cpu_suspended) )
51
0
    {
52
0
        free_cpumask_var(c->cpu_valid);
53
0
        xfree(c);
54
0
        c = NULL;
55
0
    }
56
1
57
1
    return c;
58
1
}
59
60
static void free_cpupool_struct(struct cpupool *c)
61
0
{
62
0
    if ( c )
63
0
    {
64
0
        free_cpumask_var(c->cpu_suspended);
65
0
        free_cpumask_var(c->cpu_valid);
66
0
    }
67
0
    xfree(c);
68
0
}
69
70
/*
71
 * find a cpupool by its id. To be called with the cpupool lock held.
72
 * if exact is not specified, the first cpupool with an id larger than or equal to
73
 * the searched id is returned
74
 * returns NULL if not found.
75
 */
76
static struct cpupool *__cpupool_find_by_id(int id, int exact)
77
1
{
78
1
    struct cpupool **q;
79
1
80
1
    ASSERT(spin_is_locked(&cpupool_lock));
81
1
82
1
    for_each_cpupool(q)
83
1
        if ( (*q)->cpupool_id >= id )
84
1
            break;
85
1
86
1
    return (!exact || (*q == NULL) || ((*q)->cpupool_id == id)) ? *q : NULL;
87
1
}
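The exact flag decides whether a pool with a larger id may be returned instead of failing. A worked illustration (the pool ids are made up for the example):

    /*
     * Illustrative only, assuming the list currently holds pools
     * with ids 0 and 2:
     *   __cpupool_find_by_id(1, 1) -> NULL             (exact: no pool 1)
     *   __cpupool_find_by_id(1, 0) -> pool with id 2   (first id >= 1)
     *   __cpupool_find_by_id(3, 0) -> NULL             (ran off the end)
     */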
88
89
static struct cpupool *cpupool_find_by_id(int poolid)
90
1
{
91
1
    return __cpupool_find_by_id(poolid, 1);
92
1
}
93
94
static struct cpupool *__cpupool_get_by_id(int poolid, int exact)
95
0
{
96
0
    struct cpupool *c;
97
0
    spin_lock(&cpupool_lock);
98
0
    c = __cpupool_find_by_id(poolid, exact);
99
0
    if ( c != NULL )
100
0
        atomic_inc(&c->refcnt);
101
0
    spin_unlock(&cpupool_lock);
102
0
    return c;
103
0
}
104
105
struct cpupool *cpupool_get_by_id(int poolid)
106
0
{
107
0
    return __cpupool_get_by_id(poolid, 1);
108
0
}
109
110
static struct cpupool *cpupool_get_next_by_id(int poolid)
111
0
{
112
0
    return __cpupool_get_by_id(poolid, 0);
113
0
}
114
115
void cpupool_put(struct cpupool *pool)
116
1
{
117
1
    if ( !atomic_dec_and_test(&pool->refcnt) )
118
1
        return;
119
0
    scheduler_free(pool->sched);
120
0
    free_cpupool_struct(pool);
121
0
}
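cpupool_create() below hands out two references, one for its caller and one that cpupool_destroy() drops, while the __cpupool_get_by_id() helpers take an extra reference under the lock. A minimal sketch of the pairing a caller is expected to follow (the helper function is hypothetical, not part of this file):

    /* Hypothetical caller, illustrative only. */
    static int use_pool(int poolid)
    {
        struct cpupool *c = cpupool_get_by_id(poolid);  /* takes a reference */

        if ( c == NULL )
            return -ENOENT;

        /* ... operate on c while the reference is held ... */

        cpupool_put(c);                                 /* drop it again */
        return 0;
    }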
122
123
/*
124
 * create a new cpupool with specified poolid and scheduler
125
 * returns a pointer to the new cpupool structure on success, NULL otherwise
126
 * possible failures:
127
 * - no memory
128
 * - poolid already used
129
 * - unknown scheduler
130
 */
131
static struct cpupool *cpupool_create(
132
    int poolid, unsigned int sched_id, int *perr)
133
1
{
134
1
    struct cpupool *c;
135
1
    struct cpupool **q;
136
1
    int last = 0;
137
1
138
1
    *perr = -ENOMEM;
139
1
    if ( (c = alloc_cpupool_struct()) == NULL )
140
0
        return NULL;
141
1
142
1
    /* One reference for caller, one reference for cpupool_destroy(). */
143
1
    atomic_set(&c->refcnt, 2);
144
1
145
1
    cpupool_dprintk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);
146
1
147
1
    spin_lock(&cpupool_lock);
148
1
149
1
    for_each_cpupool(q)
150
0
    {
151
0
        last = (*q)->cpupool_id;
152
0
        if ( (poolid != CPUPOOLID_NONE) && (last >= poolid) )
153
0
            break;
154
0
    }
155
1
    if ( *q != NULL )
156
0
    {
157
0
        if ( (*q)->cpupool_id == poolid )
158
0
        {
159
0
            spin_unlock(&cpupool_lock);
160
0
            free_cpupool_struct(c);
161
0
            *perr = -EEXIST;
162
0
            return NULL;
163
0
        }
164
0
        c->next = *q;
165
0
    }
166
1
167
1
    c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
168
1
    if ( poolid == 0 )
169
1
    {
170
1
        c->sched = scheduler_get_default();
171
1
    }
172
1
    else
173
0
    {
174
0
        c->sched = scheduler_alloc(sched_id, perr);
175
0
        if ( c->sched == NULL )
176
0
        {
177
0
            spin_unlock(&cpupool_lock);
178
0
            free_cpupool_struct(c);
179
0
            return NULL;
180
0
        }
181
0
    }
182
1
183
1
    *q = c;
184
1
185
1
    spin_unlock(&cpupool_lock);
186
1
187
1
    cpupool_dprintk("Created cpupool %d with scheduler %s (%s)\n",
188
1
                    c->cpupool_id, c->sched->name, c->sched->opt_name);
189
1
190
1
    *perr = 0;
191
1
    return c;
192
1
}
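A sketch of how a caller might use cpupool_create() with an automatically chosen pool id, checking the error reported through perr (the helper and its name are hypothetical):

    /* Hypothetical caller, illustrative only. */
    static struct cpupool *make_pool(unsigned int sched_id)
    {
        int err;
        struct cpupool *c = cpupool_create(CPUPOOLID_NONE, sched_id, &err);

        if ( c == NULL )
            printk("cpupool creation failed: %d\n", err);  /* e.g. -ENOMEM */

        /* On success the caller owns one reference; cpupool_destroy()
         * will consume the second one. */
        return c;
    }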
193
/*
194
 * destroys the given cpupool
195
 * returns 0 on success, -errno otherwise
196
 * possible failures:
197
 * - pool still in use
198
 * - cpus still assigned to pool
199
 * - pool not in list
200
 */
201
static int cpupool_destroy(struct cpupool *c)
202
0
{
203
0
    struct cpupool **q;
204
0
205
0
    spin_lock(&cpupool_lock);
206
0
    for_each_cpupool(q)
207
0
        if ( *q == c )
208
0
            break;
209
0
    if ( *q != c )
210
0
    {
211
0
        spin_unlock(&cpupool_lock);
212
0
        return -ENOENT;
213
0
    }
214
0
    if ( (c->n_dom != 0) || cpumask_weight(c->cpu_valid) )
215
0
    {
216
0
        spin_unlock(&cpupool_lock);
217
0
        return -EBUSY;
218
0
    }
219
0
    *q = c->next;
220
0
    spin_unlock(&cpupool_lock);
221
0
222
0
    cpupool_put(c);
223
0
224
0
    cpupool_dprintk("cpupool_destroy(pool=%d)\n", c->cpupool_id);
225
0
    return 0;
226
0
}
227
228
/*
229
 * Move domain to another cpupool
230
 */
231
static int cpupool_move_domain_locked(struct domain *d, struct cpupool *c)
232
0
{
233
0
    int ret;
234
0
235
0
    if ( unlikely(d->cpupool == c) )
236
0
        return 0;
237
0
238
0
    d->cpupool->n_dom--;
239
0
    ret = sched_move_domain(d, c);
240
0
    if ( ret )
241
0
        d->cpupool->n_dom++;
242
0
    else
243
0
        c->n_dom++;
244
0
245
0
    return ret;
246
0
}
247
int cpupool_move_domain(struct domain *d, struct cpupool *c)
248
0
{
249
0
    int ret;
250
0
251
0
    spin_lock(&cpupool_lock);
252
0
253
0
    ret = cpupool_move_domain_locked(d, c);
254
0
255
0
    spin_unlock(&cpupool_lock);
256
0
257
0
    return ret;
258
0
}
259
260
/*
261
 * assign a specific cpu to a cpupool
262
 * cpupool_lock must be held
263
 */
264
static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
265
12
{
266
12
    int ret;
267
12
    struct domain *d;
268
12
269
12
    if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
270
0
        return -EADDRNOTAVAIL;
271
12
    ret = schedule_cpu_switch(cpu, c);
272
12
    if ( ret )
273
0
        return ret;
274
12
275
12
    cpumask_clear_cpu(cpu, &cpupool_free_cpus);
276
12
    if (cpupool_moving_cpu == cpu)
277
0
    {
278
0
        cpupool_moving_cpu = -1;
279
0
        cpupool_put(cpupool_cpu_moving);
280
0
        cpupool_cpu_moving = NULL;
281
0
    }
282
12
    cpumask_set_cpu(cpu, c->cpu_valid);
283
12
284
12
    rcu_read_lock(&domlist_read_lock);
285
12
    for_each_domain_in_cpupool(d, c)
286
0
    {
287
0
        domain_update_node_affinity(d);
288
0
    }
289
12
    rcu_read_unlock(&domlist_read_lock);
290
12
291
12
    return 0;
292
12
}
293
294
static long cpupool_unassign_cpu_helper(void *info)
295
0
{
296
0
    int cpu = cpupool_moving_cpu;
297
0
    struct cpupool *c = info;
298
0
    struct domain *d;
299
0
    long ret;
300
0
301
0
    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
302
0
                    cpupool_cpu_moving->cpupool_id, cpu);
303
0
304
0
    spin_lock(&cpupool_lock);
305
0
    if ( c != cpupool_cpu_moving )
306
0
    {
307
0
        ret = -EADDRNOTAVAIL;
308
0
        goto out;
309
0
    }
310
0
311
0
    /*
312
0
     * We need this for scanning the domain list, both in
313
0
     * cpu_disable_scheduler(), and at the bottom of this function.
314
0
     */
315
0
    rcu_read_lock(&domlist_read_lock);
316
0
    ret = cpu_disable_scheduler(cpu);
317
0
    cpumask_set_cpu(cpu, &cpupool_free_cpus);
318
0
319
0
    /*
320
0
     * cpu_disable_scheduler() returning an error doesn't require resetting
321
0
     * cpupool_free_cpus' cpu bit. All error cases should be of a temporary
322
0
     * nature and tools will retry the operation. Even if the number of
323
0
     * retries may be limited, the in-between state can easily be repaired
324
0
     * by adding the cpu to the cpupool again.
325
0
     */
326
0
    if ( !ret )
327
0
    {
328
0
        ret = schedule_cpu_switch(cpu, NULL);
329
0
        if ( ret )
330
0
            cpumask_clear_cpu(cpu, &cpupool_free_cpus);
331
0
        else
332
0
        {
333
0
            cpupool_moving_cpu = -1;
334
0
            cpupool_put(cpupool_cpu_moving);
335
0
            cpupool_cpu_moving = NULL;
336
0
        }
337
0
    }
338
0
339
0
    for_each_domain_in_cpupool(d, c)
340
0
    {
341
0
        domain_update_node_affinity(d);
342
0
    }
343
0
    rcu_read_unlock(&domlist_read_lock);
344
0
out:
345
0
    spin_unlock(&cpupool_lock);
346
0
    cpupool_dprintk("cpupool_unassign_cpu ret=%ld\n", ret);
347
0
    return ret;
348
0
}
349
350
/*
351
 * unassign a specific cpu from a cpupool
352
 * we must be sure not to run on the cpu to be unassigned! To achieve this,
353
 * the main functionality is performed via continue_hypercall_on_cpu on a
354
 * specific cpu.
355
 * if the cpu to be removed is the last one of the cpupool no active domain
356
 * must be bound to the cpupool. dying domains are moved to cpupool0 as they
357
 * might be zombies.
358
 * possible failures:
359
 * - last cpu and still active domains in cpupool
360
 * - cpu just being unplugged
361
 */
362
static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
363
0
{
364
0
    int work_cpu;
365
0
    int ret;
366
0
    struct domain *d;
367
0
368
0
    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
369
0
                    c->cpupool_id, cpu);
370
0
371
0
    spin_lock(&cpupool_lock);
372
0
    ret = -EADDRNOTAVAIL;
373
0
    if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
374
0
        goto out;
375
0
    if ( cpumask_test_cpu(cpu, &cpupool_locked_cpus) )
376
0
        goto out;
377
0
378
0
    ret = 0;
379
0
    if ( !cpumask_test_cpu(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
380
0
        goto out;
381
0
382
0
    if ( (c->n_dom > 0) && (cpumask_weight(c->cpu_valid) == 1) &&
383
0
         (cpu != cpupool_moving_cpu) )
384
0
    {
385
0
        rcu_read_lock(&domlist_read_lock);
386
0
        for_each_domain_in_cpupool(d, c)
387
0
        {
388
0
            if ( !d->is_dying )
389
0
            {
390
0
                ret = -EBUSY;
391
0
                break;
392
0
            }
393
0
            ret = cpupool_move_domain_locked(d, cpupool0);
394
0
            if ( ret )
395
0
                break;
396
0
        }
397
0
        rcu_read_unlock(&domlist_read_lock);
398
0
        if ( ret )
399
0
            goto out;
400
0
    }
401
0
    cpupool_moving_cpu = cpu;
402
0
    atomic_inc(&c->refcnt);
403
0
    cpupool_cpu_moving = c;
404
0
    cpumask_clear_cpu(cpu, c->cpu_valid);
405
0
    spin_unlock(&cpupool_lock);
406
0
407
0
    work_cpu = smp_processor_id();
408
0
    if ( work_cpu == cpu )
409
0
    {
410
0
        work_cpu = cpumask_first(cpupool0->cpu_valid);
411
0
        if ( work_cpu == cpu )
412
0
            work_cpu = cpumask_next(cpu, cpupool0->cpu_valid);
413
0
    }
414
0
    return continue_hypercall_on_cpu(work_cpu, cpupool_unassign_cpu_helper, c);
415
0
416
0
out:
417
0
    spin_unlock(&cpupool_lock);
418
0
    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
419
0
                    c->cpupool_id, cpu, ret);
420
0
    return ret;
421
0
}
422
423
/*
424
 * add a new domain to a cpupool
425
 * possible failures:
426
 * - pool does not exist
427
 * - no cpu assigned to pool
428
 */
429
int cpupool_add_domain(struct domain *d, int poolid)
430
2
{
431
2
    struct cpupool *c;
432
2
    int rc;
433
2
    int n_dom = 0;
434
2
435
2
    if ( poolid == CPUPOOLID_NONE )
436
1
        return 0;
437
1
    spin_lock(&cpupool_lock);
438
1
    c = cpupool_find_by_id(poolid);
439
1
    if ( c == NULL )
440
0
        rc = -ESRCH;
441
1
    else if ( !cpumask_weight(c->cpu_valid) )
442
0
        rc = -ENODEV;
443
1
    else
444
1
    {
445
1
        c->n_dom++;
446
1
        n_dom = c->n_dom;
447
1
        d->cpupool = c;
448
1
        rc = 0;
449
1
    }
450
1
    spin_unlock(&cpupool_lock);
451
1
    cpupool_dprintk("cpupool_add_domain(dom=%d,pool=%d) n_dom %d rc %d\n",
452
1
                    d->domain_id, poolid, n_dom, rc);
453
1
    return rc;
454
2
}
455
456
/*
457
 * remove a domain from a cpupool
458
 */
459
void cpupool_rm_domain(struct domain *d)
460
0
{
461
0
    int cpupool_id;
462
0
    int n_dom;
463
0
464
0
    if ( d->cpupool == NULL )
465
0
        return;
466
0
    spin_lock(&cpupool_lock);
467
0
    cpupool_id = d->cpupool->cpupool_id;
468
0
    d->cpupool->n_dom--;
469
0
    n_dom = d->cpupool->n_dom;
470
0
    d->cpupool = NULL;
471
0
    spin_unlock(&cpupool_lock);
472
0
    cpupool_dprintk("cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
473
0
                    d->domain_id, cpupool_id, n_dom);
474
0
    return;
475
0
}
476
477
/*
478
 * Called to add a cpu to a pool. CPUs being hot-plugged are added to pool0,
479
 * as they must have been in there when unplugged.
480
 *
481
 * If, on the other hand, we are adding CPUs because we are resuming (e.g.,
482
 * after ACPI S3) we put the cpu back in the pool it was in when we
483
 * suspended.
484
 */
485
static int cpupool_cpu_add(unsigned int cpu)
486
12
{
487
12
    int ret = 0;
488
12
489
12
    spin_lock(&cpupool_lock);
490
12
    cpumask_clear_cpu(cpu, &cpupool_locked_cpus);
491
12
    cpumask_set_cpu(cpu, &cpupool_free_cpus);
492
12
493
12
    if ( system_state == SYS_STATE_resume )
494
0
    {
495
0
        struct cpupool **c;
496
0
497
0
        for_each_cpupool(c)
498
0
        {
499
0
            if ( cpumask_test_cpu(cpu, (*c)->cpu_suspended ) )
500
0
            {
501
0
                ret = cpupool_assign_cpu_locked(*c, cpu);
502
0
                if ( ret )
503
0
                    goto out;
504
0
                cpumask_clear_cpu(cpu, (*c)->cpu_suspended);
505
0
                break;
506
0
            }
507
0
        }
508
0
509
0
        /*
510
0
         * Either cpu has been found as suspended in a pool, and added back
511
0
         * there, or it stayed free (if it did not belong to any pool when
512
0
         * suspending), and we don't want to do anything.
513
0
         */
514
0
        ASSERT(cpumask_test_cpu(cpu, &cpupool_free_cpus) ||
515
0
               cpumask_test_cpu(cpu, (*c)->cpu_valid));
516
0
    }
517
12
    else
518
12
    {
519
12
        /*
520
12
         * If we are not resuming, we are hot-plugging a cpu, in which case
521
12
         * we add it to pool0, as it certainly was there when hot-unplugged
522
12
         * (or unplugging would have failed) and that is the default behavior
523
12
         * anyway.
524
12
         */
525
12
        ret = cpupool_assign_cpu_locked(cpupool0, cpu);
526
12
    }
527
12
 out:
528
12
    spin_unlock(&cpupool_lock);
529
12
530
12
    return ret;
531
12
}
532
533
/*
534
 * Called to remove a CPU from a pool. The CPU is locked, to forbid removing
535
 * it from pool0. In fact, if we want to hot-unplug a CPU, it must belong to
536
 * pool0, or we fail.
537
 *
538
 * However, if we are suspending (e.g., to ACPI S3), we mark the CPU in such
539
 * a way that it can be put back in its pool when resuming.
540
 */
541
static int cpupool_cpu_remove(unsigned int cpu)
542
0
{
543
0
    int ret = -ENODEV;
544
0
545
0
    spin_lock(&cpupool_lock);
546
0
    if ( system_state == SYS_STATE_suspend )
547
0
    {
548
0
        struct cpupool **c;
549
0
550
0
        for_each_cpupool(c)
551
0
        {
552
0
            if ( cpumask_test_cpu(cpu, (*c)->cpu_valid ) )
553
0
            {
554
0
                cpumask_set_cpu(cpu, (*c)->cpu_suspended);
555
0
                cpumask_clear_cpu(cpu, (*c)->cpu_valid);
556
0
                break;
557
0
            }
558
0
        }
559
0
560
0
        /*
561
0
         * Either we found cpu in a pool, or it must be free (if it has been
562
0
         * hot-unplugged, then we must have found it in pool0). It is, of
563
0
         * course, fine to suspend or shutdown with CPUs not assigned to a
564
0
         * pool, and (in case of suspend) they will stay free when resuming.
565
0
         */
566
0
        ASSERT(cpumask_test_cpu(cpu, &cpupool_free_cpus) ||
567
0
               cpumask_test_cpu(cpu, (*c)->cpu_suspended));
568
0
        ASSERT(cpumask_test_cpu(cpu, &cpu_online_map) ||
569
0
               cpumask_test_cpu(cpu, cpupool0->cpu_suspended));
570
0
        ret = 0;
571
0
    }
572
0
    else if ( cpumask_test_cpu(cpu, cpupool0->cpu_valid) )
573
0
    {
574
0
        /*
575
0
         * If we are not suspending, we are hot-unplugging cpu, and that is
576
0
         * allowed only for CPUs in pool0.
577
0
         */
578
0
        cpumask_clear_cpu(cpu, cpupool0->cpu_valid);
579
0
        ret = 0;
580
0
    }
581
0
582
0
    if ( !ret )
583
0
        cpumask_set_cpu(cpu, &cpupool_locked_cpus);
584
0
    spin_unlock(&cpupool_lock);
585
0
586
0
    return ret;
587
0
}
588
589
/*
590
 * do cpupool related sysctl operations
591
 */
592
int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
593
0
{
594
0
    int ret;
595
0
    struct cpupool *c;
596
0
597
0
    switch ( op->op )
598
0
    {
599
0
600
0
    case XEN_SYSCTL_CPUPOOL_OP_CREATE:
601
0
    {
602
0
        int poolid;
603
0
604
0
        poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
605
0
            CPUPOOLID_NONE: op->cpupool_id;
606
0
        c = cpupool_create(poolid, op->sched_id, &ret);
607
0
        if ( c != NULL )
608
0
        {
609
0
            op->cpupool_id = c->cpupool_id;
610
0
            cpupool_put(c);
611
0
        }
612
0
    }
613
0
    break;
614
0
615
0
    case XEN_SYSCTL_CPUPOOL_OP_DESTROY:
616
0
    {
617
0
        c = cpupool_get_by_id(op->cpupool_id);
618
0
        ret = -ENOENT;
619
0
        if ( c == NULL )
620
0
            break;
621
0
        ret = cpupool_destroy(c);
622
0
        cpupool_put(c);
623
0
    }
624
0
    break;
625
0
626
0
    case XEN_SYSCTL_CPUPOOL_OP_INFO:
627
0
    {
628
0
        c = cpupool_get_next_by_id(op->cpupool_id);
629
0
        ret = -ENOENT;
630
0
        if ( c == NULL )
631
0
            break;
632
0
        op->cpupool_id = c->cpupool_id;
633
0
        op->sched_id = c->sched->sched_id;
634
0
        op->n_dom = c->n_dom;
635
0
        ret = cpumask_to_xenctl_bitmap(&op->cpumap, c->cpu_valid);
636
0
        cpupool_put(c);
637
0
    }
638
0
    break;
639
0
640
0
    case XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
641
0
    {
642
0
        unsigned cpu;
643
0
644
0
        cpu = op->cpu;
645
0
        cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d)\n",
646
0
                        op->cpupool_id, cpu);
647
0
        spin_lock(&cpupool_lock);
648
0
        if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
649
0
            cpu = cpumask_first(&cpupool_free_cpus);
650
0
        ret = -EINVAL;
651
0
        if ( cpu >= nr_cpu_ids )
652
0
            goto addcpu_out;
653
0
        ret = -ENODEV;
654
0
        if ( !cpumask_test_cpu(cpu, &cpupool_free_cpus) )
655
0
            goto addcpu_out;
656
0
        c = cpupool_find_by_id(op->cpupool_id);
657
0
        ret = -ENOENT;
658
0
        if ( c == NULL )
659
0
            goto addcpu_out;
660
0
        ret = cpupool_assign_cpu_locked(c, cpu);
661
0
    addcpu_out:
662
0
        spin_unlock(&cpupool_lock);
663
0
        cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
664
0
                        op->cpupool_id, cpu, ret);
665
0
    }
666
0
    break;
667
0
668
0
    case XEN_SYSCTL_CPUPOOL_OP_RMCPU:
669
0
    {
670
0
        unsigned cpu;
671
0
672
0
        c = cpupool_get_by_id(op->cpupool_id);
673
0
        ret = -ENOENT;
674
0
        if ( c == NULL )
675
0
            break;
676
0
        cpu = op->cpu;
677
0
        if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
678
0
            cpu = cpumask_last(c->cpu_valid);
679
0
        ret = (cpu < nr_cpu_ids) ? cpupool_unassign_cpu(c, cpu) : -EINVAL;
680
0
        cpupool_put(c);
681
0
    }
682
0
    break;
683
0
684
0
    case XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
685
0
    {
686
0
        struct domain *d;
687
0
688
0
        ret = rcu_lock_remote_domain_by_id(op->domid, &d);
689
0
        if ( ret )
690
0
            break;
691
0
        if ( d->cpupool == NULL )
692
0
        {
693
0
            ret = -EINVAL;
694
0
            rcu_unlock_domain(d);
695
0
            break;
696
0
        }
697
0
        if ( op->cpupool_id == d->cpupool->cpupool_id )
698
0
        {
699
0
            ret = 0;
700
0
            rcu_unlock_domain(d);
701
0
            break;
702
0
        }
703
0
        cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d\n",
704
0
                        d->domain_id, op->cpupool_id);
705
0
        ret = -ENOENT;
706
0
        spin_lock(&cpupool_lock);
707
0
708
0
        c = cpupool_find_by_id(op->cpupool_id);
709
0
        if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
710
0
            ret = cpupool_move_domain_locked(d, c);
711
0
712
0
        spin_unlock(&cpupool_lock);
713
0
        cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
714
0
                        d->domain_id, op->cpupool_id, ret);
715
0
        rcu_unlock_domain(d);
716
0
    }
717
0
    break;
718
0
719
0
    case XEN_SYSCTL_CPUPOOL_OP_FREEINFO:
720
0
    {
721
0
        ret = cpumask_to_xenctl_bitmap(
722
0
            &op->cpumap, &cpupool_free_cpus);
723
0
    }
724
0
    break;
725
0
726
0
    default:
727
0
        ret = -ENOSYS;
728
0
        break;
729
0
    }
730
0
731
0
    return ret;
732
0
}
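A sketch of how a XEN_SYSCTL_CPUPOOL_OP_CREATE request could be filled in by a caller of this function (illustrative only; the helper name and the choice of sched_id are assumptions, and the real toolstack reaches this code via the sysctl hypercall):

    /* Hypothetical helper, illustrative only. */
    static int create_pool_any_id(unsigned int sched_id, int *new_poolid)
    {
        struct xen_sysctl_cpupool_op op = {
            .op         = XEN_SYSCTL_CPUPOOL_OP_CREATE,
            .cpupool_id = XEN_SYSCTL_CPUPOOL_PAR_ANY,   /* let Xen pick the id */
            .sched_id   = sched_id,
        };
        int rc = cpupool_do_sysctl(&op);

        if ( !rc )
            *new_poolid = op.cpupool_id;   /* filled in by the CREATE op */
        return rc;
    }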
733
734
static void print_cpumap(const char *str, const cpumask_t *map)
735
0
{
736
0
    cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), map);
737
0
    printk("%s: %s\n", str, keyhandler_scratch);
738
0
}
739
740
void dump_runq(unsigned char key)
741
0
{
742
0
    unsigned long    flags;
743
0
    s_time_t         now = NOW();
744
0
    struct cpupool **c;
745
0
746
0
    spin_lock(&cpupool_lock);
747
0
    local_irq_save(flags);
748
0
749
0
    printk("sched_smt_power_savings: %s\n",
750
0
            sched_smt_power_savings? "enabled":"disabled");
751
0
    printk("NOW=%"PRI_stime"\n", now);
752
0
753
0
    print_cpumap("Online Cpus", &cpu_online_map);
754
0
    if ( !cpumask_empty(&cpupool_free_cpus) )
755
0
    {
756
0
        print_cpumap("Free Cpus", &cpupool_free_cpus);
757
0
        schedule_dump(NULL);
758
0
    }
759
0
760
0
    for_each_cpupool(c)
761
0
    {
762
0
        printk("Cpupool %d:\n", (*c)->cpupool_id);
763
0
        print_cpumap("Cpus", (*c)->cpu_valid);
764
0
        schedule_dump(*c);
765
0
    }
766
0
767
0
    local_irq_restore(flags);
768
0
    spin_unlock(&cpupool_lock);
769
0
}
770
771
static int cpu_callback(
772
    struct notifier_block *nfb, unsigned long action, void *hcpu)
773
34
{
774
34
    unsigned int cpu = (unsigned long)hcpu;
775
34
    int rc = 0;
776
34
777
34
    switch ( action )
778
34
    {
779
12
    case CPU_DOWN_FAILED:
780
12
    case CPU_ONLINE:
781
12
        rc = cpupool_cpu_add(cpu);
782
12
        break;
783
0
    case CPU_DOWN_PREPARE:
784
0
        rc = cpupool_cpu_remove(cpu);
785
0
        break;
786
22
    default:
787
22
        break;
788
34
    }
789
34
790
34
    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
791
34
}
792
793
static struct notifier_block cpu_nfb = {
794
    .notifier_call = cpu_callback
795
};
796
797
static int __init cpupool_presmp_init(void)
798
1
{
799
1
    int err;
800
1
    void *cpu = (void *)(long)smp_processor_id();
801
1
    cpupool0 = cpupool_create(0, 0, &err);
802
1
    BUG_ON(cpupool0 == NULL);
803
1
    cpupool_put(cpupool0);
804
1
    cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
805
1
    register_cpu_notifier(&cpu_nfb);
806
1
    return 0;
807
1
}
808
presmp_initcall(cpupool_presmp_init);
809
810
/*
811
 * Local variables:
812
 * mode: C
813
 * c-file-style: "BSD"
814
 * c-basic-offset: 4
815
 * tab-width: 4
816
 * indent-tabs-mode: nil
817
 * End:
818
 */