Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/include/xen/sched-if.h
Line
Count
Source
1
/******************************************************************************
2
 * Additional declarations for the generic scheduler interface.  This should
3
 * only be included by files that implement conforming schedulers.
4
 *
5
 * Portions by Mark Williamson are (C) 2004 Intel Research Cambridge
6
 */
7
8
#ifndef __XEN_SCHED_IF_H__
9
#define __XEN_SCHED_IF_H__
10
11
#include <xen/percpu.h>
12
13
/* A global pointer to the initial cpupool (POOL0). */
14
extern struct cpupool *cpupool0;
15
16
/* cpus currently in no cpupool */
17
extern cpumask_t cpupool_free_cpus;
18
19
/* Scheduler generic parameters
20
 */
21
0
#define SCHED_DEFAULT_RATELIMIT_US 1000
22
extern int sched_ratelimit_us;
23
24
25
/*
26
 * In order to allow a scheduler to remap the lock->cpu mapping,
27
 * we have a per-cpu pointer, along with a pre-allocated set of
28
 * locks.  The generic scheduler init code will point each schedule lock
29
 * pointer at the pre-allocated lock; if a scheduler wants to remap them,
30
 * it can simply modify the schedule lock pointers.
31
 * 
32
 * For better cache locality, keep the actual lock in the same cache area
33
 * as the rest of the struct.  Just have the scheduler point to the
34
 * one it wants (this may be the one right in front of it). */
35
struct schedule_data {
36
    spinlock_t         *schedule_lock,
37
                       _lock;
38
    struct vcpu        *curr;           /* current task                    */
39
    void               *sched_priv;
40
    struct timer        s_timer;        /* scheduling timer                */
41
    atomic_t            urgent_count;   /* how many urgent vcpus           */
42
};
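
The comment above notes that a scheduler may repoint a pCPU's schedule lock at a lock of its own choosing. Purely as an illustration (the runqueue structure and function name below are hypothetical, not part of this header), such a remap could look roughly like the sketch below; real schedulers do this from their init_pdata/switch_sched hooks, taking care that nothing can still be spinning on the old lock:

/* Hypothetical scheduler-private runqueue, shared by several pCPUs. */
struct my_runqueue {
    spinlock_t lock;           /* serialises all pCPUs in this runqueue */
    /* ... other scheduler-private state ... */
};

static void my_attach_pcpu(struct my_runqueue *rqd, unsigned int cpu)
{
    /*
     * Point the pCPU's schedule lock at the runqueue lock; from now on the
     * generated {v,p}cpu_schedule_lock*() helpers serialise on &rqd->lock,
     * and the pre-allocated per-cpu _lock simply goes unused for this pCPU.
     */
    per_cpu(schedule_data, cpu).schedule_lock = &rqd->lock;
}
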
43
44
360
#define curr_on_cpu(c)    (per_cpu(schedule_data, c).curr)
45
46
DECLARE_PER_CPU(struct schedule_data, schedule_data);
47
DECLARE_PER_CPU(struct scheduler *, scheduler);
48
DECLARE_PER_CPU(struct cpupool *, cpupool);
49
50
/*
51
 * Scratch space, to avoid having too many cpumask_t on the stack.
52
 * Within each scheduler, when using the scratch mask of one pCPU:
53
 * - the pCPU must belong to the scheduler,
54
 * - the caller must own the per-pCPU scheduler lock (a.k.a. runqueue
55
 *   lock).
56
 */
57
DECLARE_PER_CPU(cpumask_t, cpumask_scratch);
58
1.03k
#define cpumask_scratch        (&this_cpu(cpumask_scratch))
59
970
#define cpumask_scratch_cpu(c) (&per_cpu(cpumask_scratch, c))
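
As a concrete illustration of the rules above, a helper along the following lines (the name is hypothetical; this is a sketch, not code from Xen, and it uses cpupool_domain_cpumask() defined further down) may use cpu's scratch mask only because its caller is assumed to hold cpu's scheduler lock and cpu is assumed to belong to the calling scheduler:

/* Does cpu belong to both v's hard affinity and v's cpupool? */
static int example_cpu_ok(const struct vcpu *v, unsigned int cpu)
{
    cpumask_t *scratch = cpumask_scratch_cpu(cpu);

    /* Safe only because we own cpu's scheduler (runqueue) lock. */
    cpumask_and(scratch, v->cpu_hard_affinity,
                cpupool_domain_cpumask(v->domain));

    return cpumask_test_cpu(cpu, scratch);
}
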
60
61
#define sched_lock(kind, param, cpu, irq, arg...) \
62
9.39M
static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
63
9.39M
{ \
64
9.39M
    for ( ; ; ) \
65
9.43M
    { \
66
9.43M
        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
67
9.43M
        /* \
68
9.43M
         * v->processor may change when grabbing the lock; but \
69
9.43M
         * per_cpu(v->processor) may also change, if changing cpu pool \
70
9.43M
         * also changes the scheduler lock.  Retry until they match. \
71
9.43M
         * \
72
9.43M
         * It may also be the case that v->processor may change but the \
73
9.43M
         * lock may be the same; this will succeed in that case. \
74
9.43M
         */ \
75
0
        spin_lock##irq(lock, ## arg); \
76
9.43M
        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
77
9.65M
            return lock; \
78
18.4E
        spin_unlock##irq(lock, ## arg); \
79
18.4E
    } \
80
9.39M
}
Unexecuted instantiation: cpupool.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: cpupool.c:pcpu_schedule_lock
Unexecuted instantiation: cpupool.c:pcpu_schedule_lock_irq
Unexecuted instantiation: cpupool.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: cpupool.c:vcpu_schedule_lock
Unexecuted instantiation: cpupool.c:vcpu_schedule_lock_irq
Unexecuted instantiation: domctl.c:vcpu_schedule_lock_irq
Unexecuted instantiation: domctl.c:pcpu_schedule_lock
Unexecuted instantiation: domctl.c:vcpu_schedule_lock
Unexecuted instantiation: domctl.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: domctl.c:pcpu_schedule_lock_irq
Unexecuted instantiation: domctl.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: domain.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: domain.c:vcpu_schedule_lock
Unexecuted instantiation: domain.c:vcpu_schedule_lock_irq
Unexecuted instantiation: domain.c:pcpu_schedule_lock_irq
Unexecuted instantiation: domain.c:pcpu_schedule_lock
Unexecuted instantiation: domain.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: sched_arinc653.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: sched_arinc653.c:pcpu_schedule_lock_irq
Unexecuted instantiation: sched_arinc653.c:vcpu_schedule_lock_irq
Unexecuted instantiation: sched_arinc653.c:vcpu_schedule_lock
Unexecuted instantiation: sched_arinc653.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: sched_arinc653.c:pcpu_schedule_lock
sched_credit.c:vcpu_schedule_lock_irqsave
Line
Count
Source
62
10.2k
static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
63
10.2k
{ \
64
10.2k
    for ( ; ; ) \
65
10.2k
    { \
66
10.2k
        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
67
10.2k
        /* \
68
10.2k
         * v->processor may change when grabbing the lock; but \
69
10.2k
         * per_cpu(v->processor) may also change, if changing cpu pool \
70
10.2k
         * also changes the scheduler lock.  Retry until they match. \
71
10.2k
         * \
72
10.2k
         * It may also be the case that v->processor may change but the \
73
10.2k
         * lock may be the same; this will succeed in that case. \
74
10.2k
         */ \
75
10.2k
        spin_lock##irq(lock, ## arg); \
76
10.2k
        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
77
10.3k
            return lock; \
78
18.4E
        spin_unlock##irq(lock, ## arg); \
79
18.4E
    } \
80
10.2k
}
sched_credit.c:pcpu_schedule_lock_irqsave
Line
Count
Source
62
4.90k
static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
63
4.90k
{ \
64
4.90k
    for ( ; ; ) \
65
4.94k
    { \
66
4.94k
        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
67
4.94k
        /* \
68
4.94k
         * v->processor may change when grabbing the lock; but \
69
4.94k
         * per_cpu(v->processor) may also change, if changing cpu pool \
70
4.94k
         * also changes the scheduler lock.  Retry until they match. \
71
4.94k
         * \
72
4.94k
         * It may also be the case that v->processor may change but the \
73
4.94k
         * lock may be the same; this will succeed in that case. \
74
4.94k
         */ \
75
4.94k
        spin_lock##irq(lock, ## arg); \
76
4.94k
        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
77
4.81k
            return lock; \
78
131
        spin_unlock##irq(lock, ## arg); \
79
131
    } \
80
4.90k
}
sched_credit.c:vcpu_schedule_lock_irq
Line
Count
Source
62
24
static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
63
24
{ \
64
24
    for ( ; ; ) \
65
24
    { \
66
24
        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
67
24
        /* \
68
24
         * v->processor may change when grabbing the lock; but \
69
24
         * per_cpu(v->processor) may also change, if changing cpu pool \
70
24
         * also changes the scheduler lock.  Retry until they match. \
71
24
         * \
72
24
         * It may also be the case that v->processor may change but the \
73
24
         * lock may be the same; this will succeed in that case. \
74
24
         */ \
75
24
        spin_lock##irq(lock, ## arg); \
76
24
        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
77
24
            return lock; \
78
0
        spin_unlock##irq(lock, ## arg); \
79
0
    } \
80
24
}
Unexecuted instantiation: sched_credit.c:vcpu_schedule_lock
Unexecuted instantiation: sched_credit.c:pcpu_schedule_lock
Unexecuted instantiation: sched_credit.c:pcpu_schedule_lock_irq
Unexecuted instantiation: sched_credit2.c:pcpu_schedule_lock
Unexecuted instantiation: sched_credit2.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: sched_credit2.c:vcpu_schedule_lock_irq
Unexecuted instantiation: sched_credit2.c:vcpu_schedule_lock
Unexecuted instantiation: sched_credit2.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: sched_credit2.c:pcpu_schedule_lock_irq
Unexecuted instantiation: sched_rt.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: sched_rt.c:vcpu_schedule_lock_irq
Unexecuted instantiation: sched_rt.c:vcpu_schedule_lock
Unexecuted instantiation: sched_rt.c:pcpu_schedule_lock_irq
Unexecuted instantiation: sched_rt.c:pcpu_schedule_lock
Unexecuted instantiation: sched_rt.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: sched_null.c:vcpu_schedule_lock_irq
Unexecuted instantiation: sched_null.c:vcpu_schedule_lock
Unexecuted instantiation: sched_null.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: sched_null.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: sched_null.c:pcpu_schedule_lock
Unexecuted instantiation: sched_null.c:pcpu_schedule_lock_irq
schedule.c:vcpu_schedule_lock_irq
Line
Count
Source
62
4.54M
static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
63
4.54M
{ \
64
4.54M
    for ( ; ; ) \
65
4.55M
    { \
66
4.55M
        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
67
4.55M
        /* \
68
4.55M
         * v->processor may change when grabbing the lock; but \
69
4.55M
         * per_cpu(v->processor) may also change, if changing cpu pool \
70
4.55M
         * also changes the scheduler lock.  Retry until they match. \
71
4.55M
         * \
72
4.55M
         * It may also be the case that v->processor may change but the \
73
4.55M
         * lock may be the same; this will succeed in that case. \
74
4.55M
         */ \
75
4.55M
        spin_lock##irq(lock, ## arg); \
76
4.55M
        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
77
4.71M
            return lock; \
78
18.4E
        spin_unlock##irq(lock, ## arg); \
79
18.4E
    } \
80
4.54M
}
schedule.c:vcpu_schedule_lock_irqsave
Line
Count
Source
62
67.6k
static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
63
67.6k
{ \
64
67.6k
    for ( ; ; ) \
65
67.6k
    { \
66
67.6k
        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
67
67.6k
        /* \
68
67.6k
         * v->processor may change when grabbing the lock; but \
69
67.6k
         * per_cpu(v->processor) may also change, if changing cpu pool \
70
67.6k
         * also changes the scheduler lock.  Retry until they match. \
71
67.6k
         * \
72
67.6k
         * It may also be the case that v->processor may change but the \
73
67.6k
         * lock may be the same; this will succeed in that case. \
74
67.6k
         */ \
75
67.6k
        spin_lock##irq(lock, ## arg); \
76
67.6k
        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
77
67.6k
            return lock; \
78
18.4E
        spin_unlock##irq(lock, ## arg); \
79
18.4E
    } \
80
67.6k
}
schedule.c:pcpu_schedule_lock_irq
Line
Count
Source
62
4.76M
static inline spinlock_t *kind##_schedule_lock##irq(param EXTRA_TYPE(arg)) \
63
4.76M
{ \
64
4.76M
    for ( ; ; ) \
65
4.79M
    { \
66
4.79M
        spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock; \
67
4.79M
        /* \
68
4.79M
         * v->processor may change when grabbing the lock; but \
69
4.79M
         * per_cpu(v->processor) may also change, if changing cpu pool \
70
4.79M
         * also changes the scheduler lock.  Retry until they match. \
71
4.79M
         * \
72
4.79M
         * It may also be the case that v->processor may change but the \
73
4.79M
         * lock may be the same; this will succeed in that case. \
74
4.79M
         */ \
75
4.79M
        spin_lock##irq(lock, ## arg); \
76
4.79M
        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) ) \
77
4.85M
            return lock; \
78
18.4E
        spin_unlock##irq(lock, ## arg); \
79
18.4E
    } \
80
4.76M
}
Unexecuted instantiation: schedule.c:pcpu_schedule_lock
Unexecuted instantiation: schedule.c:vcpu_schedule_lock
Unexecuted instantiation: schedule.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: setup.c:pcpu_schedule_lock_irq
Unexecuted instantiation: setup.c:vcpu_schedule_lock_irq
Unexecuted instantiation: setup.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: setup.c:vcpu_schedule_lock
Unexecuted instantiation: setup.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: setup.c:pcpu_schedule_lock
Unexecuted instantiation: smpboot.c:vcpu_schedule_lock
Unexecuted instantiation: smpboot.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: smpboot.c:pcpu_schedule_lock_irq
Unexecuted instantiation: smpboot.c:pcpu_schedule_lock
Unexecuted instantiation: smpboot.c:vcpu_schedule_lock_irq
Unexecuted instantiation: smpboot.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: cpu_idle.c:pcpu_schedule_lock_irq
Unexecuted instantiation: cpu_idle.c:vcpu_schedule_lock_irq
Unexecuted instantiation: cpu_idle.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: cpu_idle.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: cpu_idle.c:pcpu_schedule_lock
Unexecuted instantiation: cpu_idle.c:vcpu_schedule_lock
Unexecuted instantiation: mwait-idle.c:pcpu_schedule_lock
Unexecuted instantiation: mwait-idle.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: mwait-idle.c:vcpu_schedule_lock_irq
Unexecuted instantiation: mwait-idle.c:pcpu_schedule_lock_irq
Unexecuted instantiation: mwait-idle.c:vcpu_schedule_lock
Unexecuted instantiation: mwait-idle.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: mctelem.c:pcpu_schedule_lock_irq
Unexecuted instantiation: mctelem.c:vcpu_schedule_lock
Unexecuted instantiation: mctelem.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: mctelem.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: mctelem.c:vcpu_schedule_lock_irq
Unexecuted instantiation: mctelem.c:pcpu_schedule_lock
Unexecuted instantiation: mce.c:vcpu_schedule_lock
Unexecuted instantiation: mce.c:vcpu_schedule_lock_irqsave
Unexecuted instantiation: mce.c:pcpu_schedule_lock
Unexecuted instantiation: mce.c:pcpu_schedule_lock_irq
Unexecuted instantiation: mce.c:pcpu_schedule_lock_irqsave
Unexecuted instantiation: mce.c:vcpu_schedule_lock_irq
81
82
#define sched_unlock(kind, param, cpu, irq, arg...) \
83
static inline void kind##_schedule_unlock##irq(spinlock_t *lock \
84
9.68M
                                               EXTRA_TYPE(arg), param) \
85
9.68M
{ \
86
9.68M
    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
87
2.39k
    spin_unlock##irq(lock, ## arg); \
88
9.68M
}
Unexecuted instantiation: cpupool.c:pcpu_schedule_unlock
Unexecuted instantiation: sched_null.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: mce.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: mce.c:pcpu_schedule_unlock
Unexecuted instantiation: mce.c:vcpu_schedule_unlock
Unexecuted instantiation: mce.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: mce.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: mce.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: mctelem.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: mctelem.c:pcpu_schedule_unlock
Unexecuted instantiation: mctelem.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: mctelem.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: mctelem.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: mctelem.c:vcpu_schedule_unlock
Unexecuted instantiation: mwait-idle.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: mwait-idle.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: mwait-idle.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: mwait-idle.c:pcpu_schedule_unlock
Unexecuted instantiation: cpu_idle.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: cpu_idle.c:vcpu_schedule_unlock
Unexecuted instantiation: cpu_idle.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: cpu_idle.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: cpu_idle.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: cpu_idle.c:pcpu_schedule_unlock
Unexecuted instantiation: smpboot.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: smpboot.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: smpboot.c:vcpu_schedule_unlock
Unexecuted instantiation: smpboot.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: smpboot.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: smpboot.c:pcpu_schedule_unlock
Unexecuted instantiation: mwait-idle.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: cpupool.c:vcpu_schedule_unlock
Unexecuted instantiation: cpupool.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: cpupool.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: cpupool.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: cpupool.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: domctl.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: domctl.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: domctl.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: domctl.c:pcpu_schedule_unlock
Unexecuted instantiation: domctl.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: domctl.c:vcpu_schedule_unlock
Unexecuted instantiation: domain.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: domain.c:pcpu_schedule_unlock
Unexecuted instantiation: domain.c:vcpu_schedule_unlock
Unexecuted instantiation: domain.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: domain.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: domain.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: sched_arinc653.c:vcpu_schedule_unlock
Unexecuted instantiation: sched_arinc653.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: sched_arinc653.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: sched_arinc653.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: sched_arinc653.c:pcpu_schedule_unlock
Unexecuted instantiation: sched_arinc653.c:vcpu_schedule_unlock_irq
sched_credit.c:vcpu_schedule_unlock_irqrestore
Line
Count
Source
84
9.82k
                                               EXTRA_TYPE(arg), param) \
85
9.82k
{ \
86
9.82k
    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
87
9.82k
    spin_unlock##irq(lock, ## arg); \
88
9.82k
}
sched_credit.c:pcpu_schedule_unlock_irqrestore
Line
Count
Source
84
4.83k
                                               EXTRA_TYPE(arg), param) \
85
4.83k
{ \
86
4.83k
    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
87
4.83k
    spin_unlock##irq(lock, ## arg); \
88
4.83k
}
sched_credit.c:vcpu_schedule_unlock_irq
Line
Count
Source
84
12
                                               EXTRA_TYPE(arg), param) \
85
12
{ \
86
12
    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
87
12
    spin_unlock##irq(lock, ## arg); \
88
12
}
sched_credit.c:pcpu_schedule_unlock
Line
Count
Source
84
2.39k
                                               EXTRA_TYPE(arg), param) \
85
2.39k
{ \
86
2.39k
    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
87
2.39k
    spin_unlock##irq(lock, ## arg); \
88
2.39k
}
Unexecuted instantiation: sched_credit.c:vcpu_schedule_unlock
Unexecuted instantiation: sched_credit.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: sched_credit2.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: sched_credit2.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: sched_credit2.c:vcpu_schedule_unlock
Unexecuted instantiation: sched_credit2.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: sched_credit2.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: sched_credit2.c:pcpu_schedule_unlock
Unexecuted instantiation: sched_rt.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: sched_rt.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: sched_rt.c:pcpu_schedule_unlock
Unexecuted instantiation: sched_rt.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: sched_rt.c:vcpu_schedule_unlock
Unexecuted instantiation: sched_rt.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: sched_null.c:vcpu_schedule_unlock_irq
Unexecuted instantiation: sched_null.c:vcpu_schedule_unlock
Unexecuted instantiation: sched_null.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: mwait-idle.c:vcpu_schedule_unlock
Unexecuted instantiation: sched_null.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: sched_null.c:pcpu_schedule_unlock
schedule.c:vcpu_schedule_unlock_irq
Line
Count
Source
84
4.92M
                                               EXTRA_TYPE(arg), param) \
85
4.92M
{ \
86
4.92M
    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
87
4.92M
    spin_unlock##irq(lock, ## arg); \
88
4.92M
}
schedule.c:vcpu_schedule_unlock_irqrestore
Line
Count
Source
84
67.6k
                                               EXTRA_TYPE(arg), param) \
85
67.6k
{ \
86
67.6k
    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
87
67.6k
    spin_unlock##irq(lock, ## arg); \
88
67.6k
}
schedule.c:pcpu_schedule_unlock_irq
Line
Count
Source
84
4.68M
                                               EXTRA_TYPE(arg), param) \
85
4.68M
{ \
86
4.68M
    ASSERT(lock == per_cpu(schedule_data, cpu).schedule_lock); \
87
4.68M
    spin_unlock##irq(lock, ## arg); \
88
4.68M
}
Unexecuted instantiation: schedule.c:pcpu_schedule_unlock
Unexecuted instantiation: schedule.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: schedule.c:vcpu_schedule_unlock
Unexecuted instantiation: setup.c:vcpu_schedule_unlock
Unexecuted instantiation: setup.c:vcpu_schedule_unlock_irqrestore
Unexecuted instantiation: setup.c:pcpu_schedule_unlock_irq
Unexecuted instantiation: setup.c:pcpu_schedule_unlock_irqrestore
Unexecuted instantiation: setup.c:pcpu_schedule_unlock
Unexecuted instantiation: setup.c:vcpu_schedule_unlock_irq
89
90
#define EXTRA_TYPE(arg)
91
sched_lock(pcpu, unsigned int cpu,     cpu, )
92
sched_lock(vcpu, const struct vcpu *v, v->processor, )
93
sched_lock(pcpu, unsigned int cpu,     cpu,          _irq)
94
sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)
95
sched_unlock(pcpu, unsigned int cpu,     cpu, )
96
sched_unlock(vcpu, const struct vcpu *v, v->processor, )
97
sched_unlock(pcpu, unsigned int cpu,     cpu,          _irq)
98
sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)
99
#undef EXTRA_TYPE
100
101
#define EXTRA_TYPE(arg) , unsigned long arg
102
55
#define spin_unlock_irqsave spin_unlock_irqrestore
103
sched_lock(pcpu, unsigned int cpu,     cpu,          _irqsave, *flags)
104
sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)
105
#undef spin_unlock_irqsave
106
sched_unlock(pcpu, unsigned int cpu,     cpu,          _irqrestore, flags)
107
sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)
108
#undef EXTRA_TYPE
109
110
#undef sched_unlock
111
#undef sched_lock
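
The sched_lock/sched_unlock invocations above generate the {v,p}cpu_schedule_lock*() and *_schedule_unlock*() helpers whose per-file coverage appears earlier in this report. A typical caller (a hypothetical sketch, not taken from this header) looks like the following; the pointer returned by the lock helper is the one that must be handed back to the matching unlock helper, because the lock mapping may have changed while spinning:

static void example_poke_vcpu(struct vcpu *v)
{
    /* Disables IRQs and returns the lock that was actually acquired. */
    spinlock_t *lock = vcpu_schedule_lock_irq(v);

    /* v->processor and its scheduler lock are stable while held. */
    /* ... inspect or modify v's scheduling state here ... */

    vcpu_schedule_unlock_irq(lock, v);
}
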
112
113
static inline spinlock_t *pcpu_schedule_trylock(unsigned int cpu)
114
3.42k
{
115
3.42k
    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
116
3.42k
117
3.42k
    if ( !spin_trylock(lock) )
118
1.03k
        return NULL;
119
2.38k
    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
120
2.39k
        return lock;
121
18.4E
    spin_unlock(lock);
122
18.4E
    return NULL;
123
2.38k
}
Unexecuted instantiation: mce.c:pcpu_schedule_trylock
Unexecuted instantiation: mctelem.c:pcpu_schedule_trylock
Unexecuted instantiation: mwait-idle.c:pcpu_schedule_trylock
Unexecuted instantiation: cpu_idle.c:pcpu_schedule_trylock
Unexecuted instantiation: smpboot.c:pcpu_schedule_trylock
Unexecuted instantiation: setup.c:pcpu_schedule_trylock
Unexecuted instantiation: schedule.c:pcpu_schedule_trylock
Unexecuted instantiation: sched_null.c:pcpu_schedule_trylock
Unexecuted instantiation: sched_rt.c:pcpu_schedule_trylock
Unexecuted instantiation: sched_credit2.c:pcpu_schedule_trylock
sched_credit.c:pcpu_schedule_trylock
Line
Count
Source
114
3.42k
{
115
3.42k
    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;
116
3.42k
117
3.42k
    if ( !spin_trylock(lock) )
118
1.03k
        return NULL;
119
2.38k
    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
120
2.39k
        return lock;
121
18.4E
    spin_unlock(lock);
122
18.4E
    return NULL;
123
2.38k
}
Unexecuted instantiation: sched_arinc653.c:pcpu_schedule_trylock
Unexecuted instantiation: domain.c:pcpu_schedule_trylock
Unexecuted instantiation: domctl.c:pcpu_schedule_trylock
Unexecuted instantiation: cpupool.c:pcpu_schedule_trylock
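
pcpu_schedule_trylock() returns NULL both when the lock is contended and when the mapping changed underneath it, so callers are expected to simply skip the pCPU in that case. A sketched usage pattern (the function name is hypothetical):

static void example_try_poke(unsigned int cpu)
{
    spinlock_t *lock = pcpu_schedule_trylock(cpu);

    if ( lock == NULL )
        return;                 /* contended or remapped: skip this pCPU */

    /* ... work on cpu's scheduling state ... */

    pcpu_schedule_unlock(lock, cpu);
}
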
124
125
struct task_slice {
126
    struct vcpu *task;
127
    s_time_t     time;
128
    bool_t       migrated;
129
};
130
131
struct scheduler {
132
    char *name;             /* full name for this scheduler      */
133
    char *opt_name;         /* option name for this scheduler    */
134
    unsigned int sched_id;  /* ID for this scheduler             */
135
    void *sched_data;       /* global data pointer               */
136
137
    int          (*global_init)    (void);
138
139
    int          (*init)           (struct scheduler *);
140
    void         (*deinit)         (struct scheduler *);
141
142
    void         (*free_vdata)     (const struct scheduler *, void *);
143
    void *       (*alloc_vdata)    (const struct scheduler *, struct vcpu *,
144
                                    void *);
145
    void         (*free_pdata)     (const struct scheduler *, void *, int);
146
    void *       (*alloc_pdata)    (const struct scheduler *, int);
147
    void         (*init_pdata)     (const struct scheduler *, void *, int);
148
    void         (*deinit_pdata)   (const struct scheduler *, void *, int);
149
    void         (*free_domdata)   (const struct scheduler *, void *);
150
    void *       (*alloc_domdata)  (const struct scheduler *, struct domain *);
151
152
    void         (*switch_sched)   (struct scheduler *, unsigned int,
153
                                    void *, void *);
154
155
    int          (*init_domain)    (const struct scheduler *, struct domain *);
156
    void         (*destroy_domain) (const struct scheduler *, struct domain *);
157
158
    /* Activate / deactivate vcpus in a cpu pool */
159
    void         (*insert_vcpu)    (const struct scheduler *, struct vcpu *);
160
    void         (*remove_vcpu)    (const struct scheduler *, struct vcpu *);
161
162
    void         (*sleep)          (const struct scheduler *, struct vcpu *);
163
    void         (*wake)           (const struct scheduler *, struct vcpu *);
164
    void         (*yield)          (const struct scheduler *, struct vcpu *);
165
    void         (*context_saved)  (const struct scheduler *, struct vcpu *);
166
167
    struct task_slice (*do_schedule) (const struct scheduler *, s_time_t,
168
                                      bool_t tasklet_work_scheduled);
169
170
    int          (*pick_cpu)       (const struct scheduler *, struct vcpu *);
171
    void         (*migrate)        (const struct scheduler *, struct vcpu *,
172
                                    unsigned int);
173
    int          (*adjust)         (const struct scheduler *, struct domain *,
174
                                    struct xen_domctl_scheduler_op *);
175
    int          (*adjust_global)  (const struct scheduler *,
176
                                    struct xen_sysctl_scheduler_op *);
177
    void         (*dump_settings)  (const struct scheduler *);
178
    void         (*dump_cpu_state) (const struct scheduler *, int);
179
180
    void         (*tick_suspend)    (const struct scheduler *, unsigned int);
181
    void         (*tick_resume)     (const struct scheduler *, unsigned int);
182
};
183
184
#define REGISTER_SCHEDULER(x) static const struct scheduler *x##_entry \
185
  __used_section(".data.schedulers") = &x;
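
struct scheduler above is the hook table a conforming scheduler fills in, and REGISTER_SCHEDULER() drops a pointer to it into the .data.schedulers section so the generic code can enumerate it. A minimal skeleton (all names and the sched_id value below are hypothetical; only the hook signatures come from this header) might look like:

static struct task_slice example_do_schedule(const struct scheduler *ops,
                                             s_time_t now,
                                             bool_t tasklet_work_scheduled)
{
    struct task_slice ret;

    /* ... choose the next vcpu to run and for how long ... */
    ret.task = NULL;      /* a real scheduler must return a runnable vcpu */
    ret.time = -1;        /* or a positive time slice */
    ret.migrated = 0;

    return ret;
}

static const struct scheduler sched_example_def = {
    .name        = "Example Scheduler",
    .opt_name    = "example",
    .sched_id    = 99,                  /* hypothetical ID */
    .do_schedule = example_do_schedule,
    /* ... remaining hooks filled in as needed ... */
};
REGISTER_SCHEDULER(sched_example_def);
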
186
187
struct cpupool
188
{
189
    int              cpupool_id;
190
    cpumask_var_t    cpu_valid;      /* all cpus assigned to pool */
191
    cpumask_var_t    cpu_suspended;  /* cpus in S3 that should be in this pool */
192
    struct cpupool   *next;
193
    unsigned int     n_dom;
194
    struct scheduler *sched;
195
    atomic_t         refcnt;
196
};
197
198
#define cpupool_online_cpumask(_pool) \
199
385k
    (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
200
201
static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
202
88.0k
{
203
88.0k
    /*
204
88.0k
     * d->cpupool is NULL only for the idle domain, and no one should
205
88.0k
     * be interested in calling this for the idle domain.
206
88.0k
     */
207
88.0k
    ASSERT(d->cpupool != NULL);
208
88.0k
    return d->cpupool->cpu_valid;
209
88.0k
}
Unexecuted instantiation: domctl.c:cpupool_domain_cpumask
Unexecuted instantiation: cpupool.c:cpupool_domain_cpumask
Unexecuted instantiation: mce.c:cpupool_domain_cpumask
Unexecuted instantiation: mctelem.c:cpupool_domain_cpumask
Unexecuted instantiation: mwait-idle.c:cpupool_domain_cpumask
Unexecuted instantiation: cpu_idle.c:cpupool_domain_cpumask
Unexecuted instantiation: smpboot.c:cpupool_domain_cpumask
Unexecuted instantiation: setup.c:cpupool_domain_cpumask
Unexecuted instantiation: schedule.c:cpupool_domain_cpumask
Unexecuted instantiation: sched_null.c:cpupool_domain_cpumask
Unexecuted instantiation: sched_rt.c:cpupool_domain_cpumask
Unexecuted instantiation: sched_credit2.c:cpupool_domain_cpumask
sched_credit.c:cpupool_domain_cpumask
Line
Count
Source
202
88.0k
{
203
88.0k
    /*
204
88.0k
     * d->cpupool is NULL only for the idle domain, and no one should
205
88.0k
     * be interested in calling this for the idle domain.
206
88.0k
     */
207
88.0k
    ASSERT(d->cpupool != NULL);
208
88.0k
    return d->cpupool->cpu_valid;
209
88.0k
}
Unexecuted instantiation: sched_arinc653.c:cpupool_domain_cpumask
domain.c:cpupool_domain_cpumask
Line
Count
Source
202
12
{
203
12
    /*
204
12
     * d->cpupool is NULL only for the idle domain, and no one should
205
12
     * be interested in calling this for the idle domain.
206
12
     */
207
12
    ASSERT(d->cpupool != NULL);
208
12
    return d->cpupool->cpu_valid;
209
12
}
210
211
/*
212
 * Hard and soft affinity load balancing.
213
 *
214
 * The idea is that each vcpu has some pcpus that it prefers, some that it does not
215
 * prefer but is OK with, and some that it cannot run on at all. The first
216
 * set of pcpus are the ones that are both in the soft affinity *and* in the
217
 * hard affinity; the second set of pcpus are the ones that are in the hard
218
 * affinity but *not* in the soft affinity; the third set of pcpus are the
219
 * ones that are not in the hard affinity.
220
 *
221
 * We implement a two-step balancing logic. Basically, every time there is
222
 * the need to decide where to run a vcpu, we first check the soft affinity
223
 * (well, actually, the && between soft and hard affinity), to see if we can
224
 * send it where it prefers to (and can) run. However, if the first step
225
 * does not find any suitable and free pcpu, we fall back to checking the hard
226
 * affinity.
227
 */
228
32.4k
#define BALANCE_SOFT_AFFINITY    0
229
1.16M
#define BALANCE_HARD_AFFINITY    1
230
231
#define for_each_affinity_balance_step(step) \
232
1.16M
    for ( (step) = 0; (step) <= BALANCE_HARD_AFFINITY; (step)++ )
233
234
/*
235
 * Hard affinity balancing is always necessary and must never be skipped.
236
 * But soft affinity need only be considered when it has a functionally
237
 * different effect than other constraints (such as hard affinity, cpus
238
 * online, or cpupools).
239
 *
240
 * Soft affinity only needs to be considered if all of the following hold:
241
 * * The cpus in the cpupool are not a subset of soft affinity
242
 * * The hard affinity is not a subset of soft affinity
243
 * * There is an overlap between the soft affinity and the mask which is
244
 *   currently being considered.
245
 */
246
static inline int has_soft_affinity(const struct vcpu *v,
247
                                    const cpumask_t *mask)
248
10.9k
{
249
10.9k
    return !cpumask_subset(cpupool_domain_cpumask(v->domain),
250
10.9k
                           v->cpu_soft_affinity) &&
251
0
           !cpumask_subset(v->cpu_hard_affinity, v->cpu_soft_affinity) &&
252
0
           cpumask_intersects(v->cpu_soft_affinity, mask);
253
10.9k
}
Unexecuted instantiation: cpupool.c:has_soft_affinity
Unexecuted instantiation: domctl.c:has_soft_affinity
Unexecuted instantiation: domain.c:has_soft_affinity
Unexecuted instantiation: sched_arinc653.c:has_soft_affinity
sched_credit.c:has_soft_affinity
Line
Count
Source
248
10.9k
{
249
10.9k
    return !cpumask_subset(cpupool_domain_cpumask(v->domain),
250
10.9k
                           v->cpu_soft_affinity) &&
251
0
           !cpumask_subset(v->cpu_hard_affinity, v->cpu_soft_affinity) &&
252
0
           cpumask_intersects(v->cpu_soft_affinity, mask);
253
10.9k
}
Unexecuted instantiation: sched_credit2.c:has_soft_affinity
Unexecuted instantiation: sched_rt.c:has_soft_affinity
Unexecuted instantiation: sched_null.c:has_soft_affinity
Unexecuted instantiation: schedule.c:has_soft_affinity
Unexecuted instantiation: setup.c:has_soft_affinity
Unexecuted instantiation: smpboot.c:has_soft_affinity
Unexecuted instantiation: cpu_idle.c:has_soft_affinity
Unexecuted instantiation: mwait-idle.c:has_soft_affinity
Unexecuted instantiation: mctelem.c:has_soft_affinity
Unexecuted instantiation: mce.c:has_soft_affinity
254
255
/*
256
 * This function copies into mask the cpumask that should be used for a
257
 * particular affinity balancing step. For the soft affinity one, the pcpus
258
 * that are not part of v's hard affinity are filtered out of the result,
259
 * to avoid running a vcpu where it would like to run but is not allowed to.
260
 */
261
static inline void
262
affinity_balance_cpumask(const struct vcpu *v, int step, cpumask_t *mask)
263
10.8k
{
264
10.8k
    if ( step == BALANCE_SOFT_AFFINITY )
265
0
    {
266
0
        cpumask_and(mask, v->cpu_soft_affinity, v->cpu_hard_affinity);
267
0
268
0
        if ( unlikely(cpumask_empty(mask)) )
269
0
            cpumask_copy(mask, v->cpu_hard_affinity);
270
0
    }
271
10.8k
    else /* step == BALANCE_HARD_AFFINITY */
272
10.8k
        cpumask_copy(mask, v->cpu_hard_affinity);
273
10.8k
}
Unexecuted instantiation: cpupool.c:affinity_balance_cpumask
Unexecuted instantiation: domctl.c:affinity_balance_cpumask
Unexecuted instantiation: domain.c:affinity_balance_cpumask
Unexecuted instantiation: sched_arinc653.c:affinity_balance_cpumask
sched_credit.c:affinity_balance_cpumask
Line
Count
Source
263
10.8k
{
264
10.8k
    if ( step == BALANCE_SOFT_AFFINITY )
265
0
    {
266
0
        cpumask_and(mask, v->cpu_soft_affinity, v->cpu_hard_affinity);
267
0
268
0
        if ( unlikely(cpumask_empty(mask)) )
269
0
            cpumask_copy(mask, v->cpu_hard_affinity);
270
0
    }
271
10.8k
    else /* step == BALANCE_HARD_AFFINITY */
272
10.8k
        cpumask_copy(mask, v->cpu_hard_affinity);
273
10.8k
}
Unexecuted instantiation: sched_credit2.c:affinity_balance_cpumask
Unexecuted instantiation: sched_rt.c:affinity_balance_cpumask
Unexecuted instantiation: sched_null.c:affinity_balance_cpumask
Unexecuted instantiation: schedule.c:affinity_balance_cpumask
Unexecuted instantiation: setup.c:affinity_balance_cpumask
Unexecuted instantiation: smpboot.c:affinity_balance_cpumask
Unexecuted instantiation: cpu_idle.c:affinity_balance_cpumask
Unexecuted instantiation: mwait-idle.c:affinity_balance_cpumask
Unexecuted instantiation: mctelem.c:affinity_balance_cpumask
Unexecuted instantiation: mce.c:affinity_balance_cpumask
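
Putting the affinity pieces together: the two-step logic described in the comment further up is normally expressed by combining for_each_affinity_balance_step(), has_soft_affinity() and affinity_balance_cpumask(). The following is a simplified sketch (the function name and the final pcpu choice are hypothetical, not how any in-tree scheduler actually picks a cpu):

static int example_pick_cpu(const struct vcpu *v, cpumask_t *mask)
{
    int balance_step;

    for_each_affinity_balance_step( balance_step )
    {
        /* Skip the soft step when soft affinity adds no information. */
        if ( balance_step == BALANCE_SOFT_AFFINITY &&
             !has_soft_affinity(v, v->cpu_hard_affinity) )
            continue;

        affinity_balance_cpumask(v, balance_step, mask);
        cpumask_and(mask, mask, cpupool_domain_cpumask(v->domain));

        if ( !cpumask_empty(mask) )
            return cpumask_first(mask); /* simplistic choice for the sketch */
    }

    return -1; /* unreachable with a sane hard affinity */
}
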
274
275
#endif /* __XEN_SCHED_IF_H__ */