Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/genapic/x2apic.c
Line  Count  Source
   1         /*
   2          * x2APIC driver.
   3          *
   4          * Copyright (c) 2008, Intel Corporation.
   5          *
   6          * This program is free software; you can redistribute it and/or modify it
   7          * under the terms and conditions of the GNU General Public License,
   8          * version 2, as published by the Free Software Foundation.
   9          *
  10          * This program is distributed in the hope it will be useful, but WITHOUT
  11          * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12          * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13          * more details.
  14          *
  15          * You should have received a copy of the GNU General Public License along with
  16          * this program; If not, see <http://www.gnu.org/licenses/>.
  17          */
  18
  19         #include <xen/init.h>
  20         #include <xen/cpu.h>
  21         #include <xen/cpumask.h>
  22         #include <asm/apicdef.h>
  23         #include <asm/genapic.h>
  24         #include <asm/apic.h>
  25         #include <asm/io_apic.h>
  26         #include <asm/msr.h>
  27         #include <asm/processor.h>
  28         #include <xen/smp.h>
  29         #include <asm/mach-default/mach_mpparse.h>
  30
  31         static DEFINE_PER_CPU_READ_MOSTLY(u32, cpu_2_logical_apicid);
  32         static DEFINE_PER_CPU_READ_MOSTLY(cpumask_t *, cluster_cpus);
  33         static cpumask_t *cluster_cpus_spare;
  34         static DEFINE_PER_CPU(cpumask_var_t, scratch_mask);
  35
  36         static inline u32 x2apic_cluster(unsigned int cpu)
  37     22  {
  38     22      return per_cpu(cpu_2_logical_apicid, cpu) >> 16;
  39     22  }
  40
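The >> 16 above relies on the x2APIC logical-ID layout: APIC_LDR holds the
cluster ID in bits 31:16 and a one-hot member bit in bits 15:0. A minimal
standalone sketch of that layout (not part of the instrumented file;
ldr_from_x2apic_id is a hypothetical helper following the Intel SDM formula):

    #include <stdint.h>
    #include <stdio.h>

    /* Logical x2APIC ID = (x2APIC ID[31:4] << 16) | (1 << x2APIC ID[3:0]) */
    static uint32_t ldr_from_x2apic_id(uint32_t id)
    {
        return ((id >> 4) << 16) | (1u << (id & 0xf));
    }

    int main(void)
    {
        uint32_t ldr = ldr_from_x2apic_id(0x21);  /* CPU with x2APIC ID 0x21 */
        printf("cluster=%u member-bit=%#x\n", ldr >> 16, ldr & 0xffff);
        /* prints: cluster=2 member-bit=0x2 */
        return 0;
    }
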
  41         static void init_apic_ldr_x2apic_phys(void)
  42      0  {
  43      0  }
  44
  45         static void init_apic_ldr_x2apic_cluster(void)
  46     13  {
  47     13      unsigned int cpu, this_cpu = smp_processor_id();
  48     13
  49     13      per_cpu(cpu_2_logical_apicid, this_cpu) = apic_read(APIC_LDR);
  50     13
  51     13      if ( per_cpu(cluster_cpus, this_cpu) )
  52      1      {
  53      1          ASSERT(cpumask_test_cpu(this_cpu, per_cpu(cluster_cpus, this_cpu)));
  54      1          return;
  55      1      }
  56     13
  57     12      per_cpu(cluster_cpus, this_cpu) = cluster_cpus_spare;
  58     12      for_each_online_cpu ( cpu )
  59     12      {
  60     12          if (this_cpu == cpu || x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
  61      1              continue;
  62     11          per_cpu(cluster_cpus, this_cpu) = per_cpu(cluster_cpus, cpu);
  63     11          break;
  64     12      }
  65     12      if ( per_cpu(cluster_cpus, this_cpu) == cluster_cpus_spare )
  66      1          cluster_cpus_spare = NULL;
  67     12
  68     12      cpumask_set_cpu(this_cpu, per_cpu(cluster_cpus, this_cpu));
  69     12  }
  70
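init_apic_ldr_x2apic_cluster() makes every CPU of a cluster share one
cpumask object: a booting CPU adopts a clustermate's mask if one is already
online, otherwise it consumes the preallocated spare. A toy model of that
bookkeeping (hypothetical names; mask_t stands in for cpumask_t):

    #include <stdbool.h>
    #include <stdint.h>

    #define NR_CPUS 8
    typedef struct { uint64_t bits; } mask_t;   /* stand-in for cpumask_t */

    static mask_t *cluster_of[NR_CPUS];         /* models cluster_cpus */
    static mask_t *spare;                       /* models cluster_cpus_spare */
    static unsigned int cluster_id[NR_CPUS];    /* cluster of each CPU's LDR */

    static void join_cluster(unsigned int self, const bool online[NR_CPUS])
    {
        mask_t *m = spare;

        for ( unsigned int cpu = 0; cpu < NR_CPUS; cpu++ )
            if ( online[cpu] && cpu != self &&
                 cluster_id[cpu] == cluster_id[self] )
            {
                m = cluster_of[cpu];            /* adopt the existing mask */
                break;
            }

        if ( m == spare )
            spare = NULL;                       /* consumed; refilled on the
                                                   next CPU_UP_PREPARE */
        cluster_of[self] = m;
        m->bits |= 1ull << self;                /* mark self as a member */
    }
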
  71         static void __init clustered_apic_check_x2apic(void)
  72      0  {
  73      0  }
  74
  75         static const cpumask_t *vector_allocation_cpumask_x2apic_cluster(int cpu)
  76     60  {
  77     60      return per_cpu(cluster_cpus, cpu);
  78     60  }
  79
  80         static unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask)
  81    128  {
  82    128      unsigned int cpu = cpumask_any(cpumask);
  83    128      unsigned int dest = per_cpu(cpu_2_logical_apicid, cpu);
  84    128      const cpumask_t *cluster_cpus = per_cpu(cluster_cpus, cpu);
  85    128
  86    128      for_each_cpu ( cpu, cluster_cpus )
  87  1.36k          if ( cpumask_test_cpu(cpu, cpumask) )
  88    634              dest |= per_cpu(cpu_2_logical_apicid, cpu);
  89    128
  90    128      return dest;
  91    128  }
  92
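The dest |= step works because all members of a cluster share the high
cluster bits and each owns a distinct low member bit, so OR-ing their
logical IDs yields one destination addressing them all. A self-contained
check of that property (values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t cpu_a = (2u << 16) | 0x1;  /* cluster 2, member bit 0 */
        uint32_t cpu_b = (2u << 16) | 0x4;  /* cluster 2, member bit 2 */
        uint32_t dest  = cpu_a | cpu_b;     /* what dest |= accumulates */

        assert(dest == ((2u << 16) | 0x5)); /* one destination, two CPUs */
        return 0;
    }
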
  93         static void send_IPI_self_x2apic(uint8_t vector)
  94      0  {
  95      0      apic_wrmsr(APIC_SELF_IPI, vector);
  96      0  }
  97
  98         static void send_IPI_mask_x2apic_phys(const cpumask_t *cpumask, int vector)
  99      0  {
 100      0      unsigned int cpu;
 101      0      unsigned long flags;
 102      0      uint64_t msr_content;
 103      0
 104      0      /*
 105      0       * Ensure that any synchronisation data written in program order by this
 106      0       * CPU is seen by notified remote CPUs. The WRMSR contained within
 107      0       * apic_icr_write() can otherwise be executed early.
 108      0       *
 109      0       * The reason mb() is sufficient here is subtle: the register arguments
 110      0       * to WRMSR must depend on a memory read executed after the barrier. This
 111      0       * is guaranteed by cpu_physical_id(), which reads from a global array (and
 112      0       * so cannot be hoisted above the barrier even by a clever compiler).
 113      0       */
 114      0      mb();
 115      0
 116      0      local_irq_save(flags);
 117      0
 118      0      for_each_cpu ( cpu, cpumask )
 119      0      {
 120      0          if ( !cpu_online(cpu) || (cpu == smp_processor_id()) )
 121      0              continue;
 122      0          msr_content = cpu_physical_id(cpu);
 123      0          msr_content = (msr_content << 32) | APIC_DM_FIXED |
 124      0                        APIC_DEST_PHYSICAL | vector;
 125      0          apic_wrmsr(APIC_ICR, msr_content);
 126      0      }
 127      0
 128      0      local_irq_restore(flags);
 129      0  }
 130
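The comment above describes a publish-then-kick ordering requirement. Its
generic shape, as a C11 sketch with hypothetical names (the fence plays the
role of mb(), the doorbell store the role of the ICR write):

    #include <stdatomic.h>

    static int payload;                  /* the "synchronisation data" */
    static atomic_int doorbell;          /* stands in for the IPI */

    static void sender(int value)
    {
        payload = value;                             /* 1: publish data */
        atomic_thread_fence(memory_order_seq_cst);   /* 2: the mb() analogue */
        atomic_store_explicit(&doorbell, 1,          /* 3: kick; cannot be */
                              memory_order_relaxed); /*    reordered before 1 */
    }

    static int receiver(void)
    {
        while ( !atomic_load_explicit(&doorbell, memory_order_acquire) )
            ;                                        /* wait for the "IPI" */
        return payload;                              /* sees sender's value */
    }
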
 131         static void send_IPI_mask_x2apic_cluster(const cpumask_t *cpumask, int vector)
 132   324k  {
 133   324k      unsigned int cpu = smp_processor_id();
 134   324k      cpumask_t *ipimask = per_cpu(scratch_mask, cpu);
 135   324k      const cpumask_t *cluster_cpus;
 136   324k      unsigned long flags;
 137   324k
 138   324k      mb(); /* See above for an explanation. */
 139   324k
 140   324k      local_irq_save(flags);
 141   324k
 142   324k      cpumask_andnot(ipimask, &cpu_online_map, cpumask_of(cpu));
 143   324k
 144   578k      for ( cpumask_and(ipimask, cpumask, ipimask); !cpumask_empty(ipimask);
 145   254k            cpumask_andnot(ipimask, ipimask, cluster_cpus) )
 146   254k      {
 147   254k          uint64_t msr_content = 0;
 148   254k
 149   254k          cluster_cpus = per_cpu(cluster_cpus, cpumask_first(ipimask));
 150   254k          for_each_cpu ( cpu, cluster_cpus )
 151  3.05M          {
 152  3.05M              if ( !cpumask_test_cpu(cpu, ipimask) )
 153   617k                  continue;
 154  2.43M              msr_content |= per_cpu(cpu_2_logical_apicid, cpu);
 155  2.43M          }
 156   254k
 157   254k          BUG_ON(!msr_content);
 158   254k          msr_content = (msr_content << 32) | APIC_DM_FIXED |
 159   254k                        APIC_DEST_LOGICAL | vector;
 160   254k          apic_wrmsr(APIC_ICR, msr_content);
 161   254k      }
 162   324k
 163   324k      local_irq_restore(flags);
 164   324k  }
 165
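The for loop above peels one cluster per iteration: take the first
remaining target, OR together the logical IDs of all targets in its
cluster, issue a single ICR write, then strip that whole cluster and
repeat. A toy version over a 64-bit mask (hypothetical helpers; assumes
the GCC/Clang builtin __builtin_ctzll):

    #include <stdint.h>
    #include <stdio.h>

    #define CLUSTER(cpu)      ((cpu) / 16)            /* 16 CPUs per cluster */
    #define CLUSTER_MASK(cpu) (0xffffull << (CLUSTER(cpu) * 16))

    static void send_to_mask(uint64_t targets)        /* bit n = CPU n */
    {
        while ( targets )
        {
            unsigned int first = __builtin_ctzll(targets);
            uint64_t batch = targets & CLUSTER_MASK(first);

            printf("ICR write: cluster %u, members %#llx\n",
                   CLUSTER(first), (unsigned long long)batch);
            targets &= ~CLUSTER_MASK(first);          /* the andnot() step */
        }
    }

    int main(void)
    {
        send_to_mask(0x30005);  /* CPUs 0, 2, 16, 17 -> two ICR writes */
        return 0;
    }
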
 166         static const struct genapic apic_x2apic_phys = {
 167             APIC_INIT("x2apic_phys", NULL),
 168             .int_delivery_mode = dest_Fixed,
 169             .int_dest_mode = 0 /* physical delivery */,
 170             .init_apic_ldr = init_apic_ldr_x2apic_phys,
 171             .clustered_apic_check = clustered_apic_check_x2apic,
 172             .target_cpus = target_cpus_all,
 173             .vector_allocation_cpumask = vector_allocation_cpumask_phys,
 174             .cpu_mask_to_apicid = cpu_mask_to_apicid_phys,
 175             .send_IPI_mask = send_IPI_mask_x2apic_phys,
 176             .send_IPI_self = send_IPI_self_x2apic
 177         };
 178
 179         static const struct genapic apic_x2apic_cluster = {
 180             APIC_INIT("x2apic_cluster", NULL),
 181             .int_delivery_mode = dest_LowestPrio,
 182             .int_dest_mode = 1 /* logical delivery */,
 183             .init_apic_ldr = init_apic_ldr_x2apic_cluster,
 184             .clustered_apic_check = clustered_apic_check_x2apic,
 185             .target_cpus = target_cpus_all,
 186             .vector_allocation_cpumask = vector_allocation_cpumask_x2apic_cluster,
 187             .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_cluster,
 188             .send_IPI_mask = send_IPI_mask_x2apic_cluster,
 189             .send_IPI_self = send_IPI_self_x2apic
 190         };
 191
 192         static int update_clusterinfo(
 193             struct notifier_block *nfb, unsigned long action, void *hcpu)
 194     34  {
 195     34      unsigned int cpu = (unsigned long)hcpu;
 196     34      int err = 0;
 197     34
 198     34      switch (action) {
 199     12      case CPU_UP_PREPARE:
 200     12          per_cpu(cpu_2_logical_apicid, cpu) = BAD_APICID;
 201     12          if ( !cluster_cpus_spare )
 202      2              cluster_cpus_spare = xzalloc(cpumask_t);
 203     12          if ( !cluster_cpus_spare ||
 204     12               !alloc_cpumask_var(&per_cpu(scratch_mask, cpu)) )
 205      0              err = -ENOMEM;
 206     12          break;
 207      0      case CPU_UP_CANCELED:
 208      0      case CPU_DEAD:
 209      0          if ( per_cpu(cluster_cpus, cpu) )
 210      0          {
 211      0              cpumask_clear_cpu(cpu, per_cpu(cluster_cpus, cpu));
 212      0              if ( cpumask_empty(per_cpu(cluster_cpus, cpu)) )
 213      0                  xfree(per_cpu(cluster_cpus, cpu));
 214      0          }
 215      0          free_cpumask_var(per_cpu(scratch_mask, cpu));
 216      0          break;
 217     34      }
 218     34
 219     34      return !err ? NOTIFY_DONE : notifier_from_errno(err);
 220     34  }
 221
 222         static struct notifier_block x2apic_cpu_nfb = {
 223            .notifier_call = update_clusterinfo
 224         };
 225
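update_clusterinfo() follows the usual hotplug-notifier protocol:
allocations happen in CPU_UP_PREPARE, where an error still vetoes the
bring-up cleanly, and are undone on CPU_UP_CANCELED or CPU_DEAD. A
skeleton of that protocol (hypothetical names, plain C in place of the
hypervisor's notifier types):

    #include <errno.h>
    #include <stdlib.h>

    enum { CPU_UP_PREPARE, CPU_UP_CANCELED, CPU_DEAD };
    #define NR_CPUS 64

    static void *percpu_buf[NR_CPUS];

    static int my_cpu_callback(unsigned long action, unsigned int cpu)
    {
        switch ( action )
        {
        case CPU_UP_PREPARE:       /* may fail; failure vetoes the bring-up */
            percpu_buf[cpu] = malloc(128);
            if ( !percpu_buf[cpu] )
                return -ENOMEM;
            break;
        case CPU_UP_CANCELED:      /* roll back a failed bring-up... */
        case CPU_DEAD:             /* ...or tear down after offlining */
            free(percpu_buf[cpu]);
            percpu_buf[cpu] = NULL;
            break;
        }
        return 0;
    }
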
 226         static s8 __initdata x2apic_phys = -1; /* By default we use logical cluster mode. */
 227         boolean_param("x2apic_phys", x2apic_phys);
 228
 229         const struct genapic *__init apic_x2apic_probe(void)
 230      1  {
 231      1      if ( x2apic_phys < 0 )
 232      1          x2apic_phys = !!(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL);
 233      1
 234      1      if ( x2apic_phys )
 235      0          return &apic_x2apic_phys;
 236      1
 237      1      if ( !this_cpu(cluster_cpus) )
 238      1      {
 239      1          update_clusterinfo(NULL, CPU_UP_PREPARE,
 240      1                             (void *)(long)smp_processor_id());
 241      1          init_apic_ldr_x2apic_cluster();
 242      1          register_cpu_notifier(&x2apic_cpu_nfb);
 243      1      }
 244      1
 245      1      return &apic_x2apic_cluster;
 246      1  }
 247
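apic_x2apic_probe() resolves the tri-state x2apic_phys (-1: unset, 0:
cluster, 1: physical) so that an explicit command-line choice always beats
the ACPI FADT hint. The precedence in isolation (hypothetical names):

    #include <stdbool.h>

    static signed char phys_opt = -1;   /* -1 until set on the command line */

    static bool choose_physical(bool fadt_hint)
    {
        if ( phys_opt < 0 )             /* no explicit user choice... */
            phys_opt = fadt_hint;       /* ...so defer to the firmware */
        return phys_opt;
    }
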
 248         void __init check_x2apic_preenabled(void)
 249      1  {
 250      1      u32 lo, hi;
 251      1
 252      1      if ( !cpu_has_x2apic )
 253      0          return;
 254      1
 255      1      /* Check whether x2apic mode was already enabled by the BIOS. */
 256      1      rdmsr(MSR_IA32_APICBASE, lo, hi);
 257      1      if ( lo & MSR_IA32_APICBASE_EXTD )
 258      0      {
 259      0          printk("x2APIC mode is already enabled by BIOS.\n");
 260      0          x2apic_enabled = 1;
 261      0          genapic = apic_x2apic_probe();
 262      0      }
 263      1  }
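The EXTD test reads IA32_APIC_BASE (MSR 0x1b), where bit 10 (EXTD) flags
x2APIC mode and bit 11 (EN) the global APIC enable. A standalone decode of
a raw MSR value (hypothetical helper; it also checks EN, which the Xen code
leaves implicit, since EXTD is only meaningful with the APIC enabled):

    #include <stdbool.h>
    #include <stdint.h>

    #define APICBASE_EN   (1ull << 11)  /* xAPIC globally enabled */
    #define APICBASE_EXTD (1ull << 10)  /* x2APIC mode enabled */

    static bool x2apic_preenabled(uint64_t apicbase)  /* raw MSR 0x1b value */
    {
        return (apicbase & (APICBASE_EN | APICBASE_EXTD)) ==
               (APICBASE_EN | APICBASE_EXTD);
    }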