/root/src/xen/xen/arch/x86/smp.c
Line | Count | Source |
1 | | /* |
2 | | * Intel SMP support routines. |
3 | | * |
4 | | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> |
5 | | * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> |
6 | | * |
7 | | * This code is released under the GNU General Public License version 2 or |
8 | | * later. |
9 | | */ |
10 | | |
11 | | #include <xen/irq.h> |
12 | | #include <xen/sched.h> |
13 | | #include <xen/delay.h> |
14 | | #include <xen/perfc.h> |
15 | | #include <xen/spinlock.h> |
16 | | #include <asm/current.h> |
17 | | #include <asm/smp.h> |
18 | | #include <asm/mc146818rtc.h> |
19 | | #include <asm/flushtlb.h> |
20 | | #include <asm/hardirq.h> |
21 | | #include <asm/hpet.h> |
22 | | #include <asm/hvm/support.h> |
23 | | #include <mach_apic.h> |
24 | | |
25 | | /* |
26 | | * send_IPI_mask(cpumask, vector): sends @vector IPI to CPUs in @cpumask, |
27 | | * excluding the local CPU. @cpumask may be empty. |
28 | | */ |
29 | | |
30 | | void send_IPI_mask(const cpumask_t *mask, int vector) |
31 | 324k | { |
32 | 324k | genapic->send_IPI_mask(mask, vector); |
33 | 324k | } |
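Because send_IPI_mask() deliberately skips the local CPU, a caller that wants
the local CPU covered as well has to handle it separately. A minimal sketch of
that pattern, with hypothetical names my_vector and my_local_handler (the same
shape appears in smp_send_call_function_mask() further down in this file):

    /* Sketch: kick remote CPUs via IPI, then run the handler locally. */
    static void my_signal_cpus(const cpumask_t *mask, int my_vector)
    {
        send_IPI_mask(mask, my_vector);      /* remote CPUs only */
        if ( cpumask_test_cpu(smp_processor_id(), mask) )
        {
            local_irq_disable();             /* mimic interrupt context */
            my_local_handler();
            local_irq_enable();
        }
    }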
34 | | |
35 | | void send_IPI_self(int vector) |
36 | 0 | { |
37 | 0 | genapic->send_IPI_self(vector); |
38 | 0 | } |
39 | | |
40 | | /* |
41 | | * Some notes on x86 processor bugs affecting SMP operation: |
42 | | * |
43 | | * Pentium, Pentium Pro, II, III (and all CPUs) have bugs. |
44 | | * The Linux implications for SMP are handled as follows: |
45 | | * |
46 | | * Pentium III / [Xeon] |
47 | | * None of the E1AP-E3AP errata are visible to the user. |
48 | | * |
49 | | * E1AP. see PII A1AP |
50 | | * E2AP. see PII A2AP |
51 | | * E3AP. see PII A3AP |
52 | | * |
53 | | * Pentium II / [Xeon] |
54 | | * None of the A1AP-A3AP errata are visible to the user. |
55 | | * |
56 | | * A1AP. see PPro 1AP |
57 | | * A2AP. see PPro 2AP |
58 | | * A3AP. see PPro 7AP |
59 | | * |
60 | | * Pentium Pro |
61 | | * None of the 1AP-9AP errata are visible to the normal user, |
62 | | * except occasional delivery of 'spurious interrupt' as trap #15. |
63 | | * This is very rare and a non-problem. |
64 | | * |
65 | | * 1AP. Linux maps APIC as non-cacheable |
66 | | * 2AP. worked around in hardware |
67 | | * 3AP. fixed in C0 and above steppings microcode update. |
68 | | * Linux does not use excessive STARTUP_IPIs. |
69 | | * 4AP. worked around in hardware |
70 | | * 5AP. symmetric IO mode (normal Linux operation) not affected. |
71 | | * 'noapic' mode has vector 0xf filled out properly. |
72 | | * 6AP. 'noapic' mode might be affected - fixed in later steppings |
73 | | * 7AP. We do not assume writes to the LVT deasserting IRQs |
74 | | * 8AP. We do not enable low power mode (deep sleep) during MP bootup |
75 | | * 9AP. We do not use mixed mode |
76 | | */ |
77 | | |
78 | | /* |
79 | | * The following functions deal with sending IPIs between CPUs. |
80 | | */ |
81 | | |
82 | | static inline int __prepare_ICR (unsigned int shortcut, int vector) |
83 | 0 | { |
84 | 0 | return APIC_DM_FIXED | shortcut | vector; |
85 | 0 | } |
86 | | |
87 | | static inline int __prepare_ICR2 (unsigned int mask) |
88 | 0 | { |
89 | 0 | return SET_xAPIC_DEST_FIELD(mask); |
90 | 0 | } |
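The ICR value is a plain OR of bit-fields: vector in bits 0-7, delivery mode
in bits 8-10 (APIC_DM_FIXED is the all-zero encoding), and the destination
shorthand in bits 18-19. A worked sketch, assuming the usual apicdef.h
constants (e.g. APIC_DEST_SELF == 0x40000):

    /* Fixed-mode IPI to self with vector 0xfb (assumed xAPIC encodings). */
    unsigned int cfg = __prepare_ICR(APIC_DEST_SELF, 0xfb);
    /* cfg == 0x400fb: shorthand (bits 18-19) | APIC_DM_FIXED | vector */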
91 | | |
92 | | void apic_wait_icr_idle(void) |
93 | 0 | { |
94 | 0 | if ( x2apic_enabled ) |
95 | 0 | return; |
96 | 0 | |
97 | 0 | while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ) |
98 | 0 | cpu_relax(); |
99 | 0 | } |
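The early return for x2APIC is not an omission: in x2APIC mode the ICR is
written as a single 64-bit MSR and the Delivery Status (busy) bit no longer
exists, so there is nothing to poll.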
100 | | |
101 | | static void __default_send_IPI_shortcut(unsigned int shortcut, int vector, |
102 | | unsigned int dest) |
103 | 0 | { |
104 | 0 | unsigned int cfg; |
105 | 0 | |
106 | 0 | /* |
107 | 0 | * Wait for idle. |
108 | 0 | */ |
109 | 0 | apic_wait_icr_idle(); |
110 | 0 | |
111 | 0 | /* |
112 | 0 | * prepare target chip field |
113 | 0 | */ |
114 | 0 | cfg = __prepare_ICR(shortcut, vector) | dest; |
115 | 0 | /* |
116 | 0 | * Send the IPI. The write to APIC_ICR fires this off. |
117 | 0 | */ |
118 | 0 | apic_write(APIC_ICR, cfg); |
119 | 0 | } |
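Note that the shorthand path never writes APIC_ICR2: when a destination
shorthand such as APIC_DEST_SELF is used, the hardware ignores the destination
field, so a single ICR write both selects the target and fires the IPI.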
120 | | |
121 | | void send_IPI_self_legacy(uint8_t vector) |
122 | 0 | { |
123 | 0 | __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); |
124 | 0 | } |
125 | | |
126 | | void send_IPI_mask_flat(const cpumask_t *cpumask, int vector) |
127 | 0 | { |
128 | 0 | unsigned long mask = cpumask_bits(cpumask)[0]; |
129 | 0 | unsigned long cfg; |
130 | 0 | unsigned long flags; |
131 | 0 | |
132 | 0 | mask &= cpumask_bits(&cpu_online_map)[0]; |
133 | 0 | mask &= ~(1UL << smp_processor_id()); |
134 | 0 | if ( mask == 0 ) |
135 | 0 | return; |
136 | 0 | |
137 | 0 | local_irq_save(flags); |
138 | 0 | |
139 | 0 | /* |
140 | 0 | * Wait for idle. |
141 | 0 | */ |
142 | 0 | apic_wait_icr_idle(); |
143 | 0 | |
144 | 0 | /* |
145 | 0 | * prepare target chip field |
146 | 0 | */ |
147 | 0 | cfg = __prepare_ICR2(mask); |
148 | 0 | apic_write(APIC_ICR2, cfg); |
149 | 0 | |
150 | 0 | /* |
151 | 0 | * program the ICR |
152 | 0 | */ |
153 | 0 | cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL; |
154 | 0 | |
155 | 0 | /* |
156 | 0 | * Send the IPI. The write to APIC_ICR fires this off. |
157 | 0 | */ |
158 | 0 | apic_write(APIC_ICR, cfg); |
159 | 0 | |
160 | 0 | local_irq_restore(flags); |
161 | 0 | } |
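Two limits apply to the flat path: only cpumask_bits(cpumask)[0] is consulted,
i.e. the first BITS_PER_LONG CPUs, and xAPIC flat logical mode can address at
most 8 logical APIC IDs anyway, which is why a flat genapic is normally only
selected on small systems.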
162 | | |
163 | | void send_IPI_mask_phys(const cpumask_t *mask, int vector) |
164 | 0 | { |
165 | 0 | unsigned long cfg, flags; |
166 | 0 | unsigned int query_cpu; |
167 | 0 | |
168 | 0 | local_irq_save(flags); |
169 | 0 | |
170 | 0 | for_each_cpu ( query_cpu, mask ) |
171 | 0 | { |
172 | 0 | if ( !cpu_online(query_cpu) || (query_cpu == smp_processor_id()) ) |
173 | 0 | continue; |
174 | 0 | |
175 | 0 | /* |
176 | 0 | * Wait for idle. |
177 | 0 | */ |
178 | 0 | apic_wait_icr_idle(); |
179 | 0 | |
180 | 0 | /* |
181 | 0 | * prepare target chip field |
182 | 0 | */ |
183 | 0 | cfg = __prepare_ICR2(cpu_physical_id(query_cpu)); |
184 | 0 | apic_write(APIC_ICR2, cfg); |
185 | 0 | |
186 | 0 | /* |
187 | 0 | * program the ICR |
188 | 0 | */ |
189 | 0 | cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL; |
190 | 0 | |
191 | 0 | /* |
192 | 0 | * Send the IPI. The write to APIC_ICR fires this off. |
193 | 0 | */ |
194 | 0 | apic_write(APIC_ICR, cfg); |
195 | 0 | } |
196 | 0 | |
197 | 0 | local_irq_restore(flags); |
198 | 0 | } |
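Unlike the flat variant, physical destination mode carries exactly one APIC ID
per ICR2 write, so this function loops and issues one IPI per target CPU,
waiting for the ICR to go idle before each send.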
199 | | |
200 | | static DEFINE_SPINLOCK(flush_lock); |
201 | | static cpumask_t flush_cpumask; |
202 | | static const void *flush_va; |
203 | | static unsigned int flush_flags; |
204 | | |
205 | | void invalidate_interrupt(struct cpu_user_regs *regs) |
206 | 540 | { |
207 | 540 | unsigned int flags = flush_flags; |
208 | 540 | ack_APIC_irq(); |
209 | 540 | perfc_incr(ipis); |
210 | 540 | if ( __sync_local_execstate() ) |
211 | 73 | flags &= ~(FLUSH_TLB | FLUSH_TLB_GLOBAL); |
212 | 540 | flush_area_local(flush_va, flags); |
213 | 540 | cpumask_clear_cpu(smp_processor_id(), &flush_cpumask); |
214 | 540 | } |
215 | | |
216 | | void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags) |
217 | 425 | { |
218 | 425 | unsigned int cpu = smp_processor_id(); |
219 | 425 | |
220 | 425 | ASSERT(local_irq_is_enabled()); |
221 | 425 | |
222 | 425 | if ( cpumask_test_cpu(cpu, mask) ) |
223 | 42 | flags = flush_area_local(va, flags); |
224 | 425 | |
225 | 425 | if ( (flags & ~FLUSH_ORDER_MASK) && |
226 | 425 | !cpumask_subset(mask, cpumask_of(cpu)) ) |
227 | 118 | { |
228 | 118 | spin_lock(&flush_lock); |
229 | 118 | cpumask_and(&flush_cpumask, mask, &cpu_online_map); |
230 | 118 | cpumask_clear_cpu(cpu, &flush_cpumask); |
231 | 118 | flush_va = va; |
232 | 118 | flush_flags = flags; |
233 | 118 | send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR); |
234 | 800k | while ( !cpumask_empty(&flush_cpumask) ) |
235 | 800k | cpu_relax(); |
236 | 118 | spin_unlock(&flush_lock); |
237 | 118 | } |
238 | 425 | } |
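The flush IPI is an instance of a general rendezvous pattern: publish the
arguments in globals under a lock, kick the targets, then spin until every
target has cleared its own bit. A minimal sketch with hypothetical names
(MY_VECTOR, my_arg), mirroring the flush_lock/flush_cpumask machinery above:

    static DEFINE_SPINLOCK(my_lock);
    static cpumask_t my_pending;
    static int my_arg;

    static void my_request(const cpumask_t *targets, int arg)
    {
        spin_lock(&my_lock);                  /* one initiator at a time */
        cpumask_and(&my_pending, targets, &cpu_online_map);
        cpumask_clear_cpu(smp_processor_id(), &my_pending);
        my_arg = arg;                         /* publish before the IPI */
        send_IPI_mask(&my_pending, MY_VECTOR);
        while ( !cpumask_empty(&my_pending) ) /* handlers clear their bits */
            cpu_relax();
        spin_unlock(&my_lock);
    }

    /* Per-CPU handler, analogous to invalidate_interrupt() above. */
    static void my_interrupt(void)
    {
        int arg = my_arg;                     /* snapshot the argument */
        /* ... act on arg ... */
        cpumask_clear_cpu(smp_processor_id(), &my_pending);
    }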
239 | | |
240 | | /* Call with no locks held and interrupts enabled (e.g., softirq context). */ |
241 | | void new_tlbflush_clock_period(void) |
242 | 38 | { |
243 | 38 | cpumask_t allbutself; |
244 | 38 | |
245 | 38 | /* Flush everyone else. We definitely flushed just before entry. */ |
246 | 38 | cpumask_andnot(&allbutself, &cpu_online_map, |
247 | 38 | cpumask_of(smp_processor_id())); |
248 | 38 | flush_mask(&allbutself, FLUSH_TLB); |
249 | 38 | |
250 | 38 | /* No need for atomicity: we are the only possible updater. */ |
251 | 38 | ASSERT(tlbflush_clock == 0); |
252 | 38 | tlbflush_clock++; |
253 | 38 | } |
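This runs when the global tlbflush clock wraps to zero: after flushing every
other CPU, the clock restarts at 1, so no stale per-CPU timestamp from before
the wrap can compare as current. As the comment notes, no atomics are needed
because the wrapping CPU is the only writer at this point.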
254 | | |
255 | | void smp_send_event_check_mask(const cpumask_t *mask) |
256 | 101k | { |
257 | 101k | send_IPI_mask(mask, EVENT_CHECK_VECTOR); |
258 | 101k | } |
259 | | |
260 | | void smp_send_call_function_mask(const cpumask_t *mask) |
261 | 222k | { |
262 | 222k | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); |
263 | 222k | |
264 | 222k | if ( cpumask_test_cpu(smp_processor_id(), mask) ) |
265 | 222k | { |
266 | 222k | local_irq_disable(); |
267 | 222k | smp_call_function_interrupt(); |
268 | 222k | local_irq_enable(); |
269 | 222k | } |
270 | 222k | } |
271 | | |
272 | | void __stop_this_cpu(void) |
273 | 0 | { |
274 | 0 | ASSERT(!local_irq_is_enabled()); |
275 | 0 | |
276 | 0 | disable_local_APIC(); |
277 | 0 | |
278 | 0 | hvm_cpu_down(); |
279 | 0 | |
280 | 0 | /* |
281 | 0 | * Clear FPU, zapping any pending exceptions. Needed for warm reset with |
282 | 0 | * some BIOSes. |
283 | 0 | */ |
284 | 0 | clts(); |
285 | 0 | asm volatile ( "fninit" ); |
286 | 0 | |
287 | 0 | cpumask_clear_cpu(smp_processor_id(), &cpu_online_map); |
288 | 0 | } |
289 | | |
290 | | static void stop_this_cpu(void *dummy) |
291 | 0 | { |
292 | 0 | __stop_this_cpu(); |
293 | 0 | for ( ; ; ) |
294 | 0 | halt(); |
295 | 0 | } |
296 | | |
297 | | /* |
298 | | * Stop all CPUs and turn off local APICs and the IO-APIC, so other OSs see a |
299 | | * clean IRQ state. |
300 | | */ |
301 | | void smp_send_stop(void) |
302 | 0 | { |
303 | 0 | int timeout = 10; |
304 | 0 | |
305 | 0 | local_irq_disable(); |
306 | 0 | fixup_irqs(cpumask_of(smp_processor_id()), 0); |
307 | 0 | local_irq_enable(); |
308 | 0 | |
309 | 0 | smp_call_function(stop_this_cpu, NULL, 0); |
310 | 0 | |
311 | 0 | /* Wait 10ms for all other CPUs to go offline. */ |
312 | 0 | while ( (num_online_cpus() > 1) && (timeout-- > 0) ) |
313 | 0 | mdelay(1); |
314 | 0 | |
315 | 0 | local_irq_disable(); |
316 | 0 | disable_IO_APIC(); |
317 | 0 | hpet_disable(); |
318 | 0 | __stop_this_cpu(); |
319 | 0 | local_irq_enable(); |
320 | 0 | } |
321 | | |
322 | | void smp_send_nmi_allbutself(void) |
323 | 0 | { |
324 | 0 | send_IPI_mask(&cpu_online_map, APIC_DM_NMI); |
325 | 0 | } |
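Despite the 'allbutself' name, passing the full cpu_online_map is safe here:
as documented at the top of the file, send_IPI_mask() never targets the local
CPU. APIC_DM_NMI is passed in place of a plain vector, setting the NMI
delivery mode so targets take an NMI rather than a fixed-vector interrupt.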
326 | | |
327 | | void event_check_interrupt(struct cpu_user_regs *regs) |
328 | 35.2k | { |
329 | 35.2k | ack_APIC_irq(); |
330 | 35.2k | perfc_incr(ipis); |
331 | 35.2k | this_cpu(irq_count)++; |
332 | 35.2k | } |
333 | | |
334 | | void call_function_interrupt(struct cpu_user_regs *regs) |
335 | 1.92M | { |
336 | 1.92M | ack_APIC_irq(); |
337 | 1.92M | perfc_incr(ipis); |
338 | 1.92M | smp_call_function_interrupt(); |
339 | 1.92M | } |