/root/src/xen/xen/arch/x86/io_apic.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Intel IO-APIC support for multi-Pentium hosts. |
3 | | * |
4 | | * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo |
5 | | * |
6 | | * Many thanks to Stig Venaas for trying out countless experimental |
7 | | * patches and reporting/debugging problems patiently! |
8 | | * |
9 | | * (c) 1999, Multiple IO-APIC support, developed by |
10 | | * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and |
11 | | * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>, |
12 | | * further tested and cleaned up by Zach Brown <zab@redhat.com> |
13 | | * and Ingo Molnar <mingo@redhat.com> |
14 | | * |
15 | | * Fixes |
16 | | * Maciej W. Rozycki : Bits for genuine 82489DX APICs; |
17 | | * thanks to Eric Gilmore |
18 | | * and Rolf G. Tews |
19 | | * for testing these extensively |
20 | | * Paul Diefenbaugh : Added full ACPI support |
21 | | */ |
22 | | |
23 | | #include <xen/lib.h> |
24 | | #include <xen/init.h> |
25 | | #include <xen/irq.h> |
26 | | #include <xen/delay.h> |
27 | | #include <xen/sched.h> |
28 | | #include <xen/acpi.h> |
29 | | #include <xen/keyhandler.h> |
30 | | #include <xen/softirq.h> |
31 | | #include <asm/mc146818rtc.h> |
32 | | #include <asm/smp.h> |
33 | | #include <asm/desc.h> |
34 | | #include <asm/msi.h> |
35 | | #include <asm/setup.h> |
36 | | #include <mach_apic.h> |
37 | | #include <io_ports.h> |
38 | | #include <public/physdev.h> |
39 | | #include <xen/trace.h> |
40 | | |
/* Where, if anywhere, the i8259 is connected in external interrupt mode. */
42 | | static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; |
43 | | |
44 | | static DEFINE_SPINLOCK(ioapic_lock); |
45 | | |
46 | | bool __read_mostly skip_ioapic_setup; |
47 | | bool __initdata ioapic_ack_new = true; |
48 | | bool __initdata ioapic_ack_forced; |
49 | | |
50 | | /* |
51 | | * # of IRQ routing registers |
52 | | */ |
53 | | int __read_mostly nr_ioapic_entries[MAX_IO_APICS]; |
54 | | int __read_mostly nr_ioapics; |
55 | | |
56 | | /* |
57 | | * Rough estimation of how many shared IRQs there are, can |
58 | | * be changed anytime. |
59 | | */ |
60 | 146 | #define MAX_PLUS_SHARED_IRQS nr_irqs_gsi |
61 | 146 | #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + nr_irqs_gsi) |
62 | | |
63 | | |
64 | 299 | #define ioapic_has_eoi_reg(apic) (mp_ioapics[(apic)].mpc_apicver >= 0x20) |
65 | | |
66 | | static int apic_pin_2_gsi_irq(int apic, int pin); |
67 | | |
68 | | static vmask_t *__read_mostly vector_map[MAX_IO_APICS]; |
69 | | |
/*
 * Merge the used-vector tracking of two IO-APICs: fold dst's bitmap
 * into src's, repoint every IRQ descriptor that was using dst's map
 * at src's, and have both APICs share src's map from then on.
 */
static void share_vector_maps(unsigned int src, unsigned int dst)
{
    unsigned int pin;

    /* Already sharing one map - nothing to do. */
    if (vector_map[src] == vector_map[dst])
        return;

    bitmap_or(vector_map[src]->_bits, vector_map[src]->_bits,
              vector_map[dst]->_bits, NR_VECTORS);

    for (pin = 0; pin < nr_ioapic_entries[dst]; ++pin) {
        int irq = apic_pin_2_gsi_irq(dst, pin);
        struct irq_desc *desc;

        if (irq < 0)
            continue;
        desc = irq_to_desc(irq);
        if (desc->arch.used_vectors == vector_map[dst])
            desc->arch.used_vectors = vector_map[src];
    }

    vector_map[dst] = vector_map[src];
}
93 | | |
94 | | /* |
95 | | * This is performance-critical, we want to do it O(1) |
96 | | * |
97 | | * the indexing order of this array favors 1:1 mappings |
98 | | * between pins and IRQs. |
99 | | */ |
100 | | |
101 | | static struct irq_pin_list { |
102 | | int apic, pin; |
103 | | unsigned int next; |
104 | | } *__read_mostly irq_2_pin; |
105 | | |
106 | | static unsigned int irq_2_pin_free_entry; |
107 | | |
108 | | /* |
109 | | * The common case is 1:1 IRQ<->pin mappings. Sometimes there are |
110 | | * shared ISA-space IRQs, so we have to support them. We are super |
111 | | * fast in the common case, and fast for shared ISA-space IRQs. |
112 | | */ |
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
    struct irq_pin_list *entry = irq_2_pin + irq;

    /* Walk to the tail of this IRQ's pin list, asserting along the way
     * that the (apic, pin) pair is not already present. */
    while (entry->next) {
        BUG_ON((entry->apic == apic) && (entry->pin == pin));
        entry = irq_2_pin + entry->next;
    }

    BUG_ON((entry->apic == apic) && (entry->pin == pin));

    /* Tail slot already occupied (pin != -1): pop a spare slot off the
     * free list and chain it behind the current tail. */
    if (entry->pin != -1) {
        if (irq_2_pin_free_entry >= PIN_MAP_SIZE)
            panic("io_apic.c: whoops");
        entry->next = irq_2_pin_free_entry;
        entry = irq_2_pin + entry->next;
        irq_2_pin_free_entry = entry->next;
        entry->next = 0;
    }
    entry->apic = apic;
    entry->pin = pin;
    /* All pins feeding one IRQ must agree on a used-vector map. */
    share_vector_maps(irq_2_pin[irq].apic, apic);
}
136 | | |
/*
 * Unlink (apic, pin) from irq's pin list, BUGging if the pair is not
 * on the list.  Vacated chain slots are pushed back on the free list.
 */
static void remove_pin_from_irq(unsigned int irq, int apic, int pin)
{
    struct irq_pin_list *entry, *prev;

    /* Locate the entry; BUG_ON fires if we run off the end. */
    for (entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next]) {
        if ((entry->apic == apic) && (entry->pin == pin))
            break;
        BUG_ON(!entry->next);
    }

    entry->pin = entry->apic = -1;

    if (entry != &irq_2_pin[irq]) {
        /* Removed entry is not at head of list. */
        prev = &irq_2_pin[irq];
        while (&irq_2_pin[prev->next] != entry)
            prev = &irq_2_pin[prev->next];
        prev->next = entry->next;
    } else if (entry->next) {
        /* Removed entry is at head of multi-item list: the head slot is
         * fixed (indexed by irq), so copy the second element into it
         * and release the second slot instead. */
        prev = entry;
        entry = &irq_2_pin[entry->next];
        *prev = *entry;
        entry->pin = entry->apic = -1;
    } else
        return;

    /* Return the now-unused slot to the free list. */
    entry->next = irq_2_pin_free_entry;
    irq_2_pin_free_entry = entry - irq_2_pin;
}
167 | | |
168 | | /* |
169 | | * Reroute an IRQ to a different pin. |
170 | | */ |
171 | | static void __init replace_pin_at_irq(unsigned int irq, |
172 | | int oldapic, int oldpin, |
173 | | int newapic, int newpin) |
174 | 0 | { |
175 | 0 | struct irq_pin_list *entry = irq_2_pin + irq; |
176 | 0 |
|
177 | 0 | while (1) { |
178 | 0 | if (entry->apic == oldapic && entry->pin == oldpin) { |
179 | 0 | entry->apic = newapic; |
180 | 0 | entry->pin = newpin; |
181 | 0 | share_vector_maps(oldapic, newapic); |
182 | 0 | } |
183 | 0 | if (!entry->next) |
184 | 0 | break; |
185 | 0 | entry = irq_2_pin + entry->next; |
186 | 0 | } |
187 | 0 | } |
188 | | |
189 | | vmask_t *io_apic_get_used_vector_map(unsigned int irq) |
190 | 16 | { |
191 | 16 | struct irq_pin_list *entry = irq_2_pin + irq; |
192 | 16 | |
193 | 16 | if (entry->pin == -1) |
194 | 0 | return NULL; |
195 | 16 | |
196 | 16 | return vector_map[entry->apic]; |
197 | 16 | } |
198 | | |
199 | | struct IO_APIC_route_entry **alloc_ioapic_entries(void) |
200 | 1 | { |
201 | 1 | int apic; |
202 | 1 | struct IO_APIC_route_entry **ioapic_entries; |
203 | 1 | |
204 | 1 | ioapic_entries = xmalloc_array(struct IO_APIC_route_entry *, nr_ioapics); |
205 | 1 | if (!ioapic_entries) |
206 | 0 | return 0; |
207 | 1 | |
208 | 3 | for (apic = 0; apic < nr_ioapics; apic++) { |
209 | 2 | ioapic_entries[apic] = |
210 | 2 | xmalloc_array(struct IO_APIC_route_entry, |
211 | 2 | nr_ioapic_entries[apic]); |
212 | 2 | if (!ioapic_entries[apic] && nr_ioapic_entries[apic]) |
213 | 0 | goto nomem; |
214 | 2 | } |
215 | 1 | |
216 | 1 | return ioapic_entries; |
217 | 1 | |
218 | 0 | nomem: |
219 | 0 | while (--apic >= 0) |
220 | 0 | xfree(ioapic_entries[apic]); |
221 | 0 | xfree(ioapic_entries); |
222 | 0 |
|
223 | 0 | return 0; |
224 | 1 | } |
225 | | |
226 | | union entry_union { |
227 | | struct { u32 w1, w2; }; |
228 | | struct IO_APIC_route_entry entry; |
229 | | }; |
230 | | |
231 | | struct IO_APIC_route_entry __ioapic_read_entry( |
232 | | unsigned int apic, unsigned int pin, bool raw) |
233 | 363 | { |
234 | 363 | unsigned int (*read)(unsigned int, unsigned int) |
235 | 267 | = raw ? __io_apic_read : io_apic_read; |
236 | 363 | union entry_union eu; |
237 | 363 | eu.w1 = (*read)(apic, 0x10 + 2 * pin); |
238 | 363 | eu.w2 = (*read)(apic, 0x11 + 2 * pin); |
239 | 363 | return eu.entry; |
240 | 363 | } |
241 | | |
242 | | static struct IO_APIC_route_entry ioapic_read_entry( |
243 | | unsigned int apic, unsigned int pin, bool raw) |
244 | 48 | { |
245 | 48 | struct IO_APIC_route_entry entry; |
246 | 48 | unsigned long flags; |
247 | 48 | |
248 | 48 | spin_lock_irqsave(&ioapic_lock, flags); |
249 | 48 | entry = __ioapic_read_entry(apic, pin, raw); |
250 | 48 | spin_unlock_irqrestore(&ioapic_lock, flags); |
251 | 48 | return entry; |
252 | 48 | } |
253 | | |
/*
 * Write one full redirection entry.  The high dword is committed first
 * so the low dword - which carries the mask and vector bits (see
 * __modify_IO_APIC_irq) - is the last to take effect.
 */
void __ioapic_write_entry(
    unsigned int apic, unsigned int pin, bool raw,
    struct IO_APIC_route_entry e)
{
    void (*write)(unsigned int, unsigned int, unsigned int)
        = raw ? __io_apic_write : io_apic_write;
    union entry_union eu = { .entry = e };

    (*write)(apic, 0x11 + 2*pin, eu.w2);
    (*write)(apic, 0x10 + 2*pin, eu.w1);
}
265 | | |
266 | | static void ioapic_write_entry( |
267 | | unsigned int apic, unsigned int pin, bool raw, |
268 | | struct IO_APIC_route_entry e) |
269 | 48 | { |
270 | 48 | unsigned long flags; |
271 | 48 | spin_lock_irqsave(&ioapic_lock, flags); |
272 | 48 | __ioapic_write_entry(apic, pin, raw, e); |
273 | 48 | spin_unlock_irqrestore(&ioapic_lock, flags); |
274 | 48 | } |
275 | | |
276 | | /* EOI an IO-APIC entry. Vector may be -1, indicating that it should be |
277 | | * worked out using the pin. This function expects that the ioapic_lock is |
278 | | * being held, and interrupts are disabled (or there is a good reason not |
279 | | * to), and that if both pin and vector are passed, that they refer to the |
280 | | * same redirection entry in the IO-APIC. */ |
static void __io_apic_eoi(unsigned int apic, unsigned int vector, unsigned int pin)
{
    /* Prefer the use of the EOI register if available */
    if ( ioapic_has_eoi_reg(apic) )
    {
        /* If vector is unknown, read it from the IO-APIC */
        if ( vector == IRQ_VECTOR_UNASSIGNED )
            vector = __ioapic_read_entry(apic, pin, TRUE).vector;

        /* Write the vector to the dedicated EOI register. */
        *(IO_APIC_BASE(apic)+16) = vector;
    }
    else
    {
        /* Else fake an EOI by switching to edge triggered mode
         * and back */
        struct IO_APIC_route_entry entry;
        bool need_to_unmask = false;

        entry = __ioapic_read_entry(apic, pin, TRUE);

        if ( ! entry.mask )
        {
            /* If entry is not currently masked, mask it and make
             * a note to unmask it later */
            entry.mask = 1;
            __ioapic_write_entry(apic, pin, TRUE, entry);
            need_to_unmask = true;
        }

        /* Flip the trigger mode to edge and back */
        entry.trigger = 0;
        __ioapic_write_entry(apic, pin, TRUE, entry);
        entry.trigger = 1;
        __ioapic_write_entry(apic, pin, TRUE, entry);

        if ( need_to_unmask )
        {
            /* Unmask if necessary */
            entry.mask = 0;
            __ioapic_write_entry(apic, pin, TRUE, entry);
        }
    }
}
324 | | |
325 | | /* |
326 | | * Saves all the IO-APIC RTE's |
327 | | */ |
328 | | int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) |
329 | 1 | { |
330 | 1 | int apic, pin; |
331 | 1 | |
332 | 1 | if (!ioapic_entries) |
333 | 0 | return -ENOMEM; |
334 | 1 | |
335 | 3 | for (apic = 0; apic < nr_ioapics; apic++) { |
336 | 2 | if (!nr_ioapic_entries[apic]) |
337 | 0 | continue; |
338 | 2 | |
339 | 2 | if (!ioapic_entries[apic]) |
340 | 0 | return -ENOMEM; |
341 | 2 | |
342 | 50 | for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) |
343 | 48 | ioapic_entries[apic][pin] = __ioapic_read_entry(apic, pin, 1); |
344 | 2 | } |
345 | 1 | |
346 | 1 | return 0; |
347 | 1 | } |
348 | | |
349 | | /* |
350 | | * Mask all IO APIC entries. |
351 | | */ |
352 | | void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) |
353 | 1 | { |
354 | 1 | int apic, pin; |
355 | 1 | |
356 | 1 | if (!ioapic_entries) |
357 | 0 | return; |
358 | 1 | |
359 | 3 | for (apic = 0; apic < nr_ioapics; apic++) { |
360 | 2 | if (!nr_ioapic_entries[apic]) |
361 | 0 | continue; |
362 | 2 | |
363 | 2 | if (!ioapic_entries[apic]) |
364 | 0 | break; |
365 | 2 | |
366 | 50 | for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) { |
367 | 48 | struct IO_APIC_route_entry entry; |
368 | 48 | |
369 | 48 | entry = ioapic_entries[apic][pin]; |
370 | 48 | if (!entry.mask) { |
371 | 0 | entry.mask = 1; |
372 | 0 |
|
373 | 0 | ioapic_write_entry(apic, pin, 1, entry); |
374 | 0 | } |
375 | 48 | } |
376 | 2 | } |
377 | 1 | } |
378 | | |
/*
 * Restore the IO-APIC entries that were saved in ioapic_entries.
 */
382 | | int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) |
383 | 1 | { |
384 | 1 | int apic, pin; |
385 | 1 | |
386 | 1 | if (!ioapic_entries) |
387 | 0 | return -ENOMEM; |
388 | 1 | |
389 | 3 | for (apic = 0; apic < nr_ioapics; apic++) { |
390 | 2 | if (!nr_ioapic_entries[apic]) |
391 | 0 | continue; |
392 | 2 | |
393 | 2 | if (!ioapic_entries[apic]) |
394 | 0 | return -ENOMEM; |
395 | 2 | |
396 | 50 | for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) |
397 | 48 | ioapic_write_entry(apic, pin, 1, ioapic_entries[apic][pin]); |
398 | 2 | } |
399 | 1 | |
400 | 1 | return 0; |
401 | 1 | } |
402 | | |
403 | | void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) |
404 | 1 | { |
405 | 1 | int apic; |
406 | 1 | |
407 | 3 | for (apic = 0; apic < nr_ioapics; apic++) |
408 | 2 | xfree(ioapic_entries[apic]); |
409 | 1 | |
410 | 1 | xfree(ioapic_entries); |
411 | 1 | } |
412 | | |
/*
 * Read-modify-write the low RTE dword of every pin routed to irq:
 * clear the 'disable' bits, then set the 'enable' bits.  Called with
 * ioapic_lock held (see mask_IO_APIC_irq et al).
 */
static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
{
    struct irq_pin_list *entry = irq_2_pin + irq;
    unsigned int pin, reg;

    for (;;) {
        pin = entry->pin;
        if (pin == -1)
            break;
        reg = io_apic_read(entry->apic, 0x10 + pin*2);
        reg &= ~disable;
        reg |= enable;
        io_apic_modify(entry->apic, 0x10 + pin*2, reg);
        if (!entry->next)
            break;
        entry = irq_2_pin + entry->next;
    }
}
431 | | |
/* mask = 1: set bit 16 (mask) in every RTE routed to this irq. */
static void __mask_IO_APIC_irq (unsigned int irq)
{
    __modify_IO_APIC_irq(irq, 0x00010000, 0);
}
437 | | |
/* mask = 0: clear bit 16 (mask) in every RTE routed to this irq. */
static void __unmask_IO_APIC_irq (unsigned int irq)
{
    __modify_IO_APIC_irq(irq, 0, 0x00010000);
}
443 | | |
/* trigger = 0: clear bit 15 (trigger mode) => edge triggered. */
static void __edge_IO_APIC_irq (unsigned int irq)
{
    __modify_IO_APIC_irq(irq, 0, 0x00008000);
}
449 | | |
/* trigger = 1: set bit 15 (trigger mode) => level triggered. */
static void __level_IO_APIC_irq (unsigned int irq)
{
    __modify_IO_APIC_irq(irq, 0x00008000, 0);
}
455 | | |
456 | | static void mask_IO_APIC_irq(struct irq_desc *desc) |
457 | 1 | { |
458 | 1 | unsigned long flags; |
459 | 1 | |
460 | 1 | spin_lock_irqsave(&ioapic_lock, flags); |
461 | 1 | __mask_IO_APIC_irq(desc->irq); |
462 | 1 | spin_unlock_irqrestore(&ioapic_lock, flags); |
463 | 1 | } |
464 | | |
465 | | static void unmask_IO_APIC_irq(struct irq_desc *desc) |
466 | 5 | { |
467 | 5 | unsigned long flags; |
468 | 5 | |
469 | 5 | spin_lock_irqsave(&ioapic_lock, flags); |
470 | 5 | __unmask_IO_APIC_irq(desc->irq); |
471 | 5 | spin_unlock_irqrestore(&ioapic_lock, flags); |
472 | 5 | } |
473 | | |
/*
 * EOI every pin routed to desc->irq, using the descriptor's current
 * vector.  Called with ioapic_lock held (see eoi_IO_APIC_irq).
 */
static void __eoi_IO_APIC_irq(struct irq_desc *desc)
{
    struct irq_pin_list *entry = irq_2_pin + desc->irq;
    unsigned int pin, vector = desc->arch.vector;

    for (;;) {
        pin = entry->pin;
        if (pin == -1)
            break;
        __io_apic_eoi(entry->apic, vector, pin);
        if (!entry->next)
            break;
        entry = irq_2_pin + entry->next;
    }
}
489 | | |
490 | | static void eoi_IO_APIC_irq(struct irq_desc *desc) |
491 | 299 | { |
492 | 299 | unsigned long flags; |
493 | 299 | spin_lock_irqsave(&ioapic_lock, flags); |
494 | 299 | __eoi_IO_APIC_irq(desc); |
495 | 299 | spin_unlock_irqrestore(&ioapic_lock, flags); |
496 | 299 | } |
497 | | |
/*
 * Quiesce one redirection entry: mask it, clear any pending remote IRR
 * (via an EOI), then overwrite it with an all-zero masked entry.  SMI
 * pins are left untouched.
 */
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
    struct IO_APIC_route_entry entry;

    /* Check delivery_mode to be sure we're not clearing an SMI pin */
    entry = __ioapic_read_entry(apic, pin, FALSE);
    if (entry.delivery_mode == dest_SMI)
        return;

    /*
     * Make sure the entry is masked and re-read the contents to check
     * if it is a level triggered pin and if the remoteIRR is set.
     */
    if (!entry.mask) {
        entry.mask = 1;
        __ioapic_write_entry(apic, pin, FALSE, entry);
    }
    entry = __ioapic_read_entry(apic, pin, TRUE);

    if (entry.irr) {
        /* Make sure the trigger mode is set to level. */
        if (!entry.trigger) {
            entry.trigger = 1;
            __ioapic_write_entry(apic, pin, TRUE, entry);
        }
        __io_apic_eoi(apic, entry.vector, pin);
    }

    /*
     * Disable it in the IO-APIC irq-routing table:
     */
    memset(&entry, 0, sizeof(entry));
    entry.mask = 1;
    __ioapic_write_entry(apic, pin, TRUE, entry);

    /* Verify the IRR actually cleared; warn if the hardware refused. */
    entry = __ioapic_read_entry(apic, pin, TRUE);
    if (entry.irr)
        printk(KERN_ERR "IO-APIC%02x-%u: Unable to reset IRR\n",
               IO_APIC_ID(apic), pin);
}
538 | | |
539 | | static void clear_IO_APIC (void) |
540 | 1 | { |
541 | 1 | int apic, pin; |
542 | 1 | |
543 | 3 | for (apic = 0; apic < nr_ioapics; apic++) { |
544 | 50 | for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) |
545 | 48 | clear_IO_APIC_pin(apic, pin); |
546 | 2 | } |
547 | 1 | } |
548 | | |
/*
 * Retarget an IRQ at the CPUs in 'mask': for every (apic, pin) on the
 * IRQ's pin list, write the new destination into the high RTE dword
 * and refresh the vector field of the low dword.
 */
static void
set_ioapic_affinity_irq(struct irq_desc *desc, const cpumask_t *mask)
{
    unsigned long flags;
    unsigned int dest;
    int pin, irq;
    struct irq_pin_list *entry;

    irq = desc->irq;

    spin_lock_irqsave(&ioapic_lock, flags);
    dest = set_desc_affinity(desc, mask);
    if (dest != BAD_APICID) {
        if ( !x2apic_enabled )
            dest = SET_APIC_LOGICAL_ID(dest);
        entry = irq_2_pin + irq;
        for (;;) {
            unsigned int data;
            pin = entry->pin;
            if (pin == -1)
                break;

            /* High dword: destination.  Low dword: patch vector only. */
            io_apic_write(entry->apic, 0x10 + 1 + pin*2, dest);
            data = io_apic_read(entry->apic, 0x10 + pin*2);
            data &= ~IO_APIC_REDIR_VECTOR_MASK;
            data |= desc->arch.vector & 0xFF;
            io_apic_modify(entry->apic, 0x10 + pin*2, data);

            if (!entry->next)
                break;
            entry = irq_2_pin + entry->next;
        }
    }
    spin_unlock_irqrestore(&ioapic_lock, flags);

}
585 | | |
586 | | /* |
587 | | * Find the IRQ entry number of a certain pin. |
588 | | */ |
589 | | static int find_irq_entry(int apic, int pin, int type) |
590 | 231 | { |
591 | 231 | int i; |
592 | 231 | |
593 | 2.24k | for (i = 0; i < mp_irq_entries; i++) |
594 | 2.16k | if (mp_irqs[i].mpc_irqtype == type && |
595 | 2.16k | (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid || |
596 | 720 | mp_irqs[i].mpc_dstapic == MP_APIC_ALL) && |
597 | 1.44k | mp_irqs[i].mpc_dstirq == pin) |
598 | 150 | return i; |
599 | 231 | |
600 | 81 | return -1; |
601 | 231 | } |
602 | | |
603 | | /* |
604 | | * Find the pin to which IRQ[irq] (ISA) is connected |
605 | | */ |
606 | | static int __init find_isa_irq_pin(int irq, int type) |
607 | 2 | { |
608 | 2 | int i; |
609 | 2 | |
610 | 17 | for (i = 0; i < mp_irq_entries; i++) { |
611 | 16 | int lbus = mp_irqs[i].mpc_srcbus; |
612 | 16 | |
613 | 16 | if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || |
614 | 0 | mp_bus_id_to_type[lbus] == MP_BUS_EISA || |
615 | 0 | mp_bus_id_to_type[lbus] == MP_BUS_MCA || |
616 | 0 | mp_bus_id_to_type[lbus] == MP_BUS_NEC98 |
617 | 16 | ) && |
618 | 16 | (mp_irqs[i].mpc_irqtype == type) && |
619 | 1 | (mp_irqs[i].mpc_srcbusirq == irq)) |
620 | 16 | |
621 | 1 | return mp_irqs[i].mpc_dstirq; |
622 | 16 | } |
623 | 1 | return -1; |
624 | 2 | } |
625 | | |
626 | | static int __init find_isa_irq_apic(int irq, int type) |
627 | 2 | { |
628 | 2 | int i; |
629 | 2 | |
630 | 17 | for (i = 0; i < mp_irq_entries; i++) { |
631 | 16 | int lbus = mp_irqs[i].mpc_srcbus; |
632 | 16 | |
633 | 16 | if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA || |
634 | 0 | mp_bus_id_to_type[lbus] == MP_BUS_EISA || |
635 | 0 | mp_bus_id_to_type[lbus] == MP_BUS_MCA || |
636 | 0 | mp_bus_id_to_type[lbus] == MP_BUS_NEC98 |
637 | 16 | ) && |
638 | 16 | (mp_irqs[i].mpc_irqtype == type) && |
639 | 1 | (mp_irqs[i].mpc_srcbusirq == irq)) |
640 | 1 | break; |
641 | 16 | } |
642 | 2 | if (i < mp_irq_entries) { |
643 | 1 | int apic; |
644 | 1 | for(apic = 0; apic < nr_ioapics; apic++) { |
645 | 1 | if (!nr_ioapic_entries[apic]) |
646 | 0 | continue; |
647 | 1 | if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic) |
648 | 1 | return apic; |
649 | 1 | } |
650 | 1 | } |
651 | 2 | |
652 | 1 | return -1; |
653 | 2 | } |
654 | | |
655 | | /* |
656 | | * Find a specific PCI IRQ entry. |
657 | | * Not an __init, possibly needed by modules |
658 | | */ |
659 | | static int pin_2_irq(int idx, int apic, int pin); |
660 | | |
661 | | /* |
662 | | * This function currently is only a helper for the i386 smp boot process where |
663 | | * we need to reprogram the ioredtbls to cater for the cpus which have come online |
664 | | * so mask in all cases should simply be TARGET_CPUS |
665 | | */ |
666 | | void /*__init*/ setup_ioapic_dest(void) |
667 | 1 | { |
668 | 1 | int pin, ioapic, irq, irq_entry; |
669 | 1 | |
670 | 1 | if (skip_ioapic_setup) |
671 | 0 | return; |
672 | 1 | |
673 | 3 | for (ioapic = 0; ioapic < nr_ioapics; ioapic++) { |
674 | 50 | for (pin = 0; pin < nr_ioapic_entries[ioapic]; pin++) { |
675 | 48 | struct irq_desc *desc; |
676 | 48 | |
677 | 48 | irq_entry = find_irq_entry(ioapic, pin, mp_INT); |
678 | 48 | if (irq_entry == -1) |
679 | 33 | continue; |
680 | 15 | irq = pin_2_irq(irq_entry, ioapic, pin); |
681 | 15 | desc = irq_to_desc(irq); |
682 | 15 | BUG_ON(cpumask_empty(desc->arch.cpu_mask)); |
683 | 15 | set_ioapic_affinity_irq(desc, desc->arch.cpu_mask); |
684 | 15 | } |
685 | 2 | |
686 | 2 | } |
687 | 1 | } |
688 | | |
689 | | /* |
690 | | * EISA Edge/Level control register, ELCR |
691 | | */ |
692 | | static int EISA_ELCR(unsigned int irq) |
693 | 0 | { |
694 | 0 | if (platform_legacy_irq(irq)) { |
695 | 0 | unsigned int port = 0x4d0 + (irq >> 3); |
696 | 0 | return (inb(port) >> (irq & 7)) & 1; |
697 | 0 | } |
698 | 0 | apic_printk(APIC_VERBOSE, KERN_INFO |
699 | 0 | "Broken MPtable reports ISA irq %d\n", irq); |
700 | 0 | return 0; |
701 | 0 | } |
702 | | |
703 | | /* EISA interrupts are always polarity zero and can be edge or level |
704 | | * trigger depending on the ELCR value. If an interrupt is listed as |
705 | | * EISA conforming in the MP table, that means its trigger type must |
706 | | * be read in from the ELCR */ |
707 | | |
708 | 0 | #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq)) |
709 | 0 | #define default_EISA_polarity(idx) (0) |
710 | | |
711 | | /* ISA interrupts are always polarity zero edge triggered, |
712 | | * when listed as conforming in the MP table. */ |
713 | | |
714 | 42 | #define default_ISA_trigger(idx) (0) |
715 | 14 | #define default_ISA_polarity(idx) (0) |
716 | | |
717 | | /* PCI interrupts are always polarity one level triggered, |
718 | | * when listed as conforming in the MP table. */ |
719 | | |
720 | 0 | #define default_PCI_trigger(idx) (1) |
721 | 0 | #define default_PCI_polarity(idx) (1) |
722 | | |
723 | | /* MCA interrupts are always polarity zero level triggered, |
724 | | * when listed as conforming in the MP table. */ |
725 | | |
726 | 0 | #define default_MCA_trigger(idx) (1) |
727 | 0 | #define default_MCA_polarity(idx) (0) |
728 | | |
729 | | /* NEC98 interrupts are always polarity zero edge triggered, |
730 | | * when listed as conforming in the MP table. */ |
731 | | |
732 | 0 | #define default_NEC98_trigger(idx) (0) |
733 | 0 | #define default_NEC98_polarity(idx) (0) |
734 | | |
735 | | static int __init MPBIOS_polarity(int idx) |
736 | 15 | { |
737 | 15 | int bus = mp_irqs[idx].mpc_srcbus; |
738 | 15 | int polarity; |
739 | 15 | |
740 | 15 | /* |
741 | 15 | * Determine IRQ line polarity (high active or low active): |
742 | 15 | */ |
743 | 15 | switch (mp_irqs[idx].mpc_irqflag & 3) |
744 | 15 | { |
745 | 14 | case 0: /* conforms, ie. bus-type dependent polarity */ |
746 | 14 | { |
747 | 14 | switch (mp_bus_id_to_type[bus]) |
748 | 14 | { |
749 | 14 | case MP_BUS_ISA: /* ISA pin */ |
750 | 14 | { |
751 | 14 | polarity = default_ISA_polarity(idx); |
752 | 14 | break; |
753 | 14 | } |
754 | 0 | case MP_BUS_EISA: /* EISA pin */ |
755 | 0 | { |
756 | 0 | polarity = default_EISA_polarity(idx); |
757 | 0 | break; |
758 | 14 | } |
759 | 0 | case MP_BUS_PCI: /* PCI pin */ |
760 | 0 | { |
761 | 0 | polarity = default_PCI_polarity(idx); |
762 | 0 | break; |
763 | 14 | } |
764 | 0 | case MP_BUS_MCA: /* MCA pin */ |
765 | 0 | { |
766 | 0 | polarity = default_MCA_polarity(idx); |
767 | 0 | break; |
768 | 14 | } |
769 | 0 | case MP_BUS_NEC98: /* NEC 98 pin */ |
770 | 0 | { |
771 | 0 | polarity = default_NEC98_polarity(idx); |
772 | 0 | break; |
773 | 14 | } |
774 | 0 | default: |
775 | 0 | { |
776 | 0 | printk(KERN_WARNING "broken BIOS!!\n"); |
777 | 0 | polarity = 1; |
778 | 0 | break; |
779 | 14 | } |
780 | 14 | } |
781 | 14 | break; |
782 | 14 | } |
783 | 1 | case 1: /* high active */ |
784 | 1 | { |
785 | 1 | polarity = 0; |
786 | 1 | break; |
787 | 14 | } |
788 | 0 | case 2: /* reserved */ |
789 | 0 | { |
790 | 0 | printk(KERN_WARNING "broken BIOS!!\n"); |
791 | 0 | polarity = 1; |
792 | 0 | break; |
793 | 14 | } |
794 | 0 | case 3: /* low active */ |
795 | 0 | { |
796 | 0 | polarity = 1; |
797 | 0 | break; |
798 | 14 | } |
799 | 0 | default: /* invalid */ |
800 | 0 | { |
801 | 0 | printk(KERN_WARNING "broken BIOS!!\n"); |
802 | 0 | polarity = 1; |
803 | 0 | break; |
804 | 14 | } |
805 | 15 | } |
806 | 15 | return polarity; |
807 | 15 | } |
808 | | |
809 | | static int MPBIOS_trigger(int idx) |
810 | 45 | { |
811 | 45 | int bus = mp_irqs[idx].mpc_srcbus; |
812 | 45 | int trigger; |
813 | 45 | |
814 | 45 | /* |
815 | 45 | * Determine IRQ trigger mode (edge or level sensitive): |
816 | 45 | */ |
817 | 45 | switch ((mp_irqs[idx].mpc_irqflag>>2) & 3) |
818 | 45 | { |
819 | 42 | case 0: /* conforms, ie. bus-type dependent */ |
820 | 42 | { |
821 | 42 | switch (mp_bus_id_to_type[bus]) |
822 | 42 | { |
823 | 42 | case MP_BUS_ISA: /* ISA pin */ |
824 | 42 | { |
825 | 42 | trigger = default_ISA_trigger(idx); |
826 | 42 | break; |
827 | 42 | } |
828 | 0 | case MP_BUS_EISA: /* EISA pin */ |
829 | 0 | { |
830 | 0 | trigger = default_EISA_trigger(idx); |
831 | 0 | break; |
832 | 42 | } |
833 | 0 | case MP_BUS_PCI: /* PCI pin */ |
834 | 0 | { |
835 | 0 | trigger = default_PCI_trigger(idx); |
836 | 0 | break; |
837 | 42 | } |
838 | 0 | case MP_BUS_MCA: /* MCA pin */ |
839 | 0 | { |
840 | 0 | trigger = default_MCA_trigger(idx); |
841 | 0 | break; |
842 | 42 | } |
843 | 0 | case MP_BUS_NEC98: /* NEC 98 pin */ |
844 | 0 | { |
845 | 0 | trigger = default_NEC98_trigger(idx); |
846 | 0 | break; |
847 | 42 | } |
848 | 0 | default: |
849 | 0 | { |
850 | 0 | printk(KERN_WARNING "broken BIOS!!\n"); |
851 | 0 | trigger = 1; |
852 | 0 | break; |
853 | 42 | } |
854 | 42 | } |
855 | 42 | break; |
856 | 42 | } |
857 | 0 | case 1: /* edge */ |
858 | 0 | { |
859 | 0 | trigger = 0; |
860 | 0 | break; |
861 | 42 | } |
862 | 0 | case 2: /* reserved */ |
863 | 0 | { |
864 | 0 | printk(KERN_WARNING "broken BIOS!!\n"); |
865 | 0 | trigger = 1; |
866 | 0 | break; |
867 | 42 | } |
868 | 3 | case 3: /* level */ |
869 | 3 | { |
870 | 3 | trigger = 1; |
871 | 3 | break; |
872 | 42 | } |
873 | 0 | default: /* invalid */ |
874 | 0 | { |
875 | 0 | printk(KERN_WARNING "broken BIOS!!\n"); |
876 | 0 | trigger = 0; |
877 | 0 | break; |
878 | 42 | } |
879 | 45 | } |
880 | 45 | return trigger; |
881 | 45 | } |
882 | | |
/* Effective polarity for mp_irqs[idx]; thin wrapper around MPBIOS_polarity. */
static inline int irq_polarity(int idx)
{
    return MPBIOS_polarity(idx);
}
887 | | |
/* Effective trigger mode for mp_irqs[idx]; thin wrapper around MPBIOS_trigger. */
static inline int irq_trigger(int idx)
{
    return MPBIOS_trigger(idx);
}
892 | | |
893 | | static int pin_2_irq(int idx, int apic, int pin) |
894 | 150 | { |
895 | 150 | int irq, i; |
896 | 150 | int bus = mp_irqs[idx].mpc_srcbus; |
897 | 150 | |
898 | 150 | /* |
899 | 150 | * Debugging check, we are in big trouble if this message pops up! |
900 | 150 | */ |
901 | 150 | if (mp_irqs[idx].mpc_dstirq != pin) |
902 | 0 | printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); |
903 | 150 | |
904 | 150 | switch (mp_bus_id_to_type[bus]) |
905 | 150 | { |
906 | 150 | case MP_BUS_ISA: /* ISA pin */ |
907 | 150 | case MP_BUS_EISA: |
908 | 150 | case MP_BUS_MCA: |
909 | 150 | case MP_BUS_NEC98: |
910 | 150 | { |
911 | 150 | irq = mp_irqs[idx].mpc_srcbusirq; |
912 | 150 | break; |
913 | 150 | } |
914 | 0 | case MP_BUS_PCI: /* PCI pin */ |
915 | 0 | { |
916 | 0 | /* |
917 | 0 | * PCI IRQs are mapped in order |
918 | 0 | */ |
919 | 0 | i = irq = 0; |
920 | 0 | while (i < apic) |
921 | 0 | irq += nr_ioapic_entries[i++]; |
922 | 0 | irq += pin; |
923 | 0 | break; |
924 | 150 | } |
925 | 0 | default: |
926 | 0 | { |
927 | 0 | printk(KERN_ERR "unknown bus type %d.\n",bus); |
928 | 0 | irq = 0; |
929 | 0 | break; |
930 | 150 | } |
931 | 150 | } |
932 | 150 | |
933 | 150 | return irq; |
934 | 150 | } |
935 | | |
936 | | static inline int IO_APIC_irq_trigger(int irq) |
937 | 15 | { |
938 | 15 | int apic, idx, pin; |
939 | 15 | |
940 | 15 | for (apic = 0; apic < nr_ioapics; apic++) { |
941 | 135 | for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) { |
942 | 135 | idx = find_irq_entry(apic,pin,mp_INT); |
943 | 135 | if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin))) |
944 | 15 | return irq_trigger(idx); |
945 | 135 | } |
946 | 15 | } |
947 | 15 | /* |
948 | 15 | * nonexistent IRQs are edge default |
949 | 15 | */ |
950 | 0 | return 0; |
951 | 15 | } |
952 | | |
953 | | static struct hw_interrupt_type ioapic_level_type; |
954 | | static hw_irq_controller ioapic_edge_type; |
955 | | |
956 | 36 | #define IOAPIC_AUTO -1 |
957 | | #define IOAPIC_EDGE 0 |
958 | 20 | #define IOAPIC_LEVEL 1 |
959 | | |
960 | 21 | #define SET_DEST(ent, mode, val) do { \ |
961 | 21 | if (x2apic_enabled) \ |
962 | 21 | (ent).dest.dest32 = (val); \ |
963 | 21 | else \ |
964 | 0 | (ent).dest.mode.mode##_dest = (val); \ |
965 | 21 | } while (0) |
966 | | |
967 | | static inline void ioapic_register_intr(int irq, unsigned long trigger) |
968 | 21 | { |
969 | 21 | if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || |
970 | 20 | trigger == IOAPIC_LEVEL) |
971 | 5 | irq_desc[irq].handler = &ioapic_level_type; |
972 | 21 | else |
973 | 16 | irq_desc[irq].handler = &ioapic_edge_type; |
974 | 21 | } |
975 | | |
976 | | static void __init setup_IO_APIC_irqs(void) |
977 | 1 | { |
978 | 1 | struct IO_APIC_route_entry entry; |
979 | 1 | int apic, pin, idx, irq, first_notcon = 1, vector; |
980 | 1 | unsigned long flags; |
981 | 1 | |
982 | 1 | apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); |
983 | 1 | |
984 | 3 | for (apic = 0; apic < nr_ioapics; apic++) { |
985 | 50 | for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) { |
986 | 48 | struct irq_desc *desc; |
987 | 48 | |
988 | 48 | /* |
989 | 48 | * add it to the IO-APIC irq-routing table: |
990 | 48 | */ |
991 | 48 | memset(&entry,0,sizeof(entry)); |
992 | 48 | |
993 | 48 | entry.delivery_mode = INT_DELIVERY_MODE; |
994 | 48 | entry.dest_mode = INT_DEST_MODE; |
995 | 48 | entry.mask = 0; /* enable IRQ */ |
996 | 48 | |
997 | 48 | idx = find_irq_entry(apic,pin,mp_INT); |
998 | 48 | if (idx == -1) { |
999 | 33 | if (first_notcon) { |
1000 | 1 | apic_printk(APIC_VERBOSE, KERN_DEBUG |
1001 | 1 | " IO-APIC (apicid-pin) %d-%d", |
1002 | 1 | mp_ioapics[apic].mpc_apicid, |
1003 | 1 | pin); |
1004 | 1 | first_notcon = 0; |
1005 | 1 | } else |
1006 | 32 | apic_printk(APIC_VERBOSE, ", %d-%d", |
1007 | 33 | mp_ioapics[apic].mpc_apicid, pin); |
1008 | 33 | continue; |
1009 | 33 | } |
1010 | 48 | |
1011 | 15 | entry.trigger = irq_trigger(idx); |
1012 | 15 | entry.polarity = irq_polarity(idx); |
1013 | 15 | |
1014 | 15 | if (irq_trigger(idx)) { |
1015 | 1 | entry.trigger = 1; |
1016 | 1 | entry.mask = 1; |
1017 | 1 | } |
1018 | 15 | |
1019 | 15 | irq = pin_2_irq(idx, apic, pin); |
1020 | 15 | /* |
1021 | 15 | * skip adding the timer int on secondary nodes, which causes |
1022 | 15 | * a small but painful rift in the time-space continuum |
1023 | 15 | */ |
1024 | 15 | if (multi_timer_check(apic, irq)) |
1025 | 0 | continue; |
1026 | 15 | else |
1027 | 15 | add_pin_to_irq(irq, apic, pin); |
1028 | 15 | |
1029 | 15 | if (!IO_APIC_IRQ(irq)) |
1030 | 0 | continue; |
1031 | 15 | |
1032 | 15 | vector = assign_irq_vector(irq, NULL); |
1033 | 15 | BUG_ON(vector < 0); |
1034 | 15 | entry.vector = vector; |
1035 | 15 | ioapic_register_intr(irq, IOAPIC_AUTO); |
1036 | 15 | |
1037 | 15 | if (platform_legacy_irq(irq)) |
1038 | 15 | disable_8259A_irq(irq_to_desc(irq)); |
1039 | 15 | |
1040 | 15 | desc = irq_to_desc(irq); |
1041 | 15 | SET_DEST(entry, logical, cpu_mask_to_apicid(TARGET_CPUS)); |
1042 | 15 | spin_lock_irqsave(&ioapic_lock, flags); |
1043 | 15 | __ioapic_write_entry(apic, pin, 0, entry); |
1044 | 15 | set_native_irq_info(irq, TARGET_CPUS); |
1045 | 15 | spin_unlock_irqrestore(&ioapic_lock, flags); |
1046 | 15 | } |
1047 | 2 | } |
1048 | 1 | |
1049 | 1 | if (!first_notcon) |
1050 | 1 | apic_printk(APIC_VERBOSE, " not connected.\n"); |
1051 | 1 | } |
1052 | | |
/*
 * Set up the 8259A-master output pin: route IRQ0 (the timer) through
 * IO-APIC @apic/@pin as a regular edge-triggered vectored interrupt,
 * with the 8259A acting only as the source behind the pin.
 */
static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
{
    struct IO_APIC_route_entry entry;

    memset(&entry,0,sizeof(entry));

    /* Keep the 8259A quiet while the pin is being reprogrammed. */
    disable_8259A_irq(irq_to_desc(0));

    /* mask LVT0 */
    apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);

    /*
     * We use logical delivery to get the timer IRQ
     * to the first CPU.
     */
    entry.dest_mode = INT_DEST_MODE;
    entry.mask = 0;        /* unmask IRQ now */
    SET_DEST(entry, logical, cpu_mask_to_apicid(TARGET_CPUS));
    entry.delivery_mode = INT_DELIVERY_MODE;
    entry.polarity = 0;    /* active high */
    entry.trigger = 0;     /* edge triggered */
    entry.vector = vector;

    /*
     * The timer IRQ doesn't have to know that behind the
     * scene we have a 8259A-master in AEOI mode ...
     */
    irq_desc[0].handler = &ioapic_edge_type;

    /*
     * Add it to the IO-APIC irq-routing table:
     */
    ioapic_write_entry(apic, pin, 0, entry);

    enable_8259A_irq(irq_to_desc(0));
}
1092 | | |
/*
 * Called by __print_IO_APIC() whenever a register holds a value outside
 * the recognised set.  Deliberately a no-op: the condition is purely
 * informational and must not disturb bring-up.
 */
static inline void UNEXPECTED_IO_APIC(void)
{
}
1096 | | |
/*
 * Dump the state of every IO-APIC: identification/version registers,
 * the full redirection table, and the IRQ -> (apic, pin) mappings.
 * @boot: true when called during boot (no softirq processing between
 * APICs); false from the keyhandler, where we yield periodically.
 */
static void /*__init*/ __print_IO_APIC(bool boot)
{
    int apic, i;
    union IO_APIC_reg_00 reg_00;
    union IO_APIC_reg_01 reg_01;
    union IO_APIC_reg_02 reg_02;
    union IO_APIC_reg_03 reg_03;
    unsigned long flags;

    printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
    for (i = 0; i < nr_ioapics; i++)
        printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
               mp_ioapics[i].mpc_apicid, nr_ioapic_entries[i]);

    /*
     * We are a bit conservative about what we expect. We have to
     * know about every hardware change ASAP.
     */
    printk(KERN_INFO "testing the IO APIC.......................\n");

    for (apic = 0; apic < nr_ioapics; apic++) {
        if ( !boot )
            process_pending_softirqs();

        if (!nr_ioapic_entries[apic])
            continue;

        /* Snapshot the ID/version/arbitration registers under the lock. */
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        reg_01.raw = io_apic_read(apic, 1);
        /* reg_02/reg_03 only exist from version 0x10/0x20 respectively. */
        if (reg_01.bits.version >= 0x10)
            reg_02.raw = io_apic_read(apic, 2);
        if (reg_01.bits.version >= 0x20)
            reg_03.raw = io_apic_read(apic, 3);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
        printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
        printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
        printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
        printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
        if (reg_00.bits.ID >= get_physical_broadcast())
            UNEXPECTED_IO_APIC();
        if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
            UNEXPECTED_IO_APIC();

        printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
        printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
        if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
             (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
             (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
             (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
             (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
             (reg_01.bits.entries != 0x2E) &&
             (reg_01.bits.entries != 0x3F)
           )
            UNEXPECTED_IO_APIC();

        printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
        printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
        if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
             (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
             (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
             (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
             (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
           )
            UNEXPECTED_IO_APIC();
        if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
            UNEXPECTED_IO_APIC();

        /*
         * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
         * but the value of reg_02 is read as the previous read register
         * value, so ignore it if reg_02 == reg_01.
         */
        if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
            printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
            printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
            if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
                UNEXPECTED_IO_APIC();
        }

        /*
         * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
         * or reg_03, but the value of reg_0[23] is read as the previous read
         * register value, so ignore it if reg_03 == reg_0[12].
         */
        if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
            reg_03.raw != reg_01.raw) {
            printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
            printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
            if (reg_03.bits.__reserved_1)
                UNEXPECTED_IO_APIC();
        }

        printk(KERN_DEBUG ".... IRQ redirection table:\n");

        printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
               " Stat Dest Deli Vect: \n");

        for (i = 0; i <= reg_01.bits.entries; i++) {
            struct IO_APIC_route_entry entry;

            entry = ioapic_read_entry(apic, i, 0);

            printk(KERN_DEBUG " %02x %03X %02X ",
                   i,
                   entry.dest.logical.logical_dest,
                   entry.dest.physical.physical_dest
                );

            printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
                   entry.mask,
                   entry.trigger,
                   entry.irr,
                   entry.polarity,
                   entry.delivery_status,
                   entry.dest_mode,
                   entry.delivery_mode,
                   entry.vector
                );
        }
    }
    printk(KERN_INFO "Using vector-based indexing\n");
    printk(KERN_DEBUG "IRQ to pin mappings:\n");
    for (i = 0; i < nr_irqs_gsi; i++) {
        struct irq_pin_list *entry = irq_2_pin + i;

        /* From the keyhandler, yield every 32 IRQs. */
        if ( !boot && !(i & 0x1f) )
            process_pending_softirqs();

        if (entry->pin < 0)
            continue;
        printk(KERN_DEBUG "IRQ%d ", irq_to_desc(i)->arch.vector);
        /* Walk the chain of pins sharing this IRQ. */
        for (;;) {
            printk("-> %d:%d", entry->apic, entry->pin);
            if (!entry->next)
                break;
            entry = irq_2_pin + entry->next;
        }
        printk("\n");
    }

    printk(KERN_INFO ".................................... done.\n");

    return;
}
1244 | | |
1245 | | static void __init print_IO_APIC(void) |
1246 | 1 | { |
1247 | 1 | if (apic_verbosity != APIC_QUIET) |
1248 | 0 | __print_IO_APIC(1); |
1249 | 1 | } |
1250 | | |
/* Keyhandler entry point: dump IO-APIC state, yielding between APICs. */
static void _print_IO_APIC_keyhandler(unsigned char key)
{
    __print_IO_APIC(0);
}
1255 | | |
1256 | | static void __init enable_IO_APIC(void) |
1257 | 1 | { |
1258 | 1 | int i8259_apic, i8259_pin; |
1259 | 1 | int i, apic; |
1260 | 1 | |
1261 | 1 | /* Initialise dynamic irq_2_pin free list. */ |
1262 | 1 | irq_2_pin = xzalloc_array(struct irq_pin_list, PIN_MAP_SIZE); |
1263 | 1 | |
1264 | 97 | for (i = 0; i < PIN_MAP_SIZE; i++) |
1265 | 96 | irq_2_pin[i].pin = -1; |
1266 | 49 | for (i = irq_2_pin_free_entry = nr_irqs_gsi; i < PIN_MAP_SIZE; i++) |
1267 | 48 | irq_2_pin[i].next = i + 1; |
1268 | 1 | |
1269 | 1 | if (directed_eoi_enabled) { |
1270 | 3 | for (apic = 0; apic < nr_ioapics; apic++) { |
1271 | 2 | if (!nr_ioapic_entries[apic]) |
1272 | 0 | continue; |
1273 | 2 | vector_map[apic] = xzalloc(vmask_t); |
1274 | 2 | BUG_ON(!vector_map[apic]); |
1275 | 2 | } |
1276 | 0 | } else { |
1277 | 0 | vector_map[0] = xzalloc(vmask_t); |
1278 | 0 | BUG_ON(!vector_map[0]); |
1279 | 0 | for (apic = 1; apic < nr_ioapics; apic++) |
1280 | 0 | vector_map[apic] = vector_map[0]; |
1281 | 0 | } |
1282 | 1 | |
1283 | 3 | for(apic = 0; apic < nr_ioapics; apic++) { |
1284 | 2 | int pin; |
1285 | 2 | /* See if any of the pins is in ExtINT mode */ |
1286 | 50 | for (pin = 0; pin < nr_ioapic_entries[apic]; pin++) { |
1287 | 48 | struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin, 0); |
1288 | 48 | |
1289 | 48 | /* If the interrupt line is enabled and in ExtInt mode |
1290 | 48 | * I have found the pin where the i8259 is connected. |
1291 | 48 | */ |
1292 | 48 | if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { |
1293 | 0 | ioapic_i8259.apic = apic; |
1294 | 0 | ioapic_i8259.pin = pin; |
1295 | 0 | goto found_i8259; |
1296 | 0 | } |
1297 | 48 | } |
1298 | 2 | } |
1299 | 1 | found_i8259: |
1300 | 1 | /* Look to see what if the MP table has reported the ExtINT */ |
1301 | 1 | /* If we could not find the appropriate pin by looking at the ioapic |
1302 | 1 | * the i8259 probably is not connected the ioapic but give the |
1303 | 1 | * mptable a chance anyway. |
1304 | 1 | */ |
1305 | 1 | i8259_pin = find_isa_irq_pin(0, mp_ExtINT); |
1306 | 1 | i8259_apic = find_isa_irq_apic(0, mp_ExtINT); |
1307 | 1 | /* Trust the MP table if nothing is setup in the hardware */ |
1308 | 1 | if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { |
1309 | 0 | printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); |
1310 | 0 | ioapic_i8259.pin = i8259_pin; |
1311 | 0 | ioapic_i8259.apic = i8259_apic; |
1312 | 0 | } |
1313 | 1 | /* Complain if the MP table and the hardware disagree */ |
1314 | 1 | if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && |
1315 | 0 | (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) |
1316 | 0 | { |
1317 | 0 | printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); |
1318 | 0 | } |
1319 | 1 | |
1320 | 1 | /* |
1321 | 1 | * Do not trust the IO-APIC being empty at bootup |
1322 | 1 | */ |
1323 | 1 | clear_IO_APIC(); |
1324 | 1 | } |
1325 | | |
/*
 * Not an __init, needed by the reboot code.
 * Tear down IO-APIC routing for reboot: clear all entries and, if the
 * i8259 is cascaded through an IO-APIC pin, put that pin into virtual
 * wire (ExtINT) mode so legacy interrupts keep working.
 */
void disable_IO_APIC(void)
{
    /*
     * Clear the IO-APIC before rebooting:
     */
    clear_IO_APIC();

    /*
     * If the i8259 is routed through an IOAPIC
     * Put that IOAPIC in virtual wire mode
     * so legacy interrupts can be delivered.
     */
    if (ioapic_i8259.pin != -1) {
        struct IO_APIC_route_entry entry;

        memset(&entry, 0, sizeof(entry));
        entry.mask = 0; /* Enabled */
        entry.trigger = 0; /* Edge */
        entry.irr = 0;
        entry.polarity = 0; /* High */
        entry.delivery_status = 0;
        entry.dest_mode = 0; /* Physical */
        entry.delivery_mode = dest_ExtINT; /* ExtInt */
        entry.vector = 0;
        /* Deliver to this (boot) CPU by physical APIC ID. */
        SET_DEST(entry, physical, get_apic_id());

        /*
         * Add it to the IO-APIC irq-routing table:
         */
        ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, 0, entry);
    }
    /* Tell the BSP LAPIC whether an ExtINT virtual wire remains in use. */
    disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
1362 | | |
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */

static void __init setup_ioapic_ids_from_mpc(void)
{
    union IO_APIC_reg_00 reg_00;
    static physid_mask_t __initdata phys_id_present_map;
    int apic;
    int i;
    unsigned char old_id;
    unsigned long flags;

    /*
     * Don't check I/O APIC IDs for xAPIC systems. They have
     * no meaning without the serial APIC bus.
     */
    if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
        || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
        return;

    /*
     * This is broken; anything with a real cpu count has to
     * circumvent this idiocy regardless.
     */
    ioapic_phys_id_map(&phys_id_present_map);

    /*
     * Set the IOAPIC ID to the value stored in the MPC table.
     */
    for (apic = 0; apic < nr_ioapics; apic++) {
        if (!nr_ioapic_entries[apic])
            continue;

        /* Read the register 0 value */
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        old_id = mp_ioapics[apic].mpc_apicid;

        /* An ID at/above the broadcast limit cannot be programmed;
         * fall back to whatever the hardware currently reports. */
        if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
            printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
                   apic, mp_ioapics[apic].mpc_apicid);
            printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                   reg_00.bits.ID);
            mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
        }

        /*
         * Sanity check, is the ID really free? Every APIC in a
         * system must have a unique ID or we get lots of nice
         * 'stuck on smp_invalidate_needed IPI wait' messages.
         */
        if (check_apicid_used(&phys_id_present_map,
                              mp_ioapics[apic].mpc_apicid)) {
            printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
                   apic, mp_ioapics[apic].mpc_apicid);
            /* Pick the lowest free physical APIC ID instead. */
            for (i = 0; i < get_physical_broadcast(); i++)
                if (!physid_isset(i, phys_id_present_map))
                    break;
            if (i >= get_physical_broadcast())
                panic("Max APIC ID exceeded");
            printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                   i);
            mp_ioapics[apic].mpc_apicid = i;
        } else {
            apic_printk(APIC_VERBOSE, "Setting %d in the "
                        "phys_id_present_map\n",
                        mp_ioapics[apic].mpc_apicid);
        }
        set_apicid(mp_ioapics[apic].mpc_apicid, &phys_id_present_map);

        /*
         * We need to adjust the IRQ routing table
         * if the ID changed.
         */
        if (old_id != mp_ioapics[apic].mpc_apicid)
            for (i = 0; i < mp_irq_entries; i++)
                if (mp_irqs[i].mpc_dstapic == old_id)
                    mp_irqs[i].mpc_dstapic
                        = mp_ioapics[apic].mpc_apicid;

        /*
         * Read the right value from the MPC table and
         * write it into the ID register.
         */
        apic_printk(APIC_VERBOSE, KERN_INFO
                    "...changing IO-APIC physical APIC ID to %d ...",
                    mp_ioapics[apic].mpc_apicid);

        reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
        spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(apic, 0, reg_00.raw);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        /*
         * Sanity check
         */
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        spin_unlock_irqrestore(&ioapic_lock, flags);
        if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
            printk("could not set ID!\n");
        else
            apic_printk(APIC_VERBOSE, " ok.\n");
    }
}
1474 | | |
1475 | | /* |
1476 | | * There is a nasty bug in some older SMP boards, their mptable lies |
1477 | | * about the timer IRQ. We do the following to work around the situation: |
1478 | | * |
1479 | | * - timer IRQ defaults to IO-APIC IRQ |
1480 | | * - if this function detects that timer IRQs are defunct, then we fall |
1481 | | * back to ISA timer IRQs |
1482 | | */ |
1483 | | static int __init timer_irq_works(void) |
1484 | 1 | { |
1485 | 1 | unsigned long t1, flags; |
1486 | 1 | |
1487 | 1 | t1 = ACCESS_ONCE(pit0_ticks); |
1488 | 1 | |
1489 | 1 | local_save_flags(flags); |
1490 | 1 | local_irq_enable(); |
1491 | 1 | /* Let ten ticks pass... */ |
1492 | 1 | mdelay((10 * 1000) / HZ); |
1493 | 1 | local_irq_restore(flags); |
1494 | 1 | |
1495 | 1 | /* |
1496 | 1 | * Expect a few ticks at least, to be sure some possible |
1497 | 1 | * glue logic does not lock up after one or two first |
1498 | 1 | * ticks in a non-ExtINT mode. Also the local APIC |
1499 | 1 | * might have cached one ExtINT interrupt. Finally, at |
1500 | 1 | * least one tick may be lost due to delays. |
1501 | 1 | */ |
1502 | 1 | if ( (ACCESS_ONCE(pit0_ticks) - t1) > 4 ) |
1503 | 1 | return 1; |
1504 | 1 | |
1505 | 0 | return 0; |
1506 | 1 | } |
1507 | | |
1508 | | /* |
1509 | | * In the SMP+IOAPIC case it might happen that there are an unspecified |
1510 | | * number of pending IRQ events unhandled. These cases are very rare, |
1511 | | * so we 'resend' these IRQs via IPIs, to the same CPU. It's much |
1512 | | * better to do it this way as thus we do not have to be aware of |
1513 | | * 'pending' interrupts in the IRQ path, except at this point. |
1514 | | */ |
1515 | | /* |
1516 | | * Edge triggered needs to resend any interrupt |
1517 | | * that was delayed but this is now handled in the device |
1518 | | * independent code. |
1519 | | */ |
1520 | | |
1521 | | /* |
1522 | | * Starting up a edge-triggered IO-APIC interrupt is |
1523 | | * nasty - we need to make sure that we get the edge. |
1524 | | * If it is already asserted for some reason, we need |
1525 | | * return 1 to indicate that is was pending. |
1526 | | * |
1527 | | * This is not complete - we should be able to fake |
1528 | | * an edge even if it isn't on the 8259A... |
1529 | | */ |
1530 | | static unsigned int startup_edge_ioapic_irq(struct irq_desc *desc) |
1531 | 3 | { |
1532 | 3 | int was_pending = 0; |
1533 | 3 | unsigned long flags; |
1534 | 3 | |
1535 | 3 | spin_lock_irqsave(&ioapic_lock, flags); |
1536 | 3 | if (platform_legacy_irq(desc->irq)) { |
1537 | 3 | disable_8259A_irq(desc); |
1538 | 3 | if (i8259A_irq_pending(desc->irq)) |
1539 | 0 | was_pending = 1; |
1540 | 3 | } |
1541 | 3 | __unmask_IO_APIC_irq(desc->irq); |
1542 | 3 | spin_unlock_irqrestore(&ioapic_lock, flags); |
1543 | 3 | |
1544 | 3 | return was_pending; |
1545 | 3 | } |
1546 | | |
1547 | | /* |
1548 | | * Once we have recorded IRQ_PENDING already, we can mask the |
1549 | | * interrupt for real. This prevents IRQ storms from unhandled |
1550 | | * devices. |
1551 | | */ |
/*
 * Ack an edge-triggered IO-APIC interrupt at the local APIC.  Once
 * IRQ_PENDING has been recorded for a disabled IRQ, also mask the pin
 * so an unhandled device cannot cause an IRQ storm.
 */
static void ack_edge_ioapic_irq(struct irq_desc *desc)
{
    irq_complete_move(desc);
    move_native_irq(desc);

    if ((desc->status & (IRQ_PENDING | IRQ_DISABLED))
        == (IRQ_PENDING | IRQ_DISABLED))
        mask_IO_APIC_irq(desc);
    ack_APIC_irq();
}
1562 | | |
1563 | | /* |
1564 | | * Level triggered interrupts can just be masked, |
1565 | | * and shutting down and starting up the interrupt |
1566 | | * is the same as enabling and disabling them -- except |
1567 | | * with a startup need to return a "was pending" value. |
1568 | | * |
1569 | | * Level triggered interrupts are special because we |
1570 | | * do not touch any IO-APIC register while handling |
1571 | | * them. We ack the APIC in the end-IRQ handler, not |
1572 | | * in the start-IRQ-handler. Protection against reentrance |
1573 | | * from the same interrupt is still provided, both by the |
1574 | | * generic IRQ layer and by the fact that an unacked local |
1575 | | * APIC does not accept IRQs. |
1576 | | */ |
/*
 * Start up a level-triggered IO-APIC interrupt: just unmask the pin.
 * Level interrupts re-assert themselves, so "was pending" is always 0.
 */
static unsigned int startup_level_ioapic_irq(struct irq_desc *desc)
{
    unmask_IO_APIC_irq(desc);

    return 0; /* don't check for pending */
}
1583 | | |
1584 | | static int __init setup_ioapic_ack(const char *s) |
1585 | 0 | { |
1586 | 0 | if ( !strcmp(s, "old") ) |
1587 | 0 | { |
1588 | 0 | ioapic_ack_new = false; |
1589 | 0 | ioapic_ack_forced = true; |
1590 | 0 | } |
1591 | 0 | else if ( !strcmp(s, "new") ) |
1592 | 0 | { |
1593 | 0 | ioapic_ack_new = true; |
1594 | 0 | ioapic_ack_forced = true; |
1595 | 0 | } |
1596 | 0 | else |
1597 | 0 | return -EINVAL; |
1598 | 0 |
|
1599 | 0 | return 0; |
1600 | 0 | } |
1601 | | custom_param("ioapic_ack", setup_ioapic_ack); |
1602 | | |
1603 | | static bool io_apic_level_ack_pending(unsigned int irq) |
1604 | 0 | { |
1605 | 0 | struct irq_pin_list *entry; |
1606 | 0 | unsigned long flags; |
1607 | 0 |
|
1608 | 0 | spin_lock_irqsave(&ioapic_lock, flags); |
1609 | 0 | entry = &irq_2_pin[irq]; |
1610 | 0 | for (;;) { |
1611 | 0 | unsigned int reg; |
1612 | 0 | int pin; |
1613 | 0 |
|
1614 | 0 | if (!entry) |
1615 | 0 | break; |
1616 | 0 |
|
1617 | 0 | pin = entry->pin; |
1618 | 0 | if (pin == -1) |
1619 | 0 | continue; |
1620 | 0 | reg = io_apic_read(entry->apic, 0x10 + pin*2); |
1621 | 0 | /* Is the remote IRR bit set? */ |
1622 | 0 | if (reg & IO_APIC_REDIR_REMOTE_IRR) { |
1623 | 0 | spin_unlock_irqrestore(&ioapic_lock, flags); |
1624 | 0 | return 1; |
1625 | 0 | } |
1626 | 0 | if (!entry->next) |
1627 | 0 | break; |
1628 | 0 | entry = irq_2_pin + entry->next; |
1629 | 0 | } |
1630 | 0 | spin_unlock_irqrestore(&ioapic_lock, flags); |
1631 | 0 |
|
1632 | 0 | return 0; |
1633 | 0 | } |
1634 | | |
/*
 * "Old"-style .ack for level-triggered IO-APIC interrupts: mask the pin
 * (unless directed EOI is in use), ack at the local APIC, and apply the
 * erratum workaround described below.
 */
static void mask_and_ack_level_ioapic_irq(struct irq_desc *desc)
{
    unsigned long v;
    int i;

    irq_complete_move(desc);

    /* With directed EOI the pin stays unmasked; EOI happens in .end. */
    if ( !directed_eoi_enabled )
        mask_IO_APIC_irq(desc);

    /*
     * It appears there is an erratum which affects at least version 0x11
     * of I/O APIC (that's the 82093AA and cores integrated into various
     * chipsets). Under certain conditions a level-triggered interrupt is
     * erroneously delivered as edge-triggered one but the respective IRR
     * bit gets set nevertheless. As a result the I/O unit expects an EOI
     * message but it will never arrive and further interrupts are blocked
     * from the source. The exact reason is so far unknown, but the
     * phenomenon was observed when two consecutive interrupt requests
     * from a given source get delivered to the same CPU and the source is
     * temporarily disabled in between.
     *
     * A workaround is to simulate an EOI message manually. We achieve it
     * by setting the trigger mode to edge and then to level when the edge
     * trigger mode gets detected in the TMR of a local APIC for a
     * level-triggered interrupt. We mask the source for the time of the
     * operation to prevent an edge-triggered interrupt escaping meanwhile.
     * The idea is from Manfred Spraul. --macro
     */
    i = desc->arch.vector;

    /* Snapshot this vector's TMR bit before acking. */
    v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

    ack_APIC_irq();

    if ( directed_eoi_enabled )
        return;

    if ((desc->status & IRQ_MOVE_PENDING) &&
        !io_apic_level_ack_pending(desc->irq))
        move_masked_irq(desc);

    /* TMR bit clear: apply the edge/level flip workaround from above. */
    if ( !(v & (1 << (i & 0x1f))) ) {
        spin_lock(&ioapic_lock);
        __edge_IO_APIC_irq(desc->irq);
        __level_IO_APIC_irq(desc->irq);
        spin_unlock(&ioapic_lock);
    }
}
1684 | | |
/*
 * "Old"-style .end for level-triggered IO-APIC interrupts.  With
 * directed EOI, EOI the pin (masking first when disabled or a move is
 * pending); without it, just re-unmask the pin masked in .ack.
 * @vector is unused in this flavour.
 */
static void end_level_ioapic_irq_old(struct irq_desc *desc, u8 vector)
{
    if ( directed_eoi_enabled )
    {
        /* Fast path: enabled and not moving - just EOI the pin. */
        if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
        {
            eoi_IO_APIC_irq(desc);
            return;
        }

        /* Mask before EOI so a new instance can't fire mid-move. */
        mask_IO_APIC_irq(desc);
        eoi_IO_APIC_irq(desc);
        if ( (desc->status & IRQ_MOVE_PENDING) &&
             !io_apic_level_ack_pending(desc->irq) )
            move_masked_irq(desc);
    }

    if ( !(desc->status & IRQ_DISABLED) )
        unmask_IO_APIC_irq(desc);
}
1705 | | |
/*
 * "New"-style .end for level-triggered IO-APIC interrupts: the local
 * APIC ack is deferred to here, with the erratum workaround below.
 * @vector, when non-zero and different from the current vector,
 * identifies the old vector to EOI explicitly during a move.
 */
static void end_level_ioapic_irq_new(struct irq_desc *desc, u8 vector)
{
    /*
     * It appears there is an erratum which affects at least version 0x11
     * of I/O APIC (that's the 82093AA and cores integrated into various
     * chipsets). Under certain conditions a level-triggered interrupt is
     * erroneously delivered as edge-triggered one but the respective IRR
     * bit gets set nevertheless. As a result the I/O unit expects an EOI
     * message but it will never arrive and further interrupts are blocked
     * from the source. The exact reason is so far unknown, but the
     * phenomenon was observed when two consecutive interrupt requests
     * from a given source get delivered to the same CPU and the source is
     * temporarily disabled in between.
     *
     * A workaround is to simulate an EOI message manually. We achieve it
     * by setting the trigger mode to edge and then to level when the edge
     * trigger mode gets detected in the TMR of a local APIC for a
     * level-triggered interrupt. We mask the source for the time of the
     * operation to prevent an edge-triggered interrupt escaping meanwhile.
     * The idea is from Manfred Spraul. --macro
     */
    unsigned int v, i = desc->arch.vector;

    /* Manually EOI the old vector if we are moving to the new */
    if ( vector && i != vector )
        eoi_IO_APIC_irq(desc);

    /* Snapshot this vector's TMR bit before acking. */
    v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

    ack_APIC_irq();

    if ( (desc->status & IRQ_MOVE_PENDING) &&
         !io_apic_level_ack_pending(desc->irq) )
        move_native_irq(desc);

    /* TMR bit clear: apply the edge/level flip workaround from above. */
    if (!(v & (1 << (i & 0x1f)))) {
        spin_lock(&ioapic_lock);
        __mask_IO_APIC_irq(desc->irq);
        __edge_IO_APIC_irq(desc->irq);
        __level_IO_APIC_irq(desc->irq);
        if ( !(desc->status & IRQ_DISABLED) )
            __unmask_IO_APIC_irq(desc->irq);
        spin_unlock(&ioapic_lock);
    }
}
1751 | | |
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */
static hw_irq_controller ioapic_edge_type = {
    .typename = "IO-APIC-edge",
    .startup = startup_edge_ioapic_irq,
    .shutdown = irq_shutdown_none,
    .enable = unmask_IO_APIC_irq,
    .disable = irq_disable_none,       /* edge IRQs are never really masked */
    .ack = ack_edge_ioapic_irq,
    .set_affinity = set_ioapic_affinity_irq,
};
1769 | | |
/*
 * Controller for level-triggered IO-APIC interrupts.  .ack runs before
 * the handler, .end after it; the defaults below are the "old" ack
 * flavour (cf. the "ioapic_ack" command line option above — the "new"
 * flavour presumably replaces these hooks elsewhere; not visible here).
 */
static struct hw_interrupt_type __read_mostly ioapic_level_type = {
    .typename = "IO-APIC-level",
    .startup = startup_level_ioapic_irq,
    .shutdown = mask_IO_APIC_irq,
    .enable = unmask_IO_APIC_irq,
    .disable = mask_IO_APIC_irq,
    .ack = mask_and_ack_level_ioapic_irq,
    .end = end_level_ioapic_irq_old,
    .set_affinity = set_ioapic_affinity_irq,
};
1780 | | |
/*
 * Fall back to the 8259A for any legacy (ISA) IRQ that is nominally an
 * IO-APIC IRQ but ended up without a vector assigned during IO-APIC
 * routing setup.
 */
static inline void init_IO_APIC_traps(void)
{
    int irq;
    /* Xen: This is way simpler than the Linux implementation. */
    for (irq = 0; platform_legacy_irq(irq); irq++)
        if (IO_APIC_IRQ(irq) && !irq_to_vector(irq))
            make_8259A_irq(irq);
}
1789 | | |
/* Unmask the local APIC LINT0 entry (clear the mask bit in LVT0). */
static void enable_lapic_irq(struct irq_desc *desc)
{
    unsigned long v;

    v = apic_read(APIC_LVT0);
    apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
1797 | | |
/* Mask the local APIC LINT0 entry (set the mask bit in LVT0). */
static void disable_lapic_irq(struct irq_desc *desc)
{
    unsigned long v;

    v = apic_read(APIC_LVT0);
    apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}
1805 | | |
/* EOI at the local APIC only; no IO-APIC is involved for this IRQ. */
static void ack_lapic_irq(struct irq_desc *desc)
{
    ack_APIC_irq();
}
1810 | | |
/*
 * IRQ type used by check_timer() when the timer is wired straight to
 * the local APIC ("Virtual Wire IRQ" fallback).  Only IRQ0 ever uses
 * this, hence startup/shutdown being NULL.
 */
static hw_irq_controller lapic_irq_type = {
    .typename = "local-APIC-edge",
    .startup = NULL, /* startup_irq() not used for IRQ0 */
    .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
    .enable = enable_lapic_irq,
    .disable = disable_lapic_irq,
    .ack = ack_lapic_irq,
};
1819 | | |
1820 | | /* |
1821 | | * This looks a bit hackish but it's about the only one way of sending |
1822 | | * a few INTA cycles to 8259As and any associated glue logic. ICR does |
1823 | | * not support the ExtINT mode, unfortunately. We need to send these |
1824 | | * cycles as some i82489DX-based boards have glue logic that keeps the |
1825 | | * 8259A interrupt line asserted until INTA. --macro |
1826 | | */ |
static void __init unlock_ExtINT_logic(void)
{
    int apic, pin, i;
    struct IO_APIC_route_entry entry0, entry1;
    unsigned char save_control, save_freq_select;

    /* The trick is played on the IRQ8 (RTC) pin; bail if there is none. */
    pin = find_isa_irq_pin(8, mp_INT);
    apic = find_isa_irq_apic(8, mp_INT);
    if ( pin == -1 || apic == -1 )
        return;

    /* Save and clear the current RTE so it can be restored afterwards. */
    entry0 = ioapic_read_entry(apic, pin, 0);
    clear_IO_APIC_pin(apic, pin);

    memset(&entry1, 0, sizeof(entry1));

    /* Temporary RTE: unmasked, edge, ExtINT delivery to this CPU. */
    entry1.dest_mode = 0; /* physical delivery */
    entry1.mask = 0; /* unmask IRQ now */
    SET_DEST(entry1, physical, get_apic_id());
    entry1.delivery_mode = dest_ExtINT;
    entry1.polarity = entry0.polarity;
    entry1.trigger = 0;
    entry1.vector = 0;

    ioapic_write_entry(apic, pin, 0, entry1);

    /*
     * Program the RTC to generate periodic interrupts so the 8259A/glue
     * logic gets the INTA cycles it needs; preserve the old RTC setup.
     */
    save_control = CMOS_READ(RTC_CONTROL);
    save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
    CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
               RTC_FREQ_SELECT);
    CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

    /* Poll for periodic-interrupt flags, shortening the wait when seen. */
    i = 100;
    while (i-- > 0) {
        mdelay(10);
        if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
            i -= 10;
    }

    /* Restore the RTC and put the original RTE back. */
    CMOS_WRITE(save_control, RTC_CONTROL);
    CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
    clear_IO_APIC_pin(apic, pin);

    ioapic_write_entry(apic, pin, 0, entry0);
}
1872 | | |
1873 | | /* |
1874 | | * This code may look a bit paranoid, but it's supposed to cooperate with |
1875 | | * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ |
1876 | | * is so screwy. Thanks to Brian Perkins for testing/hacking this beast |
1877 | | * fanatically on his truly buggy board. |
1878 | | */ |
static void __init check_timer(void)
{
    int apic1, pin1, apic2, pin2;
    int vector, ret;
    unsigned long flags;
    cpumask_t mask_all;

    local_irq_save(flags);

    /*
     * get/set the timer IRQ vector:
     */
    disable_8259A_irq(irq_to_desc(0));
    vector = IRQ0_VECTOR;
    clear_irq_vector(0);

    cpumask_setall(&mask_all);
    if ((ret = bind_irq_vector(0, vector, &mask_all)))
        printk(KERN_ERR"..IRQ0 is not set correctly with ioapic!!!, err:%d\n", ret);

    irq_desc[0].status &= ~IRQ_DISABLED;

    /*
     * Subtle, code in do_timer_interrupt() expects an AEOI
     * mode for the 8259A whenever interrupts are routed
     * through I/O APICs. Also IRQ0 has to be enabled in
     * the 8259A which implies the virtual wire has to be
     * disabled in the local APIC.
     */
    apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
    init_8259A(1);
    /* XEN: Ripped out the legacy missed-tick logic, so below is not needed. */
    /*timer_ack = 1;*/
    /*enable_8259A_irq(irq_to_desc(0));*/

    pin1 = find_isa_irq_pin(0, mp_INT);
    apic1 = find_isa_irq_apic(0, mp_INT);
    pin2 = ioapic_i8259.pin;
    apic2 = ioapic_i8259.apic;

    printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
           vector, apic1, pin1, apic2, pin2);

    /* Attempt 1: the IO-APIC pin named by the MP table / ACPI for IRQ0. */
    if (pin1 != -1) {
        /*
         * Ok, does IRQ0 through the IOAPIC work?
         */
        unmask_IO_APIC_irq(irq_to_desc(0));
        if (timer_irq_works()) {
            local_irq_restore(flags);
            return;
        }
        clear_IO_APIC_pin(apic1, pin1);
        printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
               "IO-APIC\n");
    }

    /* Attempt 2: the pin the 8259A output feeds into, in ExtINT mode. */
    printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
    if (pin2 != -1) {
        printk("\n..... (found pin %d) ...", pin2);
        /*
         * legacy devices should be connected to IO APIC #0
         */
        setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
        if (timer_irq_works()) {
            local_irq_restore(flags);
            printk("works.\n");
            if (pin1 != -1)
                replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
            else
                add_pin_to_irq(0, apic2, pin2);
            return;
        }
        /*
         * Cleanup, just in case ...
         */
        clear_IO_APIC_pin(apic2, pin2);
    }
    printk(" failed.\n");

    /* The NMI watchdog relies on the IO-APIC timer path; give it up. */
    if (nmi_watchdog == NMI_IO_APIC) {
        printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
        nmi_watchdog = 0;
    }

    /* Attempt 3: local APIC "virtual wire" — LVT0 in fixed mode. */
    printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");

    disable_8259A_irq(irq_to_desc(0));
    irq_desc[0].handler = &lapic_irq_type;
    apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
    enable_8259A_irq(irq_to_desc(0));

    if (timer_irq_works()) {
        local_irq_restore(flags);
        printk(" works.\n");
        return;
    }
    apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
    printk(" failed.\n");

    /* Attempt 4: 8259A in normal mode with LVT0 delivering ExtINT. */
    printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");

    /*timer_ack = 0;*/
    init_8259A(0);
    make_8259A_irq(0);
    apic_write(APIC_LVT0, APIC_DM_EXTINT);

    unlock_ExtINT_logic();

    local_irq_restore(flags);

    /* No working timer at all is unrecoverable. */
    if (timer_irq_works()) {
        printk(" works.\n");
        return;
    }
    printk(" failed :(.\n");
    panic("IO-APIC + timer doesn't work! Boot with apic_verbosity=debug "
          "and send a report. Then try booting with the 'noapic' option");
}
1998 | | |
1999 | | /* |
2000 | | * |
2001 | | * IRQ's that are handled by the PIC in the MPS IOAPIC case. |
2002 | | * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ. |
2003 | | * Linux doesn't really care, as it's not actually used |
2004 | | * for any interrupt handling anyway. |
2005 | | */ |
2006 | 0 | #define PIC_IRQS (1 << PIC_CASCADE_IR) |
2007 | | |
2008 | | static struct IO_APIC_route_entry *ioapic_pm_state; |
2009 | | |
/*
 * Allocate ioapic_pm_state, sized to the total redirection-entry count
 * across all IO-APICs.  Failure is fatal (BUG_ON): suspend/resume would
 * have nowhere to save the RTEs.
 */
static void __init ioapic_pm_state_alloc(void)
{
    int i, nr_entry = 0;

    for (i = 0; i < nr_ioapics; i++)
        nr_entry += nr_ioapic_entries[i];

    ioapic_pm_state = _xmalloc(sizeof(struct IO_APIC_route_entry)*nr_entry,
                               sizeof(struct IO_APIC_route_entry));
    BUG_ON(ioapic_pm_state == NULL);
}
2021 | | |
/*
 * Top-level IO-APIC bring-up: enable the IO-APICs, select the level-IRQ
 * ACK model, program all redirection entries, hand stray legacy IRQs
 * back to the 8259A, and verify the timer interrupt works.
 */
void __init setup_IO_APIC(void)
{
    enable_IO_APIC();

    if (acpi_ioapic)
        io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
    else
        io_apic_irqs = ~PIC_IRQS;

    printk("ENABLING IO-APIC IRQs\n");
    printk(" -> Using %s ACK method\n", ioapic_ack_new ? "new" : "old");

    /* The "new" ACK model swaps in different ack/end callbacks. */
    if (ioapic_ack_new) {
        ioapic_level_type.ack = irq_complete_move;
        ioapic_level_type.end = end_level_ioapic_irq_new;
    }

    /*
     * Set up IO-APIC IRQ routing.
     */
    if (!acpi_ioapic)
        setup_ioapic_ids_from_mpc();
    sync_Arb_IDs();
    setup_IO_APIC_irqs();
    init_IO_APIC_traps();
    check_timer();
    print_IO_APIC();
    ioapic_pm_state_alloc();

    register_keyhandler('z', _print_IO_APIC_keyhandler, "dump IOAPIC info", 1);
}
2053 | | |
/*
 * Save every redirection entry into ioapic_pm_state.  Each 64-bit RTE
 * is read as two 32-bit registers: 0x10+2*i (low half) and 0x11+2*i
 * (high half); the high half is read first.
 */
void ioapic_suspend(void)
{
    struct IO_APIC_route_entry *entry = ioapic_pm_state;
    unsigned long flags;
    int apic, i;

    spin_lock_irqsave(&ioapic_lock, flags);
    for (apic = 0; apic < nr_ioapics; apic++) {
        for (i = 0; i < nr_ioapic_entries[apic]; i ++, entry ++ ) {
            *(((int *)entry) + 1) = __io_apic_read(apic, 0x11 + 2 * i);
            *(((int *)entry) + 0) = __io_apic_read(apic, 0x10 + 2 * i);
        }
    }
    spin_unlock_irqrestore(&ioapic_lock, flags);
}
2069 | | |
/*
 * Restore IO-APIC state saved by ioapic_suspend(): first re-program each
 * APIC's ID (register 0) if it no longer matches the MP-table value,
 * then replay the saved RTEs, writing the high (destination) half before
 * the low (mask/vector) half.  APICs with no entries are skipped.
 */
void ioapic_resume(void)
{
    struct IO_APIC_route_entry *entry = ioapic_pm_state;
    unsigned long flags;
    union IO_APIC_reg_00 reg_00;
    int i, apic;

    spin_lock_irqsave(&ioapic_lock, flags);
    for (apic = 0; apic < nr_ioapics; apic++){
        if (!nr_ioapic_entries[apic])
            continue;
        reg_00.raw = __io_apic_read(apic, 0);
        if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid) {
            reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
            __io_apic_write(apic, 0, reg_00.raw);
        }
        for (i = 0; i < nr_ioapic_entries[apic]; i++, entry++) {
            __io_apic_write(apic, 0x11+2*i, *(((int *)entry)+1));
            __io_apic_write(apic, 0x10+2*i, *(((int *)entry)+0));
        }
    }
    spin_unlock_irqrestore(&ioapic_lock, flags);
}
2093 | | |
2094 | | /* -------------------------------------------------------------------------- |
2095 | | ACPI-based IOAPIC Configuration |
2096 | | -------------------------------------------------------------------------- */ |
2097 | | |
2098 | | |
/*
 * Choose and program a unique APIC ID for IO-APIC 'ioapic', preferring
 * the caller-supplied 'apic_id'.  Falls back to the chip's current ID
 * when the request is out of range, and scans for a free ID when the
 * requested one is already in use (panics if none is free).  Returns
 * the ID actually programmed, or -1 if the chip refused the update.
 */
int __init io_apic_get_unique_id (int ioapic, int apic_id)
{
    union IO_APIC_reg_00 reg_00;
    /* Persistent across calls: tracks which IDs have been handed out. */
    static physid_mask_t __initdata apic_id_map = PHYSID_MASK_NONE;
    unsigned long flags;
    int i = 0;

    /*
     * The P4 platform supports up to 256 APIC IDs on two separate APIC
     * buses (one for LAPICs, one for IOAPICs), where predecessors only
     * supports up to 16 on one shared APIC bus.
     *
     * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
     * advantage of new APIC bus architecture.
     */

    if (physids_empty(apic_id_map))
        ioapic_phys_id_map(&apic_id_map);

    spin_lock_irqsave(&ioapic_lock, flags);
    reg_00.raw = io_apic_read(ioapic, 0);
    spin_unlock_irqrestore(&ioapic_lock, flags);

    if (apic_id >= get_physical_broadcast()) {
        printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
               "%d\n", ioapic, apic_id, reg_00.bits.ID);
        apic_id = reg_00.bits.ID;
    }

    /*
     * Every APIC in a system must have a unique ID or we get lots of nice
     * 'stuck on smp_invalidate_needed IPI wait' messages.
     */
    if (check_apicid_used(&apic_id_map, apic_id)) {

        for (i = 0; i < get_physical_broadcast(); i++) {
            if (!check_apicid_used(&apic_id_map, i))
                break;
        }

        if (i == get_physical_broadcast())
            panic("Max apic_id exceeded");

        printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
               "trying %d\n", ioapic, apic_id, i);

        apic_id = i;
    }

    set_apicid(apic_id, &apic_id_map);

    if (reg_00.bits.ID != apic_id) {
        reg_00.bits.ID = apic_id;

        spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(ioapic, 0, reg_00.raw);
        reg_00.raw = io_apic_read(ioapic, 0);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        /* Sanity check */
        if (reg_00.bits.ID != apic_id) {
            printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
            return -1;
        }
    }

    apic_printk(APIC_VERBOSE, KERN_INFO
                "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

    return apic_id;
}
2170 | | |
2171 | | |
2172 | | int __init io_apic_get_version (int ioapic) |
2173 | 2 | { |
2174 | 2 | union IO_APIC_reg_01 reg_01; |
2175 | 2 | unsigned long flags; |
2176 | 2 | |
2177 | 2 | spin_lock_irqsave(&ioapic_lock, flags); |
2178 | 2 | reg_01.raw = io_apic_read(ioapic, 1); |
2179 | 2 | spin_unlock_irqrestore(&ioapic_lock, flags); |
2180 | 2 | |
2181 | 2 | return reg_01.bits.version; |
2182 | 2 | } |
2183 | | |
2184 | | |
2185 | | int __init io_apic_get_redir_entries (int ioapic) |
2186 | 2 | { |
2187 | 2 | union IO_APIC_reg_01 reg_01; |
2188 | 2 | unsigned long flags; |
2189 | 2 | |
2190 | 2 | spin_lock_irqsave(&ioapic_lock, flags); |
2191 | 2 | reg_01.raw = io_apic_read(ioapic, 1); |
2192 | 2 | spin_unlock_irqrestore(&ioapic_lock, flags); |
2193 | 2 | |
2194 | 2 | return reg_01.bits.entries; |
2195 | 2 | } |
2196 | | |
2197 | | |
/*
 * Program an IO-APIC pin for an ACPI-described IRQ: build a masked RTE
 * with the requested trigger/polarity, assign a vector, register the
 * appropriate handler type, and write the entry.  Returns 0 on success
 * or a negative errno.
 */
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct IO_APIC_route_entry entry;
    cpumask_t mask;
    unsigned long flags;
    int vector;

    if (!IO_APIC_IRQ(irq)) {
        printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ %d\n",
               ioapic, irq);
        return -EINVAL;
    }

    /*
     * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
     * Note that we mask (disable) IRQs now -- these get enabled when the
     * corresponding device driver registers for this IRQ.
     */

    memset(&entry,0,sizeof(entry));

    entry.delivery_mode = INT_DELIVERY_MODE;
    entry.dest_mode = INT_DEST_MODE;
    entry.trigger = edge_level;
    entry.polarity = active_high_low;
    entry.mask = 1;

    /*
     * IRQs < 16 are already in the irq_2_pin[] map
     */
    if (!platform_legacy_irq(irq))
        add_pin_to_irq(irq, ioapic, pin);

    vector = assign_irq_vector(irq, NULL);
    if (vector < 0)
        return vector;
    entry.vector = vector;

    cpumask_copy(&mask, TARGET_CPUS);
    /* Don't chance ending up with an empty mask. */
    if (cpumask_intersects(&mask, desc->arch.cpu_mask))
        cpumask_and(&mask, &mask, desc->arch.cpu_mask);
    SET_DEST(entry, logical, cpu_mask_to_apicid(&mask));

    apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
                "(%d-%d -> %#x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
                mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
                edge_level, active_high_low);

    ioapic_register_intr(irq, edge_level);

    if (!ioapic && platform_legacy_irq(irq))
        disable_8259A_irq(desc);

    /*
     * NOTE: the saved interrupt flags are handed across the lock switch
     * below — ioapic_lock is released without restoring flags, which are
     * then restored when desc->lock is dropped.  IRQs stay off throughout.
     */
    spin_lock_irqsave(&ioapic_lock, flags);
    __ioapic_write_entry(ioapic, pin, 0, entry);
    set_native_irq_info(irq, TARGET_CPUS);
    spin_unlock(&ioapic_lock);

    spin_lock(&desc->lock);
    if (!(desc->status & (IRQ_DISABLED | IRQ_GUEST)))
        desc->handler->startup(desc);
    spin_unlock_irqrestore(&desc->lock, flags);

    return 0;
}
2265 | | |
2266 | | static int ioapic_physbase_to_id(unsigned long physbase) |
2267 | 0 | { |
2268 | 0 | int apic; |
2269 | 0 | for ( apic = 0; apic < nr_ioapics; apic++ ) |
2270 | 0 | { |
2271 | 0 | if ( !nr_ioapic_entries[apic] ) |
2272 | 0 | continue; |
2273 | 0 | if ( mp_ioapics[apic].mpc_apicaddr == physbase ) |
2274 | 0 | return apic; |
2275 | 0 | } |
2276 | 0 | return -EINVAL; |
2277 | 0 | } |
2278 | | |
/*
 * Translate an (IO-APIC, pin) pair into a GSI-based IRQ number: use the
 * MP interrupt table entry when one exists, otherwise fall back to the
 * APIC's GSI base plus the pin offset.  Returns -EINVAL for a negative
 * apic index.
 */
static int apic_pin_2_gsi_irq(int apic, int pin)
{
    int idx;

    if (apic < 0)
        return -EINVAL;

    idx = find_irq_entry(apic, pin, mp_INT);

    return idx >= 0 ? pin_2_irq(idx, apic, pin)
                    : io_apic_gsi_base(apic) + pin;
}
2291 | | |
/*
 * Read IO-APIC register 'reg' on behalf of a guest; the APIC is
 * identified by its physical MMIO base.  The value is returned via
 * *pval.  Returns 0, or a negative errno when no IO-APIC matches.
 */
int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval)
{
    int apic;
    unsigned long flags;

    if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
        return apic;

    spin_lock_irqsave(&ioapic_lock, flags);
    *pval = io_apic_read(apic, reg);
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return 0;
}
2306 | | |
/*
 * Diagnostic for suspicious guest RTE writes: logs apic/pin/irq and the
 * attempted new entry (all taken from the caller's local scope), then
 * the caller-supplied format/args.
 */
#define WARN_BOGUS_WRITE(f, a...) \
    dprintk(XENLOG_INFO, "IO-APIC: apic=%d, pin=%d, irq=%d\n" \
            XENLOG_INFO "IO-APIC: new_entry=%08x\n" \
            XENLOG_INFO "IO-APIC: " f "\n", \
            apic, pin, irq, *(u32 *)&rte, ##a )
2312 | | |
/*
 * Handle a guest (hardware domain) write to the low 32 bits of an
 * IO-APIC RTE — the high (destination) half is always owned by Xen.
 * The entry is sanitised, a vector and pirq mapping are established on
 * first unmask, and only then is the entry committed to hardware.
 * Returns 0 or a negative errno.
 */
int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
{
    int apic, pin, irq, ret, pirq;
    struct IO_APIC_route_entry rte = { 0 };
    unsigned long flags;
    struct irq_desc *desc;

    if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
        return apic;

    /* Only write to the first half of a route entry. */
    if ( (reg < 0x10) || (reg & 1) )
        return 0;

    pin = (reg - 0x10) >> 1;

    /* Write first half from guest; second half is target info. */
    *(u32 *)&rte = val;

    /*
     * What about weird destination types?
     * SMI: Ignore? Ought to be set up by the BIOS.
     * NMI: Ignore? Watchdog functionality is Xen's concern.
     * INIT: Definitely ignore: probably a guest OS bug.
     * ExtINT: Ignore? Linux only asserts this at start of day.
     * For now, print a message and return an error. We can fix up on demand.
     */
    if ( rte.delivery_mode > dest_LowestPrio )
    {
        printk("ERROR: Attempt to write weird IOAPIC destination mode!\n");
        printk(" APIC=%d/%d, lo-reg=%x\n", apic, pin, val);
        return -EINVAL;
    }

    /*
     * The guest does not know physical APIC arrangement (flat vs. cluster).
     * Apply genapic conventions for this platform.
     */
    rte.delivery_mode = INT_DELIVERY_MODE;
    rte.dest_mode = INT_DEST_MODE;

    irq = apic_pin_2_gsi_irq(apic, pin);
    if ( irq < 0 )
        return irq;

    desc = irq_to_desc(irq);

    /*
     * Since PHYSDEVOP_alloc_irq_vector is dummy, rte.vector is the pirq
     * which corresponds to this ioapic pin, retrieve it for building
     * pirq and irq mapping. Where the GSI is greater than 256, we assume
     * that dom0 pirq == irq.
     */
    if ( !rte.mask )
    {
        pirq = (irq >= 256) ? irq : rte.vector;
        if ( pirq >= hardware_domain->nr_pirqs )
            return -EINVAL;
    }
    else
        pirq = -1;

    /* IRQ already in use: don't reprogram, just warn if views diverge. */
    if ( desc->action )
    {
        spin_lock_irqsave(&ioapic_lock, flags);
        ret = io_apic_read(apic, 0x10 + 2 * pin);
        spin_unlock_irqrestore(&ioapic_lock, flags);
        rte.vector = desc->arch.vector;
        if ( *(u32*)&rte != ret )
            WARN_BOGUS_WRITE("old_entry=%08x pirq=%d\n" XENLOG_INFO
                             "IO-APIC: Attempt to modify IO-APIC pin for in-use IRQ!",
                             ret, pirq);
        return 0;
    }

    /* No usable vector yet: allocate one, rolling back on failure. */
    if ( desc->arch.vector <= 0 || desc->arch.vector > LAST_DYNAMIC_VECTOR )
    {
        int vector = desc->arch.vector;

        if ( vector < FIRST_HIPRIORITY_VECTOR )
            add_pin_to_irq(irq, apic, pin);
        else
            desc->arch.vector = IRQ_VECTOR_UNASSIGNED;
        ret = assign_irq_vector(irq, NULL);
        if ( ret < 0 )
        {
            if ( vector < FIRST_HIPRIORITY_VECTOR )
                remove_pin_from_irq(irq, apic, pin);
            else
                desc->arch.vector = vector;
            return ret;
        }

        printk(XENLOG_INFO "allocated vector %02x for irq %d\n", ret, irq);
    }
    if ( pirq >= 0 )
    {
        spin_lock(&hardware_domain->event_lock);
        ret = map_domain_pirq(hardware_domain, pirq, irq,
                              MAP_PIRQ_TYPE_GSI, NULL);
        spin_unlock(&hardware_domain->event_lock);
        if ( ret < 0 )
            return ret;
    }

    spin_lock_irqsave(&ioapic_lock, flags);
    /* Set the correct irq-handling type. */
    desc->handler = rte.trigger ?
        &ioapic_level_type: &ioapic_edge_type;

    /* Mask iff level triggered. */
    rte.mask = rte.trigger;
    /* Set the vector field to the real vector! */
    rte.vector = desc->arch.vector;

    SET_DEST(rte, logical, cpu_mask_to_apicid(desc->arch.cpu_mask));

    __ioapic_write_entry(apic, pin, 0, rte);

    spin_unlock_irqrestore(&ioapic_lock, flags);

    return 0;
}
2436 | | |
2437 | | static const char * delivery_mode_2_str( |
2438 | | const enum ioapic_irq_destination_types mode) |
2439 | 0 | { |
2440 | 0 | switch ( mode ) |
2441 | 0 | { |
2442 | 0 | case dest_Fixed: return "Fixed"; |
2443 | 0 | case dest_LowestPrio: return "LoPri"; |
2444 | 0 | case dest_SMI: return "SMI"; |
2445 | 0 | case dest_NMI: return "NMI"; |
2446 | 0 | case dest_INIT: return "INIT"; |
2447 | 0 | case dest_ExtINT: return "ExINT"; |
2448 | 0 | case dest__reserved_1: |
2449 | 0 | case dest__reserved_2: return "Resvd"; |
2450 | 0 | default: return "INVAL"; |
2451 | 0 | } |
2452 | 0 | } |
2453 | | |
/*
 * Debug-key helper: walk the irq -> (apic, pin) map and print the live
 * RTE contents for every GSI that has at least one pin attached.
 */
void dump_ioapic_irq_info(void)
{
    struct irq_pin_list *entry;
    struct IO_APIC_route_entry rte;
    unsigned int irq, pin, printed = 0;

    /* Nothing to dump before the irq_2_pin table has been set up. */
    if ( !irq_2_pin )
        return;

    for ( irq = 0; irq < nr_irqs_gsi; irq++ )
    {
        /* Keep the system responsive during a long dump. */
        if ( !(irq & 0x1f) )
            process_pending_softirqs();

        entry = &irq_2_pin[irq];
        if ( entry->pin == -1 )
            continue;

        if ( !printed++ )
            printk("IO-APIC interrupt information:\n");

        printk("    IRQ%3d Vec%3d:\n", irq, irq_to_vector(irq));

        /* An IRQ may be wired to several pins; follow the chain. */
        for ( ; ; )
        {
            pin = entry->pin;

            printk("      Apic 0x%02x, Pin %2d: ", entry->apic, pin);

            rte = ioapic_read_entry(entry->apic, pin, 0);

            printk("vec=%02x delivery=%-5s dest=%c status=%d "
                   "polarity=%d irr=%d trig=%c mask=%d dest_id:%d\n",
                   rte.vector, delivery_mode_2_str(rte.delivery_mode),
                   rte.dest_mode ? 'L' : 'P',
                   rte.delivery_status, rte.polarity, rte.irr,
                   rte.trigger ? 'L' : 'E', rte.mask,
                   rte.dest.logical.logical_dest);

            if ( entry->next == 0 )
                break;
            entry = &irq_2_pin[entry->next];
        }
    }
}
2499 | | |
2500 | | static unsigned int __initdata max_gsi_irqs; |
2501 | | integer_param("max_gsi_irqs", max_gsi_irqs); |
2502 | | |
2503 | | static __init bool bad_ioapic_register(unsigned int idx) |
2504 | 2 | { |
2505 | 2 | union IO_APIC_reg_00 reg_00 = { .raw = io_apic_read(idx, 0) }; |
2506 | 2 | union IO_APIC_reg_01 reg_01 = { .raw = io_apic_read(idx, 1) }; |
2507 | 2 | union IO_APIC_reg_02 reg_02 = { .raw = io_apic_read(idx, 2) }; |
2508 | 2 | |
2509 | 2 | if ( reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1 ) |
2510 | 0 | { |
2511 | 0 | printk(KERN_WARNING "I/O APIC %#x registers return all ones, skipping!\n", |
2512 | 0 | mp_ioapics[idx].mpc_apicaddr); |
2513 | 0 | return 1; |
2514 | 0 | } |
2515 | 2 | |
2516 | 2 | return 0; |
2517 | 2 | } |
2518 | | |
2519 | | void __init init_ioapic_mappings(void) |
2520 | 1 | { |
2521 | 1 | unsigned long ioapic_phys; |
2522 | 1 | unsigned int i, idx = FIX_IO_APIC_BASE_0; |
2523 | 1 | union IO_APIC_reg_01 reg_01; |
2524 | 1 | |
2525 | 1 | if ( smp_found_config ) |
2526 | 1 | nr_irqs_gsi = 0; |
2527 | 3 | for ( i = 0; i < nr_ioapics; i++ ) |
2528 | 2 | { |
2529 | 2 | if ( smp_found_config ) |
2530 | 2 | { |
2531 | 2 | ioapic_phys = mp_ioapics[i].mpc_apicaddr; |
2532 | 2 | if ( !ioapic_phys ) |
2533 | 0 | { |
2534 | 0 | printk(KERN_ERR "WARNING: bogus zero IO-APIC address " |
2535 | 0 | "found in MPTABLE, disabling IO/APIC support!\n"); |
2536 | 0 | smp_found_config = false; |
2537 | 0 | skip_ioapic_setup = true; |
2538 | 0 | goto fake_ioapic_page; |
2539 | 0 | } |
2540 | 2 | } |
2541 | 2 | else |
2542 | 0 | { |
2543 | 0 | fake_ioapic_page: |
2544 | 0 | ioapic_phys = __pa(alloc_xenheap_page()); |
2545 | 0 | clear_page(__va(ioapic_phys)); |
2546 | 0 | } |
2547 | 2 | set_fixmap_nocache(idx, ioapic_phys); |
2548 | 2 | apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08Lx (%08lx)\n", |
2549 | 0 | __fix_to_virt(idx), ioapic_phys); |
2550 | 2 | idx++; |
2551 | 2 | |
2552 | 2 | if ( bad_ioapic_register(i) ) |
2553 | 0 | { |
2554 | 0 | clear_fixmap(idx); |
2555 | 0 | continue; |
2556 | 0 | } |
2557 | 2 | |
2558 | 2 | if ( smp_found_config ) |
2559 | 2 | { |
2560 | 2 | /* The number of IO-APIC IRQ registers (== #pins): */ |
2561 | 2 | reg_01.raw = io_apic_read(i, 1); |
2562 | 2 | nr_ioapic_entries[i] = reg_01.bits.entries + 1; |
2563 | 2 | nr_irqs_gsi += nr_ioapic_entries[i]; |
2564 | 2 | |
2565 | 2 | if ( rangeset_add_singleton(mmio_ro_ranges, |
2566 | 2 | ioapic_phys >> PAGE_SHIFT) ) |
2567 | 0 | printk(KERN_ERR "Failed to mark IO-APIC page %lx read-only\n", |
2568 | 0 | ioapic_phys); |
2569 | 2 | } |
2570 | 2 | } |
2571 | 1 | |
2572 | 1 | nr_irqs_gsi = max(nr_irqs_gsi, highest_gsi() + 1); |
2573 | 1 | |
2574 | 1 | if ( max_gsi_irqs == 0 ) |
2575 | 1 | max_gsi_irqs = nr_irqs ? nr_irqs / 8 : PAGE_SIZE; |
2576 | 0 | else if ( nr_irqs != 0 && max_gsi_irqs > nr_irqs ) |
2577 | 0 | { |
2578 | 0 | printk(XENLOG_WARNING "\"max_gsi_irqs=\" cannot be specified larger" |
2579 | 0 | " than \"nr_irqs=\"\n"); |
2580 | 0 | max_gsi_irqs = nr_irqs; |
2581 | 0 | } |
2582 | 1 | if ( max_gsi_irqs < 16 ) |
2583 | 0 | max_gsi_irqs = 16; |
2584 | 1 | |
2585 | 1 | /* for PHYSDEVOP_pirq_eoi_gmfn guest assumptions */ |
2586 | 1 | if ( max_gsi_irqs > PAGE_SIZE * 8 ) |
2587 | 0 | max_gsi_irqs = PAGE_SIZE * 8; |
2588 | 1 | |
2589 | 1 | if ( !smp_found_config || skip_ioapic_setup || nr_irqs_gsi < 16 ) |
2590 | 0 | nr_irqs_gsi = 16; |
2591 | 1 | else if ( nr_irqs_gsi > max_gsi_irqs ) |
2592 | 0 | { |
2593 | 0 | printk(XENLOG_WARNING "Limiting to %u GSI IRQs (found %u)\n", |
2594 | 0 | max_gsi_irqs, nr_irqs_gsi); |
2595 | 0 | nr_irqs_gsi = max_gsi_irqs; |
2596 | 0 | } |
2597 | 1 | |
2598 | 1 | if ( nr_irqs == 0 ) |
2599 | 1 | nr_irqs = cpu_has_apic ? |
2600 | 1 | max(16U + num_present_cpus() * NR_DYNAMIC_VECTORS, |
2601 | 1 | 8 * nr_irqs_gsi) : |
2602 | 0 | nr_irqs_gsi; |
2603 | 0 | else if ( nr_irqs < 16 ) |
2604 | 0 | nr_irqs = 16; |
2605 | 1 | printk(XENLOG_INFO "IRQ limits: %u GSI, %u MSI/MSI-X\n", |
2606 | 1 | nr_irqs_gsi, nr_irqs - nr_irqs_gsi); |
2607 | 1 | } |
2608 | | |
/*
 * Upper bound on the hardware domain's PIRQs: all GSIs plus dynamic
 * vector headroom scaled by log2-ish CPU count (capped by dom0's max
 * vCPUs for dom0 itself), clamped to nr_irqs and to the capacity of the
 * one-page pirq-EOI bitmap.
 */
unsigned int arch_hwdom_irqs(domid_t domid)
{
    unsigned int n = fls(num_present_cpus());

    if ( !domid )
        n = min(n, dom0_max_vcpus());
    n = min(nr_irqs_gsi + n * NR_DYNAMIC_VECTORS, nr_irqs);

    /* Bounded by the domain pirq eoi bitmap gfn. */
    n = min_t(unsigned int, n, PAGE_SIZE * BITS_PER_BYTE);

    printk("Dom%d has maximum %u PIRQs\n", domid, n);

    return n;
}