debuggers.hg: xen/arch/ia64/linux-xen/sn/kernel/irq.c @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this

author   Mukesh Rathor
date     Mon May 19 15:34:57 2008 -0700
children f875aaa791f0

/*
 * Platform dependent support for SGI SN
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#ifdef XEN
#include <linux/linux-pci.h>
#include <asm/hw_irq.h>
#endif
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#ifndef XEN
#include <asm/sn/pcidev.h>
#endif
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>

#ifdef XEN
#define pci_dev_get(dev)        do {} while (0)
#define move_native_irq(foo)    do {} while (0)
#endif

static void force_interrupt(int irq);
#ifndef XEN
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
#endif

int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
struct list_head **sn_irq_lh;
static DEFINE_SPINLOCK(sn_irq_info_lock);       /* non-IRQ lock */
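
/*
 * sn_intr_alloc()/sn_intr_free() ask the SN PROM, through a SAL call,
 * to allocate or release an interrupt resource on the bridge named by
 * (local_nasid, local_widget).  The sn_irq_info structure is passed by
 * physical address; the PROM presumably fills it in on allocation.
 */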
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
                  struct sn_irq_info *sn_irq_info,
                  int req_irq, nasid_t req_nasid,
                  int req_slice)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_ALLOC, (u64) local_nasid,
                        (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
                        (u64) req_nasid, (u64) req_slice);

        return ret_stuff.status;
}

void sn_intr_free(nasid_t local_nasid, int local_widget,
                  struct sn_irq_info *sn_irq_info)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_FREE, (u64) local_nasid,
                        (u64) local_widget, (u64) sn_irq_info->irq_irq,
                        (u64) sn_irq_info->irq_cookie, 0, 0);
}
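
/*
 * No hardware action is needed on startup/shutdown/enable/disable for
 * this interrupt type, so the hw_interrupt_type hooks below are stubs;
 * the real work happens in sn_ack_irq() and sn_end_irq().
 */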
static unsigned int sn_startup_irq(unsigned int irq)
{
        return 0;
}

static void sn_shutdown_irq(unsigned int irq)
{
}

static void sn_disable_irq(unsigned int irq)
{
}

static void sn_enable_irq(unsigned int irq)
{
}
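
/*
 * Acknowledge an interrupt: read the latched event bits from the local
 * SHub EVENT_OCCURRED MMR, write them back to the ALIAS register
 * (which clears them), and mark this vector as in service in the
 * per-CPU data area.
 */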
static void sn_ack_irq(unsigned int irq)
{
        u64 event_occurred, mask;

        irq = irq & 0xff;
        event_occurred = HUB_L((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
        mask = event_occurred & SH_ALL_INT_MASK;
        HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
        __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

        move_native_irq(irq);
}
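
/*
 * End-of-interrupt: clear the in-service bit and, when
 * sn_force_interrupt_flag is set, ask the bridge provider to force the
 * interrupt again so nothing latched during service is lost (see
 * force_interrupt() below).
 */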
static void sn_end_irq(unsigned int irq)
{
        int ivec;
        u64 event_occurred;

        ivec = irq & 0xff;
        if (ivec == SGI_UART_VECTOR) {
                event_occurred = HUB_L((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
                /* If the UART bit is set here, we may have received an
                 * interrupt from the UART that the driver missed.  To
                 * make sure, we IPI ourselves to force us to look again.
                 */
                if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
                        platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
                                          IA64_IPI_DM_INT, 0);
                }
        }
        __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
        if (sn_force_interrupt_flag)
                force_interrupt(irq);
}

#ifndef XEN
static void sn_irq_info_free(struct rcu_head *head);
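
/*
 * Retarget a device interrupt to a new (nasid, slice).  The PROM-side
 * resource apparently cannot be updated in place: the sn_irq_info is
 * copied, the old PROM entry freed, a new one allocated for the
 * requested target, and the copy then replaces the original in the
 * per-IRQ list, with the original freed via RCU.
 */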
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
                                       nasid_t nasid, int slice)
{
        int vector;
        int cpuphys;
        int64_t bridge;
        int local_widget, status;
        nasid_t local_nasid;
        struct sn_irq_info *new_irq_info;
        struct sn_pcibus_provider *pci_provider;

        new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
        if (new_irq_info == NULL)
                return NULL;

        memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

        bridge = (u64) new_irq_info->irq_bridge;
        if (!bridge) {
                kfree(new_irq_info);
                return NULL;    /* irq is not a device interrupt */
        }

        local_nasid = NASID_GET(bridge);

        if (local_nasid & 1)
                local_widget = TIO_SWIN_WIDGETNUM(bridge);
        else
                local_widget = SWIN_WIDGETNUM(bridge);

        vector = sn_irq_info->irq_irq;
        /* Free the old PROM new_irq_info structure */
        sn_intr_free(local_nasid, local_widget, new_irq_info);
        /* Update the kernel's new_irq_info with the new target info */
        unregister_intr_pda(new_irq_info);

        /* allocate a new PROM new_irq_info struct */
        status = sn_intr_alloc(local_nasid, local_widget,
                               new_irq_info, vector,
                               nasid, slice);

        /* SAL call failed */
        if (status) {
                kfree(new_irq_info);
                return NULL;
        }

        cpuphys = nasid_slice_to_cpuid(nasid, slice);
        new_irq_info->irq_cpuid = cpuphys;
        register_intr_pda(new_irq_info);

        pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];

        /*
         * If this represents a line interrupt, target it.  If it's
         * an msi (irq_int_bit < 0), it's already targeted.
         */
        if (new_irq_info->irq_int_bit >= 0 &&
            pci_provider && pci_provider->target_interrupt)
                (pci_provider->target_interrupt)(new_irq_info);

        spin_lock(&sn_irq_info_lock);
#ifdef XEN
        list_replace(&sn_irq_info->list, &new_irq_info->list);
#else
        list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
#endif
        spin_unlock(&sn_irq_info_lock);
#ifndef XEN
        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
#endif

#ifdef CONFIG_SMP
        set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif

        return new_irq_info;
}
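
/*
 * set_affinity hook: retarget every sn_irq_info registered on this IRQ
 * to the nasid/slice of the first CPU in the requested mask.
 */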
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
        struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
        nasid_t nasid;
        int slice;

        nasid = cpuid_to_nasid(first_cpu(mask));
        slice = cpuid_to_slice(first_cpu(mask));

        list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
                                 sn_irq_lh[irq], list)
                (void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
#endif
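
/*
 * Xen's hw_interrupt_type labels the type with .typename where the
 * Linux build uses .name, hence the #ifdef pair below.  The Xen build
 * also has no set_affinity hook for this type.
 */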
struct hw_interrupt_type irq_type_sn = {
#ifndef XEN
        .name = "SN hub",
#else
        .typename = "SN hub",
#endif
        .startup = sn_startup_irq,
        .shutdown = sn_shutdown_irq,
        .enable = sn_enable_irq,
        .disable = sn_disable_irq,
        .ack = sn_ack_irq,
        .end = sn_end_irq,
#ifndef XEN
        .set_affinity = sn_set_affinity_irq
#endif
};

unsigned int sn_local_vector_to_irq(u8 vector)
{
        return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}
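
/*
 * Install irq_type_sn on every IRQ descriptor that still has the
 * default no_irq_type.  The descriptor field is named "handler" in
 * Xen and "chip" in the Linux build.
 */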
void sn_irq_init(void)
{
        int i;
        irq_desc_t *base_desc = irq_desc;

#ifndef XEN
        ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
        ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
#endif

        for (i = 0; i < NR_IRQS; i++) {
#ifdef XEN
                if (base_desc[i].handler == &no_irq_type) {
                        base_desc[i].handler = &irq_type_sn;
#else
                if (base_desc[i].chip == &no_irq_type) {
                        base_desc[i].chip = &irq_type_sn;
#endif
                }
        }
}
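
/*
 * Track the lowest and highest device IRQ seen on each CPU in its
 * per-CPU data area, so sn_lb_int_war_check() only has to scan the
 * range [sn_first_irq, sn_last_irq].
 */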
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;

        if (pdacpu(cpu)->sn_last_irq < irq) {
                pdacpu(cpu)->sn_last_irq = irq;
        }

        if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
                pdacpu(cpu)->sn_first_irq = irq;
}

#ifndef XEN
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
        int irq = sn_irq_info->irq_irq;
        int cpu = sn_irq_info->irq_cpuid;
        struct sn_irq_info *tmp_irq_info;
        int i, foundmatch;

#ifndef XEN
        rcu_read_lock();
#else
        spin_lock(&sn_irq_info_lock);
#endif
        if (pdacpu(cpu)->sn_last_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_last_irq - 1;
                     i && !foundmatch; i--) {
#ifdef XEN
                        list_for_each_entry(tmp_irq_info,
                                            sn_irq_lh[i],
                                            list) {
#else
                        list_for_each_entry_rcu(tmp_irq_info,
                                                sn_irq_lh[i],
                                                list) {
#endif
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch = 1;
                                        break;
                                }
                        }
                }
                pdacpu(cpu)->sn_last_irq = i;
        }

        if (pdacpu(cpu)->sn_first_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_first_irq + 1;
                     i < NR_IRQS && !foundmatch; i++) {
#ifdef XEN
                        list_for_each_entry(tmp_irq_info,
                                            sn_irq_lh[i],
                                            list) {
#else
                        list_for_each_entry_rcu(tmp_irq_info,
                                                sn_irq_lh[i],
                                                list) {
#endif
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch = 1;
                                        break;
                                }
                        }
                }
                pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
        }
#ifndef XEN
        rcu_read_unlock();
#else
        spin_unlock(&sn_irq_info_lock);
#endif
}

static void sn_irq_info_free(struct rcu_head *head)
{
        struct sn_irq_info *sn_irq_info;

        sn_irq_info = container_of(head, struct sn_irq_info, rcu);
        kfree(sn_irq_info);
}
#endif
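
/*
 * Called when a device interrupt is set up: record which CPU services
 * the IRQ, link the sn_irq_info into the per-IRQ list, and update the
 * per-CPU first/last IRQ bounds.
 */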
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
        nasid_t nasid = sn_irq_info->irq_nasid;
        int slice = sn_irq_info->irq_slice;
        int cpu = nasid_slice_to_cpuid(nasid, slice);

        pci_dev_get(pci_dev);
        sn_irq_info->irq_cpuid = cpu;
#ifndef XEN
        sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
#endif

        /* link it into the sn_irq[irq] list */
        spin_lock(&sn_irq_info_lock);
#ifdef XEN
        list_add(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
#else
        list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
#endif
#ifndef XEN
        reserve_irq_vector(sn_irq_info->irq_irq);
#endif
        spin_unlock(&sn_irq_info_lock);

        register_intr_pda(sn_irq_info);
}
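
/*
 * Undo sn_irq_fixup() when a device goes away.  The whole body is
 * compiled out under Xen, so this is a no-op there.
 */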
void sn_irq_unfixup(struct pci_dev *pci_dev)
{
#ifndef XEN
        struct sn_irq_info *sn_irq_info;

        /* Only cleanup IRQ stuff if this device has a host bus context */
        if (!SN_PCIDEV_BUSSOFT(pci_dev))
                return;

        sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
        if (!sn_irq_info)
                return;
        if (!sn_irq_info->irq_irq) {
                kfree(sn_irq_info);
                return;
        }

        unregister_intr_pda(sn_irq_info);
        spin_lock(&sn_irq_info_lock);
#ifdef XEN
        list_del(&sn_irq_info->list);
#else
        list_del_rcu(&sn_irq_info->list);
#endif
        spin_unlock(&sn_irq_info_lock);
        if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
                free_irq_vector(sn_irq_info->irq_irq);
#ifndef XEN
        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
#endif
        pci_dev_put(pci_dev);
#endif
}
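
/*
 * Walk every sn_irq_info registered on an IRQ and invoke the bridge
 * provider's force_interrupt hook to re-raise the interrupt.  Used by
 * sn_end_irq() and by the lost-interrupt workaround below.
 */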
static inline void
sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
{
        struct sn_pcibus_provider *pci_provider;

        pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
        if (pci_provider && pci_provider->force_interrupt)
                (*pci_provider->force_interrupt)(sn_irq_info);
}

static void force_interrupt(int irq)
{
        struct sn_irq_info *sn_irq_info;

#ifndef XEN
        if (!sn_ioif_inited)
                return;
#endif

#ifdef XEN
        spin_lock(&sn_irq_info_lock);
#else
        rcu_read_lock();
#endif
#ifdef XEN
        list_for_each_entry(sn_irq_info, sn_irq_lh[irq], list)
#else
        list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
#endif
                sn_call_force_intr_provider(sn_irq_info);

#ifdef XEN
        spin_unlock(&sn_irq_info_lock);
#else
        rcu_read_unlock();
#endif
}

#ifndef XEN
/*
 * Check for lost interrupts.  If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost.  Force an interrupt on that pin.  It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
        u64 regval;
        struct pcidev_info *pcidev_info;
        struct pcibus_info *pcibus_info;

        /*
         * Bridge types attached to TIO (anything but PIC) do not need this WAR
         * since they do not target Shub II interrupt registers.  If that
         * ever changes, this check needs to accommodate it.
         */
        if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
                return;

        pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
        if (!pcidev_info)
                return;

        pcibus_info =
            (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
            pdi_pcibus_info;
        regval = pcireg_intr_status_get(pcibus_info);

        if (!ia64_get_irr(irq_to_vector(irq))) {
                if (!test_bit(irq, pda->sn_in_service_ivecs)) {
                        regval &= 0xff;
                        if (sn_irq_info->irq_int_bit & regval &
                            sn_irq_info->irq_last_intr) {
                                regval &= ~(sn_irq_info->irq_int_bit & regval);
                                sn_call_force_intr_provider(sn_irq_info);
                        }
                }
        }
        sn_irq_info->irq_last_intr = regval;
}
#endif
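
/*
 * Periodic lost-interrupt workaround: scan every IRQ between this
 * CPU's first and last registered IRQs and run sn_check_intr() on each
 * sn_irq_info.  Compiled out entirely under Xen.
 */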
void sn_lb_int_war_check(void)
{
#ifndef XEN
        struct sn_irq_info *sn_irq_info;
        int i;

#ifdef XEN
        if (pda->sn_first_irq == 0)
#else
        if (!sn_ioif_inited || pda->sn_first_irq == 0)
#endif
                return;

#ifdef XEN
        spin_lock(&sn_irq_info_lock);
#else
        rcu_read_lock();
#endif
        for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
#ifdef XEN
                list_for_each_entry(sn_irq_info, sn_irq_lh[i], list) {
#else
                list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
#endif
                        sn_check_intr(i, sn_irq_info);
                }
        }
#ifdef XEN
        spin_unlock(&sn_irq_info_lock);
#else
        rcu_read_unlock();
#endif
#endif
}
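
/*
 * Allocate and initialize the per-IRQ list heads; sn_irq_lh[i] anchors
 * the list of sn_irq_info structures registered for IRQ i.
 */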
void __init sn_irq_lh_init(void)
{
        int i;

        sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
        if (!sn_irq_lh)
                panic("SN PCI INIT: Failed to allocate memory for PCI init\n");

        for (i = 0; i < NR_IRQS; i++) {
                sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
                if (!sn_irq_lh[i])
                        panic("SN PCI INIT: Failed IRQ memory allocation\n");

                INIT_LIST_HEAD(sn_irq_lh[i]);
        }
}