debuggers.hg

view xen/arch/x86/mpparse.c @ 3649:36fa617b88a7

bitkeeper revision 1.1159.212.70 (42009f98S4XUaUN_bw-spD54MX9ZtQ)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author kaf24@scramble.cl.cam.ac.uk
date Wed Feb 02 09:38:32 2005 +0000 (2005-02-02)
parents 51052c8b6456 49e44c44570c
children bbe8541361dd

/*
 * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 *      Erich Boleyn     : MP v1.4 and additional changes.
 *      Alan Cox         : Added EBDA scanning
 *      Ingo Molnar      : various cleanups and rewrites
 *      Maciej W. Rozycki: Bits for default MP configurations
 *      Paul Diefenbaugh : Added full ACPI support
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/kernel.h>
#include <xen/irq.h>
#include <xen/smp.h>
#include <xen/mm.h>
#include <xen/acpi.h>
#include <asm/acpi.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/mpspec.h>
#include <asm/flushtlb.h>
#include <asm/smpboot.h>

int numnodes = 1; /* XXX Xen */

/* Have we found an MP table */
int smp_found_config;

/*
 * Various Linux-internal data structures created from the
 * MP-table.
 */
int apic_version [MAX_APICS];
int quad_local_to_mp_bus_id [NR_CPUS/4][4];
int mp_current_pci_id;
int *mp_bus_id_to_type;
int *mp_bus_id_to_node;
int *mp_bus_id_to_local;
int *mp_bus_id_to_pci_bus;
int max_mp_busses;
int max_irq_sources;

/* I/O APIC entries */
struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];

/* # of MP IRQ source entries */
struct mpc_config_intsrc *mp_irqs;

/* MP IRQ source entries */
int mp_irq_entries;

int nr_ioapics;

int pic_mode;
unsigned long mp_lapic_addr;

/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
unsigned int boot_cpu_logical_apicid = -1U;
/* Internal processor count */
static unsigned int num_processors;

/* Bitmask of physically existing CPUs */
unsigned long phys_cpu_present_map;
unsigned long logical_cpu_present_map;

#ifdef CONFIG_X86_CLUSTERED_APIC
unsigned char esr_disable = 0;
unsigned char clustered_apic_mode = CLUSTERED_APIC_NONE;
unsigned int apic_broadcast_id = APIC_BROADCAST_ID_APIC;
#endif
unsigned char raw_phys_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };

/*
 * Intel MP BIOS table parsing routines:
 */

#ifndef CONFIG_X86_VISWS_APIC
/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
    int sum = 0;

    while (len--)
        sum += *mp++;

    return sum & 0xFF;
}
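
/*
 * Illustrative sketch (not from the original source): a well-formed MP
 * structure is laid out so that all of its bytes sum to zero modulo 256,
 * which is why callers treat any non-zero return from mpf_checksum() as a
 * corrupt table.  The hypothetical helper below just names that convention;
 * it is kept inside #if 0 so it does not change the build.
 */
#if 0
static int __init mpf_table_is_valid(unsigned char *table, int len)
{
    /* mpf_checksum() returns the low 8 bits of the byte sum. */
    return mpf_checksum(table, len) == 0;
}
#endif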

/*
 * Processor encoding in an MP configuration block
 */

static char __init *mpc_family(int family, int model)
{
    static char n[32];
    static char *model_defs[] =
    {
        "80486DX", "80486DX",
        "80486SX", "80486DX/2 or 80487",
        "80486SL", "80486SX/2",
        "Unknown", "80486DX/2-WB",
        "80486DX/4", "80486DX/4-WB"
    };

    switch (family) {
    case 0x04:
        if (model < 10)
            return model_defs[model];
        break;

    case 0x05:
        return("Pentium(tm)");

    case 0x06:
        return("Pentium(tm) Pro");

    case 0x0F:
        if (model == 0x00)
            return("Pentium 4(tm)");
        if (model == 0x01)
            return("Pentium 4(tm)");
        if (model == 0x02)
            return("Pentium 4(tm) XEON(tm)");
        if (model == 0x0F)
            return("Special controller");
    }
    sprintf(n, "Unknown CPU [%d:%d]", family, model);
    return n;
}

/*
 * Have to match translation table entries to main table entries by counter
 * hence the mpc_record variable .... can't see a less disgusting way of
 * doing this ....
 */

static int mpc_record;
static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;

void __init MP_processor_info (struct mpc_config_processor *m)
{
    int ver, quad, logical_apicid;

    if (!(m->mpc_cpuflag & CPU_ENABLED))
        return;

    logical_apicid = m->mpc_apicid;
    if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ) {
        quad = translation_table[mpc_record]->trans_quad;
        logical_apicid = (quad << 4) +
            (m->mpc_apicid ? m->mpc_apicid << 1 : 1);
        printk("Processor #%d %s APIC version %d (quad %d, apic %d)\n",
               m->mpc_apicid,
               mpc_family((m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
                          (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4),
               m->mpc_apicver, quad, logical_apicid);
    } else {
        printk("Processor #%d %s APIC version %d\n",
               m->mpc_apicid,
               mpc_family((m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
                          (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4),
               m->mpc_apicver);
    }

    if (m->mpc_featureflag&(1<<0))
        Dprintk(" Floating point unit present.\n");
    if (m->mpc_featureflag&(1<<7))
        Dprintk(" Machine Exception supported.\n");
    if (m->mpc_featureflag&(1<<8))
        Dprintk(" 64 bit compare & exchange supported.\n");
    if (m->mpc_featureflag&(1<<9))
        Dprintk(" Internal APIC present.\n");
    if (m->mpc_featureflag&(1<<11))
        Dprintk(" SEP present.\n");
    if (m->mpc_featureflag&(1<<12))
        Dprintk(" MTRR present.\n");
    if (m->mpc_featureflag&(1<<13))
        Dprintk(" PGE present.\n");
    if (m->mpc_featureflag&(1<<14))
        Dprintk(" MCA present.\n");
    if (m->mpc_featureflag&(1<<15))
        Dprintk(" CMOV present.\n");
    if (m->mpc_featureflag&(1<<16))
        Dprintk(" PAT present.\n");
    if (m->mpc_featureflag&(1<<17))
        Dprintk(" PSE present.\n");
    if (m->mpc_featureflag&(1<<18))
        Dprintk(" PSN present.\n");
    if (m->mpc_featureflag&(1<<19))
        Dprintk(" Cache Line Flush Instruction present.\n");
    /* 20 Reserved */
    if (m->mpc_featureflag&(1<<21))
        Dprintk(" Debug Trace and EMON Store present.\n");
    if (m->mpc_featureflag&(1<<22))
        Dprintk(" ACPI Thermal Throttle Registers present.\n");
    if (m->mpc_featureflag&(1<<23))
        Dprintk(" MMX present.\n");
    if (m->mpc_featureflag&(1<<24))
        Dprintk(" FXSR present.\n");
    if (m->mpc_featureflag&(1<<25))
        Dprintk(" XMM present.\n");
    if (m->mpc_featureflag&(1<<26))
        Dprintk(" Willamette New Instructions present.\n");
    if (m->mpc_featureflag&(1<<27))
        Dprintk(" Self Snoop present.\n");
    if (m->mpc_featureflag&(1<<28))
        Dprintk(" HT present.\n");
    if (m->mpc_featureflag&(1<<29))
        Dprintk(" Thermal Monitor present.\n");
    /* 30, 31 Reserved */

    if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
        Dprintk(" Bootup CPU\n");
        boot_cpu_physical_apicid = m->mpc_apicid;
        boot_cpu_logical_apicid = logical_apicid;
    }

    if (num_processors >= NR_CPUS) {
        printk(KERN_WARNING "NR_CPUS limit of %i reached. Cannot "
               "boot CPU(apicid 0x%x).\n", NR_CPUS, m->mpc_apicid);
        return;
    }
    num_processors++;

    if (m->mpc_apicid > MAX_APICS) {
        printk("Processor #%d INVALID. (Max ID: %d).\n",
               m->mpc_apicid, MAX_APICS);
        --num_processors;
        return;
    }
    ver = m->mpc_apicver;

    logical_cpu_present_map |= 1 << (num_processors-1);
    phys_cpu_present_map |= apicid_to_phys_cpu_present(m->mpc_apicid);

    /*
     * Validate version
     */
    if (ver == 0x0) {
        printk("BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
        ver = 0x10;
    }
    apic_version[m->mpc_apicid] = ver;
    raw_phys_apicid[num_processors - 1] = m->mpc_apicid;
}
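
/*
 * Worked example (not from the original source) of the NUMA-Q logical APIC
 * ID encoding used in MP_processor_info() above, assuming quad 2 and
 * physical APIC ID 3:
 *
 *     logical_apicid = (2 << 4) + (3 << 1) = 0x20 + 0x06 = 0x26
 *
 * A physical APIC ID of 0 is special-cased to 1 so that every CPU gets a
 * non-zero bit within its quad's cluster.
 */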

static void __init MP_bus_info (struct mpc_config_bus *m)
{
    char str[7];
    int quad;

    memcpy(str, m->mpc_bustype, 6);
    str[6] = 0;

    if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ) {
        quad = translation_table[mpc_record]->trans_quad;
        mp_bus_id_to_node[m->mpc_busid] = quad;
        mp_bus_id_to_local[m->mpc_busid] = translation_table[mpc_record]->trans_local;
        quad_local_to_mp_bus_id[quad][translation_table[mpc_record]->trans_local] = m->mpc_busid;
        printk("Bus #%d is %s (node %d)\n", m->mpc_busid, str, quad);
    } else {
        Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
    }

    if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
    } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
    } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
        mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
        mp_current_pci_id++;
    } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
    } else {
        printk("Unknown bustype %s - ignoring\n", str);
    }
}

static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
{
    if (!(m->mpc_flags & MPC_APIC_USABLE))
        return;

    printk("I/O APIC #%d Version %d at 0x%X.\n",
           m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
    if (nr_ioapics >= MAX_IO_APICS) {
        printk("Max # of I/O APICs (%d) exceeded (found %d).\n",
               MAX_IO_APICS, nr_ioapics);
        panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
    }
    if (!m->mpc_apicaddr) {
        printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
               " found in MP table, skipping!\n");
        return;
    }
    mp_ioapics[nr_ioapics] = *m;
    nr_ioapics++;
}

static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
{
    mp_irqs [mp_irq_entries] = *m;
    Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
            " IRQ %02x, APIC ID %x, APIC INT %02x\n",
            m->mpc_irqtype, m->mpc_irqflag & 3,
            (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
            m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
    if (++mp_irq_entries == max_irq_sources)
        panic("Max # of irq sources exceeded!!\n");
}

static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
{
    Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
            " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
            m->mpc_irqtype, m->mpc_irqflag & 3,
            (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
            m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
    /*
     * Well it seems all SMP boards in existence
     * use ExtINT/LVT1 == LINT0 and
     * NMI/LVT2 == LINT1 - the following check
     * will show us if this assumption is false.
     * Until then we do not have to add baggage.
     */
    if ((m->mpc_irqtype == mp_ExtINT) &&
        (m->mpc_destapiclint != 0))
        BUG();
    if ((m->mpc_irqtype == mp_NMI) &&
        (m->mpc_destapiclint != 1))
        BUG();
}

static void __init MP_translation_info (struct mpc_config_translation *m)
{
    printk("Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);

    if (mpc_record >= MAX_MPC_ENTRY)
        printk("MAX_MPC_ENTRY exceeded!\n");
    else
        translation_table[mpc_record] = m; /* stash this for later */
    if (m->trans_quad+1 > numnodes)
        numnodes = m->trans_quad+1;
}

/*
 * Read/parse the MPC oem tables
 */

static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
                                    unsigned short oemsize)
{
    int count = sizeof (*oemtable); /* the header size */
    unsigned char *oemptr = ((unsigned char *)oemtable)+count;

    printk("Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
    if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4))
    {
        printk("SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
               oemtable->oem_signature[0],
               oemtable->oem_signature[1],
               oemtable->oem_signature[2],
               oemtable->oem_signature[3]);
        return;
    }
    if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length))
    {
        printk("SMP oem mptable: checksum error!\n");
        return;
    }
    while (count < oemtable->oem_length) {
        switch (*oemptr) {
        case MP_TRANSLATION:
        {
            struct mpc_config_translation *m =
                (struct mpc_config_translation *)oemptr;
            MP_translation_info(m);
            oemptr += sizeof(*m);
            count += sizeof(*m);
            ++mpc_record;
            break;
        }
        default:
        {
            printk("Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
            return;
        }
        }
    }
}

/*
 * Read/parse the MPC
 */

static int __init smp_read_mpc(struct mp_config_table *mpc)
{
    char oem[16], prod[14];
    int count = sizeof(*mpc);
    unsigned char *mpt = ((unsigned char *)mpc)+count;
    int num_bus = 0;
    int num_irq = 0;
    unsigned char *bus_data;

    if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) {
        panic("SMP mptable: bad signature [%c%c%c%c]!\n",
              mpc->mpc_signature[0],
              mpc->mpc_signature[1],
              mpc->mpc_signature[2],
              mpc->mpc_signature[3]);
        return 0;
    }
    if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) {
        panic("SMP mptable: checksum error!\n");
        return 0;
    }
    if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) {
        printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
               mpc->mpc_spec);
        return 0;
    }
    if (!mpc->mpc_lapic) {
        printk(KERN_ERR "SMP mptable: null local APIC address!\n");
        return 0;
    }
    memcpy(oem, mpc->mpc_oem, 8);
    oem[8] = 0;
    printk("OEM ID: %s ", oem);

    memcpy(prod, mpc->mpc_productid, 12);
    prod[12] = 0;
    printk("Product ID: %s ", prod);

    detect_clustered_apic(oem, prod);

    printk("APIC at: 0x%X\n", mpc->mpc_lapic);

    /*
     * Save the local APIC address (it might be non-default) -- but only
     * if we're not using ACPI.
     */
    if (!acpi_lapic)
        mp_lapic_addr = mpc->mpc_lapic;

    if ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) && mpc->mpc_oemptr) {
        /* We need to process the oem mpc tables to tell us which quad things are in ... */
        mpc_record = 0;
        smp_read_mpc_oem((struct mp_config_oemtable *)(unsigned long)mpc->mpc_oemptr, mpc->mpc_oemsize);
        mpc_record = 0;
    }

    /* Pre-scan to determine the number of bus and
     * interrupt records we have
     */
    while (count < mpc->mpc_length) {
        switch (*mpt) {
        case MP_PROCESSOR:
            mpt += sizeof(struct mpc_config_processor);
            count += sizeof(struct mpc_config_processor);
            break;
        case MP_BUS:
            ++num_bus;
            mpt += sizeof(struct mpc_config_bus);
            count += sizeof(struct mpc_config_bus);
            break;
        case MP_INTSRC:
            ++num_irq;
            mpt += sizeof(struct mpc_config_intsrc);
            count += sizeof(struct mpc_config_intsrc);
            break;
        case MP_IOAPIC:
            mpt += sizeof(struct mpc_config_ioapic);
            count += sizeof(struct mpc_config_ioapic);
            break;
        case MP_LINTSRC:
            mpt += sizeof(struct mpc_config_lintsrc);
            count += sizeof(struct mpc_config_lintsrc);
            break;
        default:
            count = mpc->mpc_length;
            break;
        }
    }

    /*
     * Paranoia: Allocate one extra of both the number of busses and number
     * of irqs, and make sure that we have at least 4 interrupts per PCI
     * slot. But some machines do not report very many busses, so we need
     * to fall back on the older defaults.
     */
    ++num_bus;
    max_mp_busses = max(num_bus, MAX_MP_BUSSES);
    if (num_irq < (4 * max_mp_busses))
        num_irq = 4 * num_bus; /* 4 intr/PCI slot */
    ++num_irq;
    max_irq_sources = max(num_irq, MAX_IRQ_SOURCES);

    count = (max_mp_busses * sizeof(int)) * 4;
    count += (max_irq_sources * sizeof(struct mpc_config_intsrc));
    bus_data = (void *)alloc_xenheap_pages(get_order(count));
    if (!bus_data) {
        printk(KERN_ERR "SMP mptable: out of memory!\n");
        return 0;
    }
    mp_bus_id_to_type = (int *)&bus_data[0];
    mp_bus_id_to_node = (int *)&bus_data[(max_mp_busses * sizeof(int))];
    mp_bus_id_to_local = (int *)&bus_data[(max_mp_busses * sizeof(int)) * 2];
    mp_bus_id_to_pci_bus = (int *)&bus_data[(max_mp_busses * sizeof(int)) * 3];
    mp_irqs = (struct mpc_config_intsrc *)&bus_data[(max_mp_busses * sizeof(int)) * 4];
    memset(mp_bus_id_to_pci_bus, -1, max_mp_busses * sizeof(int));
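
    /*
     * Layout sketch (not from the original source) of the single bus_data
     * allocation carved up above, where B = max_mp_busses * sizeof(int):
     *
     *     bus_data[0   .. B)      mp_bus_id_to_type
     *     bus_data[B   .. 2*B)    mp_bus_id_to_node
     *     bus_data[2*B .. 3*B)    mp_bus_id_to_local
     *     bus_data[3*B .. 4*B)    mp_bus_id_to_pci_bus
     *     bus_data[4*B .. end)    mp_irqs (max_irq_sources entries)
     */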

    /*
     * Now process the configuration blocks.
     */
    count = sizeof(*mpc);
    mpt = ((unsigned char *)mpc)+count;
    while (count < mpc->mpc_length) {
        switch (*mpt) {
        case MP_PROCESSOR:
        {
            struct mpc_config_processor *m =
                (struct mpc_config_processor *)mpt;
            /* ACPI may have already provided this data */
            if (!acpi_lapic)
                MP_processor_info(m);
            mpt += sizeof(*m);
            count += sizeof(*m);
            break;
        }
        case MP_BUS:
        {
            struct mpc_config_bus *m =
                (struct mpc_config_bus *)mpt;
            MP_bus_info(m);
            mpt += sizeof(*m);
            count += sizeof(*m);
            break;
        }
        case MP_IOAPIC:
        {
            struct mpc_config_ioapic *m =
                (struct mpc_config_ioapic *)mpt;
            MP_ioapic_info(m);
            mpt += sizeof(*m);
            count += sizeof(*m);
            break;
        }
        case MP_INTSRC:
        {
            struct mpc_config_intsrc *m =
                (struct mpc_config_intsrc *)mpt;

            MP_intsrc_info(m);
            mpt += sizeof(*m);
            count += sizeof(*m);
            break;
        }
        case MP_LINTSRC:
        {
            struct mpc_config_lintsrc *m =
                (struct mpc_config_lintsrc *)mpt;
            MP_lintsrc_info(m);
            mpt += sizeof(*m);
            count += sizeof(*m);
            break;
        }
        default:
        {
            count = mpc->mpc_length;
            break;
        }
        }
        ++mpc_record;
    }

    if (clustered_apic_mode) {
        phys_cpu_present_map = logical_cpu_present_map;
    }

    printk("Enabling APIC mode: ");
    if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ)
        printk("Clustered Logical. ");
    else if (clustered_apic_mode == CLUSTERED_APIC_XAPIC)
        printk("Physical. ");
    else
        printk("Flat. ");
    printk("Using %d I/O APICs\n", nr_ioapics);

    if (!num_processors)
        printk(KERN_ERR "SMP mptable: no processors registered!\n");
    return num_processors;
}

static int __init ELCR_trigger(unsigned int irq)
{
    unsigned int port;

    port = 0x4d0 + (irq >> 3);
    return (inb(port) >> (irq & 7)) & 1;
}
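
/*
 * Worked example (not from the original source) of the ELCR lookup above:
 * the two ELCR registers live at I/O ports 0x4d0 (IRQs 0-7) and 0x4d1
 * (IRQs 8-15), one bit per IRQ.  For IRQ 9:
 *
 *     port = 0x4d0 + (9 >> 3) = 0x4d1,    bit = 9 & 7 = 1
 *
 * so ELCR_trigger(9) returns bit 1 of port 0x4d1 (1 = level triggered).
 */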

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
    struct mpc_config_intsrc intsrc;
    int i;
    int ELCR_fallback = 0;

    intsrc.mpc_type = MP_INTSRC;
    intsrc.mpc_irqflag = 0; /* conforming */
    intsrc.mpc_srcbus = 0;
    intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;

    intsrc.mpc_irqtype = mp_INT;

    /*
     * If true, we have an ISA/PCI system with no IRQ entries
     * in the MP table. To prevent the PCI interrupts from being set up
     * incorrectly, we try to use the ELCR. The sanity check to see if
     * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
     * never be level sensitive, so we simply see if the ELCR agrees.
     * If it does, we assume it's valid.
     */
    if (mpc_default_type == 5) {
        printk("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

        if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
            printk("ELCR contains invalid data... not using ELCR\n");
        else {
            printk("Using ELCR to identify PCI interrupts\n");
            ELCR_fallback = 1;
        }
    }

    for (i = 0; i < 16; i++) {
        switch (mpc_default_type) {
        case 2:
            if (i == 0 || i == 13)
                continue; /* IRQ0 & IRQ13 not connected */
            /* fall through */
        default:
            if (i == 2)
                continue; /* IRQ2 is never connected */
        }

        if (ELCR_fallback) {
            /*
             * If the ELCR indicates a level-sensitive interrupt, we
             * copy that information over to the MP table in the
             * irqflag field (level sensitive, active high polarity).
             */
            if (ELCR_trigger(i))
                intsrc.mpc_irqflag = 13;
            else
                intsrc.mpc_irqflag = 0;
        }

        intsrc.mpc_srcbusirq = i;
        intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
        MP_intsrc_info(&intsrc);
    }

    intsrc.mpc_irqtype = mp_ExtINT;
    intsrc.mpc_srcbusirq = 0;
    intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
    MP_intsrc_info(&intsrc);
}
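
/*
 * Worked example (not from the original source) of the mpc_irqflag value 13
 * written in the ELCR fallback above: the MPS irqflag field keeps polarity
 * in bits 0-1 and trigger mode in bits 2-3, so
 *
 *     13 = 0b1101 = (trigger 3 "level" << 2) | (polarity 1 "active high")
 *
 * which matches how MP_intsrc_info() later decodes it with "& 3" and
 * ">> 2 & 3".
 */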

static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
    struct mpc_config_processor processor;
    struct mpc_config_bus bus;
    struct mpc_config_ioapic ioapic;
    struct mpc_config_lintsrc lintsrc;
    int linttypes[2] = { mp_ExtINT, mp_NMI };
    int i;
    struct {
        int mp_bus_id_to_type[MAX_MP_BUSSES];
        int mp_bus_id_to_node[MAX_MP_BUSSES];
        int mp_bus_id_to_local[MAX_MP_BUSSES];
        int mp_bus_id_to_pci_bus[MAX_MP_BUSSES];
        struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
    } *bus_data;

    bus_data = (void *)alloc_xenheap_pages(get_order(sizeof(*bus_data)));
    if (!bus_data)
        panic("SMP mptable: out of memory!\n");
    mp_bus_id_to_type = bus_data->mp_bus_id_to_type;
    mp_bus_id_to_node = bus_data->mp_bus_id_to_node;
    mp_bus_id_to_local = bus_data->mp_bus_id_to_local;
    mp_bus_id_to_pci_bus = bus_data->mp_bus_id_to_pci_bus;
    mp_irqs = bus_data->mp_irqs;
    for (i = 0; i < MAX_MP_BUSSES; ++i)
        mp_bus_id_to_pci_bus[i] = -1;

    /*
     * local APIC has default address
     */
    mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

    /*
     * 2 CPUs, numbered 0 & 1.
     */
    processor.mpc_type = MP_PROCESSOR;
    /* Either an integrated APIC or a discrete 82489DX. */
    processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
    processor.mpc_cpuflag = CPU_ENABLED;
    processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
        (boot_cpu_data.x86_model << 4) |
        boot_cpu_data.x86_mask;
    processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
    processor.mpc_reserved[0] = 0;
    processor.mpc_reserved[1] = 0;
    for (i = 0; i < 2; i++) {
        processor.mpc_apicid = i;
        MP_processor_info(&processor);
    }

    bus.mpc_type = MP_BUS;
    bus.mpc_busid = 0;
    switch (mpc_default_type) {
    default:
        printk("???\nUnknown standard configuration %d\n",
               mpc_default_type);
        /* fall through */
    case 1:
    case 5:
        memcpy(bus.mpc_bustype, "ISA   ", 6);
        break;
    case 2:
    case 6:
    case 3:
        memcpy(bus.mpc_bustype, "EISA  ", 6);
        break;
    case 4:
    case 7:
        memcpy(bus.mpc_bustype, "MCA   ", 6);
    }
    MP_bus_info(&bus);
    if (mpc_default_type > 4) {
        bus.mpc_busid = 1;
        memcpy(bus.mpc_bustype, "PCI   ", 6);
        MP_bus_info(&bus);
    }

    ioapic.mpc_type = MP_IOAPIC;
    ioapic.mpc_apicid = 2;
    ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
    ioapic.mpc_flags = MPC_APIC_USABLE;
    ioapic.mpc_apicaddr = 0xFEC00000;
    MP_ioapic_info(&ioapic);

    /*
     * We set up most of the low 16 IO-APIC pins according to MPS rules.
     */
    construct_default_ioirq_mptable(mpc_default_type);

    lintsrc.mpc_type = MP_LINTSRC;
    lintsrc.mpc_irqflag = 0; /* conforming */
    lintsrc.mpc_srcbusid = 0;
    lintsrc.mpc_srcbusirq = 0;
    lintsrc.mpc_destapic = MP_APIC_ALL;
    for (i = 0; i < 2; i++) {
        lintsrc.mpc_irqtype = linttypes[i];
        lintsrc.mpc_destapiclint = i;
        MP_lintsrc_info(&lintsrc);
    }
}

static struct intel_mp_floating *mpf_found;

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init get_smp_config (void)
{
    struct intel_mp_floating *mpf = mpf_found;

    /*
     * ACPI may be used to obtain the entire SMP configuration or just to
     * enumerate/configure processors (CONFIG_ACPI_HT_ONLY). Note that
     * ACPI supports both logical (e.g. Hyper-Threading) and physical
     * processors, where MPS only supports physical.
     */
    if (acpi_lapic && acpi_ioapic) {
        printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
        return;
    }
    else if (acpi_lapic)
        printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");

    printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
    if (mpf->mpf_feature2 & (1<<7)) {
        printk(" IMCR and PIC compatibility mode.\n");
        pic_mode = 1;
    } else {
        printk(" Virtual Wire compatibility mode.\n");
        pic_mode = 0;
    }

    /*
     * Now see if we need to read further.
     */
    if (mpf->mpf_feature1 != 0) {

        printk("Default MP configuration #%d\n", mpf->mpf_feature1);
        construct_default_ISA_mptable(mpf->mpf_feature1);

    } else if (mpf->mpf_physptr) {

        /*
         * Read the physical hardware table. Anything here will
         * override the defaults.
         */
        if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) {
            smp_found_config = 0;
            printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
            printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
            return;
        }
        /*
         * If there are no explicit MP IRQ entries, then we are
         * broken. We set up most of the low 16 IO-APIC pins to
         * ISA defaults and hope it will work.
         */
        if (!mp_irq_entries) {
            struct mpc_config_bus bus;

            printk("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

            bus.mpc_type = MP_BUS;
            bus.mpc_busid = 0;
            memcpy(bus.mpc_bustype, "ISA   ", 6);
            MP_bus_info(&bus);

            construct_default_ioirq_mptable(0);
        }

    } else
        BUG();

    printk("Processors: %d\n", num_processors);
    /*
     * Only use the first configuration found.
     */
}

static int __init smp_scan_config (unsigned long base, unsigned long length)
{
    unsigned int *bp = phys_to_virt(base);
    struct intel_mp_floating *mpf;

    Dprintk("Scan SMP from %p for %ld bytes.\n", bp, length);
    if (sizeof(*mpf) != 16)
        printk("Error: MPF size\n");

    while (length > 0) {
        mpf = (struct intel_mp_floating *)bp;
        if ((*bp == SMP_MAGIC_IDENT) &&
            (mpf->mpf_length == 1) &&
            !mpf_checksum((unsigned char *)bp, 16) &&
            ((mpf->mpf_specification == 1)
             || (mpf->mpf_specification == 4)) ) {

            smp_found_config = 1;
            printk("found SMP MP-table at %08lx\n",
                   virt_to_phys(mpf));
            reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
            if (mpf->mpf_physptr)
                reserve_bootmem((unsigned long)mpf->mpf_physptr, PAGE_SIZE);
            mpf_found = mpf;
            return 1;
        }
        bp += 4;
        length -= 16;
    }
    return 0;
}
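
/*
 * Note on the scan stride above (not from the original source): the MP
 * floating pointer structure is 16 bytes long and must start on a 16-byte
 * boundary, so advancing the unsigned int pointer by 4 (4 * sizeof(int) =
 * 16 bytes on x86) while decrementing length by 16 walks the region one
 * candidate slot at a time.  SMP_MAGIC_IDENT is the "_MP_" signature in
 * the first four bytes of a candidate.
 */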

void __init find_intel_smp (void)
{
    unsigned int address;

    /*
     * FIXME: Linux assumes you have 640K of base ram..
     * this continues the error...
     *
     * 1) Scan the bottom 1K for a signature
     * 2) Scan the top 1K of base RAM
     * 3) Scan the 64K of bios
     */
    if (smp_scan_config(0x0, 0x400) ||
        smp_scan_config(639*0x400, 0x400) ||
        smp_scan_config(0xF0000, 0x10000))
        return;
    /*
     * If it is an SMP machine we should know now, unless the
     * configuration is in an EISA/MCA bus machine with an
     * extended bios data area.
     *
     * there is a real-mode segmented pointer pointing to the
     * 4K EBDA area at 0x40E, calculate and scan it here.
     *
     * NOTE! There were Linux loaders that will corrupt the EBDA
     * area, and as such this kind of SMP config may be less
     * trustworthy, simply because the SMP table may have been
     * stomped on during early boot. Thankfully the bootloaders
     * now honour the EBDA.
     */

    address = *(unsigned short *)phys_to_virt(0x40E);
    address <<= 4;
    smp_scan_config(address, 0x1000);
}
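
/*
 * Worked example (not from the original source) of the EBDA lookup above:
 * the BIOS data area word at physical 0x40E holds the EBDA's real-mode
 * segment.  If that word reads 0x9FC0, then
 *
 *     address = 0x9FC0 << 4 = 0x9FC00
 *
 * and the 4K scan covers 0x9FC00-0xA0BFF, i.e. the top of conventional
 * memory just below the 640K boundary.
 */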

#else

/*
 * The Visual Workstation is Intel MP compliant in the hardware
 * sense, but it doesn't have a BIOS(-configuration table).
 * No problem for Linux.
 */
void __init find_visws_smp(void)
{
    smp_found_config = 1;

    phys_cpu_present_map |= 2; /* or in id 1 */
    apic_version[1] |= 0x10; /* integrated APIC */
    apic_version[0] |= 0x10;

    mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
}

#endif

/*
 * - Intel MP Configuration Table
 * - or SGI Visual Workstation configuration
 */
void __init find_smp_config (void)
{
#ifdef CONFIG_X86_LOCAL_APIC
    find_intel_smp();
#endif
#ifdef CONFIG_VISWS
    find_visws_smp();
#endif
}

/* --------------------------------------------------------------------------
                        ACPI-based MP Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI_BOOT

void __init mp_register_lapic_address (
    u64 address)
{
    mp_lapic_addr = (unsigned long) address;

    set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);

    if (boot_cpu_physical_apicid == -1U)
        boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));

    Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
}

void __init mp_register_lapic (
    u8 id,
    u8 enabled)
{
    struct mpc_config_processor processor;
    int boot_cpu = 0;

    if (id >= MAX_APICS) {
        printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
               id, MAX_APICS);
        return;
    }

    if (id == boot_cpu_physical_apicid)
        boot_cpu = 1;

    processor.mpc_type = MP_PROCESSOR;
    processor.mpc_apicid = id;

    /*
     * mp_register_lapic_address(), which is called before the
     * current function, does the fixmap of FIX_APIC_BASE.
     * Read in the correct APIC version from there.
     */
    processor.mpc_apicver = apic_read(APIC_LVR);

    processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
    processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
    processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
        (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
    processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
    processor.mpc_reserved[0] = 0;
    processor.mpc_reserved[1] = 0;

    MP_processor_info(&processor);
}

#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)

#define MP_ISA_BUS 0
#define MP_MAX_IOAPIC_PIN 127

struct mp_ioapic_routing {
    int apic_id;
    int irq_start;
    int irq_end;
    u32 pin_programmed[4];
} mp_ioapic_routing[MAX_IO_APICS];

static int __init mp_find_ioapic (
    int irq)
{
    int i = 0;

    /* Find the IOAPIC that manages this IRQ. */
    for (i = 0; i < nr_ioapics; i++) {
        if ((irq >= mp_ioapic_routing[i].irq_start)
            && (irq <= mp_ioapic_routing[i].irq_end))
            return i;
    }

    printk(KERN_ERR "ERROR: Unable to locate IOAPIC for IRQ %d\n", irq);

    return -1;
}

void __init mp_register_ioapic (
    u8 id,
    u32 address,
    u32 irq_base)
{
    int idx = 0;

    if (nr_ioapics >= MAX_IO_APICS) {
        printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
               "(found %d)\n", MAX_IO_APICS, nr_ioapics);
        panic("Recompile kernel with bigger MAX_IO_APICS!\n");
    }
    if (!address) {
        printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
               " found in MADT table, skipping!\n");
        return;
    }

    idx = nr_ioapics++;

    mp_ioapics[idx].mpc_type = MP_IOAPIC;
    mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
    mp_ioapics[idx].mpc_apicaddr = address;

    set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
    mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
    mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);

    /*
     * Build basic IRQ lookup table to facilitate irq->io_apic lookups
     * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
     */
    mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
    mp_ioapic_routing[idx].irq_start = irq_base;
    mp_ioapic_routing[idx].irq_end = irq_base +
        io_apic_get_redir_entries(idx);

    printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
           "IRQ %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
           mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
           mp_ioapic_routing[idx].irq_start,
           mp_ioapic_routing[idx].irq_end);

    return;
}

void __init mp_override_legacy_irq (
    u8 bus_irq,
    u8 polarity,
    u8 trigger,
    u32 global_irq)
{
    struct mpc_config_intsrc intsrc;
    int i = 0;
    int found = 0;
    int ioapic = -1;
    int pin = -1;

    /*
     * Convert 'global_irq' to 'ioapic.pin'.
     */
    ioapic = mp_find_ioapic(global_irq);
    if (ioapic < 0)
        return;
    pin = global_irq - mp_ioapic_routing[ioapic].irq_start;

    /*
     * TBD: This check is for faulty timer entries, where the override
     *      erroneously sets the trigger to level, resulting in a HUGE
     *      increase of timer interrupts!
     */
    if ((bus_irq == 0) && (trigger == 3))
        trigger = 1;

    intsrc.mpc_type = MP_INTSRC;
    intsrc.mpc_irqtype = mp_INT;
    intsrc.mpc_irqflag = (trigger << 2) | polarity;
    intsrc.mpc_srcbus = MP_ISA_BUS;
    intsrc.mpc_srcbusirq = bus_irq;                      /* IRQ */
    intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;  /* APIC ID */
    intsrc.mpc_dstirq = pin;                             /* INTIN# */

    Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
            intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
            (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
            intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);

    /*
     * If an existing [IOAPIC.PIN -> IRQ] routing entry exists we override it.
     * Otherwise create a new entry (e.g. global_irq == 2).
     */
    for (i = 0; i < mp_irq_entries; i++) {
        if ((mp_irqs[i].mpc_srcbus == intsrc.mpc_srcbus)
            && (mp_irqs[i].mpc_srcbusirq == intsrc.mpc_srcbusirq)) {
            mp_irqs[i] = intsrc;
            found = 1;
            break;
        }
    }

    if (!found) {
        mp_irqs[mp_irq_entries] = intsrc;
        if (++mp_irq_entries == MAX_IRQ_SOURCES)
            panic("Max # of irq sources exceeded!\n");
    }

    return;
}

void __init mp_config_acpi_legacy_irqs (void)
{
    int i = 0;
    int ioapic = -1;

    /*
     * Initialize mp_irqs for IRQ configuration.
     */
    unsigned char *bus_data;
    int count;

    count = (MAX_MP_BUSSES * sizeof(int)) * 4;
    count += (MAX_IRQ_SOURCES * sizeof(int)) * 4;
    bus_data = (void *)alloc_xenheap_pages(get_order(count));
    if (!bus_data) {
        panic("Fatal: can't allocate bus memory for ACPI legacy IRQ!");
    }
    mp_bus_id_to_type = (int *)&bus_data[0];
    mp_bus_id_to_node = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int))];
    mp_bus_id_to_local = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 2];
    mp_bus_id_to_pci_bus = (int *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 3];
    mp_irqs = (struct mpc_config_intsrc *)&bus_data[(MAX_MP_BUSSES * sizeof(int)) * 4];
    for (i = 0; i < MAX_MP_BUSSES; ++i)
        mp_bus_id_to_pci_bus[i] = -1;

    /*
     * Fabricate the legacy ISA bus (bus #31).
     */
    mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
    Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);

    /*
     * Locate the IOAPIC that manages the ISA IRQs (0-15).
     */
    ioapic = mp_find_ioapic(0);
    if (ioapic < 0)
        return;

    /*
     * Use the default configuration for the IRQs 0-15. These may be
     * overridden by (MADT) interrupt source override entries.
     */
    for (i = 0; i < 16; i++) {

        if (i == 2)
            continue; /* Don't connect IRQ2 */

        mp_irqs[mp_irq_entries].mpc_type = MP_INTSRC;
        mp_irqs[mp_irq_entries].mpc_irqflag = 0; /* Conforming */
        mp_irqs[mp_irq_entries].mpc_srcbus = MP_ISA_BUS;
        mp_irqs[mp_irq_entries].mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
        mp_irqs[mp_irq_entries].mpc_irqtype = mp_INT;
        mp_irqs[mp_irq_entries].mpc_srcbusirq = i; /* Identity mapped */
        mp_irqs[mp_irq_entries].mpc_dstirq = i;

        Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
                "%d-%d\n",
                mp_irqs[mp_irq_entries].mpc_irqtype,
                mp_irqs[mp_irq_entries].mpc_irqflag & 3,
                (mp_irqs[mp_irq_entries].mpc_irqflag >> 2) & 3,
                mp_irqs[mp_irq_entries].mpc_srcbus,
                mp_irqs[mp_irq_entries].mpc_srcbusirq,
                mp_irqs[mp_irq_entries].mpc_dstapic,
                mp_irqs[mp_irq_entries].mpc_dstirq);

        if (++mp_irq_entries == MAX_IRQ_SOURCES)
            panic("Max # of irq sources exceeded!\n");
    }
}

#ifdef CONFIG_ACPI_PCI

void __init mp_parse_prt (void)
{
    struct acpi_prt_entry *entry = NULL;
    int ioapic = -1;
    int ioapic_pin = 0;
    int irq = 0;
    int idx, bit = 0;
    int edge_level = 0;
    int active_high_low = 0;

    /*
     * Parse the PCI Interrupt Routing Table (PRT) and program the
     * routing for all entries.
     */
    list_for_each_entry(entry, &acpi_prt.entries, node) {
        /* Need to get irq for dynamic entry */
        if (entry->link.handle) {
            irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index, &edge_level, &active_high_low);
            if (!irq)
                continue;
        }
        else {
            /* Hardwired IRQ. Assume PCI standard settings */
            irq = entry->link.index;
            edge_level = 1;
            active_high_low = 1;
        }

        /* Don't set up the ACPI SCI because it's already set up */
        if (acpi_fadt.sci_int == irq) {
            entry->irq = irq; /* we still need to set the entry's irq */
            continue;
        }

        ioapic = mp_find_ioapic(irq);
        if (ioapic < 0)
            continue;
        ioapic_pin = irq - mp_ioapic_routing[ioapic].irq_start;

        /*
         * Avoid pin reprogramming. PRTs typically include entries
         * with redundant pin->irq mappings (but unique PCI devices);
         * we only program the IOAPIC on the first.
         */
        bit = ioapic_pin % 32;
        idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
        if (idx > 3) {
            printk(KERN_ERR "Invalid reference to IOAPIC pin "
                   "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
                   ioapic_pin);
            continue;
        }
        if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
            Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
                    mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
            entry->irq = irq;
            continue;
        }

        mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);

        if (!io_apic_set_pci_routing(ioapic, ioapic_pin, irq, edge_level, active_high_low))
            entry->irq = irq;

        printk(KERN_DEBUG "%02x:%02x:%02x[%c] -> %d-%d -> IRQ %d\n",
               entry->id.segment, entry->id.bus,
               entry->id.device, ('A' + entry->pin),
               mp_ioapic_routing[ioapic].apic_id, ioapic_pin,
               entry->irq);
    }

    print_IO_APIC();

    return;
}

#endif /*CONFIG_ACPI_PCI*/

#endif /*CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER*/

#endif /*CONFIG_ACPI_BOOT*/