debuggers.hg

view xen/arch/x86/acpi/boot.c @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this
author Mukesh Rathor
date Mon May 19 15:34:57 2008 -0700 (2008-05-19)
parents
children 5c0bf00e371d
line source
1 /*
2 * boot.c - Architecture-Specific Low-Level ACPI Boot Support
3 *
4 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5 * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
26 #include <xen/config.h>
27 #include <xen/errno.h>
28 #include <xen/init.h>
29 #include <xen/acpi.h>
30 #include <xen/irq.h>
31 #include <xen/dmi.h>
32 #include <asm/fixmap.h>
33 #include <asm/page.h>
34 #include <asm/apic.h>
35 #include <asm/io_apic.h>
37 #include <asm/io.h>
38 #include <asm/mpspec.h>
39 #include <asm/processor.h>
40 #include <mach_apic.h>
41 #include <mach_mpparse.h>
43 int sbf_port;
44 #define CONFIG_ACPI_PCI
46 #define BAD_MADT_ENTRY(entry, end) ( \
47 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
48 ((acpi_table_entry_header *)entry)->length != sizeof(*entry))
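
The BAD_MADT_ENTRY() check above rejects a MADT sub-table entry when the pointer is NULL, when the entry would run past the end of the mapped table, or when its recorded length disagrees with the expected structure size. Below is a minimal standalone sketch of the same validation pattern; the entry_header/lapic_entry types and the BAD_ENTRY name are simplified stand-ins invented for illustration, not the real ACPI structures.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the real ACPI structures (hypothetical). */
struct entry_header { uint8_t type; uint8_t length; };
struct lapic_entry  { struct entry_header h; uint8_t acpi_id; uint8_t apic_id; };

/* Same shape as BAD_MADT_ENTRY(): NULL check, bounds check, length check. */
#define BAD_ENTRY(entry, end) ( \
	(!(entry)) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
	((struct entry_header *)(entry))->length != sizeof(*(entry)))

int main(void)
{
	unsigned char table[64];
	struct lapic_entry *e = (struct lapic_entry *)table;
	unsigned long end = (unsigned long)table + sizeof(table);

	memset(table, 0, sizeof(table));
	e->h.length = sizeof(*e);          /* well-formed entry */
	printf("good entry rejected? %d\n", (int)BAD_ENTRY(e, end));

	e->h.length = 3;                   /* corrupt length field */
	printf("bad entry rejected?  %d\n", (int)BAD_ENTRY(e, end));
	return 0;
}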
50 #define PREFIX "ACPI: "
52 #ifdef CONFIG_ACPI_PCI
53 int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
54 int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
55 #else
56 int acpi_noirq __initdata = 1;
57 int acpi_pci_disabled __initdata = 1;
58 #endif
59 int acpi_ht __initdata = 1; /* enable HT */
61 int acpi_lapic;
62 int acpi_ioapic;
63 int acpi_strict;
64 EXPORT_SYMBOL(acpi_strict);
66 u8 acpi_sci_flags __initdata;
67 int acpi_sci_override_gsi __initdata;
68 int acpi_skip_timer_override __initdata;
70 #ifdef CONFIG_X86_LOCAL_APIC
71 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
72 #endif
74 u32 acpi_smi_cmd;
75 u8 acpi_enable_value, acpi_disable_value;
77 #ifndef __HAVE_ARCH_CMPXCHG
78 #warning ACPI uses CMPXCHG, i486 and later hardware
79 #endif
81 #define MAX_MADT_ENTRIES 256
82 u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
83 {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
84 EXPORT_SYMBOL(x86_acpiid_to_apicid);
86 /* --------------------------------------------------------------------------
87 Boot-time Configuration
88 -------------------------------------------------------------------------- */
90 /*
91 * The default interrupt routing model is PIC (8259). This gets
92 * overridden if IOAPICs are enumerated (below).
93 */
94 enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
96 /*
97 * Temporarily use the fixmap slots from FIX_ACPI_END down to
98 * FIX_ACPI_BEGIN to map the target physical address. The problem is
99 * that set_fixmap() provides a single page, and it is possible that
100 * the page is not sufficient.
101 * By using this area, we can map up to (FIX_ACPI_END - FIX_ACPI_BEGIN + 1)
102 * pages temporarily, i.e. until the next __acpi_map_table() call.
103 *
104 * Important Safety Note: fixmap page numbers are *subtracted* from the
105 * fixed base. That's why we start at FIX_ACPI_END and count idx down
106 * while incrementing the phys address.
107 */
108 char *__acpi_map_table(unsigned long phys, unsigned long size)
109 {
110 unsigned long base, offset, mapped_size;
111 int idx;
113 /* XEN: RAM holes above 1MB are not permanently mapped. */
114 if (phys + size < 1 * 1024 * 1024)
115 return __va(phys);
117 offset = phys & (PAGE_SIZE - 1);
118 mapped_size = PAGE_SIZE - offset;
119 set_fixmap(FIX_ACPI_END, phys);
120 base = fix_to_virt(FIX_ACPI_END);
122 /*
123 * Most tables fit in the page mapped above; map more below if needed.
124 */
125 idx = FIX_ACPI_END;
126 while (mapped_size < size) {
127 if (--idx < FIX_ACPI_BEGIN)
128 return NULL; /* cannot handle this */
129 phys += PAGE_SIZE;
130 set_fixmap(idx, phys);
131 mapped_size += PAGE_SIZE;
132 }
134 return ((char *) base + offset);
135 }
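
__acpi_map_table() maps the first page at FIX_ACPI_END and then walks the fixmap index downward, while the physical address walks upward, until the whole table is covered; it gives up once the index would drop below FIX_ACPI_BEGIN. The standalone sketch below reproduces only that arithmetic, with made-up index values standing in for the real fixmap constants.

#include <stdio.h>

#define PAGE_SIZE      4096UL
#define FIX_ACPI_BEGIN 10      /* hypothetical fixmap indices, not Xen's values */
#define FIX_ACPI_END   13      /* four temporary slots in this sketch */

/* Return the number of fixmap slots a request consumes, or -1 if it won't fit. */
static int slots_needed(unsigned long phys, unsigned long size)
{
	unsigned long offset = phys & (PAGE_SIZE - 1);
	unsigned long mapped_size = PAGE_SIZE - offset;
	int idx = FIX_ACPI_END, used = 1;

	while (mapped_size < size) {
		if (--idx < FIX_ACPI_BEGIN)
			return -1;              /* cannot handle this */
		mapped_size += PAGE_SIZE;
		used++;
	}
	return used;
}

int main(void)
{
	/* A 5000-byte table starting 100 bytes into a page spans two pages. */
	printf("%d\n", slots_needed(0x12345064, 5000));
	/* Anything needing more than four pages here is rejected. */
	printf("%d\n", slots_needed(0x12345000, 5 * PAGE_SIZE));
	return 0;
}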
137 #ifdef CONFIG_X86_LOCAL_APIC
138 static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
139 {
140 struct acpi_table_madt *madt = NULL;
142 if (!phys_addr || !size)
143 return -EINVAL;
145 madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
146 if (!madt) {
147 printk(KERN_WARNING PREFIX "Unable to map MADT\n");
148 return -ENODEV;
149 }
151 if (madt->address) {
152 acpi_lapic_addr = (u64) madt->address;
154 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
155 madt->address);
156 }
158 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
160 return 0;
161 }
163 static int __init
164 acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
165 {
166 struct acpi_table_lapic *processor = NULL;
168 processor = (struct acpi_table_lapic *)header;
170 if (BAD_MADT_ENTRY(processor, end))
171 return -EINVAL;
173 acpi_table_print_madt_entry(header);
175 /* Record local apic id only when enabled */
176 if (processor->flags.enabled)
177 x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
179 /*
180 * We need to register disabled CPUs as well, to permit
181 * counting them. This allows us to size
182 * cpus_possible_map more accurately, so that we do not
183 * preallocate memory for all NR_CPUS
184 * when CPU hotplug is used.
185 */
186 mp_register_lapic(processor->id, /* APIC ID */
187 processor->flags.enabled); /* Enabled? */
189 return 0;
190 }
192 static int __init
193 acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
194 const unsigned long end)
195 {
196 struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
198 lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
200 if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
201 return -EINVAL;
203 acpi_lapic_addr = lapic_addr_ovr->address;
205 return 0;
206 }
208 static int __init
209 acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
210 {
211 struct acpi_table_lapic_nmi *lapic_nmi = NULL;
213 lapic_nmi = (struct acpi_table_lapic_nmi *)header;
215 if (BAD_MADT_ENTRY(lapic_nmi, end))
216 return -EINVAL;
218 acpi_table_print_madt_entry(header);
220 if (lapic_nmi->lint != 1)
221 printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
223 return 0;
224 }
226 #endif /*CONFIG_X86_LOCAL_APIC */
228 #if defined(CONFIG_X86_IO_APIC) /*&& defined(CONFIG_ACPI_INTERPRETER)*/
230 static int __init
231 acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
232 {
233 struct acpi_table_ioapic *ioapic = NULL;
235 ioapic = (struct acpi_table_ioapic *)header;
237 if (BAD_MADT_ENTRY(ioapic, end))
238 return -EINVAL;
240 acpi_table_print_madt_entry(header);
242 mp_register_ioapic(ioapic->id,
243 ioapic->address, ioapic->global_irq_base);
245 return 0;
246 }
248 static int __init
249 acpi_parse_int_src_ovr(acpi_table_entry_header * header,
250 const unsigned long end)
251 {
252 struct acpi_table_int_src_ovr *intsrc = NULL;
254 intsrc = (struct acpi_table_int_src_ovr *)header;
256 if (BAD_MADT_ENTRY(intsrc, end))
257 return -EINVAL;
259 acpi_table_print_madt_entry(header);
261 if (acpi_skip_timer_override &&
262 intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
263 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
264 return 0;
265 }
267 mp_override_legacy_irq(intsrc->bus_irq,
268 intsrc->flags.polarity,
269 intsrc->flags.trigger, intsrc->global_irq);
271 return 0;
272 }
274 static int __init
275 acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
276 {
277 struct acpi_table_nmi_src *nmi_src = NULL;
279 nmi_src = (struct acpi_table_nmi_src *)header;
281 if (BAD_MADT_ENTRY(nmi_src, end))
282 return -EINVAL;
284 acpi_table_print_madt_entry(header);
286 /* TBD: Support nmi_src entries? */
288 return 0;
289 }
291 #endif /* CONFIG_X86_IO_APIC */
293 static unsigned long __init
294 acpi_scan_rsdp(unsigned long start, unsigned long length)
295 {
296 unsigned long offset = 0;
297 unsigned long sig_len = sizeof("RSD PTR ") - 1;
299 /*
300 * Scan all 16-byte boundaries of the physical memory region for the
301 * RSDP signature.
302 */
303 for (offset = 0; offset < length; offset += 16) {
304 if (strncmp((char *)(start + offset), "RSD PTR ", sig_len))
305 continue;
306 return (start + offset);
307 }
309 return 0;
310 }
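
acpi_scan_rsdp() matches the "RSD PTR " signature only at 16-byte boundaries. The standalone sketch below performs the same scan over an in-memory buffer and, purely for illustration, adds the RSDP checksum test defined by the ACPI spec (the first 20 bytes must sum to zero), which this function itself does not perform.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte-sum of the first 20 bytes of an RSDP must be zero (ACPI 1.0 checksum). */
static int rsdp_checksum_ok(const uint8_t *p)
{
	uint8_t sum = 0;
	for (int i = 0; i < 20; i++)
		sum += p[i];
	return sum == 0;
}

/* Scan 16-byte boundaries of buf[] for "RSD PTR ", as acpi_scan_rsdp() does. */
static long scan_rsdp(const uint8_t *buf, size_t len)
{
	for (size_t off = 0; off + 20 <= len; off += 16)
		if (!memcmp(buf + off, "RSD PTR ", 8) &&
		    rsdp_checksum_ok(buf + off))
			return (long)off;
	return -1;
}

int main(void)
{
	uint8_t area[256] = { 0 };
	uint8_t sig_sum = 'R' + 'S' + 'D' + ' ' + 'P' + 'T' + 'R' + ' ';

	memcpy(area + 64, "RSD PTR ", 8);         /* fake signature at offset 64 */
	area[64 + 8] = (uint8_t)(0 - sig_sum);    /* fix up the 20-byte checksum */
	printf("found at offset %ld\n", scan_rsdp(area, sizeof(area)));
	return 0;
}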
312 static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
313 {
314 struct acpi_table_sbf *sb;
316 if (!phys_addr || !size)
317 return -EINVAL;
319 sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
320 if (!sb) {
321 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
322 return -ENODEV;
323 }
325 sbf_port = sb->sbf_cmos; /* Save CMOS port */
327 return 0;
328 }
330 #ifdef CONFIG_HPET_TIMER
332 static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
333 {
334 struct acpi_table_hpet *hpet_tbl;
336 if (!phys || !size)
337 return -EINVAL;
339 hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
340 if (!hpet_tbl) {
341 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
342 return -ENODEV;
343 }
345 if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
346 printk(KERN_WARNING PREFIX "HPET timers must be located in "
347 "memory.\n");
348 return -1;
349 }
351 #if 0/*def CONFIG_X86_64*/
352 vxtime.hpet_address = hpet_tbl->address.address;
354 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
355 hpet_tbl->id, vxtime.hpet_address);
356 #else /* X86 */
357 {
358 extern unsigned long hpet_address;
360 hpet_address = hpet_tbl->address.address;
361 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
362 hpet_tbl->id, hpet_address);
363 }
364 #endif /* X86 */
366 return 0;
367 }
368 #else
369 #define acpi_parse_hpet NULL
370 #endif
372 #ifdef CONFIG_X86_PM_TIMER
373 extern u32 pmtmr_ioport;
374 #endif
376 #ifdef CONFIG_ACPI_SLEEP
377 #define acpi_fadt_copy_address(dst, src, len) do { \
378 if (fadt->header.revision >= FADT2_REVISION_ID) \
379 acpi_sinfo.dst##_blk = fadt->x##src##_block; \
380 if (!acpi_sinfo.dst##_blk.address) { \
381 acpi_sinfo.dst##_blk.address = fadt->src##_block; \
382 acpi_sinfo.dst##_blk.space_id = ACPI_ADR_SPACE_SYSTEM_IO; \
383 acpi_sinfo.dst##_blk.bit_width = fadt->len##_length << 3; \
384 acpi_sinfo.dst##_blk.bit_offset = 0; \
385 acpi_sinfo.dst##_blk.access_width = 0; \
386 } \
387 } while (0)
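
Each use of acpi_fadt_copy_address() pastes its arguments into field names: the extended "X" block from a revision-2+ FADT is preferred, and the legacy block is used as a fallback, with its bit width derived from the byte length. As an illustration, acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control) expands to roughly the following (formatting adjusted):

do {
	if (fadt->header.revision >= FADT2_REVISION_ID)
		acpi_sinfo.pm1a_cnt_blk = fadt->xpm1a_control_block;
	if (!acpi_sinfo.pm1a_cnt_blk.address) {
		acpi_sinfo.pm1a_cnt_blk.address      = fadt->pm1a_control_block;
		acpi_sinfo.pm1a_cnt_blk.space_id     = ACPI_ADR_SPACE_SYSTEM_IO;
		acpi_sinfo.pm1a_cnt_blk.bit_width    = fadt->pm1_control_length << 3;
		acpi_sinfo.pm1a_cnt_blk.bit_offset   = 0;
		acpi_sinfo.pm1a_cnt_blk.access_width = 0;
	}
} while (0)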
389 /* Get pm1x_cnt and pm1x_evt information for ACPI sleep */
390 static void __init
391 acpi_fadt_parse_sleep_info(struct acpi_table_fadt *fadt)
392 {
393 struct acpi_table_rsdp *rsdp;
394 unsigned long rsdp_phys;
395 struct acpi_table_facs *facs = NULL;
396 uint64_t facs_pa;
398 rsdp_phys = acpi_find_rsdp();
399 if (!rsdp_phys || acpi_disabled)
400 goto bad;
401 rsdp = __va(rsdp_phys);
403 acpi_fadt_copy_address(pm1a_cnt, pm1a_control, pm1_control);
404 acpi_fadt_copy_address(pm1b_cnt, pm1b_control, pm1_control);
405 acpi_fadt_copy_address(pm1a_evt, pm1a_event, pm1_event);
406 acpi_fadt_copy_address(pm1b_evt, pm1b_event, pm1_event);
408 printk(KERN_INFO PREFIX
409 "ACPI SLEEP INFO: pm1x_cnt[%"PRIx64",%"PRIx64"], "
410 "pm1x_evt[%"PRIx64",%"PRIx64"]\n",
411 acpi_sinfo.pm1a_cnt_blk.address,
412 acpi_sinfo.pm1b_cnt_blk.address,
413 acpi_sinfo.pm1a_evt_blk.address,
414 acpi_sinfo.pm1b_evt_blk.address);
416 /* Now FACS... */
417 if (fadt->header.revision >= FADT2_REVISION_ID)
418 facs_pa = fadt->Xfacs;
419 else
420 facs_pa = (uint64_t)fadt->facs;
422 facs = (struct acpi_table_facs *)
423 __acpi_map_table(facs_pa, sizeof(struct acpi_table_facs));
424 if (!facs)
425 goto bad;
427 if (strncmp(facs->signature, "FACS", 4)) {
428 printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n",
429 facs->signature);
430 goto bad;
431 }
433 if (facs->length < 24) {
434 printk(KERN_ERR PREFIX "Invalid FACS table length: 0x%x\n",
435 facs->length);
436 goto bad;
437 }
439 if (facs->length < 64)
440 printk(KERN_WARNING PREFIX
441 "FACS is shorter than ACPI spec allow: 0x%x",
442 facs->length);
444 acpi_sinfo.wakeup_vector = facs_pa +
445 offsetof(struct acpi_table_facs, firmware_waking_vector);
446 acpi_sinfo.vector_width = 32;
448 printk(KERN_INFO PREFIX
449 " wakeup_vec[%"PRIx64"], vec_size[%x]\n",
450 acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width);
451 return;
452 bad:
453 memset(&acpi_sinfo, 0, sizeof(acpi_sinfo));
454 }
455 #endif
457 static void __init
458 acpi_fadt_parse_reg(struct acpi_table_fadt *fadt)
459 {
460 unsigned int len;
462 len = min_t(unsigned int, fadt->header.length, sizeof(*fadt));
463 memcpy(&acpi_gbl_FADT, fadt, len);
465 if (len > offsetof(struct acpi_table_fadt, xpm1b_event_block)) {
466 memcpy(&acpi_gbl_xpm1a_enable, &fadt->xpm1a_event_block,
467 sizeof(acpi_gbl_xpm1a_enable));
468 memcpy(&acpi_gbl_xpm1b_enable, &fadt->xpm1b_event_block,
469 sizeof(acpi_gbl_xpm1b_enable));
471 acpi_gbl_xpm1a_enable.address +=
472 acpi_gbl_FADT.pm1_event_length / 2;
473 if ( acpi_gbl_xpm1b_enable.address )
474 acpi_gbl_xpm1b_enable.address +=
475 acpi_gbl_FADT.pm1_event_length / 2;
476 }
477 }
479 static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
480 {
481 struct acpi_table_fadt *fadt = NULL;
483 fadt = (struct acpi_table_fadt *)__acpi_map_table(phys, size);
484 if (!fadt) {
485 printk(KERN_WARNING PREFIX "Unable to map FADT\n");
486 return 0;
487 }
489 #ifdef CONFIG_ACPI_INTERPRETER
490 /* initialize sci_int early for INT_SRC_OVR MADT parsing */
491 acpi_fadt.sci_int = fadt->sci_int;
493 /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
494 acpi_fadt.revision = fadt->revision;
495 acpi_fadt.force_apic_physical_destination_mode =
496 fadt->force_apic_physical_destination_mode;
497 #endif
499 #ifdef CONFIG_X86_PM_TIMER
500 /* detect the location of the ACPI PM Timer */
501 if (fadt->header.revision >= FADT2_REVISION_ID) {
502 /* FADT rev. 2 */
503 if (fadt->xpm_timer_block.space_id ==
504 ACPI_ADR_SPACE_SYSTEM_IO)
505 pmtmr_ioport = fadt->xpm_timer_block.address;
506 /*
507 * "X" fields are optional extensions to the original V1.0
508 * fields, so we must fall back to the V1.0 field when the
509 * corresponding X field is zero.
510 */
511 if (!pmtmr_ioport)
512 pmtmr_ioport = fadt->pm_timer_block;
513 } else {
514 /* FADT rev. 1 */
515 pmtmr_ioport = fadt->pm_timer_block;
516 }
517 if (pmtmr_ioport)
518 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
519 pmtmr_ioport);
520 #endif
522 acpi_smi_cmd = fadt->smi_command;
523 acpi_enable_value = fadt->acpi_enable;
524 acpi_disable_value = fadt->acpi_disable;
526 acpi_fadt_parse_reg(fadt);
528 #ifdef CONFIG_ACPI_SLEEP
529 acpi_fadt_parse_sleep_info(fadt);
530 #endif
532 return 0;
533 }
535 unsigned long __init acpi_find_rsdp(void)
536 {
537 unsigned long rsdp_phys = 0;
539 #if 0
540 if (efi_enabled) {
541 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
542 return efi.acpi20;
543 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
544 return efi.acpi;
545 }
546 #endif
547 /*
548 * Scan memory looking for the RSDP signature. First search EBDA (low
549 * memory) paragraphs and then search upper memory (E0000-FFFFF).
550 */
551 rsdp_phys = acpi_scan_rsdp(0, 0x400);
552 if (!rsdp_phys)
553 rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
555 return rsdp_phys;
556 }
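
The search order follows the ACPI spec: low memory first, then the BIOS area 0xE0000-0xFFFFF. For completeness, the spec actually locates the EBDA via the real-mode segment stored at physical address 0x40E; the hedged standalone sketch below shows that derivation with a hypothetical read_phys16() helper and a fake return value. It is an illustration of the spec's rule, not what the simplified scan above does.

#include <stdint.h>
#include <stdio.h>

/*
 * Hedged sketch: derive the EBDA base the way the ACPI spec describes
 * (real-mode segment at physical 0x40E, shifted left by 4).
 * read_phys16() is a hypothetical helper; in a kernel it would read
 * through whatever low-memory mapping is available (e.g. __va()).
 */
static uint16_t read_phys16(uint64_t phys)
{
	(void)phys;
	return 0x9FC0;          /* pretend the BDA points here (fake value) */
}

int main(void)
{
	uint64_t ebda = (uint64_t)read_phys16(0x40E) << 4;

	/* A scanner such as acpi_scan_rsdp() would then search the first
	 * 1 KB of the EBDA, falling back to 0xE0000-0xFFFFF. */
	printf("EBDA base: %#llx\n", (unsigned long long)ebda);
	return 0;
}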
558 #ifdef CONFIG_X86_LOCAL_APIC
559 /*
560 * Parse LAPIC entries in MADT
561 * returns 0 on success, < 0 on error
562 */
563 static int __init acpi_parse_madt_lapic_entries(void)
564 {
565 int count;
567 if (!cpu_has_apic)
568 return -ENODEV;
570 /*
571 * Note that the LAPIC address is obtained from the MADT (32-bit value)
572 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
573 */
575 count =
576 acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
577 acpi_parse_lapic_addr_ovr, 0);
578 if (count < 0) {
579 printk(KERN_ERR PREFIX
580 "Error parsing LAPIC address override entry\n");
581 return count;
582 }
584 mp_register_lapic_address(acpi_lapic_addr);
586 count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
587 MAX_APICS);
588 if (!count) {
589 printk(KERN_ERR PREFIX "No LAPIC entries present\n");
590 /* TBD: Cleanup to allow fallback to MPS */
591 return -ENODEV;
592 } else if (count < 0) {
593 printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
594 /* TBD: Cleanup to allow fallback to MPS */
595 return count;
596 }
598 count =
599 acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
600 if (count < 0) {
601 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
602 /* TBD: Cleanup to allow fallback to MPS */
603 return count;
604 }
605 return 0;
606 }
607 #endif /* CONFIG_X86_LOCAL_APIC */
609 #if defined(CONFIG_X86_IO_APIC) /*&& defined(CONFIG_ACPI_INTERPRETER)*/
610 /*
611 * Parse IOAPIC related entries in MADT
612 * returns 0 on success, < 0 on error
613 */
614 static int __init acpi_parse_madt_ioapic_entries(void)
615 {
616 int count;
618 /*
619 * ACPI interpreter is required to complete interrupt setup,
620 * so if it is off, don't enumerate the io-apics with ACPI.
621 * If MPS is present, it will handle them,
622 * otherwise the system will stay in PIC mode
623 */
624 if (acpi_disabled || acpi_noirq) {
625 return -ENODEV;
626 }
628 if (!cpu_has_apic)
629 return -ENODEV;
631 /*
632 * if "noapic" boot option, don't look for IO-APICs
633 */
634 if (skip_ioapic_setup) {
635 printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
636 "due to 'noapic' option.\n");
637 return -ENODEV;
638 }
640 count =
641 acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
642 MAX_IO_APICS);
643 if (!count) {
644 printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
645 return -ENODEV;
646 } else if (count < 0) {
647 printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
648 return count;
649 }
651 count =
652 acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
653 NR_IRQ_VECTORS);
654 if (count < 0) {
655 printk(KERN_ERR PREFIX
656 "Error parsing interrupt source overrides entry\n");
657 /* TBD: Cleanup to allow fallback to MPS */
658 return count;
659 }
661 #ifdef CONFIG_ACPI_INTERPRETER
662 /*
663 * If the BIOS did not supply an INT_SRC_OVR for the SCI,
664 * pretend we got one so we can set the SCI flags.
665 */
666 if (!acpi_sci_override_gsi)
667 acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
668 #endif
670 /* Fill in identity legacy mappings where there is no override */
671 mp_config_acpi_legacy_irqs();
673 count =
674 acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
675 NR_IRQ_VECTORS);
676 if (count < 0) {
677 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
678 /* TBD: Cleanup to allow fallback to MPS */
679 return count;
680 }
682 return 0;
683 }
684 #else
685 static inline int acpi_parse_madt_ioapic_entries(void)
686 {
687 return -1;
688 }
689 #endif /* !(CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER) */
692 static void __init acpi_process_madt(void)
693 {
694 #ifdef CONFIG_X86_LOCAL_APIC
695 int count, error;
697 count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
698 if (count >= 1) {
700 /*
701 * Parse MADT LAPIC entries
702 */
703 error = acpi_parse_madt_lapic_entries();
704 if (!error) {
705 acpi_lapic = 1;
706 generic_bigsmp_probe();
708 /*
709 * Parse MADT IO-APIC entries
710 */
711 error = acpi_parse_madt_ioapic_entries();
712 if (!error) {
713 acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
714 acpi_irq_balance_set(NULL);
715 acpi_ioapic = 1;
717 smp_found_config = 1;
718 clustered_apic_check();
719 }
720 }
721 if (error == -EINVAL) {
722 /*
723 * Dell Precision Workstation 410, 610 come here.
724 */
725 printk(KERN_ERR PREFIX
726 "Invalid BIOS MADT, disabling ACPI\n");
727 disable_acpi();
728 }
729 }
730 #endif
731 return;
732 }
734 extern int acpi_force;
736 #ifdef __i386__
738 static int __init disable_acpi_irq(struct dmi_system_id *d)
739 {
740 if (!acpi_force) {
741 printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
742 d->ident);
743 acpi_noirq_set();
744 }
745 return 0;
746 }
748 static int __init disable_acpi_pci(struct dmi_system_id *d)
749 {
750 if (!acpi_force) {
751 printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
752 d->ident);
753 /*acpi_disable_pci();*/
754 }
755 return 0;
756 }
758 static int __init dmi_disable_acpi(struct dmi_system_id *d)
759 {
760 if (!acpi_force) {
761 printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
762 disable_acpi();
763 } else {
764 printk(KERN_NOTICE
765 "Warning: DMI blacklist says broken, but acpi forced\n");
766 }
767 return 0;
768 }
770 /*
771 * Limit ACPI to CPU enumeration for HT
772 */
773 static int __init force_acpi_ht(struct dmi_system_id *d)
774 {
775 if (!acpi_force) {
776 printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
777 d->ident);
778 disable_acpi();
779 acpi_ht = 1;
780 } else {
781 printk(KERN_NOTICE
782 "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
783 }
784 return 0;
785 }
787 /*
788 * If your system is blacklisted here, but you find that acpi=force
789 * works for you, please contact acpi-devel@sourceforge.net
790 */
791 static struct dmi_system_id __initdata acpi_dmi_table[] = {
792 /*
793 * Boxes that need ACPI disabled
794 */
795 {
796 .callback = dmi_disable_acpi,
797 .ident = "IBM Thinkpad",
798 .matches = {
799 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
800 DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
801 },
802 },
804 /*
805 * Boxes that need acpi=ht
806 */
807 {
808 .callback = force_acpi_ht,
809 .ident = "FSC Primergy T850",
810 .matches = {
811 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
812 DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
813 },
814 },
815 {
816 .callback = force_acpi_ht,
817 .ident = "DELL GX240",
818 .matches = {
819 DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
820 DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
821 },
822 },
823 {
824 .callback = force_acpi_ht,
825 .ident = "HP VISUALIZE NT Workstation",
826 .matches = {
827 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
828 DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
829 },
830 },
831 {
832 .callback = force_acpi_ht,
833 .ident = "Compaq Workstation W8000",
834 .matches = {
835 DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
836 DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
837 },
838 },
839 {
840 .callback = force_acpi_ht,
841 .ident = "ASUS P4B266",
842 .matches = {
843 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
844 DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
845 },
846 },
847 {
848 .callback = force_acpi_ht,
849 .ident = "ASUS P2B-DS",
850 .matches = {
851 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
852 DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
853 },
854 },
855 {
856 .callback = force_acpi_ht,
857 .ident = "ASUS CUR-DLS",
858 .matches = {
859 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
860 DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
861 },
862 },
863 {
864 .callback = force_acpi_ht,
865 .ident = "ABIT i440BX-W83977",
866 .matches = {
867 DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
868 DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
869 },
870 },
871 {
872 .callback = force_acpi_ht,
873 .ident = "IBM Bladecenter",
874 .matches = {
875 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
876 DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
877 },
878 },
879 {
880 .callback = force_acpi_ht,
881 .ident = "IBM eServer xSeries 360",
882 .matches = {
883 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
884 DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
885 },
886 },
887 {
888 .callback = force_acpi_ht,
889 .ident = "IBM eserver xSeries 330",
890 .matches = {
891 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
892 DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
893 },
894 },
895 {
896 .callback = force_acpi_ht,
897 .ident = "IBM eserver xSeries 440",
898 .matches = {
899 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
900 DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
901 },
902 },
904 /*
905 * Boxes that need ACPI PCI IRQ routing disabled
906 */
907 {
908 .callback = disable_acpi_irq,
909 .ident = "ASUS A7V",
910 .matches = {
911 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
912 DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
913 /* newer BIOS, Revision 1011, does work */
914 DMI_MATCH(DMI_BIOS_VERSION,
915 "ASUS A7V ACPI BIOS Revision 1007"),
916 },
917 },
919 /*
920 * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
921 */
922 { /* _BBN 0 bug */
923 .callback = disable_acpi_pci,
924 .ident = "ASUS PR-DLS",
925 .matches = {
926 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
927 DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
928 DMI_MATCH(DMI_BIOS_VERSION,
929 "ASUS PR-DLS ACPI BIOS Revision 1010"),
930 DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
931 },
932 },
933 {
934 .callback = disable_acpi_pci,
935 .ident = "Acer TravelMate 36x Laptop",
936 .matches = {
937 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
938 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
939 },
940 },
941 {}
942 };
944 #endif /* __i386__ */
946 /*
947 * acpi_boot_table_init() and acpi_boot_init()
948 * are called from setup_arch(), always.
949 * 1. checksums all tables
950 * 2. enumerates lapics
951 * 3. enumerates io-apics
952 *
953 * acpi_table_init() is separate to allow reading SRAT without
954 * other side effects.
955 *
956 * side effects of acpi_boot_init:
957 * acpi_lapic = 1 if LAPIC found
958 * acpi_ioapic = 1 if IOAPIC found
959 * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
960 * if acpi_blacklisted() acpi_disabled = 1;
961 * acpi_irq_model=...
962 * ...
963 *
964 * return value: (currently ignored)
965 * 0: success
966 * !0: failure
967 */
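
As the comment above notes, both entry points run from setup_arch(); the sketch below only illustrates that ordering. The wrapper function is hypothetical, and the trailing comments summarize what each call does in this file.

/* Hypothetical caller, shown only to illustrate the ordering described above. */
int acpi_boot_table_init(void);
int acpi_boot_init(void);

void example_arch_acpi_bringup(void)
{
	acpi_boot_table_init();  /* DMI quirks, table checksums, blacklist check */
	acpi_boot_init();        /* SBF, FADT, MADT (LAPICs/IO-APICs), HPET, DMAR */
}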
969 int __init acpi_boot_table_init(void)
970 {
971 int error;
973 #ifdef __i386__
974 dmi_check_system(acpi_dmi_table);
975 #endif
977 /*
978 * If acpi_disabled, bail out
979 * One exception: acpi=ht continues far enough to enumerate LAPICs
980 */
981 if (acpi_disabled && !acpi_ht)
982 return 1;
984 /*
985 * Initialize the ACPI boot-time table parser.
986 */
987 error = acpi_table_init();
988 if (error) {
989 disable_acpi();
990 return error;
991 }
993 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
995 /*
996 * blacklist may disable ACPI entirely
997 */
998 error = acpi_blacklisted();
999 if (error) {
1000 extern int acpi_force;
1002 if (acpi_force) {
1003 printk(KERN_WARNING PREFIX "acpi=force override\n");
1004 } else {
1005 printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
1006 disable_acpi();
1007 return error;
1008 }
1009 }
1011 return 0;
1012 }
1014 int __init acpi_boot_init(void)
1015 {
1016 /*
1017 * If acpi_disabled, bail out
1018 * One exception: acpi=ht continues far enough to enumerate LAPICs
1019 */
1020 if (acpi_disabled && !acpi_ht)
1021 return 1;
1023 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1025 /*
1026 * set sci_int and PM timer address
1027 */
1028 acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
1030 /*
1031 * Process the Multiple APIC Description Table (MADT), if present
1032 */
1033 acpi_process_madt();
1035 acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
1037 acpi_dmar_init();
1039 return 0;
1040 }
1042 unsigned int acpi_get_processor_id(unsigned int cpu)
1043 {
1044 unsigned int acpiid, apicid;
1046 if ((apicid = x86_cpu_to_apicid[cpu]) == 0xff)
1047 return 0xff;
1049 for (acpiid = 0; acpiid < ARRAY_SIZE(x86_acpiid_to_apicid); acpiid++)
1050 if (x86_acpiid_to_apicid[acpiid] == apicid)
1051 return acpiid;
1053 return 0xff;
1054 }
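
acpi_get_processor_id() inverts the x86_acpiid_to_apicid[] table that acpi_parse_lapic() fills in: given a CPU's APIC ID, it linearly searches for the ACPI processor ID mapped to it, returning 0xff when there is no match. The standalone sketch below shows the same reverse lookup over a toy table.

#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES 8
#define INVALID_ID  0xff

/* acpi_id -> apic_id, 0xff meaning "unused slot", as in x86_acpiid_to_apicid[]. */
static uint8_t acpiid_to_apicid[MAX_ENTRIES] = {
	[0 ... MAX_ENTRIES - 1] = INVALID_ID    /* GNU range designator, as in boot.c */
};

/* Reverse lookup: which ACPI processor ID owns this APIC ID? */
static unsigned int apicid_to_acpiid(uint8_t apicid)
{
	for (unsigned int acpiid = 0; acpiid < MAX_ENTRIES; acpiid++)
		if (acpiid_to_apicid[acpiid] == apicid)
			return acpiid;
	return INVALID_ID;
}

int main(void)
{
	acpiid_to_apicid[2] = 4;                /* pretend the MADT mapped ACPI ID 2 to APIC ID 4 */
	printf("%u\n", apicid_to_acpiid(4));    /* prints 2 */
	printf("%u\n", apicid_to_acpiid(9));    /* prints 255 (not found) */
	return 0;
}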