debuggers.hg: xen/arch/x86/smpboot.c @ 3674:fb875591fd72

bitkeeper revision 1.1159.223.63 (42028527-fv-d9BM0_LRp8UKGP19gQ)

Fix NMI deferral.
Signed-off-by: keir.fraser@cl.cam.ac.uk

author   kaf24@scramble.cl.cam.ac.uk
date     Thu Feb 03 20:10:15 2005 +0000
parents  708bd9c8362b
children 41b4061f42cb 0dc3b8b8c298
/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 *
 * Fixes
 *    Felix Koop        : NR_CPUS used properly
 *    Jose Renau        : Handle single CPU case.
 *    Alan Cox          : By repeated request 8) - Total BogoMIP report.
 *    Greg Wright       : Fix for kernel stacks panic.
 *    Erich Boleyn      : MP v1.4 and additional changes.
 *    Matthias Sattler  : Changes for 2.1 kernel map.
 *    Michel Lespinasse : Changes for 2.1 kernel map.
 *    Michael Chastain  : Change trampoline.S to gnu as.
 *    Alan Cox          : Dumb bug: 'B' step PPro's are fine
 *    Ingo Molnar       : Added APIC timers, based on code
 *                        from Jose Renau
 *    Ingo Molnar       : various cleanups and rewrites
 *    Tigran Aivazian   : fixed "0.00 in /proc/uptime on SMP" bug.
 *    Maciej W. Rozycki : Bits for genuine 82489DX APICs
 *    Martin J. Bligh   : Added support for multi-quad systems
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/irq.h>
#include <xen/mm.h>
#include <xen/slab.h>
#include <asm/flushtlb.h>
#include <asm/mc146818rtc.h>
#include <asm/smpboot.h>
#include <xen/smp.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/mpspec.h>
#include <asm/io_apic.h>
#include <xen/sched.h>
#include <xen/delay.h>
#include <xen/lib.h>

#ifdef CONFIG_SMP

/* Setup configured maximum number of CPUs to activate. */
static int max_cpus = -1;

/* Total count of live CPUs. */
int smp_num_cpus = 1;

/* Number of hyperthreads per core. */
int ht_per_core = 1;

/* Bitmask of currently online CPUs. */
unsigned long cpu_online_map;

static volatile unsigned long cpu_callin_map;
static volatile unsigned long cpu_callout_map;

/* Per CPU bogomips and other parameters. */
struct cpuinfo_x86 cpu_data[NR_CPUS];

/* Set when the idlers are all forked. */
int smp_threads_ready;

/*
 * Trampoline 80x86 program as an array.
 */
extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];
static unsigned char *trampoline_base;

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __init setup_trampoline(void)
{
    memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
    return virt_to_phys(trampoline_base);
}

/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
    /*
     * Has to be in very low memory so we can execute
     * real-mode AP code.
     */
    trampoline_base = __va(0x90000);
}
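
/*
 * Added note: a STARTUP IPI carries only an 8-bit vector that is taken as
 * a 4KB page number, and the woken AP begins execution in real mode at
 * CS:IP = (vector << 8):0000. With the trampoline at 0x90000 the vector is
 * 0x90, so execution begins at 0x9000:0000; this is why the trampoline
 * page must lie below 1MB and be page-aligned.
 */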

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
void __init smp_store_cpu_info(int id)
{
    cpu_data[id] = boot_cpu_data;
    identify_cpu(&cpu_data[id]);
}

/*
 * Architecture specific routine called by the kernel just before init is
 * fired off. This allows the BP to have everything in order [we hope].
 * At the end of this all the APs will hit the system scheduling and off
 * we go. Each AP will load the system gdt's and jump through the kernel
 * init into idle(). At this point the scheduler will one day take over
 * and give them jobs to do. smp_callin is a standard routine
 * we use to track CPUs as they power up.
 */

static atomic_t smp_commenced = ATOMIC_INIT(0);

void __init smp_commence(void)
{
    /*
     * Lets the callins below out of their loop.
     */
    Dprintk("Setting commenced=1, go go go\n");

    wmb();
    atomic_set(&smp_commenced, 1);
}

/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSCs synchronized,
 * then we print a warning if not, and always resync.
 */

static atomic_t tsc_start_flag  = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop  = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];

#define NR_LOOPS 5

/*
 * Accurate 64-bit/32-bit division, expanded to 32-bit divisions and 64-bit
 * multiplication. Not terribly optimized but we need it at boot time only
 * anyway.
 *
 * result == a / b
 *        == (a1 + a2*(2^32)) / b
 *        == a1/b + a2*(2^32/b)
 *        == a1/b + a2*((2^32-1)/b) + a2/b + (a2*((2^32-1) % b))/b
 *                     ^---- (this multiplication can overflow)
 */
static unsigned long long div64(unsigned long long a, unsigned long b0)
{
    unsigned int a1, a2;
    unsigned long long res;

    a1 = ((unsigned int *)&a)[0];
    a2 = ((unsigned int *)&a)[1];

    res = a1/b0 +
        (unsigned long long)a2 * (unsigned long long)(0xffffffff/b0) +
        a2 / b0 +
        (a2 * (0xffffffff % b0)) / b0;

    return res;
}
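
/*
 * Worked example, added for illustration: for a = 2^32 (a1 = 0, a2 = 1)
 * and b0 = 10, the expression evaluates to
 *     0/10 + 1*(0xffffffff/10) + 1/10 + (1*(0xffffffff%10))/10
 *   = 429496729 + 0 + 0 + 0 = 429496729,
 * which is exactly floor(2^32/10). The small approximation error of this
 * scheme is harmless for the microsecond-scale skew report below.
 */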

static void __init synchronize_tsc_bp(void)
{
    int i;
    unsigned long long t0;
    unsigned long long sum, avg;
    long long delta;
    int buggy = 0;

    printk("checking TSC synchronization across CPUs: ");

    atomic_set(&tsc_start_flag, 1);
    wmb();

    /*
     * We loop a few times to get a primed instruction cache,
     * then the last pass is more or less synchronized and
     * the BP and APs set their cycle counters to zero all at
     * once. This reduces the chance of having random offsets
     * between the processors, and guarantees that the maximum
     * delay between the cycle counters is never bigger than
     * the latency of information-passing (cachelines) between
     * two CPUs.
     */
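    /*
     * Added note: tsc_count_start is the rendezvous counter. Each AP
     * increments it and spins until it reaches smp_num_cpus; the BP waits
     * for the other smp_num_cpus-1 arrivals, so its own increment below
     * is what releases all the APs at (nearly) the same instant.
     */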
    for (i = 0; i < NR_LOOPS; i++) {
        /*
         * All APs synchronize but they loop on '== num_cpus'.
         */
        while (atomic_read(&tsc_count_start) != smp_num_cpus-1)
            mb();
        atomic_set(&tsc_count_stop, 0);
        wmb();
        /*
         * This lets the APs save their current TSC:
         */
        atomic_inc(&tsc_count_start);

        rdtscll(tsc_values[smp_processor_id()]);
        /*
         * We clear the TSC in the last loop:
         */
        if (i == NR_LOOPS-1)
            write_tsc(0, 0);

        /*
         * Wait for all APs to leave the synchronization point:
         */
        while (atomic_read(&tsc_count_stop) != smp_num_cpus-1)
            mb();
        atomic_set(&tsc_count_start, 0);
        wmb();
        atomic_inc(&tsc_count_stop);
    }

    sum = 0;
    for (i = 0; i < smp_num_cpus; i++) {
        t0 = tsc_values[i];
        sum += t0;
    }
    avg = div64(sum, smp_num_cpus);

    sum = 0;
    for (i = 0; i < smp_num_cpus; i++) {
        delta = tsc_values[i] - avg;
        if (delta < 0)
            delta = -delta;
        /*
         * We report clock differences bigger than 2 microseconds.
         */
        if (delta > 2*ticks_per_usec) {
            long realdelta;
            if (!buggy) {
                buggy = 1;
                printk("\n");
            }
            realdelta = div64(delta, ticks_per_usec);
            if (tsc_values[i] < avg)
                realdelta = -realdelta;

            printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs "
                   "TSC skew! FIXED.\n", i, realdelta);
        }

        sum += delta;
    }
    if (!buggy)
        printk("passed.\n");
}

static void __init synchronize_tsc_ap(void)
{
    int i;

    /*
     * smp_num_cpus is not necessarily known at the time
     * this gets called, so we first wait for the BP to
     * finish SMP initialization:
     */
    while (!atomic_read(&tsc_start_flag))
        mb();

    for (i = 0; i < NR_LOOPS; i++) {
        atomic_inc(&tsc_count_start);
        while (atomic_read(&tsc_count_start) != smp_num_cpus)
            mb();

        rdtscll(tsc_values[smp_processor_id()]);
        if (i == NR_LOOPS-1)
            write_tsc(0, 0);

        atomic_inc(&tsc_count_stop);
        while (atomic_read(&tsc_count_stop) != smp_num_cpus)
            mb();
    }
}
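
/*
 * Added note (assuming the usual rdtscll()/write_tsc() MSR helpers):
 * write_tsc(0, 0) loads both 32-bit halves of the TSC, so on the final
 * pass every CPU restarts its cycle counter from zero within roughly a
 * cacheline transfer of the others, as the comment above describes.
 */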

#undef NR_LOOPS

static atomic_t init_deasserted;

void __init smp_callin(void)
{
    int cpuid, phys_id, i;

    /*
     * If woken up by an INIT in an 82489DX configuration
     * we may get here before an INIT-deassert IPI reaches
     * our local APIC. We have to wait for the IPI or we'll
     * lock up on an APIC access.
     */
    while (!atomic_read(&init_deasserted))
        ;

    /*
     * (This works even if the APIC is not enabled.)
     */
    phys_id = GET_APIC_ID(apic_read(APIC_ID));
    cpuid = smp_processor_id();
    if (test_and_set_bit(cpuid, &cpu_online_map)) {
        printk("huh, phys CPU#%d, CPU#%d already present??\n",
               phys_id, cpuid);
        BUG();
    }
    Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

    /*
     * STARTUP IPIs are fragile beasts as they might sometimes
     * trigger some glue motherboard logic. Complete APIC bus
     * silence for 1 second; this overestimates the time the
     * boot CPU is spending to send the up to 2 STARTUP IPIs
     * by a factor of two. This should be enough.
     */
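    /*
     * Added note: the loop below polls for up to two seconds
     * (200 iterations x 10ms) for the BP to set our bit in
     * cpu_callout_map.
     */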
    for ( i = 0; i < 200; i++ )
    {
        if ( test_bit(cpuid, &cpu_callout_map) )
            break;
        mdelay(10);
    }

    if (!test_bit(cpuid, &cpu_callout_map)) {
        printk("BUG: CPU%d started up but did not get a callout!\n",
               cpuid);
        BUG();
    }

    /*
     * The boot CPU has finished the init stage and is spinning
     * on callin_map until we finish. We are free to set up this
     * CPU, first the APIC. (This is probably redundant on most
     * boards.)
     */

    Dprintk("CALLIN, before setup_local_APIC().\n");

    setup_local_APIC();

    __sti();

#ifdef CONFIG_MTRR
    /*
     * Must be done before calibration delay is computed.
     */
    mtrr_init_secondary_cpu();
#endif

    Dprintk("Stack at about %p\n", &cpuid);

    /*
     * Save our processor parameters.
     */
    smp_store_cpu_info(cpuid);

    if (nmi_watchdog == NMI_LOCAL_APIC)
        setup_apic_nmi_watchdog();

    /*
     * Allow the master to continue.
     */
    set_bit(cpuid, &cpu_callin_map);

    /*
     * Synchronize the TSC with the BP.
     */
    synchronize_tsc_ap();
}

static int cpucount;

/*
 * Activate a secondary processor.
 */
void __init start_secondary(void)
{
    unsigned int cpu = cpucount;
    /* 6 bytes suitable for passing to LIDT instruction. */
    unsigned char idt_load[6];

    extern void cpu_init(void);

    set_current(idle_task[cpu]);

    /*
     * Don't put anything before smp_callin(); SMP booting is so
     * fragile that we want to limit the things done here to the
     * bare minimum.
     */
    cpu_init();
    smp_callin();

    while (!atomic_read(&smp_commenced))
        rep_nop();

    /*
     * At this point, the boot CPU has fully initialised the IDT. It is
     * now safe to make ourselves a private copy.
     */
    idt_tables[cpu] = xmalloc(IDT_ENTRIES*8);
    memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*8);
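    /*
     * Added note: lidt expects a 6-byte pseudo-descriptor, a 16-bit limit
     * (size of the IDT in bytes minus one) followed by a 32-bit linear
     * base address; the two stores below assemble it in idt_load[].
     */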
    *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*8)-1;
    *(unsigned long  *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
    /* lidt reads its memory operand, so pass it as an input. */
    __asm__ __volatile__ ( "lidt %0" : : "m" (idt_load) );

    /*
     * Low-memory mappings have been cleared; flush them from the local
     * TLB too.
     */
    local_flush_tlb();

    startup_cpu_idle_loop();

    BUG();
}

extern struct {
    unsigned long esp, ss;
} stack_start;

/* Which physical APIC ID maps to which logical CPU number. */
volatile int physical_apicid_2_cpu[MAX_APICID];
/* Which logical CPU number maps to which physical APIC ID. */
volatile int cpu_2_physical_apicid[NR_CPUS];

/* Which logical APIC ID maps to which logical CPU number. */
volatile int logical_apicid_2_cpu[MAX_APICID];
/* Which logical CPU number maps to which logical APIC ID. */
volatile int cpu_2_logical_apicid[NR_CPUS];

/* Initialize all maps between cpu number and apicids. */
static inline void init_cpu_to_apicid(void)
{
    int apicid, cpu;

    for (apicid = 0; apicid < MAX_APICID; apicid++) {
        physical_apicid_2_cpu[apicid] = -1;
        logical_apicid_2_cpu[apicid] = -1;
    }
    for (cpu = 0; cpu < NR_CPUS; cpu++) {
        cpu_2_physical_apicid[cpu] = -1;
        cpu_2_logical_apicid[cpu] = -1;
    }
}

/*
 * Set up a mapping between cpu and apicid. Uses logical apicids for
 * multiquad, else physical apic ids.
 */
static inline void map_cpu_to_boot_apicid(int cpu, int apicid)
{
    physical_apicid_2_cpu[apicid] = cpu;
    cpu_2_physical_apicid[cpu] = apicid;
}

/*
 * Undo a mapping between cpu and apicid. Uses logical apicids for
 * multiquad, else physical apic ids.
 */
static inline void unmap_cpu_to_boot_apicid(int cpu, int apicid)
{
    physical_apicid_2_cpu[apicid] = -1;
    cpu_2_physical_apicid[cpu] = -1;
}

#if APIC_DEBUG
static inline void inquire_remote_apic(int apicid)
{
    int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
    char *names[] = { "ID", "VERSION", "SPIV" };
    int timeout, status;

    printk("Inquiring remote APIC #%d...\n", apicid);

    for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
        printk("... APIC #%d %s: ", apicid, names[i]);

        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
        apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

        timeout = 0;
        do {
            udelay(100);
            status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
        } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

        switch (status) {
        case APIC_ICR_RR_VALID:
            status = apic_read(APIC_RRR);
            printk("%08x\n", status);
            break;
        default:
            printk("failed\n");
        }
    }
}
#endif

static int wakeup_secondary_via_INIT(int phys_apicid, unsigned long start_eip)
{
    unsigned long send_status = 0, accept_status = 0;
    int maxlvt, timeout, num_starts, j;

    Dprintk("Asserting INIT.\n");

    /*
     * Turn INIT on target chip.
     */
    apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

    /*
     * Send IPI.
     */
    apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
                      | APIC_DM_INIT);

    Dprintk("Waiting for send to finish...\n");
    timeout = 0;
    do {
        Dprintk("+");
        udelay(100);
        send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
    } while (send_status && (timeout++ < 1000));

    mdelay(10);

    Dprintk("Deasserting INIT.\n");

    /* Target chip */
    apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

    /* Send IPI */
    apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

    Dprintk("Waiting for send to finish...\n");
    timeout = 0;
    do {
        Dprintk("+");
        udelay(100);
        send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
    } while (send_status && (timeout++ < 1000));

    atomic_set(&init_deasserted, 1);
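    /*
     * Added note: the AP spins on init_deasserted in smp_callin() before
     * touching its local APIC; this store is what releases it.
     */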

    /*
     * Should we send STARTUP IPIs?
     *
     * Determine this based on the APIC version.
     * If we don't have an integrated APIC, don't send the STARTUP IPIs.
     */
    if (APIC_INTEGRATED(apic_version[phys_apicid]))
        num_starts = 2;
    else
        num_starts = 0;

    /*
     * Run STARTUP IPI loop.
     */
    Dprintk("#startup loops: %d.\n", num_starts);

    maxlvt = get_maxlvt();

    for (j = 1; j <= num_starts; j++) {
        Dprintk("Sending STARTUP #%d.\n", j);

        apic_read_around(APIC_SPIV);
        apic_write(APIC_ESR, 0);
        apic_read(APIC_ESR);
        Dprintk("After apic_write.\n");

        /*
         * STARTUP IPI
         */

        /* Target chip */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /* Boot on the stack */
        /* Kick the second */
        apic_write_around(APIC_ICR, APIC_DM_STARTUP
                          | (start_eip >> 12));
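        /*
         * Added note: the STARTUP vector field is the 4KB page number of
         * the real-mode entry point, so start_eip must be page-aligned
         * and below 1MB (see setup_trampoline()).
         */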

        /*
         * Give the other CPU some time to accept the IPI.
         */
        udelay(300);

        Dprintk("Startup point 1.\n");

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
        do {
            Dprintk("+");
            udelay(100);
            send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
        } while (send_status && (timeout++ < 1000));

        /*
         * Give the other CPU some time to accept the IPI.
         */
        udelay(200);
        /*
         * Due to the Pentium erratum 3AP.
         */
        if (maxlvt > 3) {
            apic_read_around(APIC_SPIV);
            apic_write(APIC_ESR, 0);
        }
        accept_status = (apic_read(APIC_ESR) & 0xEF);
        if (send_status || accept_status)
            break;
    }
    Dprintk("After Startup.\n");

    if (send_status)
        printk("APIC never delivered???\n");
    if (accept_status)
        printk("APIC delivery error (%lx).\n", accept_status);

    return (send_status | accept_status);
}

extern unsigned long cpu_initialized;

/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 */
static void __init do_boot_cpu(int apicid)
{
    struct domain *idle;
    unsigned long boot_error = 0;
    int timeout, cpu;
    unsigned long start_eip, stack;

    cpu = ++cpucount;

    if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
        panic("failed 'createdomain' for CPU %d", cpu);

    set_bit(DF_IDLETASK, &idle->flags);

    idle->mm.pagetable = mk_pagetable(__pa(idle_pg_table));

    map_cpu_to_boot_apicid(cpu, apicid);

    idle_task[cpu] = idle;

    /* start_eip had better be page-aligned! */
    start_eip = setup_trampoline();

    /* So we see what's up. */
    printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);

    stack = __pa(alloc_xenheap_pages(1));
    stack_start.esp = stack + STACK_SIZE - STACK_RESERVED;

    /* Debug build: detect stack overflow by setting up a guard page. */
    memguard_guard_range(__va(stack), PAGE_SIZE);

    /*
     * This grunge runs the startup process for
     * the targeted processor.
     */

    atomic_set(&init_deasserted, 0);

    Dprintk("Setting warm reset code and vector.\n");

    CMOS_WRITE(0xa, 0xf);
    local_flush_tlb();
    Dprintk("1.\n");
    *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
    Dprintk("2.\n");
    *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
    Dprintk("3.\n");
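    /*
     * Added note: writing 0xa to CMOS register 0xf selects the BIOS
     * "warm reset" path, which jumps through the real-mode vector at
     * 40:67. The two stores above put start_eip there as segment:offset
     * (TRAMPOLINE_HIGH and TRAMPOLINE_LOW alias that vector), so a CPU
     * taken through warm reset also lands in the trampoline.
     */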

    /*
     * Be paranoid about clearing APIC errors.
     */
    if ( APIC_INTEGRATED(apic_version[apicid]) )
    {
        apic_read_around(APIC_SPIV);
        apic_write(APIC_ESR, 0);
        apic_read(APIC_ESR);
    }

    /*
     * Status is now clean.
     */
    boot_error = 0;

    /*
     * Starting actual IPI sequence...
     */
    boot_error = wakeup_secondary_via_INIT(apicid, start_eip);

    if (!boot_error) {
        /*
         * Allow APs to start initializing.
         */
        Dprintk("Before Callout %d.\n", cpu);
        set_bit(cpu, &cpu_callout_map);
        Dprintk("After Callout %d.\n", cpu);

        /*
         * Wait 5s total for a response.
         */
        for (timeout = 0; timeout < 50000; timeout++) {
            if (test_bit(cpu, &cpu_callin_map))
                break; /* It has booted */
            udelay(100);
        }

        if (test_bit(cpu, &cpu_callin_map)) {
            /* number CPUs logically, starting from 1 (BSP is 0) */
            printk("CPU%d has booted.\n", cpu);
        } else {
            boot_error = 1;
            if (*((volatile unsigned long *)phys_to_virt(start_eip))
                == 0xA5A5A5A5)
                /* trampoline started but...? */
                printk("Stuck ??\n");
            else
                /* trampoline code not run */
                printk("Not responding.\n");
#if APIC_DEBUG
            inquire_remote_apic(apicid);
#endif
        }
    }
    if (boot_error) {
        /* Try to put things back the way they were before ... */
        unmap_cpu_to_boot_apicid(cpu, apicid);
        clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
        clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
        clear_bit(cpu, &cpu_online_map);  /* was set in smp_callin() */
        cpucount--;
    }
}

/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad; always 0 otherwise. */
void *xquad_portio = NULL;

void __init smp_boot_cpus(void)
{
    int apicid, bit;

#ifdef CONFIG_MTRR
    /* Must be done before other processors booted. */
    mtrr_init_boot_cpu();
#endif
    /* Initialize the logical to physical CPU number mapping. */
    init_cpu_to_apicid();

    /*
     * Set up boot CPU information.
     */
    smp_store_cpu_info(0); /* Final full version of the data */
    printk("CPU%d booted\n", 0);

    /*
     * We have the boot CPU online for sure.
     */
    set_bit(0, &cpu_online_map);
    boot_cpu_logical_apicid = logical_smp_processor_id();
    map_cpu_to_boot_apicid(0, boot_cpu_apicid);

    /*
     * If we couldn't find an SMP configuration at boot time,
     * get out of here now!
     */
    if (!smp_found_config) {
        printk("SMP motherboard not detected.\n");
        io_apic_irqs = 0;
        cpu_online_map = phys_cpu_present_map = 1;
        smp_num_cpus = 1;
        if (APIC_init_uniprocessor())
            printk("Local APIC not detected."
                   " Using dummy APIC emulation.\n");
        goto smp_done;
    }

    /*
     * Should not be necessary because the MP table should list the boot
     * CPU too, but we do it for the sake of robustness anyway.
     */
    if (!test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map)) {
        printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
               boot_cpu_physical_apicid);
        phys_cpu_present_map |= (1 << hard_smp_processor_id());
    }

    /*
     * If we couldn't find a local APIC, then get out of here now!
     */
    if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
        !test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability)) {
        printk("BIOS bug, local APIC #%d not detected!...\n",
               boot_cpu_physical_apicid);
        printk("... forcing use of dummy APIC emulation. "
               "(tell your hw vendor)\n");
        io_apic_irqs = 0;
        cpu_online_map = phys_cpu_present_map = 1;
        smp_num_cpus = 1;
        goto smp_done;
    }

    verify_local_APIC();

    /*
     * If SMP should be disabled, then really disable it!
     */
    if (!max_cpus) {
        smp_found_config = 0;
        printk("SMP mode deactivated, forcing use of dummy APIC emulation.\n");
        io_apic_irqs = 0;
        cpu_online_map = phys_cpu_present_map = 1;
        smp_num_cpus = 1;
        goto smp_done;
    }

    connect_bsp_APIC();
    setup_local_APIC();

    if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid)
        BUG();

    /*
     * Scan the CPU present map and fire up the other CPUs via do_boot_cpu.
     *
     * In clustered apic mode, phys_cpu_present_map is constructed thus:
     * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
     * clustered apic ID.
     */
    Dprintk("CPU present map: %lx\n", phys_cpu_present_map);

    for (bit = 0; bit < NR_CPUS; bit++) {
        apicid = cpu_present_to_apicid(bit);
        /*
         * Don't even attempt to start the boot CPU!
         */
        if (apicid == boot_cpu_apicid)
            continue;

        /*
         * Don't start hyperthreads if option 'noht' was requested.
         */
        if (opt_noht && (apicid & (ht_per_core - 1)))
            continue;

        if (!(phys_cpu_present_map & (1 << bit)))
            continue;
        if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
            continue;

        do_boot_cpu(apicid);

        /*
         * Make sure we unmap all failed CPUs.
         */
        if ((boot_apicid_to_cpu(apicid) == -1) &&
            (phys_cpu_present_map & (1 << bit)))
            printk("CPU #%d not responding - cannot use it.\n",
                   apicid);
    }

    /*
     * Cleanup possible dangling ends...
     */

    /*
     * Install writable page 0 entry to set BIOS data area.
     */
    local_flush_tlb();

    /*
     * Paranoid: set warm reset code and vector here back
     * to default values.
     */
    CMOS_WRITE(0, 0xf);
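    /*
     * Added note: the store below zeroes that same 40:67 warm-reset
     * vector (physical 0x467) that do_boot_cpu() pointed at the
     * trampoline.
     */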
    *((volatile long *) phys_to_virt(0x467)) = 0;

    if (!cpucount) {
        printk("Error: only one processor found.\n");
    } else {
        printk("Total of %d processors activated.\n", cpucount+1);
    }
    smp_num_cpus = cpucount + 1;

    Dprintk("Boot done.\n");

    /*
     * Here we can be sure that there is an IO-APIC in the system. Let's
     * go and set it up:
     */
    if ( nr_ioapics )
        setup_IO_APIC();

    /* Set up all local APIC timers in the system. */
    setup_APIC_clocks();

    /* Synchronize the TSC with the AP(s). */
    if ( cpucount )
        synchronize_tsc_bp();

 smp_done:
    ;
}

#endif /* CONFIG_SMP */