debuggers.hg

view xen/arch/x86/smpboot.c @ 3659:bf2c38625b39

bitkeeper revision 1.1159.212.72 (42011b79Y7C9nEKFZ5pdQXwp8jC9hw)

More x86/64. Now boot secondary CPUs, but I seem to have problems
executing IRET, so interrupts are fatal.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Wed Feb 02 18:27:05 2005 +0000 (2005-02-02)
parents 0ef6e8e6e85d
children 060c1ea52343
line source
1 /*
2 * x86 SMP booting functions
3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
5 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
6 *
7 * Much of the core SMP work is based on previous work by Thomas Radke, to
8 * whom a great many thanks are extended.
9 *
10 * Thanks to Intel for making available several different Pentium,
11 * Pentium Pro and Pentium-II/Xeon MP machines.
12 * Original development of Linux SMP code supported by Caldera.
13 *
14 * This code is released under the GNU General Public License version 2 or
15 * later.
16 *
17 * Fixes
18 * Felix Koop : NR_CPUS used properly
19 * Jose Renau : Handle single CPU case.
20 * Alan Cox : By repeated request 8) - Total BogoMIP report.
21 * Greg Wright : Fix for kernel stacks panic.
22 * Erich Boleyn : MP v1.4 and additional changes.
23 * Matthias Sattler : Changes for 2.1 kernel map.
24 * Michel Lespinasse : Changes for 2.1 kernel map.
25 * Michael Chastain : Change trampoline.S to gnu as.
26 * Alan Cox : Dumb bug: 'B' step PPro's are fine
27 * Ingo Molnar : Added APIC timers, based on code
28 * from Jose Renau
29 * Ingo Molnar : various cleanups and rewrites
30 * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
31 * Maciej W. Rozycki : Bits for genuine 82489DX APICs
32 * Martin J. Bligh : Added support for multi-quad systems
33 */
35 #include <xen/config.h>
36 #include <xen/init.h>
37 #include <xen/irq.h>
38 #include <xen/mm.h>
39 #include <xen/slab.h>
40 #include <asm/flushtlb.h>
41 #include <asm/mc146818rtc.h>
42 #include <asm/smpboot.h>
43 #include <xen/smp.h>
44 #include <asm/msr.h>
45 #include <asm/system.h>
46 #include <asm/mpspec.h>
47 #include <asm/io_apic.h>
48 #include <xen/sched.h>
49 #include <xen/delay.h>
50 #include <xen/lib.h>
52 #ifdef CONFIG_SMP
54 /* Setup configured maximum number of CPUs to activate */
55 static int max_cpus = -1;
57 /* Total count of live CPUs */
58 int smp_num_cpus = 1;
60 /* Number of hyperthreads per core */
61 int ht_per_core = 1;
63 /* Bitmask of currently online CPUs */
64 unsigned long cpu_online_map;
66 static volatile unsigned long cpu_callin_map;
67 static volatile unsigned long cpu_callout_map;
69 /* Per CPU bogomips and other parameters */
70 struct cpuinfo_x86 cpu_data[NR_CPUS];
72 /* Set when the idlers are all forked */
73 int smp_threads_ready;
75 /*
76 * Trampoline 80x86 program as an array.
77 */
79 extern unsigned char trampoline_data [];
80 extern unsigned char trampoline_end [];
81 static unsigned char *trampoline_base;
83 /*
84 * Currently trivial. Write the real->protected mode
85 * bootstrap into the page concerned. The caller
86 * has made sure it's suitably aligned.
87 */
89 static unsigned long __init setup_trampoline(void)
90 {
91 memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
92 return virt_to_phys(trampoline_base);
93 }
95 /*
96 * We are called very early to get the low memory for the
97 * SMP bootup trampoline page.
98 */
99 void __init smp_alloc_memory(void)
100 {
101 /*
102 * Has to be in very low memory so we can execute
103 * real-mode AP code.
104 */
105 trampoline_base = __va(0x90000);
106 }
108 /*
109 * The bootstrap kernel entry code has set these up. Save them for
110 * a given CPU
111 */
113 void __init smp_store_cpu_info(int id)
114 {
115 cpu_data[id] = boot_cpu_data;
116 identify_cpu(&cpu_data[id]);
117 }
119 /*
120 * Architecture specific routine called by the kernel just before init is
121 * fired off. This allows the BP to have everything in order [we hope].
122 * At the end of this all the APs will hit the system scheduling and off
123 * we go. Each AP will load the system gdt's and jump through the kernel
124 * init into idle(). At this point the scheduler will one day take over
125 * and give them jobs to do. smp_callin is a standard routine
126 * we use to track CPUs as they power up.
127 */
129 static atomic_t smp_commenced = ATOMIC_INIT(0);
131 void __init smp_commence(void)
132 {
133 /*
134 * Lets the callins below out of their loop.
135 */
136 Dprintk("Setting commenced=1, go go go\n");
138 wmb();
139 atomic_set(&smp_commenced,1);
140 }
142 /*
143 * TSC synchronization.
144 *
145 * We first check whether all CPUs have their TSC's synchronized,
146 * then we print a warning if not, and always resync.
147 */
149 static atomic_t tsc_start_flag = ATOMIC_INIT(0);
150 static atomic_t tsc_count_start = ATOMIC_INIT(0);
151 static atomic_t tsc_count_stop = ATOMIC_INIT(0);
152 static unsigned long long tsc_values[NR_CPUS];
154 #define NR_LOOPS 5
156 /*
157 * accurate 64-bit/32-bit division, expanded to 32-bit divisions and 64-bit
158 * multiplication. Not terribly optimized but we need it at boot time only
159 * anyway.
160 *
161 * result == a / b
162 * == (a1 + a2*(2^32)) / b
163 * == a1/b + a2*(2^32/b)
164 * == a1/b + a2*((2^32-1)/b) + a2/b + (a2*((2^32-1) % b))/b
165 * ^---- (this multiplication can overflow)
166 */
/*
 * Approximate 64-bit / 32-bit division, expanded to 32-bit divisions
 * and 64-bit multiplication.  Not terribly optimized, but it is only
 * needed at boot time (TSC skew reporting), before any generic 64-bit
 * divide support is assumed.
 *
 *  result == a / b
 *         == (a1 + a2*(2^32)) / b
 *         ~= a1/b + a2*((2^32-1)/b) + a2/b + (a2*((2^32-1) % b))/b
 *                          ^---- (this multiplication can overflow)
 *
 * NOTE: per-term truncation means the result can differ slightly from
 * the exact quotient; callers only use it for human-readable averages.
 *
 * The original code extracted the two 32-bit halves of 'a' by
 * type-punning through an (unsigned int *), which is undefined
 * behaviour under strict aliasing and endian-dependent.  Shifts are
 * equivalent on the little-endian x86 targets and well-defined
 * everywhere.
 */
static unsigned long long div64 (unsigned long long a, unsigned long b0)
{
    unsigned int a1, a2;
    unsigned long long res;

    a1 = (unsigned int)a;         /* low 32 bits of a */
    a2 = (unsigned int)(a >> 32); /* high 32 bits of a */

    res = a1/b0 +
        (unsigned long long)a2 * (unsigned long long)(0xffffffff/b0) +
        a2 / b0 +
        (a2 * (0xffffffff % b0)) / b0;

    return res;
}
/*
 * BP side of TSC synchronization: run NR_LOOPS lockstep rounds with
 * the APs (synchronize_tsc_ap), sampling every CPU's TSC and zeroing
 * them all in the final round, then report any CPU whose sample
 * deviated from the average by more than 2us.
 *
 * The tsc_count_start/tsc_count_stop atomics implement a two-phase
 * barrier: the exact order of the set/inc/wait operations below is
 * what keeps BP and APs in lockstep -- do not reorder.
 */
static void __init synchronize_tsc_bp (void)
{
    int i;
    unsigned long long t0;
    unsigned long long sum, avg;
    long long delta;
    int buggy = 0;

    printk("checking TSC synchronization across CPUs: ");

    /* Let the APs out of their wait loop in synchronize_tsc_ap(). */
    atomic_set(&tsc_start_flag, 1);
    wmb();

    /*
     * We loop a few times to get a primed instruction cache,
     * then the last pass is more or less synchronized and
     * the BP and APs set their cycle counters to zero all at
     * once. This reduces the chance of having random offsets
     * between the processors, and guarantees that the maximum
     * delay between the cycle counters is never bigger than
     * the latency of information-passing (cachelines) between
     * two CPUs.
     */
    for (i = 0; i < NR_LOOPS; i++) {
        /*
         * all APs synchronize but they loop on '== num_cpus'
         */
        while (atomic_read(&tsc_count_start) != smp_num_cpus-1) mb();
        atomic_set(&tsc_count_stop, 0);
        wmb();
        /*
         * this lets the APs save their current TSC:
         */
        atomic_inc(&tsc_count_start);

        rdtscll(tsc_values[smp_processor_id()]);
        /*
         * We clear the TSC in the last loop:
         */
        if (i == NR_LOOPS-1)
            write_tsc(0, 0);

        /*
         * Wait for all APs to leave the synchronization point:
         */
        while (atomic_read(&tsc_count_stop) != smp_num_cpus-1) mb();
        atomic_set(&tsc_count_start, 0);
        wmb();
        atomic_inc(&tsc_count_stop);
    }

    /* Average the per-CPU samples taken in the final round. */
    sum = 0;
    for (i = 0; i < smp_num_cpus; i++) {
        t0 = tsc_values[i];
        sum += t0;
    }
    avg = div64(sum, smp_num_cpus);

    sum = 0;
    for (i = 0; i < smp_num_cpus; i++) {
        delta = tsc_values[i] - avg;
        if (delta < 0)
            delta = -delta;
        /*
         * We report bigger than 2 microseconds clock differences.
         */
        if (delta > 2*ticks_per_usec) {
            long realdelta;
            if (!buggy) {
                buggy = 1;
                printk("\n");
            }
            /* Convert the tick delta to microseconds for the report. */
            realdelta = div64(delta, ticks_per_usec);
            if (tsc_values[i] < avg)
                realdelta = -realdelta;

            printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
                i, realdelta);
        }

        sum += delta;
    }
    if (!buggy)
        printk("passed.\n");
}
/*
 * AP side of TSC synchronization: mirror of synchronize_tsc_bp().
 * Each AP joins the same NR_LOOPS lockstep rounds, samples its TSC
 * each round and zeroes it in the last one.  Note the APs wait on
 * '== smp_num_cpus' where the BP waits on 'num_cpus-1'.
 */
static void __init synchronize_tsc_ap (void)
{
    int i;

    /*
     * smp_num_cpus is not necessarily known at the time
     * this gets called, so we first wait for the BP to
     * finish SMP initialization:
     */
    while (!atomic_read(&tsc_start_flag)) mb();

    for (i = 0; i < NR_LOOPS; i++) {
        /* Rendezvous with the BP and the other APs. */
        atomic_inc(&tsc_count_start);
        while (atomic_read(&tsc_count_start) != smp_num_cpus) mb();

        /* Sample (and, in the last round, zero) our TSC. */
        rdtscll(tsc_values[smp_processor_id()]);
        if (i == NR_LOOPS-1)
            write_tsc(0, 0);

        /* Second barrier phase: wait until everyone has sampled. */
        atomic_inc(&tsc_count_stop);
        while (atomic_read(&tsc_count_stop) != smp_num_cpus) mb();
    }
}
293 #undef NR_LOOPS
295 static atomic_t init_deasserted;
/*
 * Per-AP rendezvous with the boot processor, called from
 * start_secondary(): wait for INIT deassertion, mark ourselves
 * online, wait for the BP's callout, bring up the local APIC, save
 * our CPU parameters, then signal the BP via cpu_callin_map and
 * synchronize the TSC.
 */
void __init smp_callin(void)
{
    int cpuid, phys_id, i;

    /*
     * If waken up by an INIT in an 82489DX configuration
     * we may get here before an INIT-deassert IPI reaches
     * our local APIC. We have to wait for the IPI or we'll
     * lock up on an APIC access.
     */
    while (!atomic_read(&init_deasserted));

    /*
     * (This works even if the APIC is not enabled.)
     */
    phys_id = GET_APIC_ID(apic_read(APIC_ID));
    cpuid = smp_processor_id();
    if (test_and_set_bit(cpuid, &cpu_online_map)) {
        /* Our CPU number already marked online: fatal inconsistency. */
        printk("huh, phys CPU#%d, CPU#%d already present??\n",
            phys_id, cpuid);
        BUG();
    }
    Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

    /*
     * STARTUP IPIs are fragile beasts as they might sometimes
     * trigger some glue motherboard logic. Complete APIC bus
     * silence for 1 second, this overestimates the time the
     * boot CPU is spending to send the up to 2 STARTUP IPIs
     * by a factor of two. This should be enough.
     */
    /* Poll for the BP's callout: up to 200 x 10ms = 2 seconds. */
    for ( i = 0; i < 200; i++ )
    {
        if ( test_bit(cpuid, &cpu_callout_map) ) break;
        mdelay(10);
    }

    if (!test_bit(cpuid, &cpu_callout_map)) {
        printk("BUG: CPU%d started up but did not get a callout!\n",
            cpuid);
        BUG();
    }

    /*
     * the boot CPU has finished the init stage and is spinning
     * on callin_map until we finish. We are free to set up this
     * CPU, first the APIC. (this is probably redundant on most
     * boards)
     */
    Dprintk("CALLIN, before setup_local_APIC().\n");

    setup_local_APIC();

    /* Local APIC is up; interrupts may now be enabled on this CPU. */
    __sti();

#ifdef CONFIG_MTRR
    /*
     * Must be done before calibration delay is computed
     */
    mtrr_init_secondary_cpu ();
#endif

    Dprintk("Stack at about %p\n",&cpuid);

    /*
     * Save our processor parameters
     */
    smp_store_cpu_info(cpuid);

    if (nmi_watchdog == NMI_LOCAL_APIC)
        setup_apic_nmi_watchdog();

    /*
     * Allow the master to continue.
     */
    set_bit(cpuid, &cpu_callin_map);

    /*
     * Synchronize the TSC with the BP
     */
    synchronize_tsc_ap();
}
382 static int cpucount;
384 /*
385 * Activate a secondary processor.
386 */
387 void __init start_secondary(void)
388 {
389 unsigned int cpu = cpucount;
390 /* 6 bytes suitable for passing to LIDT instruction. */
391 unsigned char idt_load[6];
393 extern void cpu_init(void);
395 set_current(idle_task[cpu]);
397 /*
398 * Dont put anything before smp_callin(), SMP
399 * booting is too fragile that we want to limit the
400 * things done here to the most necessary things.
401 */
402 cpu_init();
403 smp_callin();
405 while (!atomic_read(&smp_commenced))
406 rep_nop();
408 /*
409 * At this point, boot CPU has fully initialised the IDT. It is
410 * now safe to make ourselves a private copy.
411 */
412 idt_tables[cpu] = xmalloc_array(idt_entry_t, IDT_ENTRIES);
413 memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*8);
414 *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*8)-1;
415 *(unsigned long *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
416 __asm__ __volatile__ ( "lidt %0" : "=m" (idt_load) );
418 /*
419 * low-memory mappings have been cleared, flush them from the local TLBs
420 * too.
421 */
422 local_flush_tlb();
424 startup_cpu_idle_loop();
426 BUG();
427 }
429 extern struct {
430 unsigned long esp, ss;
431 } stack_start;
433 /* which physical APIC ID maps to which logical CPU number */
434 volatile int physical_apicid_2_cpu[MAX_APICID];
435 /* which logical CPU number maps to which physical APIC ID */
436 volatile int cpu_2_physical_apicid[NR_CPUS];
438 /* which logical APIC ID maps to which logical CPU number */
439 volatile int logical_apicid_2_cpu[MAX_APICID];
440 /* which logical CPU number maps to which logical APIC ID */
441 volatile int cpu_2_logical_apicid[NR_CPUS];
443 static inline void init_cpu_to_apicid(void)
444 /* Initialize all maps between cpu number and apicids */
445 {
446 int apicid, cpu;
448 for (apicid = 0; apicid < MAX_APICID; apicid++) {
449 physical_apicid_2_cpu[apicid] = -1;
450 logical_apicid_2_cpu[apicid] = -1;
451 }
452 for (cpu = 0; cpu < NR_CPUS; cpu++) {
453 cpu_2_physical_apicid[cpu] = -1;
454 cpu_2_logical_apicid[cpu] = -1;
455 }
456 }
458 static inline void map_cpu_to_boot_apicid(int cpu, int apicid)
459 /*
460 * set up a mapping between cpu and apicid. Uses logical apicids for multiquad,
461 * else physical apic ids
462 */
463 {
464 physical_apicid_2_cpu[apicid] = cpu;
465 cpu_2_physical_apicid[cpu] = apicid;
466 }
468 static inline void unmap_cpu_to_boot_apicid(int cpu, int apicid)
469 /*
470 * undo a mapping between cpu and apicid. Uses logical apicids for multiquad,
471 * else physical apic ids
472 */
473 {
474 physical_apicid_2_cpu[apicid] = -1;
475 cpu_2_physical_apicid[cpu] = -1;
476 }
#if APIC_DEBUG
/*
 * Debug helper: read the ID, VERSION and SPIV registers of a remote
 * local APIC using the "remote read" IPI delivery mode, printing each
 * value (or "failed" on timeout/invalid status).
 */
static inline void inquire_remote_apic(int apicid)
{
    int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
    char *names[] = { "ID", "VERSION", "SPIV" };
    int timeout, status;

    printk("Inquiring remote APIC #%d...\n", apicid);

    for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
        printk("... APIC #%d %s: ", apicid, names[i]);

        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /* Program the destination, then issue the remote-read IPI. */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
        apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

        /* Poll the remote-read status (up to 1000 x 100us = 100ms). */
        timeout = 0;
        do {
            udelay(100);
            status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
        } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

        switch (status) {
        case APIC_ICR_RR_VALID:
            /* The remote register value is latched in APIC_RRR. */
            status = apic_read(APIC_RRR);
            printk("%08x\n", status);
            break;
        default:
            printk("failed\n");
        }
    }
}
#endif
/*
 * Wake a secondary CPU via the INIT / INIT-deassert / STARTUP IPI
 * sequence.  'phys_apicid' addresses the target local APIC;
 * 'start_eip' is the (page-aligned) physical entry point -- the
 * trampoline.  Returns 0 on apparent success, non-zero if the APIC
 * reported a send or acceptance error.
 */
static int wakeup_secondary_via_INIT(int phys_apicid, unsigned long start_eip)
{
    unsigned long send_status = 0, accept_status = 0;
    int maxlvt, timeout, num_starts, j;

    Dprintk("Asserting INIT.\n");

    /*
     * Turn INIT on target chip
     */
    apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

    /*
     * Send IPI
     */
    apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
                | APIC_DM_INIT);

    /* Poll the ICR delivery-status bit (up to 1000 x 100us). */
    Dprintk("Waiting for send to finish...\n");
    timeout = 0;
    do {
        Dprintk("+");
        udelay(100);
        send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
    } while (send_status && (timeout++ < 1000));

    mdelay(10);

    Dprintk("Deasserting INIT.\n");

    /* Target chip */
    apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

    /* Send IPI */
    apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

    Dprintk("Waiting for send to finish...\n");
    timeout = 0;
    do {
        Dprintk("+");
        udelay(100);
        send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
    } while (send_status && (timeout++ < 1000));

    /* APs blocked in smp_callin() may now safely touch their APIC. */
    atomic_set(&init_deasserted, 1);

    /*
     * Should we send STARTUP IPIs ?
     *
     * Determine this based on the APIC version.
     * If we don't have an integrated APIC, don't send the STARTUP IPIs.
     */
    if (APIC_INTEGRATED(apic_version[phys_apicid]))
        num_starts = 2;
    else
        num_starts = 0;

    /*
     * Run STARTUP IPI loop.
     */
    Dprintk("#startup loops: %d.\n", num_starts);

    maxlvt = get_maxlvt();

    for (j = 1; j <= num_starts; j++) {
        Dprintk("Sending STARTUP #%d.\n",j);
        /* Clear any stale APIC errors before sending. */
        apic_read_around(APIC_SPIV);
        apic_write(APIC_ESR, 0);
        apic_read(APIC_ESR);
        Dprintk("After apic_write.\n");

        /*
         * STARTUP IPI
         */

        /* Target chip */
        apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /* Boot on the stack */
        /* Kick the second */
        /* The STARTUP vector is the physical page number of the
         * trampoline (start_eip >> 12). */
        apic_write_around(APIC_ICR, APIC_DM_STARTUP
                    | (start_eip >> 12));

        /*
         * Give the other CPU some time to accept the IPI.
         */
        udelay(300);

        Dprintk("Startup point 1.\n");

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
        do {
            Dprintk("+");
            udelay(100);
            send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
        } while (send_status && (timeout++ < 1000));

        /*
         * Give the other CPU some time to accept the IPI.
         */
        udelay(200);
        /*
         * Due to the Pentium erratum 3AP.
         */
        if (maxlvt > 3) {
            apic_read_around(APIC_SPIV);
            apic_write(APIC_ESR, 0);
        }
        accept_status = (apic_read(APIC_ESR) & 0xEF);
        if (send_status || accept_status)
            break;
    }
    Dprintk("After Startup.\n");

    if (send_status)
        printk("APIC never delivered???\n");
    if (accept_status)
        printk("APIC delivery error (%lx).\n", accept_status);

    return (send_status | accept_status);
}
641 extern unsigned long cpu_initialized;
/*
 * Boot one secondary CPU.
 *
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 *
 * Creates the idle domain for the new CPU, sets up the trampoline,
 * boot stack and warm-reset vector, fires the INIT/STARTUP sequence,
 * then waits up to 5s for the AP to call in.  On failure all
 * bookkeeping is rolled back.
 */
static void __init do_boot_cpu (int apicid)
{
    struct domain *idle;
    struct exec_domain *ed;
    unsigned long boot_error = 0;
    int timeout, cpu;
    unsigned long start_eip;
    void *stack;

    cpu = ++cpucount;

    /* Each AP runs its own idle domain. */
    if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
        panic("failed 'createdomain' for CPU %d", cpu);

    ed = idle->exec_domain[0];

    set_bit(DF_IDLETASK, &idle->d_flags);

    ed->mm.pagetable = mk_pagetable(__pa(idle_pg_table));

    map_cpu_to_boot_apicid(cpu, apicid);

    idle_task[cpu] = ed;

    /* start_eip had better be page-aligned! */
    start_eip = setup_trampoline();

    /* So we see what's up. */
    printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);

    /* Allocate and publish the AP's boot stack via stack_start. */
    stack = (void *)alloc_xenheap_pages(1);
#if defined(__i386__)
    stack_start.esp = __pa(stack) + STACK_SIZE - STACK_RESERVED;
#elif defined(__x86_64__)
    stack_start.esp = (unsigned long)stack + STACK_SIZE - STACK_RESERVED;
#endif

    /* Debug build: detect stack overflow by setting up a guard page. */
    memguard_guard_range(stack, PAGE_SIZE);

    /*
     * This grunge runs the startup process for
     * the targeted processor.
     */

    atomic_set(&init_deasserted, 0);

    Dprintk("Setting warm reset code and vector.\n");

    /* CMOS shutdown status = warm reset; AP resumes via the vector
     * written to the trampoline locations below. */
    CMOS_WRITE(0xa, 0xf);
    local_flush_tlb();
    Dprintk("1.\n");
    *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
    Dprintk("2.\n");
    *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
    Dprintk("3.\n");

    /*
     * Be paranoid about clearing APIC errors.
     */
    if ( APIC_INTEGRATED(apic_version[apicid]) )
    {
        apic_read_around(APIC_SPIV);
        apic_write(APIC_ESR, 0);
        apic_read(APIC_ESR);
    }

    /*
     * Status is now clean
     */
    boot_error = 0;

    /*
     * Starting actual IPI sequence...
     */
    boot_error = wakeup_secondary_via_INIT(apicid, start_eip);

    if (!boot_error) {
        /*
         * allow APs to start initializing.
         */
        Dprintk("Before Callout %d.\n", cpu);
        set_bit(cpu, &cpu_callout_map);
        Dprintk("After Callout %d.\n", cpu);

        /*
         * Wait 5s total for a response
         */
        for (timeout = 0; timeout < 50000; timeout++) {
            if (test_bit(cpu, &cpu_callin_map))
                break; /* It has booted */
            udelay(100);
        }

        if (test_bit(cpu, &cpu_callin_map)) {
            /* number CPUs logically, starting from 1 (BSP is 0) */
            printk("CPU%d has booted.\n", cpu);
        } else {
            boot_error= 1;
            /* The trampoline stamps 0xA5A5A5A5 over its own first
             * word once it starts executing. */
            if (*((volatile unsigned int *)phys_to_virt(start_eip))
                    == 0xA5A5A5A5)
                /* trampoline started but...? */
                printk("Stuck ??\n");
            else
                /* trampoline code not run */
                printk("Not responding.\n");
#if APIC_DEBUG
            inquire_remote_apic(apicid);
#endif
        }
    }
    if (boot_error) {
        /* Try to put things back the way they were before ... */
        unmap_cpu_to_boot_apicid(cpu, apicid);
        clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
        clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
        clear_bit(cpu, &cpu_online_map); /* was set in smp_callin() */
        cpucount--;
    }
}
770 /*
771 * Cycle through the processors sending APIC IPIs to boot each.
772 */
774 static int boot_cpu_logical_apicid;
775 /* Where the IO area was mapped on multiquad, always 0 otherwise */
776 void *xquad_portio = NULL;
/*
 * Cycle through the processors sending APIC IPIs to boot each.
 *
 * Boot-processor SMP bring-up: record boot CPU info, validate the MP
 * configuration and local APIC (falling back to uniprocessor dummy
 * APIC emulation when either is missing or SMP is disabled), boot
 * each present secondary via do_boot_cpu(), then set up the IO-APIC,
 * APIC timers and TSC synchronisation.
 */
void __init smp_boot_cpus(void)
{
    int apicid, bit;

#ifdef CONFIG_MTRR
    /* Must be done before other processors booted */
    mtrr_init_boot_cpu ();
#endif
    /* Initialize the logical to physical CPU number mapping */
    init_cpu_to_apicid();

    /*
     * Setup boot CPU information
     */
    smp_store_cpu_info(0); /* Final full version of the data */
    printk("CPU%d booted\n", 0);

    /*
     * We have the boot CPU online for sure.
     */
    set_bit(0, &cpu_online_map);
    boot_cpu_logical_apicid = logical_smp_processor_id();
    map_cpu_to_boot_apicid(0, boot_cpu_apicid);

    /*
     * If we couldn't find an SMP configuration at boot time,
     * get out of here now!
     */
    if (!smp_found_config) {
        printk("SMP motherboard not detected.\n");
        io_apic_irqs = 0;
        cpu_online_map = phys_cpu_present_map = 1;
        smp_num_cpus = 1;
        if (APIC_init_uniprocessor())
            printk("Local APIC not detected."
                " Using dummy APIC emulation.\n");
        goto smp_done;
    }

    /*
     * Should not be necessary because the MP table should list the boot
     * CPU too, but we do it for the sake of robustness anyway.
     */
    if (!test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map)) {
        printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
            boot_cpu_physical_apicid);
        phys_cpu_present_map |= (1 << hard_smp_processor_id());
    }

    /*
     * If we couldn't find a local APIC, then get out of here now!
     */
    if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
        !test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability)) {
        printk("BIOS bug, local APIC #%d not detected!...\n",
            boot_cpu_physical_apicid);
        printk("... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
        io_apic_irqs = 0;
        cpu_online_map = phys_cpu_present_map = 1;
        smp_num_cpus = 1;
        goto smp_done;
    }

    verify_local_APIC();

    /*
     * If SMP should be disabled, then really disable it!
     */
    if (!max_cpus) {
        smp_found_config = 0;
        printk("SMP mode deactivated, forcing use of dummy APIC emulation.\n");
        io_apic_irqs = 0;
        cpu_online_map = phys_cpu_present_map = 1;
        smp_num_cpus = 1;
        goto smp_done;
    }

    connect_bsp_APIC();
    setup_local_APIC();

    /* Sanity check: our APIC ID must match what the tables told us. */
    if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid)
        BUG();

    /*
     * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
     *
     * In clustered apic mode, phys_cpu_present_map is a constructed thus:
     * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
     * clustered apic ID.
     */
    Dprintk("CPU present map: %lx\n", phys_cpu_present_map);

    for (bit = 0; bit < NR_CPUS; bit++) {
        apicid = cpu_present_to_apicid(bit);
        /*
         * Don't even attempt to start the boot CPU!
         */
        if (apicid == boot_cpu_apicid)
            continue;

        /*
         * Don't start hyperthreads if option noht requested.
         */
        if (opt_noht && (apicid & (ht_per_core - 1)))
            continue;

        if (!(phys_cpu_present_map & (1 << bit)))
            continue;
        /* Respect the configured CPU limit (-1 means no limit). */
        if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
            continue;

        do_boot_cpu(apicid);

        /*
         * Make sure we unmap all failed CPUs
         */
        if ((boot_apicid_to_cpu(apicid) == -1) &&
            (phys_cpu_present_map & (1 << bit)))
            printk("CPU #%d not responding - cannot use it.\n",
                apicid);
    }

    /*
     * Cleanup possible dangling ends...
     */
    /*
     * Install writable page 0 entry to set BIOS data area.
     */
    local_flush_tlb();

    /*
     * Paranoid: Set warm reset code and vector here back
     * to default values.
     */
    CMOS_WRITE(0, 0xf);

    *((volatile long *) phys_to_virt(0x467)) = 0;

    if (!cpucount) {
        printk("Error: only one processor found.\n");
    } else {
        printk("Total of %d processors activated.\n", cpucount+1);
    }
    smp_num_cpus = cpucount + 1;

    Dprintk("Boot done.\n");

    /*
     * Here we can be sure that there is an IO-APIC in the system. Let's
     * go and set it up:
     */
    if ( nr_ioapics ) setup_IO_APIC();

    /* Set up all local APIC timers in the system. */
    setup_APIC_clocks();

    /* Synchronize the TSC with the AP(s). */
    if ( cpucount ) synchronize_tsc_bp();

 smp_done:
    ;
}
941 #endif /* CONFIG_SMP */