
view linux-2.6.9-xen-sparse/arch/xen/i386/kernel/smpboot.c @ 3329:37cb59b9ddfd

bitkeeper revision 1.1159.1.484 (41c1a3e20WEWxhNQDQK6avGv36pVEA)

Remove per vcpu misdirect virq support.
author cl349@arcadians.cl.cam.ac.uk
date Thu Dec 16 15:04:02 2004 +0000 (2004-12-16)
parents 545088ce72b5
1 /*
2 * x86 SMP booting functions
3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
5 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
6 *
7 * Much of the core SMP work is based on previous work by Thomas Radke, to
8 * whom a great many thanks are extended.
9 *
10 * Thanks to Intel for making available several different Pentium,
11 * Pentium Pro and Pentium-II/Xeon MP machines.
12 * Original development of Linux SMP code supported by Caldera.
13 *
14 * This code is released under the GNU General Public License version 2 or
15 * later.
16 *
17 * Fixes
18 * Felix Koop : NR_CPUS used properly
19 * Jose Renau : Handle single CPU case.
20 * Alan Cox : By repeated request 8) - Total BogoMIPS report.
21 * Greg Wright : Fix for kernel stacks panic.
22 * Erich Boleyn : MP v1.4 and additional changes.
23 * Matthias Sattler : Changes for 2.1 kernel map.
24 * Michel Lespinasse : Changes for 2.1 kernel map.
25 * Michael Chastain : Change trampoline.S to gnu as.
26 * Alan Cox : Dumb bug: 'B' step PPro's are fine
27 * Ingo Molnar : Added APIC timers, based on code
28 * from Jose Renau
29 * Ingo Molnar : various cleanups and rewrites
30 * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
31 * Maciej W. Rozycki : Bits for genuine 82489DX APICs
32 * Martin J. Bligh : Added support for multi-quad systems
33 * Dave Jones : Report invalid combinations of Athlon CPUs.
34 * Rusty Russell : Hacked into shape for new "hotplug" boot process. */
36 #include <linux/module.h>
37 #include <linux/config.h>
38 #include <linux/init.h>
39 #include <linux/kernel.h>
41 #include <linux/mm.h>
42 #include <linux/sched.h>
43 #include <linux/kernel_stat.h>
44 #include <linux/smp_lock.h>
45 #include <linux/irq.h>
46 #include <linux/bootmem.h>
48 #include <linux/delay.h>
49 #include <linux/mc146818rtc.h>
50 #include <asm/tlbflush.h>
51 #include <asm/desc.h>
52 #include <asm/arch_hooks.h>
54 #if 1
55 #define Dprintk(args...)
56 #else
57 #include <mach_apic.h>
58 #endif
59 #include <mach_wakecpu.h>
60 #include <smpboot_hooks.h>
62 /* Set if we find a B stepping CPU */
63 static int __initdata smp_b_stepping;
65 /* Number of siblings per CPU package */
66 int smp_num_siblings = 1;
67 int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
69 /* bitmap of online cpus */
70 cpumask_t cpu_online_map;
72 static cpumask_t cpu_callin_map;
73 cpumask_t cpu_callout_map;
74 static cpumask_t smp_commenced_mask;
76 /* Per CPU bogomips and other parameters */
77 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
79 u8 x86_cpu_to_apicid[NR_CPUS] =
80 { [0 ... NR_CPUS-1] = 0xff };
81 EXPORT_SYMBOL(x86_cpu_to_apicid);
83 /* Set when the idlers are all forked */
84 int smp_threads_ready;
86 #if 0
87 /*
88 * Trampoline 80x86 program as an array.
89 */
91 extern unsigned char trampoline_data [];
92 extern unsigned char trampoline_end [];
93 static unsigned char *trampoline_base;
94 static int trampoline_exec;
96 /*
97 * Currently trivial. Write the real->protected mode
98 * bootstrap into the page concerned. The caller
99 * has made sure it's suitably aligned.
100 */
102 static unsigned long __init setup_trampoline(void)
103 {
104 memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
105 return virt_to_phys(trampoline_base);
106 }
107 #endif
109 /*
110 * We are called very early to get the low memory for the
111 * SMP bootup trampoline page.
112 */
113 void __init smp_alloc_memory(void)
114 {
115 #if 1
116 int cpu;
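/*
 * Note: in this Xen configuration there is no real-mode trampoline
 * (see the disabled branch below); instead one low page is reserved
 * per CPU to hold its GDT, which is later handed to the hypervisor
 * when the VCPU is booted in do_boot_cpu().
 */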
118 for (cpu = 1; cpu < NR_CPUS; cpu++) {
119 cpu_gdt_descr[cpu].address = (unsigned long)
120 alloc_bootmem_low_pages(PAGE_SIZE);
121 /* XXX free unused pages later */
122 }
123 #else
124 trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
125 /*
126 * Has to be in very low memory so we can execute
127 * real-mode AP code.
128 */
129 if (__pa(trampoline_base) >= 0x9F000)
130 BUG();
131 /*
132 * Make the SMP trampoline executable:
133 */
134 trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
135 #endif
136 }
138 /*
139 * The bootstrap kernel entry code has set these up. Save them for
140 * a given CPU
141 */
143 static void __init smp_store_cpu_info(int id)
144 {
145 struct cpuinfo_x86 *c = cpu_data + id;
147 *c = boot_cpu_data;
148 if (id!=0)
149 identify_cpu(c);
150 /*
151 * Mask B, Pentium, but not Pentium MMX
152 */
153 if (c->x86_vendor == X86_VENDOR_INTEL &&
154 c->x86 == 5 &&
155 c->x86_mask >= 1 && c->x86_mask <= 4 &&
156 c->x86_model <= 3)
157 /*
158 * Remember we have B step Pentia with bugs
159 */
160 smp_b_stepping = 1;
162 /*
163 * Certain Athlons might work (for various values of 'work') in SMP
164 * but they are not certified as MP capable.
165 */
166 if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
168 /* Athlon 660/661 is valid. */
169 if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
170 goto valid_k7;
172 /* Duron 670 is valid */
173 if ((c->x86_model==7) && (c->x86_mask==0))
174 goto valid_k7;
176 /*
177 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
178 * It's worth noting that the A5 stepping (662) of some Athlon XP's
179 * have the MP bit set.
180 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
181 */
182 if (((c->x86_model==6) && (c->x86_mask>=2)) ||
183 ((c->x86_model==7) && (c->x86_mask>=1)) ||
184 (c->x86_model> 7))
185 if (cpu_has_mp)
186 goto valid_k7;
188 /* If we get here, it's not a certified SMP capable AMD system. */
189 tainted |= TAINT_UNSAFE_SMP;
190 }
192 valid_k7:
193 ;
194 }
196 #if 0
197 /*
198 * TSC synchronization.
199 *
200 * We first check whether all CPUs have their TSC's synchronized,
201 * then we print a warning if not, and always resync.
202 */
204 static atomic_t tsc_start_flag = ATOMIC_INIT(0);
205 static atomic_t tsc_count_start = ATOMIC_INIT(0);
206 static atomic_t tsc_count_stop = ATOMIC_INIT(0);
207 static unsigned long long tsc_values[NR_CPUS];
209 #define NR_LOOPS 5
211 static void __init synchronize_tsc_bp (void)
212 {
213 int i;
214 unsigned long long t0;
215 unsigned long long sum, avg;
216 long long delta;
217 unsigned long one_usec;
218 int buggy = 0;
220 printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
222 /* convert from kcyc/sec to cyc/usec */
223 one_usec = cpu_khz / 1000;
225 atomic_set(&tsc_start_flag, 1);
226 wmb();
228 /*
229 * We loop a few times to get a primed instruction cache,
230 * then the last pass is more or less synchronized and
231 * the BP and APs set their cycle counters to zero all at
232 * once. This reduces the chance of having random offsets
233 * between the processors, and guarantees that the maximum
234 * delay between the cycle counters is never bigger than
235 * the latency of information-passing (cachelines) between
236 * two CPUs.
237 */
238 for (i = 0; i < NR_LOOPS; i++) {
239 /*
240 * all APs synchronize but they loop on '== num_cpus'
241 */
242 while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
243 mb();
244 atomic_set(&tsc_count_stop, 0);
245 wmb();
246 /*
247 * this lets the APs save their current TSC:
248 */
249 atomic_inc(&tsc_count_start);
251 rdtscll(tsc_values[smp_processor_id()]);
252 /*
253 * We clear the TSC in the last loop:
254 */
255 if (i == NR_LOOPS-1)
256 write_tsc(0, 0);
258 /*
259 * Wait for all APs to leave the synchronization point:
260 */
261 while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
262 mb();
263 atomic_set(&tsc_count_start, 0);
264 wmb();
265 atomic_inc(&tsc_count_stop);
266 }
268 sum = 0;
269 for (i = 0; i < NR_CPUS; i++) {
270 if (cpu_isset(i, cpu_callout_map)) {
271 t0 = tsc_values[i];
272 sum += t0;
273 }
274 }
275 avg = sum;
276 do_div(avg, num_booting_cpus());
278 sum = 0;
279 for (i = 0; i < NR_CPUS; i++) {
280 if (!cpu_isset(i, cpu_callout_map))
281 continue;
282 delta = tsc_values[i] - avg;
283 if (delta < 0)
284 delta = -delta;
285 /*
286 * We report bigger than 2 microseconds clock differences.
287 */
288 if (delta > 2*one_usec) {
289 long realdelta;
290 if (!buggy) {
291 buggy = 1;
292 printk("\n");
293 }
294 realdelta = delta;
295 do_div(realdelta, one_usec);
296 if (tsc_values[i] < avg)
297 realdelta = -realdelta;
299 printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
300 }
302 sum += delta;
303 }
304 if (!buggy)
305 printk("passed.\n");
306 }
308 static void __init synchronize_tsc_ap (void)
309 {
310 int i;
312 /*
313 * Not every cpu is online at the time
314 * this gets called, so we first wait for the BP to
315 * finish SMP initialization:
316 */
317 while (!atomic_read(&tsc_start_flag)) mb();
319 for (i = 0; i < NR_LOOPS; i++) {
320 atomic_inc(&tsc_count_start);
321 while (atomic_read(&tsc_count_start) != num_booting_cpus())
322 mb();
324 rdtscll(tsc_values[smp_processor_id()]);
325 if (i == NR_LOOPS-1)
326 write_tsc(0, 0);
328 atomic_inc(&tsc_count_stop);
329 while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
330 }
331 }
332 #undef NR_LOOPS
333 #endif
335 extern void calibrate_delay(void);
337 static atomic_t init_deasserted;
339 void __init smp_callin(void)
340 {
341 int cpuid, phys_id;
342 unsigned long timeout;
344 #if 0
345 /*
346 * If woken up by an INIT in an 82489DX configuration
347 * we may get here before an INIT-deassert IPI reaches
348 * our local APIC. We have to wait for the IPI or we'll
349 * lock up on an APIC access.
350 */
351 wait_for_init_deassert(&init_deasserted);
352 #endif
354 /*
355 * (This works even if the APIC is not enabled.)
356 */
357 phys_id = smp_processor_id();
358 cpuid = smp_processor_id();
359 if (cpu_isset(cpuid, cpu_callin_map)) {
360 printk("huh, phys CPU#%d, CPU#%d already present??\n",
361 phys_id, cpuid);
362 BUG();
363 }
364 Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
366 /*
367 * STARTUP IPIs are fragile beasts as they might sometimes
368 * trigger some glue motherboard logic. Complete APIC bus
369 * silence for 1 second, this overestimates the time the
370 * boot CPU is spending to send the up to 2 STARTUP IPIs
371 * by a factor of two. This should be enough.
372 */
374 /*
375 * Waiting 2s total for startup (udelay is not yet working)
376 */
377 timeout = jiffies + 2*HZ;
378 while (time_before(jiffies, timeout)) {
379 /*
380 * Has the boot CPU finished its STARTUP sequence?
381 */
382 if (cpu_isset(cpuid, cpu_callout_map))
383 break;
384 rep_nop();
385 }
387 if (!time_before(jiffies, timeout)) {
388 printk("BUG: CPU%d started up but did not get a callout!\n",
389 cpuid);
390 BUG();
391 }
393 #if 0
394 /*
395 * the boot CPU has finished the init stage and is spinning
396 * on callin_map until we finish. We are free to set up this
397 * CPU, first the APIC. (this is probably redundant on most
398 * boards)
399 */
401 Dprintk("CALLIN, before setup_local_APIC().\n");
402 smp_callin_clear_local_apic();
403 setup_local_APIC();
404 #endif
405 map_cpu_to_logical_apicid();
407 local_irq_enable();
409 /*
410 * Get our bogomips.
411 */
412 calibrate_delay();
413 Dprintk("Stack at about %p\n",&cpuid);
415 /*
416 * Save our processor parameters
417 */
418 smp_store_cpu_info(cpuid);
420 #if 0
421 disable_APIC_timer();
422 #endif
423 local_irq_disable();
424 /*
425 * Allow the master to continue.
426 */
427 cpu_set(cpuid, cpu_callin_map);
429 #if 0
430 /*
431 * Synchronize the TSC with the BP
432 */
433 if (cpu_has_tsc && cpu_khz)
434 synchronize_tsc_ap();
435 #endif
436 }
438 int cpucount;
440 extern int cpu_idle(void);
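/*
 * Per-CPU debug interrupt: a do-nothing handler bound to Xen's
 * VIRQ_DEBUG virtual IRQ, so the hypervisor's debug virq has a
 * registered handler on each processor (see local_setup_debug below).
 */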
443 static irqreturn_t local_debug_interrupt(int irq, void *dev_id,
444 struct pt_regs *regs)
445 {
447 return IRQ_HANDLED;
448 }
450 static struct irqaction local_irq_debug = {
451 local_debug_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "ldebug",
452 NULL, NULL
453 };
455 void local_setup_debug(void)
456 {
457 (void)setup_irq(bind_virq_to_irq(VIRQ_DEBUG), &local_irq_debug);
458 }
461 extern void local_setup_timer(void);
463 /*
464 * Activate a secondary processor.
465 */
466 int __init start_secondary(void *unused)
467 {
468 /*
469 * Don't put anything before smp_callin(); SMP
470 * booting is so fragile that we want to limit the
471 * things done here to the most necessary things.
472 */
473 cpu_init();
474 smp_callin();
475 while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
476 rep_nop();
477 local_setup_timer();
478 local_setup_debug(); /* XXX */
479 smp_intr_init();
480 local_irq_enable();
481 /*
482 * low-memory mappings have been cleared, flush them from
483 * the local TLBs too.
484 */
485 local_flush_tlb();
486 cpu_set(smp_processor_id(), cpu_online_map);
487 wmb();
488 if (0) {
489 char *msg2 = "delay2\n";
490 int timeout;
491 for (timeout = 0; timeout < 50000; timeout++) {
492 udelay(1000);
493 if (timeout == 2000) {
494 (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg2), msg2);
495 timeout = 0;
496 }
497 }
498 }
499 return cpu_idle();
500 }
502 /*
503 * Everything has been set up for the secondary
504 * CPUs - they just need to reload everything
505 * from the task structure
506 * This function must not return.
507 */
508 void __init initialize_secondary(void)
509 {
510 /*
511 * We don't actually need to load the full TSS,
512 * basically just the stack pointer and the eip.
513 */
515 asm volatile(
516 "movl %0,%%esp\n\t"
517 "jmp *%1"
518 :
519 :"r" (current->thread.esp),"r" (current->thread.eip));
520 }
522 extern struct {
523 void * esp;
524 unsigned short ss;
525 } stack_start;
527 #ifdef CONFIG_NUMA
529 /* which logical CPUs are on which nodes */
530 cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
531 { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
532 /* which node each logical CPU is on */
533 int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
534 EXPORT_SYMBOL(cpu_2_node);
536 /* set up a mapping between cpu and node. */
537 static inline void map_cpu_to_node(int cpu, int node)
538 {
539 printk("Mapping cpu %d to node %d\n", cpu, node);
540 cpu_set(cpu, node_2_cpu_mask[node]);
541 cpu_2_node[cpu] = node;
542 }
544 /* undo a mapping between cpu and node. */
545 static inline void unmap_cpu_to_node(int cpu)
546 {
547 int node;
549 printk("Unmapping cpu %d from all nodes\n", cpu);
550 for (node = 0; node < MAX_NUMNODES; node ++)
551 cpu_clear(cpu, node_2_cpu_mask[node]);
552 cpu_2_node[cpu] = 0;
553 }
554 #else /* !CONFIG_NUMA */
556 #define map_cpu_to_node(cpu, node) ({})
557 #define unmap_cpu_to_node(cpu) ({})
559 #endif /* CONFIG_NUMA */
561 u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
563 void map_cpu_to_logical_apicid(void)
564 {
565 int cpu = smp_processor_id();
566 int apicid = smp_processor_id();
568 cpu_2_logical_apicid[cpu] = apicid;
569 map_cpu_to_node(cpu, apicid_to_node(apicid));
570 }
572 void unmap_cpu_to_logical_apicid(int cpu)
573 {
574 cpu_2_logical_apicid[cpu] = BAD_APICID;
575 unmap_cpu_to_node(cpu);
576 }
578 #if APIC_DEBUG
579 static inline void __inquire_remote_apic(int apicid)
580 {
581 int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
582 char *names[] = { "ID", "VERSION", "SPIV" };
583 int timeout, status;
585 printk("Inquiring remote APIC #%d...\n", apicid);
587 for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
588 printk("... APIC #%d %s: ", apicid, names[i]);
590 /*
591 * Wait for idle.
592 */
593 apic_wait_icr_idle();
595 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
596 apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
598 timeout = 0;
599 do {
600 udelay(100);
601 status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
602 } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
604 switch (status) {
605 case APIC_ICR_RR_VALID:
606 status = apic_read(APIC_RRR);
607 printk("%08x\n", status);
608 break;
609 default:
610 printk("failed\n");
611 }
612 }
613 }
614 #endif
616 #if 0
617 #ifdef WAKE_SECONDARY_VIA_NMI
618 /*
619 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
620 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
621 * won't ... remember to clear down the APIC, etc later.
622 */
623 static int __init
624 wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
625 {
626 unsigned long send_status = 0, accept_status = 0;
627 int timeout, maxlvt;
629 /* Target chip */
630 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
632 /* Boot on the stack */
633 /* Kick the second */
634 apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
636 Dprintk("Waiting for send to finish...\n");
637 timeout = 0;
638 do {
639 Dprintk("+");
640 udelay(100);
641 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
642 } while (send_status && (timeout++ < 1000));
644 /*
645 * Give the other CPU some time to accept the IPI.
646 */
647 udelay(200);
648 /*
649 * Due to the Pentium erratum 3AP.
650 */
651 maxlvt = get_maxlvt();
652 if (maxlvt > 3) {
653 apic_read_around(APIC_SPIV);
654 apic_write(APIC_ESR, 0);
655 }
656 accept_status = (apic_read(APIC_ESR) & 0xEF);
657 Dprintk("NMI sent.\n");
659 if (send_status)
660 printk("APIC never delivered???\n");
661 if (accept_status)
662 printk("APIC delivery error (%lx).\n", accept_status);
664 return (send_status | accept_status);
665 }
666 #endif /* WAKE_SECONDARY_VIA_NMI */
668 #ifdef WAKE_SECONDARY_VIA_INIT
669 static int __init
670 wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
671 {
672 unsigned long send_status = 0, accept_status = 0;
673 int maxlvt, timeout, num_starts, j;
675 /*
676 * Be paranoid about clearing APIC errors.
677 */
678 if (APIC_INTEGRATED(apic_version[phys_apicid])) {
679 apic_read_around(APIC_SPIV);
680 apic_write(APIC_ESR, 0);
681 apic_read(APIC_ESR);
682 }
684 Dprintk("Asserting INIT.\n");
686 /*
687 * Turn INIT on target chip
688 */
689 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
691 /*
692 * Send IPI
693 */
694 apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
695 | APIC_DM_INIT);
697 Dprintk("Waiting for send to finish...\n");
698 timeout = 0;
699 do {
700 Dprintk("+");
701 udelay(100);
702 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
703 } while (send_status && (timeout++ < 1000));
705 mdelay(10);
707 Dprintk("Deasserting INIT.\n");
709 /* Target chip */
710 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
712 /* Send IPI */
713 apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
715 Dprintk("Waiting for send to finish...\n");
716 timeout = 0;
717 do {
718 Dprintk("+");
719 udelay(100);
720 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
721 } while (send_status && (timeout++ < 1000));
723 atomic_set(&init_deasserted, 1);
725 /*
726 * Should we send STARTUP IPIs ?
727 *
728 * Determine this based on the APIC version.
729 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
730 */
731 if (APIC_INTEGRATED(apic_version[phys_apicid]))
732 num_starts = 2;
733 else
734 num_starts = 0;
736 /*
737 * Run STARTUP IPI loop.
738 */
739 Dprintk("#startup loops: %d.\n", num_starts);
741 maxlvt = get_maxlvt();
743 for (j = 1; j <= num_starts; j++) {
744 Dprintk("Sending STARTUP #%d.\n",j);
745 apic_read_around(APIC_SPIV);
746 apic_write(APIC_ESR, 0);
747 apic_read(APIC_ESR);
748 Dprintk("After apic_write.\n");
750 /*
751 * STARTUP IPI
752 */
754 /* Target chip */
755 apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
757 /* Boot on the stack */
758 /* Kick the second */
759 apic_write_around(APIC_ICR, APIC_DM_STARTUP
760 | (start_eip >> 12));
762 /*
763 * Give the other CPU some time to accept the IPI.
764 */
765 udelay(300);
767 Dprintk("Startup point 1.\n");
769 Dprintk("Waiting for send to finish...\n");
770 timeout = 0;
771 do {
772 Dprintk("+");
773 udelay(100);
774 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
775 } while (send_status && (timeout++ < 1000));
777 /*
778 * Give the other CPU some time to accept the IPI.
779 */
780 udelay(200);
781 /*
782 * Due to the Pentium erratum 3AP.
783 */
784 if (maxlvt > 3) {
785 apic_read_around(APIC_SPIV);
786 apic_write(APIC_ESR, 0);
787 }
788 accept_status = (apic_read(APIC_ESR) & 0xEF);
789 if (send_status || accept_status)
790 break;
791 }
792 Dprintk("After Startup.\n");
794 if (send_status)
795 printk("APIC never delivered???\n");
796 if (accept_status)
797 printk("APIC delivery error (%lx).\n", accept_status);
799 return (send_status | accept_status);
800 }
801 #endif /* WAKE_SECONDARY_VIA_INIT */
802 #endif
804 extern cpumask_t cpu_initialized;
806 static int __init do_boot_cpu(int apicid)
807 /*
808 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
809 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
810 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
811 */
812 {
813 struct task_struct *idle;
814 unsigned long boot_error;
815 int timeout, cpu;
816 unsigned long start_eip;
817 #if 0
818 unsigned short nmi_high = 0, nmi_low = 0;
819 #endif
820 full_execution_context_t ctxt;
821 extern void startup_32_smp(void);
822 extern void hypervisor_callback(void);
823 extern void failsafe_callback(void);
824 extern int smp_trap_init(trap_info_t *);
825 int i;
827 cpu = ++cpucount;
828 /*
829 * We can't use kernel_thread since we must avoid to
830 * reschedule the child.
831 */
832 idle = fork_idle(cpu);
833 if (IS_ERR(idle))
834 panic("failed fork for CPU %d", cpu);
835 idle->thread.eip = (unsigned long) start_secondary;
836 /* start_eip had better be page-aligned! */
837 start_eip = (unsigned long)startup_32_smp;
839 /* So we see what's up */
840 printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
841 /* Stack for startup_32 can be just as for start_secondary onwards */
842 stack_start.esp = (void *) idle->thread.esp;
844 irq_ctx_init(cpu);
846 /*
847 * This grunge runs the startup process for
848 * the targeted processor.
849 */
851 atomic_set(&init_deasserted, 0);
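/*
 * Xen path below: rather than sending INIT/STARTUP IPIs, build a
 * full_execution_context_t describing the new VCPU's initial state
 * (segment selectors, EIP/ESP, trap table, GDT frames, page-table
 * base and callback entry points) and hand it to the hypervisor via
 * HYPERVISOR_boot_vcpu().
 */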
853 #if 1
854 if (cpu_gdt_descr[0].size > PAGE_SIZE)
855 BUG();
856 cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
857 memcpy((void *)cpu_gdt_descr[cpu].address,
858 (void *)cpu_gdt_descr[0].address, cpu_gdt_descr[0].size);
859 memset((char *)cpu_gdt_descr[cpu].address +
860 FIRST_RESERVED_GDT_ENTRY * 8, 0,
861 NR_RESERVED_GDT_ENTRIES * 8);
863 memset(&ctxt, 0, sizeof(ctxt));
865 ctxt.cpu_ctxt.ds = __USER_DS;
866 ctxt.cpu_ctxt.es = __USER_DS;
867 ctxt.cpu_ctxt.fs = 0;
868 ctxt.cpu_ctxt.gs = 0;
869 ctxt.cpu_ctxt.ss = __KERNEL_DS;
870 ctxt.cpu_ctxt.cs = __KERNEL_CS;
871 ctxt.cpu_ctxt.eip = start_eip;
872 ctxt.cpu_ctxt.esp = idle->thread.esp;
873 ctxt.cpu_ctxt.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);
875 /* FPU is set up to default initial state. */
876 memset(ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
878 /* Virtual IDT is empty at start-of-day. */
879 for ( i = 0; i < 256; i++ )
880 {
881 ctxt.trap_ctxt[i].vector = i;
882 ctxt.trap_ctxt[i].cs = FLAT_GUESTOS_CS;
883 }
884 ctxt.fast_trap_idx = smp_trap_init(ctxt.trap_ctxt);
886 /* No LDT. */
887 ctxt.ldt_ents = 0;
889 {
890 unsigned long va;
891 int f;
893 for (va = cpu_gdt_descr[cpu].address, f = 0;
894 va < cpu_gdt_descr[cpu].address + cpu_gdt_descr[cpu].size;
895 va += PAGE_SIZE, f++) {
896 ctxt.gdt_frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
897 make_page_readonly((void *)va);
898 }
899 ctxt.gdt_ents = cpu_gdt_descr[cpu].size / 8;
900 flush_page_update_queue();
901 }
903 /* Ring 1 stack is the initial stack. */
904 ctxt.guestos_ss = __KERNEL_DS;
905 ctxt.guestos_esp = idle->thread.esp;
907 /* Callback handlers. */
908 ctxt.event_callback_cs = __KERNEL_CS;
909 ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
910 ctxt.failsafe_callback_cs = __KERNEL_CS;
911 ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
913 ctxt.pt_base = (unsigned long)virt_to_machine(swapper_pg_dir);
915 boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
917 if (!boot_error) {
918 /*
919 * allow APs to start initializing.
920 */
921 Dprintk("Before Callout %d.\n", cpu);
922 cpu_set(cpu, cpu_callout_map);
923 Dprintk("After Callout %d.\n", cpu);
925 /*
926 * Wait 5s total for a response
927 */
928 for (timeout = 0; timeout < 50000; timeout++) {
929 if (cpu_isset(cpu, cpu_callin_map))
930 break; /* It has booted */
931 udelay(100);
932 }
934 if (cpu_isset(cpu, cpu_callin_map)) {
935 /* number CPUs logically, starting from 1 (BSP is 0) */
936 Dprintk("OK.\n");
937 printk("CPU%d: ", cpu);
938 print_cpu_info(&cpu_data[cpu]);
939 Dprintk("CPU has booted.\n");
940 } else {
941 boot_error= 1;
942 }
943 }
944 x86_cpu_to_apicid[cpu] = apicid;
945 if (boot_error) {
946 /* Try to put things back the way they were before ... */
947 unmap_cpu_to_logical_apicid(cpu);
948 cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
949 cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
950 cpucount--;
951 }
953 #else
954 Dprintk("Setting warm reset code and vector.\n");
956 store_NMI_vector(&nmi_high, &nmi_low);
958 smpboot_setup_warm_reset_vector(start_eip);
960 /*
961 * Starting actual IPI sequence...
962 */
963 boot_error = wakeup_secondary_cpu(apicid, start_eip);
965 if (!boot_error) {
966 /*
967 * allow APs to start initializing.
968 */
969 Dprintk("Before Callout %d.\n", cpu);
970 cpu_set(cpu, cpu_callout_map);
971 Dprintk("After Callout %d.\n", cpu);
973 /*
974 * Wait 5s total for a response
975 */
976 for (timeout = 0; timeout < 50000; timeout++) {
977 if (cpu_isset(cpu, cpu_callin_map))
978 break; /* It has booted */
979 udelay(100);
980 }
982 if (cpu_isset(cpu, cpu_callin_map)) {
983 /* number CPUs logically, starting from 1 (BSP is 0) */
984 Dprintk("OK.\n");
985 printk("CPU%d: ", cpu);
986 print_cpu_info(&cpu_data[cpu]);
987 Dprintk("CPU has booted.\n");
988 } else {
989 boot_error= 1;
990 if (*((volatile unsigned char *)trampoline_base)
991 == 0xA5)
992 /* trampoline started but...? */
993 printk("Stuck ??\n");
994 else
995 /* trampoline code not run */
996 printk("Not responding.\n");
997 inquire_remote_apic(apicid);
998 }
999 }
1000 x86_cpu_to_apicid[cpu] = apicid;
1001 if (boot_error) {
1002 /* Try to put things back the way they were before ... */
1003 unmap_cpu_to_logical_apicid(cpu);
1004 cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
1005 cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
1006 cpucount--;
1007 }
1009 /* mark "stuck" area as not stuck */
1010 *((volatile unsigned long *)trampoline_base) = 0;
1011 #endif
1013 return boot_error;
1014 }
1016 cycles_t cacheflush_time;
1017 unsigned long cache_decay_ticks;
1019 static void smp_tune_scheduling (void)
1020 {
1021 unsigned long cachesize; /* kB */
1022 unsigned long bandwidth = 350; /* MB/s */
1023 /*
1024 * Rough estimation for SMP scheduling, this is the number of
1025 * cycles it takes for a fully memory-limited process to flush
1026 * the SMP-local cache.
1028 * (For a P5 this pretty much means we will choose another idle
1029 * CPU almost always at wakeup time (this is due to the small
1030 * L1 cache), on PIIs it's around 50-100 usecs, depending on
1031 * the cache size)
1032 */
1034 if (!cpu_khz) {
1035 /*
1036 * this basically disables processor-affinity
1037 * scheduling on SMP without a TSC.
1038 */
1039 cacheflush_time = 0;
1040 return;
1041 } else {
1042 cachesize = boot_cpu_data.x86_cache_size;
1043 if (cachesize == -1) {
1044 cachesize = 16; /* Pentiums, 2x8kB cache */
1045 bandwidth = 100;
1046 }
1048 cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
1049 }
1051 cache_decay_ticks = (long)cacheflush_time/cpu_khz + 1;
1053 printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
1054 (long)cacheflush_time/(cpu_khz/1000),
1055 ((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
1056 printk("task migration cache decay timeout: %ld msecs.\n",
1057 cache_decay_ticks);
1058 }
1060 /*
1061 * Cycle through the processors sending APIC IPIs to boot each.
1062 */
1064 #if 0
1065 static int boot_cpu_logical_apicid;
1066 #endif
1067 /* Where the IO area was mapped on multiquad, always 0 otherwise */
1068 void *xquad_portio;
1070 cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
1072 static void __init smp_boot_cpus(unsigned int max_cpus)
1073 {
1074 int cpu, kicked;
1075 unsigned long bogosum = 0;
1076 #if 0
1077 int apicid, bit;
1078 #endif
1080 /*
1081 * Setup boot CPU information
1082 */
1083 smp_store_cpu_info(0); /* Final full version of the data */
1084 printk("CPU%d: ", 0);
1085 print_cpu_info(&cpu_data[0]);
1087 #if 0
1088 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
1089 boot_cpu_logical_apicid = logical_smp_processor_id();
1090 x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
1091 #else
1092 // boot_cpu_physical_apicid = 0;
1093 // boot_cpu_logical_apicid = 0;
1094 x86_cpu_to_apicid[0] = 0;
1095 #endif
1097 current_thread_info()->cpu = 0;
1098 smp_tune_scheduling();
1099 cpus_clear(cpu_sibling_map[0]);
1100 cpu_set(0, cpu_sibling_map[0]);
1102 /*
1103 * If we couldn't find an SMP configuration at boot time,
1104 * get out of here now!
1105 */
1106 if (!smp_found_config /* && !acpi_lapic) */) {
1107 printk(KERN_NOTICE "SMP motherboard not detected.\n");
1108 smpboot_clear_io_apic_irqs();
1109 #if 0
1110 phys_cpu_present_map = physid_mask_of_physid(0);
1111 if (APIC_init_uniprocessor())
1112 printk(KERN_NOTICE "Local APIC not detected."
1113 " Using dummy APIC emulation.\n");
1114 #endif
1115 map_cpu_to_logical_apicid();
1116 return;
1117 }
1119 #if 0
1120 /*
1121 * Should not be necessary because the MP table should list the boot
1122 * CPU too, but we do it for the sake of robustness anyway.
1123 * Makes no sense to do this check in clustered apic mode, so skip it
1124 */
1125 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
1126 printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
1127 boot_cpu_physical_apicid);
1128 physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1129 }
1131 /*
1132 * If we couldn't find a local APIC, then get out of here now!
1133 */
1134 if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
1135 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1136 boot_cpu_physical_apicid);
1137 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
1138 smpboot_clear_io_apic_irqs();
1139 phys_cpu_present_map = physid_mask_of_physid(0);
1140 return;
1141 }
1143 verify_local_APIC();
1144 #endif
1146 /*
1147 * If SMP should be disabled, then really disable it!
1148 */
1149 if (!max_cpus) {
1150 HYPERVISOR_shared_info->n_vcpu = 1;
1151 printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
1152 smpboot_clear_io_apic_irqs();
1153 #if 0
1154 phys_cpu_present_map = physid_mask_of_physid(0);
1155 #endif
1156 return;
1157 }
1159 smp_intr_init();
1161 #if 0
1162 connect_bsp_APIC();
1163 setup_local_APIC();
1164 #endif
1165 map_cpu_to_logical_apicid();
1166 #if 0
1169 setup_portio_remap();
1171 /*
1172 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
1174 * In clustered apic mode, phys_cpu_present_map is a constructed thus:
1175 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
1176 * clustered apic ID.
1177 */
1178 Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
1179 #endif
1180 Dprintk("CPU present map: %lx\n",
1181 (1UL << HYPERVISOR_shared_info->n_vcpu) - 1);
1183 kicked = 1;
1184 for (cpu = 1; kicked < NR_CPUS &&
1185 cpu < HYPERVISOR_shared_info->n_vcpu; cpu++) {
1186 if (max_cpus <= cpucount+1)
1187 continue;
1189 if (do_boot_cpu(cpu))
1190 printk("CPU #%d not responding - cannot use it.\n",
1191 cpu);
1192 else
1193 ++kicked;
1194 }
1196 #if 0
1197 /*
1198 * Cleanup possible dangling ends...
1199 */
1200 smpboot_restore_warm_reset_vector();
1201 #endif
1203 /*
1204 * Allow the user to impress friends.
1205 */
1206 Dprintk("Before bogomips.\n");
1207 for (cpu = 0; cpu < NR_CPUS; cpu++)
1208 if (cpu_isset(cpu, cpu_callout_map))
1209 bogosum += cpu_data[cpu].loops_per_jiffy;
1210 printk(KERN_INFO
1211 "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
1212 cpucount+1,
1213 bogosum/(500000/HZ),
1214 (bogosum/(5000/HZ))%100);
1216 Dprintk("Before bogocount - setting activated=1.\n");
1218 if (smp_b_stepping)
1219 printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
1221 /*
1222 * Don't taint if we are running SMP kernel on a single non-MP
1223 * approved Athlon
1224 */
1225 if (tainted & TAINT_UNSAFE_SMP) {
1226 if (cpucount)
1227 printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
1228 else
1229 tainted &= ~TAINT_UNSAFE_SMP;
1230 }
1232 Dprintk("Boot done.\n");
1234 /*
1235 * construct cpu_sibling_map[], so that we can tell sibling CPUs
1236 * efficiently.
1237 */
1238 for (cpu = 0; cpu < NR_CPUS; cpu++)
1239 cpus_clear(cpu_sibling_map[cpu]);
1241 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1242 int siblings = 0;
1243 int i;
1244 if (!cpu_isset(cpu, cpu_callout_map))
1245 continue;
1247 if (smp_num_siblings > 1) {
1248 for (i = 0; i < NR_CPUS; i++) {
1249 if (!cpu_isset(i, cpu_callout_map))
1250 continue;
1251 if (phys_proc_id[cpu] == phys_proc_id[i]) {
1252 siblings++;
1253 cpu_set(i, cpu_sibling_map[cpu]);
1254 }
1255 }
1256 } else {
1257 siblings++;
1258 cpu_set(cpu, cpu_sibling_map[cpu]);
1259 }
1261 if (siblings != smp_num_siblings)
1262 printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
1263 }
1265 #if 0
1266 if (nmi_watchdog == NMI_LOCAL_APIC)
1267 check_nmi_watchdog();
1269 smpboot_setup_io_apic();
1271 setup_boot_APIC_clock();
1273 /*
1274 * Synchronize the TSC with the AP
1275 */
1276 if (cpu_has_tsc && cpucount && cpu_khz)
1277 synchronize_tsc_bp();
1278 #endif
1279 }
1281 /* These are wrappers to interface to the new boot process. Someone
1282 who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
1283 void __init smp_prepare_cpus(unsigned int max_cpus)
1284 {
1285 smp_boot_cpus(max_cpus);
1286 }
1288 void __devinit smp_prepare_boot_cpu(void)
1289 {
1290 cpu_set(smp_processor_id(), cpu_online_map);
1291 cpu_set(smp_processor_id(), cpu_callout_map);
1292 }
1294 int __devinit __cpu_up(unsigned int cpu)
1295 {
1296 /* This only works at boot for x86. See "rewrite" above. */
1297 if (cpu_isset(cpu, smp_commenced_mask)) {
1298 local_irq_enable();
1299 return -ENOSYS;
1300 }
1302 /* In case one didn't come up */
1303 if (!cpu_isset(cpu, cpu_callin_map)) {
1304 local_irq_enable();
1305 return -EIO;
1306 }
1308 local_irq_enable();
1309 /* Unleash the CPU! */
1310 cpu_set(cpu, smp_commenced_mask);
1311 while (!cpu_isset(cpu, cpu_online_map))
1312 mb();
1313 return 0;
1314 }
1316 void __init smp_cpus_done(unsigned int max_cpus)
1317 {
1318 #if 1
1319 #else
1320 #ifdef CONFIG_X86_IO_APIC
1321 setup_ioapic_dest();
1322 #endif
1323 zap_low_mappings();
1324 /*
1325 * Disable executability of the SMP trampoline:
1326 */
1327 set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
1328 #endif
1329 }
1331 extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
1333 static struct irqaction reschedule_irq = {
1334 smp_reschedule_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "reschedule",
1335 NULL, NULL
1336 };
1338 extern irqreturn_t smp_invalidate_interrupt(int, void *, struct pt_regs *);
1340 static struct irqaction invalidate_irq = {
1341 smp_invalidate_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "invalidate",
1342 NULL, NULL
1343 };
1345 extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
1347 static struct irqaction call_function_irq = {
1348 smp_call_function_interrupt, SA_INTERRUPT, CPU_MASK_NONE,
1349 "call_function", NULL, NULL
1350 };
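/*
 * Bind the Xen event-channel IPIs (reschedule, TLB invalidate and
 * call-function) on the calling CPU to the irqaction handlers above.
 */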
1352 void __init smp_intr_init(void)
1353 {
1355 (void)setup_irq(
1356 bind_ipi_on_cpu_to_irq(smp_processor_id(), RESCHEDULE_VECTOR),
1357 &reschedule_irq);
1358 (void)setup_irq(
1359 bind_ipi_on_cpu_to_irq(smp_processor_id(), INVALIDATE_TLB_VECTOR),
1360 &invalidate_irq);
1361 (void)setup_irq(
1362 bind_ipi_on_cpu_to_irq(smp_processor_id(), CALL_FUNCTION_VECTOR),
1363 &call_function_irq);
1364 }