debuggers.hg

view xen/arch/x86/setup.c @ 3651:f98fa170a9f4

bitkeeper revision 1.1159.238.2 (4200caf6iFnj85XmiFNAz7VursMGUw)

Slab caches for things allocated only at initialization seem to be
overkill. This patch replaces them with the previous typesafe
allocator.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> (authored)
Signed-off-by: ian.pratt@cl.cam.ac.uk
author iap10@labyrinth.cl.cam.ac.uk
date Wed Feb 02 12:43:34 2005 +0000 (2005-02-02)
parents fec8b1778268
children 49103eca5edb
line source
2 #include <xen/config.h>
3 #include <xen/init.h>
4 #include <xen/lib.h>
5 #include <xen/sched.h>
6 #include <xen/pci.h>
7 #include <xen/serial.h>
8 #include <xen/softirq.h>
9 #include <xen/acpi.h>
10 #include <xen/console.h>
11 #include <xen/trace.h>
12 #include <xen/multiboot.h>
13 #include <asm/bitops.h>
14 #include <asm/smp.h>
15 #include <asm/processor.h>
16 #include <asm/mpspec.h>
17 #include <asm/apic.h>
18 #include <asm/desc.h>
19 #include <asm/domain_page.h>
20 #include <asm/pdb.h>
21 #include <asm/shadow.h>
22 #include <asm/e820.h>
24 /* opt_dom0_mem: Kilobytes of memory allocated to domain 0. */
25 static unsigned int opt_dom0_mem = 16000;
26 integer_param("dom0_mem", opt_dom0_mem);
28 /*
29 * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
30 * pfn_info table and allocation bitmap.
31 */
32 static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
/* Only the x86-64 build accepts a command-line override for the heap size. */
33 #if defined(__x86_64__)
34 integer_param("xenheap_megabytes", opt_xenheap_megabytes);
35 #endif
37 /* opt_noht: If true, Hyperthreading is ignored. */
38 int opt_noht = 0;
39 boolean_param("noht", opt_noht);
41 /* opt_noacpi: If true, ACPI tables are not parsed. */
42 static int opt_noacpi = 0;
43 boolean_param("noacpi", opt_noacpi);
45 /* opt_nosmp: If true, secondary processors are ignored. */
46 static int opt_nosmp = 0;
47 boolean_param("nosmp", opt_nosmp);
49 /* opt_ignorebiostables: If true, ACPI and MP tables are ignored. */
50 /* NB. This flag implies 'nosmp' and 'noacpi'. */
51 static int opt_ignorebiostables = 0;
52 boolean_param("ignorebiostables", opt_ignorebiostables);
54 /* opt_watchdog: If true, run a watchdog NMI on each processor. */
55 static int opt_watchdog = 0;
56 boolean_param("watchdog", opt_watchdog);
/* Non-zero during early boot; cleared in __start_xen() once allocators work. */
58 int early_boot = 1;
/* Physical address one past the end of the Xen heap; set in __start_xen(). */
60 unsigned long xenheap_phys_end;
/* Subsystem initialisers defined in other translation units. */
62 extern void arch_init_memory(void);
63 extern void init_IRQ(void);
64 extern void trap_init(void);
65 extern void time_init(void);
66 extern void ac_timer_init(void);
67 extern void initialize_keytable();
68 extern int do_timer_lists_from_pit;
70 char ignore_irq13; /* set if exception 16 works */
/* Boot processor description; filled in by identify_cpu() in start_of_day(). */
71 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
/* CR4 feature bits enabled on every CPU; x86-64 additionally runs with PAE. */
73 #if defined(__x86_64__)
74 unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
75 #else
76 unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
77 #endif
78 EXPORT_SYMBOL(mmu_cr4_features);
/* Bitmask of APs not yet idle; the boot CPU spins on it in start_of_day(). */
80 unsigned long wait_init_idle;
/* Per-CPU idle task pointers; slot 0 is the statically-built idle domain. */
82 struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
/* ACPI support is disabled unless the interpreter was configured in. */
84 #ifdef CONFIG_ACPI_INTERPRETER
85 int acpi_disabled = 0;
86 #else
87 int acpi_disabled = 1;
88 #endif
89 EXPORT_SYMBOL(acpi_disabled);
/* Physical package ID and hyperthread ID, indexed by logical CPU number;
   written by identify_cpu()/init_intel(). */
91 int phys_proc_id[NR_CPUS];
92 int logical_proc_id[NR_CPUS];
94 #if defined(__i386__)
96 /* Standard macro to see if a specific flag is changeable */
97 static inline int flag_is_changeable_p(u32 flag)
98 {
99 u32 f1, f2;
/*
 * Read EFLAGS, XOR in the requested bit, write it back, then read it
 * again.  If the bit "stuck", the flag is software-changeable.  The
 * final popfl restores the original EFLAGS.
 */
101 asm("pushfl\n\t"
102 "pushfl\n\t"
103 "popl %0\n\t"
104 "movl %0,%1\n\t"
105 "xorl %2,%0\n\t"
106 "pushl %0\n\t"
107 "popfl\n\t"
108 "pushfl\n\t"
109 "popl %0\n\t"
110 "popfl\n\t"
111 : "=&r" (f1), "=&r" (f2)
112 : "ir" (flag));
/* Non-zero iff the flag bit differs between the two EFLAGS reads. */
114 return ((f1^f2) & flag) != 0;
115 }
117 /* Probe for the CPUID instruction */
118 static int __init have_cpuid_p(void)
119 {
/* A toggleable EFLAGS.ID bit indicates that CPUID is implemented. */
120 return flag_is_changeable_p(X86_EFLAGS_ID);
121 }
123 #elif defined(__x86_64__)
/* Every x86-64 processor implements CPUID; no runtime probe needed. */
125 #define have_cpuid_p() (1)
127 #endif
129 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
130 {
131 char *v = c->x86_vendor_id;
133 if (!strcmp(v, "GenuineIntel"))
134 c->x86_vendor = X86_VENDOR_INTEL;
135 else if (!strcmp(v, "AuthenticAMD"))
136 c->x86_vendor = X86_VENDOR_AMD;
137 else if (!strcmp(v, "CyrixInstead"))
138 c->x86_vendor = X86_VENDOR_CYRIX;
139 else if (!strcmp(v, "UMC UMC UMC "))
140 c->x86_vendor = X86_VENDOR_UMC;
141 else if (!strcmp(v, "CentaurHauls"))
142 c->x86_vendor = X86_VENDOR_CENTAUR;
143 else if (!strcmp(v, "NexGenDriven"))
144 c->x86_vendor = X86_VENDOR_NEXGEN;
145 else if (!strcmp(v, "RiseRiseRise"))
146 c->x86_vendor = X86_VENDOR_RISE;
147 else if (!strcmp(v, "GenuineTMx86") ||
148 !strcmp(v, "TransmetaCPU"))
149 c->x86_vendor = X86_VENDOR_TRANSMETA;
150 else
151 c->x86_vendor = X86_VENDOR_UNKNOWN;
152 }
/*
 * Intel-specific CPU setup: work around the Pentium Pro SEP erratum
 * and, on SMP builds, record Hyper-Threading sibling information.
 */
154 static void __init init_intel(struct cpuinfo_x86 *c)
155 {
156 /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
157 if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
158 clear_bit(X86_FEATURE_SEP, &c->x86_capability);
160 #ifdef CONFIG_SMP
161 if ( test_bit(X86_FEATURE_HT, &c->x86_capability) )
162 {
163 u32 eax, ebx, ecx, edx;
164 int initial_apic_id, siblings, cpu = smp_processor_id();
/* CPUID leaf 1: EBX bits 23:16 give the logical processor count per package. */
166 cpuid(1, &eax, &ebx, &ecx, &edx);
167 ht_per_core = siblings = (ebx & 0xff0000) >> 16;
/* "noht" hides the HT feature bit (ht_per_core above is still recorded). */
169 if ( opt_noht )
170 clear_bit(X86_FEATURE_HT, &c->x86_capability[0]);
172 if ( siblings <= 1 )
173 {
174 printk(KERN_INFO "CPU#%d: Hyper-Threading is disabled\n", cpu);
175 }
176 else if ( siblings > 2 )
177 {
178 panic("We don't support more than two logical CPUs per package!");
179 }
180 else
181 {
/*
 * Exactly two siblings: the package ID is the initial APIC ID
 * (EBX bits 31:24) shifted down by one, and the thread ID its LSB.
 */
182 initial_apic_id = ebx >> 24 & 0xff;
183 phys_proc_id[cpu] = initial_apic_id >> 1;
184 logical_proc_id[cpu] = initial_apic_id & 1;
185 printk(KERN_INFO "CPU#%d: Physical ID: %d, Logical ID: %d\n",
186 cpu, phys_proc_id[cpu], logical_proc_id[cpu]);
187 }
188 }
189 #endif
191 #ifdef CONFIG_VMX
192 start_vmx();
193 #endif
195 }
197 static void __init init_amd(struct cpuinfo_x86 *c)
198 {
199 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
200 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
201 clear_bit(0*32+31, &c->x86_capability);
203 switch(c->x86)
204 {
205 case 5:
206 panic("AMD K6 is not supported.\n");
207 case 6: /* An Athlon/Duron. We can trust the BIOS probably */
208 break;
209 }
210 }
212 /*
213 * This does the hard work of actually picking apart the CPU stuff...
214 */
215 void __init identify_cpu(struct cpuinfo_x86 *c)
216 {
217 int junk, i, cpu = smp_processor_id();
218 u32 xlvl, tfms;
/* Default IDs for non-HT parts; init_intel() overrides them if HT is found. */
220 phys_proc_id[cpu] = cpu;
221 logical_proc_id[cpu] = 0;
223 c->x86_vendor = X86_VENDOR_UNKNOWN;
224 c->cpuid_level = -1; /* CPUID not detected */
225 c->x86_model = c->x86_mask = 0; /* So far unknown... */
226 c->x86_vendor_id[0] = '\0'; /* Unset */
227 memset(&c->x86_capability, 0, sizeof c->x86_capability);
229 if ( !have_cpuid_p() )
230 panic("Ancient processors not supported\n");
232 /* Get vendor name */
/* Leaf 0: EAX = max standard leaf; vendor string arrives in EBX,EDX,ECX,
   hence the 0/8/4 byte offsets below. */
233 cpuid(0x00000000, &c->cpuid_level,
234 (int *)&c->x86_vendor_id[0],
235 (int *)&c->x86_vendor_id[8],
236 (int *)&c->x86_vendor_id[4]);
238 get_cpu_vendor(c);
240 if ( c->cpuid_level == 0 )
241 panic("Decrepit CPUID not supported\n");
/* Leaf 1: EAX packs family/model/stepping; EDX is the standard feature word. */
243 cpuid(0x00000001, &tfms, &junk, &junk,
244 &c->x86_capability[0]);
245 c->x86 = (tfms >> 8) & 15;
246 c->x86_model = (tfms >> 4) & 15;
247 c->x86_mask = tfms & 15;
249 /* AMD-defined flags: level 0x80000001 */
/* The 0xffff0000 mask check rejects CPUs that echo garbage for
   unimplemented extended leaves. */
250 xlvl = cpuid_eax(0x80000000);
251 if ( (xlvl & 0xffff0000) == 0x80000000 ) {
252 if ( xlvl >= 0x80000001 )
253 c->x86_capability[1] = cpuid_edx(0x80000001);
254 }
256 /* Transmeta-defined flags: level 0x80860001 */
257 xlvl = cpuid_eax(0x80860000);
258 if ( (xlvl & 0xffff0000) == 0x80860000 ) {
259 if ( xlvl >= 0x80860001 )
260 c->x86_capability[2] = cpuid_edx(0x80860001);
261 }
263 printk("CPU%d: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
264 smp_processor_id(),
265 c->x86_capability[0],
266 c->x86_capability[1],
267 c->x86_capability[2],
268 c->x86_vendor);
/* Vendor-specific fixups; unknown/Centaur parts need none. */
270 switch ( c->x86_vendor ) {
271 case X86_VENDOR_INTEL:
272 init_intel(c);
273 break;
274 case X86_VENDOR_AMD:
275 init_amd(c);
276 break;
277 case X86_VENDOR_UNKNOWN: /* Connectix Virtual PC reports this */
278 break;
279 case X86_VENDOR_CENTAUR:
280 break;
281 default:
282 printk("Unknown CPU identifier (%d): continuing anyway, "
283 "but might fail.\n", c->x86_vendor);
284 }
286 printk("CPU caps: %08x %08x %08x %08x\n",
287 c->x86_capability[0],
288 c->x86_capability[1],
289 c->x86_capability[2],
290 c->x86_capability[3]);
292 /*
293 * On SMP, boot_cpu_data holds the common feature set between
294 * all CPUs; so make sure that we indicate which features are
295 * common between the CPUs. The first time this routine gets
296 * executed, c == &boot_cpu_data.
297 */
298 if ( c != &boot_cpu_data ) {
299 /* AND the already accumulated flags with these */
300 for ( i = 0 ; i < NCAPINTS ; i++ )
301 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
302 }
303 }
/* Bitmask of CPUs that have completed cpu_init(); guards double entry. */
306 unsigned long cpu_initialized;
/*
 * Per-CPU bring-up: load GDT/IDT, initialise the per-CPU TSS and LDT,
 * clear the debug registers and install the idle page tables.  Only
 * implemented for i386 in this tree (see the XXX below).
 */
307 void __init cpu_init(void)
308 {
309 #if defined(__i386__) /* XXX */
310 int nr = smp_processor_id();
311 struct tss_struct * t = &init_tss[nr];
313 if ( test_and_set_bit(nr, &cpu_initialized) )
314 panic("CPU#%d already initialized!!!\n", nr);
315 printk("Initializing CPU#%d\n", nr);
/* Invalid I/O bitmap offset plus an all-ones bitmap: every port access
   from unprivileged code faults by default. */
317 t->bitmap = IOBMP_INVALID_OFFSET;
318 memset(t->io_bitmap, ~0, sizeof(t->io_bitmap));
320 /* Set up GDT and IDT. */
321 SET_GDT_ENTRIES(current, DEFAULT_GDT_ENTRIES);
322 SET_GDT_ADDRESS(current, DEFAULT_GDT_ADDRESS);
323 __asm__ __volatile__("lgdt %0": "=m" (*current->mm.gdt));
324 __asm__ __volatile__("lidt %0": "=m" (idt_descr));
326 /* No nested task. */
/* Mask clears EFLAGS.NT (bit 14). */
327 __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
329 /* Ensure FPU gets initialised for each domain. */
330 stts();
332 /* Set up and load the per-CPU TSS and LDT. */
333 t->ss0 = __HYPERVISOR_DS;
334 t->esp0 = get_stack_top();
335 set_tss_desc(nr,t);
336 load_TR(nr);
/* Null LDT selector: no LDT until a guest installs one. */
337 __asm__ __volatile__("lldt %%ax"::"a" (0));
339 /* Clear all 6 debug registers. */
340 #define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
341 CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
342 #undef CD
344 /* Install correct page table. */
345 write_ptbase(&current->mm);
347 init_idle_task();
348 #endif
349 }
351 static void __init do_initcalls(void)
352 {
353 initcall_t *call;
354 for ( call = &__initcall_start; call < &__initcall_end; call++ )
355 (*call)();
356 }
/* Lowest physical address the PCI layer may allocate from; raised below
   to keep allocations clear of RAM. */
358 unsigned long pci_mem_start = 0x10000000;
/*
 * Second-stage boot: bring up CPUs, interrupts, timers and the
 * scheduler.  Called from __start_xen() once the memory allocators
 * work; the ordering of calls below is significant.
 */
360 static void __init start_of_day(void)
361 {
362 unsigned long low_mem_size;
364 #ifdef MEMORY_GUARD
365 /* Unmap the first page of CPU0's stack. */
366 extern unsigned long cpu0_stack[];
367 memguard_guard_range(cpu0_stack, PAGE_SIZE);
368 #endif
370 open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
372 if ( opt_watchdog )
373 nmi_watchdog = NMI_LOCAL_APIC;
375 sort_exception_tables();
377 arch_do_createdomain(current);
379 /* Tell the PCI layer not to allocate too close to the RAM area.. */
/* Round top of RAM up to a 1MB boundary. */
380 low_mem_size = ((max_page << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
381 if ( low_mem_size > pci_mem_start ) pci_mem_start = low_mem_size;
383 identify_cpu(&boot_cpu_data); /* get CPU type info */
/* Enable fast FXSAVE/FXRSTOR and SSE exception support if present. */
384 if ( cpu_has_fxsr ) set_in_cr4(X86_CR4_OSFXSR);
385 if ( cpu_has_xmm ) set_in_cr4(X86_CR4_OSXMMEXCPT);
386 #ifdef CONFIG_SMP
387 if ( opt_ignorebiostables )
388 {
389 opt_nosmp = 1; /* No SMP without configuration */
390 opt_noacpi = 1; /* ACPI will just confuse matters also */
391 }
392 else
393 {
394 find_smp_config();
395 smp_alloc_memory(); /* trampoline which other CPUs jump at */
396 }
397 #endif
398 paging_init(); /* not much here now, but sets up fixmap */
399 if ( !opt_noacpi )
400 acpi_boot_init();
401 #ifdef CONFIG_SMP
402 if ( smp_found_config )
403 get_smp_config();
404 #endif
405 scheduler_init();
406 init_IRQ(); /* installs simple interrupt wrappers. Starts HZ clock. */
407 trap_init();
408 time_init(); /* installs software handler for HZ clock. */
409 init_apic_mappings(); /* make APICs addressable in our pagetables. */
411 arch_init_memory();
413 #ifndef CONFIG_SMP
414 APIC_init_uniprocessor();
415 #else
416 if ( opt_nosmp )
417 APIC_init_uniprocessor();
418 else
419 smp_boot_cpus();
420 /*
421 * Does loads of stuff, including kicking the local
422 * APIC, and the IO APIC after other CPUs are booted.
423 * Each IRQ is preferably handled by IO-APIC, but
424 * fall thru to 8259A if we have to (but slower).
425 */
426 #endif
/* Interrupts on from here. */
428 __sti();
430 initialize_keytable(); /* call back handling for key codes */
432 serial_init_stage2();
434 #ifdef XEN_DEBUGGER
435 initialize_pdb(); /* pervasive debugger */
436 #endif
/* Without a local APIC, fall back to driving timer lists from the PIT;
   SMP cannot work in that configuration. */
438 if ( !cpu_has_apic )
439 {
440 do_timer_lists_from_pit = 1;
441 if ( smp_num_cpus != 1 )
442 panic("We need local APICs on SMP machines!");
443 }
445 ac_timer_init(); /* init accurate timers */
446 init_xen_time(); /* initialise the time */
447 schedulers_start(); /* start scheduler for each CPU */
449 check_nmi_watchdog();
451 #ifdef CONFIG_PCI
452 pci_init();
453 #endif
454 do_initcalls();
456 #ifdef CONFIG_SMP
/* Wait until every AP has entered its idle loop (each AP clears its own
   bit in wait_init_idle) before declaring the system stable. */
457 wait_init_idle = cpu_online_map;
458 clear_bit(smp_processor_id(), &wait_init_idle);
459 smp_threads_ready = 1;
460 smp_commence(); /* Tell other CPUs that state of the world is stable. */
461 while ( wait_init_idle != 0 )
462 {
463 cpu_relax();
464 barrier();
465 }
466 #endif
468 watchdog_on = 1;
469 }
/*
 * C entry point for the hypervisor, called with the Multiboot info
 * structure from the boot loader.  Parses the command line and memory
 * map, relocates the DOM0 module(s), initialises the memory allocators,
 * runs start_of_day(), constructs domain 0 and finally drops into the
 * idle loop.  Never returns.
 */
471 void __init __start_xen(multiboot_info_t *mbi)
472 {
473 unsigned char *cmdline;
474 module_t *mod = (module_t *)__va(mbi->mods_addr);
475 void *heap_start;
476 unsigned long firsthole_start, nr_pages;
477 unsigned long dom0_memory_start, dom0_memory_end;
478 unsigned long initial_images_start, initial_images_end;
479 struct e820entry e820_raw[E820MAX];
480 int i, e820_raw_nr = 0, bytes = 0;
482 /* Parse the command-line options. */
483 if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
484 cmdline_parse(__va(mbi->cmdline));
486 /* Must do this early -- e.g., spinlocks rely on get_current(). */
487 set_current(&idle0_exec_domain);
489 /* We initialise the serial devices very early so we can get debugging. */
490 serial_init_stage1();
492 init_console();
494 /* Check that we have at least one Multiboot module. */
495 if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
496 {
497 printk("FATAL ERROR: Require at least one Multiboot module.\n");
498 for ( ; ; ) ;
499 }
/* NOTE(review): 32-bit shift -- xenheap_megabytes >= 4096 would overflow
   on i386; presumably never configured that large. */
501 xenheap_phys_end = opt_xenheap_megabytes << 20;
/* Preferred path: full e820-style memory map from the boot loader. */
503 if ( mbi->flags & MBI_MEMMAP )
504 {
/* NOTE(review): signed 'bytes' compared against unsigned mmap_length --
   harmless for sane map sizes, but worth confirming. */
505 while ( bytes < mbi->mmap_length )
506 {
507 memory_map_t *map = __va(mbi->mmap_addr + bytes);
508 e820_raw[e820_raw_nr].addr =
509 ((u64)map->base_addr_high << 32) | (u64)map->base_addr_low;
510 e820_raw[e820_raw_nr].size =
511 ((u64)map->length_high << 32) | (u64)map->length_low;
/* Treat any type beyond the known range as reserved. */
512 e820_raw[e820_raw_nr].type =
513 (map->type > E820_SHARED_PAGE) ? E820_RESERVED : map->type;
514 e820_raw_nr++;
/* Multiboot mmap entries: 'size' excludes the size field itself. */
515 bytes += map->size + 4;
516 }
517 }
/* Fallback: synthesise a two-entry map from the mem_lower/mem_upper
   limits (values are in kilobytes, hence the << 10). */
518 else if ( mbi->flags & MBI_MEMLIMITS )
519 {
520 e820_raw[0].addr = 0;
521 e820_raw[0].size = mbi->mem_lower << 10;
522 e820_raw[0].type = E820_RAM;
523 e820_raw[1].addr = 0x100000;
524 e820_raw[1].size = mbi->mem_upper << 10;
525 e820_raw[1].type = E820_RAM;
526 e820_raw_nr = 2;
527 }
528 else
529 {
530 printk("FATAL ERROR: Bootloader provided no memory information.\n");
531 for ( ; ; ) ;
532 }
534 max_page = init_e820(e820_raw, e820_raw_nr);
536 /* Find the first high-memory RAM hole. */
537 for ( i = 0; i < e820.nr_map; i++ )
538 if ( (e820.map[i].type == E820_RAM) &&
539 (e820.map[i].addr >= 0x100000) )
540 break;
541 firsthole_start = e820.map[i].addr + e820.map[i].size;
543 /* Relocate the Multiboot modules. */
/* Stash the modules contiguously just above the Xen heap. */
544 initial_images_start = xenheap_phys_end;
545 initial_images_end = initial_images_start +
546 (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
547 if ( initial_images_end > firsthole_start )
548 {
549 printk("Not enough memory to stash the DOM0 kernel image.\n");
550 for ( ; ; ) ;
551 }
552 #if defined(__i386__)
553 memmove((void *)initial_images_start, /* use low mapping */
554 (void *)mod[0].mod_start, /* use low mapping */
555 mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
556 #elif defined(__x86_64__)
557 memmove(__va(initial_images_start),
558 __va(mod[0].mod_start),
559 mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
560 #endif
562 /* Initialise boot-time allocator with all RAM situated after modules. */
563 heap_start = memguard_init(&_end);
564 heap_start = __va(init_boot_allocator(__pa(heap_start)));
565 nr_pages = 0;
566 for ( i = 0; i < e820.nr_map; i++ )
567 {
568 if ( e820.map[i].type != E820_RAM )
569 continue;
570 nr_pages += e820.map[i].size >> PAGE_SHIFT;
/* Hand the allocator only RAM above the relocated module images. */
571 if ( (e820.map[i].addr + e820.map[i].size) >= initial_images_end )
572 init_boot_pages((e820.map[i].addr < initial_images_end) ?
573 initial_images_end : e820.map[i].addr,
574 e820.map[i].addr + e820.map[i].size);
575 }
577 printk("System RAM: %luMB (%lukB)\n",
578 nr_pages >> (20 - PAGE_SHIFT),
579 nr_pages << (PAGE_SHIFT - 10));
581 /* Allocate an aligned chunk of RAM for DOM0. */
/* opt_dom0_mem is in kilobytes; << 10 converts to bytes, 4MB-aligned. */
582 dom0_memory_start = alloc_boot_pages(opt_dom0_mem << 10, 4UL << 20);
583 dom0_memory_end = dom0_memory_start + (opt_dom0_mem << 10);
584 if ( dom0_memory_start == 0 )
585 {
586 printk("Not enough memory for DOM0 memory reservation.\n");
587 for ( ; ; ) ;
588 }
590 init_frametable();
592 end_boot_allocator();
/* Hand everything between the end of the Xen image and the heap limit
   to the Xen heap allocator. */
594 init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
595 printk("Xen heap: %luMB (%lukB)\n",
596 (xenheap_phys_end-__pa(heap_start)) >> 20,
597 (xenheap_phys_end-__pa(heap_start)) >> 10);
599 early_boot = 0;
601 /* Initialise the slab allocator. */
602 xmem_cache_init();
603 xmem_cache_sizes_init(max_page);
605 start_of_day();
607 grant_table_init();
609 shadow_mode_init();
611 /* Create initial domain 0. */
612 dom0 = do_createdomain(0, 0);
613 if ( dom0 == NULL )
614 panic("Error creating domain 0\n");
616 set_bit(DF_PRIVILEGED, &dom0->d_flags);
618 /* Grab the DOM0 command line. Skip past the image name. */
619 cmdline = (unsigned char *)(mod[0].string ? __va(mod[0].string) : NULL);
620 if ( cmdline != NULL )
621 {
622 while ( *cmdline == ' ' ) cmdline++;
623 if ( (cmdline = strchr(cmdline, ' ')) != NULL )
624 while ( *cmdline == ' ' ) cmdline++;
625 }
627 /*
628 * We're going to setup domain0 using the module(s) that we stashed safely
629 * above our heap. The second module, if present, is an initrd ramdisk.
630 */
631 if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_end,
632 (char *)initial_images_start,
633 mod[0].mod_end-mod[0].mod_start,
634 (mbi->mods_count == 1) ? 0 :
635 (char *)initial_images_start +
636 (mod[1].mod_start-mod[0].mod_start),
637 (mbi->mods_count == 1) ? 0 :
638 mod[mbi->mods_count-1].mod_end - mod[1].mod_start,
639 cmdline) != 0)
640 panic("Could not set up DOM0 guest OS\n");
642 /* The stash space for the initial kernel image can now be freed up. */
643 init_domheap_pages(initial_images_start, initial_images_end);
645 scrub_heap_pages();
647 init_trace_bufs();
649 /* Give up the VGA console if DOM0 is configured to grab it. */
650 console_endboot(cmdline && strstr(cmdline, "tty0"));
652 domain_unpause_by_systemcontroller(current->domain);
653 domain_unpause_by_systemcontroller(dom0);
/* Never returns: this CPU becomes the idle task. */
654 startup_cpu_idle_loop();
655 }