debuggers.hg

view xen/arch/x86/setup.c @ 3705:4294cfa9fad3

bitkeeper revision 1.1159.212.95 (4204aa0ee0re5Xx1zWrJ9ejxzgRs3w)

Various cleanups. Remove PDB pending simpler GDB stub and/or NetBSD debugger.
Force emacs mode to appropriate tabbing in various files.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Sat Feb 05 11:12:14 2005 +0000 (2005-02-05)
parents 32d29625d39b
children d93748c50893
line source
2 #include <xen/config.h>
3 #include <xen/init.h>
4 #include <xen/lib.h>
5 #include <xen/sched.h>
6 #include <xen/pci.h>
7 #include <xen/serial.h>
8 #include <xen/softirq.h>
9 #include <xen/acpi.h>
10 #include <xen/console.h>
11 #include <xen/trace.h>
12 #include <xen/multiboot.h>
13 #include <asm/bitops.h>
14 #include <asm/smp.h>
15 #include <asm/processor.h>
16 #include <asm/mpspec.h>
17 #include <asm/apic.h>
18 #include <asm/desc.h>
19 #include <asm/domain_page.h>
20 #include <asm/shadow.h>
21 #include <asm/e820.h>
23 /* opt_dom0_mem: Kilobytes of memory allocated to domain 0. */
24 static unsigned int opt_dom0_mem = 16000;
25 integer_param("dom0_mem", opt_dom0_mem);
27 /*
28 * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
29 * pfn_info table and allocation bitmap.
30 */
31 static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
32 #if defined(__x86_64__)
33 integer_param("xenheap_megabytes", opt_xenheap_megabytes);
34 #endif
36 /* opt_noht: If true, Hyperthreading is ignored. */
37 int opt_noht = 0;
38 boolean_param("noht", opt_noht);
40 /* opt_noacpi: If true, ACPI tables are not parsed. */
41 static int opt_noacpi = 0;
42 boolean_param("noacpi", opt_noacpi);
44 /* opt_nosmp: If true, secondary processors are ignored. */
45 static int opt_nosmp = 0;
46 boolean_param("nosmp", opt_nosmp);
48 /* opt_ignorebiostables: If true, ACPI and MP tables are ignored. */
49 /* NB. This flag implies 'nosmp' and 'noacpi'. */
50 static int opt_ignorebiostables = 0;
51 boolean_param("ignorebiostables", opt_ignorebiostables);
53 /* opt_watchdog: If true, run a watchdog NMI on each processor. */
54 static int opt_watchdog = 0;
55 boolean_param("watchdog", opt_watchdog);
/* Non-zero during early boot; cleared in __start_xen() once the heap allocators are up. */
57 int early_boot = 1;
/* Physical address of the end of the Xen heap; set in __start_xen() from opt_xenheap_megabytes. */
59 unsigned long xenheap_phys_end;
/* Subsystem initialisers defined elsewhere, called from start_of_day(). */
61 extern void arch_init_memory(void);
62 extern void init_IRQ(void);
63 extern void trap_init(void);
64 extern void time_init(void);
65 extern void ac_timer_init(void);
66 extern void initialize_keytable();
67 extern int do_timer_lists_from_pit;
69 char ignore_irq13; /* set if exception 16 works */
70 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
72 #if defined(__x86_64__)
73 unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
74 #else
75 unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
76 #endif
77 EXPORT_SYMBOL(mmu_cr4_features);
/* Bitmask of CPUs that have not yet entered their idle loop (see start_of_day()). */
79 unsigned long wait_init_idle;
/* Per-CPU idle task pointers; slot 0 statically refers to the boot idle domain. */
81 struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
83 #ifdef CONFIG_ACPI_INTERPRETER
84 int acpi_disabled = 0;
85 #else
86 int acpi_disabled = 1;
87 #endif
88 EXPORT_SYMBOL(acpi_disabled);
/* Per-CPU physical-package and hyperthread-sibling IDs, filled in by identify_cpu()/init_intel(). */
90 int phys_proc_id[NR_CPUS];
91 int logical_proc_id[NR_CPUS];
93 /* Standard macro to see if a specific flag is changeable. */
/*
 * Tests whether the EFLAGS bit(s) in 'flag' can be toggled by software:
 * save EFLAGS, write back a copy with 'flag' XORed in, re-read EFLAGS,
 * and compare.  Returns non-zero iff the bit actually changed.
 * (Classically used to probe for CPUID via the EFLAGS.ID bit.)
 */
94 static inline int flag_is_changeable_p(unsigned long flag)
95 {
/* f1: EFLAGS re-read after the attempted toggle; f2: original EFLAGS. */
96 unsigned long f1, f2;
98 asm("pushf\n\t"
99 "pushf\n\t"
100 "pop %0\n\t"
101 "mov %0,%1\n\t"
102 "xor %2,%0\n\t"
103 "push %0\n\t"
104 "popf\n\t"
105 "pushf\n\t"
106 "pop %0\n\t"
107 "popf\n\t"
108 : "=&r" (f1), "=&r" (f2)
109 : "ir" (flag));
/* Non-zero iff the toggled value stuck when written back to EFLAGS. */
111 return ((f1^f2) & flag) != 0;
112 }
114 /* Probe for the CPUID instruction */
/* CPUID is present iff the EFLAGS.ID bit is software-toggleable. */
115 static int __init have_cpuid_p(void)
116 {
117 return flag_is_changeable_p(X86_EFLAGS_ID);
118 }
120 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
121 {
122 char *v = c->x86_vendor_id;
124 if (!strcmp(v, "GenuineIntel"))
125 c->x86_vendor = X86_VENDOR_INTEL;
126 else if (!strcmp(v, "AuthenticAMD"))
127 c->x86_vendor = X86_VENDOR_AMD;
128 else if (!strcmp(v, "CyrixInstead"))
129 c->x86_vendor = X86_VENDOR_CYRIX;
130 else if (!strcmp(v, "UMC UMC UMC "))
131 c->x86_vendor = X86_VENDOR_UMC;
132 else if (!strcmp(v, "CentaurHauls"))
133 c->x86_vendor = X86_VENDOR_CENTAUR;
134 else if (!strcmp(v, "NexGenDriven"))
135 c->x86_vendor = X86_VENDOR_NEXGEN;
136 else if (!strcmp(v, "RiseRiseRise"))
137 c->x86_vendor = X86_VENDOR_RISE;
138 else if (!strcmp(v, "GenuineTMx86") ||
139 !strcmp(v, "TransmetaCPU"))
140 c->x86_vendor = X86_VENDOR_TRANSMETA;
141 else
142 c->x86_vendor = X86_VENDOR_UNKNOWN;
143 }
/*
 * Intel-specific CPU setup: apply the Pentium Pro SEP erratum workaround
 * and, on SMP builds, decode Hyper-Threading sibling information from
 * CPUID leaf 1.  Finally starts VMX support if configured.
 */
145 static void __init init_intel(struct cpuinfo_x86 *c)
146 {
147 /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
148 if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
149 clear_bit(X86_FEATURE_SEP, &c->x86_capability);
151 #ifdef CONFIG_SMP
152 if ( test_bit(X86_FEATURE_HT, &c->x86_capability) )
153 {
154 u32 eax, ebx, ecx, edx;
155 int initial_apic_id, siblings, cpu = smp_processor_id();
/* CPUID leaf 1: EBX[23:16] = logical processors per physical package. */
157 cpuid(1, &eax, &ebx, &ecx, &edx);
158 ht_per_core = siblings = (ebx & 0xff0000) >> 16;
/* 'noht' boot option: hide the HT feature bit (sibling count already read). */
160 if ( opt_noht )
161 clear_bit(X86_FEATURE_HT, &c->x86_capability[0]);
163 if ( siblings <= 1 )
164 {
165 printk(KERN_INFO "CPU#%d: Hyper-Threading is disabled\n", cpu);
166 }
167 else if ( siblings > 2 )
168 {
169 panic("We don't support more than two logical CPUs per package!");
170 }
171 else
172 {
/* EBX[31:24] = initial APIC ID; with exactly two siblings the low bit
 * selects the hyperthread and the rest identifies the physical package. */
173 initial_apic_id = ebx >> 24 & 0xff;
174 phys_proc_id[cpu] = initial_apic_id >> 1;
175 logical_proc_id[cpu] = initial_apic_id & 1;
176 printk(KERN_INFO "CPU#%d: Physical ID: %d, Logical ID: %d\n",
177 cpu, phys_proc_id[cpu], logical_proc_id[cpu]);
178 }
179 }
180 #endif
/* Bring up VMX extensions if the hypervisor was built with them. */
182 #ifdef CONFIG_VMX
183 start_vmx();
184 #endif
186 }
188 static void __init init_amd(struct cpuinfo_x86 *c)
189 {
190 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
191 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
192 clear_bit(0*32+31, &c->x86_capability);
194 switch(c->x86)
195 {
196 case 5:
197 panic("AMD K6 is not supported.\n");
198 case 6: /* An Athlon/Duron. We can trust the BIOS probably */
199 break;
200 }
201 }
203 /*
204 * This does the hard work of actually picking apart the CPU stuff...
205 */
/*
 * Populate *c (vendor, family/model/stepping, capability words) via
 * CPUID, then run the vendor-specific init hook.  For secondary CPUs
 * (c != &boot_cpu_data) the boot CPU's capability words are ANDed down
 * to the common feature set.
 */
206 void __init identify_cpu(struct cpuinfo_x86 *c)
207 {
208 int junk, i, cpu = smp_processor_id();
209 u32 xlvl, tfms;
/* Default HT IDs: one logical CPU per package (init_intel() may override). */
211 phys_proc_id[cpu] = cpu;
212 logical_proc_id[cpu] = 0;
214 c->x86_vendor = X86_VENDOR_UNKNOWN;
215 c->cpuid_level = -1; /* CPUID not detected */
216 c->x86_model = c->x86_mask = 0; /* So far unknown... */
217 c->x86_vendor_id[0] = '\0'; /* Unset */
218 memset(&c->x86_capability, 0, sizeof c->x86_capability);
220 if ( !have_cpuid_p() )
221 panic("Ancient processors not supported\n");
223 /* Get vendor name */
/* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order,
 * hence the 0/8/4 byte offsets into x86_vendor_id[]. */
224 cpuid(0x00000000, &c->cpuid_level,
225 (int *)&c->x86_vendor_id[0],
226 (int *)&c->x86_vendor_id[8],
227 (int *)&c->x86_vendor_id[4]);
229 get_cpu_vendor(c);
231 if ( c->cpuid_level == 0 )
232 panic("Decrepit CPUID not supported\n");
/* Leaf 1: EAX = family/model/stepping, EDX = standard feature flags. */
234 cpuid(0x00000001, &tfms, &junk, &junk,
235 &c->x86_capability[0]);
236 c->x86 = (tfms >> 8) & 15;
237 c->x86_model = (tfms >> 4) & 15;
238 c->x86_mask = tfms & 15;
240 /* AMD-defined flags: level 0x80000001 */
/* Only trust extended leaves if the max extended level looks sane. */
241 xlvl = cpuid_eax(0x80000000);
242 if ( (xlvl & 0xffff0000) == 0x80000000 ) {
243 if ( xlvl >= 0x80000001 )
244 c->x86_capability[1] = cpuid_edx(0x80000001);
245 }
247 /* Transmeta-defined flags: level 0x80860001 */
248 xlvl = cpuid_eax(0x80860000);
249 if ( (xlvl & 0xffff0000) == 0x80860000 ) {
250 if ( xlvl >= 0x80860001 )
251 c->x86_capability[2] = cpuid_edx(0x80860001);
252 }
254 printk("CPU%d: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
255 smp_processor_id(),
256 c->x86_capability[0],
257 c->x86_capability[1],
258 c->x86_capability[2],
259 c->x86_vendor);
/* Vendor-specific fixups and feature discovery. */
261 switch ( c->x86_vendor ) {
262 case X86_VENDOR_INTEL:
263 init_intel(c);
264 break;
265 case X86_VENDOR_AMD:
266 init_amd(c);
267 break;
268 case X86_VENDOR_UNKNOWN: /* Connectix Virtual PC reports this */
269 break;
270 case X86_VENDOR_CENTAUR:
271 break;
272 default:
273 printk("Unknown CPU identifier (%d): continuing anyway, "
274 "but might fail.\n", c->x86_vendor);
275 }
277 printk("CPU caps: %08x %08x %08x %08x\n",
278 c->x86_capability[0],
279 c->x86_capability[1],
280 c->x86_capability[2],
281 c->x86_capability[3]);
283 /*
284 * On SMP, boot_cpu_data holds the common feature set between
285 * all CPUs; so make sure that we indicate which features are
286 * common between the CPUs. The first time this routine gets
287 * executed, c == &boot_cpu_data.
288 */
289 if ( c != &boot_cpu_data ) {
290 /* AND the already accumulated flags with these */
291 for ( i = 0 ; i < NCAPINTS ; i++ )
292 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
293 }
294 }
/* Bitmask of CPUs that have completed cpu_init(). */
297 unsigned long cpu_initialized;
/*
 * Per-CPU bring-up: load this CPU's GDT/IDT/TSS/LDT, clear the NT flag
 * and debug registers, install per-CPU trap handling, and switch to the
 * current task's page tables.  Panics if called twice for the same CPU.
 */
298 void __init cpu_init(void)
299 {
300 extern void percpu_traps_init(void);
301 int nr = smp_processor_id();
302 struct tss_struct *t = &init_tss[nr];
304 if ( test_and_set_bit(nr, &cpu_initialized) )
305 panic("CPU#%d already initialized!!!\n", nr);
306 printk("Initializing CPU#%d\n", nr);
308 /* Set up GDT and IDT. */
309 SET_GDT_ENTRIES(current, DEFAULT_GDT_ENTRIES);
310 SET_GDT_ADDRESS(current, DEFAULT_GDT_ADDRESS);
311 __asm__ __volatile__ ( "lgdt %0" : "=m" (*current->mm.gdt) );
312 __asm__ __volatile__ ( "lidt %0" : "=m" (idt_descr) );
314 /* No nested task. */
/* Clears EFLAGS.NT (bit 14) by masking the saved flags word on the stack. */
315 __asm__ __volatile__ ( "pushf ; andw $0xbfff,(%"__OP"sp) ; popf" );
317 /* Ensure FPU gets initialised for each domain. */
/* stts() raises CR0.TS so the first FPU use traps and state is set up lazily. */
318 stts();
320 /* Set up and load the per-CPU TSS and LDT. */
/* Invalid I/O-bitmap offset + all-ones bitmap: every port access faults. */
321 t->bitmap = IOBMP_INVALID_OFFSET;
322 memset(t->io_bitmap, ~0, sizeof(t->io_bitmap));
/* Ring-0 stack pointer (and, on i386, stack segment) for this CPU. */
323 #if defined(__i386__)
324 t->ss0 = __HYPERVISOR_DS;
325 t->esp0 = get_stack_top();
326 #elif defined(__x86_64__)
327 t->rsp0 = get_stack_top();
328 #endif
329 set_tss_desc(nr,t);
330 load_TR(nr);
/* Load a null LDT selector. */
331 __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
333 /* Clear all 6 debug registers. */
334 #define CD(register) __asm__ ( "mov %0,%%db" #register : : "r" (0UL) );
335 CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
336 #undef CD
338 percpu_traps_init();
340 /* Install correct page table. */
341 write_ptbase(&current->mm);
343 init_idle_task();
344 }
346 static void __init do_initcalls(void)
347 {
348 initcall_t *call;
349 for ( call = &__initcall_start; call < &__initcall_end; call++ )
350 (*call)();
351 }
/* Lowest physical address the PCI layer may allocate from; raised below max_page. */
353 unsigned long pci_mem_start = 0x10000000;
/*
 * Main boot-time bring-up, called from __start_xen() after the memory
 * allocators are ready: CPU identification, SMP/ACPI configuration,
 * interrupts, timers, scheduler, and secondary-CPU boot.  The ordering
 * of calls here is significant -- do not reorder casually.
 */
355 static void __init start_of_day(void)
356 {
357 unsigned long low_mem_size;
359 #ifdef MEMORY_GUARD
360 /* Unmap the first page of CPU0's stack. */
361 extern unsigned long cpu0_stack[];
362 memguard_guard_range(cpu0_stack, PAGE_SIZE);
363 #endif
365 open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
/* 'watchdog' boot option: arm the local-APIC NMI watchdog. */
367 if ( opt_watchdog )
368 nmi_watchdog = NMI_LOCAL_APIC;
370 sort_exception_tables();
372 arch_do_createdomain(current);
374 /* Tell the PCI layer not to allocate too close to the RAM area.. */
/* Round top of RAM up to a 1MB boundary. */
375 low_mem_size = ((max_page << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
376 if ( low_mem_size > pci_mem_start ) pci_mem_start = low_mem_size;
378 identify_cpu(&boot_cpu_data); /* get CPU type info */
/* Enable FXSAVE/FXRSTOR and SSE exception support if the CPU has them. */
379 if ( cpu_has_fxsr ) set_in_cr4(X86_CR4_OSFXSR);
380 if ( cpu_has_xmm ) set_in_cr4(X86_CR4_OSXMMEXCPT);
381 #ifdef CONFIG_SMP
382 if ( opt_ignorebiostables )
383 {
384 opt_nosmp = 1; /* No SMP without configuration */
385 opt_noacpi = 1; /* ACPI will just confuse matters also */
386 }
387 else
388 {
389 find_smp_config();
390 smp_alloc_memory(); /* trampoline which other CPUs jump at */
391 }
392 #endif
393 paging_init(); /* not much here now, but sets up fixmap */
394 if ( !opt_noacpi )
395 acpi_boot_init();
396 #ifdef CONFIG_SMP
397 if ( smp_found_config )
398 get_smp_config();
399 #endif
400 scheduler_init();
401 init_IRQ(); /* installs simple interrupt wrappers. Starts HZ clock. */
402 trap_init();
403 time_init(); /* installs software handler for HZ clock. */
404 init_apic_mappings(); /* make APICs addressable in our pagetables. */
406 arch_init_memory();
408 #ifndef CONFIG_SMP
409 APIC_init_uniprocessor();
410 #else
411 if ( opt_nosmp )
412 APIC_init_uniprocessor();
413 else
414 smp_boot_cpus();
415 /*
416 * Does loads of stuff, including kicking the local
417 * APIC, and the IO APIC after other CPUs are booted.
418 * Each IRQ is preferably handled by IO-APIC, but
419 * fall thru to 8259A if we have to (but slower).
420 */
421 #endif
/* Interrupts on from here. */
423 __sti();
425 initialize_keytable(); /* call back handling for key codes */
427 serial_init_stage2();
/* Without a local APIC we must drive accurate timers from the PIT. */
429 if ( !cpu_has_apic )
430 {
431 do_timer_lists_from_pit = 1;
432 if ( smp_num_cpus != 1 )
433 panic("We need local APICs on SMP machines!");
434 }
436 ac_timer_init(); /* init accurate timers */
437 init_xen_time(); /* initialise the time */
438 schedulers_start(); /* start scheduler for each CPU */
440 check_nmi_watchdog();
442 #ifdef CONFIG_PCI
443 pci_init();
444 #endif
445 do_initcalls();
447 #ifdef CONFIG_SMP
/* Wait for every other online CPU to reach its idle loop.
 * NOTE(review): each AP presumably clears its own bit in wait_init_idle
 * once idle -- confirm against the smpboot/idle code. */
448 wait_init_idle = cpu_online_map;
449 clear_bit(smp_processor_id(), &wait_init_idle);
450 smp_threads_ready = 1;
451 smp_commence(); /* Tell other CPUs that state of the world is stable. */
452 while ( wait_init_idle != 0 )
453 {
454 cpu_relax();
455 barrier();
456 }
457 #endif
459 watchdog_on = 1;
460 #ifdef __x86_64__ /* x86_32 uses low mappings when building DOM0. */
461 zap_low_mappings();
462 #endif
463 }
465 void __init __start_xen(multiboot_info_t *mbi)
466 {
467 unsigned char *cmdline;
468 module_t *mod = (module_t *)__va(mbi->mods_addr);
469 void *heap_start;
470 unsigned long firsthole_start, nr_pages;
471 unsigned long dom0_memory_start, dom0_memory_end;
472 unsigned long initial_images_start, initial_images_end;
473 struct e820entry e820_raw[E820MAX];
474 int i, e820_raw_nr = 0, bytes = 0;
476 /* Parse the command-line options. */
477 if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
478 cmdline_parse(__va(mbi->cmdline));
480 /* Must do this early -- e.g., spinlocks rely on get_current(). */
481 set_current(&idle0_exec_domain);
483 /* We initialise the serial devices very early so we can get debugging. */
484 serial_init_stage1();
486 init_console();
488 /* Check that we have at least one Multiboot module. */
489 if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
490 {
491 printk("FATAL ERROR: Require at least one Multiboot module.\n");
492 for ( ; ; ) ;
493 }
495 xenheap_phys_end = opt_xenheap_megabytes << 20;
497 if ( mbi->flags & MBI_MEMMAP )
498 {
499 while ( bytes < mbi->mmap_length )
500 {
501 memory_map_t *map = __va(mbi->mmap_addr + bytes);
502 e820_raw[e820_raw_nr].addr =
503 ((u64)map->base_addr_high << 32) | (u64)map->base_addr_low;
504 e820_raw[e820_raw_nr].size =
505 ((u64)map->length_high << 32) | (u64)map->length_low;
506 e820_raw[e820_raw_nr].type =
507 (map->type > E820_SHARED_PAGE) ? E820_RESERVED : map->type;
508 e820_raw_nr++;
509 bytes += map->size + 4;
510 }
511 }
512 else if ( mbi->flags & MBI_MEMLIMITS )
513 {
514 e820_raw[0].addr = 0;
515 e820_raw[0].size = mbi->mem_lower << 10;
516 e820_raw[0].type = E820_RAM;
517 e820_raw[1].addr = 0x100000;
518 e820_raw[1].size = mbi->mem_upper << 10;
519 e820_raw[1].type = E820_RAM;
520 e820_raw_nr = 2;
521 }
522 else
523 {
524 printk("FATAL ERROR: Bootloader provided no memory information.\n");
525 for ( ; ; ) ;
526 }
528 max_page = init_e820(e820_raw, e820_raw_nr);
530 /* Find the first high-memory RAM hole. */
531 for ( i = 0; i < e820.nr_map; i++ )
532 if ( (e820.map[i].type == E820_RAM) &&
533 (e820.map[i].addr >= 0x100000) )
534 break;
535 firsthole_start = e820.map[i].addr + e820.map[i].size;
537 /* Relocate the Multiboot modules. */
538 initial_images_start = xenheap_phys_end;
539 initial_images_end = initial_images_start +
540 (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
541 if ( initial_images_end > firsthole_start )
542 {
543 printk("Not enough memory to stash the DOM0 kernel image.\n");
544 for ( ; ; ) ;
545 }
546 #if defined(__i386__)
547 memmove((void *)initial_images_start, /* use low mapping */
548 (void *)mod[0].mod_start, /* use low mapping */
549 mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
550 #elif defined(__x86_64__)
551 memmove(__va(initial_images_start),
552 __va(mod[0].mod_start),
553 mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
554 #endif
556 /* Initialise boot-time allocator with all RAM situated after modules. */
557 heap_start = memguard_init(&_end);
558 heap_start = __va(init_boot_allocator(__pa(heap_start)));
559 nr_pages = 0;
560 for ( i = 0; i < e820.nr_map; i++ )
561 {
562 if ( e820.map[i].type != E820_RAM )
563 continue;
564 nr_pages += e820.map[i].size >> PAGE_SHIFT;
565 if ( (e820.map[i].addr + e820.map[i].size) >= initial_images_end )
566 init_boot_pages((e820.map[i].addr < initial_images_end) ?
567 initial_images_end : e820.map[i].addr,
568 e820.map[i].addr + e820.map[i].size);
569 }
571 printk("System RAM: %luMB (%lukB)\n",
572 nr_pages >> (20 - PAGE_SHIFT),
573 nr_pages << (PAGE_SHIFT - 10));
575 /* Allocate an aligned chunk of RAM for DOM0. */
576 dom0_memory_start = alloc_boot_pages(opt_dom0_mem << 10, 4UL << 20);
577 dom0_memory_end = dom0_memory_start + (opt_dom0_mem << 10);
578 if ( dom0_memory_start == 0 )
579 {
580 printk("Not enough memory for DOM0 memory reservation.\n");
581 for ( ; ; ) ;
582 }
584 init_frametable();
586 end_boot_allocator();
588 init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
589 printk("Xen heap: %luMB (%lukB)\n",
590 (xenheap_phys_end-__pa(heap_start)) >> 20,
591 (xenheap_phys_end-__pa(heap_start)) >> 10);
593 early_boot = 0;
595 start_of_day();
597 grant_table_init();
599 shadow_mode_init();
601 /* Create initial domain 0. */
602 dom0 = do_createdomain(0, 0);
603 if ( dom0 == NULL )
604 panic("Error creating domain 0\n");
606 set_bit(DF_PRIVILEGED, &dom0->d_flags);
608 /* Grab the DOM0 command line. Skip past the image name. */
609 cmdline = (unsigned char *)(mod[0].string ? __va(mod[0].string) : NULL);
610 if ( cmdline != NULL )
611 {
612 while ( *cmdline == ' ' ) cmdline++;
613 if ( (cmdline = strchr(cmdline, ' ')) != NULL )
614 while ( *cmdline == ' ' ) cmdline++;
615 }
617 /*
618 * We're going to setup domain0 using the module(s) that we stashed safely
619 * above our heap. The second module, if present, is an initrd ramdisk.
620 */
621 if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_end,
622 initial_images_start,
623 mod[0].mod_end-mod[0].mod_start,
624 (mbi->mods_count == 1) ? 0 :
625 initial_images_start +
626 (mod[1].mod_start-mod[0].mod_start),
627 (mbi->mods_count == 1) ? 0 :
628 mod[mbi->mods_count-1].mod_end - mod[1].mod_start,
629 cmdline) != 0)
630 panic("Could not set up DOM0 guest OS\n");
632 /* The stash space for the initial kernel image can now be freed up. */
633 init_domheap_pages(initial_images_start, initial_images_end);
635 scrub_heap_pages();
637 init_trace_bufs();
639 /* Give up the VGA console if DOM0 is configured to grab it. */
640 console_endboot(cmdline && strstr(cmdline, "tty0"));
642 domain_unpause_by_systemcontroller(current->domain);
643 domain_unpause_by_systemcontroller(dom0);
644 startup_cpu_idle_loop();
645 }