view xen/arch/x86/setup.c @ 13686:32fd32bbf78d

Add RCU support to Xen.
Signed-off-by: Jose Renato Santos <jsantos@hpl.hp.com>
author kaf24@localhost.localdomain
date Fri Jan 26 18:34:36 2007 +0000 (2007-01-26)
parents 328deec3febf
children b2c1eeee2dcf
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/serial.h>
#include <xen/softirq.h>
#include <xen/acpi.h>
#include <xen/console.h>
#include <xen/trace.h>
#include <xen/multiboot.h>
#include <xen/domain_page.h>
#include <xen/version.h>
#include <xen/gdbstub.h>
#include <xen/percpu.h>
#include <xen/hypercall.h>
#include <xen/keyhandler.h>
#include <xen/numa.h>
#include <xen/rcupdate.h>
#include <public/version.h>
#ifdef CONFIG_COMPAT
#include <compat/platform.h>
#include <compat/xen.h>
#endif
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/processor.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/shadow.h>
#include <asm/e820.h>
#include <acm/acm_hooks.h>
#include <xen/kexec.h>

extern void dmi_scan_machine(void);
extern void generic_apic_probe(void);
extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);

/*
 * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
 * page_info table and allocation bitmap.
 */
static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
#if defined(CONFIG_X86_64)
integer_param("xenheap_megabytes", opt_xenheap_megabytes);
#endif

/* opt_nosmp: If true, secondary processors are ignored. */
static int opt_nosmp = 0;
boolean_param("nosmp", opt_nosmp);

/* maxcpus: maximum number of CPUs to activate. */
static unsigned int max_cpus = NR_CPUS;
integer_param("maxcpus", max_cpus);

/* opt_watchdog: If true, run a watchdog NMI on each processor. */
static int opt_watchdog = 0;
boolean_param("watchdog", opt_watchdog);

/* **** Linux config option: propagated to domain0. */
63 /* "acpi=off": Sisables both ACPI table parsing and interpreter. */
64 /* "acpi=force": Override the disable blacklist. */
65 /* "acpi=strict": Disables out-of-spec workarounds. */
66 /* "acpi=ht": Limit ACPI just to boot-time to enable HT. */
67 /* "acpi=noirq": Disables ACPI interrupt routing. */
68 static void parse_acpi_param(char *s);
69 custom_param("acpi", parse_acpi_param);
71 /* **** Linux config option: propagated to domain0. */
72 /* acpi_skip_timer_override: Skip IRQ0 overrides. */
73 extern int acpi_skip_timer_override;
74 boolean_param("acpi_skip_timer_override", acpi_skip_timer_override);
76 /* **** Linux config option: propagated to domain0. */
77 /* noapic: Disable IOAPIC setup. */
78 extern int skip_ioapic_setup;
79 boolean_param("noapic", skip_ioapic_setup);
81 int early_boot = 1;
83 cpumask_t cpu_present_map;
85 /* Limits of Xen heap, used to initialise the allocator. */
86 unsigned long xenheap_phys_start, xenheap_phys_end;
88 extern void arch_init_memory(void);
89 extern void init_IRQ(void);
90 extern void trap_init(void);
91 extern void early_time_init(void);
92 extern void early_cpu_init(void);
94 struct tss_struct init_tss[NR_CPUS];
96 extern unsigned long cpu0_stack[];
98 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
100 #if CONFIG_PAGING_LEVELS > 2
101 unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
102 #else
103 unsigned long mmu_cr4_features = X86_CR4_PSE;
104 #endif
105 EXPORT_SYMBOL(mmu_cr4_features);
107 int acpi_disabled;
109 int acpi_force;
110 char acpi_param[10] = "";
111 static void parse_acpi_param(char *s)
112 {
113 /* Save the parameter so it can be propagated to domain0. */
114 strncpy(acpi_param, s, sizeof(acpi_param));
115 acpi_param[sizeof(acpi_param)-1] = '\0';
117 /* Interpret the parameter for use within Xen. */
118 if ( !strcmp(s, "off") )
119 {
120 disable_acpi();
121 }
122 else if ( !strcmp(s, "force") )
123 {
124 acpi_force = 1;
125 acpi_ht = 1;
126 acpi_disabled = 0;
127 }
128 else if ( !strcmp(s, "strict") )
129 {
130 acpi_strict = 1;
131 }
132 else if ( !strcmp(s, "ht") )
133 {
134 if ( !acpi_force )
135 disable_acpi();
136 acpi_ht = 1;
137 }
138 else if ( !strcmp(s, "noirq") )
139 {
140 acpi_noirq_set();
141 }
142 }
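
/*
 * Invoke every function registered with __initcall(): the build collects
 * their pointers into a table delimited by __initcall_start/__initcall_end.
 */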
static void __init do_initcalls(void)
{
    initcall_t *call;
    for ( call = &__initcall_start; call < &__initcall_end; call++ )
        (*call)();
}
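
/* Fatal early-boot errors: halt the CPU in an infinite loop. */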
#define EARLY_FAIL() for ( ; ; ) __asm__ __volatile__ ( "hlt" )

static struct e820entry e820_raw[E820MAX];

static unsigned long initial_images_start, initial_images_end;

unsigned long initial_images_nrpages(void)
{
    unsigned long s = initial_images_start + PAGE_SIZE - 1;
    unsigned long e = initial_images_end;
    return ((e >> PAGE_SHIFT) - (s >> PAGE_SHIFT));
}

void discard_initial_images(void)
{
    init_domheap_pages(initial_images_start, initial_images_end);
}
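
/*
 * Per-CPU data areas: the image carries one linker-provided template copy of
 * the per-CPU data; each CPU gets its own (1 << PERCPU_SHIFT)-byte slot,
 * initialised below by copying the template.
 */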
extern char __per_cpu_start[], __per_cpu_data_end[], __per_cpu_end[];

static void __init percpu_init_areas(void)
{
    unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;

    BUG_ON(data_size > PERCPU_SIZE);

    for_each_cpu ( i )
    {
        memguard_unguard_range(__per_cpu_start + (i << PERCPU_SHIFT),
                               1 << PERCPU_SHIFT);
        if ( i != 0 )
            memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
                   __per_cpu_start,
                   data_size);
    }
}

static void __init percpu_guard_areas(void)
{
    memguard_guard_range(__per_cpu_start, __per_cpu_end - __per_cpu_start);
}

static void __init percpu_free_unused_areas(void)
{
    unsigned int i, first_unused;

    /* Find first unused CPU number. */
    for ( i = 0; i < NR_CPUS; i++ )
        if ( !cpu_online(i) )
            break;
    first_unused = i;

    /* Check that there are no holes in cpu_online_map. */
    for ( ; i < NR_CPUS; i++ )
        BUG_ON(cpu_online(i));

#ifndef MEMORY_GUARD
    init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
                       __pa(__per_cpu_end));
#endif
}

/* Fetch acm policy module from multiboot modules. */
static void extract_acm_policy(
    multiboot_info_t *mbi,
    unsigned int *initrdidx,
    char **_policy_start,
    unsigned long *_policy_len)
{
    int i;
    module_t *mod = (module_t *)__va(mbi->mods_addr);
    unsigned long start, policy_len;
    char *policy_start;

    /*
     * Try all modules and see which one could be the binary policy.
     * Adjust the initrdidx if module[1] is the binary policy.
     */
    for ( i = mbi->mods_count-1; i >= 1; i-- )
    {
        start = initial_images_start + (mod[i].mod_start-mod[0].mod_start);
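        /*
         * The stashed copy is reached via the low 1:1 mapping on x86-32 and
         * via the direct virtual mapping (__va) on x86-64.
         */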
#if defined(__i386__)
        policy_start = (char *)start;
#elif defined(__x86_64__)
        policy_start = __va(start);
#endif
        policy_len = mod[i].mod_end - mod[i].mod_start;
        if ( acm_is_policy(policy_start, policy_len) )
        {
            printk("Policy len 0x%lx, start at %p - module %d.\n",
                   policy_len, policy_start, i);
            *_policy_start = policy_start;
            *_policy_len = policy_len;
            if ( i == 1 )
                *initrdidx = (mbi->mods_count > 2) ? 2 : 0;
            break;
        }
    }
}

static void __init init_idle_domain(void)
{
    struct domain *idle_domain;

    /* Domain creation requires that scheduler structures are initialised. */
    scheduler_init();

    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
    if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
        BUG();

    set_current(idle_domain->vcpu[0]);
    idle_vcpu[0] = this_cpu(curr_vcpu) = current;

    setup_idle_pagetable();
}
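
/*
 * Bind a CPU to the NUMA node its APIC ID maps to, falling back to node 0
 * when ACPI/SRAT provided no mapping.
 */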
static void srat_detect_node(int cpu)
{
    unsigned node;
    u8 apicid = x86_cpu_to_apicid[cpu];

    node = apicid_to_node[apicid];
    if ( node == NUMA_NO_NODE )
        node = 0;
    numa_set_node(cpu, node);

    if ( acpi_numa > 0 )
        printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
}
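
/*
 * Copy the physical range [src_start, src_end) to physical address dst,
 * going through the low 1:1 mapping on x86-32 and the direct map on x86-64.
 */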
void __init move_memory(unsigned long dst,
                        unsigned long src_start, unsigned long src_end)
{
#if defined(CONFIG_X86_32)
    memmove((void *)dst,       /* use low mapping */
            (void *)src_start, /* use low mapping */
            src_end - src_start);
#elif defined(CONFIG_X86_64)
    memmove(__va(dst),
            __va(src_start),
            src_end - src_start);
#endif
}
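
/*
 * Main C entry point, reached from the boot-time assembly stub with the
 * Multiboot information structure as argument.
 */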
void __init __start_xen(multiboot_info_t *mbi)
{
    char __cmdline[] = "", *cmdline = __cmdline;
    unsigned long _initrd_start = 0, _initrd_len = 0;
    unsigned int initrdidx = 1;
    char *_policy_start = NULL;
    unsigned long _policy_len = 0;
    module_t *mod = (module_t *)__va(mbi->mods_addr);
    unsigned long nr_pages, modules_length;
    paddr_t s, e;
    int i, e820_warn = 0, e820_raw_nr = 0, bytes = 0;
    struct ns16550_defaults ns16550 = {
        .data_bits = 8,
        .parity = 'n',
        .stop_bits = 1
    };

    extern void early_page_fault(void);
    set_intr_gate(TRAP_page_fault, &early_page_fault);

    /* Parse the command-line options. */
    if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
        cmdline = __va(mbi->cmdline);
    cmdline_parse(cmdline);

    set_current((struct vcpu *)0xfffff000); /* debug sanity */
    idle_vcpu[0] = current;
    set_processor_id(0); /* needed early, for smp_processor_id() */

    smp_prepare_boot_cpu();

    /* We initialise the serial devices very early so we can get debugging. */
    ns16550.io_base = 0x3f8;
    ns16550.irq = 4;
    ns16550_init(0, &ns16550);
    ns16550.io_base = 0x2f8;
    ns16550.irq = 3;
    ns16550_init(1, &ns16550);
    serial_init_preirq();

    init_console();

    printk("Command line: %s\n", cmdline);

    /* Check that we have at least one Multiboot module. */
    if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
    {
        printk("FATAL ERROR: dom0 kernel not specified."
               " Check bootloader configuration.\n");
        EARLY_FAIL();
    }

    if ( ((unsigned long)cpu0_stack & (STACK_SIZE-1)) != 0 )
    {
        printk("FATAL ERROR: Misaligned CPU0 stack.\n");
        EARLY_FAIL();
    }

    /*
     * Since there are some stubs getting built on the stacks which use
     * direct calls/jumps, the heap must be confined to the lower 2G so
     * that those branches can reach their targets.
     */
    if ( opt_xenheap_megabytes > 2048 )
        opt_xenheap_megabytes = 2048;
    xenheap_phys_end = opt_xenheap_megabytes << 20;

    if ( mbi->flags & MBI_MEMMAP )
    {
        while ( bytes < mbi->mmap_length )
        {
            memory_map_t *map = __va(mbi->mmap_addr + bytes);

            /*
             * This is a gross workaround for a BIOS bug. Some bootloaders do
             * not write e820 map entries into pre-zeroed memory. This is
             * okay if the BIOS fills in all fields of the map entry, but
             * some broken BIOSes do not bother to write the high word of
             * the length field if the length is smaller than 4GB. We
             * detect and fix this by flagging sections below 4GB that
             * appear to be larger than 4GB in size.
             */
            if ( (map->base_addr_high == 0) && (map->length_high != 0) )
            {
                e820_warn = 1;
                map->length_high = 0;
            }

            e820_raw[e820_raw_nr].addr =
                ((u64)map->base_addr_high << 32) | (u64)map->base_addr_low;
            e820_raw[e820_raw_nr].size =
                ((u64)map->length_high << 32) | (u64)map->length_low;
            e820_raw[e820_raw_nr].type =
                (map->type > E820_NVS) ? E820_RESERVED : map->type;
            e820_raw_nr++;

            bytes += map->size + 4;
        }
    }
    else if ( mbi->flags & MBI_MEMLIMITS )
    {
        e820_raw[0].addr = 0;
        e820_raw[0].size = mbi->mem_lower << 10;
        e820_raw[0].type = E820_RAM;
        e820_raw[1].addr = 0x100000;
        e820_raw[1].size = mbi->mem_upper << 10;
        e820_raw[1].type = E820_RAM;
        e820_raw_nr = 2;
    }
    else
    {
        printk("FATAL ERROR: Bootloader provided no memory information.\n");
        for ( ; ; ) ;
    }

    if ( e820_warn )
        printk("WARNING: Buggy e820 map detected and fixed "
               "(truncated length fields).\n");

    /* Ensure that all E820 RAM regions are page-aligned and -sized. */
    for ( i = 0; i < e820_raw_nr; i++ )
    {
        uint64_t s, e;
        if ( e820_raw[i].type != E820_RAM )
            continue;
        s = PFN_UP(e820_raw[i].addr);
        e = PFN_DOWN(e820_raw[i].addr + e820_raw[i].size);
        e820_raw[i].size = 0; /* discarded later */
        if ( s < e )
        {
            e820_raw[i].addr = s << PAGE_SHIFT;
            e820_raw[i].size = (e - s) << PAGE_SHIFT;
        }
    }

    /* Sanitise the raw E820 map to produce a final clean version. */
    max_page = init_e820(e820_raw, &e820_raw_nr);

    modules_length = mod[mbi->mods_count-1].mod_end - mod[0].mod_start;

    /* Find a large enough RAM extent to stash the DOM0 modules. */
    for ( i = 0; ; i++ )
    {
        if ( i == e820.nr_map )
        {
            printk("Not enough memory to stash the DOM0 kernel image.\n");
            for ( ; ; ) ;
        }

        if ( (e820.map[i].type == E820_RAM) &&
             (e820.map[i].size >= modules_length) &&
             ((e820.map[i].addr + e820.map[i].size) >=
              (xenheap_phys_end + modules_length)) )
            break;
    }

    /* Stash as near as possible to the beginning of the RAM extent. */
    initial_images_start = e820.map[i].addr;
    if ( initial_images_start < xenheap_phys_end )
        initial_images_start = xenheap_phys_end;
    initial_images_end = initial_images_start + modules_length;

    move_memory(initial_images_start,
                mod[0].mod_start, mod[mbi->mods_count-1].mod_end);

    /* Initialise boot-time allocator with all RAM situated after modules. */
    xenheap_phys_start = init_boot_allocator(__pa(&_end));
    nr_pages = 0;
    for ( i = 0; i < e820.nr_map; i++ )
    {
        if ( e820.map[i].type != E820_RAM )
            continue;

        nr_pages += e820.map[i].size >> PAGE_SHIFT;

        /* Initialise boot heap, skipping Xen heap and dom0 modules. */
        s = e820.map[i].addr;
        e = s + e820.map[i].size;
        if ( s < xenheap_phys_end )
            s = xenheap_phys_end;
        if ( (s < initial_images_end) && (e > initial_images_start) )
            s = initial_images_end;
        init_boot_pages(s, e);

#if defined(CONFIG_X86_64)
        /*
         * x86/64 maps all registered RAM. Points to note:
         * 1. The initial pagetable already maps low 1GB, so skip that.
         * 2. We must map *only* RAM areas, taking care to avoid I/O holes.
         *    Failure to do this can cause coherency problems and deadlocks
         *    due to cache-attribute mismatches (e.g., AMD/AGP Linux bug).
         */
        {
            /* Calculate page-frame range, discarding partial frames. */
            unsigned long start, end;
            unsigned long init_mapped = 1UL << (30 - PAGE_SHIFT); /* 1GB */
            start = PFN_UP(e820.map[i].addr);
            end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
            /* Clip the range to exclude what the bootstrapper initialised. */
            if ( start < init_mapped )
                start = init_mapped;
            if ( end <= start )
                continue;
            /* Request the mapping. */
            map_pages_to_xen(
                PAGE_OFFSET + (start << PAGE_SHIFT),
                start, end-start, PAGE_HYPERVISOR);
        }
#endif
    }
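
    /*
     * If a kexec crash area was configured, claim it from the boot
     * allocator: temporarily free the stashed images, reserve the crash
     * region, then reallocate and relocate the images elsewhere.
     */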
    if ( (kexec_crash_area.size > 0) && (kexec_crash_area.start > 0) )
    {
        unsigned long kdump_start, kdump_size, k;

        /* Mark image pages as free for now. */
        init_boot_pages(initial_images_start, initial_images_end);

        kdump_start = kexec_crash_area.start;
        kdump_size = kexec_crash_area.size;

        printk("Kdump: %luMB (%lukB) at 0x%lx\n",
               kdump_size >> 20,
               kdump_size >> 10,
               kdump_start);

        if ( (kdump_start & ~PAGE_MASK) || (kdump_size & ~PAGE_MASK) )
            panic("Kdump parameters not page aligned\n");

        kdump_start >>= PAGE_SHIFT;
        kdump_size >>= PAGE_SHIFT;

        /* Allocate pages for the Kdump memory area. */
        k = alloc_boot_pages_at(kdump_size, kdump_start);
        if ( k != kdump_start )
            panic("Unable to reserve Kdump memory\n");

        /* Allocate pages for the relocated initial images. */
        k = ((initial_images_end - initial_images_start) & ~PAGE_MASK) ? 1 : 0;
        k += (initial_images_end - initial_images_start) >> PAGE_SHIFT;

        k = alloc_boot_pages(k, 1);
        if ( k == 0 )
            panic("Unable to allocate initial images memory\n");

        move_memory(k << PAGE_SHIFT, initial_images_start, initial_images_end);

        initial_images_end -= initial_images_start;
        initial_images_start = k << PAGE_SHIFT;
        initial_images_end += initial_images_start;
    }
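
    /*
     * Enable memory guarding (where MEMORY_GUARD is configured) and protect
     * the per-CPU data areas from stray accesses.
     */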
    memguard_init();
    percpu_guard_areas();

    printk("System RAM: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));
    total_pages = nr_pages;

    /* Sanity check for unwanted bloat of certain hypercall structures. */
    BUILD_BUG_ON(sizeof(((struct xen_platform_op *)0)->u) !=
                 sizeof(((struct xen_platform_op *)0)->u.pad));
    BUILD_BUG_ON(sizeof(((struct xen_domctl *)0)->u) !=
                 sizeof(((struct xen_domctl *)0)->u.pad));
    BUILD_BUG_ON(sizeof(((struct xen_sysctl *)0)->u) !=
                 sizeof(((struct xen_sysctl *)0)->u.pad));

    BUILD_BUG_ON(sizeof(start_info_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(struct vcpu_info) != 64);

#ifdef CONFIG_COMPAT
    BUILD_BUG_ON(sizeof(((struct compat_platform_op *)0)->u) !=
                 sizeof(((struct compat_platform_op *)0)->u.pad));
    BUILD_BUG_ON(sizeof(start_info_compat_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(struct compat_vcpu_info) != 64);
#endif

    /* Check definitions in public headers match internal defs. */
    BUILD_BUG_ON(__HYPERVISOR_VIRT_START != HYPERVISOR_VIRT_START);
#ifdef HYPERVISOR_VIRT_END
    BUILD_BUG_ON(__HYPERVISOR_VIRT_END != HYPERVISOR_VIRT_END);
#endif
    BUILD_BUG_ON(MACH2PHYS_VIRT_START != RO_MPT_VIRT_START);
    BUILD_BUG_ON(MACH2PHYS_VIRT_END != RO_MPT_VIRT_END);
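
    /*
     * Bring up the core memory subsystems: the frame table, ACPI tables and
     * NUMA layout, then hand the remaining boot pages over to the real
     * allocators.
     */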
    init_frametable();

    acpi_boot_table_init();

    acpi_numa_init();

    numa_initmem_init(0, max_page);

    end_boot_allocator();

    /* Initialise the Xen heap, skipping RAM holes. */
    nr_pages = 0;
    for ( i = 0; i < e820.nr_map; i++ )
    {
        if ( e820.map[i].type != E820_RAM )
            continue;

        s = e820.map[i].addr;
        e = s + e820.map[i].size;
        if ( s < xenheap_phys_start )
            s = xenheap_phys_start;
        if ( e > xenheap_phys_end )
            e = xenheap_phys_end;

        if ( s < e )
        {
            nr_pages += (e - s) >> PAGE_SHIFT;
            init_xenheap_pages(s, e);
        }
    }

    printk("Xen heap: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));

    early_boot = 0;

    early_cpu_init();

    paging_init();

    /* Unmap the first page of CPU0's stack. */
    memguard_guard_stack(cpu0_stack);

    open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);

    if ( opt_watchdog )
        nmi_watchdog = NMI_LOCAL_APIC;

    sort_exception_tables();

    find_smp_config();

    smp_alloc_memory();

    dmi_scan_machine();

    generic_apic_probe();

    acpi_boot_init();

    init_cpu_to_node();

    if ( smp_found_config )
        get_smp_config();

    init_apic_mappings();

    init_IRQ();

    percpu_init_areas();

    init_idle_domain();

    trap_init();

    rcu_init();

    timer_init();

    early_time_init();

    arch_init_memory();

    identify_cpu(&boot_cpu_data);
    if ( cpu_has_fxsr )
        set_in_cr4(X86_CR4_OSFXSR);
    if ( cpu_has_xmm )
        set_in_cr4(X86_CR4_OSXMMEXCPT);

    if ( opt_nosmp )
        max_cpus = 0;

    smp_prepare_cpus(max_cpus);

    /*
     * Initialise higher-level timer functions. We do this fairly late
     * (post-SMP) because the time bases and scale factors need to be updated
     * regularly, and SMP initialisation can cause a long delay with
     * interrupts not yet enabled.
     */
    init_xen_time();

    initialize_keytable();

    serial_init_postirq();

    BUG_ON(!local_irq_is_enabled());

    for_each_present_cpu ( i )
    {
        if ( num_online_cpus() >= max_cpus )
            break;
        if ( !cpu_online(i) )
        {
            rcu_online_cpu(i);
            __cpu_up(i);
        }

        /* Set up cpu_to_node[]. */
        srat_detect_node(i);
        /* Set up node_to_cpumask based on cpu_to_node[]. */
        numa_add_cpu(i);
    }

    printk("Brought up %ld CPUs\n", (long)num_online_cpus());
    smp_cpus_done(max_cpus);

    percpu_free_unused_areas();

    initialise_gdb(); /* could be moved earlier */

    do_initcalls();

    schedulers_start();

    if ( opt_watchdog )
        watchdog_enable();

    /* Extract policy from multiboot. */
    extract_acm_policy(mbi, &initrdidx, &_policy_start, &_policy_len);

    /* initialize access control security module */
    acm_init(_policy_start, _policy_len);

    /* Create initial domain 0. */
    dom0 = domain_create(0, 0);
    if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
        panic("Error creating domain 0\n");

    dom0->is_privileged = 1;

    /* Post-create hook sets security label. */
    acm_post_domain0_create(dom0->domain_id);

    /* Grab the DOM0 command line. */
    cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
    if ( cmdline != NULL )
    {
        static char dom0_cmdline[MAX_GUEST_CMDLINE];

        /* Skip past the image name and copy to a local buffer. */
        while ( *cmdline == ' ' ) cmdline++;
        if ( (cmdline = strchr(cmdline, ' ')) != NULL )
        {
            while ( *cmdline == ' ' ) cmdline++;
            strcpy(dom0_cmdline, cmdline);
        }

        cmdline = dom0_cmdline;

        /* Append any extra parameters. */
        if ( skip_ioapic_setup && !strstr(cmdline, "noapic") )
            strcat(cmdline, " noapic");
        if ( acpi_skip_timer_override &&
             !strstr(cmdline, "acpi_skip_timer_override") )
            strcat(cmdline, " acpi_skip_timer_override");
        if ( (strlen(acpi_param) != 0) && !strstr(cmdline, "acpi=") )
        {
            strcat(cmdline, " acpi=");
            strcat(cmdline, acpi_param);
        }
    }

    if ( (initrdidx > 0) && (initrdidx < mbi->mods_count) )
    {
        _initrd_start = initial_images_start +
            (mod[initrdidx].mod_start - mod[0].mod_start);
        _initrd_len = mod[initrdidx].mod_end - mod[initrdidx].mod_start;
    }

    /*
     * We are going to set up domain0 using the module(s) we stashed safely
     * above our heap. The second module, if present, is an initrd ramdisk.
     */
    if ( construct_dom0(dom0,
                        initial_images_start,
                        mod[0].mod_end - mod[0].mod_start,
                        _initrd_start,
                        _initrd_len,
                        cmdline) != 0 )
        panic("Could not set up DOM0 guest OS\n");

    /* Scrub RAM that is still free and so may go to an unprivileged domain. */
    scrub_heap_pages();

    init_trace_bufs();

    console_endboot();

    /* Hide UART from DOM0 if we're using it */
    serial_endboot();

    domain_unpause_by_systemcontroller(dom0);

    startup_cpu_idle_loop();
}
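
/*
 * Report the guest ABIs this build can host, as a space-separated list of
 * "xen-<major>.<minor>-<arch>" and "hvm-<major>.<minor>-<arch>" strings.
 */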
void arch_get_xen_caps(xen_capabilities_info_t info)
{
    char *p = info;
    int major = xen_major_version();
    int minor = xen_minor_version();

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)

    p += sprintf(p, "xen-%d.%d-x86_32 ", major, minor);
    if ( hvm_enabled )
        p += sprintf(p, "hvm-%d.%d-x86_32 ", major, minor);

#elif defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)

    p += sprintf(p, "xen-%d.%d-x86_32p ", major, minor);
    if ( hvm_enabled )
    {
        p += sprintf(p, "hvm-%d.%d-x86_32 ", major, minor);
        p += sprintf(p, "hvm-%d.%d-x86_32p ", major, minor);
    }

#elif defined(CONFIG_X86_64)

    p += sprintf(p, "xen-%d.%d-x86_64 ", major, minor);
#ifdef CONFIG_COMPAT
    p += sprintf(p, "xen-%d.%d-x86_32p ", major, minor);
#endif
    if ( hvm_enabled )
    {
        p += sprintf(p, "hvm-%d.%d-x86_32 ", major, minor);
        p += sprintf(p, "hvm-%d.%d-x86_32p ", major, minor);
        p += sprintf(p, "hvm-%d.%d-x86_64 ", major, minor);
    }

#else

    p++;

#endif

    *(p-1) = 0;

    BUG_ON((p - info) > sizeof(xen_capabilities_info_t));
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */