view xen/arch/x86/setup.c @ 20852:4a54c794bfd4

x86: Fix and clarify 20803:50bd4235f486

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author   Keir Fraser <keir.fraser@citrix.com>
date     Thu Jan 14 11:46:53 2010 +0000
parents  50bd4235f486
children 88d2273c3942

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/serial.h>
#include <xen/softirq.h>
#include <xen/acpi.h>
#include <xen/console.h>
#include <xen/trace.h>
#include <xen/multiboot.h>
#include <xen/domain_page.h>
#include <xen/version.h>
#include <xen/gdbstub.h>
#include <xen/percpu.h>
#include <xen/hypercall.h>
#include <xen/keyhandler.h>
#include <xen/numa.h>
#include <xen/rcupdate.h>
#include <xen/vga.h>
#include <xen/dmi.h>
#include <xen/nodemask.h>
#include <public/version.h>
#ifdef CONFIG_COMPAT
#include <compat/platform.h>
#include <compat/xen.h>
#endif
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/processor.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/paging.h>
#include <asm/e820.h>
#include <xsm/acm/acm_hooks.h>
#include <xen/kexec.h>
#include <asm/edd.h>
#include <xsm/xsm.h>
#include <asm/tboot.h>
#include <asm/bzimage.h> /* for bzimage_headroom */
#include <asm/mach-generic/mach_apic.h> /* for generic_apic_probe */
#include <asm/setup.h>

#if defined(CONFIG_X86_64)
#define BOOTSTRAP_DIRECTMAP_END (1UL << 32) /* 4GB */
#define maddr_to_bootstrap_virt(m) maddr_to_virt(m)
#else
#define BOOTSTRAP_DIRECTMAP_END (1UL << 30) /* 1GB */
#define maddr_to_bootstrap_virt(m) ((void *)(long)(m))
#endif

extern u16 boot_edid_caps;
extern u8 boot_edid_info[128];
extern struct boot_video_info boot_vid_info;

/* opt_nosmp: If true, secondary processors are ignored. */
static int __initdata opt_nosmp = 0;
boolean_param("nosmp", opt_nosmp);

/* maxcpus: maximum number of CPUs to activate. */
static unsigned int __initdata max_cpus = NR_CPUS;
integer_param("maxcpus", max_cpus);

/* opt_watchdog: If true, run a watchdog NMI on each processor. */
static int __initdata opt_watchdog = 0;
boolean_param("watchdog", opt_watchdog);

/* opt_tsc_unstable: Override all tests; assume TSC is unreliable. */
static int opt_tsc_unstable;
boolean_param("tsc_unstable", opt_tsc_unstable);

/* **** Linux config option: propagated to domain0. */
/* "acpi=off":    Disables both ACPI table parsing and interpreter. */
/* "acpi=force":  Override the disable blacklist. */
/* "acpi=strict": Disables out-of-spec workarounds. */
/* "acpi=ht":     Limit ACPI just to boot-time to enable HT. */
/* "acpi=noirq":  Disables ACPI interrupt routing. */
static void parse_acpi_param(char *s);
custom_param("acpi", parse_acpi_param);

/* **** Linux config option: propagated to domain0. */
/* acpi_skip_timer_override: Skip IRQ0 overrides. */
boolean_param("acpi_skip_timer_override", acpi_skip_timer_override);

/* **** Linux config option: propagated to domain0. */
/* noapic: Disable IOAPIC setup. */
boolean_param("noapic", skip_ioapic_setup);

/* **** Linux config option: propagated to domain0. */
/* xen_cpuidle: xen control cstate. */
/*static*/ int xen_cpuidle = -1;
boolean_param("cpuidle", xen_cpuidle);
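
/*
 * Illustrative note: the *_param() hooks above bind Xen command-line
 * tokens to these variables, so a boot line such as
 * "xen.gz watchdog maxcpus=4 acpi=force" (hypothetical) sets
 * opt_watchdog, caps max_cpus at 4, and hands "force" to
 * parse_acpi_param() below.
 */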

int early_boot = 1;

cpumask_t __read_mostly cpu_present_map;

unsigned long __read_mostly xen_phys_start;

#ifdef CONFIG_X86_32
/* Limits of Xen heap, used to initialise the allocator. */
unsigned long __initdata xenheap_initial_phys_start;
unsigned long __read_mostly xenheap_phys_end;
#endif

DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, gdt_table) = boot_cpu_gdt_table;
#ifdef CONFIG_COMPAT
DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, compat_gdt_table)
    = boot_cpu_compat_gdt_table;
#endif

DEFINE_PER_CPU(struct tss_struct, init_tss);

char __attribute__ ((__section__(".bss.stack_aligned"))) cpu0_stack[STACK_SIZE];

struct cpuinfo_x86 __read_mostly boot_cpu_data = { 0, 0, 0, 0, -1 };

unsigned long __read_mostly mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;

int __read_mostly acpi_disabled;

int __read_mostly acpi_force;
static char __initdata acpi_param[10] = "";
static void __init parse_acpi_param(char *s)
{
    /* Save the parameter so it can be propagated to domain0. */
    safe_strcpy(acpi_param, s);

    /* Interpret the parameter for use within Xen. */
    if ( !strcmp(s, "off") )
    {
        disable_acpi();
    }
    else if ( !strcmp(s, "force") )
    {
        acpi_force = 1;
        acpi_ht = 1;
        acpi_disabled = 0;
    }
    else if ( !strcmp(s, "strict") )
    {
        acpi_strict = 1;
    }
    else if ( !strcmp(s, "ht") )
    {
        if ( !acpi_force )
            disable_acpi();
        acpi_ht = 1;
    }
    else if ( !strcmp(s, "noirq") )
    {
        acpi_noirq_set();
    }
}

static void __init do_initcalls(void)
{
    initcall_t *call;
    for ( call = &__initcall_start; call < &__initcall_end; call++ )
        (*call)();
}

#define EARLY_FAIL(f, a...) do {                \
    printk( f , ## a );                         \
    for ( ; ; ) halt();                         \
} while (0)

static unsigned long __initdata initial_images_base;
static unsigned long __initdata initial_images_start;
static unsigned long __initdata initial_images_end;

unsigned long __init initial_images_nrpages(void)
{
    ASSERT(!(initial_images_base & ~PAGE_MASK));
    ASSERT(!(initial_images_end  & ~PAGE_MASK));
    return ((initial_images_end >> PAGE_SHIFT) -
            (initial_images_base >> PAGE_SHIFT));
}

void __init discard_initial_images(void)
{
    init_domheap_pages(initial_images_base, initial_images_end);
}

static void free_xen_data(char *s, char *e)
{
#ifndef MEMORY_GUARD
    init_xenheap_pages(__pa(s), __pa(e));
#endif
    memguard_guard_range(s, e-s);
#if defined(CONFIG_X86_64)
    /* Also zap the mapping in the 1:1 area. */
    memguard_guard_range(__va(__pa(s)), e-s);
#endif
}

extern char __init_begin[], __init_end[], __bss_start[];
extern char __per_cpu_start[], __per_cpu_data_end[];

static void __init percpu_init_areas(void)
{
    unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;

    BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
    BUG_ON((unsigned long)__per_cpu_data_end & ~PAGE_MASK);
    BUG_ON(data_size > PERCPU_SIZE);

    /* Initialise per-cpu data area for all possible secondary CPUs. */
    for ( i = 1; i < NR_CPUS; i++ )
        memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
               __per_cpu_start,
               data_size);
}
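
/*
 * Layout note: each CPU owns a PERCPU_SIZE (== 1 << PERCPU_SHIFT) slot
 * starting at __per_cpu_start + (cpu << PERCPU_SHIFT). The loop above
 * stamps CPU0's initialised template into every other slot;
 * percpu_free_unused_areas() below returns the slots of impossible CPUs,
 * and each possible CPU's unused tail, to the heap.
 */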

static void __init percpu_free_unused_areas(void)
{
    unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;
    unsigned int first_unused;

    /* Find first 'impossible' secondary CPU. */
    for ( i = 1; i < NR_CPUS; i++ )
        if ( !cpu_possible(i) )
            break;
    first_unused = i;

    /* Check that there are no holes in cpu_possible_map. */
    for ( ; i < NR_CPUS; i++ )
        BUG_ON(cpu_possible(i));

    /* Free all unused per-cpu data areas. */
    free_xen_data(&__per_cpu_start[first_unused << PERCPU_SHIFT], __bss_start);

    if ( data_size != PERCPU_SIZE )
        for ( i = 0; i < first_unused; i++ )
            free_xen_data(&__per_cpu_start[(i << PERCPU_SHIFT) + data_size],
                          &__per_cpu_start[(i+1) << PERCPU_SHIFT]);
}

static void __init init_idle_domain(void)
{
    struct domain *idle_domain;

    /* Domain creation requires that scheduler structures are initialised. */
    scheduler_init();

    idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
    if ( idle_domain == NULL )
        BUG();
    idle_domain->vcpu = idle_vcpu;
    idle_domain->max_vcpus = NR_CPUS;
    if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
        BUG();

    set_current(idle_vcpu[0]);
    this_cpu(curr_vcpu) = current;

    setup_idle_pagetable();
}

void __devinit srat_detect_node(int cpu)
{
    unsigned node;
    u32 apicid = x86_cpu_to_apicid[cpu];

    node = apicid_to_node[apicid];
    if ( node == NUMA_NO_NODE || !node_online(node) )
        node = 0;
    numa_set_node(cpu, node);

    if ( acpi_numa > 0 )
        printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
}

/*
 * Ensure a given physical memory range is present in the bootstrap mappings.
 * Use superpage mappings to ensure that pagetable memory needn't be allocated.
 */
static void __init bootstrap_map(unsigned long start, unsigned long end)
{
    unsigned long mask = (1UL << L2_PAGETABLE_SHIFT) - 1;
    start = max_t(unsigned long, start & ~mask, 16UL << 20);
    end   = (end + mask) & ~mask;
    if ( start >= end )
        return;
    if ( end > BOOTSTRAP_DIRECTMAP_END )
        panic("Cannot access memory beyond end of "
              "bootstrap direct-map area\n");
    map_pages_to_xen(
        (unsigned long)maddr_to_bootstrap_virt(start),
        start >> PAGE_SHIFT, (end-start) >> PAGE_SHIFT, PAGE_HYPERVISOR);
}
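
/*
 * Worked example (illustrative): with 2MB superpages (L2_PAGETABLE_SHIFT
 * == 21, mask == 0x1fffff), a request for [0x1234567, 0x1876543) widens
 * outwards to the superpage range [0x1200000, 0x1a00000); anything below
 * the 16MB floor is assumed to be mapped by the boot pagetables already.
 */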

static void __init move_memory(
    unsigned long dst, unsigned long src_start, unsigned long src_end)
{
    bootstrap_map(src_start, src_end);
    bootstrap_map(dst, dst + src_end - src_start);
    memmove(maddr_to_bootstrap_virt(dst),
            maddr_to_bootstrap_virt(src_start),
            src_end - src_start);
}

static void __init setup_max_pdx(void)
{
#ifdef __x86_64__
    max_pdx = pfn_to_pdx(max_page - 1) + 1;

    if ( max_pdx > (DIRECTMAP_SIZE >> PAGE_SHIFT) )
        max_pdx = DIRECTMAP_SIZE >> PAGE_SHIFT;

    if ( max_pdx > FRAMETABLE_SIZE / sizeof(*frame_table) )
        max_pdx = FRAMETABLE_SIZE / sizeof(*frame_table);

    max_page = pdx_to_pfn(max_pdx - 1) + 1;
#endif
}

void set_pdx_range(unsigned long smfn, unsigned long emfn)
{
    unsigned long idx, eidx;

    idx  = pfn_to_pdx(smfn) / PDX_GROUP_COUNT;
    eidx = (pfn_to_pdx(emfn - 1) + PDX_GROUP_COUNT) / PDX_GROUP_COUNT;
    for ( ; idx < eidx; ++idx )
        __set_bit(idx, pdx_group_valid);
}
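
/*
 * Background note: a PDX is a page index with a large always-zero span of
 * machine-address bits compressed out, so sparse physical maps do not
 * inflate the frame table or the direct map. set_pdx_range() records, one
 * PDX_GROUP_COUNT-sized group at a time, which compressed indices contain
 * RAM.
 */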

/* A temporary copy of the e820 map that we can mess with during bootstrap. */
static struct e820map __initdata boot_e820;

struct boot_video_info {
    u8  orig_x;             /* 0x00 */
    u8  orig_y;             /* 0x01 */
    u8  orig_video_mode;    /* 0x02 */
    u8  orig_video_cols;    /* 0x03 */
    u8  orig_video_lines;   /* 0x04 */
    u8  orig_video_isVGA;   /* 0x05 */
    u16 orig_video_points;  /* 0x06 */

    /* VESA graphic mode -- linear frame buffer */
    u32 capabilities;       /* 0x08 */
    u16 lfb_linelength;     /* 0x0c */
    u16 lfb_width;          /* 0x0e */
    u16 lfb_height;         /* 0x10 */
    u16 lfb_depth;          /* 0x12 */
    u32 lfb_base;           /* 0x14 */
    u32 lfb_size;           /* 0x18 */
    u8  red_size;           /* 0x1c */
    u8  red_pos;            /* 0x1d */
    u8  green_size;         /* 0x1e */
    u8  green_pos;          /* 0x1f */
    u8  blue_size;          /* 0x20 */
    u8  blue_pos;           /* 0x21 */
    u8  rsvd_size;          /* 0x22 */
    u8  rsvd_pos;           /* 0x23 */
    u16 vesapm_seg;         /* 0x24 */
    u16 vesapm_off;         /* 0x26 */
    u16 vesa_attrib;        /* 0x28 */
};

static void __init parse_video_info(void)
{
    struct boot_video_info *bvi = &bootsym(boot_vid_info);

    if ( (bvi->orig_video_isVGA == 1) && (bvi->orig_video_mode == 3) )
    {
        vga_console_info.video_type = XEN_VGATYPE_TEXT_MODE_3;
        vga_console_info.u.text_mode_3.font_height = bvi->orig_video_points;
        vga_console_info.u.text_mode_3.cursor_x = bvi->orig_x;
        vga_console_info.u.text_mode_3.cursor_y = bvi->orig_y;
        vga_console_info.u.text_mode_3.rows = bvi->orig_video_lines;
        vga_console_info.u.text_mode_3.columns = bvi->orig_video_cols;
    }
    else if ( bvi->orig_video_isVGA == 0x23 )
    {
        vga_console_info.video_type = XEN_VGATYPE_VESA_LFB;
        vga_console_info.u.vesa_lfb.width = bvi->lfb_width;
        vga_console_info.u.vesa_lfb.height = bvi->lfb_height;
        vga_console_info.u.vesa_lfb.bytes_per_line = bvi->lfb_linelength;
        vga_console_info.u.vesa_lfb.bits_per_pixel = bvi->lfb_depth;
        vga_console_info.u.vesa_lfb.lfb_base = bvi->lfb_base;
        vga_console_info.u.vesa_lfb.lfb_size = bvi->lfb_size;
        vga_console_info.u.vesa_lfb.red_pos = bvi->red_pos;
        vga_console_info.u.vesa_lfb.red_size = bvi->red_size;
        vga_console_info.u.vesa_lfb.green_pos = bvi->green_pos;
        vga_console_info.u.vesa_lfb.green_size = bvi->green_size;
        vga_console_info.u.vesa_lfb.blue_pos = bvi->blue_pos;
        vga_console_info.u.vesa_lfb.blue_size = bvi->blue_size;
        vga_console_info.u.vesa_lfb.rsvd_pos = bvi->rsvd_pos;
        vga_console_info.u.vesa_lfb.rsvd_size = bvi->rsvd_size;
        vga_console_info.u.vesa_lfb.gbl_caps = bvi->capabilities;
        vga_console_info.u.vesa_lfb.mode_attrs = bvi->vesa_attrib;
    }
}

static void __init kexec_reserve_area(struct e820map *e820)
{
    unsigned long kdump_start = kexec_crash_area.start;
    unsigned long kdump_size  = kexec_crash_area.size;
    static int is_reserved = 0;

    kdump_size = (kdump_size + PAGE_SIZE - 1) & PAGE_MASK;

    if ( (kdump_start == 0) || (kdump_size == 0) || is_reserved )
        return;

    is_reserved = 1;

    if ( !reserve_e820_ram(e820, kdump_start, kdump_start + kdump_size) )
    {
        printk("Kdump: DISABLED (failed to reserve %luMB (%lukB) at 0x%lx)"
               "\n", kdump_size >> 20, kdump_size >> 10, kdump_start);
        kexec_crash_area.start = kexec_crash_area.size = 0;
    }
    else
    {
        printk("Kdump: %luMB (%lukB) at 0x%lx\n",
               kdump_size >> 20, kdump_size >> 10, kdump_start);
    }
}

void init_done(void)
{
    /* Free (or page-protect) the init areas. */
    memset(__init_begin, 0xcc, __init_end - __init_begin); /* int3 poison */
    free_xen_data(__init_begin, __init_end);
    printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);

    startup_cpu_idle_loop();
}

static char * __init cmdline_cook(char *p)
{
    p = p ? : "";
    while ( *p == ' ' )
        p++;
    while ( (*p != ' ') && (*p != '\0') )
        p++;
    while ( *p == ' ' )
        p++;
    return p;
}
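
/*
 * Illustrative example: cmdline_cook("xen.gz com1=115200 console=com1")
 * steps over leading spaces and the image name, returning a pointer to
 * "com1=115200 console=com1"; a NULL argument cooks to "".
 */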

void __init __start_xen(unsigned long mbi_p)
{
    char *memmap_type = NULL;
    char *cmdline, *kextra;
    unsigned long _initrd_start = 0, _initrd_len = 0;
    unsigned int initrdidx = 1;
    multiboot_info_t *mbi = __va(mbi_p);
    module_t *mod = (module_t *)__va(mbi->mods_addr);
    unsigned long nr_pages, modules_length, modules_headroom;
    int i, j, e820_warn = 0, bytes = 0;
    bool_t acpi_boot_table_init_done = 0;
    struct ns16550_defaults ns16550 = {
        .data_bits = 8,
        .parity    = 'n',
        .stop_bits = 1
    };

    percpu_init_areas();

    set_intr_gate(TRAP_page_fault, &early_page_fault);

    /* Parse the command-line options. */
    cmdline = cmdline_cook((mbi->flags & MBI_CMDLINE) ?
                           __va(mbi->cmdline) : NULL);
    if ( (kextra = strstr(cmdline, " -- ")) != NULL )
    {
        /*
         * Options after ' -- ' separator belong to dom0.
         *  1. Orphan dom0's options from Xen's command line.
         *  2. Skip all but final leading space from dom0's options.
         */
        *kextra = '\0';
        kextra += 3;
        while ( kextra[1] == ' ' ) kextra++;
    }
    cmdline_parse(cmdline);

    /* If TSC is marked as unstable, clear all enhanced TSC features. */
    if ( opt_tsc_unstable )
    {
        setup_clear_cpu_cap(X86_FEATURE_CONSTANT_TSC);
        setup_clear_cpu_cap(X86_FEATURE_NONSTOP_TSC);
        setup_clear_cpu_cap(X86_FEATURE_TSC_RELIABLE);
    }

    parse_video_info();

    set_current((struct vcpu *)0xfffff000); /* debug sanity */
    idle_vcpu[0] = current;
    set_processor_id(0); /* needed early, for smp_processor_id() */
    if ( cpu_has_efer )
        rdmsrl(MSR_EFER, this_cpu(efer));
    asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );

    smp_prepare_boot_cpu();
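
    /*
     * Note: the two ns16550 probes below use the legacy PC defaults,
     * COM1 (port 0x3f8, IRQ 4) and COM2 (port 0x2f8, IRQ 3).
     */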
    /* We initialise the serial devices very early so we can get debugging. */
    ns16550.io_base = 0x3f8;
    ns16550.irq     = 4;
    ns16550_init(0, &ns16550);
    ns16550.io_base = 0x2f8;
    ns16550.irq     = 3;
    ns16550_init(1, &ns16550);
    console_init_preirq();

    printk("Command line: %s\n", cmdline);

    printk("Video information:\n");

    /* Print VGA display mode information. */
    switch ( vga_console_info.video_type )
    {
    case XEN_VGATYPE_TEXT_MODE_3:
        printk(" VGA is text mode %dx%d, font 8x%d\n",
               vga_console_info.u.text_mode_3.columns,
               vga_console_info.u.text_mode_3.rows,
               vga_console_info.u.text_mode_3.font_height);
        break;
    case XEN_VGATYPE_VESA_LFB:
        printk(" VGA is graphics mode %dx%d, %d bpp\n",
               vga_console_info.u.vesa_lfb.width,
               vga_console_info.u.vesa_lfb.height,
               vga_console_info.u.vesa_lfb.bits_per_pixel);
        break;
    default:
        printk(" No VGA detected\n");
        break;
    }

    /* Print VBE/DDC EDID information. */
    if ( bootsym(boot_edid_caps) != 0x1313 )
    {
        u16 caps = bootsym(boot_edid_caps);
        printk(" VBE/DDC methods:%s%s%s; ",
               (caps & 1) ? " V1" : "",
               (caps & 2) ? " V2" : "",
               !(caps & 3) ? " none" : "");
        printk("EDID transfer time: %d seconds\n", caps >> 8);
        if ( *(u32 *)bootsym(boot_edid_info) == 0x13131313 )
        {
            printk(" EDID info not retrieved because ");
            if ( !(caps & 3) )
                printk("no DDC retrieval method detected\n");
            else if ( (caps >> 8) > 5 )
                printk("takes longer than 5 seconds\n");
            else
                printk("of reasons unknown\n");
        }
    }
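
    /*
     * (The 0x1313/0x13131313 tests above appear to be sentinel values
     * pre-loaded by the real-mode boot code: if they survive, the EDID
     * query never ran or returned no data.)
     */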

    printk("Disc information:\n");
    printk(" Found %d MBR signatures\n",
           bootsym(boot_mbr_signature_nr));
    printk(" Found %d EDD information structures\n",
           bootsym(boot_edd_info_nr));

    /* Check that we have at least one Multiboot module. */
    if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
        EARLY_FAIL("dom0 kernel not specified. "
                   "Check bootloader configuration.\n");

    if ( ((unsigned long)cpu0_stack & (STACK_SIZE-1)) != 0 )
        EARLY_FAIL("Misaligned CPU0 stack.\n");

    if ( e820_raw_nr != 0 )
    {
        memmap_type = "Xen-e820";
    }
    else if ( bootsym(lowmem_kb) )
    {
        memmap_type = "Xen-e801";
        e820_raw[0].addr = 0;
        e820_raw[0].size = bootsym(lowmem_kb) << 10;
        e820_raw[0].type = E820_RAM;
        e820_raw[1].addr = 0x100000;
        e820_raw[1].size = bootsym(highmem_kb) << 10;
        e820_raw[1].type = E820_RAM;
        e820_raw_nr = 2;
    }
    else if ( mbi->flags & MBI_MEMMAP )
    {
        memmap_type = "Multiboot-e820";
        while ( (bytes < mbi->mmap_length) && (e820_raw_nr < E820MAX) )
        {
            memory_map_t *map = __va(mbi->mmap_addr + bytes);

            /*
             * This is a gross workaround for a BIOS bug. Some bootloaders do
             * not write e820 map entries into pre-zeroed memory. This is
             * okay if the BIOS fills in all fields of the map entry, but
             * some broken BIOSes do not bother to write the high word of
             * the length field if the length is smaller than 4GB. We
             * detect and fix this by flagging sections below 4GB that
             * appear to be larger than 4GB in size.
             */
            if ( (map->base_addr_high == 0) && (map->length_high != 0) )
            {
                if ( !e820_warn )
                {
                    printk("WARNING: Buggy e820 map detected and fixed "
                           "(truncated length fields).\n");
                    e820_warn = 1;
                }
                map->length_high = 0;
            }

            e820_raw[e820_raw_nr].addr =
                ((u64)map->base_addr_high << 32) | (u64)map->base_addr_low;
            e820_raw[e820_raw_nr].size =
                ((u64)map->length_high << 32) | (u64)map->length_low;
            e820_raw[e820_raw_nr].type = map->type;
            e820_raw_nr++;

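            /*
             * Note: Multiboot's mmap 'size' field does not count the field
             * itself, hence the extra 4 bytes when advancing.
             */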
            bytes += map->size + 4;
        }
    }
    else if ( mbi->flags & MBI_MEMLIMITS )
    {
        memmap_type = "Multiboot-e801";
        e820_raw[0].addr = 0;
        e820_raw[0].size = mbi->mem_lower << 10;
        e820_raw[0].type = E820_RAM;
        e820_raw[1].addr = 0x100000;
        e820_raw[1].size = mbi->mem_upper << 10;
        e820_raw[1].type = E820_RAM;
        e820_raw_nr = 2;
    }
    else
    {
        EARLY_FAIL("Bootloader provided no memory information.\n");
    }

    /* Sanitise the raw E820 map to produce a final clean version. */
    max_page = init_e820(memmap_type, e820_raw, &e820_raw_nr);

    /* Create a temporary copy of the E820 map. */
    memcpy(&boot_e820, &e820, sizeof(e820));

    /* Early kexec reservation (explicit static start address). */
    kexec_reserve_area(&boot_e820);

    /*
     * Iterate backwards over all superpage-aligned RAM regions.
     *
     * We require superpage alignment because the boot allocator is not yet
     * initialised. Hence we can only map superpages in the address range
     * 0 to BOOTSTRAP_DIRECTMAP_END, as this is guaranteed not to require
     * dynamic allocation of pagetables.
     *
     * As well as mapping superpages in that range, in preparation for
     * initialising the boot allocator, we also look for a region to which
     * we can relocate the dom0 kernel and other multiboot modules. Also, on
     * x86/64, we relocate Xen to higher memory.
     */
    modules_length = 0;
    for ( i = 0; i < mbi->mods_count; i++ )
        modules_length += mod[i].mod_end - mod[i].mod_start;

    /* ensure mod[0] is mapped before parsing */
    bootstrap_map(mod[0].mod_start, mod[0].mod_end);
    modules_headroom = bzimage_headroom(
        (char *)(unsigned long)mod[0].mod_start,
        (unsigned long)(mod[0].mod_end - mod[0].mod_start));

    for ( i = boot_e820.nr_map-1; i >= 0; i-- )
    {
        uint64_t s, e, mask = (1UL << L2_PAGETABLE_SHIFT) - 1;

        /* Superpage-aligned chunks from 16MB to BOOTSTRAP_DIRECTMAP_END. */
        s = (boot_e820.map[i].addr + mask) & ~mask;
        e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
        s = max_t(uint64_t, s, 16 << 20);
        e = min_t(uint64_t, e, BOOTSTRAP_DIRECTMAP_END);
        if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
            continue;

        set_pdx_range(s >> PAGE_SHIFT, e >> PAGE_SHIFT);

        /* Map the chunk. No memory will need to be allocated to do this. */
        map_pages_to_xen(
            (unsigned long)maddr_to_bootstrap_virt(s),
            s >> PAGE_SHIFT, (e-s) >> PAGE_SHIFT, PAGE_HYPERVISOR);

#if defined(CONFIG_X86_64)
#define reloc_size ((__pa(&_end) + mask) & ~mask)
        /* Is the region suitable for relocating Xen? */
        if ( !xen_phys_start && ((e-s) >= reloc_size) )
        {
            extern l2_pgentry_t l2_xenmap[];
            l4_pgentry_t *pl4e;
            l3_pgentry_t *pl3e;
            l2_pgentry_t *pl2e;
            int i, j, k;

            /* Select relocation address. */
            e -= reloc_size;
            xen_phys_start = e;
            bootsym(trampoline_xen_phys_start) = e;

            /*
             * Perform relocation to new physical address.
             * Before doing so we must sync static/global data with main memory
             * with a barrier(). After this we must *not* modify static/global
             * data until after we have switched to the relocated pagetables!
             */
            barrier();
            move_memory(e, 0, __pa(&_end) - xen_phys_start);

            /* Poison low 1MB to detect stray pointers to physical 0-1MB. */
            memset(maddr_to_bootstrap_virt(e), 0x55, 1U << 20);

            /* Walk initial pagetables, relocating page directory entries. */
            pl4e = __va(__pa(idle_pg_table));
            for ( i = 0 ; i < L4_PAGETABLE_ENTRIES; i++, pl4e++ )
            {
                if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
                    continue;
                *pl4e = l4e_from_intpte(l4e_get_intpte(*pl4e) +
                                        xen_phys_start);
                pl3e = l4e_to_l3e(*pl4e);
                for ( j = 0; j < L3_PAGETABLE_ENTRIES; j++, pl3e++ )
                {
                    /* Not present, 1GB mapping, or already relocated? */
                    if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) ||
                         (l3e_get_flags(*pl3e) & _PAGE_PSE) ||
                         (l3e_get_pfn(*pl3e) > 0x1000) )
                        continue;
                    *pl3e = l3e_from_intpte(l3e_get_intpte(*pl3e) +
                                            xen_phys_start);
                    pl2e = l3e_to_l2e(*pl3e);
                    for ( k = 0; k < L2_PAGETABLE_ENTRIES; k++, pl2e++ )
                    {
                        /* Not present, PSE, or already relocated? */
                        if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) ||
                             (l2e_get_flags(*pl2e) & _PAGE_PSE) ||
                             (l2e_get_pfn(*pl2e) > 0x1000) )
                            continue;
                        *pl2e = l2e_from_intpte(l2e_get_intpte(*pl2e) +
                                                xen_phys_start);
                    }
                }
            }

            /* The only data mappings to be relocated are in the Xen area. */
            pl2e = __va(__pa(l2_xenmap));
            *pl2e++ = l2e_from_pfn(xen_phys_start >> PAGE_SHIFT,
                                   PAGE_HYPERVISOR | _PAGE_PSE);
            for ( i = 1; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
            {
                if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
                    continue;
                *pl2e = l2e_from_intpte(l2e_get_intpte(*pl2e) +
                                        xen_phys_start);
            }
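
            /*
             * Note: the CR4.PGE toggle in the asm below also flushes global
             * TLB entries, so no stale pre-relocation translations survive
             * the switch to the relocated pagetables.
             */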
            /* Re-sync the stack and then switch to relocated pagetables. */
            asm volatile (
                "rep movsb        ; " /* re-sync the stack */
                "movq %%cr4,%%rsi ; "
                "andb $0x7f,%%sil ; "
                "movq %%rsi,%%cr4 ; " /* CR4.PGE == 0 */
                "movq %0,%%cr3    ; " /* CR3 == new pagetables */
                "orb $0x80,%%sil  ; "
                "movq %%rsi,%%cr4 "   /* CR4.PGE == 1 */
                : : "r" (__pa(idle_pg_table)), "S" (cpu0_stack),
                    "D" (__va(__pa(cpu0_stack))), "c" (STACK_SIZE) : "memory" );
        }
#endif

        /* Is the region suitable for relocating the multiboot modules? */
        if ( !initial_images_start && (s < e) &&
             ((e-s) >= (modules_length+modules_headroom)) )
        {
            initial_images_end = e;
            initial_images_start = initial_images_end - modules_length;
            initial_images_base = initial_images_start - modules_headroom;
            initial_images_base &= PAGE_MASK;
            for ( j = mbi->mods_count-1; j >= 0; j-- )
            {
                e -= mod[j].mod_end - mod[j].mod_start;
                move_memory(e, mod[j].mod_start, mod[j].mod_end);
                mod[j].mod_end += e - mod[j].mod_start;
                mod[j].mod_start = e;
            }
            e = initial_images_base;
        }

        if ( !kexec_crash_area.start && (s < e) &&
             ((e-s) >= kexec_crash_area.size) )
        {
            e = (e - kexec_crash_area.size) & PAGE_MASK;
            kexec_crash_area.start = e;
        }
    }

    if ( !initial_images_start )
        EARLY_FAIL("Not enough memory to relocate the dom0 kernel image.\n");
    reserve_e820_ram(&boot_e820, initial_images_base, initial_images_end);

#if defined(CONFIG_X86_32)
    xenheap_initial_phys_start = (PFN_UP(__pa(&_end)) + 1) << PAGE_SHIFT;
    /* Must pass a single mapped page for populating bootmem_region_list. */
    init_boot_pages(__pa(&_end), xenheap_initial_phys_start);
    xenheap_phys_end = DIRECTMAP_MBYTES << 20;
#else
    if ( !xen_phys_start )
        EARLY_FAIL("Not enough memory to relocate Xen.\n");
    reserve_e820_ram(&boot_e820, __pa(&_start), __pa(&_end));
#endif

    /* Late kexec reservation (dynamic start address). */
    kexec_reserve_area(&boot_e820);

    setup_max_pdx();

    /*
     * Walk every RAM region and map it in its entirety (on x86/64, at least)
     * and notify it to the boot allocator.
     */
    for ( nr_pages = i = 0; i < boot_e820.nr_map; i++ )
    {
        uint64_t s, e, map_s, map_e, mask = PAGE_SIZE - 1;

        /* Only page alignment required now. */
        s = (boot_e820.map[i].addr + mask) & ~mask;
        e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
#if defined(CONFIG_X86_32)
        s = max_t(uint64_t, s, xenheap_phys_end);
#else
        s = max_t(uint64_t, s, 1<<20);
#endif
        if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
            continue;

#ifdef __x86_64__
        if ( !acpi_boot_table_init_done &&
             s >= BOOTSTRAP_DIRECTMAP_END &&
             !acpi_boot_table_init() )
        {
            acpi_boot_table_init_done = 1;
            srat_parse_regions(s);
            setup_max_pdx();
        }

        if ( pfn_to_pdx((e - 1) >> PAGE_SHIFT) >= max_pdx )
        {
            if ( pfn_to_pdx(s >> PAGE_SHIFT) >= max_pdx )
            {
                for ( j = i - 1; ; --j )
                {
                    if ( boot_e820.map[j].type == E820_RAM )
                        break;
                    ASSERT(j);
                }
                map_e = boot_e820.map[j].addr + boot_e820.map[j].size;
                if ( (map_e >> PAGE_SHIFT) < max_page )
                {
                    max_page = map_e >> PAGE_SHIFT;
                    max_pdx = pfn_to_pdx(max_page - 1) + 1;
                }
                printk(XENLOG_WARNING "Ignoring inaccessible memory range"
                       " %013"PRIx64"-%013"PRIx64"\n",
                       s, e);
                continue;
            }
            map_e = e;
            e = (pdx_to_pfn(max_pdx - 1) + 1ULL) << PAGE_SHIFT;
            printk(XENLOG_WARNING "Ignoring inaccessible memory range"
                   " %013"PRIx64"-%013"PRIx64"\n",
                   e, map_e);
        }
#endif

        set_pdx_range(s >> PAGE_SHIFT, e >> PAGE_SHIFT);

        /* Need to create mappings above 16MB. */
        map_s = max_t(uint64_t, s, 16<<20);
        map_e = e;
#if defined(CONFIG_X86_32) /* mappings are truncated on x86_32 */
        map_e = min_t(uint64_t, map_e, BOOTSTRAP_DIRECTMAP_END);
#endif

        /* Pass mapped memory to allocator /before/ creating new mappings. */
        init_boot_pages(s, min_t(uint64_t, map_s, e));

        /* Create new mappings /before/ passing memory to the allocator. */
        if ( map_s < map_e )
            map_pages_to_xen(
                (unsigned long)maddr_to_bootstrap_virt(map_s),
                map_s >> PAGE_SHIFT, (map_e-map_s) >> PAGE_SHIFT,
                PAGE_HYPERVISOR);

        /* Pass remainder of this memory chunk to the allocator. */
        init_boot_pages(map_s, e);
        nr_pages += (e - s) >> PAGE_SHIFT;
    }
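
    /*
     * Rationale note: the ordering comments above matter because
     * map_pages_to_xen() may allocate intermediate pagetables from the
     * boot allocator, so the allocator must only ever hold pages that
     * are already mapped.
     */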

    memguard_init();

    printk("System RAM: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));
    total_pages = nr_pages;

    /* Sanity check for unwanted bloat of certain hypercall structures. */
    BUILD_BUG_ON(sizeof(((struct xen_platform_op *)0)->u) !=
                 sizeof(((struct xen_platform_op *)0)->u.pad));
    BUILD_BUG_ON(sizeof(((struct xen_domctl *)0)->u) !=
                 sizeof(((struct xen_domctl *)0)->u.pad));
    BUILD_BUG_ON(sizeof(((struct xen_sysctl *)0)->u) !=
                 sizeof(((struct xen_sysctl *)0)->u.pad));

    BUILD_BUG_ON(sizeof(start_info_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(struct vcpu_info) != 64);

#ifdef CONFIG_COMPAT
    BUILD_BUG_ON(sizeof(((struct compat_platform_op *)0)->u) !=
                 sizeof(((struct compat_platform_op *)0)->u.pad));
    BUILD_BUG_ON(sizeof(start_info_compat_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(struct compat_vcpu_info) != 64);
#endif

    /* Check definitions in public headers match internal defs. */
    BUILD_BUG_ON(__HYPERVISOR_VIRT_START != HYPERVISOR_VIRT_START);
#ifdef HYPERVISOR_VIRT_END
    BUILD_BUG_ON(__HYPERVISOR_VIRT_END != HYPERVISOR_VIRT_END);
#endif
    BUILD_BUG_ON(MACH2PHYS_VIRT_START != RO_MPT_VIRT_START);
    BUILD_BUG_ON(MACH2PHYS_VIRT_END != RO_MPT_VIRT_END);

    init_frametable();

    if ( !acpi_boot_table_init_done )
        acpi_boot_table_init();

    acpi_numa_init();

    numa_initmem_init(0, max_page);

#if defined(CONFIG_X86_32)
    /* Initialise the Xen heap. */
    for ( nr_pages = i = 0; i < boot_e820.nr_map; i++ )
    {
        uint64_t s = boot_e820.map[i].addr;
        uint64_t e = s + boot_e820.map[i].size;
        s = max_t(uint64_t, s, xenheap_initial_phys_start);
        e = min_t(uint64_t, e, xenheap_phys_end);
        if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
            continue;
        init_xenheap_pages(s, e);
        nr_pages += (e - s) >> PAGE_SHIFT;
    }
    printk("Xen heap: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));
#endif

    end_boot_allocator();
    early_boot = 0;

#if defined(CONFIG_X86_64)
    vesa_init();
#endif

    softirq_init();

    early_cpu_init();

    paging_init();

    tboot_probe();

    /* Unmap the first page of CPU0's stack. */
    memguard_guard_stack(cpu0_stack);

    open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);

    if ( opt_watchdog )
        nmi_watchdog = NMI_LOCAL_APIC;

    sort_exception_tables();

    find_smp_config();

    dmi_scan_machine();

    generic_apic_probe();

    acpi_boot_init();

    if ( x2apic_is_available() )
        enable_x2apic();

    init_cpu_to_node();

    if ( smp_found_config )
        get_smp_config();

#ifdef CONFIG_X86_64
    /* Low mappings were only needed for some BIOS table parsing. */
    zap_low_mappings();
#endif

    init_apic_mappings();

    percpu_free_unused_areas();

    init_IRQ();

    xsm_init(&initrdidx, mbi, initial_images_start);

    init_idle_domain();

    trap_init();

    rcu_init();

    timer_init();

    early_time_init();

    arch_init_memory();

    identify_cpu(&boot_cpu_data);
    if ( cpu_has_fxsr )
        set_in_cr4(X86_CR4_OSFXSR);
    if ( cpu_has_xmm )
        set_in_cr4(X86_CR4_OSXMMEXCPT);

    local_irq_enable();

#ifdef CONFIG_X86_64
    vesa_mtrr_init();
#endif

    if ( opt_nosmp )
        max_cpus = 0;

    iommu_setup();    /* setup iommu if available */

    smp_prepare_cpus(max_cpus);

    spin_debug_enable();

    /*
     * Initialise higher-level timer functions. We do this fairly late
     * (post-SMP) because the time bases and scale factors need to be updated
     * regularly, and SMP initialisation can cause a long delay with
     * interrupts not yet enabled.
     */
    init_xen_time();

    initialize_keytable();

    console_init_postirq();

    for_each_present_cpu ( i )
    {
        if ( num_online_cpus() >= max_cpus )
            break;
        if ( !cpu_online(i) )
        {
            rcu_online_cpu(i);
            __cpu_up(i);
        }

        /* Set up cpu_to_node[]. */
        srat_detect_node(i);
        /* Set up node_to_cpumask based on cpu_to_node[]. */
        numa_add_cpu(i);
    }

    printk("Brought up %ld CPUs\n", (long)num_online_cpus());
    smp_cpus_done(max_cpus);

    initialise_gdb(); /* could be moved earlier */

    do_initcalls();

    if ( opt_watchdog )
        watchdog_enable();

    if ( !tboot_protect_mem_regions() )
        panic("Could not protect TXT memory regions\n");

    /* Create initial domain 0. */
    dom0 = domain_create(0, DOMCRF_s3_integrity, DOM0_SSIDREF);
    if ( (dom0 == NULL) || (alloc_dom0_vcpu0() == NULL) )
        panic("Error creating domain 0\n");

    dom0->is_privileged = 1;
    dom0->target = NULL;

    /* Grab the DOM0 command line. */
    cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
    if ( (cmdline != NULL) || (kextra != NULL) )
    {
        static char dom0_cmdline[MAX_GUEST_CMDLINE];

        cmdline = cmdline_cook(cmdline);
        safe_strcpy(dom0_cmdline, cmdline);

        if ( kextra != NULL )
            /* kextra always includes exactly one leading space. */
            safe_strcat(dom0_cmdline, kextra);

        /* Append any extra parameters. */
        if ( skip_ioapic_setup && !strstr(dom0_cmdline, "noapic") )
            safe_strcat(dom0_cmdline, " noapic");
        if ( acpi_skip_timer_override &&
             !strstr(dom0_cmdline, "acpi_skip_timer_override") )
            safe_strcat(dom0_cmdline, " acpi_skip_timer_override");
        if ( (strlen(acpi_param) == 0) && acpi_disabled )
        {
            printk("ACPI is disabled, notifying Domain 0 (acpi=off)\n");
            safe_strcpy(acpi_param, "off");
        }
        if ( (strlen(acpi_param) != 0) && !strstr(dom0_cmdline, "acpi=") )
        {
            safe_strcat(dom0_cmdline, " acpi=");
            safe_strcat(dom0_cmdline, acpi_param);
        }

        cmdline = dom0_cmdline;
    }
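
    /*
     * Illustrative example: with Xen booted as "xen.gz acpi=off -- quiet",
     * kextra is " quiet" (exactly one leading space), so dom0's command
     * line becomes its module string plus " quiet acpi=off".
     */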

    if ( (initrdidx > 0) && (initrdidx < mbi->mods_count) )
    {
        _initrd_start = mod[initrdidx].mod_start;
        _initrd_len   = mod[initrdidx].mod_end - mod[initrdidx].mod_start;
    }

    if ( xen_cpuidle )
        xen_processor_pmbits |= XEN_PROCESSOR_PM_CX;

    /*
     * We're going to setup domain0 using the module(s) that we stashed safely
     * above our heap. The second module, if present, is an initrd ramdisk.
     */
    if ( construct_dom0(dom0,
                        initial_images_base,
                        initial_images_start,
                        mod[0].mod_end-mod[0].mod_start,
                        _initrd_start,
                        _initrd_len,
                        cmdline) != 0)
        panic("Could not set up DOM0 guest OS\n");

    /* Scrub RAM that is still free and so may go to an unprivileged domain. */
    scrub_heap_pages();

    init_trace_bufs();

    init_tmem();

    console_endboot();

    /* Hide UART from DOM0 if we're using it */
    serial_endboot();

    domain_unpause_by_systemcontroller(dom0);

    reset_stack_and_jump(init_done);
}

void arch_get_xen_caps(xen_capabilities_info_t *info)
{
    /* Interface name is always xen-3.0-* for Xen-3.x. */
    int major = 3, minor = 0;
    char s[32];

    (*info)[0] = '\0';

#ifdef CONFIG_X86_64
    snprintf(s, sizeof(s), "xen-%d.%d-x86_64 ", major, minor);
    safe_strcat(*info, s);
#endif
    snprintf(s, sizeof(s), "xen-%d.%d-x86_32p ", major, minor);
    safe_strcat(*info, s);
    if ( hvm_enabled )
    {
        snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
        safe_strcat(*info, s);
        snprintf(s, sizeof(s), "hvm-%d.%d-x86_32p ", major, minor);
        safe_strcat(*info, s);
#ifdef CONFIG_X86_64
        snprintf(s, sizeof(s), "hvm-%d.%d-x86_64 ", major, minor);
        safe_strcat(*info, s);
#endif
    }
}
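
/*
 * Example output on a 64-bit build with HVM support enabled:
 *   "xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32 hvm-3.0-x86_32p hvm-3.0-x86_64 "
 */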

int xen_in_range(unsigned long mfn)
{
    paddr_t start, end;
    int i;

    enum { region_s3, region_text, region_percpu, region_bss, nr_regions };
    static struct {
        paddr_t s, e;
    } xen_regions[nr_regions];

    /* initialize first time */
    if ( !xen_regions[0].s )
    {
        /* S3 resume code (and other real mode trampoline code) */
        xen_regions[region_s3].s = bootsym_phys(trampoline_start);
        xen_regions[region_s3].e = bootsym_phys(trampoline_end);
        /* hypervisor code + data */
        xen_regions[region_text].s = __pa(&_stext);
        xen_regions[region_text].e = __pa(&__init_begin);
        /* per-cpu data */
        xen_regions[region_percpu].s = __pa(&__per_cpu_start);
        xen_regions[region_percpu].e = xen_regions[region_percpu].s +
            (((paddr_t)last_cpu(cpu_possible_map) + 1) << PERCPU_SHIFT);
        /* bss */
        xen_regions[region_bss].s = __pa(&__bss_start);
        xen_regions[region_bss].e = __pa(&_end);
    }

    start = (paddr_t)mfn << PAGE_SHIFT;
    end   = start + PAGE_SIZE;
    for ( i = 0; i < nr_regions; i++ )
    {
        if ( (start >= xen_regions[i].e) || (end <= xen_regions[i].s) )
            continue;

        if ( i == region_percpu )
        {
            /*
             * Check if the given page falls into an unused (and therefore
             * freed) section of the per-cpu data space. Each CPU's data
             * area is page-aligned, so the following arithmetic is safe.
             */
            unsigned int off = ((start - (unsigned long)__per_cpu_start)
                                & (PERCPU_SIZE - 1));
            unsigned int data_sz = __per_cpu_data_end - __per_cpu_start;

            return off < data_sz;
        }

        return 1;
    }

    return 0;
}
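
/*
 * Note: within the per-cpu region only the first data_sz bytes of each
 * PERCPU_SIZE slot still belong to Xen; the slot tails were returned to
 * the heap by percpu_free_unused_areas(), so pages there report as
 * not-Xen.
 */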

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */