
view xenolinux-2.4.21-sparse/arch/xeno/kernel/setup.c @ 672:2e36bfa1978c

bitkeeper revision 1.353 (3f13c7d5NDWHAyq2VYrx7Mqs7QYIfQ)

vga.h, keyboard.h, setup.c, ioport.c: Cleanups.

author   kaf24@scramble.cl.cam.ac.uk
date     Tue Jul 15 09:22:29 2003 +0000
parents  6879a4610638
/*
 *  linux/arch/i386/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/apm_bios.h>
#ifdef CONFIG_BLK_DEV_RAM
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/hypervisor.h>
#include <asm/hypervisor-ifs/dom0_ops.h>

shared_info_t *HYPERVISOR_shared_info;
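
/*
 * Xen guests see "pseudo-physical" memory: the table below, built from
 * the hypervisor-provided page tables later in setup_arch(), maps each
 * pseudo-physical page frame to the machine frame that backs it.
 */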
unsigned long *phys_to_machine_mapping;

/*
 * Machine setup..
 */

char ignore_irq13;              /* set if exception 16 works */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };

unsigned long mmu_cr4_features;

unsigned char * vgacon_mmap;

/*
 * Bus types ..
 */
#ifdef CONFIG_EISA
int EISA_bus;
#endif
int MCA_bus;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;
unsigned int mca_pentium_flag;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0x10000000;

/*
 * Setup options
 */
struct drive_info_struct { char dummy[32]; } drive_info;
struct screen_info screen_info;
struct apm_info apm_info;
struct sys_desc_table_struct {
    unsigned short length;
    unsigned char table[0];
};

unsigned char aux_device_present;

extern int root_mountflags;
extern char _text, _etext, _edata, _end;

int enable_acpi_smp_table;

/* Raw start-of-day parameters from the hypervisor. */
union start_info_union start_info_union;

#define COMMAND_LINE_SIZE 256
static char command_line[COMMAND_LINE_SIZE];
char saved_command_line[COMMAND_LINE_SIZE];

static void __init parse_mem_cmdline (char ** cmdline_p)
{
    char c = ' ', *to = command_line, *from = saved_command_line;
    int len = 0;

    /* Save unparsed command line copy for /proc/cmdline */
    memcpy(saved_command_line, start_info.cmd_line, COMMAND_LINE_SIZE);
    saved_command_line[COMMAND_LINE_SIZE-1] = '\0';

    for (;;) {
        /*
         * "mem=nopentium" disables the 4MB page tables.
         * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
         * to <mem>, overriding the bios size.
         * "mem=XXX[KkmM]@XXX[KkmM]" defines a memory region from
         * <start> to <start>+<mem>, overriding the bios size.
         */
        if (c == ' ' && !memcmp(from, "mem=", 4)) {
            if (to != command_line)
                to--;
            if (!memcmp(from+4, "nopentium", 9)) {
                from += 9+4;
            } else if (!memcmp(from+4, "exactmap", 8)) {
                from += 8+4;
            } else {
                (void)memparse(from+4, &from);
                if (*from == '@')
                    (void)memparse(from+1, &from);
            }
        }

        c = *(from++);
        if (!c)
            break;
        if (COMMAND_LINE_SIZE <= ++len)
            break;
        *(to++) = c;
    }
    *to = '\0';
    *cmdline_p = command_line;
}
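
/*
 * For example (hypothetical command line): given "root=/dev/sda1 mem=512M",
 * saved_command_line keeps the full string for /proc/cmdline, while
 * command_line (and hence *cmdline_p) is left as "root=/dev/sda1", the
 * mem= token having been consumed above.
 */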
void __init setup_arch(char **cmdline_p)
{
    unsigned long start_pfn, max_pfn, max_low_pfn;
    unsigned long bootmap_size;
    unsigned long i;

    extern void hypervisor_callback(void);
    extern void failsafe_callback(void);

    extern unsigned long cpu0_pte_quicklist[];
    extern unsigned long cpu0_pgd_quicklist[];
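
    /*
     * Register the entry points Xen calls into instead of delivering
     * hardware interrupts directly: hypervisor_callback receives
     * asynchronous event notifications, and failsafe_callback is entered
     * if a segment register cannot be restored on return to the guest.
     */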
    HYPERVISOR_set_callbacks(
        __KERNEL_CS, (unsigned long)hypervisor_callback,
        __KERNEL_CS, (unsigned long)failsafe_callback);

    boot_cpu_data.pgd_quick = cpu0_pgd_quicklist;
    boot_cpu_data.pte_quick = cpu0_pte_quicklist;

    ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
    memset(&drive_info, 0, sizeof(drive_info));
    memset(&screen_info, 0, sizeof(screen_info));

    /* This is drawn from a dump from vgacon:startup in standard Linux. */
    screen_info.orig_video_mode = 3;
    screen_info.orig_video_isVGA = 1;
    screen_info.orig_video_lines = 25;
    screen_info.orig_video_cols = 80;
    screen_info.orig_video_ega_bx = 3;
    screen_info.orig_video_points = 16;

    memset(&apm_info.bios, 0, sizeof(apm_info.bios));
    aux_device_present = 0;
#ifdef CONFIG_BLK_DEV_RAM
    rd_image_start = 0;
    rd_prompt = 0;
    rd_doload = 0;
#endif

    root_mountflags &= ~MS_RDONLY;
    init_mm.start_code = (unsigned long) &_text;
    init_mm.end_code = (unsigned long) &_etext;
    init_mm.end_data = (unsigned long) &_edata;
    init_mm.brk = (unsigned long) &_end;

    parse_mem_cmdline(cmdline_p);

#define PFN_UP(x)   (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
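
/*
 * Worked example with 4KB pages (PAGE_SHIFT == 12): PFN_UP(0x1234) == 2
 * (rounds the partial page up), PFN_DOWN(0x1234) == 1 (rounds it down),
 * and PFN_PHYS(2) == 0x2000.
 */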
/*
 * 128MB for vmalloc and initrd
 */
#define VMALLOC_RESERVE (unsigned long)(128 << 20)
#define MAXMEM          (unsigned long)(HYPERVISOR_VIRT_START-PAGE_OFFSET-VMALLOC_RESERVE)
#define MAXMEM_PFN      PFN_DOWN(MAXMEM)
#define MAX_NONPAE_PFN  (1 << 20)

    /*
     * partially used pages are not usable - thus
     * we are rounding upwards:
     */
#ifdef CONFIG_BLK_DEV_INITRD
    if ( start_info.mod_start )
        start_pfn = PFN_UP(__pa(start_info.mod_start + start_info.mod_len));
    else
#endif
        start_pfn = PFN_UP(__pa(&_end));
    max_pfn = start_info.nr_pages;

    /*
     * Determine low and high memory ranges:
     */
    max_low_pfn = max_pfn;
    if (max_low_pfn > MAXMEM_PFN) {
        max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n",
               MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
            printk(KERN_WARNING "Use a PAE enabled kernel.\n");
        else
            printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_X86_PAE
        if (max_pfn > MAX_NONPAE_PFN) {
            max_pfn = MAX_NONPAE_PFN;
            printk(KERN_WARNING "Warning only 4GB will be used.\n");
            printk(KERN_WARNING "Use a PAE enabled kernel.\n");
        }
#endif /* !CONFIG_X86_PAE */
#endif /* !CONFIG_HIGHMEM */
    }

#ifdef CONFIG_HIGHMEM
    highstart_pfn = highend_pfn = max_pfn;
    if (max_pfn > MAXMEM_PFN) {
        highstart_pfn = MAXMEM_PFN;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
    }
#endif

    /*
     * Initialize the boot-time allocator, and free up all RAM.
     * Then reserve space for OS image, and the bootmem bitmap.
     */
    bootmap_size = init_bootmem(start_pfn, max_low_pfn);
    free_bootmem(0, PFN_PHYS(max_low_pfn));
    reserve_bootmem(0, PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1);

    /* Now reserve space for the hypervisor-provided page tables. */
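    /*
     * On a 32-bit non-PAE guest each page-directory entry maps 4MB, so
     * entries 0 .. (HYPERVISOR_VIRT_START>>22)-1 cover the guest-visible
     * address space. Bit 0 of an entry is the present bit, and present
     * entries hold machine addresses, hence the machine_to_phys()
     * translation before reserving each page-table page.
     */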
    {
        unsigned long *pgd = (unsigned long *)start_info.pt_base;
        unsigned long pte;
        int i;
        reserve_bootmem(__pa(pgd), PAGE_SIZE);
        for ( i = 0; i < (HYPERVISOR_VIRT_START>>22); i++ )
        {
            unsigned long pgde = *pgd++;
            if ( !(pgde & 1) ) continue;
            pte = machine_to_phys(pgde & PAGE_MASK);
            reserve_bootmem(pte, PAGE_SIZE);
        }
    }
    cur_pgd = init_mm.pgd = (pgd_t *)start_info.pt_base;

    /* Now initialise the physical->machine mapping table. */
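    /*
     * For each pseudo-physical page, walk the page tables that map it:
     * pfn >> 10 selects the page-directory slot (1024 PTEs per table
     * without PAE) and pfn & 1023 the PTE within that table; the frame
     * number in the PTE is the machine frame backing the page.
     */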
    phys_to_machine_mapping = alloc_bootmem(max_pfn * sizeof(unsigned long));
    for ( i = 0; i < max_pfn; i++ )
    {
        unsigned long pgde, *ppte;
        unsigned long pfn = i + (PAGE_OFFSET >> PAGE_SHIFT);
        pgde = *((unsigned long *)start_info.pt_base + (pfn >> 10));
        ppte = (unsigned long *)machine_to_phys(pgde & PAGE_MASK) + (pfn&1023);
        phys_to_machine_mapping[i] =
            (*(unsigned long *)__va(ppte)) >> PAGE_SHIFT;
    }

#ifdef CONFIG_BLK_DEV_INITRD
    if (start_info.mod_start) {
        if ((__pa(start_info.mod_start) + start_info.mod_len) <=
            (max_low_pfn << PAGE_SHIFT)) {
            initrd_start = start_info.mod_start;
            initrd_end = initrd_start + start_info.mod_len;
            initrd_below_start_ok = 1;
        }
        else {
            printk(KERN_ERR "initrd extends beyond end of memory "
                   "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                   __pa(start_info.mod_start) + start_info.mod_len,
                   max_low_pfn << PAGE_SHIFT);
            initrd_start = 0;
        }
    }
#endif

    paging_init();

    /* We are privileged guest os - should have IO privileges. */
    if ( start_info.flags & SIF_PRIVILEGED )
    {
        dom0_op_t op;
        op.cmd = DOM0_IOPL;
        op.u.iopl.domain = start_info.dom_id;
        op.u.iopl.iopl = 1;
        if( HYPERVISOR_dom0_op(&op) != 0 )
            panic("Unable to obtain IOPL, despite being SIF_PRIVILEGED");
        current->thread.io_pl = 1;
    }

    if(start_info.flags & SIF_CONSOLE)
    {
        if( !(start_info.flags & SIF_PRIVILEGED) )
            panic("Xen granted us console access but not privileged status");

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
    }
}

static int cachesize_override __initdata = -1;
static int __init cachesize_setup(char *str)
{
    get_option (&str, &cachesize_override);
    return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init get_model_name(struct cpuinfo_x86 *c)
{
    unsigned int *v;
    char *p, *q;

    if (cpuid_eax(0x80000000) < 0x80000004)
        return 0;

    v = (unsigned int *) c->x86_model_id;
    cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
    cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
    cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
    c->x86_model_id[48] = 0;

    /* Intel chips right-justify this string for some dumb reason;
       undo that brain damage */
    p = q = &c->x86_model_id[0];
    while ( *p == ' ' )
        p++;
    if ( p != q ) {
        while ( *p )
            *q++ = *p++;
        while ( q <= &c->x86_model_id[48] )
            *q++ = '\0'; /* Zero-pad the rest */
    }

    return 1;
}

static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
    unsigned int n, dummy, ecx, edx, l2size;

    n = cpuid_eax(0x80000000);

    if (n >= 0x80000005) {
        cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
        printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
               edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
        c->x86_cache_size=(ecx>>24)+(edx>>24);
    }
    if (n < 0x80000006) /* Some chips just have a large L1. */
        return;

    ecx = cpuid_ecx(0x80000006);
    l2size = ecx >> 16;

    /* AMD errata T13 (order #21922) */
    if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
        if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */
            l2size = 64;
        if (c->x86_model == 4 &&
            (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */
            l2size = 256;
    }

    /* Intel PIII Tualatin. This comes in two flavours.
     * One has 256kb of cache, the other 512. We have no way
     * to determine which, so we use a boottime override
     * for the 512kb model, and assume 256 otherwise.
     */
    if ((c->x86_vendor == X86_VENDOR_INTEL) && (c->x86 == 6) &&
        (c->x86_model == 11) && (l2size == 0))
        l2size = 256;

    if (c->x86_vendor == X86_VENDOR_CENTAUR) {
        /* VIA C3 CPUs (670-68F) need further shifting. */
        if ((c->x86 == 6) &&
            ((c->x86_model == 7) || (c->x86_model == 8))) {
            l2size >>= 8;
        }

        /* VIA also screwed up Nehemiah stepping 1, and made
           it return '65KB' instead of '64KB'
           - Note, it seems this may only be in engineering samples. */
        if ((c->x86==6) && (c->x86_model==9) &&
            (c->x86_mask==1) && (l2size==65))
            l2size -= 1;
    }

    /* Allow user to override all this if necessary. */
    if (cachesize_override != -1)
        l2size = cachesize_override;

    if ( l2size == 0 )
        return; /* Again, no L2 cache is possible */

    c->x86_cache_size = l2size;

    printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
           l2size, ecx & 0xFF);
}

static int __init init_amd(struct cpuinfo_x86 *c)
{
    int r;

    /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
    clear_bit(0*32+31, &c->x86_capability);

    r = get_model_name(c);

    switch(c->x86)
    {
    case 6: /* An Athlon/Duron. We can trust the BIOS probably */
        break;
    default:
        panic("Unsupported AMD processor\n");
    }

    display_cacheinfo(c);
    return r;
}

static void __init init_intel(struct cpuinfo_x86 *c)
{
    char *p = NULL;
    unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */

    if (c->cpuid_level > 1) {
        /* supports eax=2 call */
        int i, j, n;
        int regs[4];
        unsigned char *dp = (unsigned char *)regs;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;

        for ( i = 0 ; i < n ; i++ ) {
            cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

            /* If bit 31 is set, this is an unknown format */
            for ( j = 0 ; j < 3 ; j++ ) {
                if ( regs[j] < 0 ) regs[j] = 0;
            }

            /* Byte 0 is level count, not a descriptor */
            for ( j = 1 ; j < 16 ; j++ ) {
                unsigned char des = dp[j];
                unsigned char dl, dh;
                unsigned int cs;

                dh = des >> 4;
                dl = des & 0x0F;

                /* Black magic... */
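                /*
                 * Each descriptor byte splits into nibbles: the high
                 * nibble (dh) picks a family of cache types and the low
                 * nibble (dl) scales the size. E.g. on a family-6 CPU,
                 * descriptor 0x43 falls through from case 4 to case 8
                 * below and decodes as a 128 << (3-1) == 512KB L2 cache.
                 */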
                switch ( dh )
                {
                case 0:
                    switch ( dl ) {
                    case 6:
                        /* L1 I cache */
                        l1i += 8;
                        break;
                    case 8:
                        /* L1 I cache */
                        l1i += 16;
                        break;
                    case 10:
                        /* L1 D cache */
                        l1d += 8;
                        break;
                    case 12:
                        /* L1 D cache */
                        l1d += 16;
                        break;
                    default:;
                        /* TLB, or unknown */
                    }
                    break;
                case 2:
                    if ( dl ) {
                        /* L3 cache */
                        cs = (dl-1) << 9;
                        l3 += cs;
                    }
                    break;
                case 4:
                    if ( c->x86 > 6 && dl ) {
                        /* P4 family */
                        /* L3 cache */
                        cs = 128 << (dl-1);
                        l3 += cs;
                        break;
                    }
                    /* else same as 8 - fall through */
                case 8:
                    if ( dl ) {
                        /* L2 cache */
                        cs = 128 << (dl-1);
                        l2 += cs;
                    }
                    break;
                case 6:
                    if (dl > 5) {
                        /* L1 D cache */
                        cs = 8<<(dl-6);
                        l1d += cs;
                    }
                    break;
                case 7:
                    if ( dl >= 8 )
                    {
                        /* L2 cache */
                        cs = 64<<(dl-8);
                        l2 += cs;
                    } else {
                        /* L0 I cache, count as L1 */
                        cs = dl ? (16 << (dl-1)) : 12;
                        l1i += cs;
                    }
                    break;
                default:
                    /* TLB, or something else we don't know about */
                    break;
                }
            }
        }
        if ( l1i || l1d )
            printk(KERN_INFO "CPU: L1 I cache: %dK, L1 D cache: %dK\n",
                   l1i, l1d);
        if ( l2 )
            printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
        if ( l3 )
            printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

        /*
         * This assumes the L3 cache is shared; it typically lives in
         * the northbridge. The L1 caches are included by the L2
         * cache, and so should not be included for the purpose of
         * SMP switching weights.
         */
        c->x86_cache_size = l2 ? l2 : (l1i+l1d);
    }

    /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
    if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
        clear_bit(X86_FEATURE_SEP, &c->x86_capability);

    /* Names for the Pentium II/Celeron processors
       detectable only by also checking the cache size.
       Dixon is NOT a Celeron. */
    if (c->x86 == 6) {
        switch (c->x86_model) {
        case 5:
            if (l2 == 0)
                p = "Celeron (Covington)";
            if (l2 == 256)
                p = "Mobile Pentium II (Dixon)";
            break;

        case 6:
            if (l2 == 128)
                p = "Celeron (Mendocino)";
            break;

        case 8:
            if (l2 == 128)
                p = "Celeron (Coppermine)";
            break;
        }
    }

    if ( p )
        strcpy(c->x86_model_id, p);
}

void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
    char *v = c->x86_vendor_id;

    if (!strcmp(v, "GenuineIntel"))
        c->x86_vendor = X86_VENDOR_INTEL;
    else if (!strcmp(v, "AuthenticAMD"))
        c->x86_vendor = X86_VENDOR_AMD;
    else
        c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
    int vendor;
    int family;
    char *model_names[16];
};

/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set a model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
static struct cpu_model_info cpu_models[] __initdata = {
    { X86_VENDOR_INTEL, 6,
      { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)",
        NULL, "Pentium II (Deschutes)", "Mobile Pentium II",
        "Pentium III (Katmai)", "Pentium III (Coppermine)", NULL,
        "Pentium III (Cascades)", NULL, NULL, NULL, NULL }},
    { X86_VENDOR_AMD, 6, /* Is this really necessary?? */
645 { "Athlon", "Athlon",
646 "Athlon", NULL, "Athlon", NULL,
647 NULL, NULL, NULL,
648 NULL, NULL, NULL, NULL, NULL, NULL, NULL }}
649 };
651 /* Look up CPU names by table lookup. */
652 static char __init *table_lookup_model(struct cpuinfo_x86 *c)
653 {
654 struct cpu_model_info *info = cpu_models;
655 int i;
657 if ( c->x86_model >= 16 )
658 return NULL; /* Range check */
660 for ( i = 0 ; i < sizeof(cpu_models)/sizeof(struct cpu_model_info) ; i++ ) {
661 if ( info->vendor == c->x86_vendor &&
662 info->family == c->x86 ) {
663 return info->model_names[c->x86_model];
664 }
665 info++;
666 }
667 return NULL; /* Not found */
668 }
672 /* Standard macro to see if a specific flag is changeable */
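/*
 * The asm below saves EFLAGS, toggles the requested bit (ID, bit 21, when
 * probing for CPUID), loads the modified value, and reads EFLAGS back:
 * f2 holds the original flags and f1 the read-back copy, so the bit is
 * changeable exactly when the two differ in that position.
 */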
static inline int flag_is_changeable_p(u32 flag)
{
    u32 f1, f2;

    asm("pushfl\n\t"
        "pushfl\n\t"
        "popl %0\n\t"
        "movl %0,%1\n\t"
        "xorl %2,%0\n\t"
        "pushl %0\n\t"
        "popfl\n\t"
        "pushfl\n\t"
        "popl %0\n\t"
        "popfl\n\t"
        : "=&r" (f1), "=&r" (f2)
        : "ir" (flag));

    return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __init have_cpuid_p(void)
{
    return flag_is_changeable_p(X86_EFLAGS_ID);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
    int junk, i;
    u32 xlvl, tfms;

    c->loops_per_jiffy = loops_per_jiffy;
    c->x86_cache_size = -1;
    c->x86_vendor = X86_VENDOR_UNKNOWN;
    c->cpuid_level = -1;            /* CPUID not detected */
    c->x86_model = c->x86_mask = 0; /* So far unknown... */
    c->x86_vendor_id[0] = '\0';     /* Unset */
    c->x86_model_id[0] = '\0';      /* Unset */
    memset(&c->x86_capability, 0, sizeof c->x86_capability);
    c->hard_math = 1;

    if ( !have_cpuid_p() ) {
        panic("Processor must support CPUID\n");
    } else {
        /* CPU does have CPUID */

        /* Get vendor name */
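        /*
         * CPUID leaf 0 returns the vendor string spread across EBX, EDX
         * and ECX (in that order), which is why the ECX and EDX pieces
         * land at offsets 8 and 4: together the three spell out, e.g.,
         * "GenuineIntel".
         */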
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if ( c->cpuid_level >= 0x00000001 ) {
            cpuid(0x00000001, &tfms, &junk, &junk,
                  &c->x86_capability[0]);
            c->x86 = (tfms >> 8) & 15;
            c->x86_model = (tfms >> 4) & 15;
            c->x86_mask = tfms & 15;
        } else {
            /* Have CPUID level 0 only - unheard of */
            c->x86 = 4;
        }

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        if ( (xlvl & 0xffff0000) == 0x80000000 ) {
            if ( xlvl >= 0x80000001 )
                c->x86_capability[1] = cpuid_edx(0x80000001);
            if ( xlvl >= 0x80000004 )
                get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ( (xlvl & 0xffff0000) == 0x80860000 ) {
            if ( xlvl >= 0x80860001 )
                c->x86_capability[2] = cpuid_edx(0x80860001);
        }
    }

    printk(KERN_DEBUG "CPU: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_vendor);

    /*
     * Vendor-specific initialization. In this section we
     * canonicalize the feature flags, meaning if there are
     * features a certain CPU supports which CPUID doesn't
     * tell us, CPUID claiming incorrect flags, or other bugs,
     * we handle them here.
     *
     * At the end of this section, c->x86_capability better
     * indicate the features this CPU genuinely supports!
     */
    switch ( c->x86_vendor ) {
    case X86_VENDOR_AMD:
        init_amd(c);
        break;

    case X86_VENDOR_INTEL:
        init_intel(c);
        break;

    default:
        panic("Unsupported CPU vendor\n");
    }

    printk(KERN_DEBUG "CPU: After vendor init, caps: %08x %08x %08x %08x\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_capability[3]);

    /* If the model name is still unset, do table lookup. */
    if ( !c->x86_model_id[0] ) {
        char *p;
        p = table_lookup_model(c);
        if ( p )
            strcpy(c->x86_model_id, p);
        else
            /* Last resort... */
            sprintf(c->x86_model_id, "%02x/%02x",
                    c->x86_vendor, c->x86_model);
    }

    /* Now the feature flags better reflect actual CPU features! */

    printk(KERN_DEBUG "CPU: After generic, caps: %08x %08x %08x %08x\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_capability[3]);

    /*
     * On SMP, boot_cpu_data holds the common feature set between
     * all CPUs; so make sure that we indicate which features are
     * common between the CPUs. The first time this routine gets
     * executed, c == &boot_cpu_data.
     */
    if ( c != &boot_cpu_data ) {
        /* AND the already accumulated flags with these */
        for ( i = 0 ; i < NCAPINTS ; i++ )
            boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
    }

    printk(KERN_DEBUG "CPU: Common caps: %08x %08x %08x %08x\n",
           boot_cpu_data.x86_capability[0],
           boot_cpu_data.x86_capability[1],
           boot_cpu_data.x86_capability[2],
           boot_cpu_data.x86_capability[3]);
}
/* These need to match <asm/processor.h> */
static char *cpu_vendor_names[] __initdata = {
    "Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise", "Transmeta" };

void __init print_cpu_info(struct cpuinfo_x86 *c)
{
    char *vendor = NULL;

    if (c->x86_vendor < sizeof(cpu_vendor_names)/sizeof(char *))
        vendor = cpu_vendor_names[c->x86_vendor];
    else if (c->cpuid_level >= 0)
        vendor = c->x86_vendor_id;

    if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
        printk("%s ", vendor);

    if (!c->x86_model_id[0])
        printk("%d86", c->x86);
    else
        printk("%s", c->x86_model_id);

    if (c->x86_mask || c->cpuid_level >= 0)
        printk(" stepping %02x\n", c->x86_mask);
    else
        printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
    /*
     * These flag bits must match the definitions in <asm/cpufeature.h>.
     * NULL means this bit is undefined or reserved; either way it doesn't
     * have meaning as far as Linux is concerned. Note that it's important
     * to realize there is a difference between this table and CPUID -- if
     * applications want to get the raw CPUID data, they should access
     * /dev/cpu/<cpu_nr>/cpuid instead.
     */
    static char *x86_cap_flags[] = {
        /* Intel-defined */
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", NULL, "tm", "ia64", NULL,

        /* AMD-defined */
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, "mmxext", NULL,
        NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",

        /* Transmeta-defined */
        "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

        /* Other (Linux-defined) */
        "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    struct cpuinfo_x86 *c = v;
    int i, n = c - cpu_data;
    int fpu_exception;

#ifdef CONFIG_SMP
    if (!(cpu_online_map & (1<<n)))
        return 0;
#endif
    seq_printf(m, "processor\t: %d\n"
               "vendor_id\t: %s\n"
               "cpu family\t: %d\n"
               "model\t\t: %d\n"
               "model name\t: %s\n",
               n,
               c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
               c->x86,
               c->x86_model,
               c->x86_model_id[0] ? c->x86_model_id : "unknown");

    if (c->x86_mask || c->cpuid_level >= 0)
        seq_printf(m, "stepping\t: %d\n", c->x86_mask);
    else
        seq_printf(m, "stepping\t: unknown\n");

    if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
        seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n",
                   cpu_khz / 1000, (cpu_khz % 1000));
    }

    /* Cache size */
    if (c->x86_cache_size >= 0)
        seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

    /* We use exception 16 if we have hardware math and we've either seen
       it or the CPU claims it is internal */
    fpu_exception = c->hard_math && (ignore_irq13 || cpu_has_fpu);
    seq_printf(m, "fdiv_bug\t: %s\n"
               "hlt_bug\t\t: %s\n"
               "f00f_bug\t: %s\n"
               "coma_bug\t: %s\n"
               "fpu\t\t: %s\n"
               "fpu_exception\t: %s\n"
               "cpuid level\t: %d\n"
               "wp\t\t: %s\n"
               "flags\t\t:",
               c->fdiv_bug ? "yes" : "no",
               c->hlt_works_ok ? "no" : "yes",
               c->f00f_bug ? "yes" : "no",
               c->coma_bug ? "yes" : "no",
               c->hard_math ? "yes" : "no",
               fpu_exception ? "yes" : "no",
               c->cpuid_level,
               c->wp_works_ok ? "yes" : "no");

    for ( i = 0 ; i < 32*NCAPINTS ; i++ )
        if ( test_bit(i, &c->x86_capability) &&
             x86_cap_flags[i] != NULL )
            seq_printf(m, " %s", x86_cap_flags[i]);
964 seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
965 c->loops_per_jiffy/(500000/HZ),
966 (c->loops_per_jiffy/(5000/HZ)) % 100);
967 return 0;
968 }
970 static void *c_start(struct seq_file *m, loff_t *pos)
971 {
972 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
973 }
974 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
975 {
976 ++*pos;
977 return c_start(m, pos);
978 }
979 static void c_stop(struct seq_file *m, void *v)
980 {
981 }
982 struct seq_operations cpuinfo_op = {
983 start: c_start,
984 next: c_next,
985 stop: c_stop,
986 show: show_cpuinfo,
987 };
989 unsigned long cpu_initialized __initdata = 0;
991 /*
992 * cpu_init() initializes state that is per-CPU. Some data is already
993 * initialized (naturally) in the bootstrap process, such as the GDT
994 * and IDT. We reload them nevertheless, this function acts as a
995 * 'CPU state barrier', nothing should get across.
996 */
997 void __init cpu_init (void)
998 {
999 int nr = smp_processor_id();
    if (test_and_set_bit(nr, &cpu_initialized)) {
        printk(KERN_WARNING "CPU#%d already initialized!\n", nr);
        for (;;) __sti();
    }
    printk(KERN_INFO "Initializing CPU#%d\n", nr);
    /*
     * set up and load the per-CPU TSS and LDT
     */
    atomic_inc(&init_mm.mm_count);
    current->active_mm = &init_mm;
    if(current->mm)
        BUG();
    enter_lazy_tlb(&init_mm, current, nr);

    HYPERVISOR_stack_switch(__KERNEL_DS, current->thread.esp0);

    load_LDT(&init_mm);
    flush_page_update_queue();

    /* Force FPU initialization. */
    current->flags &= ~PF_USEDFPU;
    current->used_math = 0;
    stts();
}
/******************************************************************************
 * Time-to-die callback handling.
 */
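
/*
 * As the name suggests, the _EVENT_DIE event is raised when the hypervisor
 * wants this domain to shut down; the handler simply routes it into the
 * standard ctrl-alt-del path so the system goes down cleanly.
 */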
static void time_to_die(int irq, void *unused, struct pt_regs *regs)
{
    extern void ctrl_alt_del(void);
    ctrl_alt_del();
}
static int __init setup_death_event(void)
{
    (void)request_irq(_EVENT_DIE, time_to_die, 0, "die", NULL);
    return 0;
}
__initcall(setup_death_event);