debuggers.hg

view xen/arch/i386/setup.c @ 656:c7557b3832b9

bitkeeper revision 1.339.1.6 (3f12cffdzSdqoflJR3gfS-S45xcteA)

nmi.c:
new file
Many files:
NMI watchdog support in Xen.
author kaf24@scramble.cl.cam.ac.uk
date Mon Jul 14 15:45:01 2003 +0000 (2003-07-14)
parents bb07751512ba
children c085fac641e2 384fbe1ed716

#include <xeno/config.h>
#include <xeno/init.h>
#include <xeno/interrupt.h>
#include <xeno/lib.h>
#include <xeno/sched.h>
#include <xeno/pci.h>
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/processor.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/domain_page.h>

struct cpuinfo_x86 boot_cpu_data = { 0 };
/* Lots of nice things, since we only target PPro+. */
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
unsigned long wait_init_idle;

/* Basic page table for each CPU in the system. */
l2_pgentry_t *idle_pg_table[NR_CPUS] = { idle0_pg_table };
struct task_struct *idle_task[NR_CPUS] = { &idle0_task };

/* For asm/domain_page.h, map_domain_page(). */
unsigned long *mapcache[NR_CPUS];

/* Standard macro to see if a specific flag is changeable. */
static inline int flag_is_changeable_p(u32 flag)
{
    u32 f1, f2;

    asm("pushfl\n\t"
        "pushfl\n\t"
        "popl %0\n\t"
        "movl %0,%1\n\t"
        "xorl %2,%0\n\t"
        "pushl %0\n\t"
        "popfl\n\t"
        "pushfl\n\t"
        "popl %0\n\t"
        "popfl\n\t"
        : "=&r" (f1), "=&r" (f2)
        : "ir" (flag));

    return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction. */
static int __init have_cpuid_p(void)
{
    return flag_is_changeable_p(X86_EFLAGS_ID);
}
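
/*
 * Explanatory note (added, not part of the original file): the asm sequence
 * in flag_is_changeable_p() saves EFLAGS, toggles the requested bit, reloads
 * EFLAGS, and reads the register back; the bit only sticks if the processor
 * implements it.  X86_EFLAGS_ID (bit 21) is writable exactly on CPUID-capable
 * CPUs, which is what have_cpuid_p() relies on.
 */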

void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
    char *v = c->x86_vendor_id;

    if (!strcmp(v, "GenuineIntel"))
        c->x86_vendor = X86_VENDOR_INTEL;
    else if (!strcmp(v, "AuthenticAMD"))
        c->x86_vendor = X86_VENDOR_AMD;
    else if (!strcmp(v, "CyrixInstead"))
        c->x86_vendor = X86_VENDOR_CYRIX;
    else if (!strcmp(v, "UMC UMC UMC "))
        c->x86_vendor = X86_VENDOR_UMC;
    else if (!strcmp(v, "CentaurHauls"))
        c->x86_vendor = X86_VENDOR_CENTAUR;
    else if (!strcmp(v, "NexGenDriven"))
        c->x86_vendor = X86_VENDOR_NEXGEN;
    else if (!strcmp(v, "RiseRiseRise"))
        c->x86_vendor = X86_VENDOR_RISE;
    else if (!strcmp(v, "GenuineTMx86") ||
             !strcmp(v, "TransmetaCPU"))
        c->x86_vendor = X86_VENDOR_TRANSMETA;
    else
        c->x86_vendor = X86_VENDOR_UNKNOWN;
}

static void __init init_intel(struct cpuinfo_x86 *c)
{
    /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it. */
    if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
        clear_bit(X86_FEATURE_SEP, &c->x86_capability);
}

static void __init init_amd(struct cpuinfo_x86 *c)
{
    /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway. */
    clear_bit(0*32+31, &c->x86_capability);

    switch ( c->x86 )
    {
    case 5:
        panic("AMD K6 is not supported.\n");
    case 6: /* An Athlon/Duron. We can trust the BIOS probably. */
        break;
    }
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
    extern int opt_noht, opt_noacpi;
    int junk, i;
    u32 xlvl, tfms;

    c->x86_vendor = X86_VENDOR_UNKNOWN;
    c->cpuid_level = -1;            /* CPUID not detected */
    c->x86_model = c->x86_mask = 0; /* So far unknown... */
    c->x86_vendor_id[0] = '\0';     /* Unset */
    memset(&c->x86_capability, 0, sizeof c->x86_capability);

    if ( !have_cpuid_p() )
        panic("Ancient processors not supported\n");

    /* Get vendor name. */
    cpuid(0x00000000, &c->cpuid_level,
          (int *)&c->x86_vendor_id[0],
          (int *)&c->x86_vendor_id[8],
          (int *)&c->x86_vendor_id[4]);

    get_cpu_vendor(c);

    if ( c->cpuid_level == 0 )
        panic("Decrepit CPUID not supported\n");

    cpuid(0x00000001, &tfms, &junk, &junk,
          &c->x86_capability[0]);
    c->x86 = (tfms >> 8) & 15;
    c->x86_model = (tfms >> 4) & 15;
    c->x86_mask = tfms & 15;
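
    /*
     * Layout note (added for clarity): CPUID leaf 1 returns family/model/
     * stepping packed into EAX:
     *   bits  3:0  stepping -> x86_mask
     *   bits  7:4  model    -> x86_model
     *   bits 11:8  family   -> x86
     * e.g. tfms = 0x00000673 decodes as family 6, model 7, stepping 3.
     */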

    /* AMD-defined flags: level 0x80000001. */
    xlvl = cpuid_eax(0x80000000);
    if ( (xlvl & 0xffff0000) == 0x80000000 ) {
        if ( xlvl >= 0x80000001 )
            c->x86_capability[1] = cpuid_edx(0x80000001);
    }

    /* Transmeta-defined flags: level 0x80860001. */
    xlvl = cpuid_eax(0x80860000);
    if ( (xlvl & 0xffff0000) == 0x80860000 ) {
        if ( xlvl >= 0x80860001 )
            c->x86_capability[2] = cpuid_edx(0x80860001);
    }

    printk("CPU%d: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
           smp_processor_id(),
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_vendor);

    switch ( c->x86_vendor ) {
    case X86_VENDOR_INTEL:
        init_intel(c);
        break;
    case X86_VENDOR_AMD:
        init_amd(c);
        break;
    default:
        panic("Only support Intel processors (P6+)\n");
    }

    if ( opt_noht )
    {
        opt_noacpi = 1; /* Virtual CPUs only appear in ACPI tables. */
        clear_bit(X86_FEATURE_HT, &c->x86_capability[0]);
    }

    printk("CPU caps: %08x %08x %08x %08x\n",
           c->x86_capability[0],
           c->x86_capability[1],
           c->x86_capability[2],
           c->x86_capability[3]);

    /*
     * On SMP, boot_cpu_data holds the common feature set between
     * all CPUs; so make sure that we indicate which features are
     * common between the CPUs.  The first time this routine gets
     * executed, c == &boot_cpu_data.
     */
    if ( c != &boot_cpu_data ) {
        /* AND the already-accumulated flags with these. */
        for ( i = 0 ; i < NCAPINTS ; i++ )
            boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
    }
}
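
/*
 * Illustrative example (hypothetical values, added for clarity): if the boot
 * CPU reports x86_capability[0] = 0x0383fbff and a secondary CPU reports
 * 0x0183fbff, the AND above leaves boot_cpu_data.x86_capability[0] =
 * 0x0183fbff, so only features present on every CPU remain advertised.
 */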

unsigned long cpu_initialized;
void __init cpu_init(void)
{
    int nr = smp_processor_id();
    struct tss_struct *t = &init_tss[nr];
    l2_pgentry_t *pl2e;

    if ( test_and_set_bit(nr, &cpu_initialized) )
        panic("CPU#%d already initialized!!!\n", nr);
    printk("Initializing CPU#%d\n", nr);

    /* Set up GDT and IDT. */
    SET_GDT_ENTRIES(current, DEFAULT_GDT_ENTRIES);
    SET_GDT_ADDRESS(current, DEFAULT_GDT_ADDRESS);
    __asm__ __volatile__("lgdt %0": "=m" (*current->mm.gdt));
    __asm__ __volatile__("lidt %0": "=m" (idt_descr));

    /* No nested task. */
    __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");

    /* Ensure FPU gets initialised for each domain. */
    stts();

    /* Set up and load the per-CPU TSS and LDT. */
    t->ss0  = __HYPERVISOR_DS;
    t->esp0 = current->thread.esp0;
    set_tss_desc(nr, t);
    load_TR(nr);
    __asm__ __volatile__("lldt %%ax"::"a" (0));

    /* Clear all 6 debug registers. */
#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
    CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
#undef CD

    /* Install correct page table. */
    __asm__ __volatile__ ("movl %%eax,%%cr3"
                          : : "a" (pagetable_val(current->mm.pagetable)));

    /* Set up mapping cache for domain pages. */
    pl2e = idle_pg_table[nr] + (MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT);
    mapcache[nr] = (unsigned long *)get_free_page(GFP_KERNEL);
    clear_page(mapcache[nr]);
    *pl2e = mk_l2_pgentry(__pa(mapcache[nr]) | PAGE_HYPERVISOR);
}
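
/*
 * Hedged sketch (added; not part of the original file) of how the per-CPU
 * mapcache wired up above can be used.  mapcache[nr] is a page of L1 entries
 * reached through the single L2 slot covering MAPCACHE_VIRT_START, so mapping
 * an arbitrary machine page amounts to filling a free L1 slot and returning
 * the matching virtual address.  Helper names, slot allocation and TLB
 * invalidation are hypothetical/elided here:
 *
 *   void *map_domain_page_sketch(unsigned long pfn)
 *   {
 *       int cpu = smp_processor_id();
 *       int idx = pick_free_slot(cpu);   // hypothetical slot allocator
 *       mapcache[cpu][idx] = (pfn << PAGE_SHIFT) | PAGE_HYPERVISOR;
 *       return (void *)(MAPCACHE_VIRT_START + (idx << PAGE_SHIFT));
 *   }
 */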

static void __init do_initcalls(void)
{
    initcall_t *call;

    call = &__initcall_start;
    do {
        (*call)();
        call++;
    } while (call < &__initcall_end);
}
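
/*
 * Background note (assumption, following the Linux 2.4 convention this code
 * is derived from): __initcall_start and __initcall_end delimit a linker
 * section containing an array of initcall_t function pointers, and a
 * subsystem typically registers itself with something along the lines of
 *
 *   static initcall_t __initcall_mydrv
 *       __attribute__((__section__(".initcall.init"))) = mydrv_init;
 *
 * so do_initcalls() simply invokes every registered entry in link order.
 */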

/*
 * IBM-compatible BIOSes place drive info tables at initial interrupt
 * vectors 0x41 and 0x46. These are in the form of 16-bit-mode far ptrs.
 */
struct drive_info_struct { unsigned char dummy[32]; } drive_info;
void get_bios_driveinfo(void)
{
    unsigned long seg, off, tab1, tab2;

    off  = (unsigned long)*(unsigned short *)(4*0x41+0);
    seg  = (unsigned long)*(unsigned short *)(4*0x41+2);
    tab1 = (seg<<4) + off;

    off  = (unsigned long)*(unsigned short *)(4*0x46+0);
    seg  = (unsigned long)*(unsigned short *)(4*0x46+2);
    tab2 = (seg<<4) + off;

    printk("Reading BIOS drive-info tables at 0x%05lx and 0x%05lx\n",
           tab1, tab2);

    memcpy(drive_info.dummy +  0, (char *)tab1, 16);
    memcpy(drive_info.dummy + 16, (char *)tab2, 16);
}
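
/*
 * Worked example (added for clarity): a real-mode far pointer seg:off maps to
 * the linear address (seg << 4) + off, so the classic fixed-disk parameter
 * table pointer F000:E401 resolves to 0xF0000 + 0xE401 = 0xFE401.  That is
 * why start_of_day() below can note that these tables usually live in the
 * 0xfe000-0xfffff region.
 */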

unsigned long pci_mem_start = 0x10000000;

void __init start_of_day(void)
{
    extern void trap_init(void);
    extern void init_IRQ(void);
    extern void time_init(void);
    extern void softirq_init(void);
    extern void timer_bh(void);
    extern void tqueue_bh(void);
    extern void immediate_bh(void);
    extern void init_timervecs(void);
    extern void disable_pit(void);
    extern void ac_timer_init(void);
    extern int  setup_network_devices(void);
    extern void net_init(void);
    extern void initialize_block_io(void);
    extern void initialize_keytable(void);
    extern void initialize_serial(void);
    extern void initialize_keyboard(void);
    extern int  opt_nosmp;
    unsigned long low_mem_size;

    /*
     * We do this early, but the tables are in the lowest 1MB (usually
     * 0xfe000-0xfffff), so they are unlikely to ever get clobbered.
     */
    get_bios_driveinfo();

    /* Tell the PCI layer not to allocate too close to the RAM area. */
    low_mem_size = ((max_page << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
    if ( low_mem_size > pci_mem_start ) pci_mem_start = low_mem_size;
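
    /*
     * Arithmetic note (added for clarity): max_page << PAGE_SHIFT is the top
     * of physical RAM in bytes, and adding 0xfffff then masking with ~0xfffff
     * rounds it up to the next 1MB boundary.  pci_mem_start is only raised
     * when that figure exceeds its 0x10000000 (256MB) default, e.g. a 512MB
     * machine pushes PCI allocations up to 0x20000000.
     */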

    identify_cpu(&boot_cpu_data); /* get CPU type info */
    if ( cpu_has_fxsr ) set_in_cr4(X86_CR4_OSFXSR);
    if ( cpu_has_xmm )  set_in_cr4(X86_CR4_OSXMMEXCPT);
#ifdef CONFIG_SMP
    find_smp_config();  /* find ACPI tables */
    smp_alloc_memory(); /* trampoline which other CPUs jump at */
#endif
    paging_init();      /* not much here now, but sets up fixmap */
#ifdef CONFIG_SMP
    if ( smp_found_config ) get_smp_config();
#endif
    domain_init();
    scheduler_init();
    trap_init();
    init_IRQ();  /* installs simple interrupt wrappers. Starts HZ clock. */
    time_init(); /* installs software handler for HZ clock. */
    softirq_init();
    init_timervecs();
    init_bh(TIMER_BH, timer_bh);
    init_bh(TQUEUE_BH, tqueue_bh);
    init_bh(IMMEDIATE_BH, immediate_bh);
    init_apic_mappings(); /* make APICs addressable in our pagetables. */

#ifndef CONFIG_SMP
    APIC_init_uniprocessor();
#else
    if ( opt_nosmp )
        APIC_init_uniprocessor();
    else
        smp_boot_cpus();
    /*
     * smp_boot_cpus() does loads of stuff, including kicking the local
     * APIC, and the IO APIC after other CPUs are booted.
     * Each IRQ is preferably handled by the IO-APIC, but we fall
     * through to the 8259A if we have to (which is slower).
     */
#endif
    initialize_keytable(); /* call-back handling for key codes */

    disable_pit();      /* not needed anymore */
    ac_timer_init();    /* init accurate timers */
    init_xeno_time();   /* initialise the time */
    schedulers_start(); /* start scheduler for each CPU */

    sti();

    check_nmi_watchdog();

    zap_low_mappings();
    kmem_cache_init();
    kmem_cache_sizes_init(max_page);
#ifdef CONFIG_PCI
    pci_init();
#endif
    do_initcalls();

    initialize_serial();   /* setup serial 'driver' (for debugging) */
    initialize_keyboard(); /* setup keyboard (also for debugging) */

    if ( !setup_network_devices() )
        panic("Must have a network device!\n");
    net_init();            /* initializes virtual network system */
    initialize_block_io(); /* setup block devices */

#ifdef CONFIG_SMP
    /* Wait until every other online CPU has cleared its bit in
       wait_init_idle, i.e. until its idle task is up and running. */
    wait_init_idle = cpu_online_map;
    clear_bit(smp_processor_id(), &wait_init_idle);
    smp_threads_ready = 1;
    smp_commence(); /* Tell other CPUs that state of the world is stable. */
    while ( wait_init_idle )
    {
        cpu_relax();
        barrier();
    }
#endif
}