xen-vtx-unstable

view xen/arch/x86/nmi.c @ 6759:b5d91089e42c

Newer binutils is a bit stricter and errors out when you try
to use movl on a 16 bit word on x86_64. Using just a "mov"
compiles fine and should result in the same code.

{standard input}: Assembler messages:
{standard input}:2138: Error: suffix or operands invalid for `mov'
{standard input}:2140: Error: suffix or operands invalid for `mov'
{standard input}:2142: Error: suffix or operands invalid for `mov'
{standard input}:2144: Error: suffix or operands invalid for `mov'

Signed-off-by: Rik van Riel <riel@redhat.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Sep 13 10:21:22 2005 +0000 (2005-09-13)
parents f66a730a2c3d
children
line source
1 /*
2 * linux/arch/i386/nmi.c
3 *
4 * NMI watchdog support on APIC systems
5 *
6 * Started by Ingo Molnar <mingo@redhat.com>
7 *
8 * Fixes:
9 * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
10 * Mikael Pettersson : Power Management for local APIC NMI watchdog.
11 * Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog.
12 * Keir Fraser : Pentium 4 Hyperthreading support
13 */
15 #include <xen/config.h>
16 #include <xen/init.h>
17 #include <xen/lib.h>
18 #include <xen/mm.h>
19 #include <xen/irq.h>
20 #include <xen/delay.h>
21 #include <xen/time.h>
22 #include <xen/sched.h>
23 #include <xen/console.h>
24 #include <xen/smp.h>
25 #include <asm/current.h>
26 #include <asm/mc146818rtc.h>
27 #include <asm/msr.h>
28 #include <asm/mpspec.h>
29 #include <asm/debugger.h>
31 unsigned int nmi_watchdog = NMI_NONE;
32 static unsigned int nmi_hz = HZ;
33 static unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */
34 static unsigned int nmi_p4_cccr_val;
35 static struct ac_timer nmi_timer[NR_CPUS];
36 static unsigned int nmi_timer_ticks[NR_CPUS];
38 #define K7_EVNTSEL_ENABLE (1 << 22)
39 #define K7_EVNTSEL_INT (1 << 20)
40 #define K7_EVNTSEL_OS (1 << 17)
41 #define K7_EVNTSEL_USR (1 << 16)
42 #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
43 #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
45 #define P6_EVNTSEL0_ENABLE (1 << 22)
46 #define P6_EVNTSEL_INT (1 << 20)
47 #define P6_EVNTSEL_OS (1 << 17)
48 #define P6_EVNTSEL_USR (1 << 16)
49 #define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79
50 #define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED
52 #define MSR_P4_PERFCTR0 0x300
53 #define MSR_P4_CCCR0 0x360
54 #define P4_ESCR_EVENT_SELECT(N) ((N)<<25)
55 #define P4_CCCR_OVF_PMI0 (1<<26)
56 #define P4_CCCR_OVF_PMI1 (1<<27)
57 #define P4_CCCR_THRESHOLD(N) ((N)<<20)
58 #define P4_CCCR_COMPLEMENT (1<<19)
59 #define P4_CCCR_COMPARE (1<<18)
60 #define P4_CCCR_REQUIRED (3<<16)
61 #define P4_CCCR_ESCR_SELECT(N) ((N)<<13)
62 #define P4_CCCR_ENABLE (1<<12)
63 /*
64 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
65 * CRU_ESCR0 (with any non-null event selector) through a complemented
66 * max threshold. [IA32-Vol3, Section 14.9.9]
67 */
68 #define MSR_P4_IQ_COUNTER0 0x30C
69 #define MSR_P4_IQ_CCCR0 0x36C
70 #define MSR_P4_CRU_ESCR0 0x3B8 /* ESCR no. 4 */
71 #define P4_NMI_CRU_ESCR0 P4_ESCR_EVENT_SELECT(0x3F)
72 #define P4_NMI_IQ_CCCR0 \
73 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
74 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
76 int __init check_nmi_watchdog (void)
77 {
78 unsigned int prev_nmi_count[NR_CPUS];
79 int cpu;
81 if ( !nmi_watchdog )
82 return 0;
84 printk("Testing NMI watchdog --- ");
86 for ( cpu = 0; cpu < NR_CPUS; cpu++ )
87 prev_nmi_count[cpu] = nmi_count(cpu);
88 local_irq_enable();
89 mdelay((10*1000)/nmi_hz); /* wait 10 ticks */
91 for ( cpu = 0; cpu < NR_CPUS; cpu++ )
92 {
93 if ( !cpu_isset(cpu, cpu_callin_map) &&
94 !cpu_isset(cpu, cpu_online_map) )
95 continue;
96 if ( nmi_count(cpu) - prev_nmi_count[cpu] <= 5 )
97 printk("CPU#%d stuck. ", cpu);
98 else
99 printk("CPU#%d okay. ", cpu);
100 }
102 printk("\n");
104 /* now that we know it works we can reduce NMI frequency to
105 something more reasonable; makes a difference in some configs */
106 if ( nmi_watchdog == NMI_LOCAL_APIC )
107 nmi_hz = 1;
109 return 0;
110 }
112 static void nmi_timer_fn(void *unused)
113 {
114 int cpu = smp_processor_id();
115 nmi_timer_ticks[cpu]++;
116 set_ac_timer(&nmi_timer[cpu], NOW() + MILLISECS(1000));
117 }
119 static inline void nmi_pm_init(void) { }
120 #define __pminit __init
122 /*
123 * Activate the NMI watchdog via the local APIC.
124 * Original code written by Keith Owens.
125 */
127 static void __pminit clear_msr_range(unsigned int base, unsigned int n)
128 {
129 unsigned int i;
130 for ( i = 0; i < n; i++ )
131 wrmsr(base+i, 0, 0);
132 }
/*
 * Program AMD K7/K8 performance counter 0 to raise an NMI roughly
 * nmi_hz times per second by counting processor cycles.
 */
static void __pminit setup_k7_watchdog(void)
{
    unsigned int evntsel;

    nmi_perfctr_msr = MSR_K7_PERFCTR0;

    /* Start from a clean slate: zero all four evntsel/counter pairs. */
    clear_msr_range(MSR_K7_EVNTSEL0, 4);
    clear_msr_range(MSR_K7_PERFCTR0, 4);

    /* Count cycles in kernel and user mode; interrupt on overflow. */
    evntsel = K7_EVNTSEL_INT
        | K7_EVNTSEL_OS
        | K7_EVNTSEL_USR
        | K7_NMI_EVENT;

    wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
    Dprintk("setting K7_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
    /* Preload with a negative count so overflow occurs after ~1/nmi_hz s. */
    wrmsr(MSR_K7_PERFCTR0, -(cpu_khz/nmi_hz*1000), -1);
    /* Route performance-counter interrupts as NMIs. */
    apic_write(APIC_LVTPC, APIC_DM_NMI);
    /* Enable only after the counter and LVTPC are fully configured. */
    evntsel |= K7_EVNTSEL_ENABLE;
    wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
/*
 * Program Intel P6-family performance counter 0 to raise an NMI
 * roughly nmi_hz times per second by counting unhalted CPU clocks.
 */
static void __pminit setup_p6_watchdog(void)
{
    unsigned int evntsel;

    nmi_perfctr_msr = MSR_P6_PERFCTR0;

    /* Start from a clean slate: zero both evntsel/counter pairs. */
    clear_msr_range(MSR_P6_EVNTSEL0, 2);
    clear_msr_range(MSR_P6_PERFCTR0, 2);

    /* Count clocks in kernel and user mode; interrupt on overflow. */
    evntsel = P6_EVNTSEL_INT
        | P6_EVNTSEL_OS
        | P6_EVNTSEL_USR
        | P6_NMI_EVENT;

    wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
    Dprintk("setting P6_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
    /* Preload with a negative count so overflow occurs after ~1/nmi_hz s. */
    wrmsr(MSR_P6_PERFCTR0, -(cpu_khz/nmi_hz*1000), 0);
    /* Route performance-counter interrupts as NMIs. */
    apic_write(APIC_LVTPC, APIC_DM_NMI);
    /* Enable only after the counter and LVTPC are fully configured. */
    evntsel |= P6_EVNTSEL0_ENABLE;
    wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
}
/*
 * Program the Pentium 4 IQ_COUNTER0 to raise an NMI roughly nmi_hz
 * times per second. IQ_CCCR0 filters CRU_ESCR0 through a complemented
 * max threshold so the counter ticks like a clock (see the comment at
 * MSR_P4_IQ_COUNTER0 above). Returns 1 on success, 0 if the CPU
 * reports no performance-monitoring support.
 */
static int __pminit setup_p4_watchdog(void)
{
    unsigned int misc_enable, dummy;

    rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
    if (!(misc_enable & MSR_IA32_MISC_ENABLE_PERF_AVAIL))
        return 0;

    nmi_perfctr_msr = MSR_P4_IQ_COUNTER0;
    nmi_p4_cccr_val = P4_NMI_IQ_CCCR0;
    /* With two hyperthreads, overflow must also raise PMI on the sibling. */
    if ( smp_num_siblings == 2 )
        nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;

    /* Clear the PEBS MSRs only when PEBS is actually implemented. */
    if (!(misc_enable & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
        clear_msr_range(0x3F1, 2);
    /* MSR 0x3F0 seems to have a default value of 0xFC00, but current
       docs don't fully define it, so leave it alone for now. */
    /* Zero the remaining P4 performance-monitoring MSR ranges. */
    clear_msr_range(0x3A0, 31);
    clear_msr_range(0x3C0, 6);
    clear_msr_range(0x3C8, 6);
    clear_msr_range(0x3E0, 2);
    clear_msr_range(MSR_P4_CCCR0, 18);
    clear_msr_range(MSR_P4_PERFCTR0, 18);

    wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
    /* Configure the CCCR fully before setting its enable bit. */
    wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
    Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000));
    /* Preload with a negative count so overflow occurs after ~1/nmi_hz s. */
    wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1);
    /* Route performance-counter interrupts as NMIs. */
    apic_write(APIC_LVTPC, APIC_DM_NMI);
    wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);

    return 1;
}
212 void __pminit setup_apic_nmi_watchdog(void)
213 {
214 int cpu = smp_processor_id();
216 if (!nmi_watchdog)
217 return;
219 switch (boot_cpu_data.x86_vendor) {
220 case X86_VENDOR_AMD:
221 if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
222 return;
223 setup_k7_watchdog();
224 break;
225 case X86_VENDOR_INTEL:
226 switch (boot_cpu_data.x86) {
227 case 6:
228 setup_p6_watchdog();
229 break;
230 case 15:
231 if (!setup_p4_watchdog())
232 return;
233 break;
234 default:
235 return;
236 }
237 break;
238 default:
239 return;
240 }
242 init_ac_timer(&nmi_timer[cpu], nmi_timer_fn, NULL, cpu);
244 nmi_pm_init();
245 }
248 static unsigned int
249 last_irq_sums [NR_CPUS],
250 alert_counter [NR_CPUS];
252 static spinlock_t watchdog_lock = SPIN_LOCK_UNLOCKED;
253 static unsigned int watchdog_disable_count = 1;
254 static unsigned int watchdog_on;
256 void watchdog_disable(void)
257 {
258 unsigned long flags;
260 spin_lock_irqsave(&watchdog_lock, flags);
262 if ( watchdog_disable_count++ == 0 )
263 watchdog_on = 0;
265 spin_unlock_irqrestore(&watchdog_lock, flags);
266 }
268 void watchdog_enable(void)
269 {
270 unsigned int cpu;
271 unsigned long flags;
273 spin_lock_irqsave(&watchdog_lock, flags);
275 if ( --watchdog_disable_count == 0 )
276 {
277 watchdog_on = 1;
278 /*
279 * Ensure periodic heartbeats are active. We cannot do this earlier
280 * during setup because the timer infrastructure is not available.
281 */
282 for_each_online_cpu ( cpu )
283 set_ac_timer(&nmi_timer[cpu], NOW());
284 }
286 spin_unlock_irqrestore(&watchdog_lock, flags);
287 }
/*
 * NMI handler body for the watchdog. Compares this CPU's heartbeat
 * tick count against the value seen on the previous NMI; if it has
 * not advanced for 5*nmi_hz consecutive NMIs while the watchdog is
 * on, declares the CPU stuck and takes a fatal trap. Finally re-arms
 * the performance counter so the next NMI fires in ~1/nmi_hz seconds.
 */
void nmi_watchdog_tick(struct cpu_user_regs * regs)
{
    int sum, cpu = smp_processor_id();

    sum = nmi_timer_ticks[cpu];

    if ( (last_irq_sums[cpu] == sum) && watchdog_on )
    {
        /*
         * Ayiee, looks like this CPU is stuck ... wait a few IRQs (5 seconds)
         * before doing the oops ...
         */
        alert_counter[cpu]++;
        if ( alert_counter[cpu] == 5*nmi_hz )
        {
            /* Force console output even if a lock holder was interrupted. */
            console_force_unlock();
            printk("Watchdog timer detects that CPU%d is stuck!\n", cpu);
            fatal_trap(TRAP_nmi, regs);
        }
    }
    else
    {
        /* Heartbeat advanced (or watchdog off): reset the stuck counter. */
        last_irq_sums[cpu] = sum;
        alert_counter[cpu] = 0;
    }

    if ( nmi_perfctr_msr )
    {
        if ( nmi_perfctr_msr == MSR_P4_IQ_COUNTER0 )
        {
            /*
             * P4 quirks:
             * - An overflown perfctr will assert its interrupt
             *   until the OVF flag in its CCCR is cleared.
             * - LVTPC is masked on interrupt and must be
             *   unmasked by the LVTPC handler.
             */
            wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
            apic_write(APIC_LVTPC, APIC_DM_NMI);
        }
        else if ( nmi_perfctr_msr == MSR_P6_PERFCTR0 )
        {
            /*
             * Only P6 based Pentium M need to re-unmask the apic vector but
             * it doesn't hurt other P6 variants.
             */
            apic_write(APIC_LVTPC, APIC_DM_NMI);
        }
        /* Re-arm: preload a negative count so the counter overflows again. */
        wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
    }
}