/root/src/xen/xen/common/keyhandler.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * keyhandler.c |
3 | | */ |
4 | | |
5 | | #include <asm/regs.h> |
6 | | #include <xen/keyhandler.h> |
7 | | #include <xen/shutdown.h> |
8 | | #include <xen/event.h> |
9 | | #include <xen/console.h> |
10 | | #include <xen/serial.h> |
11 | | #include <xen/sched.h> |
12 | | #include <xen/tasklet.h> |
13 | | #include <xen/domain.h> |
14 | | #include <xen/rangeset.h> |
15 | | #include <xen/compat.h> |
16 | | #include <xen/ctype.h> |
17 | | #include <xen/perfc.h> |
18 | | #include <xen/mm.h> |
19 | | #include <xen/watchdog.h> |
20 | | #include <xen/init.h> |
21 | | #include <asm/debugger.h> |
22 | | #include <asm/div64.h> |
23 | | |
/* Key latched for deferred handling; consumed by keypress_action() tasklet. */
static unsigned char keypress_key;
/* When set, multi-CPU dumps proceed asynchronously, one CPU at a time. */
static bool_t alt_key_handling;

/* Forward declarations for the statically-registered handlers in key_table. */
static keyhandler_fn_t show_handlers, dump_hwdom_registers,
    dump_domains, read_clocks;
static irq_keyhandler_fn_t do_toggle_alt_key, dump_registers,
    reboot_machine, run_all_keyhandlers, do_debug_key;

/* Shared scratch buffer for handlers formatting cpu/node sets and the like. */
char keyhandler_scratch[1024];
33 | | |
/*
 * Table of debug-key handlers, indexed directly by ASCII key value.
 * Entries below are installed statically; further entries are added at
 * run time via register_keyhandler()/register_irq_keyhandler().  The
 * union discriminant is irq_callback: when set, irq_fn is valid and the
 * handler runs in IRQ context; otherwise fn runs in tasklet context.
 */
static struct keyhandler {
    union {
        keyhandler_fn_t *fn;
        irq_keyhandler_fn_t *irq_fn;
    };

    const char *desc;    /* Description for help message. */
    bool_t irq_callback, /* Call in irq context? if not, tasklet context. */
        diagnostic;      /* Include in 'dump all' handler. */
} key_table[128] __read_mostly =
{
#define KEYHANDLER(k, f, desc, diag)            \
    [k] = { { (f) }, desc, 0, diag }

#define IRQ_KEYHANDLER(k, f, desc, diag)        \
    [k] = { { (keyhandler_fn_t *)(f) }, desc, 1, diag }

    IRQ_KEYHANDLER('A', do_toggle_alt_key, "toggle alternative key handling", 0),
    IRQ_KEYHANDLER('d', dump_registers, "dump registers", 1),
    KEYHANDLER('h', show_handlers, "show this message", 0),
    KEYHANDLER('q', dump_domains, "dump domain (and guest debug) info", 1),
    KEYHANDLER('r', dump_runq, "dump run queues", 1),
    IRQ_KEYHANDLER('R', reboot_machine, "reboot machine", 0),
    KEYHANDLER('t', read_clocks, "display multi-cpu clock info", 1),
    KEYHANDLER('0', dump_hwdom_registers, "dump Dom0 registers", 1),
    IRQ_KEYHANDLER('%', do_debug_key, "trap to xendbg", 0),
    IRQ_KEYHANDLER('*', run_all_keyhandlers, "print all diagnostics", 0),

#ifdef CONFIG_PERF_COUNTERS
    KEYHANDLER('p', perfc_printall, "print performance counters", 1),
    KEYHANDLER('P', perfc_reset, "reset performance counters", 0),
#endif

#ifdef CONFIG_LOCK_PROFILE
    KEYHANDLER('l', spinlock_profile_printall, "print lock profile info", 1),
    KEYHANDLER('L', spinlock_profile_reset, "reset lock profile info", 0),
#endif

#undef IRQ_KEYHANDLER
#undef KEYHANDLER
};
75 | | |
/* Tasklet context: run the deferred (non-IRQ) handler for the latched key. */
static void keypress_action(unsigned long unused)
{
    /* NULL regs: tasklet context has no meaningful interrupted frame. */
    handle_keypress(keypress_key, NULL);
}

static DECLARE_TASKLET(keypress_tasklet, keypress_action, 0);
82 | | |
83 | | void handle_keypress(unsigned char key, struct cpu_user_regs *regs) |
84 | 0 | { |
85 | 0 | struct keyhandler *h; |
86 | 0 |
|
87 | 0 | if ( key >= ARRAY_SIZE(key_table) || !(h = &key_table[key])->fn ) |
88 | 0 | return; |
89 | 0 |
|
90 | 0 | if ( !in_irq() || h->irq_callback ) |
91 | 0 | { |
92 | 0 | console_start_log_everything(); |
93 | 0 | h->irq_callback ? h->irq_fn(key, regs) : h->fn(key); |
94 | 0 | console_end_log_everything(); |
95 | 0 | } |
96 | 0 | else |
97 | 0 | { |
98 | 0 | keypress_key = key; |
99 | 0 | tasklet_schedule(&keypress_tasklet); |
100 | 0 | } |
101 | 0 | } |
102 | | |
103 | | void register_keyhandler(unsigned char key, keyhandler_fn_t fn, |
104 | | const char *desc, bool_t diagnostic) |
105 | 22 | { |
106 | 22 | BUG_ON(key >= ARRAY_SIZE(key_table)); /* Key in range? */ |
107 | 22 | ASSERT(!key_table[key].fn); /* Clobbering something else? */ |
108 | 22 | |
109 | 22 | key_table[key].fn = fn; |
110 | 22 | key_table[key].desc = desc; |
111 | 22 | key_table[key].irq_callback = 0; |
112 | 22 | key_table[key].diagnostic = diagnostic; |
113 | 22 | } |
114 | | |
115 | | void register_irq_keyhandler(unsigned char key, irq_keyhandler_fn_t fn, |
116 | | const char *desc, bool_t diagnostic) |
117 | 3 | { |
118 | 3 | BUG_ON(key >= ARRAY_SIZE(key_table)); /* Key in range? */ |
119 | 3 | ASSERT(!key_table[key].irq_fn); /* Clobbering something else? */ |
120 | 3 | |
121 | 3 | key_table[key].irq_fn = fn; |
122 | 3 | key_table[key].desc = desc; |
123 | 3 | key_table[key].irq_callback = 1; |
124 | 3 | key_table[key].diagnostic = diagnostic; |
125 | 3 | } |
126 | | |
127 | | static void show_handlers(unsigned char key) |
128 | 0 | { |
129 | 0 | unsigned int i; |
130 | 0 |
|
131 | 0 | printk("'%c' pressed -> showing installed handlers\n", key); |
132 | 0 | for ( i = 0; i < ARRAY_SIZE(key_table); i++ ) |
133 | 0 | if ( key_table[i].fn ) |
134 | 0 | printk(" key '%c' (ascii '%02x') => %s\n", |
135 | 0 | isprint(i) ? i : ' ', i, key_table[i].desc); |
136 | 0 | } |
137 | | |
/* CPUs still owing a state dump; managed by dump_registers()/dump_execstate(). */
static cpumask_t dump_execstate_mask;

/*
 * Dump host and/or guest register state for the current CPU.  Called both
 * directly from dump_registers() (for the local CPU) and from the
 * state-dump IPI handler (for remote CPUs).  In alternative key handling
 * mode, this function itself chains the dump on to the next pending CPU.
 */
void dump_execstate(struct cpu_user_regs *regs)
{
    unsigned int cpu = smp_processor_id();

    if ( !guest_mode(regs) )
    {
        printk("*** Dumping CPU%u host state: ***\n", cpu);
        show_execution_state(regs);
    }

    if ( !is_idle_vcpu(current) )
    {
        printk("*** Dumping CPU%u guest state (%pv): ***\n",
               smp_processor_id(), current);
        show_execution_state(guest_cpu_user_regs());
        printk("\n");
    }

    /* Mark this CPU done; dump_registers() polls this bit in normal mode. */
    cpumask_clear_cpu(cpu, &dump_execstate_mask);
    if ( !alt_key_handling )
        return;

    /* Alternative mode: pass the baton to the next pending CPU, if any. */
    cpu = cpumask_cycle(cpu, &dump_execstate_mask);
    if ( cpu < nr_cpu_ids )
    {
        smp_send_state_dump(cpu);
        return;
    }

    /* All CPUs dumped: undo the sync/watchdog state set by dump_registers(). */
    console_end_sync();
    watchdog_enable();
}
172 | | |
/*
 * 'd' handler (IRQ context): dump register state for all online CPUs.
 * The local CPU is dumped immediately; remote CPUs are triggered via
 * smp_send_state_dump(), either synchronously here (normal mode) or
 * chained one-by-one from dump_execstate() (alternative mode).
 */
static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
{
    unsigned int cpu;

    /* We want to get everything out that we possibly can. */
    watchdog_disable();
    console_start_sync();

    printk("'%c' pressed -> dumping registers\n\n", key);

    cpumask_copy(&dump_execstate_mask, &cpu_online_map);

    /* Get local execution state out immediately, in case we get stuck. */
    dump_execstate(regs);

    /* Alt. handling: remaining CPUs are dumped asynchronously one-by-one. */
    if ( alt_key_handling )
        return; /* console_end_sync()/watchdog_enable() done by the last CPU. */

    /* Normal handling: synchronously dump the remaining CPUs' states. */
    for_each_cpu ( cpu, &dump_execstate_mask )
    {
        smp_send_state_dump(cpu);
        /* Wait for the target CPU to clear its bit before moving on. */
        while ( cpumask_test_cpu(cpu, &dump_execstate_mask) )
            cpu_relax();
    }

    console_end_sync();
    watchdog_enable();
}
203 | | |
static DECLARE_TASKLET(dump_hwdom_tasklet, NULL, 0);

/*
 * Tasklet body for dumping hardware-domain vcpu state; 'arg' is the vcpu
 * to start from.  If softirqs become pending mid-walk, the remainder is
 * rescheduled on the next vcpu's own processor instead of hogging this CPU.
 */
static void dump_hwdom_action(unsigned long arg)
{
    struct vcpu *v = (void *)arg;

    for ( ; ; )
    {
        vcpu_show_execution_state(v);
        if ( (v = v->next_in_list) == NULL )
            break;
        if ( softirq_pending(smp_processor_id()) )
        {
            /* Continue from 'v' later, on that vcpu's processor. */
            dump_hwdom_tasklet.data = (unsigned long)v;
            tasklet_schedule_on_cpu(&dump_hwdom_tasklet, v->processor);
            break;
        }
    }
}
223 | | |
/* '0' handler: dump execution state of every hardware domain (Dom0) vcpu. */
static void dump_hwdom_registers(unsigned char key)
{
    struct vcpu *v;

    if ( hardware_domain == NULL )
        return;

    printk("'%c' pressed -> dumping Dom0's registers\n", key);

    for_each_vcpu ( hardware_domain, v )
    {
        /*
         * In alternative handling mode, once softirqs are pending hand the
         * remaining vcpus off to a tasklet (starting at 'v') so this CPU
         * is not held up.
         */
        if ( alt_key_handling && softirq_pending(smp_processor_id()) )
        {
            tasklet_kill(&dump_hwdom_tasklet);
            tasklet_init(&dump_hwdom_tasklet, dump_hwdom_action,
                         (unsigned long)v);
            tasklet_schedule_on_cpu(&dump_hwdom_tasklet, v->processor);
            return;
        }
        vcpu_show_execution_state(v);
    }
}
246 | | |
/* 'R' handler (IRQ context): announce the keypress, then restart the host. */
static void reboot_machine(unsigned char key, struct cpu_user_regs *regs)
{
    printk("'%c' pressed -> rebooting machine\n", key);
    machine_restart(0); /* No delay before restart; does not return. */
}
252 | | |
253 | | static void cpuset_print(char *set, int size, const cpumask_t *mask) |
254 | 0 | { |
255 | 0 | *set++ = '{'; |
256 | 0 | set += cpulist_scnprintf(set, size-2, mask); |
257 | 0 | *set++ = '}'; |
258 | 0 | *set++ = '\0'; |
259 | 0 | } |
260 | | |
261 | | static void nodeset_print(char *set, int size, const nodemask_t *mask) |
262 | 0 | { |
263 | 0 | *set++ = '['; |
264 | 0 | set += nodelist_scnprintf(set, size-2, mask); |
265 | 0 | *set++ = ']'; |
266 | 0 | *set++ = '\0'; |
267 | 0 | } |
268 | | |
269 | | static void periodic_timer_print(char *str, int size, uint64_t period) |
270 | 0 | { |
271 | 0 | if ( period == 0 ) |
272 | 0 | { |
273 | 0 | strlcpy(str, "No periodic timer", size); |
274 | 0 | return; |
275 | 0 | } |
276 | 0 |
|
277 | 0 | snprintf(str, size, |
278 | 0 | "%u Hz periodic timer (period %u ms)", |
279 | 0 | 1000000000/(int)period, (int)period/1000000); |
280 | 0 | } |
281 | | |
/*
 * 'q' handler: print general, vcpu and callback information for every
 * domain, then notify each guest vcpu via VIRQ_DEBUG so in-guest debug
 * state can be dumped too.  Runs in tasklet context; softirqs are
 * serviced periodically so a large domain count does not stall the CPU.
 */
static void dump_domains(unsigned char key)
{
    struct domain *d;
    struct vcpu *v;
    s_time_t now = NOW();
#define tmpstr keyhandler_scratch

    printk("'%c' pressed -> dumping domain info (now=0x%X:%08X)\n", key,
           (u32)(now>>32), (u32)now);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
    {
        unsigned int i;

        process_pending_softirqs();

        printk("General information for domain %u:\n", d->domain_id);
        cpuset_print(tmpstr, sizeof(tmpstr), d->domain_dirty_cpumask);
        printk(" refcnt=%d dying=%d pause_count=%d\n",
               atomic_read(&d->refcnt), d->is_dying,
               atomic_read(&d->pause_count));
        printk(" nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u "
               "dirty_cpus=%s max_pages=%u\n", d->tot_pages, d->xenheap_pages,
               atomic_read(&d->shr_pages), atomic_read(&d->paged_pages),
               tmpstr, d->max_pages);
        printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
               "%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
               d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
               d->handle[ 4], d->handle[ 5], d->handle[ 6], d->handle[ 7],
               d->handle[ 8], d->handle[ 9], d->handle[10], d->handle[11],
               d->handle[12], d->handle[13], d->handle[14], d->handle[15],
               d->vm_assist);
        /* >>30 approximates ns-to-seconds (2^30 ~ 10^9) cheaply. */
        for ( i = 0 ; i < NR_DOMAIN_WATCHDOG_TIMERS; i++ )
            if ( test_bit(i, &d->watchdog_inuse_map) )
                printk(" watchdog %d expires in %d seconds\n",
                       i, (u32)((d->watchdog_timer[i].expires - NOW()) >> 30));

        arch_dump_domain_info(d);

        rangeset_domain_printk(d);

        dump_pageframe_info(d);

        nodeset_print(tmpstr, sizeof(tmpstr), &d->node_affinity);
        printk("NODE affinity for domain %d: %s\n", d->domain_id, tmpstr);

        printk("VCPU information and callbacks for domain %u:\n",
               d->domain_id);
        for_each_vcpu ( d, v )
        {
            /* Service softirqs every 64 vcpus to stay responsive. */
            if ( !(v->vcpu_id & 0x3f) )
                process_pending_softirqs();

            printk(" VCPU%d: CPU%d [has=%c] poll=%d "
                   "upcall_pend=%02x upcall_mask=%02x ",
                   v->vcpu_id, v->processor,
                   v->is_running ? 'T':'F', v->poll_evtchn,
                   vcpu_info(v, evtchn_upcall_pending),
                   !vcpu_event_delivery_is_enabled(v));
            cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
            printk("dirty_cpus=%s\n", tmpstr);
            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
            printk(" cpu_hard_affinity=%s ", tmpstr);
            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_soft_affinity);
            printk("cpu_soft_affinity=%s\n", tmpstr);
            printk(" pause_count=%d pause_flags=%lx\n",
                   atomic_read(&v->pause_count), v->pause_flags);
            arch_dump_vcpu_info(v);
            periodic_timer_print(tmpstr, sizeof(tmpstr), v->periodic_period);
            printk(" %s\n", tmpstr);
        }
    }

    /* Second pass: poke VIRQ_DEBUG into every vcpu of every domain. */
    for_each_domain ( d )
    {
        for_each_vcpu ( d, v )
        {
            if ( !(v->vcpu_id & 0x3f) )
                process_pending_softirqs();

            printk("Notifying guest %d:%d (virq %d, port %d)\n",
                   d->domain_id, v->vcpu_id,
                   VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG]);
            send_guest_vcpu_virq(v, VIRQ_DEBUG);
        }
    }

    arch_dump_shared_mem_info();

    rcu_read_unlock(&domlist_read_lock);
#undef tmpstr
}
376 | | |
/* Rendezvous state for read_clocks(): CPUs still expected to sample. */
static cpumask_t read_clocks_cpumask;
/* Per-CPU samples taken (near-)simultaneously across all CPUs. */
static DEFINE_PER_CPU(s_time_t, read_clocks_time);
static DEFINE_PER_CPU(u64, read_cycles_time);

/*
 * Run on every remote CPU by read_clocks(): spin with IRQs off until the
 * initiator flags this CPU in read_clocks_cpumask, then sample NOW() and
 * the cycle counter as close in time to the other CPUs as possible, and
 * clear the flag to signal completion.
 */
static void read_clocks_slave(void *unused)
{
    unsigned int cpu = smp_processor_id();
    local_irq_disable();
    while ( !cpumask_test_cpu(cpu, &read_clocks_cpumask) )
        cpu_relax();
    per_cpu(read_clocks_time, cpu) = NOW();
    per_cpu(read_cycles_time, cpu) = get_cycles();
    cpumask_clear_cpu(cpu, &read_clocks_cpumask);
    local_irq_enable();
}
392 | | |
/*
 * 't' handler: measure cross-CPU skew of system time and cycle counters.
 * All other CPUs are parked in read_clocks_slave(); releasing them via
 * read_clocks_cpumask makes every CPU sample at (nearly) the same moment.
 * Running statistics (max/average skew) accumulate across invocations.
 */
static void read_clocks(unsigned char key)
{
    unsigned int cpu = smp_processor_id(), min_stime_cpu, max_stime_cpu;
    unsigned int min_cycles_cpu, max_cycles_cpu;
    u64 min_stime, max_stime, dif_stime;
    u64 min_cycles, max_cycles, dif_cycles;
    /* Accumulated across calls; protected by 'lock' below. */
    static u64 sumdif_stime = 0, maxdif_stime = 0;
    static u64 sumdif_cycles = 0, maxdif_cycles = 0;
    static u32 count = 0;
    static DEFINE_SPINLOCK(lock);

    spin_lock(&lock);

    /* Park all other CPUs in the slave spin loop (non-waiting call). */
    smp_call_function(read_clocks_slave, NULL, 0);

    /* Release the slaves and take the local sample in the same instant. */
    local_irq_disable();
    cpumask_andnot(&read_clocks_cpumask, &cpu_online_map, cpumask_of(cpu));
    per_cpu(read_clocks_time, cpu) = NOW();
    per_cpu(read_cycles_time, cpu) = get_cycles();
    local_irq_enable();

    /* Wait for every slave to clear its bit (i.e. finish sampling). */
    while ( !cpumask_empty(&read_clocks_cpumask) )
        cpu_relax();

    /* Find the CPUs holding the extreme stime/cycle samples. */
    min_stime_cpu = max_stime_cpu = min_cycles_cpu = max_cycles_cpu = cpu;
    for_each_online_cpu ( cpu )
    {
        if ( per_cpu(read_clocks_time, cpu) <
             per_cpu(read_clocks_time, min_stime_cpu) )
            min_stime_cpu = cpu;
        if ( per_cpu(read_clocks_time, cpu) >
             per_cpu(read_clocks_time, max_stime_cpu) )
            max_stime_cpu = cpu;
        if ( per_cpu(read_cycles_time, cpu) <
             per_cpu(read_cycles_time, min_cycles_cpu) )
            min_cycles_cpu = cpu;
        if ( per_cpu(read_cycles_time, cpu) >
             per_cpu(read_cycles_time, max_cycles_cpu) )
            max_cycles_cpu = cpu;
    }

    min_stime = per_cpu(read_clocks_time, min_stime_cpu);
    max_stime = per_cpu(read_clocks_time, max_stime_cpu);
    min_cycles = per_cpu(read_cycles_time, min_cycles_cpu);
    max_cycles = per_cpu(read_cycles_time, max_cycles_cpu);

    spin_unlock(&lock);

    dif_stime = max_stime - min_stime;
    if ( dif_stime > maxdif_stime )
        maxdif_stime = dif_stime;
    sumdif_stime += dif_stime;
    dif_cycles = max_cycles - min_cycles;
    if ( dif_cycles > maxdif_cycles )
        maxdif_cycles = dif_cycles;
    sumdif_cycles += dif_cycles;
    count++;
    printk("Synced stime skew: max=%"PRIu64"ns avg=%"PRIu64"ns "
           "samples=%"PRIu32" current=%"PRIu64"ns\n",
           maxdif_stime, sumdif_stime/count, count, dif_stime);
    printk("Synced cycles skew: max=%"PRIu64" avg=%"PRIu64" "
           "samples=%"PRIu32" current=%"PRIu64"\n",
           maxdif_cycles, sumdif_cycles/count, count, dif_cycles);
}
457 | | |
458 | | static void run_all_nonirq_keyhandlers(unsigned long unused) |
459 | 0 | { |
460 | 0 | /* Fire all the non-IRQ-context diagnostic keyhandlers */ |
461 | 0 | struct keyhandler *h; |
462 | 0 | int k; |
463 | 0 |
|
464 | 0 | console_start_log_everything(); |
465 | 0 |
|
466 | 0 | for ( k = 0; k < ARRAY_SIZE(key_table); k++ ) |
467 | 0 | { |
468 | 0 | process_pending_softirqs(); |
469 | 0 | h = &key_table[k]; |
470 | 0 | if ( !h->fn || !h->diagnostic || h->irq_callback ) |
471 | 0 | continue; |
472 | 0 | printk("[%c: %s]\n", k, h->desc); |
473 | 0 | h->fn(k); |
474 | 0 | } |
475 | 0 |
|
476 | 0 | console_end_log_everything(); |
477 | 0 | } |
478 | | |
479 | | static DECLARE_TASKLET(run_all_keyhandlers_tasklet, |
480 | | run_all_nonirq_keyhandlers, 0); |
481 | | |
482 | | static void run_all_keyhandlers(unsigned char key, struct cpu_user_regs *regs) |
483 | 0 | { |
484 | 0 | struct keyhandler *h; |
485 | 0 | unsigned int k; |
486 | 0 |
|
487 | 0 | watchdog_disable(); |
488 | 0 |
|
489 | 0 | printk("'%c' pressed -> firing all diagnostic keyhandlers\n", key); |
490 | 0 |
|
491 | 0 | /* Fire all the IRQ-context diangostic keyhandlers now */ |
492 | 0 | for ( k = 0; k < ARRAY_SIZE(key_table); k++ ) |
493 | 0 | { |
494 | 0 | h = &key_table[k]; |
495 | 0 | if ( !h->irq_fn || !h->diagnostic || !h->irq_callback ) |
496 | 0 | continue; |
497 | 0 | printk("[%c: %s]\n", k, h->desc); |
498 | 0 | h->irq_fn(k, regs); |
499 | 0 | } |
500 | 0 |
|
501 | 0 | watchdog_enable(); |
502 | 0 |
|
503 | 0 | /* Trigger the others from a tasklet in non-IRQ context */ |
504 | 0 | tasklet_schedule(&run_all_keyhandlers_tasklet); |
505 | 0 | } |
506 | | |
/* '%' handler (IRQ context): hand control to an attached debugger (xendbg). */
static void do_debug_key(unsigned char key, struct cpu_user_regs *regs)
{
    printk("'%c' pressed -> trapping into debugger\n", key);
    (void)debugger_trap_fatal(0xf001, regs);
    nop(); /* Prevent the compiler doing tail call
              optimisation, as that confuses xendbg a
              bit. */
}
515 | | |
516 | | static void do_toggle_alt_key(unsigned char key, struct cpu_user_regs *regs) |
517 | 0 | { |
518 | 0 | alt_key_handling = !alt_key_handling; |
519 | 0 | printk("'%c' pressed -> using %s key handling\n", key, |
520 | 0 | alt_key_handling ? "alternative" : "normal"); |
521 | 0 | } |
522 | | |
523 | | void __init initialize_keytable(void) |
524 | 1 | { |
525 | 1 | if ( num_present_cpus() > 16 ) |
526 | 0 | { |
527 | 0 | alt_key_handling = 1; |
528 | 0 | printk(XENLOG_INFO "Defaulting to alternative key handling; " |
529 | 0 | "send 'A' to switch to normal mode.\n"); |
530 | 0 | } |
531 | 1 | } |
532 | | |
533 | | /* |
534 | | * Local variables: |
535 | | * mode: C |
536 | | * c-file-style: "BSD" |
537 | | * c-basic-offset: 4 |
538 | | * tab-width: 4 |
539 | | * indent-tabs-mode: nil |
540 | | * End: |
541 | | */ |