debuggers.hg

view xen/common/keyhandler.c @ 22848:6341fe0f4e5a

Added tag 4.1.0-rc2 for changeset 9dca60d88c63
author Keir Fraser <keir@xen.org>
date Tue Jan 25 14:06:55 2011 +0000 (2011-01-25)
parents dca1b7cf2e2c
/******************************************************************************
 * keyhandler.c
 */

#include <asm/regs.h>
#include <xen/keyhandler.h>
#include <xen/shutdown.h>
#include <xen/event.h>
#include <xen/console.h>
#include <xen/serial.h>
#include <xen/sched.h>
#include <xen/tasklet.h>
#include <xen/domain.h>
#include <xen/rangeset.h>
#include <xen/compat.h>
#include <xen/ctype.h>
#include <asm/debugger.h>
#include <asm/div64.h>

static struct keyhandler *key_table[256];
static unsigned char keypress_key;
static bool_t alt_key_handling;

char keyhandler_scratch[1024];

static void keypress_action(unsigned long unused)
{
    handle_keypress(keypress_key, NULL);
}

static DECLARE_TASKLET(keypress_tasklet, keypress_action, 0);
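
/*
 * Dispatch a debug key. Handlers flagged irq_callback are invoked
 * immediately, even from IRQ context; other handlers run immediately only
 * when we are not in IRQ context, and are otherwise deferred to
 * keypress_tasklet so they execute in a schedulable context.
 */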
void handle_keypress(unsigned char key, struct cpu_user_regs *regs)
{
    struct keyhandler *h;

    if ( (h = key_table[key]) == NULL )
        return;

    if ( !in_irq() || h->irq_callback )
    {
        console_start_log_everything();
        h->irq_callback ? (*h->u.irq_fn)(key, regs) : (*h->u.fn)(key);
        console_end_log_everything();
    }
    else
    {
        keypress_key = key;
        tasklet_schedule(&keypress_tasklet);
    }
}

void register_keyhandler(unsigned char key, struct keyhandler *handler)
{
    ASSERT(key_table[key] == NULL);
    key_table[key] = handler;
}
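
/*
 * Usage sketch (hypothetical, for illustration only): a subsystem that wants
 * a non-IRQ diagnostic key defines a handler and registers it once at boot.
 * The names dump_foo_state and dump_foo_keyhandler are made up here; only
 * register_keyhandler() and struct keyhandler come from this file.
 *
 *   static void dump_foo_state(unsigned char key)
 *   {
 *       printk("'%c' pressed -> dumping foo state\n", key);
 *   }
 *
 *   static struct keyhandler dump_foo_keyhandler = {
 *       .diagnostic = 1,
 *       .u.fn = dump_foo_state,
 *       .desc = "dump foo state"
 *   };
 *
 *   register_keyhandler('f', &dump_foo_keyhandler);
 */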

static void show_handlers(unsigned char key)
{
    int i;
    printk("'%c' pressed -> showing installed handlers\n", key);
    for ( i = 0; i < ARRAY_SIZE(key_table); i++ )
        if ( key_table[i] != NULL )
            printk(" key '%c' (ascii '%02x') => %s\n",
                   isprint(i) ? i : ' ', i, key_table[i]->desc);
}

static struct keyhandler show_handlers_keyhandler = {
    .u.fn = show_handlers,
    .desc = "show this message"
};

static cpumask_t dump_execstate_mask;
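
/*
 * Dump host and/or guest execution state for the current CPU. In
 * alternative key handling mode, each CPU clears its own bit and then kicks
 * the next CPU still set in dump_execstate_mask via smp_send_state_dump(),
 * so the dumps proceed asynchronously, one CPU at a time.
 */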
void dump_execstate(struct cpu_user_regs *regs)
{
    unsigned int cpu = smp_processor_id();

    if ( !guest_mode(regs) )
    {
        printk("*** Dumping CPU%u host state: ***\n", cpu);
        show_execution_state(regs);
    }

    if ( !is_idle_vcpu(current) )
    {
        printk("*** Dumping CPU%u guest state (d%d:v%d): ***\n",
               smp_processor_id(), current->domain->domain_id,
               current->vcpu_id);
        show_execution_state(guest_cpu_user_regs());
        printk("\n");
    }

    cpu_clear(cpu, dump_execstate_mask);
    if ( !alt_key_handling )
        return;

    cpu = cycle_cpu(cpu, dump_execstate_mask);
    if ( cpu < NR_CPUS )
    {
        smp_send_state_dump(cpu);
        return;
    }

    console_end_sync();
    watchdog_enable();
}

static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
{
    unsigned int cpu;

    /* We want to get everything out that we possibly can. */
    watchdog_disable();
    console_start_sync();

    printk("'%c' pressed -> dumping registers\n\n", key);

    dump_execstate_mask = cpu_online_map;

    /* Get local execution state out immediately, in case we get stuck. */
    dump_execstate(regs);

    /* Alt. handling: remaining CPUs are dumped asynchronously one-by-one. */
    if ( alt_key_handling )
        return;

    /* Normal handling: synchronously dump the remaining CPUs' states. */
    for_each_cpu_mask ( cpu, dump_execstate_mask )
    {
        smp_send_state_dump(cpu);
        while ( cpu_isset(cpu, dump_execstate_mask) )
            cpu_relax();
    }

    console_end_sync();
    watchdog_enable();
}

static struct keyhandler dump_registers_keyhandler = {
    .irq_callback = 1,
    .diagnostic = 1,
    .u.irq_fn = dump_registers,
    .desc = "dump registers"
};
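
/*
 * Dump Dom0 vCPU registers. With alternative key handling, whenever softirqs
 * are pending the remaining vCPUs are handed off to dump_dom0_tasklet, which
 * continues the dump and reschedules itself onto the next vCPU's processor.
 */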
static DECLARE_TASKLET(dump_dom0_tasklet, NULL, 0);

static void dump_dom0_action(unsigned long arg)
{
    struct vcpu *v = (void *)arg;

    for ( ; ; )
    {
        vcpu_show_execution_state(v);
        if ( (v = v->next_in_list) == NULL )
            break;
        if ( softirq_pending(smp_processor_id()) )
        {
            dump_dom0_tasklet.data = (unsigned long)v;
            tasklet_schedule_on_cpu(&dump_dom0_tasklet, v->processor);
            break;
        }
    }
}

static void dump_dom0_registers(unsigned char key)
{
    struct vcpu *v;

    if ( dom0 == NULL )
        return;

    printk("'%c' pressed -> dumping Dom0's registers\n", key);

    for_each_vcpu ( dom0, v )
    {
        if ( alt_key_handling && softirq_pending(smp_processor_id()) )
        {
            tasklet_kill(&dump_dom0_tasklet);
            tasklet_init(&dump_dom0_tasklet, dump_dom0_action,
                         (unsigned long)v);
            tasklet_schedule_on_cpu(&dump_dom0_tasklet, v->processor);
            return;
        }
        vcpu_show_execution_state(v);
    }
}

static struct keyhandler dump_dom0_registers_keyhandler = {
    .diagnostic = 1,
    .u.fn = dump_dom0_registers,
    .desc = "dump Dom0 registers"
};

static void reboot_machine(unsigned char key, struct cpu_user_regs *regs)
{
    printk("'%c' pressed -> rebooting machine\n", key);
    machine_restart(0);
}

static struct keyhandler reboot_machine_keyhandler = {
    .irq_callback = 1,
    .u.irq_fn = reboot_machine,
    .desc = "reboot machine"
};

static void cpuset_print(char *set, int size, cpumask_t mask)
{
    *set++ = '{';
    set += cpulist_scnprintf(set, size-2, mask);
    *set++ = '}';
    *set++ = '\0';
}

static void periodic_timer_print(char *str, int size, uint64_t period)
{
    if ( period == 0 )
    {
        strlcpy(str, "No periodic timer", size);
        return;
    }

    snprintf(str, size,
             "%u Hz periodic timer (period %u ms)",
             1000000000/(int)period, (int)period/1000000);
}
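
/*
 * Dump per-domain and per-vCPU state under the domain-list RCU read lock,
 * then send VIRQ_DEBUG to every vCPU so guests can emit their own debug
 * information as well.
 */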
static void dump_domains(unsigned char key)
{
    struct domain *d;
    struct vcpu *v;
    s_time_t now = NOW();
#define tmpstr keyhandler_scratch

    printk("'%c' pressed -> dumping domain info (now=0x%X:%08X)\n", key,
           (u32)(now>>32), (u32)now);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
    {
        unsigned int i;
        printk("General information for domain %u:\n", d->domain_id);
        cpuset_print(tmpstr, sizeof(tmpstr), d->domain_dirty_cpumask);
        printk(" refcnt=%d dying=%d nr_pages=%d xenheap_pages=%d "
               "dirty_cpus=%s max_pages=%u\n",
               atomic_read(&d->refcnt), d->is_dying,
               d->tot_pages, d->xenheap_pages, tmpstr, d->max_pages);
        printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
               "%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
               d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
               d->handle[ 4], d->handle[ 5], d->handle[ 6], d->handle[ 7],
               d->handle[ 8], d->handle[ 9], d->handle[10], d->handle[11],
               d->handle[12], d->handle[13], d->handle[14], d->handle[15],
               d->vm_assist);
        for ( i = 0 ; i < NR_DOMAIN_WATCHDOG_TIMERS; i++ )
            if ( test_bit(i, &d->watchdog_inuse_map) )
                printk(" watchdog %d expires in %d seconds\n",
                       i, (u32)((d->watchdog_timer[i].expires - NOW()) >> 30));

        arch_dump_domain_info(d);

        rangeset_domain_printk(d);

        dump_pageframe_info(d);

        printk("VCPU information and callbacks for domain %u:\n",
               d->domain_id);
        for_each_vcpu ( d, v )
        {
            printk(" VCPU%d: CPU%d [has=%c] flags=%lx poll=%d "
                   "upcall_pend = %02x, upcall_mask = %02x ",
                   v->vcpu_id, v->processor,
                   v->is_running ? 'T':'F',
                   v->pause_flags, v->poll_evtchn,
                   vcpu_info(v, evtchn_upcall_pending),
                   vcpu_info(v, evtchn_upcall_mask));
            cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
            printk("dirty_cpus=%s ", tmpstr);
            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_affinity);
            printk("cpu_affinity=%s\n", tmpstr);
            arch_dump_vcpu_info(v);
            periodic_timer_print(tmpstr, sizeof(tmpstr), v->periodic_period);
            printk(" %s\n", tmpstr);
        }
    }

    for_each_domain ( d )
    {
        for_each_vcpu ( d, v )
        {
            printk("Notifying guest %d:%d (virq %d, port %d, stat %d/%d/%d)\n",
                   d->domain_id, v->vcpu_id,
                   VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG],
                   test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
                            &shared_info(d, evtchn_pending)),
                   test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
                            &shared_info(d, evtchn_mask)),
                   test_bit(v->virq_to_evtchn[VIRQ_DEBUG] /
                            BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)));
            send_guest_vcpu_virq(v, VIRQ_DEBUG);
        }
    }

    rcu_read_unlock(&domlist_read_lock);
#undef tmpstr
}

static struct keyhandler dump_domains_keyhandler = {
    .diagnostic = 1,
    .u.fn = dump_domains,
    .desc = "dump domain (and guest debug) info"
};

static cpumask_t read_clocks_cpumask = CPU_MASK_NONE;
static DEFINE_PER_CPU(s_time_t, read_clocks_time);
static DEFINE_PER_CPU(u64, read_cycles_time);
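
/*
 * Clock-skew diagnostic: the initiating CPU IPIs all other online CPUs into
 * read_clocks_slave(), then releases them together by setting
 * read_clocks_cpumask. Each CPU samples NOW() and get_cycles() with
 * interrupts disabled, and the spread between the per-CPU samples is
 * reported as the system time / cycle counter skew.
 */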
static void read_clocks_slave(void *unused)
{
    unsigned int cpu = smp_processor_id();
    local_irq_disable();
    while ( !cpu_isset(cpu, read_clocks_cpumask) )
        cpu_relax();
    per_cpu(read_clocks_time, cpu) = NOW();
    per_cpu(read_cycles_time, cpu) = get_cycles();
    cpu_clear(cpu, read_clocks_cpumask);
    local_irq_enable();
}

static void read_clocks(unsigned char key)
{
    unsigned int cpu = smp_processor_id(), min_stime_cpu, max_stime_cpu;
    unsigned int min_cycles_cpu, max_cycles_cpu;
    u64 min_stime, max_stime, dif_stime;
    u64 min_cycles, max_cycles, dif_cycles;
    static u64 sumdif_stime = 0, maxdif_stime = 0;
    static u64 sumdif_cycles = 0, maxdif_cycles = 0;
    static u32 count = 0;
    static DEFINE_SPINLOCK(lock);

    spin_lock(&lock);

    smp_call_function(read_clocks_slave, NULL, 0);

    local_irq_disable();
    read_clocks_cpumask = cpu_online_map;
    per_cpu(read_clocks_time, cpu) = NOW();
    per_cpu(read_cycles_time, cpu) = get_cycles();
    cpu_clear(cpu, read_clocks_cpumask);
    local_irq_enable();

    while ( !cpus_empty(read_clocks_cpumask) )
        cpu_relax();

    min_stime_cpu = max_stime_cpu = min_cycles_cpu = max_cycles_cpu = cpu;
    for_each_online_cpu ( cpu )
    {
        if ( per_cpu(read_clocks_time, cpu) <
             per_cpu(read_clocks_time, min_stime_cpu) )
            min_stime_cpu = cpu;
        if ( per_cpu(read_clocks_time, cpu) >
             per_cpu(read_clocks_time, max_stime_cpu) )
            max_stime_cpu = cpu;
        if ( per_cpu(read_cycles_time, cpu) <
             per_cpu(read_cycles_time, min_cycles_cpu) )
            min_cycles_cpu = cpu;
        if ( per_cpu(read_cycles_time, cpu) >
             per_cpu(read_cycles_time, max_cycles_cpu) )
            max_cycles_cpu = cpu;
    }

    min_stime = per_cpu(read_clocks_time, min_stime_cpu);
    max_stime = per_cpu(read_clocks_time, max_stime_cpu);
    min_cycles = per_cpu(read_cycles_time, min_cycles_cpu);
    max_cycles = per_cpu(read_cycles_time, max_cycles_cpu);

    spin_unlock(&lock);

    dif_stime = max_stime - min_stime;
    if ( dif_stime > maxdif_stime )
        maxdif_stime = dif_stime;
    sumdif_stime += dif_stime;
    dif_cycles = max_cycles - min_cycles;
    if ( dif_cycles > maxdif_cycles )
        maxdif_cycles = dif_cycles;
    sumdif_cycles += dif_cycles;
    count++;
    printk("Synced stime skew: max=%"PRIu64"ns avg=%"PRIu64"ns "
           "samples=%"PRIu32" current=%"PRIu64"ns\n",
           maxdif_stime, sumdif_stime/count, count, dif_stime);
    printk("Synced cycles skew: max=%"PRIu64" avg=%"PRIu64" "
           "samples=%"PRIu32" current=%"PRIu64"\n",
           maxdif_cycles, sumdif_cycles/count, count, dif_cycles);
}

static struct keyhandler read_clocks_keyhandler = {
    .diagnostic = 1,
    .u.fn = read_clocks,
    .desc = "display multi-cpu clock info"
};

extern void dump_runq(unsigned char key);
static struct keyhandler dump_runq_keyhandler = {
    .diagnostic = 1,
    .u.fn = dump_runq,
    .desc = "dump run queues"
};

#ifdef PERF_COUNTERS
extern void perfc_printall(unsigned char key);
static struct keyhandler perfc_printall_keyhandler = {
    .diagnostic = 1,
    .u.fn = perfc_printall,
    .desc = "print performance counters"
};
extern void perfc_reset(unsigned char key);
static struct keyhandler perfc_reset_keyhandler = {
    .u.fn = perfc_reset,
    .desc = "reset performance counters"
};
#endif

#ifdef LOCK_PROFILE
extern void spinlock_profile_printall(unsigned char key);
static struct keyhandler spinlock_printall_keyhandler = {
    .diagnostic = 1,
    .u.fn = spinlock_profile_printall,
    .desc = "print lock profile info"
};
extern void spinlock_profile_reset(unsigned char key);
static struct keyhandler spinlock_reset_keyhandler = {
    .u.fn = spinlock_profile_reset,
    .desc = "reset lock profile info"
};
#endif
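
/*
 * The '*' key runs every diagnostic handler: IRQ-context handlers are
 * invoked synchronously from run_all_keyhandlers(), while the remaining
 * non-IRQ handlers are deferred to run_all_keyhandlers_tasklet so that
 * pending softirqs can be processed between handlers.
 */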
static void run_all_nonirq_keyhandlers(unsigned long unused)
{
    /* Fire all the non-IRQ-context diagnostic keyhandlers */
    struct keyhandler *h;
    int k;

    console_start_log_everything();

    for ( k = 0; k < ARRAY_SIZE(key_table); k++ )
    {
        process_pending_softirqs();
        h = key_table[k];
        if ( (h == NULL) || !h->diagnostic || h->irq_callback )
            continue;
        printk("[%c: %s]\n", k, h->desc);
        (*h->u.fn)(k);
    }

    console_end_log_everything();
}

static DECLARE_TASKLET(run_all_keyhandlers_tasklet,
                       run_all_nonirq_keyhandlers, 0);

static void run_all_keyhandlers(unsigned char key, struct cpu_user_regs *regs)
{
    struct keyhandler *h;
    int k;

    watchdog_disable();

    printk("'%c' pressed -> firing all diagnostic keyhandlers\n", key);

    /* Fire all the IRQ-context diagnostic keyhandlers now */
    for ( k = 0; k < ARRAY_SIZE(key_table); k++ )
    {
        h = key_table[k];
        if ( (h == NULL) || !h->diagnostic || !h->irq_callback )
            continue;
        printk("[%c: %s]\n", k, h->desc);
        (*h->u.irq_fn)(k, regs);
    }

    watchdog_enable();

    /* Trigger the others from a tasklet in non-IRQ context */
    tasklet_schedule(&run_all_keyhandlers_tasklet);
}

static struct keyhandler run_all_keyhandlers_keyhandler = {
    .irq_callback = 1,
    .u.irq_fn = run_all_keyhandlers,
    .desc = "print all diagnostics"
};

static void do_debug_key(unsigned char key, struct cpu_user_regs *regs)
{
    printk("'%c' pressed -> trapping into debugger\n", key);
    (void)debugger_trap_fatal(0xf001, regs);
    nop(); /* Prevent the compiler doing tail call
              optimisation, as that confuses xendbg a bit. */
}

static struct keyhandler do_debug_key_keyhandler = {
    .irq_callback = 1,
    .u.irq_fn = do_debug_key,
    .desc = "trap to xendbg"
};

static void do_toggle_alt_key(unsigned char key, struct cpu_user_regs *regs)
{
    alt_key_handling = !alt_key_handling;
    printk("'%c' pressed -> using %s key handling\n", key,
           alt_key_handling ? "alternative" : "normal");
}

static struct keyhandler toggle_alt_keyhandler = {
    .irq_callback = 1,
    .u.irq_fn = do_toggle_alt_key,
    .desc = "toggle alternative key handling"
};
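
/*
 * Bind the default debug keys. Alternative (asynchronous, CPU-by-CPU) key
 * handling is enabled by default on systems with more than 16 present CPUs;
 * 'A' toggles it at run time.
 */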
void __init initialize_keytable(void)
{
    if ( num_present_cpus() > 16 )
    {
        alt_key_handling = 1;
        printk(XENLOG_INFO "Defaulting to alternative key handling; "
               "send 'A' to switch to normal mode.\n");
    }
    register_keyhandler('A', &toggle_alt_keyhandler);
    register_keyhandler('d', &dump_registers_keyhandler);
    register_keyhandler('h', &show_handlers_keyhandler);
    register_keyhandler('q', &dump_domains_keyhandler);
    register_keyhandler('r', &dump_runq_keyhandler);
    register_keyhandler('R', &reboot_machine_keyhandler);
    register_keyhandler('t', &read_clocks_keyhandler);
    register_keyhandler('0', &dump_dom0_registers_keyhandler);
    register_keyhandler('%', &do_debug_key_keyhandler);
    register_keyhandler('*', &run_all_keyhandlers_keyhandler);

#ifdef PERF_COUNTERS
    register_keyhandler('p', &perfc_printall_keyhandler);
    register_keyhandler('P', &perfc_reset_keyhandler);
#endif

#ifdef LOCK_PROFILE
    register_keyhandler('l', &spinlock_printall_keyhandler);
    register_keyhandler('L', &spinlock_reset_keyhandler);
#endif

}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */