}
ss = esp & -THREAD_SIZE;
+#if 0	/* NOTE(review): __KERNEL_CS sanity check disabled by this patch --
+	 * presumably it mis-fires under Xen; confirm, and prefer an explicit
+	 * CONFIG_XEN guard (or deletion) over dead code in #if 0. */
if ((cs & 0xffff) != __KERNEL_CS) {
kdb_printf("Stack is not in kernel space, backtrace not available\n");
return 0;
}
+#endif
kdb_printf("ESP EIP Function (args)\n");
#include <linux/kdbprivate.h>
#include <asm/pc_keyb.h>
+#if defined(CONFIG_XEN)
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+#include <asm/hypervisor.h>
+#include <xen/evtchn.h>
+#include <xen/xencons.h>
+#endif
+
#ifdef CONFIG_VT_CONSOLE
#define KDB_BLINK_LED 1
#else
}
#endif /* CONFIG_SERIAL_CONSOLE */
+#if defined(CONFIG_XEN)
+/*
+ * get_xen_char - poll the Xen hypervisor console for one character.
+ *
+ * Returns the character read, 0x8 (backspace) when the hypervisor
+ * delivers DEL (0x7f), or -1 when no input is pending.
+ */
+static int get_xen_char(void)
+{
+ static char rbuf[16];
+
+ if ((HYPERVISOR_console_io(CONSOLEIO_read, 1, rbuf)) > 0)
+ {
+ if (rbuf[0] == 0x7f)
+ {
+ return 0x8; /* map DEL to backspace for kdb line editing */
+ }
+ return(rbuf[0]);
+ }
+ return -1;
+}
+#endif
+
#ifdef CONFIG_VT_CONSOLE
static int kbd_exists = -1;
#endif
get_char_func poll_funcs[] = {
+#if defined(CONFIG_XEN)
+ get_xen_char,
+#endif
#if defined(CONFIG_VT_CONSOLE)
get_kbd_char,
#endif
/* When first entering KDB, try a normal IPI. That reduces backtrace problems
* on the other cpus.
*/
+#ifdef CONFIG_XEN
+/*
+ * Xen build: interrupt every other cpu with the KDB vector, unless
+ * the user asked for no IPIs (KDB_FLAG(NOIPI)).
+ */
+void
+smp_kdb_stop(void)
+{
+ if (!KDB_FLAG(NOIPI))
+ send_IPI_allbutself(KDB_VECTOR);
+}
+#else
void
smp_kdb_stop(void)
{
send_IPI_mask(cpu_mask, KDB_VECTOR);
}
}
+#endif /* CONFIG_XEN */
/* The normal KDB IPI handler */
fastcall void
jmp syscall_exit
CFI_ENDPROC
+#if defined(CONFIG_KDB)
+# Entry stub for the KDB software interrupt: build a struct pt_regs
+# on the stack and call kdb(KDB_REASON_ENTRY, 0, regs).
+ENTRY(kdb_call)
+ pushl %eax # save orig EAX
+ SAVE_ALL
+ pushl %esp # struct pt_regs
+ pushl $0 # error_code
+ pushl $7 # KDB_REASON_ENTRY
+ call kdb
+ addl $12,%esp # remove args
+ jmp ret_from_exception
+#endif
+
/*
* Return to user mode is not as complex as all this looks,
* but we want the default path for a system call return to
#define UNWIND_ESPFIX_STACK
#endif
-ENTRY(kdb_interrupt) \
- pushl $(0xf8); \
- SAVE_ALL \
- movl %esp,%eax; \
- call smp_kdb_interrupt; \
- jmp ret_from_intr;
+#if defined(CONFIG_KDB) && defined(CONFIG_SMP)
+ENTRY(kdb_interrupt)
+ pushl $(0xf8)
+ SAVE_ALL
+ movl %esp,%eax
+ call smp_kdb_interrupt
+ jmp ret_from_intr
+#endif
ENTRY(divide_error)
#endif
#include <xen/evtchn.h>
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#include <linux/kdbprivate.h>
+#endif /* CONFIG_KDB */
+
/*
* Some notes on x86 processor bugs affecting SMP operation:
*
return IRQ_HANDLED;
}
+#ifdef CONFIG_KDB
+/*
+ * Xen IPI handler: another cpu has requested that this cpu enter KDB.
+ * regs is already a pointer here (event-channel irqhandler signature).
+ */
+irqreturn_t smp_kdba_process_ipi(int irq, void *data, struct pt_regs *regs)
+{
+ kdb_print_state(__FUNCTION__, 0);
+ kdb_ipi(regs, NULL);
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_KDB */
+
+
#include <linux/mca.h>
#endif
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#endif /* CONFIG_KDB */
+
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
int panic_on_unrecovered_nmi;
asmlinkage int system_call(void);
+#ifdef CONFIG_KDB
+asmlinkage int kdb_call(void);
+#endif /* CONFIG_KDB */
+
struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
{ 0, 0 }, { 0, 0 } };
bust_spinlocks(0);
die.lock_owner = -1;
spin_unlock_irqrestore(&die.lock, flags);
+#ifdef CONFIG_KDB
+ kdb_diemsg = str;
+ kdb(KDB_REASON_OOPS, err, regs);
+#endif /* CONFIG_KDB */
if (!regs)
return;
}
DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
-#ifndef CONFIG_KPROBES
+#if !defined(CONFIG_KPROBES) && !defined(CONFIG_KDB)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
+#ifdef CONFIG_KDB
+ (void)kdb(KDB_REASON_NMI, reason, regs);
+#endif /* CONFIG_KDB */
#ifdef CONFIG_MCA
/* Might actually be able to figure out what the guilty party
* is. */
printk(" on CPU%d, eip %08lx, registers:\n",
smp_processor_id(), regs->eip);
show_registers(regs);
+#ifdef CONFIG_KDB
+ kdb(KDB_REASON_NMI, 0, regs);
+#endif /* CONFIG_KDB */
+
printk(KERN_EMERG "console shuts up ...\n");
console_silent();
spin_unlock(&nmi_print_lock);
/* Only the BSP gets external NMIs from the system. */
if (!smp_processor_id())
reason = get_nmi_reason();
+
+#if defined(CONFIG_SMP) && defined(CONFIG_KDB)
+ /*
+ * Call the kernel debugger to see if this NMI is due
+	 * to a KDB-requested IPI. If so, kdb will handle it.
+ */
+ if (kdb_ipi(regs, NULL)) {
+ return;
+ }
+#endif /* defined(CONFIG_SMP) && defined(CONFIG_KDB) */
+
if (!(reason & 0xc0)) {
if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);
-#ifdef CONFIG_KPROBES
+
+#if defined(CONFIG_KDB) && !defined(CONFIG_KPROBES)
+/*
+ * Breakpoint (int3) handler when KDB is built in and kprobes (which
+ * supplies its own do_int3) is not.  Give KDB first claim on the
+ * breakpoint; otherwise deliver the usual SIGTRAP via do_trap().
+ */
+fastcall void do_int3(struct pt_regs * regs, long error_code)
+{
+ if (kdb(KDB_REASON_BREAK, error_code, regs))
+ return;
+ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
+}
+#endif /* CONFIG_KDB */
+
+#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
get_debugreg(condition, 6);
+#ifdef CONFIG_KDB
+ if (kdb(KDB_REASON_DEBUG, error_code, regs))
+ return;
+#endif /* CONFIG_KDB */
+
if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
SIGTRAP) == NOTIFY_STOP)
return;
#endif
{ 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
{ SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
+#ifdef CONFIG_KDB
+ { KDBENTER_VECTOR, 3|4, __KERNEL_CS, (unsigned long)kdb_call },
+#endif
+
{ 0, 0, 0, 0 }
};
void __init trap_init(void)
{
int ret;
+#ifdef CONFIG_KDB
+/* kdb_enablehwfault() no longer exists; no hardware-fault hook is installed here. */
+#endif
ret = HYPERVISOR_set_trap_table(trap_table);
if (ret)
}
ss = rsp & -THREAD_SIZE;
+#if 0	/* NOTE(review): __KERNEL_CS sanity check disabled by this patch --
+	 * presumably it mis-fires under Xen; confirm, and prefer an explicit
+	 * CONFIG_XEN guard (or deletion) over dead code in #if 0. */
if ((cs & 0xffff) != __KERNEL_CS) {
kdb_printf("Stack is not in kernel space, backtrace not available\n");
return 0;
}
+#endif
kdb_printf("RSP RIP Function (args)\n");
#include <linux/kdbprivate.h>
#include <asm/pc_keyb.h>
+#if defined(CONFIG_XEN)
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+#include <asm/hypervisor.h>
+#include <xen/evtchn.h>
+#include <xen/xencons.h>
+#endif
+
#ifdef CONFIG_VT_CONSOLE
#define KDB_BLINK_LED 1
#else
}
#endif /* CONFIG_SERIAL_CONSOLE */
+#if defined(CONFIG_XEN)
+/*
+ * get_xen_char - poll the Xen hypervisor console for one character.
+ *
+ * Returns the character read, 0x8 (backspace) when the hypervisor
+ * delivers DEL (0x7f), or -1 when no input is pending.
+ */
+static int get_xen_char(void)
+{
+ static char rbuf[16];
+
+ if ((HYPERVISOR_console_io(CONSOLEIO_read, 1, rbuf)) > 0)
+ {
+ if (rbuf[0] == 0x7f)
+ {
+ return 0x8; /* map DEL to backspace for kdb line editing */
+ }
+ return(rbuf[0]);
+ }
+ return -1;
+}
+#endif
+
+
#ifdef CONFIG_VT_CONSOLE
static int kbd_exists = -1;
#endif
get_char_func poll_funcs[] = {
+#if defined(CONFIG_XEN)
+ get_xen_char,
+#endif
#if defined(CONFIG_VT_CONSOLE)
get_kbd_char,
#endif
#include <asm/hw_irq.h>
#include <asm/desc.h>
+#define KDBENTER_VECTOR 0xf8
+extern asmlinkage void kdb_interrupt(void);
kdb_machreg_t
kdba_getdr6(void)
{
return kdba_getdr(6);
}
+
+/*
+ * The following definitions are placed here to resolve build
+ * errors which occur when KDB is being compiled for XEN,
+ * hence the use of #ifdef CONFIG_XEN.
+ *
+ * NOTE(review): an apparently identical #define/extern pair is also
+ * added unconditionally earlier in this patch -- confirm both copies
+ * are needed (different files?) or fold them into one definition.
+ */
+#ifdef CONFIG_XEN
+#define KDBENTER_VECTOR 0xf8
+extern asmlinkage void kdb_interrupt(void);
+#endif
+
kdb_machreg_t
kdba_getdr7(void)
{
send_IPI_mask(cpumask_of_cpu(c), NMI_VECTOR);
}
-#endif /* CONFIG_SMP */
+
+/*
+ * KDB IPI entry (x86-64): the register frame is received by value
+ * here (asmlinkage), but kdb_ipi() takes a struct pt_regs * -- every
+ * other call site in this patch passes a pointer -- so pass &regs,
+ * not the struct itself.
+ */
+asmlinkage void smp_kdba_process_ipi(struct pt_regs regs)
+{
+ kdb_print_state(__FUNCTION__, 0);
+ kdb_ipi(&regs, NULL);
+}
+
+#endif /* CONFIG_SMP */
CFI_REL_OFFSET rcx,0; \
CFI_REL_OFFSET r11,8
+
+/*
+ * Interrupt entry/exit.
+ *
+ * Interrupt entry points save only callee clobbered registers in fast path.
+ *
+ * Entry runs with interrupts off.
+ */
+
+/* 0(%rsp): interrupt number */
+ .macro interrupt func
+ cld
+ SAVE_ARGS
+ leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
+ pushq %rbp
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_REL_OFFSET rbp, 0
+ movq %rsp,%rbp
+ CFI_DEF_CFA_REGISTER rbp
+ testl $3,CS(%rdi)
+ je 1f
+ swapgs
+1: incl %gs:pda_irqcount # RED-PEN should check preempt count
+ cmoveq %gs:pda_irqstackptr,%rsp
+ push %rbp # backlink for old unwinder
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_REL_OFFSET rbp,0
+ /*
+ * We entered an interrupt context - irqs are off:
+ */
+ TRACE_IRQS_OFF
+ call \func
+ .endm
+
+
+
/*
* Interrupt exit.
*
CFI_ENDPROC
ENDPROC(call_softirq)
+
+#ifdef CONFIG_KDB
+# KDB debug-call entry: zeroentry builds a pt_regs frame and calls
+# do_call_debug(regs).  NOTE(review): the commented-out pushq /
+# paranoid_exit lines look like leftovers from an earlier revision --
+# confirm and delete rather than keeping dead code.
+KPROBE_ENTRY(call_debug)
+ INTR_FRAME
+/* pushq $0
+ CFI_ADJUST_CFA_OFFSET 8 */
+ zeroentry do_call_debug
+/* jmp paranoid_exit */
+ CFI_ENDPROC
+ .previous .text
+
+#ifdef CONFIG_SMP
+# Cross-cpu KDB IPI entry.  NOTE(review): 0xf8 is hard-coded here but
+# also defined symbolically elsewhere in this patch -- use the macro
+# so the two cannot drift apart.
+ENTRY(kdb_interrupt)
+ apicinterrupt 0xf8,smp_kdb_interrupt
+END(kdb_interrupt)
+#endif /* CONFIG_SMP */
+
+
+
+# Software entry into KDB: build pt_regs, call kdb(reason=1,
+# error_code=0, regs), restore registers and iretq back.
+ENTRY(kdb_call)
+ INTR_FRAME
+ cld
+ pushq $-1 # orig_eax
+ CFI_ADJUST_CFA_OFFSET 8
+ SAVE_ALL
+ movq $1,%rdi # KDB_REASON_ENTER
+ movq $0,%rsi # error_code
+ movq %rsp,%rdx # struct pt_regs
+ call kdb
+ RESTORE_ALL
+ addq $8,%rsp # forget orig_eax
+ CFI_ADJUST_CFA_OFFSET -8
+ iretq
+ CFI_ENDPROC
+END(kdb_call)
+
+#endif
/* zero the remaining page */
.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
+ .section .bss, "aw", @nobits
+ .align L1_CACHE_BYTES
+ENTRY(idt_table)
+ .skip 256 * 16
+
.section .bss.page_aligned, "aw", @nobits
.align PAGE_SIZE
ENTRY(empty_zero_page)
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
+#ifdef CONFIG_KDB
+asmlinkage void call_debug(void);
+#endif
ATOMIC_NOTIFIER_HEAD(die_chain);
EXPORT_SYMBOL(die_chain);
#ifdef CONFIG_IA32_EMULATION
{ IA32_SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)ia32_syscall},
#endif
+#ifdef CONFIG_KDB
+ { KDB_VECTOR, 3|4, __KERNEL_CS, (unsigned long)call_debug },
+#endif
{ 0, 0, 0, 0 }
};
+#ifdef CONFIG_KDB
+/*
+ * Target of the call_debug entry stub: hand control to whoever is
+ * registered on the die-notifier chain (i.e. KDB) for a debug call.
+ */
+asmlinkage void do_call_debug(struct pt_regs *regs)
+{
+ notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
+}
+#endif
+
void __init trap_init(void)
{
int ret;
}
+#ifdef CONFIG_KDB
+int kdb_enter = KDBENTER_VECTOR;
+EXPORT_SYMBOL(kdb_enter);
+#endif
+
/* Actual parsing is done early in setup.c. */
static int __init oops_dummy(char *s)
{
* the serial line, you can use the 'kdb=early' flag to lilo and set the
* appropriate breakpoints.
*/
+#ifdef CONFIG_XEN
+atomic_t kdb_8250;
+#endif /* CONFIG_XEN */
static int kdb_serial_line = -1;
static const char *kdb_serial_ptr = kdb_serial_str;
#include <xen/xenbus.h>
#include <xen/xencons.h>
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#endif
+
/*
* Modes:
* 'xencons=off' [XC_OFF]: Console is disabled.
goto out;
for (i = 0; i < len; i++) {
+#ifdef CONFIG_KDB
+ /* 0x01 (Ctrl-A) on the Xen console drops into the kernel debugger */
+ if ( (buf[i] == 0x01) )
+ {
+ kdb(KDB_REASON_KEYBOARD, 0, regs);
+ }
+#endif /* CONFIG_KDB */
#ifdef CONFIG_MAGIC_SYSRQ
if (sysrq_enabled) {
if (buf[i] == '\x0f') { /* ^O */
extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
+#ifdef CONFIG_KDB
+extern irqreturn_t smp_kdba_process_ipi(int, void *, struct pt_regs *);
+#endif
extern int local_setup_timer(unsigned int cpu);
extern void local_teardown_timer(unsigned int cpu);
static DEFINE_PER_CPU(int, callfunc_irq);
static char resched_name[NR_CPUS][15];
static char callfunc_name[NR_CPUS][15];
+#ifdef CONFIG_KDB
+static DEFINE_PER_CPU(int, kdba_irq);
+static char kdba_name[NR_CPUS][15];
+#endif
u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
goto fail;
per_cpu(callfunc_irq, cpu) = rc;
+#ifdef CONFIG_KDB
+ sprintf(kdba_name[cpu], "kdba%d", cpu);
+ per_cpu(kdba_irq, cpu) =
+ bind_ipi_to_irqhandler(
+#ifdef CONFIG_X86_32
+ KDB_VECTOR,
+#else
+ NMI_VECTOR,
+#endif
+ cpu,
+ smp_kdba_process_ipi,
+ SA_INTERRUPT,
+ kdba_name[cpu],
+ NULL);
+ BUG_ON(per_cpu(kdba_irq, cpu) < 0);
+#endif
+
+
if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
goto fail;
unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+#ifdef CONFIG_KDB
+ unbind_from_irqhandler(per_cpu(kdba_irq, cpu), NULL);
+#endif
+
}
#endif
fastcall void reschedule_interrupt(void);
fastcall void invalidate_interrupt(void);
fastcall void call_function_interrupt(void);
+asmlinkage void kdba_process_ipi(void);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
#define PTRACE_SYSEMU 31
#define PTRACE_SYSEMU_SINGLESTEP 32
+/* x86 EFLAGS register bit masks (for debugger register display and
+ * manipulation). */
+enum EFLAGS {
+ EF_CF = 0x00000001,
+ EF_PF = 0x00000004,
+ EF_AF = 0x00000010,
+ EF_ZF = 0x00000040,
+ EF_SF = 0x00000080,
+ EF_TF = 0x00000100,
+ EF_IE = 0x00000200,
+ EF_DF = 0x00000400,
+ EF_OF = 0x00000800,
+ EF_IOPL = 0x00003000,
+ EF_IOPL_RING0 = 0x00000000,
+ EF_IOPL_RING1 = 0x00001000,
+ EF_IOPL_RING2 = 0x00002000,
+ EF_NT = 0x00004000, /* nested task */
+ EF_RF = 0x00010000, /* resume */
+ EF_VM = 0x00020000, /* virtual mode */
+ EF_AC = 0x00040000, /* alignment */
+ EF_VIF = 0x00080000, /* virtual interrupt */
+ EF_VIP = 0x00100000, /* virtual interrupt pending */
+ EF_ID = 0x00200000, /* id */
+};
+
#ifdef __KERNEL__
#include <asm/vm86.h>
#define FIRST_EXTERNAL_VECTOR 0x20
#define SYSCALL_VECTOR 0x80
+#define KDBENTER_VECTOR 0x81
/*
* Vectors 0x20-0x2f are used for ISA interrupts.
#define RESCHEDULE_VECTOR 0
#define CALL_FUNCTION_VECTOR 1
-#define NR_IPIS 2
+#define KDB_VECTOR 2
+#define NR_IPIS 3
/*
* The maximum number of vectors supported by i386 processors
* This is the ldt that every process will get unless we need
* something other than this.
*/
-extern struct desc_struct default_ldt[];
-#ifndef CONFIG_X86_NO_IDT
+/* #ifndef CONFIG_X86_NO_IDT */
extern struct gate_struct idt_table[];
-#endif
+/* #endif */
extern struct desc_ptr cpu_gdt_descr[];
/* the cpu gdt accessor */
memcpy(adr, &s, 16);
}
-#ifndef CONFIG_X86_NO_IDT
+/* #ifndef CONFIG_X86_NO_IDT */
static inline void set_intr_gate(int nr, void *func)
{
BUG_ON((unsigned)nr > 0xFF);
{
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
}
-#endif
+/* #endif */
static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
unsigned size)
struct hw_interrupt_type;
#endif
-#define NMI_VECTOR 0x02
/*
* IDT vectors usable for external interrupt sources start
* at 0x20:
#define NUM_INVALIDATE_TLB_VECTORS 8
#endif
+/* NOTE(review): TEDKDB_VECTOR looks like a leftover debugging alias of
+ * KDB_VECTOR -- confirm it is unused and remove it. */
+#define TEDKDB_VECTOR 0xf8
+#define KDB_VECTOR 0xf8
+
/*
* Local APIC timer IRQ vector is on a different priority level,
* to work around the 'lost local interrupt if more than 2 IRQ
#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
#endif
-#define KDB_VECTOR 0xf9
+#define KDBENTER_VECTOR 0xf8
# define irq_ctx_init(cpu) do { } while (0)
#define RESCHEDULE_VECTOR 0
#define CALL_FUNCTION_VECTOR 1
-#define NR_IPIS 2
+#define NMI_VECTOR 2
+#define NR_IPIS 3
/*
* The maximum number of vectors supported by i386 processors