debuggers.hg

view xen/arch/x86/x86_32/traps.c @ 3755:ea98f0bb6510

bitkeeper revision 1.1159.212.127 (4208b02bTdSR4AVYRg8diDkKZmIVUg)

General shadow code cleanup.

Fixed compilation problems when SHADOW_DEBUG is enabled.
Fixed compilation problems when CONFIG_VMX is undefined.

Simplified l1pte_write_fault and l1pte_read_fault.
Name change: spfn => smfn (shadow machine frame numbers).

In general, the terms pfn and gpfn now refer to pages in the
guest's idea of physical frames (which differs for full shadow
guests). mfn always refers to a machine frame number.

One bug fix for check_pagetable():
If we're using writable page tables
along with shadow mode, don't check the currently writable page table
page -- check its snapshot instead.

Signed-off-by: michael.fetterman@cl.cam.ac.uk
author mafetter@fleming.research
date Tue Feb 08 12:27:23 2005 +0000 (2005-02-08)
parents ff48344d34df
children f5f2757b3aa2
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
3 #include <xen/config.h>
4 #include <xen/init.h>
5 #include <xen/sched.h>
6 #include <xen/lib.h>
7 #include <xen/console.h>
8 #include <xen/mm.h>
9 #include <xen/irq.h>
10 #include <asm/flushtlb.h>
/* Maximum number of stack words dumped by show_stack()/show_guest_stack(). */
static int kstack_depth_to_print = 8*20;
14 static inline int kernel_text_address(unsigned long addr)
15 {
16 if (addr >= (unsigned long) &_stext &&
17 addr <= (unsigned long) &_etext)
18 return 1;
19 return 0;
21 }
23 void show_guest_stack(void)
24 {
25 int i;
26 execution_context_t *ec = get_execution_context();
27 unsigned long *stack = (unsigned long *)ec->esp;
28 printk("Guest EIP is %lx\n ",ec->eip);
30 for ( i = 0; i < kstack_depth_to_print; i++ )
31 {
32 if ( ((long)stack & (STACK_SIZE-1)) == 0 )
33 break;
34 if ( i && ((i % 8) == 0) )
35 printk("\n ");
36 printk("%p ", *stack++);
37 }
38 printk("\n");
40 }
42 void show_trace(unsigned long *esp)
43 {
44 unsigned long *stack, addr;
45 int i;
47 printk("Call Trace from ESP=%p:\n ", esp);
48 stack = esp;
49 i = 0;
50 while (((long) stack & (STACK_SIZE-1)) != 0) {
51 addr = *stack++;
52 if (kernel_text_address(addr)) {
53 if (i && ((i % 6) == 0))
54 printk("\n ");
55 printk("[<%p>] ", addr);
56 i++;
57 }
58 }
59 printk("\n");
60 }
62 void show_stack(unsigned long *esp)
63 {
64 unsigned long *stack;
65 int i;
67 printk("Stack trace from ESP=%p:\n ", esp);
69 stack = esp;
70 for ( i = 0; i < kstack_depth_to_print; i++ )
71 {
72 if ( ((long)stack & (STACK_SIZE-1)) == 0 )
73 break;
74 if ( i && ((i % 8) == 0) )
75 printk("\n ");
76 if ( kernel_text_address(*stack) )
77 printk("[%p] ", *stack++);
78 else
79 printk("%p ", *stack++);
80 }
81 printk("\n");
83 show_trace( esp );
84 }
/*
 * Print a full register dump for the fault frame 'regs'.
 *
 * If the fault originated in guest context the CPU pushed the outer
 * ESP/SS and the guest's data selectors are live in the frame, so we
 * report those. If the fault was taken inside the hypervisor, no outer
 * ESP/SS were pushed: the address of the frame's own esp slot marks the
 * stack position at fault time, and the flat hypervisor data selector
 * stands in for every data segment register.
 */
void show_registers(struct xen_regs *regs)
{
    unsigned long esp;
    unsigned short ss, ds, es, fs, gs;

    if ( GUEST_FAULT(regs) )
    {
        /* Guest fault: use the selectors/ESP saved in the frame. */
        esp = regs->esp;
        ss = regs->ss & 0xffff;
        ds = regs->ds & 0xffff;
        es = regs->es & 0xffff;
        fs = regs->fs & 0xffff;
        gs = regs->gs & 0xffff;
    }
    else
    {
        /* Hypervisor fault: frame top doubles as the faulting ESP. */
        esp = (unsigned long)(&regs->esp);
        ss = __HYPERVISOR_DS;
        ds = __HYPERVISOR_DS;
        es = __HYPERVISOR_DS;
        fs = __HYPERVISOR_DS;
        gs = __HYPERVISOR_DS;
    }

    printk("CPU: %d\nEIP: %04lx:[<%p>] \nEFLAGS: %p\n",
           smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags);
    printk("eax: %p ebx: %p ecx: %p edx: %p\n",
           regs->eax, regs->ebx, regs->ecx, regs->edx);
    printk("esi: %p edi: %p ebp: %p esp: %p\n",
           regs->esi, regs->edi, regs->ebp, esp);
    printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
           ds, es, fs, gs, ss);
    printk("cr3: %08lx\n", read_cr3());

    show_stack((unsigned long *)&regs->esp);
}
123 void show_page_walk(unsigned long addr)
124 {
125 unsigned long page;
127 if ( addr < PAGE_OFFSET )
128 return;
130 printk("Pagetable walk from %p:\n", addr);
132 page = l2_pgentry_val(idle_pg_table[l2_table_offset(addr)]);
133 printk(" L2 = %p %s\n", page, (page & _PAGE_PSE) ? "(4MB)" : "");
134 if ( !(page & _PAGE_PRESENT) || (page & _PAGE_PSE) )
135 return;
137 page &= PAGE_MASK;
138 page = ((unsigned long *) __va(page))[l1_table_offset(addr)];
139 printk(" L1 = %p\n", page);
140 }
/*
 * Dedicated task state and private stack for the double-fault handler.
 * A hardware task switch to this TSS lets us get diagnostics out even
 * when the normal kernel stack has been blown.
 */
#define DOUBLEFAULT_STACK_SIZE 1024
static struct tss_struct doublefault_tss;
static unsigned char doublefault_stack[DOUBLEFAULT_STACK_SIZE];
/*
 * Double-fault handler, entered via a hardware task switch (see
 * percpu_traps_init). The faulting CPU's pre-fault register state was
 * saved by the CPU into that CPU's main TSS; recover it from there,
 * dump it to the console, and halt forever — a double fault leaves no
 * safe way to resume.
 */
asmlinkage void do_double_fault(void)
{
    struct tss_struct *tss = &doublefault_tss;
    /* back_link holds the selector of the previous (faulting) task's
     * TSS; decode it back into a CPU number. */
    unsigned int cpu = ((tss->back_link>>3)-__FIRST_TSS_ENTRY)>>1;

    /* Disable the NMI watchdog. It's useless now. */
    watchdog_on = 0;

    /* The console lock may be held by the CPU that just faulted. */
    console_force_unlock();

    /* Find information saved during fault and dump it to the console. */
    tss = &init_tss[cpu];
    printk("CPU: %d\nEIP: %04x:[<%08x>] \nEFLAGS: %08x\n",
           cpu, tss->cs, tss->eip, tss->eflags);
    printk("CR3: %08x\n", tss->__cr3);
    printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n",
           tss->eax, tss->ebx, tss->ecx, tss->edx);
    printk("esi: %08x edi: %08x ebp: %08x esp: %08x\n",
           tss->esi, tss->edi, tss->ebp, tss->esp);
    printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
           tss->ds, tss->es, tss->fs, tss->gs, tss->ss);
    printk("************************************\n");
    printk("CPU%d DOUBLE FAULT -- system shutdown\n", cpu);
    printk("System needs manual reset.\n");
    printk("************************************\n");

    /* Lock up the console to prevent spurious output from other CPUs. */
    console_force_lock();

    /* Wait for manual reset. */
    for ( ; ; )
        __asm__ __volatile__ ( "hlt" );
}
/*
 * Per-CPU trap initialisation. The boot CPU (id 0) additionally builds
 * the shared double-fault TSS and installs its descriptor in the GDT;
 * every CPU then points the double-fault vector at that TSS via a task
 * gate, so a double fault switches to a known-good stack.
 */
void __init percpu_traps_init(void)
{
    if ( smp_processor_id() == 0 )
    {
        /*
         * Make a separate task for double faults. This will get us debug
         * output if we blow the kernel stack.
         */
        struct tss_struct *tss = &doublefault_tss;
        memset(tss, 0, sizeof(*tss));
        tss->ds = __HYPERVISOR_DS;
        tss->es = __HYPERVISOR_DS;
        tss->ss = __HYPERVISOR_DS;
        /* Stack grows down: start at the top of the private stack. */
        tss->esp = (unsigned long)
            &doublefault_stack[DOUBLEFAULT_STACK_SIZE];
        tss->__cr3 = __pa(idle_pg_table);
        tss->cs = __HYPERVISOR_CS;
        tss->eip = (unsigned long)do_double_fault;
        tss->eflags = 2;                 /* only the reserved bit 1 set */
        tss->bitmap = IOBMP_INVALID_OFFSET;
        /* Descriptor limit 235 bytes; type 9 = available 32-bit TSS. */
        _set_tssldt_desc(gdt_table+__DOUBLEFAULT_TSS_ENTRY,
                         (unsigned long)tss, 235, 9);
    }

    set_task_gate(TRAP_double_fault, __DOUBLEFAULT_TSS_ENTRY<<3);
}
/*
 * Install (or, with idx == 0, disable) the "fast trap" for domain 'p':
 * a single guest trap vector that is reflected to the guest directly,
 * bypassing the normal hypervisor trap path.
 *
 * Returns 0 on success, -1 if the vector is outside the permitted set
 * or is configured as an interrupt gate.
 */
long set_fast_trap(struct exec_domain *p, int idx)
{
    trap_info_t *ti;

    /* Index 0 is special: it disables fast traps. */
    if ( idx == 0 )
    {
        if ( p == current )
            CLEAR_FAST_TRAP(&p->arch);
        SET_DEFAULT_FAST_TRAP(&p->arch);
        return 0;
    }

    /*
     * We only fast-trap vectors 0x20-0x2f, and vector 0x80.
     * The former range is used by Windows and MS-DOS.
     * Vector 0x80 is used by Linux and the BSD variants.
     */
    if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) )
        return -1;

    ti = p->arch.traps + idx;

    /*
     * We can't virtualise interrupt gates, as there's no way to get
     * the CPU to automatically clear the events_mask variable.
     */
    if ( TI_GET_IF(ti) )
        return -1;

    /* Tear down the live fast trap before rebuilding the descriptor. */
    if ( p == current )
        CLEAR_FAST_TRAP(&p->arch);

    /* Encode the guest's handler as an IDT trap-gate descriptor:
     * low word = handler offset 15:0, high word = code selector; the
     * second dword packs offset 31:16, present trap-gate type (0x8f00)
     * and the guest-requested DPL. */
    p->arch.fast_trap_idx    = idx;
    p->arch.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
    p->arch.fast_trap_desc.b =
        (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13;

    /* Re-arm only after the descriptor is fully written. */
    if ( p == current )
        SET_FAST_TRAP(&p->arch);

    return 0;
}
/* Hypercall entry point: set/clear the fast trap for the calling domain. */
long do_set_fast_trap(int idx)
{
    return set_fast_trap(current, idx);
}