xen-vtx-unstable

view xen/arch/x86/x86_64/traps.c @ 6774:4d899a738d59

merge?
author cl349@firebug.cl.cam.ac.uk
date Tue Sep 13 15:05:49 2005 +0000 (2005-09-13)
parents bd951d23d713 291e816acbf4
children e7c7196fa329 8ca0f98ba8e2
line source
2 #include <xen/config.h>
3 #include <xen/init.h>
4 #include <xen/sched.h>
5 #include <xen/lib.h>
6 #include <xen/errno.h>
7 #include <xen/mm.h>
8 #include <xen/irq.h>
9 #include <xen/symbols.h>
10 #include <xen/console.h>
11 #include <xen/sched.h>
12 #include <asm/current.h>
13 #include <asm/flushtlb.h>
14 #include <asm/msr.h>
15 #include <asm/vmx.h>
/*
 * Dump the register state for the current exception/interrupt context to
 * the console, followed by a stack trace.
 *
 * For a VMX guest most state lives in the VMCS rather than in *regs, so
 * it is fetched with __vmread.  For everything else (hypervisor or PV
 * guest context) the values come straight from the exception frame.
 */
void show_registers(struct cpu_user_regs *regs)
{
    unsigned long rip, rsp, rflags, cs, cr0, cr3;
    const char *context;

    /*
     * NOTE(review): regs->eflags == 0 is used as the marker that this
     * frame belongs to a VMX guest (a genuine exception frame always has
     * at least one flag bit set) — fragile heuristic, confirm against the
     * code that builds these frames on the VMX exit path.
     */
    if ( VMX_DOMAIN(current) && (regs->eflags == 0) )
    {
        /* Pull guest state out of the VMCS of the current VCPU. */
        __vmread(GUEST_RIP, &rip);
        __vmread(GUEST_RSP, &rsp);
        __vmread(GUEST_RFLAGS, &rflags);
        __vmread(GUEST_CS_SELECTOR, &cs);
        /* CR0 read shadow: the CR0 value the guest believes it has. */
        __vmread(CR0_READ_SHADOW, &cr0);
        __vmread(GUEST_CR3, &cr3);
        context = "vmx guest";
    }
    else
    {
        rip = regs->rip;
        rflags = regs->rflags;
        cr0 = read_cr0();
        cr3 = read_cr3();
        rsp = regs->rsp;
        /* Only the low 16 bits of the saved CS are the selector. */
        cs = regs->cs & 0xffff;
        context = GUEST_MODE(regs) ? "guest" : "hypervisor";
    }

    printk("CPU: %d\nRIP: %04lx:[<%016lx>]",
           smp_processor_id(), cs, rip);
    /* Symbol lookup only makes sense for hypervisor addresses. */
    if ( !GUEST_MODE(regs) )
        print_symbol(" %s", rip);
    printk("\nRFLAGS: %016lx CONTEXT: %s\n", rflags, context);
    printk("rax: %016lx rbx: %016lx rcx: %016lx\n",
           regs->rax, regs->rbx, regs->rcx);
    printk("rdx: %016lx rsi: %016lx rdi: %016lx\n",
           regs->rdx, regs->rsi, regs->rdi);
    printk("rbp: %016lx rsp: %016lx r8: %016lx\n",
           regs->rbp, rsp, regs->r8);
    printk("r9: %016lx r10: %016lx r11: %016lx\n",
           regs->r9, regs->r10, regs->r11);
    printk("r12: %016lx r13: %016lx r14: %016lx\n",
           regs->r12, regs->r13, regs->r14);
    printk("r15: %016lx cr0: %016lx cr3: %016lx\n",
           regs->r15, cr0, cr3);

    show_stack(regs);
}
64 void show_page_walk(unsigned long addr)
65 {
66 unsigned long page = read_cr3();
68 printk("Pagetable walk from %016lx:\n", addr);
70 page &= PAGE_MASK;
71 page = ((unsigned long *) __va(page))[l4_table_offset(addr)];
72 printk(" L4 = %016lx\n", page);
73 if ( !(page & _PAGE_PRESENT) )
74 return;
76 page &= PAGE_MASK;
77 page = ((unsigned long *) __va(page))[l3_table_offset(addr)];
78 printk(" L3 = %016lx\n", page);
79 if ( !(page & _PAGE_PRESENT) )
80 return;
82 page &= PAGE_MASK;
83 page = ((unsigned long *) __va(page))[l2_table_offset(addr)];
84 printk(" L2 = %016lx %s\n", page, (page & _PAGE_PSE) ? "(2MB)" : "");
85 if ( !(page & _PAGE_PRESENT) || (page & _PAGE_PSE) )
86 return;
88 page &= PAGE_MASK;
89 page = ((unsigned long *) __va(page))[l1_table_offset(addr)];
90 printk(" L1 = %016lx\n", page);
91 }
asmlinkage void double_fault(void);

/*
 * Double-fault handler (runs on its own IST stack, see percpu_traps_init).
 * A #DF means the machine state is unrecoverable: dump the saved register
 * state to the console and halt forever, awaiting a manual reset.
 */
asmlinkage void do_double_fault(struct cpu_user_regs *regs)
{
    /* Stop the watchdog so it cannot fire while we spin below. */
    watchdog_disable();

    /* Console may be locked by the CPU that just faulted; break the lock. */
    console_force_unlock();

    /* Find information saved during fault and dump it to the console. */
    printk("************************************\n");
    show_registers(regs);
    printk("************************************\n");
    printk("CPU%d DOUBLE FAULT -- system shutdown\n", smp_processor_id());
    printk("System needs manual reset.\n");
    printk("************************************\n");

    /* Lock up the console to prevent spurious output from other CPUs. */
    console_force_lock();

    /* Wait for manual reset. */
    for ( ; ; )
        __asm__ __volatile__ ( "hlt" );
}
116 asmlinkage void syscall_enter(void);
117 void __init percpu_traps_init(void)
118 {
119 char *stack_bottom, *stack;
120 int cpu = smp_processor_id();
122 if ( cpu == 0 )
123 {
124 /* Specify dedicated interrupt stacks for NMIs and double faults. */
125 set_intr_gate(TRAP_double_fault, &double_fault);
126 idt_table[TRAP_double_fault].a |= 1UL << 32; /* IST1 */
127 idt_table[TRAP_nmi].a |= 2UL << 32; /* IST2 */
128 }
130 stack_bottom = (char *)get_stack_bottom();
131 stack = (char *)((unsigned long)stack_bottom & ~(STACK_SIZE - 1));
133 /* Double-fault handler has its own per-CPU 1kB stack. */
134 init_tss[cpu].ist[0] = (unsigned long)&stack[1024];
136 /* NMI handler has its own per-CPU 1kB stack. */
137 init_tss[cpu].ist[1] = (unsigned long)&stack[2048];
139 /*
140 * Trampoline for SYSCALL entry from long mode.
141 */
143 /* Skip the NMI and DF stacks. */
144 stack = &stack[2048];
145 wrmsr(MSR_LSTAR, (unsigned long)stack, ((unsigned long)stack>>32));
147 /* movq %rsp, saversp(%rip) */
148 stack[0] = 0x48;
149 stack[1] = 0x89;
150 stack[2] = 0x25;
151 *(u32 *)&stack[3] = (stack_bottom - &stack[7]) - 16;
153 /* leaq saversp(%rip), %rsp */
154 stack[7] = 0x48;
155 stack[8] = 0x8d;
156 stack[9] = 0x25;
157 *(u32 *)&stack[10] = (stack_bottom - &stack[14]) - 16;
159 /* pushq %r11 */
160 stack[14] = 0x41;
161 stack[15] = 0x53;
163 /* pushq $__GUEST_CS64 */
164 stack[16] = 0x68;
165 *(u32 *)&stack[17] = __GUEST_CS64;
167 /* jmp syscall_enter */
168 stack[21] = 0xe9;
169 *(u32 *)&stack[22] = (char *)syscall_enter - &stack[26];
171 /*
172 * Trampoline for SYSCALL entry from compatibility mode.
173 */
175 /* Skip the long-mode entry trampoline. */
176 stack = &stack[26];
177 wrmsr(MSR_CSTAR, (unsigned long)stack, ((unsigned long)stack>>32));
179 /* movq %rsp, saversp(%rip) */
180 stack[0] = 0x48;
181 stack[1] = 0x89;
182 stack[2] = 0x25;
183 *(u32 *)&stack[3] = (stack_bottom - &stack[7]) - 16;
185 /* leaq saversp(%rip), %rsp */
186 stack[7] = 0x48;
187 stack[8] = 0x8d;
188 stack[9] = 0x25;
189 *(u32 *)&stack[10] = (stack_bottom - &stack[14]) - 16;
191 /* pushq %r11 */
192 stack[14] = 0x41;
193 stack[15] = 0x53;
195 /* pushq $__GUEST_CS32 */
196 stack[16] = 0x68;
197 *(u32 *)&stack[17] = __GUEST_CS32;
199 /* jmp syscall_enter */
200 stack[21] = 0xe9;
201 *(u32 *)&stack[22] = (char *)syscall_enter - &stack[26];
203 /*
204 * Common SYSCALL parameters.
205 */
207 wrmsr(MSR_STAR, 0, (FLAT_RING3_CS32<<16) | __HYPERVISOR_CS);
208 wrmsr(MSR_SYSCALL_MASK, EF_VM|EF_RF|EF_NT|EF_DF|EF_IE|EF_TF, 0U);
209 }
211 long do_set_callbacks(unsigned long event_address,
212 unsigned long failsafe_address,
213 unsigned long syscall_address)
214 {
215 struct vcpu *d = current;
217 d->arch.guest_context.event_callback_eip = event_address;
218 d->arch.guest_context.failsafe_callback_eip = failsafe_address;
219 d->arch.guest_context.syscall_callback_eip = syscall_address;
221 return 0;
222 }
224 /*
225 * Local variables:
226 * mode: C
227 * c-set-style: "BSD"
228 * c-basic-offset: 4
229 * tab-width: 4
230 * indent-tabs-mode: nil
231 * End:
232 */