debuggers.hg
changeset 3640:9a9c5a491401
bitkeeper revision 1.1159.235.1 (42000d3dwcPyT8aY4VIPYGCfCAJuQQ)
More x86/64. Status: traps.c now included in the build, but actual building
of IDT doesn't happen, and we need some sort of entry.S. More page-table
building required so that arch_init_memory() can work. And there is something
odd with MP-table parsing; I currently suspect that __init sections are
causing problems.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author   | kaf24@viper.(none)
date     | Tue Feb 01 23:14:05 2005 +0000 (2005-02-01)
parents  | ed902e5c4b49
children | e6af5d8f8b39
files    | .rootkeys xen/arch/x86/boot/x86_64.S xen/arch/x86/memory.c xen/arch/x86/traps.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/traps.c xen/include/asm-x86/desc.h xen/include/asm-x86/regs.h xen/include/asm-x86/x86_64/regs.h
line diff
--- a/.rootkeys	Tue Feb 01 21:26:34 2005 +0000
+++ b/.rootkeys	Tue Feb 01 23:14:05 2005 +0000
@@ -901,11 +901,13 @@ 3e32af9aRnYGl4GMOaDKp7JdfhOGhg xen/arch/
 3ddb79bcecupHj56ZbTa3B0FxDowMg xen/arch/x86/x86_32/entry.S
 3ddb79bcHwuCQDjBICDTSis52hWguw xen/arch/x86/x86_32/mm.c
 40f92331jfOlE7MfKwpdkEb1CEf23g xen/arch/x86/x86_32/seg_fixup.c
+42000d3ckiFc1qxa4AWqsd0t3lxuyw xen/arch/x86/x86_32/traps.c
 3ddb79bc4nTpGQOe6_-MbyZzkhlhFQ xen/arch/x86/x86_32/usercopy.c
 3ddb79bcOMCu9-5mKpjIh5d0qqBDPg xen/arch/x86/x86_32/xen.lds
 41bf1717Ty3hwN3E9swdu8QfnvGqww xen/arch/x86/x86_64/asm-offsets.c
 40e96d3aLDI-nViMuYneD7VKYlZrVg xen/arch/x86/x86_64/entry.S
 41bf1717XhPz_dNT5OKSjgmbFuWBuA xen/arch/x86/x86_64/mm.c
+42000d3cMb8o1WuFBXC07c8i3lPZBw xen/arch/x86/x86_64/traps.c
 40e96d3ahBTZqbTViInnq0lM03vs7A xen/arch/x86/x86_64/usercopy.c
 40e96d3akN3Hu_J5Bk-WXD8OGscrYQ xen/arch/x86/x86_64/xen.lds
 3ddb79bdff-gj-jFGKjOejeHLqL8Lg xen/common/Makefile
--- a/xen/arch/x86/boot/x86_64.S	Tue Feb 01 21:26:34 2005 +0000
+++ b/xen/arch/x86/boot/x86_64.S	Tue Feb 01 23:14:05 2005 +0000
@@ -241,26 +241,17 @@ ENTRY(cpu0_stack)        # Initial stack is
 ENTRY(stext)
 ENTRY(_stext)

-.globl ret_from_intr, copy_to_user, set_intr_gate, die
+.globl switch_to, ret_from_intr, do_iopl
+switch_to:
 ret_from_intr:
-copy_to_user:
-set_intr_gate:
-die:
-.globl copy_from_user, show_registers, do_iopl
+do_iopl:
+.globl copy_from_user, copy_to_user, copy_user_generic, new_thread
 copy_from_user:
-show_registers:
-do_iopl:
-.globl idt_table, copy_user_generic, idt_tables, new_thread
-idt_table:
+copy_to_user:
 copy_user_generic:
-idt_tables:
 new_thread:
-.globl switch_to, __get_user_1, __get_user_4, __get_user_8, trap_init
-switch_to:
+.globl __get_user_1, __get_user_4, __get_user_8
 __get_user_1:
 __get_user_4:
 __get_user_8:
-trap_init:
-.globl set_debugreg
-set_debugreg:
--- a/xen/arch/x86/memory.c	Tue Feb 01 21:26:34 2005 +0000
+++ b/xen/arch/x86/memory.c	Tue Feb 01 23:14:05 2005 +0000
@@ -168,6 +168,7 @@ void __init init_frametable(void)

 void arch_init_memory(void)
 {
+#ifdef __i386__
     unsigned long i;

     /*
@@ -219,6 +220,7 @@ void arch_init_memory(void)
         frame_table[m2p_start_mfn+i].u.inuse.type_info = PGT_gdt_page | 1;
         frame_table[m2p_start_mfn+i].u.inuse.domain = dom_xen;
     }
+#endif
 }

 static void __invalidate_shadow_ldt(struct exec_domain *d)
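The memory.c hunk above is representative of how this changeset keeps the x86_64 build linking while the 64-bit page-table work is unfinished: i386-only bodies are fenced with #ifdef __i386__ and the 64-bit side is left empty for now (the commit message notes arch_init_memory() cannot work yet). Below is a minimal, standalone sketch of that pattern; arch_init_memory_sketch() and its printf stand-ins are illustrative only, not the real Xen code.

#include <stdio.h>

/* Hypothetical stand-in for arch_init_memory(): on i386 it would walk
 * frame_table[] and hand the machine-to-physical table pages to dom_xen;
 * on x86_64 the body is compiled out until 64-bit page tables exist. */
static void arch_init_memory_sketch(void)
{
#ifdef __i386__
    printf("i386: initialise frame_table / dom_xen mappings here\n");
#else
    printf("x86_64: nothing yet -- body compiled out\n");
#endif
}

int main(void)
{
    arch_init_memory_sketch();
    return 0;
}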
4.1 --- a/xen/arch/x86/traps.c Tue Feb 01 21:26:34 2005 +0000 4.2 +++ b/xen/arch/x86/traps.c Tue Feb 01 23:14:05 2005 +0000 4.3 @@ -66,12 +66,6 @@ char opt_nmi[10] = "fatal"; 4.4 #endif 4.5 string_param("nmi", opt_nmi); 4.6 4.7 -#define GUEST_FAULT(_r) (likely(VM86_MODE(_r) || !RING_0(_r))) 4.8 - 4.9 -#define DOUBLEFAULT_STACK_SIZE 1024 4.10 -static struct tss_struct doublefault_tss; 4.11 -static unsigned char doublefault_stack[DOUBLEFAULT_STACK_SIZE]; 4.12 - 4.13 asmlinkage int hypercall(void); 4.14 4.15 /* Master table, and the one used by CPU0. */ 4.16 @@ -99,116 +93,6 @@ asmlinkage void alignment_check(void); 4.17 asmlinkage void spurious_interrupt_bug(void); 4.18 asmlinkage void machine_check(void); 4.19 4.20 -int kstack_depth_to_print = 8*20; 4.21 - 4.22 -static inline int kernel_text_address(unsigned long addr) 4.23 -{ 4.24 - if (addr >= (unsigned long) &_stext && 4.25 - addr <= (unsigned long) &_etext) 4.26 - return 1; 4.27 - return 0; 4.28 - 4.29 -} 4.30 - 4.31 -void show_guest_stack(void) 4.32 -{ 4.33 - int i; 4.34 - execution_context_t *ec = get_execution_context(); 4.35 - unsigned long *stack = (unsigned long *)ec->esp; 4.36 - printk("Guest EIP is %lx\n",ec->eip); 4.37 - 4.38 - for ( i = 0; i < kstack_depth_to_print; i++ ) 4.39 - { 4.40 - if ( ((long)stack & (STACK_SIZE-1)) == 0 ) 4.41 - break; 4.42 - if ( i && ((i % 8) == 0) ) 4.43 - printk("\n "); 4.44 - printk("%08lx ", *stack++); 4.45 - } 4.46 - printk("\n"); 4.47 - 4.48 -} 4.49 - 4.50 -void show_trace(unsigned long *esp) 4.51 -{ 4.52 - unsigned long *stack, addr; 4.53 - int i; 4.54 - 4.55 - printk("Call Trace from ESP=%p: ", esp); 4.56 - stack = esp; 4.57 - i = 0; 4.58 - while (((long) stack & (STACK_SIZE-1)) != 0) { 4.59 - addr = *stack++; 4.60 - if (kernel_text_address(addr)) { 4.61 - if (i && ((i % 6) == 0)) 4.62 - printk("\n "); 4.63 - printk("[<%08lx>] ", addr); 4.64 - i++; 4.65 - } 4.66 - } 4.67 - printk("\n"); 4.68 -} 4.69 - 4.70 -void show_stack(unsigned long *esp) 4.71 -{ 4.72 - unsigned long *stack; 4.73 - int i; 4.74 - 4.75 - printk("Stack trace from ESP=%p:\n", esp); 4.76 - 4.77 - stack = esp; 4.78 - for ( i = 0; i < kstack_depth_to_print; i++ ) 4.79 - { 4.80 - if ( ((long)stack & (STACK_SIZE-1)) == 0 ) 4.81 - break; 4.82 - if ( i && ((i % 8) == 0) ) 4.83 - printk("\n "); 4.84 - if ( kernel_text_address(*stack) ) 4.85 - printk("[%08lx] ", *stack++); 4.86 - else 4.87 - printk("%08lx ", *stack++); 4.88 - } 4.89 - printk("\n"); 4.90 - 4.91 - show_trace( esp ); 4.92 -} 4.93 - 4.94 -void show_registers(struct xen_regs *regs) 4.95 -{ 4.96 - unsigned long esp; 4.97 - unsigned short ss, ds, es, fs, gs; 4.98 - 4.99 - if ( GUEST_FAULT(regs) ) 4.100 - { 4.101 - esp = regs->esp; 4.102 - ss = regs->ss & 0xffff; 4.103 - ds = regs->ds & 0xffff; 4.104 - es = regs->es & 0xffff; 4.105 - fs = regs->fs & 0xffff; 4.106 - gs = regs->gs & 0xffff; 4.107 - } 4.108 - else 4.109 - { 4.110 - esp = (unsigned long)(®s->esp); 4.111 - ss = __HYPERVISOR_DS; 4.112 - ds = __HYPERVISOR_DS; 4.113 - es = __HYPERVISOR_DS; 4.114 - fs = __HYPERVISOR_DS; 4.115 - gs = __HYPERVISOR_DS; 4.116 - } 4.117 - 4.118 - printk("CPU: %d\nEIP: %04lx:[<%08lx>] \nEFLAGS: %08lx\n", 4.119 - smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags); 4.120 - printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", 4.121 - regs->eax, regs->ebx, regs->ecx, regs->edx); 4.122 - printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", 4.123 - regs->esi, regs->edi, regs->ebp, esp); 4.124 - printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n", 4.125 - ds, es, fs, gs, 
ss); 4.126 - 4.127 - show_stack((unsigned long *)®s->esp); 4.128 -} 4.129 - 4.130 /* 4.131 * This is called for faults at very unexpected times (e.g., when interrupts 4.132 * are disabled). In such situations we can't do much that is safe. We try to 4.133 @@ -231,7 +115,7 @@ asmlinkage void fatal_trap(int trapnr, s 4.134 4.135 if ( trapnr == TRAP_page_fault ) 4.136 { 4.137 - __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (cr2) : ); 4.138 + __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (cr2) : ); 4.139 printk("Faulting linear address might be %08lx\n", cr2); 4.140 } 4.141 4.142 @@ -344,38 +228,6 @@ asmlinkage int do_int3(struct xen_regs * 4.143 return 0; 4.144 } 4.145 4.146 -asmlinkage void do_double_fault(void) 4.147 -{ 4.148 - struct tss_struct *tss = &doublefault_tss; 4.149 - unsigned int cpu = ((tss->back_link>>3)-__FIRST_TSS_ENTRY)>>1; 4.150 - 4.151 - /* Disable the NMI watchdog. It's useless now. */ 4.152 - watchdog_on = 0; 4.153 - 4.154 - /* Find information saved during fault and dump it to the console. */ 4.155 - tss = &init_tss[cpu]; 4.156 - printk("CPU: %d\nEIP: %04x:[<%08x>] \nEFLAGS: %08x\n", 4.157 - cpu, tss->cs, tss->eip, tss->eflags); 4.158 - printk("CR3: %08x\n", tss->__cr3); 4.159 - printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n", 4.160 - tss->eax, tss->ebx, tss->ecx, tss->edx); 4.161 - printk("esi: %08x edi: %08x ebp: %08x esp: %08x\n", 4.162 - tss->esi, tss->edi, tss->ebp, tss->esp); 4.163 - printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n", 4.164 - tss->ds, tss->es, tss->fs, tss->gs, tss->ss); 4.165 - printk("************************************\n"); 4.166 - printk("CPU%d DOUBLE FAULT -- system shutdown\n", cpu); 4.167 - printk("System needs manual reset.\n"); 4.168 - printk("************************************\n"); 4.169 - 4.170 - /* Lock up the console to prevent spurious output from other CPUs. */ 4.171 - console_force_lock(); 4.172 - 4.173 - /* Wait for manual reset. */ 4.174 - for ( ; ; ) 4.175 - __asm__ __volatile__ ( "hlt" ); 4.176 -} 4.177 - 4.178 asmlinkage void do_machine_check(struct xen_regs *regs) 4.179 { 4.180 fatal_trap(TRAP_machine_check, regs); 4.181 @@ -408,7 +260,7 @@ asmlinkage int do_page_fault(struct xen_ 4.182 int cpu = ed->processor; 4.183 int ret; 4.184 4.185 - __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : ); 4.186 + __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (addr) : ); 4.187 4.188 DEBUGGER_trap_entry(TRAP_page_fault, regs); 4.189 4.190 @@ -477,6 +329,7 @@ asmlinkage int do_page_fault(struct xen_ 4.191 4.192 DEBUGGER_trap_fatal(TRAP_page_fault, regs); 4.193 4.194 +#ifdef __i386__ 4.195 if ( addr >= PAGE_OFFSET ) 4.196 { 4.197 unsigned long page; 4.198 @@ -493,6 +346,7 @@ asmlinkage int do_page_fault(struct xen_ 4.199 printk(" -- POSSIBLY AN ACCESS TO FREED MEMORY? 
--\n"); 4.200 #endif 4.201 } 4.202 +#endif /* __i386__ */ 4.203 4.204 show_registers(regs); 4.205 panic("CPU%d FATAL PAGE FAULT\n" 4.206 @@ -542,7 +396,7 @@ static int emulate_privileged_op(struct 4.207 eip += 1; 4.208 if ( (opcode & 0xc0) != 0xc0 ) 4.209 goto fail; 4.210 - reg = decode_reg(regs, opcode); 4.211 + reg = decode_reg(regs, opcode & 7); 4.212 switch ( (opcode >> 3) & 7 ) 4.213 { 4.214 case 0: /* Read CR0 */ 4.215 @@ -570,7 +424,7 @@ static int emulate_privileged_op(struct 4.216 eip += 1; 4.217 if ( (opcode & 0xc0) != 0xc0 ) 4.218 goto fail; 4.219 - reg = decode_reg(regs, opcode); 4.220 + reg = decode_reg(regs, opcode & 7); 4.221 switch ( (opcode >> 3) & 7 ) 4.222 { 4.223 case 0: /* Write CR0 */ 4.224 @@ -629,7 +483,6 @@ static int emulate_privileged_op(struct 4.225 asmlinkage int do_general_protection(struct xen_regs *regs) 4.226 { 4.227 struct exec_domain *ed = current; 4.228 - struct domain *d = ed->domain; 4.229 struct trap_bounce *tb = &ed->thread.trap_bounce; 4.230 trap_info_t *ti; 4.231 unsigned long fixup; 4.232 @@ -681,7 +534,7 @@ asmlinkage int do_general_protection(str 4.233 return 0; 4.234 4.235 #if defined(__i386__) 4.236 - if ( VM_ASSIST(d, VMASST_TYPE_4gb_segments) && 4.237 + if ( VM_ASSIST(ed->domain, VMASST_TYPE_4gb_segments) && 4.238 (regs->error_code == 0) && 4.239 gpf_emulate_4gb(regs) ) 4.240 return 0; 4.241 @@ -791,19 +644,19 @@ asmlinkage int math_state_restore(struct 4.242 4.243 asmlinkage int do_debug(struct xen_regs *regs) 4.244 { 4.245 - unsigned int condition; 4.246 + unsigned long condition; 4.247 struct exec_domain *d = current; 4.248 struct trap_bounce *tb = &d->thread.trap_bounce; 4.249 4.250 DEBUGGER_trap_entry(TRAP_debug, regs); 4.251 4.252 - __asm__ __volatile__("movl %%db6,%0" : "=r" (condition)); 4.253 + __asm__ __volatile__("mov %%db6,%0" : "=r" (condition)); 4.254 4.255 /* Mask out spurious debug traps due to lazy DR7 setting */ 4.256 if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) && 4.257 (d->thread.debugreg[7] == 0) ) 4.258 { 4.259 - __asm__("movl %0,%%db7" : : "r" (0)); 4.260 + __asm__("mov %0,%%db7" : : "r" (0UL)); 4.261 goto out; 4.262 } 4.263 4.264 @@ -836,30 +689,17 @@ asmlinkage int do_spurious_interrupt_bug 4.265 return EXCRET_not_a_fault; 4.266 } 4.267 4.268 -#define _set_gate(gate_addr,type,dpl,addr) \ 4.269 -do { \ 4.270 - int __d0, __d1; \ 4.271 - __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \ 4.272 - "movw %4,%%dx\n\t" \ 4.273 - "movl %%eax,%0\n\t" \ 4.274 - "movl %%edx,%1" \ 4.275 - :"=m" (*((long *) (gate_addr))), \ 4.276 - "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \ 4.277 - :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \ 4.278 - "3" ((char *) (addr)),"2" (__HYPERVISOR_CS << 16)); \ 4.279 -} while (0) 4.280 - 4.281 void set_intr_gate(unsigned int n, void *addr) 4.282 { 4.283 _set_gate(idt_table+n,14,0,addr); 4.284 } 4.285 4.286 -static void __init set_system_gate(unsigned int n, void *addr) 4.287 +void set_system_gate(unsigned int n, void *addr) 4.288 { 4.289 _set_gate(idt_table+n,14,3,addr); 4.290 } 4.291 4.292 -static void set_task_gate(unsigned int n, unsigned int sel) 4.293 +void set_task_gate(unsigned int n, unsigned int sel) 4.294 { 4.295 idt_table[n].a = sel << 16; 4.296 idt_table[n].b = 0x8500; 4.297 @@ -875,17 +715,6 @@ static void set_task_gate(unsigned int n 4.298 *(gate_addr) = (((base) & 0x0000ffff)<<16) | \ 4.299 ((limit) & 0x0ffff); } 4.300 4.301 -#define _set_tssldt_desc(n,addr,limit,type) \ 4.302 -__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \ 4.303 - "movw %%ax,2(%2)\n\t" \ 4.304 - 
"rorl $16,%%eax\n\t" \ 4.305 - "movb %%al,4(%2)\n\t" \ 4.306 - "movb %4,5(%2)\n\t" \ 4.307 - "movb $0,6(%2)\n\t" \ 4.308 - "movb %%ah,7(%2)\n\t" \ 4.309 - "rorl $16,%%eax" \ 4.310 - : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type)) 4.311 - 4.312 void set_tss_desc(unsigned int n, void *addr) 4.313 { 4.314 _set_tssldt_desc( 4.315 @@ -897,25 +726,10 @@ void set_tss_desc(unsigned int n, void * 4.316 4.317 void __init trap_init(void) 4.318 { 4.319 - /* 4.320 - * Make a separate task for double faults. This will get us debug output if 4.321 - * we blow the kernel stack. 4.322 - */ 4.323 - struct tss_struct *tss = &doublefault_tss; 4.324 - memset(tss, 0, sizeof(*tss)); 4.325 - tss->ds = __HYPERVISOR_DS; 4.326 - tss->es = __HYPERVISOR_DS; 4.327 - tss->ss = __HYPERVISOR_DS; 4.328 - tss->esp = (unsigned long) 4.329 - &doublefault_stack[DOUBLEFAULT_STACK_SIZE]; 4.330 - tss->__cr3 = __pa(idle_pg_table); 4.331 - tss->cs = __HYPERVISOR_CS; 4.332 - tss->eip = (unsigned long)do_double_fault; 4.333 - tss->eflags = 2; 4.334 - tss->bitmap = IOBMP_INVALID_OFFSET; 4.335 - _set_tssldt_desc(gdt_table+__DOUBLEFAULT_TSS_ENTRY, 4.336 - (int)tss, 235, 0x89); 4.337 + extern void doublefault_init(void); 4.338 + doublefault_init(); 4.339 4.340 +#ifdef __i386__ 4.341 /* 4.342 * Note that interrupt gates are always used, rather than trap gates. We 4.343 * must have interrupts disabled until DS/ES/FS/GS are saved because the 4.344 @@ -948,6 +762,7 @@ void __init trap_init(void) 4.345 4.346 /* Only ring 1 can access Xen services. */ 4.347 _set_gate(idt_table+HYPERCALL_VECTOR,14,1,&hypercall); 4.348 +#endif 4.349 4.350 /* CPU0 uses the master IDT. */ 4.351 idt_tables[0] = idt_table; 4.352 @@ -1015,57 +830,6 @@ long do_set_callbacks(unsigned long even 4.353 } 4.354 4.355 4.356 -long set_fast_trap(struct exec_domain *p, int idx) 4.357 -{ 4.358 - trap_info_t *ti; 4.359 - 4.360 - /* Index 0 is special: it disables fast traps. */ 4.361 - if ( idx == 0 ) 4.362 - { 4.363 - if ( p == current ) 4.364 - CLEAR_FAST_TRAP(&p->thread); 4.365 - SET_DEFAULT_FAST_TRAP(&p->thread); 4.366 - return 0; 4.367 - } 4.368 - 4.369 - /* 4.370 - * We only fast-trap vectors 0x20-0x2f, and vector 0x80. 4.371 - * The former range is used by Windows and MS-DOS. 4.372 - * Vector 0x80 is used by Linux and the BSD variants. 4.373 - */ 4.374 - if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) 4.375 - return -1; 4.376 - 4.377 - ti = p->thread.traps + idx; 4.378 - 4.379 - /* 4.380 - * We can't virtualise interrupt gates, as there's no way to get 4.381 - * the CPU to automatically clear the events_mask variable. 
4.382 - */ 4.383 - if ( TI_GET_IF(ti) ) 4.384 - return -1; 4.385 - 4.386 - if ( p == current ) 4.387 - CLEAR_FAST_TRAP(&p->thread); 4.388 - 4.389 - p->thread.fast_trap_idx = idx; 4.390 - p->thread.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff); 4.391 - p->thread.fast_trap_desc.b = 4.392 - (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13; 4.393 - 4.394 - if ( p == current ) 4.395 - SET_FAST_TRAP(&p->thread); 4.396 - 4.397 - return 0; 4.398 -} 4.399 - 4.400 - 4.401 -long do_set_fast_trap(int idx) 4.402 -{ 4.403 - return set_fast_trap(current, idx); 4.404 -} 4.405 - 4.406 - 4.407 long do_fpu_taskswitch(void) 4.408 { 4.409 set_bit(EDF_GUEST_STTS, ¤t->ed_flags); 4.410 @@ -1083,22 +847,22 @@ long set_debugreg(struct exec_domain *p, 4.411 case 0: 4.412 if ( value > (PAGE_OFFSET-4) ) return -EPERM; 4.413 if ( p == current ) 4.414 - __asm__ ( "movl %0, %%db0" : : "r" (value) ); 4.415 + __asm__ ( "mov %0, %%db0" : : "r" (value) ); 4.416 break; 4.417 case 1: 4.418 if ( value > (PAGE_OFFSET-4) ) return -EPERM; 4.419 if ( p == current ) 4.420 - __asm__ ( "movl %0, %%db1" : : "r" (value) ); 4.421 + __asm__ ( "mov %0, %%db1" : : "r" (value) ); 4.422 break; 4.423 case 2: 4.424 if ( value > (PAGE_OFFSET-4) ) return -EPERM; 4.425 if ( p == current ) 4.426 - __asm__ ( "movl %0, %%db2" : : "r" (value) ); 4.427 + __asm__ ( "mov %0, %%db2" : : "r" (value) ); 4.428 break; 4.429 case 3: 4.430 if ( value > (PAGE_OFFSET-4) ) return -EPERM; 4.431 if ( p == current ) 4.432 - __asm__ ( "movl %0, %%db3" : : "r" (value) ); 4.433 + __asm__ ( "mov %0, %%db3" : : "r" (value) ); 4.434 break; 4.435 case 6: 4.436 /* 4.437 @@ -1108,7 +872,7 @@ long set_debugreg(struct exec_domain *p, 4.438 value &= 0xffffefff; /* reserved bits => 0 */ 4.439 value |= 0xffff0ff0; /* reserved bits => 1 */ 4.440 if ( p == current ) 4.441 - __asm__ ( "movl %0, %%db6" : : "r" (value) ); 4.442 + __asm__ ( "mov %0, %%db6" : : "r" (value) ); 4.443 break; 4.444 case 7: 4.445 /* 4.446 @@ -1129,7 +893,7 @@ long set_debugreg(struct exec_domain *p, 4.447 if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM; 4.448 } 4.449 if ( p == current ) 4.450 - __asm__ ( "movl %0, %%db7" : : "r" (value) ); 4.451 + __asm__ ( "mov %0, %%db7" : : "r" (value) ); 4.452 break; 4.453 default: 4.454 return -EINVAL;
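One detail worth calling out from the traps.c hunk above: emulate_privileged_op() now passes decode_reg(regs, opcode & 7) rather than the whole ModRM byte. In a ModRM byte the mod field occupies bits 7:6, the opcode extension bits 5:3, and the register operand bits 2:0, so the value must be masked with 7 before being used as a register index. A small standalone illustration follows; decode_reg_index() is a hypothetical stand-in for Xen's decode_reg().

#include <stdio.h>
#include <stdint.h>

static const char *reg_names[8] =
    { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi" };

/* Hypothetical stand-in: return the operand register named by the r/m field. */
static const char *decode_reg_index(uint8_t modrm)
{
    return reg_names[modrm & 7];        /* low three bits only */
}

int main(void)
{
    uint8_t modrm = 0xd8;               /* mod=11, reg/ext=011, r/m=000 */
    printf("ext=%u operand=%s\n", (modrm >> 3) & 7, decode_reg_index(modrm));
    /* Without the & 7 mask, 0xd8 (216) would index far past an 8-entry
     * (or, on x86_64, 16-entry) register table. */
    return 0;
}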
5.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 5.2 +++ b/xen/arch/x86/x86_32/traps.c Tue Feb 01 23:14:05 2005 +0000 5.3 @@ -0,0 +1,226 @@ 5.4 + 5.5 +#include <xen/config.h> 5.6 +#include <xen/init.h> 5.7 +#include <xen/sched.h> 5.8 +#include <xen/lib.h> 5.9 +#include <xen/console.h> 5.10 +#include <xen/mm.h> 5.11 +#include <xen/irq.h> 5.12 + 5.13 +static int kstack_depth_to_print = 8*20; 5.14 + 5.15 +static inline int kernel_text_address(unsigned long addr) 5.16 +{ 5.17 + if (addr >= (unsigned long) &_stext && 5.18 + addr <= (unsigned long) &_etext) 5.19 + return 1; 5.20 + return 0; 5.21 + 5.22 +} 5.23 + 5.24 +void show_guest_stack(void) 5.25 +{ 5.26 + int i; 5.27 + execution_context_t *ec = get_execution_context(); 5.28 + unsigned long *stack = (unsigned long *)ec->esp; 5.29 + printk("Guest EIP is %lx\n",ec->eip); 5.30 + 5.31 + for ( i = 0; i < kstack_depth_to_print; i++ ) 5.32 + { 5.33 + if ( ((long)stack & (STACK_SIZE-1)) == 0 ) 5.34 + break; 5.35 + if ( i && ((i % 8) == 0) ) 5.36 + printk("\n "); 5.37 + printk("%08lx ", *stack++); 5.38 + } 5.39 + printk("\n"); 5.40 + 5.41 +} 5.42 + 5.43 +void show_trace(unsigned long *esp) 5.44 +{ 5.45 + unsigned long *stack, addr; 5.46 + int i; 5.47 + 5.48 + printk("Call Trace from ESP=%p: ", esp); 5.49 + stack = esp; 5.50 + i = 0; 5.51 + while (((long) stack & (STACK_SIZE-1)) != 0) { 5.52 + addr = *stack++; 5.53 + if (kernel_text_address(addr)) { 5.54 + if (i && ((i % 6) == 0)) 5.55 + printk("\n "); 5.56 + printk("[<%08lx>] ", addr); 5.57 + i++; 5.58 + } 5.59 + } 5.60 + printk("\n"); 5.61 +} 5.62 + 5.63 +void show_stack(unsigned long *esp) 5.64 +{ 5.65 + unsigned long *stack; 5.66 + int i; 5.67 + 5.68 + printk("Stack trace from ESP=%p:\n", esp); 5.69 + 5.70 + stack = esp; 5.71 + for ( i = 0; i < kstack_depth_to_print; i++ ) 5.72 + { 5.73 + if ( ((long)stack & (STACK_SIZE-1)) == 0 ) 5.74 + break; 5.75 + if ( i && ((i % 8) == 0) ) 5.76 + printk("\n "); 5.77 + if ( kernel_text_address(*stack) ) 5.78 + printk("[%08lx] ", *stack++); 5.79 + else 5.80 + printk("%08lx ", *stack++); 5.81 + } 5.82 + printk("\n"); 5.83 + 5.84 + show_trace( esp ); 5.85 +} 5.86 + 5.87 +void show_registers(struct xen_regs *regs) 5.88 +{ 5.89 + unsigned long esp; 5.90 + unsigned short ss, ds, es, fs, gs; 5.91 + 5.92 + if ( GUEST_FAULT(regs) ) 5.93 + { 5.94 + esp = regs->esp; 5.95 + ss = regs->ss & 0xffff; 5.96 + ds = regs->ds & 0xffff; 5.97 + es = regs->es & 0xffff; 5.98 + fs = regs->fs & 0xffff; 5.99 + gs = regs->gs & 0xffff; 5.100 + } 5.101 + else 5.102 + { 5.103 + esp = (unsigned long)(®s->esp); 5.104 + ss = __HYPERVISOR_DS; 5.105 + ds = __HYPERVISOR_DS; 5.106 + es = __HYPERVISOR_DS; 5.107 + fs = __HYPERVISOR_DS; 5.108 + gs = __HYPERVISOR_DS; 5.109 + } 5.110 + 5.111 + printk("CPU: %d\nEIP: %04lx:[<%08lx>] \nEFLAGS: %08lx\n", 5.112 + smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags); 5.113 + printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", 5.114 + regs->eax, regs->ebx, regs->ecx, regs->edx); 5.115 + printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", 5.116 + regs->esi, regs->edi, regs->ebp, esp); 5.117 + printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n", 5.118 + ds, es, fs, gs, ss); 5.119 + 5.120 + show_stack((unsigned long *)®s->esp); 5.121 +} 5.122 + 5.123 +#define DOUBLEFAULT_STACK_SIZE 1024 5.124 +static struct tss_struct doublefault_tss; 5.125 +static unsigned char doublefault_stack[DOUBLEFAULT_STACK_SIZE]; 5.126 + 5.127 +asmlinkage void do_double_fault(void) 5.128 +{ 5.129 + struct tss_struct *tss = &doublefault_tss; 5.130 + unsigned int cpu 
= ((tss->back_link>>3)-__FIRST_TSS_ENTRY)>>1; 5.131 + 5.132 + /* Disable the NMI watchdog. It's useless now. */ 5.133 + watchdog_on = 0; 5.134 + 5.135 + /* Find information saved during fault and dump it to the console. */ 5.136 + tss = &init_tss[cpu]; 5.137 + printk("CPU: %d\nEIP: %04x:[<%08x>] \nEFLAGS: %08x\n", 5.138 + cpu, tss->cs, tss->eip, tss->eflags); 5.139 + printk("CR3: %08x\n", tss->__cr3); 5.140 + printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n", 5.141 + tss->eax, tss->ebx, tss->ecx, tss->edx); 5.142 + printk("esi: %08x edi: %08x ebp: %08x esp: %08x\n", 5.143 + tss->esi, tss->edi, tss->ebp, tss->esp); 5.144 + printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n", 5.145 + tss->ds, tss->es, tss->fs, tss->gs, tss->ss); 5.146 + printk("************************************\n"); 5.147 + printk("CPU%d DOUBLE FAULT -- system shutdown\n", cpu); 5.148 + printk("System needs manual reset.\n"); 5.149 + printk("************************************\n"); 5.150 + 5.151 + /* Lock up the console to prevent spurious output from other CPUs. */ 5.152 + console_force_lock(); 5.153 + 5.154 + /* Wait for manual reset. */ 5.155 + for ( ; ; ) 5.156 + __asm__ __volatile__ ( "hlt" ); 5.157 +} 5.158 + 5.159 +void __init doublefault_init(void) 5.160 +{ 5.161 + /* 5.162 + * Make a separate task for double faults. This will get us debug output if 5.163 + * we blow the kernel stack. 5.164 + */ 5.165 + struct tss_struct *tss = &doublefault_tss; 5.166 + memset(tss, 0, sizeof(*tss)); 5.167 + tss->ds = __HYPERVISOR_DS; 5.168 + tss->es = __HYPERVISOR_DS; 5.169 + tss->ss = __HYPERVISOR_DS; 5.170 + tss->esp = (unsigned long) 5.171 + &doublefault_stack[DOUBLEFAULT_STACK_SIZE]; 5.172 + tss->__cr3 = __pa(idle_pg_table); 5.173 + tss->cs = __HYPERVISOR_CS; 5.174 + tss->eip = (unsigned long)do_double_fault; 5.175 + tss->eflags = 2; 5.176 + tss->bitmap = IOBMP_INVALID_OFFSET; 5.177 + _set_tssldt_desc(gdt_table+__DOUBLEFAULT_TSS_ENTRY, 5.178 + (int)tss, 235, 0x89); 5.179 +} 5.180 + 5.181 +long set_fast_trap(struct exec_domain *p, int idx) 5.182 +{ 5.183 + trap_info_t *ti; 5.184 + 5.185 + /* Index 0 is special: it disables fast traps. */ 5.186 + if ( idx == 0 ) 5.187 + { 5.188 + if ( p == current ) 5.189 + CLEAR_FAST_TRAP(&p->thread); 5.190 + SET_DEFAULT_FAST_TRAP(&p->thread); 5.191 + return 0; 5.192 + } 5.193 + 5.194 + /* 5.195 + * We only fast-trap vectors 0x20-0x2f, and vector 0x80. 5.196 + * The former range is used by Windows and MS-DOS. 5.197 + * Vector 0x80 is used by Linux and the BSD variants. 5.198 + */ 5.199 + if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) 5.200 + return -1; 5.201 + 5.202 + ti = p->thread.traps + idx; 5.203 + 5.204 + /* 5.205 + * We can't virtualise interrupt gates, as there's no way to get 5.206 + * the CPU to automatically clear the events_mask variable. 5.207 + */ 5.208 + if ( TI_GET_IF(ti) ) 5.209 + return -1; 5.210 + 5.211 + if ( p == current ) 5.212 + CLEAR_FAST_TRAP(&p->thread); 5.213 + 5.214 + p->thread.fast_trap_idx = idx; 5.215 + p->thread.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff); 5.216 + p->thread.fast_trap_desc.b = 5.217 + (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13; 5.218 + 5.219 + if ( p == current ) 5.220 + SET_FAST_TRAP(&p->thread); 5.221 + 5.222 + return 0; 5.223 +} 5.224 + 5.225 + 5.226 +long do_set_fast_trap(int idx) 5.227 +{ 5.228 + return set_fast_trap(current, idx); 5.229 +}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/x86_64/traps.c	Tue Feb 01 23:14:05 2005 +0000
@@ -0,0 +1,127 @@
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/sched.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/mm.h>
+#include <xen/irq.h>
+
+static int kstack_depth_to_print = 8*20;
+
+static inline int kernel_text_address(unsigned long addr)
+{
+    if (addr >= (unsigned long) &_stext &&
+        addr <= (unsigned long) &_etext)
+        return 1;
+    return 0;
+
+}
+
+void show_guest_stack(void)
+{
+    int i;
+    execution_context_t *ec = get_execution_context();
+    unsigned long *stack = (unsigned long *)ec->rsp;
+    printk("Guest RIP is %lx\n", ec->rip);
+
+    for ( i = 0; i < kstack_depth_to_print; i++ )
+    {
+        if ( ((long)stack & (STACK_SIZE-1)) == 0 )
+            break;
+        if ( i && ((i % 8) == 0) )
+            printk("\n ");
+        printk("%08lx ", *stack++);
+    }
+    printk("\n");
+
+}
+
+void show_trace(unsigned long *rsp)
+{
+    unsigned long *stack, addr;
+    int i;
+
+    printk("Call Trace from RSP=%p: ", rsp);
+    stack = rsp;
+    i = 0;
+    while (((long) stack & (STACK_SIZE-1)) != 0) {
+        addr = *stack++;
+        if (kernel_text_address(addr)) {
+            if (i && ((i % 6) == 0))
+                printk("\n ");
+            printk("[<%08lx>] ", addr);
+            i++;
+        }
+    }
+    printk("\n");
+}
+
+void show_stack(unsigned long *rsp)
+{
+    unsigned long *stack;
+    int i;
+
+    printk("Stack trace from RSP=%p:\n", rsp);
+
+    stack = rsp;
+    for ( i = 0; i < kstack_depth_to_print; i++ )
+    {
+        if ( ((long)stack & (STACK_SIZE-1)) == 0 )
+            break;
+        if ( i && ((i % 8) == 0) )
+            printk("\n ");
+        if ( kernel_text_address(*stack) )
+            printk("[%08lx] ", *stack++);
+        else
+            printk("%08lx ", *stack++);
+    }
+    printk("\n");
+
+    show_trace(rsp);
+}
+
+void show_registers(struct xen_regs *regs)
+{
+    printk("CPU: %d\nEIP: %04lx:[<%08lx>] \nEFLAGS: %08lx\n",
+           smp_processor_id(), 0xffff & regs->cs, regs->rip, regs->eflags);
+    printk("rax: %08lx rbx: %08lx rcx: %08lx rdx: %08lx\n",
+           regs->rax, regs->rbx, regs->rcx, regs->rdx);
+    printk("rsi: %08lx rdi: %08lx rbp: %08lx rsp: %08lx ss: %04x\n",
+           regs->rsi, regs->rdi, regs->rbp, regs->rsp, regs->ss);
+    printk("r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n",
+           regs->r8, regs->r9, regs->r10, regs->r11);
+    printk("r12: %08lx r13: %08lx r14: %08lx r15: %08lx\n",
+           regs->r12, regs->r13, regs->r14, regs->r15);
+
+    show_stack((unsigned long *)regs->rsp);
+}
+
+void __init doublefault_init(void)
+{
+}
+
+void *decode_reg(struct xen_regs *regs, u8 b)
+{
+    switch ( b )
+    {
+    case  0: return &regs->rax;
+    case  1: return &regs->rcx;
+    case  2: return &regs->rdx;
+    case  3: return &regs->rbx;
+    case  4: return &regs->rsp;
+    case  5: return &regs->rbp;
+    case  6: return &regs->rsi;
+    case  7: return &regs->rdi;
+    case  8: return &regs->r8;
+    case  9: return &regs->r9;
+    case 10: return &regs->r10;
+    case 11: return &regs->r11;
+    case 12: return &regs->r12;
+    case 13: return &regs->r13;
+    case 14: return &regs->r14;
+    case 15: return &regs->r15;
+    }
+
+    return NULL;
+}
--- a/xen/include/asm-x86/desc.h	Tue Feb 01 21:26:34 2005 +0000
+++ b/xen/include/asm-x86/desc.h	Tue Feb 01 23:14:05 2005 +0000
@@ -1,5 +1,6 @@
 #ifndef __ARCH_DESC_H
 #define __ARCH_DESC_H
+#ifndef __ASSEMBLY__

 #define LDT_ENTRY_SIZE 8

@@ -25,7 +26,6 @@
       (((_s)>>3) > LAST_RESERVED_GDT_ENTRY) ||            \
       ((_s)&4)) &&                                         \
      (((_s)&3) == 1))
-#define VALID_CODESEL(_s) ((_s) == FLAT_RING1_CS || VALID_SEL(_s))

 /* These are bitmasks for the high 32 bits of a descriptor table entry. */
 #define _SEGMENT_TYPE    (15<< 8)
@@ -38,17 +38,51 @@
 #define _SEGMENT_DB      ( 1<<22) /* 16- or 32-bit segment */
 #define _SEGMENT_G       ( 1<<23) /* Granularity */

-#ifndef __ASSEMBLY__
 struct desc_struct {
     u32 a, b;
 };

 #if defined(__x86_64__)
+
+#define VALID_CODESEL(_s) ((_s) == FLAT_RING3_CS64 || VALID_SEL(_s))
+
 typedef struct {
     u64 a, b;
 } idt_entry_t;
+
+#define _set_gate(gate_addr,type,dpl,addr) ((void)0)
+#define _set_tssldt_desc(n,addr,limit,type) ((void)0)
+
 #elif defined(__i386__)
+
+#define VALID_CODESEL(_s) ((_s) == FLAT_RING1_CS || VALID_SEL(_s))
+
 typedef struct desc_struct idt_entry_t;
+
+#define _set_gate(gate_addr,type,dpl,addr) \
+do { \
+  int __d0, __d1; \
+  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+    "movw %4,%%dx\n\t" \
+    "movl %%eax,%0\n\t" \
+    "movl %%edx,%1" \
+    :"=m" (*((long *) (gate_addr))), \
+     "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
+    :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+     "3" ((char *) (addr)),"2" (__HYPERVISOR_CS << 16)); \
+} while (0)
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
+  "movw %%ax,2(%2)\n\t" \
+  "rorl $16,%%eax\n\t" \
+  "movb %%al,4(%2)\n\t" \
+  "movb %4,5(%2)\n\t" \
+  "movb $0,6(%2)\n\t" \
+  "movb %%ah,7(%2)\n\t" \
+  "rorl $16,%%eax" \
+  : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+
 #endif

 extern struct desc_struct gdt_table[];
@@ -64,8 +98,9 @@ struct Xgt_desc_struct {
 #define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2))

 extern void set_intr_gate(unsigned int irq, void * addr);
+extern void set_system_gate(unsigned int n, void *addr);
+extern void set_task_gate(unsigned int n, unsigned int sel);
 extern void set_tss_desc(unsigned int n, void *addr);

 #endif /* !__ASSEMBLY__ */
-
-#endif
+#endif /* __ARCH_DESC_H */
--- a/xen/include/asm-x86/regs.h	Tue Feb 01 21:26:34 2005 +0000
+++ b/xen/include/asm-x86/regs.h	Tue Feb 01 23:14:05 2005 +0000
@@ -31,4 +31,6 @@ enum EFLAGS {
     EF_ID   = 0x00200000, /* id */
 };

+#define GUEST_FAULT(_r) (likely(VM86_MODE(_r) || !RING_0(_r)))
+
 #endif /* __X86_REGS_H__ */
--- a/xen/include/asm-x86/x86_64/regs.h	Tue Feb 01 21:26:34 2005 +0000
+++ b/xen/include/asm-x86/x86_64/regs.h	Tue Feb 01 23:14:05 2005 +0000
@@ -11,18 +11,19 @@ struct xen_regs
     u64 r12;
     u64 rbp;
     u64 rbx;
+    /* NB. Above here is C callee-saves. */
     u64 r11;
     u64 r10;
     u64 r9;
     u64 r8;
-    u64 rax;
-    u64 rcx;
-    u64 rdx;
-    u64 rsi;
-    u64 rdi;
+    union { u64 rax; u32 eax; } __attribute__ ((packed));
+    union { u64 rcx; u32 ecx; } __attribute__ ((packed));
+    union { u64 rdx; u32 edx; } __attribute__ ((packed));
+    union { u64 rsi; u32 esi; } __attribute__ ((packed));
+    union { u64 rdi; u32 edi; } __attribute__ ((packed));
     u32 error_code;
     u32 entry_vector;
-    u64 rip;
+    union { u64 rip; u64 eip; } __attribute__ ((packed));
     u64 cs;
     u64 eflags;
     u64 rsp;
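Finally, the x86_64/regs.h change above overlays each 64-bit register slot with a union carrying the old i386 field name. Because x86_64 is little-endian, the 32-bit member aliases the low half of the 64-bit value, so shared code that still refers to eax, ecx and friends compiles on both builds. Below is a cut-down model of that idea; demo_regs is hypothetical and not the real struct xen_regs.

#include <stdio.h>
#include <stdint.h>

struct demo_regs {
    union { uint64_t rax; uint32_t eax; };   /* anonymous unions: C11 / GNU C */
    union { uint64_t rcx; uint32_t ecx; };
};

int main(void)
{
    struct demo_regs r = { .rax = 0x1122334455667788ULL };
    /* On a little-endian target, eax aliases the low 32 bits of rax. */
    printf("rax=%#llx eax=%#x\n", (unsigned long long)r.rax, r.eax);
    return 0;
}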