debuggers.hg

view xen/arch/x86/traps.c @ 3740:d3e70af90f15

bitkeeper revision 1.1159.212.115 (4207c574hv18R_VTm-3a9w_AZzNBWw)

Force hypercall continuation arguments to size of longs.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Mon Feb 07 19:45:56 2005 +0000 (2005-02-07)
parents ff48344d34df
children 23e7cf28ddb3
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /******************************************************************************
3 * arch/x86/traps.c
4 *
5 * Modifications to Linux original are copyright (c) 2002-2004, K A Fraser
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
22 /*
23 * Copyright (C) 1991, 1992 Linus Torvalds
24 *
25 * Pentium III FXSR, SSE support
26 * Gareth Hughes <gareth@valinux.com>, May 2000
27 */
29 #include <xen/config.h>
30 #include <xen/init.h>
31 #include <xen/sched.h>
32 #include <xen/lib.h>
33 #include <xen/errno.h>
34 #include <xen/mm.h>
35 #include <xen/console.h>
36 #include <asm/regs.h>
37 #include <xen/delay.h>
38 #include <xen/event.h>
39 #include <xen/spinlock.h>
40 #include <xen/irq.h>
41 #include <xen/perfc.h>
42 #include <xen/softirq.h>
43 #include <asm/shadow.h>
44 #include <asm/domain_page.h>
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/atomic.h>
48 #include <asm/desc.h>
49 #include <asm/debugreg.h>
50 #include <asm/smp.h>
51 #include <asm/flushtlb.h>
52 #include <asm/uaccess.h>
53 #include <asm/i387.h>
54 #include <asm/debugger.h>
55 #include <asm/msr.h>
/*
 * opt_nmi: one of 'ignore', 'dom0', or 'fatal'.
 *  fatal:  Xen prints diagnostic message and then hangs.
 *  dom0:   The NMI is virtualised to DOM0.
 *  ignore: The NMI error is cleared and ignored.
 */
#ifdef NDEBUG
char opt_nmi[10] = "dom0";   /* release build: forward NMIs to dom0 */
#else
char opt_nmi[10] = "fatal";  /* debug build: die loudly for diagnosis */
#endif
string_param("nmi", opt_nmi);

/* Hypercall entry stub -- presumably defined in entry.S (not visible here). */
asmlinkage int hypercall(void);

/* Master table, and the one used by CPU0. */
idt_entry_t idt_table[IDT_ENTRIES] = { {0, 0}, };
/* All other CPUs have their own copy. */
idt_entry_t *idt_tables[NR_CPUS] = { 0 };

/* Assembly exception entry points installed into the IDT by trap_init(). */
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
97 /*
98 * This is called for faults at very unexpected times (e.g., when interrupts
99 * are disabled). In such situations we can't do much that is safe. We try to
100 * print out some tracing and then we just spin.
101 */
102 asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs)
103 {
104 int cpu = smp_processor_id();
105 unsigned long cr2;
106 static char *trapstr[] = {
107 "divide error", "debug", "nmi", "bkpt", "overflow", "bounds",
108 "invalid operation", "device not available", "double fault",
109 "coprocessor segment", "invalid tss", "segment not found",
110 "stack error", "general protection fault", "page fault",
111 "spurious interrupt", "coprocessor error", "alignment check",
112 "machine check", "simd error"
113 };
115 show_registers(regs);
117 if ( trapnr == TRAP_page_fault )
118 {
119 __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (cr2) : );
120 printk("Faulting linear address might be %0lx %lx\n", cr2, cr2);
121 }
123 printk("************************************\n");
124 printk("CPU%d FATAL TRAP %d (%s), ERROR_CODE %04x%s.\n",
125 cpu, trapnr, trapstr[trapnr], regs->error_code,
126 (regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
127 printk("System shutting down -- need manual reset.\n");
128 printk("************************************\n");
130 /* Lock up the console to prevent spurious output from other CPUs. */
131 console_force_lock();
133 /* Wait for manual reset. */
134 for ( ; ; )
135 __asm__ __volatile__ ( "hlt" );
136 }
138 static inline int do_trap(int trapnr, char *str,
139 struct xen_regs *regs,
140 int use_error_code)
141 {
142 struct exec_domain *ed = current;
143 struct trap_bounce *tb = &ed->arch.trap_bounce;
144 trap_info_t *ti;
145 unsigned long fixup;
147 DEBUGGER_trap_entry(trapnr, regs);
149 if ( !GUEST_FAULT(regs) )
150 goto xen_fault;
152 ti = current->arch.traps + trapnr;
153 tb->flags = TBF_EXCEPTION;
154 tb->cs = ti->cs;
155 tb->eip = ti->address;
156 if ( use_error_code )
157 {
158 tb->flags |= TBF_EXCEPTION_ERRCODE;
159 tb->error_code = regs->error_code;
160 }
161 if ( TI_GET_IF(ti) )
162 ed->vcpu_info->evtchn_upcall_mask = 1;
163 return 0;
165 xen_fault:
167 if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
168 {
169 DPRINTK("Trap %d: %p -> %p\n", trapnr, regs->eip, fixup);
170 regs->eip = fixup;
171 return 0;
172 }
174 DEBUGGER_trap_fatal(trapnr, regs);
176 show_registers(regs);
177 panic("CPU%d FATAL TRAP: vector = %d (%s)\n"
178 "[error_code=%04x]\n",
179 smp_processor_id(), trapnr, str, regs->error_code);
180 return 0;
181 }
/* Stamp out a handler that propagates the trap without an error code. */
#define DO_ERROR_NOCODE(trapnr, str, name) \
asmlinkage int do_##name(struct xen_regs *regs) \
{ \
    return do_trap(trapnr, str, regs, 0); \
}

/* Stamp out a handler that propagates the trap with its error code. */
#define DO_ERROR(trapnr, str, name) \
asmlinkage int do_##name(struct xen_regs *regs) \
{ \
    return do_trap(trapnr, str, regs, 1); \
}

/* Vectors with no hardware error code. */
DO_ERROR_NOCODE( 0, "divide error", divide_error)
DO_ERROR_NOCODE( 4, "overflow", overflow)
DO_ERROR_NOCODE( 5, "bounds", bounds)
DO_ERROR_NOCODE( 6, "invalid operand", invalid_op)
DO_ERROR_NOCODE( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
/* Vectors that push an error code. */
DO_ERROR(10, "invalid TSS", invalid_TSS)
DO_ERROR(11, "segment not present", segment_not_present)
DO_ERROR(12, "stack segment", stack_segment)
DO_ERROR_NOCODE(16, "fpu error", coprocessor_error)
DO_ERROR(17, "alignment check", alignment_check)
DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
207 asmlinkage int do_int3(struct xen_regs *regs)
208 {
209 struct exec_domain *ed = current;
210 struct trap_bounce *tb = &ed->arch.trap_bounce;
211 trap_info_t *ti;
213 DEBUGGER_trap_entry(TRAP_int3, regs);
215 if ( !GUEST_FAULT(regs) )
216 {
217 DEBUGGER_trap_fatal(TRAP_int3, regs);
218 show_registers(regs);
219 panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n", smp_processor_id());
220 }
222 ti = current->arch.traps + 3;
223 tb->flags = TBF_EXCEPTION;
224 tb->cs = ti->cs;
225 tb->eip = ti->address;
226 if ( TI_GET_IF(ti) )
227 ed->vcpu_info->evtchn_upcall_mask = 1;
229 return 0;
230 }
/* Machine check: unrecoverable -- print state and spin (never returns). */
asmlinkage void do_machine_check(struct xen_regs *regs)
{
    fatal_trap(TRAP_machine_check, regs);
}
/*
 * Bounce a page fault to the current guest's vector-14 handler, supplying
 * both the error code and the faulting address (virtual CR2).
 */
void propagate_page_fault(unsigned long addr, u16 error_code)
{
    trap_info_t *ti;
    struct exec_domain *ed = current;
    struct trap_bounce *tb = &ed->arch.trap_bounce;

    ti = ed->arch.traps + 14;
    tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
    tb->cr2        = addr;
    tb->error_code = error_code;
    tb->cs         = ti->cs;
    tb->eip        = ti->address;
    if ( TI_GET_IF(ti) )
        ed->vcpu_info->evtchn_upcall_mask = 1;

    /* Also latch the address where the guest reads its virtual CR2. */
    ed->arch.guest_cr2 = addr;
}
/*
 * Page-fault handler. Tries, in order:
 *  1. writable-pagetable fixups (flush an active batched PT mapping, or
 *     emulate a guest write to one of its own page tables);
 *  2. shadow-mode fault resolution;
 *  3. lazy copy of a guest LDT mapping;
 *  4. propagation to the guest (guest-context faults);
 *  5. Xen exception-fixup table; otherwise panic.
 * The ordering is significant -- do not reorder the checks.
 */
asmlinkage int do_page_fault(struct xen_regs *regs)
{
    unsigned long off, addr, fixup;
    struct exec_domain *ed = current;
    struct domain *d = ed->domain;
    extern int map_ldt_shadow_page(unsigned int);
    int cpu = ed->processor;
    int ret;

    /* Must read %cr2 before anything can fault and clobber it. */
    __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (addr) : );

    DEBUGGER_trap_entry(TRAP_page_fault, regs);

    perfc_incrc(page_faults);

    if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
    {
        LOCK_BIGLOCK(d);
        /* Fault within the L2 slot of the currently-unhooked active PT? */
        if ( unlikely(ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va) &&
             unlikely((addr >> L2_PAGETABLE_SHIFT) ==
                      ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx) )
        {
            ptwr_flush(PTWR_PT_ACTIVE);
            UNLOCK_BIGLOCK(d);
            return EXCRET_fault_fixed;
        }

        if ( (addr < PAGE_OFFSET) &&
             ((regs->error_code & 3) == 3) && /* write-protection fault */
             ptwr_do_page_fault(addr) )
        {
            if ( unlikely(d->arch.shadow_mode) )
                (void)shadow_fault(addr, regs->error_code);
            UNLOCK_BIGLOCK(d);
            return EXCRET_fault_fixed;
        }
        UNLOCK_BIGLOCK(d);
    }

    if ( unlikely(d->arch.shadow_mode) &&
         (addr < PAGE_OFFSET) && shadow_fault(addr, regs->error_code) )
        return EXCRET_fault_fixed;

    if ( unlikely(addr >= LDT_VIRT_START(ed)) &&
         (addr < (LDT_VIRT_START(ed) + (ed->arch.ldt_ents*LDT_ENTRY_SIZE))) )
    {
        /*
         * Copy a mapping from the guest's LDT, if it is valid. Otherwise we
         * send the fault up to the guest OS to be handled.
         */
        LOCK_BIGLOCK(d);
        off  = addr - LDT_VIRT_START(ed);
        addr = ed->arch.ldt_base + off;
        ret = map_ldt_shadow_page(off >> PAGE_SHIFT);
        UNLOCK_BIGLOCK(d);
        if ( likely(ret) )
            return EXCRET_fault_fixed; /* successfully copied the mapping */
    }

    if ( !GUEST_FAULT(regs) )
        goto xen_fault;

    propagate_page_fault(addr, regs->error_code);
    return 0;

 xen_fault:

    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
    {
        perfc_incrc(copy_user_faults);
        if ( !d->arch.shadow_mode )
            DPRINTK("Page fault: %p -> %p\n", regs->eip, fixup);
        regs->eip = fixup;
        return 0;
    }

    DEBUGGER_trap_fatal(TRAP_page_fault, regs);

    show_registers(regs);
    show_page_walk(addr);
    panic("CPU%d FATAL PAGE FAULT\n"
          "[error_code=%04x]\n"
          "Faulting linear address might be %p\n",
          smp_processor_id(), regs->error_code, addr);
    return 0;
}
342 static int emulate_privileged_op(struct xen_regs *regs)
343 {
344 extern long do_fpu_taskswitch(void);
345 extern void *decode_reg(struct xen_regs *regs, u8 b);
347 struct exec_domain *ed = current;
348 unsigned long *reg, eip = regs->eip;
349 u8 opcode;
351 if ( get_user(opcode, (u8 *)eip) )
352 goto page_fault;
353 eip += 1;
354 if ( (opcode & 0xff) != 0x0f )
355 goto fail;
357 if ( get_user(opcode, (u8 *)eip) )
358 goto page_fault;
359 eip += 1;
361 switch ( opcode )
362 {
363 case 0x06: /* CLTS */
364 (void)do_fpu_taskswitch();
365 break;
367 case 0x09: /* WBINVD */
368 if ( !IS_CAPABLE_PHYSDEV(ed->domain) )
369 {
370 DPRINTK("Non-physdev domain attempted WBINVD.\n");
371 goto fail;
372 }
373 wbinvd();
374 break;
376 case 0x20: /* MOV CR?,<reg> */
377 if ( get_user(opcode, (u8 *)eip) )
378 goto page_fault;
379 eip += 1;
380 if ( (opcode & 0xc0) != 0xc0 )
381 goto fail;
382 reg = decode_reg(regs, opcode & 7);
383 switch ( (opcode >> 3) & 7 )
384 {
385 case 0: /* Read CR0 */
386 *reg =
387 (read_cr0() & ~X86_CR0_TS) |
388 (test_bit(EDF_GUEST_STTS, &ed->ed_flags) ? X86_CR0_TS : 0);
389 break;
391 case 2: /* Read CR2 */
392 *reg = ed->arch.guest_cr2;
393 break;
395 case 3: /* Read CR3 */
396 *reg = pagetable_val(ed->arch.pagetable);
397 break;
399 default:
400 goto fail;
401 }
402 break;
404 case 0x22: /* MOV <reg>,CR? */
405 if ( get_user(opcode, (u8 *)eip) )
406 goto page_fault;
407 eip += 1;
408 if ( (opcode & 0xc0) != 0xc0 )
409 goto fail;
410 reg = decode_reg(regs, opcode & 7);
411 switch ( (opcode >> 3) & 7 )
412 {
413 case 0: /* Write CR0 */
414 if ( *reg & X86_CR0_TS ) /* XXX ignore all but TS bit */
415 (void)do_fpu_taskswitch;
416 break;
418 case 2: /* Write CR2 */
419 ed->arch.guest_cr2 = *reg;
420 break;
422 case 3: /* Write CR3 */
423 LOCK_BIGLOCK(ed->domain);
424 (void)new_guest_cr3(*reg);
425 UNLOCK_BIGLOCK(ed->domain);
426 break;
428 default:
429 goto fail;
430 }
431 break;
433 case 0x30: /* WRMSR */
434 if ( !IS_PRIV(ed->domain) )
435 {
436 DPRINTK("Non-priv domain attempted WRMSR.\n");
437 goto fail;
438 }
439 wrmsr(regs->ecx, regs->eax, regs->edx);
440 break;
442 case 0x32: /* RDMSR */
443 if ( !IS_PRIV(ed->domain) )
444 {
445 DPRINTK("Non-priv domain attempted RDMSR.\n");
446 goto fail;
447 }
448 rdmsr(regs->ecx, regs->eax, regs->edx);
449 break;
451 default:
452 goto fail;
453 }
455 regs->eip = eip;
456 return EXCRET_fault_fixed;
458 fail:
459 return 0;
461 page_fault:
462 propagate_page_fault(eip, 0);
463 return EXCRET_fault_fixed;
464 }
/*
 * General-protection-fault handler. Distinguishes hardware interrupt GPFs,
 * guest software-INT reflection, privileged-op emulation, and faults inside
 * Xen itself (exception-fixup table, else panic).
 */
asmlinkage int do_general_protection(struct xen_regs *regs)
{
    struct exec_domain *ed = current;
    struct trap_bounce *tb = &ed->arch.trap_bounce;
    trap_info_t *ti;
    unsigned long fixup;

    DEBUGGER_trap_entry(TRAP_gp_fault, regs);

    /* Error-code bit 0 set => fault of external (hardware) origin. */
    if ( regs->error_code & 1 )
        goto hardware_gp;

    if ( !GUEST_FAULT(regs) )
        goto gp_in_kernel;

    /*
     * Cunning trick to allow arbitrary "INT n" handling.
     *
     * We set DPL == 0 on all vectors in the IDT. This prevents any INT <n>
     * instruction from trapping to the appropriate vector, when that might not
     * be expected by Xen or the guest OS. For example, that entry might be for
     * a fault handler (unlike traps, faults don't increment EIP), or might
     * expect an error code on the stack (which a software trap never
     * provides), or might be a hardware interrupt handler that doesn't like
     * being called spuriously.
     *
     * Instead, a GPF occurs with the faulting IDT vector in the error code.
     * Bit 1 is set to indicate that an IDT entry caused the fault. Bit 0 is
     * clear to indicate that it's a software fault, not hardware.
     *
     * NOTE: Vectors 3 and 4 are dealt with from their own handler. This is
     * okay because they can only be triggered by an explicit DPL-checked
     * instruction. The DPL specified by the guest OS for these vectors is NOT
     * CHECKED!!
     */
    if ( (regs->error_code & 3) == 2 )
    {
        /* This fault must be due to <INT n> instruction. */
        /* Error code holds (vector << 3) | flags; recover the vector. */
        ti = current->arch.traps + (regs->error_code>>3);
        if ( TI_GET_DPL(ti) >= (VM86_MODE(regs) ? 3 : (regs->cs & 3)) )
        {
            tb->flags = TBF_EXCEPTION;
            regs->eip += 2;  /* skip the two-byte INT n instruction */
            goto finish_propagation;
        }
    }

    /* Emulate some simple privileged instructions when exec'ed in ring 1. */
    if ( (regs->error_code == 0) &&
         RING_1(regs) &&
         emulate_privileged_op(regs) )
        return 0;

#if defined(__i386__)
    if ( VM_ASSIST(ed->domain, VMASST_TYPE_4gb_segments) &&
         (regs->error_code == 0) &&
         gpf_emulate_4gb(regs) )
        return 0;
#endif

    /* Pass on GPF as is. */
    ti = current->arch.traps + 13;
    tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
    tb->error_code = regs->error_code;
 finish_propagation:
    tb->cs  = ti->cs;
    tb->eip = ti->address;
    if ( TI_GET_IF(ti) )
        ed->vcpu_info->evtchn_upcall_mask = 1;
    return 0;

 gp_in_kernel:

    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
    {
        DPRINTK("GPF (%04x): %p -> %p\n",
                regs->error_code, regs->eip, fixup);
        regs->eip = fixup;
        return 0;
    }

    DEBUGGER_trap_fatal(TRAP_gp_fault, regs);

 hardware_gp:
    show_registers(regs);
    panic("CPU%d GENERAL PROTECTION FAULT\n[error_code=%04x]\n",
          smp_processor_id(), regs->error_code);
    return 0;
}
/* Pending NMI reasons deferred to softirq: bit 0 = parity, bit 1 = I/O. */
unsigned long nmi_softirq_reason;
static void nmi_softirq(void)
{
    /* Nothing to notify until dom0 exists. */
    if ( dom0 == NULL )
        return;

    if ( test_and_clear_bit(0, &nmi_softirq_reason) )
        send_guest_virq(dom0->exec_domain[0], VIRQ_PARITY_ERR);

    if ( test_and_clear_bit(1, &nmi_softirq_reason) )
        send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR);
}
/* NMI due to a memory parity error (reason port bit 7). */
asmlinkage void mem_parity_error(struct xen_regs *regs)
{
    /* Clear and disable the parity-error line. */
    outb((inb(0x61)&15)|4,0x61);

    switch ( opt_nmi[0] )
    {
    case 'd': /* 'dom0' */
        set_bit(0, &nmi_softirq_reason);
        raise_softirq(NMI_SOFTIRQ);
        /* fallthrough */
    case 'i': /* 'ignore' */
        break;
    default: /* 'fatal' */
        console_force_unlock();
        printk("\n\nNMI - MEMORY ERROR\n");
        fatal_trap(TRAP_nmi, regs);
    }
}
588 asmlinkage void io_check_error(struct xen_regs *regs)
589 {
590 /* Clear and disable the I/O-error line. */
591 outb((inb(0x61)&15)|8,0x61);
593 switch ( opt_nmi[0] )
594 {
595 case 'd': /* 'dom0' */
596 set_bit(0, &nmi_softirq_reason);
597 raise_softirq(NMI_SOFTIRQ);
598 case 'i': /* 'ignore' */
599 break;
600 default: /* 'fatal' */
601 console_force_unlock();
602 printk("\n\nNMI - I/O ERROR\n");
603 fatal_trap(TRAP_nmi, regs);
604 }
605 }
/* NMI with no recognised reason bits: log and continue. */
static void unknown_nmi_error(unsigned char reason)
{
    printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
    printk("Dazed and confused, but trying to continue\n");
    printk("Do you have a strange power saving mode enabled?\n");
}
/* Top-level NMI dispatcher: watchdog tick, then decode the reason byte. */
asmlinkage void do_nmi(struct xen_regs *regs, unsigned long reason)
{
    ++nmi_count(smp_processor_id());

    if ( nmi_watchdog )
        nmi_watchdog_tick(regs);

    if ( reason & 0x80 )        /* memory parity error */
        mem_parity_error(regs);
    else if ( reason & 0x40 )   /* I/O check error */
        io_check_error(regs);
    else if ( !nmi_watchdog )   /* unexplained, and not watchdog-generated */
        unknown_nmi_error((unsigned char)(reason&0xff));
}
/*
 * Device-not-available (#NM) handler: lazily restore or initialise the FPU
 * state, and reflect the trap to the guest if the guest itself set CR0.TS.
 */
asmlinkage int math_state_restore(struct xen_regs *regs)
{
    /* Prevent recursion. */
    clts();

    if ( !test_bit(EDF_USEDFPU, &current->ed_flags) )
    {
        if ( test_bit(EDF_DONEFPUINIT, &current->ed_flags) )
            restore_fpu(current);
        else
            init_fpu();
        set_bit(EDF_USEDFPU, &current->ed_flags); /* so we fnsave on switch_to() */
    }

    /* Guest had virtual TS set: bounce #NM to its vector-7 handler. */
    if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) )
    {
        struct trap_bounce *tb = &current->arch.trap_bounce;
        tb->flags = TBF_EXCEPTION;
        tb->cs    = current->arch.traps[7].cs;
        tb->eip   = current->arch.traps[7].address;
    }

    return EXCRET_fault_fixed;
}
/*
 * Debug (#DB) handler: filter spurious traps from lazily-loaded debug
 * registers, ignore watchpoints hit inside Xen, otherwise bounce to the
 * guest's vector-1 handler with DR6 saved for it to inspect.
 */
asmlinkage int do_debug(struct xen_regs *regs)
{
    unsigned long condition;
    struct exec_domain *d = current;
    struct trap_bounce *tb = &d->arch.trap_bounce;

    DEBUGGER_trap_entry(TRAP_debug, regs);

    __asm__ __volatile__("mov %%db6,%0" : "=r" (condition));

    /* Mask out spurious debug traps due to lazy DR7 setting */
    if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
         (d->arch.debugreg[7] == 0) )
    {
        /* Stale hardware DR7 -- clear it to match the guest's view. */
        __asm__("mov %0,%%db7" : : "r" (0UL));
        goto out;
    }

    if ( !GUEST_FAULT(regs) )
    {
        /* Clear TF just for absolute sanity. */
        regs->eflags &= ~EF_TF;
        /*
         * We ignore watchpoints when they trigger within Xen. This may happen
         * when a buffer is passed to us which previously had a watchpoint set
         * on it. No need to bump EIP; the only faulting trap is an instruction
         * breakpoint, which can't happen to us.
         */
        goto out;
    }

    /* Save debug status register where guest OS can peek at it */
    d->arch.debugreg[6] = condition;

    tb->flags = TBF_EXCEPTION;
    tb->cs    = d->arch.traps[1].cs;
    tb->eip   = d->arch.traps[1].address;

 out:
    return EXCRET_not_a_fault;
}
/* Spurious-interrupt bug (vector 15 on some CPUs): silently ignore. */
asmlinkage int do_spurious_interrupt_bug(struct xen_regs *regs)
{
    return EXCRET_not_a_fault;
}
/* IPI entry point used to re-deliver an NMI deferred via the local APIC. */
BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
asmlinkage void smp_deferred_nmi(struct xen_regs regs)
{
    ack_APIC_irq();
    do_nmi(&regs, 0);  /* reason 0: reason port already handled/not relevant */
}
/* Install an interrupt gate (type 14, DPL 0) at IDT vector n. */
void set_intr_gate(unsigned int n, void *addr)
{
    _set_gate(idt_table+n,14,0,addr);
}

/* Install an interrupt gate callable from ring 3 (type 14, DPL 3). */
void set_system_gate(unsigned int n, void *addr)
{
    _set_gate(idt_table+n,14,3,addr);
}

/* Install a task gate referencing TSS selector 'sel' at IDT vector n. */
void set_task_gate(unsigned int n, unsigned int sel)
{
    idt_table[n].a = sel << 16;       /* selector in bits 16-31 of word a */
    idt_table[n].b = 0x8500;          /* present, type 5 (task gate) */
}
/* Install the TSS descriptor for CPU n into the GDT. */
void set_tss_desc(unsigned int n, void *addr)
{
    _set_tssldt_desc(
        gdt_table + __TSS(n),
        (unsigned long)addr,
        /* Limit covers only the used part of tss_struct. */
        offsetof(struct tss_struct, __cacheline_filler) - 1,
        9);  /* type 9: available 32-bit TSS */
}
/* Populate the master IDT, then perform per-CPU trap and CPU init. */
void __init trap_init(void)
{
    extern void percpu_traps_init(void);
    extern void cpu_init(void);

    /*
     * Note that interrupt gates are always used, rather than trap gates. We
     * must have interrupts disabled until DS/ES/FS/GS are saved because the
     * first activation must have the "bad" value(s) for these registers and
     * we may lose them if another activation is installed before they are
     * saved. The page-fault handler also needs interrupts disabled until %cr2
     * has been read and saved on the stack.
     */
    set_intr_gate(TRAP_divide_error,&divide_error);
    set_intr_gate(TRAP_debug,&debug);
    set_intr_gate(TRAP_nmi,&nmi);
    set_system_gate(TRAP_int3,&int3);         /* usable from all privileges */
    set_system_gate(TRAP_overflow,&overflow); /* usable from all privileges */
    set_intr_gate(TRAP_bounds,&bounds);
    set_intr_gate(TRAP_invalid_op,&invalid_op);
    set_intr_gate(TRAP_no_device,&device_not_available);
    set_intr_gate(TRAP_copro_seg,&coprocessor_segment_overrun);
    set_intr_gate(TRAP_invalid_tss,&invalid_TSS);
    set_intr_gate(TRAP_no_segment,&segment_not_present);
    set_intr_gate(TRAP_stack_error,&stack_segment);
    set_intr_gate(TRAP_gp_fault,&general_protection);
    set_intr_gate(TRAP_page_fault,&page_fault);
    set_intr_gate(TRAP_spurious_int,&spurious_interrupt_bug);
    set_intr_gate(TRAP_copro_error,&coprocessor_error);
    set_intr_gate(TRAP_alignment_check,&alignment_check);
    set_intr_gate(TRAP_machine_check,&machine_check);
    set_intr_gate(TRAP_simd_error,&simd_coprocessor_error);
    set_intr_gate(TRAP_deferred_nmi,&deferred_nmi);

#if defined(__i386__)
    /* Hypercall gate is DPL 1: callable from the guest kernel ring only. */
    _set_gate(idt_table+HYPERCALL_VECTOR, 14, 1, &hypercall);
#endif

    /* CPU0 uses the master IDT. */
    idt_tables[0] = idt_table;

    percpu_traps_init();

    cpu_init();

    open_softirq(NMI_SOFTIRQ, nmi_softirq);
}
782 long do_set_trap_table(trap_info_t *traps)
783 {
784 trap_info_t cur;
785 trap_info_t *dst = current->arch.traps;
787 LOCK_BIGLOCK(current->domain);
789 for ( ; ; )
790 {
791 if ( hypercall_preempt_check() )
792 {
793 UNLOCK_BIGLOCK(current->domain);
794 return hypercall1_create_continuation(
795 __HYPERVISOR_set_trap_table, traps);
796 }
798 if ( copy_from_user(&cur, traps, sizeof(cur)) ) return -EFAULT;
800 if ( cur.address == 0 ) break;
802 if ( !VALID_CODESEL(cur.cs) ) return -EPERM;
804 memcpy(dst+cur.vector, &cur, sizeof(cur));
805 traps++;
806 }
808 UNLOCK_BIGLOCK(current->domain);
810 return 0;
811 }
814 long do_set_callbacks(unsigned long event_selector,
815 unsigned long event_address,
816 unsigned long failsafe_selector,
817 unsigned long failsafe_address)
818 {
819 struct exec_domain *d = current;
821 if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
822 return -EPERM;
824 d->arch.event_selector = event_selector;
825 d->arch.event_address = event_address;
826 d->arch.failsafe_selector = failsafe_selector;
827 d->arch.failsafe_address = failsafe_address;
829 return 0;
830 }
/*
 * Hypercall: the guest wants CR0.TS set. Record its virtual TS bit and
 * set the real TS so the next FPU use traps to math_state_restore().
 */
long do_fpu_taskswitch(void)
{
    set_bit(EDF_GUEST_STTS, &current->ed_flags);
    stts();
    return 0;
}
/*
 * Validate and set debug register 'reg' (0-3, 6, 7) for exec domain p,
 * writing the hardware register too when p is the running domain.
 * Returns 0, -EPERM for disallowed values, -EINVAL for a bad register.
 * Each DRn needs its own asm statement, hence the per-case duplication.
 */
long set_debugreg(struct exec_domain *p, int reg, unsigned long value)
{
    int i;

    switch ( reg )
    {
    case 0:
        /* Breakpoint addresses must lie below the hypervisor area. */
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db0" : : "r" (value) );
        break;
    case 1:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db1" : : "r" (value) );
        break;
    case 2:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db2" : : "r" (value) );
        break;
    case 3:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db3" : : "r" (value) );
        break;
    case 6:
        /*
         * DR6: Bits 4-11,16-31 reserved (set to 1).
         *      Bit 12 reserved (set to 0).
         */
        value &= 0xffffefff; /* reserved bits => 0 */
        value |= 0xffff0ff0; /* reserved bits => 1 */
        if ( p == current )
            __asm__ ( "mov %0, %%db6" : : "r" (value) );
        break;
    case 7:
        /*
         * DR7: Bit 10 reserved (set to 1).
         *      Bits 11-12,14-15 reserved (set to 0).
         * Privileged bits:
         *      GD (bit 13): must be 0.
         *      R/Wn (bits 16-17,20-21,24-25,28-29): mustn't be 10.
         *      LENn (bits 18-19,22-23,26-27,30-31): mustn't be 10.
         */
        /* DR7 == 0 => debugging disabled for this domain. */
        if ( value != 0 )
        {
            value &= 0xffff27ff; /* reserved bits => 0 */
            value |= 0x00000400; /* reserved bits => 1 */
            if ( (value & (1<<13)) != 0 ) return -EPERM;
            /* Reject the '10' encoding in every R/Wn and LENn field. */
            for ( i = 0; i < 16; i += 2 )
                if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
        }
        if ( p == current )
            __asm__ ( "mov %0, %%db7" : : "r" (value) );
        break;
    default:
        return -EINVAL;
    }

    /* Record the (possibly sanitised) value for later context switches. */
    p->arch.debugreg[reg] = value;
    return 0;
}
/* Hypercall wrapper: set a debug register for the calling domain. */
long do_set_debugreg(int reg, unsigned long value)
{
    return set_debugreg(current, reg, value);
}

/* Hypercall: read back a saved debug register (-EINVAL if out of range). */
unsigned long do_get_debugreg(int reg)
{
    if ( (reg < 0) || (reg > 7) ) return -EINVAL;
    return current->arch.debugreg[reg];
}