debuggers.hg: view of xen/arch/i386/traps.c @ 656:c7557b3832b9

bitkeeper revision 1.339.1.6 (3f12cffdzSdqoflJR3gfS-S45xcteA)

changeset:  nmi.c: new file. Many files: NMI watchdog support in Xen.
author:     kaf24@scramble.cl.cam.ac.uk
date:       Mon Jul 14 15:45:01 2003 +0000 (2003-07-14)
parents:    93c7dcf4a80e
children:   c085fac641e2
/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <xeno/config.h>
#include <xeno/init.h>
#include <xeno/interrupt.h>
#include <xeno/sched.h>
#include <xeno/lib.h>
#include <xeno/errno.h>
#include <xeno/mm.h>
#include <asm/ptrace.h>
#include <xeno/delay.h>
#include <xeno/spinlock.h>
#include <xeno/irq.h>
#include <asm/domain_page.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/i387.h>

#define GTBF_TRAP        1
#define GTBF_TRAP_NOCODE 2
#define GTBF_TRAP_CR2    4
struct guest_trap_bounce {
    unsigned long  error_code; /*  0 */
    unsigned long  cr2;        /*  4 */
    unsigned short flags;      /*  8 */
    unsigned short cs;         /* 10 */
    unsigned long  eip;        /* 12 */
} guest_trap_bounce[NR_CPUS] = { { 0 } };
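/*
 * NB: the byte-offset annotations above matter. The structure appears to be
 * read at fixed offsets from assembly (presumably the ring-transition code
 * in entry.S), so fields must not be reordered or repacked.
 */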
asmlinkage int hypervisor_call(void);
asmlinkage void lcall7(void);
asmlinkage void lcall27(void);

/* Master table, and the one used by CPU0. */
struct desc_struct idt_table[256] = { {0, 0}, };
/* All other CPUs have their own copy. */
struct desc_struct *idt_tables[NR_CPUS] = { 0 };

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

int kstack_depth_to_print = 8*20;

static inline int kernel_text_address(unsigned long addr)
{
    if (addr >= (unsigned long) &_stext &&
        addr <= (unsigned long) &_etext)
        return 1;
    return 0;
}

void show_trace(unsigned long *stack)
{
    int i;
    unsigned long addr;

    if (!stack)
        stack = (unsigned long *)&stack;

    printk("Call Trace: ");
    i = 1;
    while (((long)stack & (THREAD_SIZE-1)) != 0) {
        addr = *stack++;
        if (kernel_text_address(addr)) {
            if (i && ((i % 6) == 0))
                printk("\n ");
            printk("[<%08lx>] ", addr);
            i++;
        }
    }
    printk("\n");
}

void show_trace_task(struct task_struct *tsk)
{
    unsigned long esp = tsk->thread.esp;

    /* User space on another CPU? */
    if ((esp ^ (unsigned long)tsk) & (PAGE_MASK<<1))
        return;
    show_trace((unsigned long *)esp);
}

void show_stack(unsigned long *esp)
{
    unsigned long *stack;
    int i;

    /* Debugging aid: "show_stack(NULL);" prints the back trace for this cpu. */
    if (esp == NULL)
        esp = (unsigned long *)&esp;

    printk("Stack trace from ESP=%p:\n", esp);

    stack = esp;
    for (i = 0; i < kstack_depth_to_print; i++) {
        if (((long)stack & (THREAD_SIZE-1)) == 0)
            break;
        if (i && ((i % 8) == 0))
            printk("\n ");
        if ( kernel_text_address(*stack) )
            printk("[%08lx] ", *stack++);
        else
            printk("%08lx ", *stack++);
    }
    printk("\n");
    /*show_trace(esp);*/
}

void show_registers(struct pt_regs *regs)
{
    unsigned long esp;
    unsigned short ss;

    esp = (unsigned long)(&regs->esp);
    ss = __HYPERVISOR_DS;
    if ( regs->xcs & 3 )
    {
        esp = regs->esp;
        ss = regs->xss & 0xffff;
    }

    printk("CPU: %d\nEIP: %04x:[<%08lx>] \nEFLAGS: %08lx\n",
           smp_processor_id(), 0xffff & regs->xcs, regs->eip, regs->eflags);
    printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
           regs->eax, regs->ebx, regs->ecx, regs->edx);
    printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
           regs->esi, regs->edi, regs->ebp, esp);
    printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
           regs->xds & 0xffff, regs->xes & 0xffff,
           regs->xfs & 0xffff, regs->xgs & 0xffff, ss);

    show_stack(&regs->esp);
}

spinlock_t die_lock = SPIN_LOCK_UNLOCKED;

void die(const char *str, struct pt_regs *regs, long err)
{
    unsigned long flags;
    spin_lock_irqsave(&die_lock, flags);
    printk("%s: %04lx,%04lx\n", str, err >> 16, err & 0xffff);
    show_registers(regs);
    spin_unlock_irqrestore(&die_lock, flags);
    panic("HYPERVISOR DEATH!!\n");
}

static inline void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
    if (!(3 & regs->xcs)) die(str, regs, err);
}

static inline void do_trap(int trapnr, char *str,
                           struct pt_regs *regs,
                           long error_code, int use_error_code)
{
    struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
    trap_info_t *ti;
    unsigned long fixup;

    if (!(regs->xcs & 3))
        goto fault_in_hypervisor;

    ti = current->thread.traps + trapnr;
    gtb->flags = use_error_code ? GTBF_TRAP : GTBF_TRAP_NOCODE;
    gtb->error_code = error_code;
    gtb->cs = ti->cs;
    gtb->eip = ti->address;
    return;

 fault_in_hypervisor:

    if ( (fixup = search_exception_table(regs->eip)) != 0 )
    {
        regs->eip = fixup;
        regs->xfs = regs->xgs = 0;
        return;
    }

    show_registers(regs);
    panic("CPU%d FATAL TRAP: vector = %d (%s)\n"
          "[error_code=%08x]\n",
          smp_processor_id(), trapnr, str, error_code);
}

#define DO_ERROR_NOCODE(trapnr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
    do_trap(trapnr, str, regs, error_code, 0); \
}

#define DO_ERROR(trapnr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
    do_trap(trapnr, str, regs, error_code, 1); \
}

DO_ERROR_NOCODE( 0, "divide error", divide_error)
DO_ERROR_NOCODE( 3, "int3", int3)
DO_ERROR_NOCODE( 4, "overflow", overflow)
DO_ERROR_NOCODE( 5, "bounds", bounds)
DO_ERROR_NOCODE( 6, "invalid operand", invalid_op)
DO_ERROR_NOCODE( 7, "device not available", device_not_available)
DO_ERROR( 8, "double fault", double_fault)
DO_ERROR_NOCODE( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, "invalid TSS", invalid_TSS)
DO_ERROR(11, "segment not present", segment_not_present)
DO_ERROR(12, "stack segment", stack_segment)
/* Vector 15 reserved by Intel. */
DO_ERROR_NOCODE(16, "fpu error", coprocessor_error)
DO_ERROR(17, "alignment check", alignment_check)
DO_ERROR_NOCODE(18, "machine check", machine_check)
DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
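/*
 * For reference, DO_ERROR_NOCODE(0, "divide error", divide_error) above
 * expands to the following handler (a divide error pushes no error code,
 * hence use_error_code == 0):
 *
 *   asmlinkage void do_divide_error(struct pt_regs *regs, long error_code)
 *   {
 *       do_trap(0, "divide error", regs, error_code, 0);
 *   }
 */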
asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
{
    struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
    trap_info_t *ti;
    l2_pgentry_t *pl2e;
    l1_pgentry_t *pl1e;
    unsigned long addr, off, fixup, l2e, l1e, *ldt_page;
    struct task_struct *p = current;
    struct pfn_info *page;
    int i;

    __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );

    if ( unlikely(addr > PAGE_OFFSET) )
        goto fault_in_xen_space;

 bounce_fault:

    if ( unlikely(!(regs->xcs & 3)) )
        goto fault_in_hypervisor;

    ti = p->thread.traps + 14;
    gtb->flags = GTBF_TRAP_CR2; /* page fault pushes %cr2 */
    gtb->cr2 = addr;
    gtb->error_code = error_code;
    gtb->cs = ti->cs;
    gtb->eip = ti->address;
    return;

    /*
     * FAULT IN XEN ADDRESS SPACE:
     * We only deal with one kind -- a fault in the shadow LDT mapping.
     * If this occurs we pull a mapping from the guest's LDT, if it is
     * valid. Otherwise we send the fault up to the guest OS to be handled.
     */
 fault_in_xen_space:

    if ( (addr < LDT_VIRT_START) ||
         (addr >= (LDT_VIRT_START + (p->mm.ldt_ents*LDT_ENTRY_SIZE))) )
        goto bounce_fault;

    off  = addr - LDT_VIRT_START;
    addr = p->mm.ldt_base + off;

    spin_lock(&p->page_lock);

    pl2e = map_domain_mem(pagetable_val(p->mm.pagetable));
    l2e  = l2_pgentry_val(pl2e[l2_table_offset(addr)]);
    unmap_domain_mem(pl2e);
    if ( !(l2e & _PAGE_PRESENT) )
        goto unlock_and_bounce_fault;

    pl1e = map_domain_mem(l2e & PAGE_MASK);
    l1e  = l1_pgentry_val(pl1e[l1_table_offset(addr)]);
    unmap_domain_mem(pl1e);
    if ( !(l1e & _PAGE_PRESENT) )
        goto unlock_and_bounce_fault;

    page = frame_table + (l1e >> PAGE_SHIFT);
    if ( (page->flags & PG_type_mask) != PGT_ldt_page )
    {
        if ( page->type_count != 0 )
            goto unlock_and_bounce_fault;

        /* Check all potential LDT entries in the page. */
        ldt_page = map_domain_mem(l1e & PAGE_MASK);
        for ( i = 0; i < 512; i++ )
            if ( !check_descriptor(ldt_page[i*2], ldt_page[i*2+1]) )
                goto unlock_and_bounce_fault;
        unmap_domain_mem(ldt_page);

        page->flags &= ~PG_type_mask;
        page->flags |= PGT_ldt_page;
    }

    /* Success! */
    get_page_type(page);
    get_page_tot(page);
    p->mm.perdomain_pt[l1_table_offset(off)+16] = mk_l1_pgentry(l1e|_PAGE_RW);
    p->mm.shadow_ldt_mapcnt++;

    spin_unlock(&p->page_lock);
    return;

 unlock_and_bounce_fault:

    spin_unlock(&p->page_lock);
    goto bounce_fault;

 fault_in_hypervisor:

    if ( (fixup = search_exception_table(regs->eip)) != 0 )
    {
        regs->eip = fixup;
        regs->xfs = regs->xgs = 0;
        return;
    }

    if ( addr >= PAGE_OFFSET )
    {
        unsigned long page;
        unsigned long *pde;
        pde = (unsigned long *)idle_pg_table[smp_processor_id()];
        page = pde[addr >> L2_PAGETABLE_SHIFT];
        printk("*pde = %08lx\n", page);
        if ( page & _PAGE_PRESENT )
        {
            page &= PAGE_MASK;
            page = ((unsigned long *) __va(page))[(addr&0x3ff000)>>PAGE_SHIFT];
            printk(" *pte = %08lx\n", page);
        }
    }

    show_registers(regs);
    panic("CPU%d FATAL PAGE FAULT\n"
          "[error_code=%08x]\n"
          "Faulting linear address might be %08lx\n",
          smp_processor_id(), error_code, addr);
}

asmlinkage void do_general_protection(struct pt_regs *regs, long error_code)
{
    struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
    trap_info_t *ti;
    unsigned long fixup;

    /* Bad news if the error occurred in ring 0, or resulted from an interrupt. */
    if (!(regs->xcs & 3) || (error_code & 1))
        goto gp_in_kernel;

    /*
     * Cunning trick to allow arbitrary "INT n" handling.
     *
     * We set DPL == 0 on all vectors in the IDT. This prevents any INT <n>
     * instruction from trapping to the appropriate vector, when that might
     * not be expected by Xen or the guest OS. For example, that entry might
     * be for a fault handler (unlike traps, faults don't increment EIP), or
     * might expect an error code on the stack (which a software trap never
     * provides), or might be a hardware interrupt handler that doesn't like
     * being called spuriously.
     *
     * Instead, a GPF occurs with the faulting IDT vector in the error code.
     * Bit 1 is set to indicate that an IDT entry caused the fault. Bit 0 is
     * clear to indicate that it's a software fault, not hardware.
     *
     * NOTE: Vectors 3 and 4 are dealt with from their own handler. This is
     * okay because they can only be triggered by an explicit DPL-checked
     * instruction. The DPL specified by the guest OS for these vectors is
     * NOT CHECKED!!
     */
    if ( (error_code & 3) == 2 )
    {
        /* This fault must be due to an <INT n> instruction. */
        ti = current->thread.traps + (error_code>>3);
        if ( ti->dpl >= (regs->xcs & 3) )
        {
            gtb->flags = GTBF_TRAP_NOCODE;
            gtb->cs = ti->cs;
            gtb->eip = ti->address;
            regs->eip += 2;
            return;
        }
    }

    /* Pass on GPF as is. */
    ti = current->thread.traps + 13;
    gtb->flags = GTBF_TRAP;
    gtb->error_code = error_code;
    gtb->cs = ti->cs;
    gtb->eip = ti->address;
    return;

 gp_in_kernel:

    if ( (fixup = search_exception_table(regs->eip)) != 0 )
    {
        regs->eip = fixup;
        regs->xfs = regs->xgs = 0;
        return;
    }

    die("general protection fault", regs, error_code);
}
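/*
 * Worked example of the "cunning trick" above: a guest at ring 1 executes
 * INT $0x80. With DPL == 0 on the IDT entry this raises #GP with error code
 * (0x80 << 3) | 2 = 0x402. Since (error_code & 3) == 2, the vector is
 * recovered as 0x402 >> 3 = 0x80, the DPL the guest registered for vector
 * 0x80 is checked against ring 1, and on success the bounce frame targets
 * the guest handler with EIP advanced past the two-byte INT instruction.
 */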
static void mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
    printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
    printk("You probably have a hardware problem with your RAM chips\n");

    /* Clear and disable the memory parity error line. */
    reason = (reason & 0xf) | 4;
    outb(reason, 0x61);
}

static void io_check_error(unsigned char reason, struct pt_regs *regs)
{
    unsigned long i;

    printk("NMI: IOCK error (debug interrupt?)\n");
    show_registers(regs);

    /* Re-enable the IOCK line, wait for a few seconds. */
    reason = (reason & 0xf) | 8;
    outb(reason, 0x61);
    i = 2000;
    while (--i) udelay(1000);
    reason &= ~8;
    outb(reason, 0x61);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
    printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
    printk("Dazed and confused, but trying to continue\n");
    printk("Do you have a strange power saving mode enabled?\n");
}

asmlinkage void do_nmi(struct pt_regs *regs, long error_code)
{
    unsigned char reason = inb(0x61);

    ++nmi_count(smp_processor_id());

    if (!(reason & 0xc0)) {
#if CONFIG_X86_LOCAL_APIC
        if (nmi_watchdog) {
            nmi_watchdog_tick(regs);
            return;
        }
#endif
        unknown_nmi_error(reason, regs);
        return;
    }
    if (reason & 0x80)
        mem_parity_error(reason, regs);
    if (reason & 0x40)
        io_check_error(reason, regs);
    /*
     * Reassert NMI in case it became active meanwhile
     * as it's edge-triggered.
     */
    outb(0x8f, 0x70);
    inb(0x71);   /* dummy */
    outb(0x0f, 0x70);
    inb(0x71);   /* dummy */
}
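/*
 * System Control Port B (0x61) reason bits tested above: bit 7 indicates a
 * memory parity (SERR#) NMI and bit 6 an I/O check (IOCHK#) NMI; if neither
 * is set, the NMI is either the local APIC watchdog or of unknown origin.
 * Ports 0x70/0x71 are the RTC/CMOS index and data ports; bit 7 of the index
 * written to 0x70 is the NMI mask, so the 0x8f -> 0x0f sequence masks and
 * then unmasks NMI, re-arming the edge-triggered line.
 */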
asmlinkage void math_state_restore(struct pt_regs *regs, long error_code)
{
    /* Prevent recursion. */
    clts();

    if ( !(current->flags & PF_USEDFPU) )
    {
        if ( current->flags & PF_DONEFPUINIT )
            restore_fpu(current);
        else
            init_fpu();
        current->flags |= PF_USEDFPU; /* So we fnsave on switch_to(). */
    }

    if ( current->flags & PF_GUEST_STTS )
    {
        struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
        gtb->flags = GTBF_TRAP_NOCODE;
        gtb->cs = current->thread.traps[7].cs;
        gtb->eip = current->thread.traps[7].address;
        current->flags &= ~PF_GUEST_STTS;
    }
}
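/*
 * Sketch of the lazy-FPU flow implemented above and by do_fpu_taskswitch()
 * below: in place of setting CR0.TS itself, the guest makes the
 * fpu_taskswitch hypercall, which sets PF_GUEST_STTS and TS. The next FPU
 * instruction then traps here (vector 7), Xen restores or initialises the
 * FPU state, and the trap is bounced onward so the guest's own
 * device-not-available handler can run its lazy-switch logic.
 */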
asmlinkage void do_debug(struct pt_regs *regs, long error_code)
{
    unsigned int condition;
    struct task_struct *tsk = current;
    struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();

    __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));

    /* Mask out spurious debug traps due to lazy DR7 setting. */
    if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
         (tsk->thread.debugreg[7] == 0) )
    {
        __asm__("movl %0,%%db7" : : "r" (0));
        return;
    }

    if ( (regs->xcs & 3) == 0 )
    {
        /* Clear TF just for absolute sanity. */
        regs->eflags &= ~EF_TF;
        /*
         * Basically, we ignore watchpoints when they trigger in
         * the hypervisor. This may happen when a buffer is passed
         * to us which previously had a watchpoint set on it.
         * No need to bump EIP; the only faulting trap is an
         * instruction breakpoint, which can't happen to us.
         */
        return;
    }

    /* Save debug status register where guest OS can peek at it. */
    tsk->thread.debugreg[6] = condition;

    gtb->flags = GTBF_TRAP_NOCODE;
    gtb->cs = tsk->thread.traps[1].cs;
    gtb->eip = tsk->thread.traps[1].address;
}

asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs,
                                          long error_code)
{ /* nothing */ }

#define _set_gate(gate_addr,type,dpl,addr) \
do { \
  int __d0, __d1; \
  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
                        "movw %4,%%dx\n\t" \
                        "movl %%eax,%0\n\t" \
                        "movl %%edx,%1" \
                        :"=m" (*((long *) (gate_addr))), \
                         "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
                        :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
                         "3" ((char *) (addr)),"2" (__HYPERVISOR_CS << 16)); \
} while (0)
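/*
 * The descriptor _set_gate() assembles is the standard i386 gate layout.
 * For set_intr_gate(14, &page_fault) below, type 14 and dpl 0 give:
 *
 *   word 0: low 16 bits of &page_fault
 *   word 1: __HYPERVISOR_CS
 *   word 2: 0x8000 | (0<<13) | (14<<8) = 0x8E00
 *           (present, DPL 0, 32-bit interrupt gate)
 *   word 3: high 16 bits of &page_fault
 */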
void set_intr_gate(unsigned int n, void *addr)
{
    _set_gate(idt_table+n,14,0,addr);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
    _set_gate(idt_table+n,14,3,addr);
}

#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
    *((gate_addr)+1) = ((base) & 0xff000000) | \
                       (((base) & 0x00ff0000)>>16) | \
                       ((limit) & 0xf0000) | \
                       ((dpl)<<13) | \
                       (0x00408000) | \
                       ((type)<<8); \
    *(gate_addr) = (((base) & 0x0000ffff)<<16) | \
                   ((limit) & 0x0ffff); }

#define _set_tssldt_desc(n,addr,limit,type) \
__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
                      "movw %%ax,2(%2)\n\t" \
                      "rorl $16,%%eax\n\t" \
                      "movb %%al,4(%2)\n\t" \
                      "movb %4,5(%2)\n\t" \
                      "movb $0,6(%2)\n\t" \
                      "movb %%ah,7(%2)\n\t" \
                      "rorl $16,%%eax" \
                      : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))

void set_tss_desc(unsigned int n, void *addr)
{
    _set_tssldt_desc(gdt_table+__TSS(n), (int)addr, 235, 0x89);
}

void __init trap_init(void)
{
    /*
     * Note that interrupt gates are always used, rather than trap gates. We
     * must have interrupts disabled until DS/ES/FS/GS are saved because the
     * first activation must have the "bad" value(s) for these registers and
     * we may lose them if another activation is installed before they are
     * saved. The page-fault handler also needs interrupts disabled until
     * %cr2 has been read and saved on the stack.
     */
    set_intr_gate(0,&divide_error);
    set_intr_gate(1,&debug);
    set_intr_gate(2,&nmi);
    set_system_gate(3,&int3);     /* usable from all privilege levels */
    set_system_gate(4,&overflow); /* usable from all privilege levels */
    set_intr_gate(5,&bounds);
    set_intr_gate(6,&invalid_op);
    set_intr_gate(7,&device_not_available);
    set_intr_gate(8,&double_fault);
    set_intr_gate(9,&coprocessor_segment_overrun);
    set_intr_gate(10,&invalid_TSS);
    set_intr_gate(11,&segment_not_present);
    set_intr_gate(12,&stack_segment);
    set_intr_gate(13,&general_protection);
    set_intr_gate(14,&page_fault);
    set_intr_gate(15,&spurious_interrupt_bug);
    set_intr_gate(16,&coprocessor_error);
    set_intr_gate(17,&alignment_check);
    set_intr_gate(18,&machine_check);
    set_intr_gate(19,&simd_coprocessor_error);

    /* Only ring 1 can access monitor services. */
    _set_gate(idt_table+HYPERVISOR_CALL_VECTOR,14,1,&hypervisor_call);

    /* CPU0 uses the master IDT. */
    idt_tables[0] = idt_table;

    /*
     * Should be a barrier for any external CPU state.
     */
    {
        extern void cpu_init(void);
        cpu_init();
    }
}

long do_set_trap_table(trap_info_t *traps)
{
    trap_info_t cur;
    trap_info_t *dst = current->thread.traps;

    for ( ; ; )
    {
        if ( copy_from_user(&cur, traps, sizeof(cur)) ) return -EFAULT;

        if ( cur.address == 0 ) break;

        if ( !VALID_CODESEL(cur.cs) ) return -EPERM;

        memcpy(dst+cur.vector, &cur, sizeof(cur));
        traps++;
    }

    return 0;
}
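/*
 * Illustrative guest-side table for the hypercall above: entries are copied
 * one at a time until an entry with a zero address is reached, so a guest
 * might pass something like
 *
 *   trap_info_t traps[] = {
 *       { 14, 0, GUEST_CS, (unsigned long)pf_handler }, /* vector 14 */
 *       {  0, 0, 0, 0 }                                 /* terminator */
 *   };
 *
 * (The field order shown is illustrative only; the real trap_info_t layout
 * is defined elsewhere in the tree, and GUEST_CS/pf_handler are
 * hypothetical guest symbols.)
 */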
long do_set_callbacks(unsigned long event_selector,
                      unsigned long event_address,
                      unsigned long failsafe_selector,
                      unsigned long failsafe_address)
{
    struct task_struct *p = current;

    if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
        return -EPERM;

    p->event_selector    = event_selector;
    p->event_address     = event_address;
    p->failsafe_selector = failsafe_selector;
    p->failsafe_address  = failsafe_address;

    return 0;
}

long do_set_fast_trap(int idx)
{
    trap_info_t *ti;

    /* Index 0 is special: it disables fast traps. */
    if ( idx == 0 )
    {
        CLEAR_FAST_TRAP(&current->thread);
        SET_DEFAULT_FAST_TRAP(&current->thread);
        return 0;
    }

    /*
     * We only fast-trap vectors 0x20-0x2f, and vector 0x80.
     * The former range is used by Windows and MS-DOS.
     * Vector 0x80 is used by Linux and the BSD variants.
     */
    if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) return -1;

    ti = current->thread.traps + idx;

    CLEAR_FAST_TRAP(&current->thread);

    current->thread.fast_trap_idx    = idx;
    current->thread.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
    current->thread.fast_trap_desc.b =
        (ti->address & 0xffff0000) | 0x8f00 | (ti->dpl&3)<<13;

    SET_FAST_TRAP(&current->thread);

    return 0;
}
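/*
 * Decoding the descriptor assembled above: .a = (cs << 16) | (eip & 0xffff)
 * and .b = (eip & 0xffff0000) | 0x8f00 | (dpl << 13) is exactly the IDT
 * gate layout, with 0x8f00 supplying the present bit and gate type 0xF
 * (32-bit trap gate). SET_FAST_TRAP() can therefore drop the entry straight
 * into the IDT slot for the chosen vector.
 */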
long do_fpu_taskswitch(void)
{
    current->flags |= PF_GUEST_STTS;
    stts();
    return 0;
}

long do_set_debugreg(int reg, unsigned long value)
{
    int i;

    switch ( reg )
    {
    case 0:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        __asm__ ( "movl %0, %%db0" : : "r" (value) );
        break;
    case 1:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        __asm__ ( "movl %0, %%db1" : : "r" (value) );
        break;
    case 2:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        __asm__ ( "movl %0, %%db2" : : "r" (value) );
        break;
    case 3:
        if ( value > (PAGE_OFFSET-4) ) return -EPERM;
        __asm__ ( "movl %0, %%db3" : : "r" (value) );
        break;
    case 6:
        /*
         * DR6: Bits 4-11,16-31 reserved (set to 1).
         *      Bit 12 reserved (set to 0).
         */
        value &= 0xffffefff; /* reserved bits => 0 */
        value |= 0xffff0ff0; /* reserved bits => 1 */
        __asm__ ( "movl %0, %%db6" : : "r" (value) );
        break;
    case 7:
        /*
         * DR7: Bit 10 reserved (set to 1).
         *      Bits 11-12,14-15 reserved (set to 0).
         * Privileged bits:
         *      GD (bit 13): must be 0.
         *      R/Wn (bits 16-17,20-21,24-25,28-29): mustn't be 10.
         *      LENn (bits 18-19,22-23,26-27,30-31): mustn't be 10.
         */
        /* DR7 == 0 => debugging disabled for this domain. */
        if ( value != 0 )
        {
            value &= 0xffff27ff; /* reserved bits => 0 */
            value |= 0x00000400; /* reserved bits => 1 */
            if ( (value & (1<<13)) != 0 ) return -EPERM;
            for ( i = 0; i < 16; i += 2 )
                if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
        }
        __asm__ ( "movl %0, %%db7" : : "r" (value) );
        break;
    default:
        return -EINVAL;
    }

    current->thread.debugreg[reg] = value;
    return 0;
}
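/*
 * Typical guest usage of the hypercall above (values illustrative only):
 *
 *   do_set_debugreg(0, watch_addr);   /* DR0 = linear address to watch    */
 *   do_set_debugreg(7, 0x000d0001);   /* L0 set; R/W0=01 (write),
 *                                        LEN0=11 (4 bytes)                */
 *
 * Any DR7 value must survive the sanitising above: reserved bits are
 * forced, GD must be clear, and no R/Wn or LENn field may be the undefined
 * encoding 10b.
 */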
unsigned long do_get_debugreg(int reg)
{
    if ( (reg < 0) || (reg > 7) ) return -EINVAL;
    return current->thread.debugreg[reg];
}