xen-vtx-unstable: xen/arch/x86/traps.c @ 6774:4d899a738d59 ("merge?")

author    cl349@firebug.cl.cam.ac.uk
date      Tue Sep 13 15:05:49 2005 +0000 (2005-09-13)
parents   bd951d23d713 813c37b68376
children  e7c7196fa329 8ca0f98ba8e2
/******************************************************************************
 * arch/x86/traps.c
 *
 * Modifications to Linux original are copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/sched.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/mm.h>
#include <xen/console.h>
#include <asm/regs.h>
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/spinlock.h>
#include <xen/irq.h>
#include <xen/perfc.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/symbols.h>
#include <asm/shadow.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/smp.h>
#include <asm/flushtlb.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/debugger.h>
#include <asm/msr.h>
#include <asm/x86_emulate.h>
/*
 * opt_nmi: one of 'ignore', 'dom0', or 'fatal'.
 *  fatal:  Xen prints diagnostic message and then hangs.
 *  dom0:   The NMI is virtualised to DOM0.
 *  ignore: The NMI error is cleared and ignored.
 */
#ifdef NDEBUG
char opt_nmi[10] = "dom0";
#else
char opt_nmi[10] = "fatal";
#endif
string_param("nmi", opt_nmi);
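/* The "nmi=" boot parameter registered above selects the policy at boot
 * time, e.g. "nmi=ignore". */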
/* Master table, used by all CPUs on x86/64, and by CPU0 on x86/32. */
idt_entry_t idt_table[IDT_ENTRIES];

#define DECLARE_TRAP_HANDLER(_name) \
asmlinkage void _name(void); \
asmlinkage int do_ ## _name(struct cpu_user_regs *regs)

asmlinkage void nmi(void);
DECLARE_TRAP_HANDLER(divide_error);
DECLARE_TRAP_HANDLER(debug);
DECLARE_TRAP_HANDLER(int3);
DECLARE_TRAP_HANDLER(overflow);
DECLARE_TRAP_HANDLER(bounds);
DECLARE_TRAP_HANDLER(invalid_op);
DECLARE_TRAP_HANDLER(device_not_available);
DECLARE_TRAP_HANDLER(coprocessor_segment_overrun);
DECLARE_TRAP_HANDLER(invalid_TSS);
DECLARE_TRAP_HANDLER(segment_not_present);
DECLARE_TRAP_HANDLER(stack_segment);
DECLARE_TRAP_HANDLER(general_protection);
DECLARE_TRAP_HANDLER(page_fault);
DECLARE_TRAP_HANDLER(coprocessor_error);
DECLARE_TRAP_HANDLER(simd_coprocessor_error);
DECLARE_TRAP_HANDLER(alignment_check);
DECLARE_TRAP_HANDLER(spurious_interrupt_bug);
DECLARE_TRAP_HANDLER(machine_check);

long do_set_debugreg(int reg, unsigned long value);
unsigned long do_get_debugreg(int reg);

static int debug_stack_lines = 20;
integer_param("debug_stack_lines", debug_stack_lines);

#ifdef CONFIG_X86_32
#define stack_words_per_line 8
#define ESP_BEFORE_EXCEPTION(regs) ((unsigned long *)&regs->esp)
#else
#define stack_words_per_line 4
#define ESP_BEFORE_EXCEPTION(regs) ((unsigned long *)regs->rsp)
#endif
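/*
 * ESP_BEFORE_EXCEPTION differs per architecture because, on x86/32, a
 * same-privilege exception pushes no ESP/SS: the pre-exception stack
 * pointer is the address of the saved frame's esp field itself. On x86/64,
 * RSP is always pushed, so the saved value is used directly.
 */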
int is_kernel_text(unsigned long addr)
{
    extern char _stext, _etext;
    if (addr >= (unsigned long) &_stext &&
        addr <= (unsigned long) &_etext)
        return 1;

    return 0;
}

unsigned long kernel_text_end(void)
{
    extern char _etext;
    return (unsigned long) &_etext;
}

static void show_guest_stack(struct cpu_user_regs *regs)
{
    int i;
    unsigned long *stack = (unsigned long *)regs->esp, addr;

    printk("Guest stack trace from "__OP"sp=%p:\n ", stack);

    for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ )
    {
        if ( ((long)stack & (STACK_SIZE-BYTES_PER_LONG)) == 0 )
            break;
        if ( get_user(addr, stack) )
        {
            if ( i != 0 )
                printk("\n ");
            printk("Fault while accessing guest memory.");
            i = 1;
            break;
        }
        if ( (i != 0) && ((i % stack_words_per_line) == 0) )
            printk("\n ");
        printk("%p ", _p(addr));
        stack++;
    }
    if ( i == 0 )
        printk("Stack empty.");
    printk("\n");
}
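/*
 * Two variants of show_trace() follow: NDEBUG (release) builds cannot rely
 * on frame pointers, so the trace conservatively prints anything on the
 * stack that looks like a Xen text address; debug builds walk the
 * frame-pointer chain for an exact backtrace.
 */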
#ifdef NDEBUG

static void show_trace(struct cpu_user_regs *regs)
{
    unsigned long *stack = ESP_BEFORE_EXCEPTION(regs), addr;

    printk("Xen call trace:\n ");

    printk("[<%p>]", _p(regs->eip));
    print_symbol(" %s\n ", regs->eip);

    while ( ((long)stack & (STACK_SIZE-BYTES_PER_LONG)) != 0 )
    {
        addr = *stack++;
        if ( is_kernel_text(addr) )
        {
            printk("[<%p>]", _p(addr));
            print_symbol(" %s\n ", addr);
        }
    }

    printk("\n");
}

#else

static void show_trace(struct cpu_user_regs *regs)
{
    unsigned long *frame, next, addr, low, high;

    printk("Xen call trace:\n ");

    printk("[<%p>]", _p(regs->eip));
    print_symbol(" %s\n ", regs->eip);

    /* Bounds for range of valid frame pointer. */
    low  = (unsigned long)(ESP_BEFORE_EXCEPTION(regs) - 2);
    high = (low & ~(STACK_SIZE - 1)) + (STACK_SIZE - sizeof(struct cpu_info));

    /* The initial frame pointer. */
    next = regs->ebp;

    for ( ; ; )
    {
        /* Valid frame pointer? */
        if ( (next < low) || (next > high) )
        {
            /*
             * Exception stack frames have a different layout, denoted by an
             * inverted frame pointer.
             */
            next = ~next;
            if ( (next < low) || (next > high) )
                break;
            frame = (unsigned long *)next;
            next = frame[0];
            addr = frame[(offsetof(struct cpu_user_regs, eip) -
                          offsetof(struct cpu_user_regs, ebp))
                         / BYTES_PER_LONG];
        }
        else
        {
            /* Ordinary stack frame. */
            frame = (unsigned long *)next;
            next = frame[0];
            addr = frame[1];
        }

        printk("[<%p>]", _p(addr));
        print_symbol(" %s\n ", addr);

        low = (unsigned long)&frame[2];
    }

    printk("\n");
}

#endif

void show_stack(struct cpu_user_regs *regs)
{
    unsigned long *stack = ESP_BEFORE_EXCEPTION(regs), addr;
    int i;

    if ( GUEST_CONTEXT(current, regs) )
        return show_guest_stack(regs);

    printk("Xen stack trace from "__OP"sp=%p:\n ", stack);

    for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ )
    {
        if ( ((long)stack & (STACK_SIZE-BYTES_PER_LONG)) == 0 )
            break;
        if ( (i != 0) && ((i % stack_words_per_line) == 0) )
            printk("\n ");
        addr = *stack++;
        printk("%p ", _p(addr));
    }
    if ( i == 0 )
        printk("Stack empty.");
    printk("\n");

    show_trace(regs);
}

/*
 * This is called for faults at very unexpected times (e.g., when interrupts
 * are disabled). In such situations we can't do much that is safe. We try to
 * print out some tracing and then we just spin.
 */
asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs)
{
    int cpu = smp_processor_id();
    unsigned long cr2;
    static char *trapstr[] = {
        "divide error", "debug", "nmi", "bkpt", "overflow", "bounds",
        "invalid operation", "device not available", "double fault",
        "coprocessor segment", "invalid tss", "segment not found",
        "stack error", "general protection fault", "page fault",
        "spurious interrupt", "coprocessor error", "alignment check",
        "machine check", "simd error"
    };

    watchdog_disable();
    console_start_sync();

    show_registers(regs);

    if ( trapnr == TRAP_page_fault )
    {
        __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (cr2) : );
        printk("Faulting linear address: %p\n", _p(cr2));
        show_page_walk(cr2);
    }

    printk("************************************\n");
    printk("CPU%d FATAL TRAP %d (%s), ERROR_CODE %04x%s.\n",
           cpu, trapnr, trapstr[trapnr], regs->error_code,
           (regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
    printk("System shutting down -- need manual reset.\n");
    printk("************************************\n");

    (void)debugger_trap_fatal(trapnr, regs);

    /* Lock up the console to prevent spurious output from other CPUs. */
    console_force_lock();

    /* Wait for manual reset. */
    for ( ; ; )
        __asm__ __volatile__ ( "hlt" );
}
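/*
 * Most guest exceptions are forwarded via the per-vcpu trap_bounce: the
 * exit path uses tb->cs/tb->eip (plus tb->error_code if requested) to build
 * an exception frame on the guest kernel stack and re-enter the guest at
 * the handler it registered via set_trap_table. TBF_INTERRUPT additionally
 * masks event delivery on entry, analogous to an interrupt gate.
 */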
static inline int do_trap(int trapnr, char *str,
                          struct cpu_user_regs *regs,
                          int use_error_code)
{
    struct vcpu *v = current;
    struct trap_bounce *tb = &v->arch.trap_bounce;
    trap_info_t *ti;
    unsigned long fixup;

    DEBUGGER_trap_entry(trapnr, regs);

    if ( !GUEST_MODE(regs) )
        goto xen_fault;

    ti = &current->arch.guest_context.trap_ctxt[trapnr];
    tb->flags = TBF_EXCEPTION;
    tb->cs = ti->cs;
    tb->eip = ti->address;
    if ( use_error_code )
    {
        tb->flags |= TBF_EXCEPTION_ERRCODE;
        tb->error_code = regs->error_code;
    }
    if ( TI_GET_IF(ti) )
        tb->flags |= TBF_INTERRUPT;
    return 0;

 xen_fault:

    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
    {
        DPRINTK("Trap %d: %p -> %p\n", trapnr, _p(regs->eip), _p(fixup));
        regs->eip = fixup;
        return 0;
    }

    DEBUGGER_trap_fatal(trapnr, regs);

    show_registers(regs);
    panic("CPU%d FATAL TRAP: vector = %d (%s)\n"
          "[error_code=%04x]\n",
          smp_processor_id(), trapnr, str, regs->error_code);
    return 0;
}

#define DO_ERROR_NOCODE(trapnr, str, name) \
asmlinkage int do_##name(struct cpu_user_regs *regs) \
{ \
    return do_trap(trapnr, str, regs, 0); \
}

#define DO_ERROR(trapnr, str, name) \
asmlinkage int do_##name(struct cpu_user_regs *regs) \
{ \
    return do_trap(trapnr, str, regs, 1); \
}

DO_ERROR_NOCODE( 0, "divide error", divide_error)
DO_ERROR_NOCODE( 4, "overflow", overflow)
DO_ERROR_NOCODE( 5, "bounds", bounds)
DO_ERROR_NOCODE( 6, "invalid operand", invalid_op)
DO_ERROR_NOCODE( 9, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, "invalid TSS", invalid_TSS)
DO_ERROR(11, "segment not present", segment_not_present)
DO_ERROR(12, "stack segment", stack_segment)
DO_ERROR_NOCODE(16, "fpu error", coprocessor_error)
DO_ERROR(17, "alignment check", alignment_check)
DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)

asmlinkage int do_int3(struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
    struct trap_bounce *tb = &v->arch.trap_bounce;
    trap_info_t *ti;

    DEBUGGER_trap_entry(TRAP_int3, regs);

    if ( !GUEST_MODE(regs) )
    {
        DEBUGGER_trap_fatal(TRAP_int3, regs);
        show_registers(regs);
        panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n", smp_processor_id());
    }

    ti = &current->arch.guest_context.trap_ctxt[TRAP_int3];
    tb->flags = TBF_EXCEPTION;
    tb->cs = ti->cs;
    tb->eip = ti->address;
    if ( TI_GET_IF(ti) )
        tb->flags |= TBF_INTERRUPT;

    return 0;
}

asmlinkage int do_machine_check(struct cpu_user_regs *regs)
{
    fatal_trap(TRAP_machine_check, regs);
    return 0;
}

void propagate_page_fault(unsigned long addr, u16 error_code)
{
    trap_info_t *ti;
    struct vcpu *v = current;
    struct trap_bounce *tb = &v->arch.trap_bounce;

    ti = &v->arch.guest_context.trap_ctxt[TRAP_page_fault];
    tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
    tb->cr2 = addr;
    tb->error_code = error_code;
    tb->cs = ti->cs;
    tb->eip = ti->address;
    if ( TI_GET_IF(ti) )
        tb->flags |= TBF_INTERRUPT;

    v->arch.guest_context.ctrlreg[2] = addr;
}

static int handle_perdomain_mapping_fault(
    unsigned long offset, struct cpu_user_regs *regs)
{
    extern int map_ldt_shadow_page(unsigned int);

    struct vcpu *v = current;
    struct domain *d = v->domain;
    int ret;

    /* Which vcpu's area did we fault in, and is it in the ldt sub-area? */
    unsigned int is_ldt_area = (offset >> (PDPT_VCPU_VA_SHIFT-1)) & 1;
    unsigned int vcpu_area = (offset >> PDPT_VCPU_VA_SHIFT);

    /* Should never fault in another vcpu's area. */
    BUG_ON(vcpu_area != current->vcpu_id);

    /* Byte offset within the gdt/ldt sub-area. */
    offset &= (1UL << (PDPT_VCPU_VA_SHIFT-1)) - 1UL;

    if ( likely(is_ldt_area) )
    {
        /* LDT fault: Copy a mapping from the guest's LDT, if it is valid. */
        LOCK_BIGLOCK(d);
        ret = map_ldt_shadow_page(offset >> PAGE_SHIFT);
        UNLOCK_BIGLOCK(d);

        if ( unlikely(ret == 0) )
        {
            /* In hypervisor mode? Leave it to the #PF handler to fix up. */
            if ( !GUEST_MODE(regs) )
                return 0;
            /* In guest mode? Propagate #PF to guest, with adjusted %cr2. */
            propagate_page_fault(
                v->arch.guest_context.ldt_base + offset, regs->error_code);
        }
    }
    else
    {
        /* GDT fault: handle the fault as #GP(selector). */
        regs->error_code = (u16)offset & ~7;
        (void)do_general_protection(regs);
    }

    return EXCRET_fault_fixed;
}

#ifdef HYPERVISOR_VIRT_END
#define IN_HYPERVISOR_RANGE(va) \
    (((va) >= HYPERVISOR_VIRT_START) && ((va) < HYPERVISOR_VIRT_END))
#else
#define IN_HYPERVISOR_RANGE(va) \
    (((va) >= HYPERVISOR_VIRT_START))
#endif
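/*
 * HYPERVISOR_VIRT_END exists only where the hypervisor area is bounded on
 * both sides (the x86/64 layout); on x86/32 Xen extends from
 * HYPERVISOR_VIRT_START to the top of the address space, so the lower
 * bound alone suffices.
 */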
static int fixup_page_fault(unsigned long addr, struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;

    if ( unlikely(IN_HYPERVISOR_RANGE(addr)) )
    {
        if ( shadow_mode_external(d) && GUEST_CONTEXT(v, regs) )
            return shadow_fault(addr, regs);
        if ( (addr >= PERDOMAIN_VIRT_START) && (addr < PERDOMAIN_VIRT_END) )
            return handle_perdomain_mapping_fault(
                addr - PERDOMAIN_VIRT_START, regs);
    }
    else if ( unlikely(shadow_mode_enabled(d)) )
    {
        return shadow_fault(addr, regs);
    }
    else if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
    {
        LOCK_BIGLOCK(d);
        if ( unlikely(d->arch.ptwr[PTWR_PT_ACTIVE].l1va) &&
             unlikely(l2_linear_offset(addr) ==
                      d->arch.ptwr[PTWR_PT_ACTIVE].l2_idx) )
        {
            ptwr_flush(d, PTWR_PT_ACTIVE);
            UNLOCK_BIGLOCK(d);
            return EXCRET_fault_fixed;
        }

        if ( KERNEL_MODE(v, regs) &&
             /* Protection violation on write? No reserved-bit violation? */
             ((regs->error_code & 0xb) == 0x3) &&
             ptwr_do_page_fault(d, addr, regs) )
        {
            UNLOCK_BIGLOCK(d);
            return EXCRET_fault_fixed;
        }
        UNLOCK_BIGLOCK(d);
    }

    return 0;
}

/*
 * #PF error code:
 *  Bit 0: Protection violation (=1) ; Page not present (=0)
 *  Bit 1: Write access
 *  Bit 2: User mode (=1) ; Supervisor mode (=0)
 *  Bit 3: Reserved bit violation
 *  Bit 4: Instruction fetch
 */
asmlinkage int do_page_fault(struct cpu_user_regs *regs)
{
    unsigned long addr, fixup;
    int rc;

    __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (addr) : );

    DEBUGGER_trap_entry(TRAP_page_fault, regs);

    perfc_incrc(page_faults);

    if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) )
        return rc;

    if ( unlikely(!GUEST_MODE(regs)) )
    {
        if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
        {
            perfc_incrc(copy_user_faults);
            regs->eip = fixup;
            return 0;
        }

        DEBUGGER_trap_fatal(TRAP_page_fault, regs);

        show_registers(regs);
        show_page_walk(addr);
        panic("CPU%d FATAL PAGE FAULT\n"
              "[error_code=%04x]\n"
              "Faulting linear address: %p\n",
              smp_processor_id(), regs->error_code, addr);
    }

    propagate_page_fault(addr, regs->error_code);
    return 0;
}

long do_fpu_taskswitch(int set)
{
    struct vcpu *v = current;

    if ( set )
    {
        v->arch.guest_context.ctrlreg[0] |= X86_CR0_TS;
        stts();
    }
    else
    {
        v->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
        if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
            clts();
    }

    return 0;
}
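/*
 * A PV guest kernel never runs in ring 0, so port I/O rights are checked
 * against a Xen-maintained virtual IOPL (guest kernel mode needs vIOPL >= 1,
 * guest user mode vIOPL >= 3) and, failing that, against the guest's I/O
 * bitmap -- mirroring the hardware IOPL/TSS-bitmap rules.
 */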
/* Has the guest requested sufficient permission for this I/O access? */
static inline int guest_io_okay(
    unsigned int port, unsigned int bytes,
    struct vcpu *v, struct cpu_user_regs *regs)
{
    u16 x;
#if defined(__x86_64__)
    /* If in user mode, switch to kernel mode just to read I/O bitmap. */
    extern void toggle_guest_mode(struct vcpu *);
    int user_mode = !(v->arch.flags & TF_kernel_mode);
#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
#elif defined(__i386__)
#define TOGGLE_MODE() ((void)0)
#endif

    if ( v->arch.iopl >= (KERNEL_MODE(v, regs) ? 1 : 3) )
        return 1;

    if ( v->arch.iobmp_limit > (port + bytes) )
    {
        TOGGLE_MODE();
        __get_user(x, (u16 *)(v->arch.iobmp+(port>>3)));
        TOGGLE_MODE();
        if ( (x & (((1<<bytes)-1) << (port&7))) == 0 )
            return 1;
    }

    return 0;
}

/* Has the administrator granted sufficient permission for this I/O access? */
static inline int admin_io_okay(
    unsigned int port, unsigned int bytes,
    struct vcpu *v, struct cpu_user_regs *regs)
{
    struct domain *d = v->domain;
    u16 x;

    if ( d->arch.iobmp_mask != NULL )
    {
        x = *(u16 *)(d->arch.iobmp_mask + (port >> 3));
        if ( (x & (((1<<bytes)-1) << (port&7))) == 0 )
            return 1;
    }

    return 0;
}

/* Check admin limits. Silently fail the access if it is disallowed. */
#define inb_user(_p, _d, _r) (admin_io_okay(_p, 1, _d, _r) ? inb(_p) : ~0)
#define inw_user(_p, _d, _r) (admin_io_okay(_p, 2, _d, _r) ? inw(_p) : ~0)
#define inl_user(_p, _d, _r) (admin_io_okay(_p, 4, _d, _r) ? inl(_p) : ~0)
#define outb_user(_v, _p, _d, _r) \
    (admin_io_okay(_p, 1, _d, _r) ? outb(_v, _p) : ((void)0))
#define outw_user(_v, _p, _d, _r) \
    (admin_io_okay(_p, 2, _d, _r) ? outw(_v, _p) : ((void)0))
#define outl_user(_v, _p, _d, _r) \
    (admin_io_okay(_p, 4, _d, _r) ? outl(_v, _p) : ((void)0))

/* Propagate a fault back to the guest kernel. */
#define USER_READ_FAULT  4 /* user mode, read fault */
#define USER_WRITE_FAULT 6 /* user mode, write fault */
#define PAGE_FAULT(_faultaddr, _errcode) \
({  propagate_page_fault(_faultaddr, _errcode); \
    return EXCRET_fault_fixed; \
})
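/*
 * NB: PAGE_FAULT() executes 'return' from the function that invokes it,
 * not merely from the macro: each failed get_user()/put_user() below exits
 * the emulator after queueing the fault for the guest.
 */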
/* Instruction fetch with error handling. */
#define insn_fetch(_type, _size, _ptr) \
({  unsigned long _x; \
    if ( get_user(_x, (_type *)eip) ) \
        PAGE_FAULT(eip, USER_READ_FAULT); \
    eip += _size; (_type)_x; })
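/*
 * Called from do_general_protection() when the error code is zero: decodes
 * and emulates the few privileged and I/O instructions a PV guest may use.
 * Returns EXCRET_fault_fixed on success, 0 to let the fault propagate.
 */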
static int emulate_privileged_op(struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
    unsigned long *reg, eip = regs->eip, res;
    u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0;
    unsigned int port, i, op_bytes = 4, data;

    /* Legacy prefixes. */
    for ( i = 0; i < 8; i++ )
    {
        switch ( opcode = insn_fetch(u8, 1, eip) )
        {
        case 0x66: /* operand-size override */
            op_bytes ^= 6; /* switch between 2/4 bytes */
            break;
        case 0x67: /* address-size override */
        case 0x2e: /* CS override */
        case 0x3e: /* DS override */
        case 0x26: /* ES override */
        case 0x64: /* FS override */
        case 0x65: /* GS override */
        case 0x36: /* SS override */
        case 0xf0: /* LOCK */
        case 0xf2: /* REPNE/REPNZ */
            break;
        case 0xf3: /* REP/REPE/REPZ */
            rep_prefix = 1;
            break;
        default:
            goto done_prefixes;
        }
    }
 done_prefixes:

#ifdef __x86_64__
    /* REX prefix. */
    if ( (opcode & 0xf0) == 0x40 )
    {
        modrm_reg = (opcode & 4) << 1; /* REX.R */
        modrm_rm  = (opcode & 1) << 3; /* REX.B */

        /* REX.W and REX.X do not need to be decoded. */
        opcode = insn_fetch(u8, 1, eip);
    }
#endif

    /* Input/Output String instructions. */
    if ( (opcode >= 0x6c) && (opcode <= 0x6f) )
    {
        if ( rep_prefix && (regs->ecx == 0) )
            goto done;

    continue_io_string:
        switch ( opcode )
        {
        case 0x6c: /* INSB */
            op_bytes = 1;
        case 0x6d: /* INSW/INSL */
            if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
                goto fail;
            switch ( op_bytes )
            {
            case 1:
                data = (u8)inb_user((u16)regs->edx, v, regs);
                if ( put_user((u8)data, (u8 *)regs->edi) )
                    PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
                break;
            case 2:
                data = (u16)inw_user((u16)regs->edx, v, regs);
                if ( put_user((u16)data, (u16 *)regs->edi) )
                    PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
                break;
            case 4:
                data = (u32)inl_user((u16)regs->edx, v, regs);
                if ( put_user((u32)data, (u32 *)regs->edi) )
                    PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
                break;
            }
            regs->edi += (regs->eflags & EF_DF) ? -op_bytes : op_bytes;
            break;

        case 0x6e: /* OUTSB */
            op_bytes = 1;
        case 0x6f: /* OUTSW/OUTSL */
            if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
                goto fail;
            switch ( op_bytes )
            {
            case 1:
                if ( get_user(data, (u8 *)regs->esi) )
                    PAGE_FAULT(regs->esi, USER_READ_FAULT);
                outb_user((u8)data, (u16)regs->edx, v, regs);
                break;
            case 2:
                if ( get_user(data, (u16 *)regs->esi) )
                    PAGE_FAULT(regs->esi, USER_READ_FAULT);
                outw_user((u16)data, (u16)regs->edx, v, regs);
                break;
            case 4:
                if ( get_user(data, (u32 *)regs->esi) )
                    PAGE_FAULT(regs->esi, USER_READ_FAULT);
                outl_user((u32)data, (u16)regs->edx, v, regs);
                break;
            }
            regs->esi += (regs->eflags & EF_DF) ? -op_bytes : op_bytes;
            break;
        }

        if ( rep_prefix && (--regs->ecx != 0) )
        {
            if ( !hypercall_preempt_check() )
                goto continue_io_string;
            eip = regs->eip;
        }

        goto done;
    }

    /* I/O Port and Interrupt Flag instructions. */
    switch ( opcode )
    {
    case 0xe4: /* IN imm8,%al */
        op_bytes = 1;
    case 0xe5: /* IN imm8,%eax */
        port = insn_fetch(u8, 1, eip);
    exec_in:
        if ( !guest_io_okay(port, op_bytes, v, regs) )
            goto fail;
        switch ( op_bytes )
        {
        case 1:
            regs->eax &= ~0xffUL;
            regs->eax |= (u8)inb_user(port, v, regs);
            break;
        case 2:
            regs->eax &= ~0xffffUL;
            regs->eax |= (u16)inw_user(port, v, regs);
            break;
        case 4:
            regs->eax = (u32)inl_user(port, v, regs);
            break;
        }
        goto done;

    case 0xec: /* IN %dx,%al */
        op_bytes = 1;
    case 0xed: /* IN %dx,%eax */
        port = (u16)regs->edx;
        goto exec_in;

    case 0xe6: /* OUT %al,imm8 */
        op_bytes = 1;
    case 0xe7: /* OUT %eax,imm8 */
        port = insn_fetch(u8, 1, eip);
    exec_out:
        if ( !guest_io_okay(port, op_bytes, v, regs) )
            goto fail;
        switch ( op_bytes )
        {
        case 1:
            outb_user((u8)regs->eax, port, v, regs);
            break;
        case 2:
            outw_user((u16)regs->eax, port, v, regs);
            break;
        case 4:
            outl_user((u32)regs->eax, port, v, regs);
            break;
        }
        goto done;

    case 0xee: /* OUT %al,%dx */
        op_bytes = 1;
    case 0xef: /* OUT %eax,%dx */
        port = (u16)regs->edx;
        goto exec_out;

    case 0xfa: /* CLI */
    case 0xfb: /* STI */
        if ( v->arch.iopl < (KERNEL_MODE(v, regs) ? 1 : 3) )
            goto fail;
        /*
         * This is just too dangerous to allow, in my opinion. Consider if the
         * caller then tries to reenable interrupts using POPF: we can't trap
         * that and we'll end up with hard-to-debug lockups. Fast & loose will
         * do for us. :-)
         */
        /*v->vcpu_info->evtchn_upcall_mask = (opcode == 0xfa);*/
        goto done;

    case 0x0f: /* Two-byte opcode */
        break;

    default:
        goto fail;
    }

    /* Remaining instructions only emulated from guest kernel. */
    if ( !KERNEL_MODE(v, regs) )
        goto fail;

    /* Privileged (ring 0) instructions. */
    opcode = insn_fetch(u8, 1, eip);
    switch ( opcode )
    {
    case 0x06: /* CLTS */
        (void)do_fpu_taskswitch(0);
        break;

    case 0x09: /* WBINVD */
        /* Ignore the instruction if unprivileged. */
        if ( !IS_CAPABLE_PHYSDEV(v->domain) )
            DPRINTK("Non-physdev domain attempted WBINVD.\n");
        else
            wbinvd();
        break;

    case 0x20: /* MOV CR?,<reg> */
        opcode = insn_fetch(u8, 1, eip);
        modrm_reg |= (opcode >> 3) & 7;
        modrm_rm  |= (opcode >> 0) & 7;
        reg = decode_register(modrm_rm, regs, 0);
        switch ( modrm_reg )
        {
        case 0: /* Read CR0 */
            *reg = v->arch.guest_context.ctrlreg[0];
            break;

        case 2: /* Read CR2 */
            *reg = v->arch.guest_context.ctrlreg[2];
            break;

        case 3: /* Read CR3 */
            *reg = pagetable_get_paddr(v->arch.guest_table);
            break;

        default:
            goto fail;
        }
        break;

    case 0x21: /* MOV DR?,<reg> */
        opcode = insn_fetch(u8, 1, eip);
        modrm_reg |= (opcode >> 3) & 7;
        modrm_rm  |= (opcode >> 0) & 7;
        reg = decode_register(modrm_rm, regs, 0);
        if ( (res = do_get_debugreg(modrm_reg)) > (unsigned long)-256 )
            goto fail;
        *reg = res;
        break;

    case 0x22: /* MOV <reg>,CR? */
        opcode = insn_fetch(u8, 1, eip);
        modrm_reg |= (opcode >> 3) & 7;
        modrm_rm  |= (opcode >> 0) & 7;
        reg = decode_register(modrm_rm, regs, 0);
        switch ( modrm_reg )
        {
        case 0: /* Write CR0 */
            (void)do_fpu_taskswitch(!!(*reg & X86_CR0_TS));
            break;

        case 2: /* Write CR2 */
            v->arch.guest_context.ctrlreg[2] = *reg;
            break;

        case 3: /* Write CR3 */
            LOCK_BIGLOCK(v->domain);
            (void)new_guest_cr3(*reg);
            UNLOCK_BIGLOCK(v->domain);
            break;

        default:
            goto fail;
        }
        break;

    case 0x23: /* MOV <reg>,DR? */
        opcode = insn_fetch(u8, 1, eip);
        modrm_reg |= (opcode >> 3) & 7;
        modrm_rm  |= (opcode >> 0) & 7;
        reg = decode_register(modrm_rm, regs, 0);
        if ( do_set_debugreg(modrm_reg, *reg) != 0 )
            goto fail;
        break;

    case 0x30: /* WRMSR */
        /* Ignore the instruction if unprivileged. */
        if ( !IS_PRIV(v->domain) )
            DPRINTK("Non-priv domain attempted WRMSR(%p,%08lx,%08lx).\n",
                    _p(regs->ecx), (long)regs->eax, (long)regs->edx);
        else if ( wrmsr_user(regs->ecx, regs->eax, regs->edx) )
            goto fail;
        break;

    case 0x32: /* RDMSR */
        if ( !IS_PRIV(v->domain) )
            DPRINTK("Non-priv domain attempted RDMSR(%p,%08lx,%08lx).\n",
                    _p(regs->ecx), (long)regs->eax, (long)regs->edx);
        /* Everyone can read the MSR space. */
        if ( rdmsr_user(regs->ecx, regs->eax, regs->edx) )
            goto fail;
        break;

    default:
        goto fail;
    }

 done:
    regs->eip = eip;
    return EXCRET_fault_fixed;

 fail:
    return 0;
}
asmlinkage int do_general_protection(struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
    struct trap_bounce *tb = &v->arch.trap_bounce;
    trap_info_t *ti;
    unsigned long fixup;

    DEBUGGER_trap_entry(TRAP_gp_fault, regs);

    if ( regs->error_code & 1 )
        goto hardware_gp;

    if ( !GUEST_MODE(regs) )
        goto gp_in_kernel;

    /*
     * Cunning trick to allow arbitrary "INT n" handling.
     *
     * We set DPL == 0 on all vectors in the IDT. This prevents any INT <n>
     * instruction from trapping to the appropriate vector, when that might not
     * be expected by Xen or the guest OS. For example, that entry might be for
     * a fault handler (unlike traps, faults don't increment EIP), or might
     * expect an error code on the stack (which a software trap never
     * provides), or might be a hardware interrupt handler that doesn't like
     * being called spuriously.
     *
     * Instead, a GPF occurs with the faulting IDT vector in the error code.
     * Bit 1 is set to indicate that an IDT entry caused the fault. Bit 0 is
     * clear to indicate that it's a software fault, not hardware.
     *
     * NOTE: Vectors 3 and 4 are dealt with from their own handler. This is
     * okay because they can only be triggered by an explicit DPL-checked
     * instruction. The DPL specified by the guest OS for these vectors is NOT
     * CHECKED!!
     */
    if ( (regs->error_code & 3) == 2 )
    {
        /* This fault must be due to <INT n> instruction. */
        ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3];
        if ( PERMIT_SOFTINT(TI_GET_DPL(ti), v, regs) )
        {
            tb->flags = TBF_EXCEPTION;
            regs->eip += 2;
            goto finish_propagation;
        }
    }

    /* Emulate some simple privileged and I/O instructions. */
    if ( (regs->error_code == 0) &&
         emulate_privileged_op(regs) )
        return 0;

#if defined(__i386__)
    if ( VM_ASSIST(v->domain, VMASST_TYPE_4gb_segments) &&
         (regs->error_code == 0) &&
         gpf_emulate_4gb(regs) )
        return 0;
#endif

    /* Pass on GPF as is. */
    ti = &current->arch.guest_context.trap_ctxt[TRAP_gp_fault];
    tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
    tb->error_code = regs->error_code;
 finish_propagation:
    tb->cs = ti->cs;
    tb->eip = ti->address;
    if ( TI_GET_IF(ti) )
        tb->flags |= TBF_INTERRUPT;
    return 0;

 gp_in_kernel:

    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
    {
        DPRINTK("GPF (%04x): %p -> %p\n",
                regs->error_code, _p(regs->eip), _p(fixup));
        regs->eip = fixup;
        return 0;
    }

    DEBUGGER_trap_fatal(TRAP_gp_fault, regs);

 hardware_gp:
    show_registers(regs);
    panic("CPU%d GENERAL PROTECTION FAULT\n[error_code=%04x]\n",
          smp_processor_id(), regs->error_code);
    return 0;
}
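/*
 * NMI reasons latched for deferred handling: bit 0 requests VIRQ_PARITY_ERR
 * and bit 1 requests VIRQ_IO_ERR, both delivered to DOM0 from softirq
 * context since virq delivery is not safe directly from the NMI handler.
 */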
unsigned long nmi_softirq_reason;
static void nmi_softirq(void)
{
    if ( dom0 == NULL )
        return;

    if ( test_and_clear_bit(0, &nmi_softirq_reason) )
        send_guest_virq(dom0->vcpu[0], VIRQ_PARITY_ERR);

    if ( test_and_clear_bit(1, &nmi_softirq_reason) )
        send_guest_virq(dom0->vcpu[0], VIRQ_IO_ERR);
}

asmlinkage void mem_parity_error(struct cpu_user_regs *regs)
{
    /* Clear and disable the parity-error line. */
    outb((inb(0x61)&15)|4,0x61);

    switch ( opt_nmi[0] )
    {
    case 'd': /* 'dom0' */
        set_bit(0, &nmi_softirq_reason);
        raise_softirq(NMI_SOFTIRQ);
    case 'i': /* 'ignore' */
        break;
    default:  /* 'fatal' */
        console_force_unlock();
        printk("\n\nNMI - MEMORY ERROR\n");
        fatal_trap(TRAP_nmi, regs);
    }
}
asmlinkage void io_check_error(struct cpu_user_regs *regs)
{
    /* Clear and disable the I/O-error line. */
    outb((inb(0x61)&15)|8,0x61);

    switch ( opt_nmi[0] )
    {
    case 'd': /* 'dom0' */
        set_bit(1, &nmi_softirq_reason);
        raise_softirq(NMI_SOFTIRQ);
    case 'i': /* 'ignore' */
        break;
    default:  /* 'fatal' */
        console_force_unlock();
        printk("\n\nNMI - I/O ERROR\n");
        fatal_trap(TRAP_nmi, regs);
    }
}
static void unknown_nmi_error(unsigned char reason)
{
    printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
    printk("Dazed and confused, but trying to continue\n");
    printk("Do you have a strange power saving mode enabled?\n");
}

asmlinkage void do_nmi(struct cpu_user_regs *regs, unsigned long reason)
{
    ++nmi_count(smp_processor_id());

    if ( nmi_watchdog )
        nmi_watchdog_tick(regs);

    if ( reason & 0x80 )
        mem_parity_error(regs);
    else if ( reason & 0x40 )
        io_check_error(regs);
    else if ( !nmi_watchdog )
        unknown_nmi_error((unsigned char)(reason&0xff));
}

asmlinkage int math_state_restore(struct cpu_user_regs *regs)
{
    /* Prevent recursion. */
    clts();

    setup_fpu(current);

    if ( current->arch.guest_context.ctrlreg[0] & X86_CR0_TS )
    {
        struct trap_bounce *tb = &current->arch.trap_bounce;
        tb->flags = TBF_EXCEPTION;
        tb->cs = current->arch.guest_context.trap_ctxt[7].cs;
        tb->eip = current->arch.guest_context.trap_ctxt[7].address;
        current->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
    }

    return EXCRET_fault_fixed;
}

asmlinkage int do_debug(struct cpu_user_regs *regs)
{
    unsigned long condition;
    struct vcpu *v = current;
    struct trap_bounce *tb = &v->arch.trap_bounce;

    __asm__ __volatile__("mov %%db6,%0" : "=r" (condition));

    /* Mask out spurious debug traps due to lazy DR7 setting */
    if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
         (v->arch.guest_context.debugreg[7] == 0) )
    {
        __asm__("mov %0,%%db7" : : "r" (0UL));
        goto out;
    }

    DEBUGGER_trap_entry(TRAP_debug, regs);

    if ( !GUEST_MODE(regs) )
    {
        /* Clear TF just for absolute sanity. */
        regs->eflags &= ~EF_TF;
        /*
         * We ignore watchpoints when they trigger within Xen. This may happen
         * when a buffer is passed to us which previously had a watchpoint set
         * on it. No need to bump EIP; the only faulting trap is an instruction
         * breakpoint, which can't happen to us.
         */
        goto out;
    }

    /* Save debug status register where guest OS can peek at it */
    v->arch.guest_context.debugreg[6] = condition;

    tb->flags = TBF_EXCEPTION;
    tb->cs = v->arch.guest_context.trap_ctxt[TRAP_debug].cs;
    tb->eip = v->arch.guest_context.trap_ctxt[TRAP_debug].address;

 out:
    return EXCRET_not_a_fault;
}

asmlinkage int do_spurious_interrupt_bug(struct cpu_user_regs *regs)
{
    return EXCRET_not_a_fault;
}

void set_intr_gate(unsigned int n, void *addr)
{
#ifdef __i386__
    int i;
    /* Keep secondary tables in sync with IRQ updates. */
    for ( i = 1; i < NR_CPUS; i++ )
        if ( idt_tables[i] != NULL )
            _set_gate(&idt_tables[i][n], 14, 0, addr);
#endif
    _set_gate(&idt_table[n], 14, 0, addr);
}

void set_system_gate(unsigned int n, void *addr)
{
    _set_gate(idt_table+n,14,3,addr);
}

void set_task_gate(unsigned int n, unsigned int sel)
{
    idt_table[n].a = sel << 16;
    idt_table[n].b = 0x8500;
}

void set_tss_desc(unsigned int n, void *addr)
{
    _set_tssldt_desc(
        gdt_table + __TSS(n) - FIRST_RESERVED_GDT_ENTRY,
        (unsigned long)addr,
        offsetof(struct tss_struct, __cacheline_filler) - 1,
        9);
}

void __init trap_init(void)
{
    extern void percpu_traps_init(void);

    /*
     * Note that interrupt gates are always used, rather than trap gates. We
     * must have interrupts disabled until DS/ES/FS/GS are saved because the
     * first activation must have the "bad" value(s) for these registers and
     * we may lose them if another activation is installed before they are
     * saved. The page-fault handler also needs interrupts disabled until %cr2
     * has been read and saved on the stack.
     */
    set_intr_gate(TRAP_divide_error,&divide_error);
    set_intr_gate(TRAP_debug,&debug);
    set_intr_gate(TRAP_nmi,&nmi);
    set_system_gate(TRAP_int3,&int3);         /* usable from all privileges */
    set_system_gate(TRAP_overflow,&overflow); /* usable from all privileges */
    set_intr_gate(TRAP_bounds,&bounds);
    set_intr_gate(TRAP_invalid_op,&invalid_op);
    set_intr_gate(TRAP_no_device,&device_not_available);
    set_intr_gate(TRAP_copro_seg,&coprocessor_segment_overrun);
    set_intr_gate(TRAP_invalid_tss,&invalid_TSS);
    set_intr_gate(TRAP_no_segment,&segment_not_present);
    set_intr_gate(TRAP_stack_error,&stack_segment);
    set_intr_gate(TRAP_gp_fault,&general_protection);
    set_intr_gate(TRAP_page_fault,&page_fault);
    set_intr_gate(TRAP_spurious_int,&spurious_interrupt_bug);
    set_intr_gate(TRAP_copro_error,&coprocessor_error);
    set_intr_gate(TRAP_alignment_check,&alignment_check);
    set_intr_gate(TRAP_machine_check,&machine_check);
    set_intr_gate(TRAP_simd_error,&simd_coprocessor_error);

    percpu_traps_init();

    cpu_init();

    open_softirq(NMI_SOFTIRQ, nmi_softirq);
}
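/*
 * HYPERVISOR_set_trap_table: install a zero-terminated array of trap_info_t
 * entries into the vcpu's virtual IDT. The loop checks
 * hypercall_preempt_check() each iteration and restarts via a continuation,
 * so a large table cannot monopolise the CPU.
 */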
long do_set_trap_table(trap_info_t *traps)
{
    trap_info_t cur;
    trap_info_t *dst = current->arch.guest_context.trap_ctxt;
    long rc = 0;

    LOCK_BIGLOCK(current->domain);

    for ( ; ; )
    {
        if ( hypercall_preempt_check() )
        {
            rc = hypercall1_create_continuation(
                __HYPERVISOR_set_trap_table, traps);
            break;
        }

        if ( copy_from_user(&cur, traps, sizeof(cur)) )
        {
            rc = -EFAULT;
            break;
        }

        if ( cur.address == 0 )
            break;

        if ( !VALID_CODESEL(cur.cs) )
        {
            rc = -EPERM;
            break;
        }

        memcpy(&dst[cur.vector], &cur, sizeof(cur));

        if ( cur.vector == 0x80 )
            init_int80_direct_trap(current);

        traps++;
    }

    UNLOCK_BIGLOCK(current->domain);

    return rc;
}
long set_debugreg(struct vcpu *p, int reg, unsigned long value)
{
    int i;

    switch ( reg )
    {
    case 0:
        if ( !access_ok(value, sizeof(long)) )
            return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db0" : : "r" (value) );
        break;
    case 1:
        if ( !access_ok(value, sizeof(long)) )
            return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db1" : : "r" (value) );
        break;
    case 2:
        if ( !access_ok(value, sizeof(long)) )
            return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db2" : : "r" (value) );
        break;
    case 3:
        if ( !access_ok(value, sizeof(long)) )
            return -EPERM;
        if ( p == current )
            __asm__ ( "mov %0, %%db3" : : "r" (value) );
        break;
    case 6:
        /*
         * DR6: Bits 4-11,16-31 reserved (set to 1).
         *      Bit 12 reserved (set to 0).
         */
        value &= 0xffffefff; /* reserved bits => 0 */
        value |= 0xffff0ff0; /* reserved bits => 1 */
        if ( p == current )
            __asm__ ( "mov %0, %%db6" : : "r" (value) );
        break;
    case 7:
        /*
         * DR7: Bit 10 reserved (set to 1).
         *      Bits 11-12,14-15 reserved (set to 0).
         * Privileged bits:
         *      GD (bit 13): must be 0.
         *      R/Wn (bits 16-17,20-21,24-25,28-29): mustn't be 10.
         *      LENn (bits 18-19,22-23,26-27,30-31): mustn't be 10.
         */
        /* DR7 == 0 => debugging disabled for this domain. */
        if ( value != 0 )
        {
            value &= 0xffff27ff; /* reserved bits => 0 */
            value |= 0x00000400; /* reserved bits => 1 */
            if ( (value & (1<<13)) != 0 ) return -EPERM;
            for ( i = 0; i < 16; i += 2 )
                if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
        }
        if ( p == current )
            __asm__ ( "mov %0, %%db7" : : "r" (value) );
        break;
    default:
        return -EINVAL;
    }

    p->arch.guest_context.debugreg[reg] = value;
    return 0;
}

long do_set_debugreg(int reg, unsigned long value)
{
    return set_debugreg(current, reg, value);
}

unsigned long do_get_debugreg(int reg)
{
    if ( (reg < 0) || (reg > 7) ) return -EINVAL;
    return current->arch.guest_context.debugreg[reg];
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */