/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 */

/*
 * The idea for callbacks to guest OSes
 * ====================================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find out
 * the privilege level of the return code selector. That code would just be
 * a hassle to write, and would need to account for running off the end of
 * the GDT/LDT, for example. For all callbacks we check that the provided
 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe, as
 * we don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
 * than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *    { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *      return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, the original values of DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with. We
 * avoid that problem if we don't load them :-) This property allows us to
 * use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - a fault while creating a callback stack frame (due to a bad ring-1
 *    stack);
 *  - a faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
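
/*
 * (In practice a guest's failsafe handler can therefore pop DS/ES/FS/GS
 * off the front of that frame -- fixing up or discarding any selector it
 * no longer wants -- and then return through the rest of it with IRET.)
 */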

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <hypervisor-ifs/hypervisor-if.h>

EBX         = 0x00
ECX         = 0x04
EDX         = 0x08
ESI         = 0x0C
EDI         = 0x10
EBP         = 0x14
EAX         = 0x18
DS          = 0x1C
ES          = 0x20
FS          = 0x24
GS          = 0x28
ORIG_EAX    = 0x2C
EIP         = 0x30
CS          = 0x34
EFLAGS      = 0x38
OLDESP      = 0x3C
OLDSS       = 0x40
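
/*
 * These offsets describe the stack frame that the entry paths build: the
 * general and segment registers pushed by SAVE_ALL, the ORIG_EAX slot, and
 * the hardware-saved EIP/CS/EFLAGS[/ESP/SS]. A rough C view of that layout
 * -- an illustrative sketch only, the field names are not taken from this
 * file -- is:
 *
 *     struct frame {
 *         unsigned long ebx, ecx, edx, esi, edi, ebp, eax;   // 0x00-0x18
 *         unsigned long ds, es, fs, gs;                      // 0x1C-0x28
 *         unsigned long orig_eax;                            // 0x2C
 *         unsigned long eip, cs, eflags, esp, ss;            // 0x30-0x40
 *     };
 */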

/* Offsets in domain structure */
PROCESSOR       =  0
SHARED_INFO     =  4
EVENT_SEL       =  8
EVENT_ADDR      = 12
FAILSAFE_BUFFER = 16
FAILSAFE_SEL    = 32
FAILSAFE_ADDR   = 36
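
/*
 * These are offsets into the per-domain task_struct (see the %ebx uses
 * below). A hedged sketch of the leading fields they imply -- names and
 * types are illustrative, the real definition lives in the C headers -- is:
 *
 *     struct task_struct {
 *         unsigned long  processor;            //  +0: CPU this domain runs on
 *         shared_info_t *shared_info;          //  +4
 *         unsigned long  event_selector;       //  +8: event callback CS
 *         unsigned long  event_address;        // +12: event callback EIP
 *         unsigned long  failsafe_buffer[4];   // +16: saved DS/ES/FS/GS
 *         unsigned long  failsafe_selector;    // +32: failsafe callback CS
 *         unsigned long  failsafe_address;     // +36: failsafe callback EIP
 *         // ... (remaining fields omitted)
 *     };
 */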

/* Offsets in shared_info_t */
#define UPCALL_PENDING /* 0 */
#define UPCALL_MASK       1

/* Offsets in guest_trap_bounce */
GTB_ERROR_CODE   =  0
GTB_CR2          =  4
GTB_FLAGS        =  8
GTB_CS           = 10
GTB_EIP          = 12
GTBF_TRAP        =  1
GTBF_TRAP_NOCODE =  2
GTBF_TRAP_CR2    =  4
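
/*
 * Taken together with the "sizeof(guest_trap_bounce) == 16" assumption made
 * further down, the offsets above imply a per-CPU record along these lines
 * (an illustrative C sketch, not the definition from the headers):
 *
 *     struct guest_trap_bounce {
 *         unsigned long  error_code;   //  +0
 *         unsigned long  cr2;          //  +4
 *         unsigned short flags;        //  +8: GTBF_* bits
 *         unsigned short cs;           // +10
 *         unsigned long  eip;          // +12
 *     };                               // 16 bytes per entry
 */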

CF_MASK = 0x00000001
IF_MASK = 0x00000200
NT_MASK = 0x00004000

#define SAVE_ALL_NOSEGREGS \
        cld; \
        pushl %gs; \
        pushl %fs; \
        pushl %es; \
        pushl %ds; \
        pushl %eax; \
        pushl %ebp; \
        pushl %edi; \
        pushl %esi; \
        pushl %edx; \
        pushl %ecx; \
        pushl %ebx; \

#define SAVE_ALL \
        SAVE_ALL_NOSEGREGS \
        movl $(__HYPERVISOR_DS),%edx; \
        movl %edx,%ds; \
        movl %edx,%es; \
        movl %edx,%fs; \
        movl %edx,%gs; \
        sti;

#define GET_CURRENT(reg) \
        movl $4096-4, reg; \
        orl %esp, reg; \
        andl $~3,reg; \
        movl (reg),reg;
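
/*
 * GET_CURRENT(reg): the pointer to the current task_struct is kept in the
 * last word of the 4KB stack page we are running on, so round %esp up to
 * that word (the andl clears the low two bits left by the orl) and load
 * the pointer from there.
 */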

ENTRY(continue_nonidle_task)
        GET_CURRENT(%ebx)
        jmp test_all_events

        ALIGN
/*
 * HYPERVISOR_multicall(call_list, nr_calls)
 * Execute a list of 'nr_calls' hypercalls, pointed at by 'call_list'.
 * This is fairly easy except that:
 *  1. We may fault reading the call list, and must patch that up; and
 *  2. We cannot recursively call HYPERVISOR_multicall, or a malicious
 *     caller could cause our stack to blow up.
 */
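/*
 * Each entry in the call list is read with the fixed offsets used below:
 * the hypercall number at +0, up to five arguments at +4..+20, and the
 * result written back at +24; the stride between entries is
 * ARGS_PER_MULTICALL_ENTRY*4 bytes. An illustrative C sketch (the real
 * multicall_entry_t lives in the public headers and may reserve more
 * argument slots):
 *
 *     struct multicall_entry {
 *         unsigned long op;        //  +0
 *         unsigned long args[6];   //  +4..+24; args[5] receives the result
 *     };
 */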
do_multicall:
        popl %eax
        cmpl $SYMBOL_NAME(multicall_return_from_call),%eax
        je multicall_return_from_call
        pushl %ebx
        movl 4(%esp),%ebx /* EBX == call_list */
        movl 8(%esp),%ecx /* ECX == nr_calls */
multicall_loop:
        pushl %ecx
multicall_fault1:
        pushl 20(%ebx)      # args[4]
multicall_fault2:
        pushl 16(%ebx)      # args[3]
multicall_fault3:
        pushl 12(%ebx)      # args[2]
multicall_fault4:
        pushl 8(%ebx)       # args[1]
multicall_fault5:
        pushl 4(%ebx)       # args[0]
multicall_fault6:
        movl (%ebx),%eax    # op
        andl $(NR_hypercalls-1),%eax
        call *SYMBOL_NAME(hypercall_table)(,%eax,4)
multicall_return_from_call:
multicall_fault7:
        movl %eax,24(%ebx)  # args[5] == result
        addl $20,%esp
        popl %ecx
        addl $(ARGS_PER_MULTICALL_ENTRY*4),%ebx
        loop multicall_loop
        popl %ebx
        xorl %eax,%eax
        jmp ret_from_hypercall

.section __ex_table,"a"
        .align 4
        .long multicall_fault1, multicall_fixup1
        .long multicall_fault2, multicall_fixup2
        .long multicall_fault3, multicall_fixup3
        .long multicall_fault4, multicall_fixup4
        .long multicall_fault5, multicall_fixup5
        .long multicall_fault6, multicall_fixup6
.previous

.section .fixup,"ax"
multicall_fixup6:
        addl $4,%esp
multicall_fixup5:
        addl $4,%esp
multicall_fixup4:
        addl $4,%esp
multicall_fixup3:
        addl $4,%esp
multicall_fixup2:
        addl $4,%esp
multicall_fixup1:
        addl $4,%esp
        popl %ebx
        movl $-EFAULT,%eax
        jmp ret_from_hypercall
.previous

        ALIGN
restore_all_guest:
        # First, we may need to restore %ds if it was clobbered by create_bounce_frame
        pushl %ss
        popl %ds
        # Second, create a failsafe copy of DS/ES/FS/GS in case any of them are bad
        leal DS(%esp),%esi
        leal FAILSAFE_BUFFER(%ebx),%edi
        movsl
        movsl
        movsl
        movsl
        # Finally, restore the guest registers -- faults here bounce to the failsafe handler
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
1:      popl %ds
2:      popl %es
3:      popl %fs
4:      popl %gs
        addl $4,%esp
5:      iret
.section .fixup,"ax"
10:     subl $4,%esp
        pushl %gs
9:      pushl %fs
8:      pushl %es
7:      pushl %ds
6:      pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx
        pushl %ss
        popl %ds
        pushl %ss
        popl %es
        jmp failsafe_callback
.previous
.section __ex_table,"a"
        .align 4
        .long 1b,6b
        .long 2b,7b
        .long 3b,8b
        .long 4b,9b
        .long 5b,10b
.previous

/* No special register assumptions */
failsafe_callback:
        GET_CURRENT(%ebx)
        movl PROCESSOR(%ebx),%eax
        shl $4,%eax
        lea guest_trap_bounce(%eax),%edx
        movl FAILSAFE_ADDR(%ebx),%eax
        movl %eax,GTB_EIP(%edx)
        movl FAILSAFE_SEL(%ebx),%eax
        movw %ax,GTB_CS(%edx)
        call create_bounce_frame
        subl $16,%esi              # add DS/ES/FS/GS to failsafe stack frame
        leal FAILSAFE_BUFFER(%ebx),%ebp
        movl 0(%ebp),%eax          # DS
FAULT1: movl %eax,(%esi)
        movl 4(%ebp),%eax          # ES
FAULT2: movl %eax,4(%esi)
        movl 8(%ebp),%eax          # FS
FAULT3: movl %eax,8(%esi)
        movl 12(%ebp),%eax         # GS
FAULT4: movl %eax,12(%esi)
        movl %esi,OLDESP(%esp)
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $20,%esp              # skip DS/ES/FS/GS/ORIG_EAX
FAULT5: iret


        ALIGN
# Simple restore -- we should never fault, as we will only interrupt ring 0
# when sane values have been placed in all registers. The only exception is
# NMI, which may interrupt before good values have been placed in DS-GS.
# The NMI return code deals with this problem itself.
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        popl %ds
        popl %es
        popl %fs
        popl %gs
        addl $4,%esp
        iret

        ALIGN
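# Hypercall arguments are set up by the guest in %ebx,%ecx,%edx,%esi,%edi.
# Because SAVE_ALL pushes exactly those registers last (so they sit lowest
# on the stack, in argument order), the saved frame below doubles as the C
# argument list for the handler called through hypercall_table.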
ENTRY(hypercall)
        pushl %eax                 # save orig_eax
        SAVE_ALL
        GET_CURRENT(%ebx)
        andl $(NR_hypercalls-1),%eax
        call *SYMBOL_NAME(hypercall_table)(,%eax,4)

ret_from_hypercall:
        movl %eax,EAX(%esp)        # save the return value

test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                        # tests must not race interrupts
/*test_softirqs:*/
        movl PROCESSOR(%ebx),%eax
        shl $6,%eax                # sizeof(irq_cpustat) == 64
        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
        jnz process_softirqs
/*test_guest_events:*/
        movl SHARED_INFO(%ebx),%eax
        testb $0xFF,UPCALL_MASK(%eax)
        jnz restore_all_guest
        testb $0xFF,UPCALL_PENDING(%eax)
        jz restore_all_guest
        movb $1,UPCALL_MASK(%eax)  # Upcalls are masked during delivery
/*process_guest_events:*/
        movl PROCESSOR(%ebx),%edx
        shl $4,%edx                # sizeof(guest_trap_bounce) == 16
        lea guest_trap_bounce(%edx),%edx
        movl EVENT_ADDR(%ebx),%eax
        movl %eax,GTB_EIP(%edx)
        movl EVENT_SEL(%ebx),%eax
        movw %ax,GTB_CS(%edx)
        call create_bounce_frame
        jmp restore_all_guest

        ALIGN
process_softirqs:
        sti
        call SYMBOL_NAME(do_softirq)
        jmp test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:    */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                */
/* %edx == guest_trap_bounce, %ebx == task_struct                */
/* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP.   */
create_bounce_frame:
        mov CS+4(%esp),%cl
        test $2,%cl
        jz 1f                      /* jump if returning to an existing ring-1 activation */
        /* obtain ss/esp from TSS -- no current ring-1 activations */
        movl PROCESSOR(%ebx),%eax
        /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
        movl %eax, %ecx
        shll $7, %ecx
        shll $13, %eax
        addl %ecx,%eax
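        /* (8320 = (1<<13) + (1<<7); %eax is now the byte offset of this
         *  CPU's entry in the init_tss array) */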
        addl $init_tss + 12,%eax
        movl (%eax),%esi           /* tss->esp1 */
FAULT6: movl 4(%eax),%ds           /* tss->ss1 */
        /* base of stack frame must contain ss/esp (inter-priv iret) */
        subl $8,%esi
        movl OLDESP+4(%esp),%eax
FAULT7: movl %eax,(%esi)
        movl OLDSS+4(%esp),%eax
FAULT8: movl %eax,4(%esi)
        jmp 2f
1:      /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl OLDESP+4(%esp),%esi
FAULT9: movl OLDSS+4(%esp),%ds
2:      /* Construct a stack frame: EFLAGS, CS/EIP */
        subl $12,%esi
        movl EIP+4(%esp),%eax
FAULT10: movl %eax,(%esi)
        movl CS+4(%esp),%eax
FAULT11: movl %eax,4(%esi)
        movl EFLAGS+4(%esp),%eax
FAULT12: movl %eax,8(%esi)
        /* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
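        /* (the mask below, 0xfffcbeff, clears bit 8 (TF), bit 14 (NT),
         *  bit 16 (RF) and bit 17 (VM)) */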
        andl $0xfffcbeff,%eax
        movl %eax,EFLAGS+4(%esp)
        movl %ds,OLDSS+4(%esp)
        movl %esi,OLDESP+4(%esp)
        movzwl %es:GTB_CS(%edx),%eax
        movl %eax,CS+4(%esp)
        movl %es:GTB_EIP(%edx),%eax
        movl %eax,EIP+4(%esp)
        ret


.section __ex_table,"a"
        .align 4
        .long FAULT1, crash_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT2, crash_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT3, crash_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT4, crash_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT5, crash_domain_fixup1 # Fault executing failsafe iret
        .long FAULT6, crash_domain_fixup2 # Fault loading ring-1 stack selector
        .long FAULT7, crash_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT8, crash_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT9, crash_domain_fixup2 # Fault loading ring-1 stack selector
        .long FAULT10,crash_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT11,crash_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT12,crash_domain_fixup2 # Fault writing to ring-1 stack
        .long FAULT13,crash_domain_fixup3 # Fault writing to ring-1 stack
        .long FAULT14,crash_domain_fixup3 # Fault writing to ring-1 stack
.previous

# This handler kills domains which experience unrecoverable faults.
.section .fixup,"ax"
crash_domain_fixup1:
        subl $4,%esp
        SAVE_ALL
        jmp domain_crash
crash_domain_fixup2:
        addl $4,%esp
crash_domain_fixup3:
        pushl %ss
        popl %ds
        jmp domain_crash
.previous

        ALIGN
process_guest_exception_and_events:
        movl PROCESSOR(%ebx),%eax
        shl $4,%eax
        lea guest_trap_bounce(%eax),%edx
        testb $~0,GTB_FLAGS(%edx)
        jz test_all_events
        call create_bounce_frame   # just the basic frame
        mov %es:GTB_FLAGS(%edx),%cl
        test $GTBF_TRAP_NOCODE,%cl
        jnz 2f
        subl $4,%esi               # push error_code onto guest frame
        movl %es:GTB_ERROR_CODE(%edx),%eax
FAULT13: movl %eax,(%esi)
        test $GTBF_TRAP_CR2,%cl
        jz 1f
        subl $4,%esi               # push %cr2 onto guest frame
        movl %es:GTB_CR2(%edx),%eax
FAULT14: movl %eax,(%esi)
1:      movl %esi,OLDESP(%esp)
2:      push %es                   # unclobber %ds
        pop %ds
        movb $0,GTB_FLAGS(%edx)
        jmp test_all_events

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movb CS(%esp),%al
        testb $3,%al               # return to non-supervisor?
        jne test_all_events
        jmp restore_all_xen

ENTRY(divide_error)
        pushl $0                   # no error code
        pushl $ SYMBOL_NAME(do_divide_error)
        ALIGN
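# The exception stubs arrange for two extra words to sit above the hardware
# frame: an error code (pushed by the CPU, or a dummy 0 pushed by the stub)
# and the address of the C handler. After the pushes below they occupy the
# ORIG_EAX and GS slots respectively; error_code retrieves them into %esi
# and %edi and overwrites the slots with -1 and the real %gs.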
error_code:
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        xorl %eax,%eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        decl %eax                  # eax = -1
        pushl %ecx
        pushl %ebx
        cld
        movl %gs,%ecx
        movl ORIG_EAX(%esp), %esi  # get the error code
        movl GS(%esp), %edi        # get the function address
        movl %eax, ORIG_EAX(%esp)
        movl %ecx, GS(%esp)
        movl $(__HYPERVISOR_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        movl %edx,%fs
        movl %edx,%gs
        movl %esp,%edx
        pushl %esi                 # push the error code
        pushl %edx                 # push the pt_regs pointer
        GET_CURRENT(%ebx)
        call *%edi
        addl $8,%esp
        movb CS(%esp),%al
        testb $3,%al
        je restore_all_xen
        jmp process_guest_exception_and_events

ENTRY(coprocessor_error)
        pushl $0
        pushl $ SYMBOL_NAME(do_coprocessor_error)
        jmp error_code

ENTRY(simd_coprocessor_error)
        pushl $0
        pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
        jmp error_code

ENTRY(device_not_available)
        pushl $0
        pushl $SYMBOL_NAME(math_state_restore)
        jmp error_code

ENTRY(debug)
        pushl $0
        pushl $ SYMBOL_NAME(do_debug)
        jmp error_code

ENTRY(int3)
        pushl $0
        pushl $ SYMBOL_NAME(do_int3)
        jmp error_code

ENTRY(overflow)
        pushl $0
        pushl $ SYMBOL_NAME(do_overflow)
        jmp error_code

ENTRY(bounds)
        pushl $0
        pushl $ SYMBOL_NAME(do_bounds)
        jmp error_code

ENTRY(invalid_op)
        pushl $0
        pushl $ SYMBOL_NAME(do_invalid_op)
        jmp error_code

ENTRY(coprocessor_segment_overrun)
        pushl $0
        pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
        jmp error_code

ENTRY(invalid_TSS)
        pushl $ SYMBOL_NAME(do_invalid_TSS)
        jmp error_code

ENTRY(segment_not_present)
        pushl $ SYMBOL_NAME(do_segment_not_present)
        jmp error_code

ENTRY(stack_segment)
        pushl $ SYMBOL_NAME(do_stack_segment)
        jmp error_code

ENTRY(general_protection)
        pushl $ SYMBOL_NAME(do_general_protection)
        jmp error_code

ENTRY(alignment_check)
        pushl $ SYMBOL_NAME(do_alignment_check)
        jmp error_code

ENTRY(page_fault)
        pushl $ SYMBOL_NAME(do_page_fault)
        jmp error_code

ENTRY(machine_check)
        pushl $0
        pushl $ SYMBOL_NAME(do_machine_check)
        jmp error_code

ENTRY(spurious_interrupt_bug)
        pushl $0
        pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
        jmp error_code

ENTRY(nmi)
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS

        # Check for hardware problems.
        inb $0x61,%al
        testb $0x80,%al
        jne nmi_parity_err
        testb $0x40,%al
        jne nmi_io_err
        movl %eax,%ebx

        # Okay, it's almost a normal NMI tick. We can only process it if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS-GS all contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movb CS(%esp),%al
        testb $3,%al
        jne do_watchdog_tick
        movl DS(%esp),%eax
        cmpw $(__HYPERVISOR_DS),%ax
        jne nmi_badseg
        movl ES(%esp),%eax
        cmpw $(__HYPERVISOR_DS),%ax
        jne nmi_badseg
        movl FS(%esp),%eax
        cmpw $(__HYPERVISOR_DS),%ax
        jne nmi_badseg
        movl GS(%esp),%eax
        cmpw $(__HYPERVISOR_DS),%ax
        jne nmi_badseg

do_watchdog_tick:
        movl $(__HYPERVISOR_DS),%edx
        movl %edx,%ds
        movl %edx,%es
        movl %esp,%edx
        pushl %ebx                 # reason
        pushl %edx                 # regs
        call SYMBOL_NAME(do_nmi)
        addl $8,%esp
        movb CS(%esp),%al
        testb $3,%al
        je restore_all_xen
        GET_CURRENT(%ebx)
        jmp restore_all_guest

nmi_badseg:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $20,%esp
        iret

nmi_parity_err:
        # Clear and disable the parity-error line
        andb $0xf,%al
        orb $0x4,%al
        outb %al,$0x61
        cmpb $'i',%ss:SYMBOL_NAME(opt_nmi)   # nmi=ignore
        je nmi_badseg
        bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb $'d',%ss:SYMBOL_NAME(opt_nmi)   # nmi=dom0
        je nmi_badseg
        movl $(__HYPERVISOR_DS),%edx         # nmi=fatal
        movl %edx,%ds
        movl %edx,%es
        movl %esp,%edx
        push %edx
        call SYMBOL_NAME(mem_parity_error)
        addl $4,%esp
        jmp ret_from_intr

nmi_io_err:
        # Clear and disable the I/O-error line
        andb $0xf,%al
        orb $0x8,%al
        outb %al,$0x61
        cmpb $'i',%ss:SYMBOL_NAME(opt_nmi)   # nmi=ignore
        je nmi_badseg
        bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
        bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
        cmpb $'d',%ss:SYMBOL_NAME(opt_nmi)   # nmi=dom0
        je nmi_badseg
        movl $(__HYPERVISOR_DS),%edx         # nmi=fatal
        movl %edx,%ds
        movl %edx,%es
        movl %esp,%edx
        push %edx
        call SYMBOL_NAME(io_check_error)
        addl $4,%esp
        jmp ret_from_intr

.data
ENTRY(hypercall_table)
        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
        .long SYMBOL_NAME(do_mmu_update)
        .long SYMBOL_NAME(do_set_gdt)
        .long SYMBOL_NAME(do_stack_switch)
        .long SYMBOL_NAME(do_set_callbacks)
        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
        .long SYMBOL_NAME(do_sched_op)
        .long SYMBOL_NAME(do_dom0_op)
        .long SYMBOL_NAME(do_set_debugreg)
        .long SYMBOL_NAME(do_get_debugreg)
        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
        .long SYMBOL_NAME(do_set_fast_trap)
        .long SYMBOL_NAME(do_dom_mem_op)
        .long SYMBOL_NAME(do_multicall)
        .long SYMBOL_NAME(do_update_va_mapping)
        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
        .long SYMBOL_NAME(do_event_channel_op)
        .long SYMBOL_NAME(do_xen_version)
        .long SYMBOL_NAME(do_console_io)
        .long SYMBOL_NAME(do_physdev_op)
        .long SYMBOL_NAME(do_update_va_mapping_otherdomain) /* 20 */
        .long SYMBOL_NAME(do_vm_assist)
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long SYMBOL_NAME(do_ni_hypercall)
        .endr
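
# The .rept above pads the table out to NR_hypercalls entries with
# do_ni_hypercall, so the "andl $(NR_hypercalls-1)" masking performed at the
# hypercall entry points always lands on a valid handler.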