annotate xen/arch/x86/x86_32/entry.S @ 2954:5974d9d97d89

bitkeeper revision 1.1159.170.1 (418fbcfftbJRf270n_KReJDuXIouGg)

Rejig the layout of saved activations on Xen's stack. Touches a bunch
of stuff but things are generally a bit cleaner now. Should be easier
to integrate vm86 support, and I'm now ready to do bigger changes to
the Xen->guest exit code.
author kaf24@freefall.cl.cam.ac.uk
date Mon Nov 08 18:37:51 2004 +0000 (2004-11-08)
parents 61a55dee09d8
children 4a610e420c0d
rev   line source
kaf24@1710 1 /*
kaf24@1710 2 * Hypercall and fault low-level handling routines.
kaf24@1710 3 *
kaf24@1710 4 * Copyright (c) 2002-2004, K A Fraser
kaf24@1710 5 * Copyright (c) 1991, 1992 Linus Torvalds
kaf24@1710 6 */
kaf24@1710 7
kaf24@1710 8 /*
kaf24@1710 9 * The idea for callbacks to guest OSes
kaf24@1710 10 * ====================================
kaf24@1710 11 *
kaf24@1710 12 * First, we require that all callbacks (either via a supplied
kaf24@1710 13 * interrupt-descriptor-table, or via the special event or failsafe callbacks
kaf24@1710 14 * in the shared-info-structure) are to ring 1. This just makes life easier,
kaf24@1710 15 * in that it means we don't have to do messy GDT/LDT lookups to find
kaf24@1710 16 * out the privilege level of the return code-selector. That code
kaf24@1710 17 * would just be a hassle to write, and would need to account for running
kaf24@1710 18 * off the end of the GDT/LDT, for example. For all callbacks we check
kaf24@1710 19 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from
kaf24@1710 20 * that we're safe, as we don't allow a guest OS to install ring-0
kaf24@1710 21 * privileges into the GDT/LDT.
kaf24@1710 22 * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
kaf24@1710 23 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
kaf24@1710 24 * than the correct ring) and bad things are bound to ensue -- IRET is
kaf24@1710 25 * likely to fault, and we may end up killing the domain (no harm can
kaf24@1710 26 * come to Xen, though).
kaf24@1710 27 *
kaf24@1710 28 * When doing a callback, we check if the return CS is in ring 0. If so,
kaf24@1710 29 * the callback is delayed until the next return to ring != 0.
kaf24@1710 30 * If return CS is in ring 1, then we create a callback frame
kaf24@1710 31 * starting at return SS/ESP. The base of the frame does an intra-privilege
kaf24@1710 32 * interrupt-return.
kaf24@1710 33 * If return CS is in ring > 1, we create a callback frame starting
kaf24@1710 34 * at SS/ESP taken from appropriate section of the current TSS. The base
kaf24@1710 35 * of the frame does an inter-privilege interrupt-return.
kaf24@1710 36 *
kaf24@1710 37 * Note that the "failsafe callback" uses a special stackframe:
kaf24@1710 38 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
kaf24@1710 39 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
kaf24@1710 40 * That is, the original values of DS/ES/FS/GS are placed on the stack rather
kaf24@1710 41 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have
kaf24@1710 42 * them saved/restored in the guest OS. Furthermore, if we load them we may cause
kaf24@1710 43 * a fault if they are invalid, which is a hassle to deal with. We avoid
kaf24@1710 44 * that problem if we don't load them :-) This property allows us to use
kaf24@1710 45 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
kaf24@1710 46 * on return to ring != 0, we can simply package it up as a return via
kaf24@1710 47 * the failsafe callback, and let the guest OS sort it out (perhaps by
kaf24@1710 48 * killing an application process). Note that we also do this for any
kaf24@1710 49 * faulting IRET -- just let the guest OS handle it via the failsafe
kaf24@1710 50 * callback.
kaf24@1710 51 *
kaf24@1710 52 * We terminate a domain in the following cases:
kaf24@1710 53 * - a fault while creating a callback stack frame (due to a bad ring-1 stack).
kaf24@1710 54 * - faulting IRET on entry to failsafe callback handler.
kaf24@1710 55 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
kaf24@1710 56 * handler in good order (absolutely no faults allowed!).
kaf24@1710 57 */
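
As a concrete picture of the two frame layouts described above, here is a rough C rendering. The struct and field names are invented for illustration; only the word order and the optional ESP/SS pair come from the comment above and the code further down.

    struct bounce_frame_sketch {          /* ordinary event/trap callback frame   */
        unsigned long eip;                /* lowest address == new guest %esp     */
        unsigned long cs;
        unsigned long eflags;
        unsigned long esp;                /* } present only when bouncing from    */
        unsigned long ss;                 /* } ring > 1 (inter-privilege IRET)    */
    };

    struct failsafe_frame_sketch {        /* failsafe callback frame              */
        unsigned long ds, es, fs, gs;     /* original data-segment selectors      */
        unsigned long eip;
        unsigned long cs;
        unsigned long eflags;
        unsigned long esp;                /* } optional, as above                 */
        unsigned long ss;                 /* }                                    */
    };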
kaf24@1710 58
kaf24@1710 59 #include <xen/config.h>
kaf24@1710 60 #include <xen/errno.h>
kaf24@2085 61 #include <xen/softirq.h>
kaf24@2954 62 #include <asm/x86_32/asm_defns.h>
kaf24@2827 63 #include <public/xen.h>
kaf24@1710 64
kaf24@1710 65 #define GET_CURRENT(reg) \
kaf24@1710 66 movl $4096-4, reg; \
kaf24@1710 67 orl %esp, reg; \
kaf24@1710 68 andl $~3,reg; \
kaf24@1710 69 movl (reg),reg;
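
The GET_CURRENT macro above relies on the current-task pointer being stored in the last word of the 4KB stack page that %esp points into. A rough C equivalent of its arithmetic (illustrative only, assuming that stack layout):

    /* (%esp | 0xffc) & ~3 is the address of the last 32-bit word in the
     * current 4KB stack page, where the "current" pointer is kept. */
    static inline void *get_current_sketch(unsigned long esp)
    {
        return *(void **)((esp | (4096 - 4)) & ~3UL);
    }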
kaf24@1710 70
kaf24@1710 71 ENTRY(continue_nonidle_task)
kaf24@1710 72 GET_CURRENT(%ebx)
kaf24@1710 73 jmp test_all_events
kaf24@1710 74
kaf24@1710 75 ALIGN
kaf24@1710 76 /*
kaf24@1710 77 * HYPERVISOR_multicall(call_list, nr_calls)
kaf24@1710 78 * Execute a list of 'nr_calls' hypercalls, pointed at by 'call_list'.
kaf24@1710 79 * This is fairly easy except that:
kaf24@1710 80 * 1. We may fault reading the call list, and must patch that up; and
kaf24@1710 81 * 2. We cannot recursively call HYPERVISOR_multicall, or a malicious
kaf24@1710 82 * caller could cause our stack to blow up.
kaf24@1710 83 */
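
To make the loop below easier to follow, here is an illustrative C rendering of the per-entry layout and the address check. The struct is inferred from the byte offsets used in the assembly (op at 0, five arguments at 4..20, the result written back at 24, 32 bytes per entry); it is a sketch, not a copy of the public header.

    struct multicall_entry_sketch {           /* hypothetical layout             */
        unsigned long op;                     /*  0(%ebx)                        */
        unsigned long args[5];                /*  4..20(%ebx)                    */
        unsigned long result;                 /* 24(%ebx), the "args[5]" slot    */
        unsigned long pad;                    /* entry is 1 << 5 == 32 bytes     */
    };

    /* The check that leads to bad_multicall_address: the whole list must lie
     * below Xen's address space and must not wrap around. */
    static int multicall_list_ok_sketch(unsigned long list, unsigned long nr_calls,
                                        unsigned long hypervisor_virt_start)
    {
        unsigned long end = list + (nr_calls << 5);  /* MULTICALL_ENTRY_ORDER   */
        if ( end < list )                            /* wrapped: the 'jc' case  */
            return 0;
        return end < hypervisor_virt_start;          /* 'jnc' rejects end >= it */
    }
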
kaf24@2446 84 #define MULTICALL_ENTRY_ORDER 5
kaf24@1710 85 do_multicall:
kaf24@1710 86 popl %eax
kaf24@1710 87 cmpl $SYMBOL_NAME(multicall_return_from_call),%eax
kaf24@1710 88 je multicall_return_from_call
kaf24@1710 89 pushl %ebx
kaf24@1710 90 movl 4(%esp),%ebx /* EBX == call_list */
kaf24@1710 91 movl 8(%esp),%ecx /* ECX == nr_calls */
kaf24@2446 92 /* Ensure the entire multicall list is below HYPERVISOR_VIRT_START. */
kaf24@2446 93 movl %ecx,%eax
kaf24@2446 94 shll $MULTICALL_ENTRY_ORDER,%eax
kaf24@2446 95 addl %ebx,%eax /* EAX == end of multicall list */
kaf24@2446 96 jc bad_multicall_address
kaf24@2446 97 cmpl $__HYPERVISOR_VIRT_START,%eax
kaf24@2446 98 jnc bad_multicall_address
kaf24@1710 99 multicall_loop:
kaf24@1710 100 pushl %ecx
kaf24@1710 101 multicall_fault1:
kaf24@1710 102 pushl 20(%ebx) # args[4]
kaf24@1710 103 multicall_fault2:
kaf24@1710 104 pushl 16(%ebx) # args[3]
kaf24@1710 105 multicall_fault3:
kaf24@1710 106 pushl 12(%ebx) # args[2]
kaf24@1710 107 multicall_fault4:
kaf24@1710 108 pushl 8(%ebx) # args[1]
kaf24@1710 109 multicall_fault5:
kaf24@1710 110 pushl 4(%ebx) # args[0]
kaf24@1710 111 multicall_fault6:
kaf24@1710 112 movl (%ebx),%eax # op
kaf24@1710 113 andl $(NR_hypercalls-1),%eax
kaf24@1710 114 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
kaf24@1710 115 multicall_return_from_call:
kaf24@1710 116 multicall_fault7:
kaf24@1710 117 movl %eax,24(%ebx) # args[5] == result
kaf24@1710 118 addl $20,%esp
kaf24@1710 119 popl %ecx
kaf24@2446 120 addl $(1<<MULTICALL_ENTRY_ORDER),%ebx
kaf24@1710 121 loop multicall_loop
kaf24@1710 122 popl %ebx
kaf24@1710 123 xorl %eax,%eax
kaf24@1710 124 jmp ret_from_hypercall
kaf24@1710 125
kaf24@2446 126 bad_multicall_address:
kaf24@2446 127 popl %ebx
kaf24@2446 128 movl $-EFAULT,%eax
kaf24@2446 129 jmp ret_from_hypercall
kaf24@2446 130
kaf24@1710 131 .section __ex_table,"a"
kaf24@1710 132 .align 4
kaf24@1710 133 .long multicall_fault1, multicall_fixup1
kaf24@1710 134 .long multicall_fault2, multicall_fixup2
kaf24@1710 135 .long multicall_fault3, multicall_fixup3
kaf24@1710 136 .long multicall_fault4, multicall_fixup4
kaf24@1710 137 .long multicall_fault5, multicall_fixup5
kaf24@1710 138 .long multicall_fault6, multicall_fixup6
cl349@2570 139 .long multicall_fault7, multicall_fixup6
kaf24@1710 140 .previous
kaf24@1710 141
kaf24@1710 142 .section .fixup,"ax"
kaf24@1710 143 multicall_fixup6:
kaf24@1710 144 addl $4,%esp
kaf24@1710 145 multicall_fixup5:
kaf24@1710 146 addl $4,%esp
kaf24@1710 147 multicall_fixup4:
kaf24@1710 148 addl $4,%esp
kaf24@1710 149 multicall_fixup3:
kaf24@1710 150 addl $4,%esp
kaf24@1710 151 multicall_fixup2:
kaf24@1710 152 addl $4,%esp
kaf24@1710 153 multicall_fixup1:
kaf24@1710 154 addl $4,%esp
kaf24@1710 155 popl %ebx
kaf24@1710 156 movl $-EFAULT,%eax
kaf24@1710 157 jmp ret_from_hypercall
kaf24@1710 158 .previous
kaf24@1710 159
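The multicall_fault*/multicall_fixup* pairs above (and the FAULT*/crash_domain_fixup* pairs further down) rely on Xen's exception-table mechanism: each __ex_table entry records a possibly-faulting instruction address and the address to resume at, and the fault handler consults the table before anything else. A minimal sketch of that lookup, with illustrative names (the real table lives in C trap-handling code):

    /* Entry layout matches the '.long fault, fixup' pairs emitted here. */
    struct exception_table_entry_sketch {
        unsigned long insn;      /* address of the instruction that may fault */
        unsigned long fixup;     /* address to resume at if it does           */
    };

    /* Hypothetical linear search; the real lookup is done by the trap code. */
    static unsigned long search_exception_table_sketch(
        const struct exception_table_entry_sketch *tab, unsigned long n,
        unsigned long faulting_eip)
    {
        unsigned long i;
        for ( i = 0; i < n; i++ )
            if ( tab[i].insn == faulting_eip )
                return tab[i].fixup;     /* fault handler sets EIP to this     */
        return 0;                        /* no fixup: genuine Xen bug          */
    }
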
kaf24@1710 160 ALIGN
kaf24@1710 161 restore_all_guest:
kaf24@2954 162 1: movl XREGS_ds(%esp),%ds
kaf24@2954 163 2: movl XREGS_es(%esp),%es
kaf24@2954 164 3: movl XREGS_fs(%esp),%fs
kaf24@2954 165 4: movl XREGS_gs(%esp),%gs
kaf24@1710 166 popl %ebx
kaf24@1710 167 popl %ecx
kaf24@1710 168 popl %edx
kaf24@1710 169 popl %esi
kaf24@1710 170 popl %edi
kaf24@1710 171 popl %ebp
kaf24@1710 172 popl %eax
kaf24@1710 173 addl $4,%esp
kaf24@1710 174 5: iret
kaf24@1710 175 .section .fixup,"ax"
kaf24@2954 176 6: subl $4,%esp
kaf24@2954 177 pushl %eax
kaf24@1710 178 pushl %ebp
kaf24@1710 179 pushl %edi
kaf24@1710 180 pushl %esi
kaf24@1710 181 pushl %edx
kaf24@1710 182 pushl %ecx
kaf24@1710 183 pushl %ebx
kaf24@2954 184 7: SET_XEN_SEGMENTS
kaf24@2954 185 jmp failsafe_callback
kaf24@1710 186 .previous
kaf24@1710 187 .section __ex_table,"a"
kaf24@1710 188 .align 4
kaf24@2954 189 .long 1b,7b
kaf24@1710 190 .long 2b,7b
kaf24@2954 191 .long 3b,7b
kaf24@2954 192 .long 4b,7b
kaf24@2954 193 .long 5b,6b
kaf24@1710 194 .previous
kaf24@1710 195
kaf24@1710 196 /* No special register assumptions */
kaf24@1710 197 failsafe_callback:
kaf24@1710 198 GET_CURRENT(%ebx)
kaf24@2954 199 movl DOMAIN_processor(%ebx),%eax
kaf24@1710 200 shl $4,%eax
kaf24@1710 201 lea guest_trap_bounce(%eax),%edx
kaf24@2954 202 movl DOMAIN_failsafe_addr(%ebx),%eax
kaf24@2954 203 movl %eax,GTB_eip(%edx)
kaf24@2954 204 movl DOMAIN_failsafe_sel(%ebx),%eax
kaf24@2954 205 movw %ax,GTB_cs(%edx)
kaf24@1710 206 call create_bounce_frame
kaf24@1710 207 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
kaf24@2954 208 movl XREGS_ds(%esp),%eax
kaf24@2954 209 FAULT1: movl %eax,%gs:(%esi)
kaf24@2954 210 movl XREGS_es(%esp),%eax
kaf24@2954 211 FAULT2: movl %eax,%gs:4(%esi)
kaf24@2954 212 movl XREGS_fs(%esp),%eax
kaf24@2954 213 FAULT3: movl %eax,%gs:8(%esi)
kaf24@2954 214 movl XREGS_gs(%esp),%eax
kaf24@2954 215 FAULT4: movl %eax,%gs:12(%esi)
kaf24@2954 216 movl %esi,XREGS_esp(%esp)
kaf24@1710 217 popl %ebx
kaf24@1710 218 popl %ecx
kaf24@1710 219 popl %edx
kaf24@1710 220 popl %esi
kaf24@1710 221 popl %edi
kaf24@1710 222 popl %ebp
kaf24@1710 223 popl %eax
kaf24@2954 224 addl $4,%esp
kaf24@1710 225 FAULT5: iret
kaf24@1710 226
kaf24@1710 227 ALIGN
kaf24@1710 228 restore_all_xen:
kaf24@1710 229 popl %ebx
kaf24@1710 230 popl %ecx
kaf24@1710 231 popl %edx
kaf24@1710 232 popl %esi
kaf24@1710 233 popl %edi
kaf24@1710 234 popl %ebp
kaf24@1710 235 popl %eax
kaf24@1710 236 addl $4,%esp
kaf24@1710 237 iret
kaf24@1710 238
kaf24@1710 239 ALIGN
kaf24@1710 240 ENTRY(hypercall)
kaf24@1710 241 pushl %eax # save orig_eax
kaf24@1710 242 SAVE_ALL
kaf24@2954 243 sti
kaf24@2954 244 GET_CURRENT(%ebx)
kaf24@1710 245 andl $(NR_hypercalls-1),%eax
kaf24@1710 246 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
kaf24@1710 247
kaf24@1710 248 ret_from_hypercall:
kaf24@2954 249 movl %eax,XREGS_eax(%esp) # save the return value
kaf24@1710 250
kaf24@1710 251 test_all_events:
kaf24@1710 252 xorl %ecx,%ecx
kaf24@1710 253 notl %ecx
kaf24@1710 254 cli # tests must not race interrupts
kaf24@1710 255 /*test_softirqs:*/
kaf24@2954 256 movl DOMAIN_processor(%ebx),%eax
kaf24@1710 257 shl $6,%eax # sizeof(irq_cpustat) == 64
kaf24@1710 258 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
kaf24@1710 259 jnz process_softirqs
kaf24@1710 260 /*test_guest_events:*/
kaf24@2954 261 movl DOMAIN_shared_info(%ebx),%eax
kaf24@2954 262 testb $0xFF,SHINFO_upcall_mask(%eax)
kaf24@1710 263 jnz restore_all_guest
kaf24@2954 264 testb $0xFF,SHINFO_upcall_pending(%eax)
kaf24@1710 265 jz restore_all_guest
kaf24@2954 266 movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
kaf24@1710 267 /*process_guest_events:*/
kaf24@2954 268 movl DOMAIN_processor(%ebx),%edx
kaf24@2954 269 shl $4,%edx # sizeof(guest_trap_bounce) == 16
kaf24@1710 270 lea guest_trap_bounce(%edx),%edx
kaf24@2954 271 movl DOMAIN_event_addr(%ebx),%eax
kaf24@2954 272 movl %eax,GTB_eip(%edx)
kaf24@2954 273 movl DOMAIN_event_sel(%ebx),%eax
kaf24@2954 274 movw %ax,GTB_cs(%edx)
kaf24@1710 275 call create_bounce_frame
kaf24@1710 276 jmp restore_all_guest
kaf24@1710 277
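The test_all_events path above (reached after every hypercall and on return from interrupts) behaves roughly as the C below. The field and helper names simply mirror the asm-offset symbols used here (DOMAIN_*, SHINFO_*) and are illustrative, not the real declarations.

    /* Minimal, hypothetical stand-ins so the sketch reads on its own. */
    struct shared_info_sketch  { unsigned char upcall_pending, upcall_mask; };
    struct domain_sketch {
        unsigned int               processor;      /* DOMAIN_processor   */
        struct shared_info_sketch *shared_info;    /* DOMAIN_shared_info */
        unsigned int               event_sel;      /* DOMAIN_event_sel   */
        unsigned long              event_addr;     /* DOMAIN_event_addr  */
    };

    extern int  softirq_pending_sketch(unsigned int cpu);   /* the irq_stat test */
    extern void do_softirq_sketch(void);
    extern void create_bounce_frame_sketch(unsigned int cs, unsigned long eip);

    static void test_all_events_sketch(struct domain_sketch *d)
    {
        for ( ; ; )
        {
            /* The asm runs these tests with interrupts disabled (cli). */
            if ( softirq_pending_sketch(d->processor) )
            {
                do_softirq_sketch();                 /* process_softirqs...     */
                continue;                            /* ...then re-test         */
            }
            if ( !d->shared_info->upcall_mask && d->shared_info->upcall_pending )
            {
                d->shared_info->upcall_mask = 1;     /* masked during delivery  */
                create_bounce_frame_sketch(d->event_sel, d->event_addr);
            }
            return;                                  /* restore_all_guest       */
        }
    }
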
kaf24@1710 278 ALIGN
kaf24@1710 279 process_softirqs:
kaf24@1710 280 sti
kaf24@1710 281 call SYMBOL_NAME(do_softirq)
kaf24@1710 282 jmp test_all_events
kaf24@1710 283
kaf24@2954 284 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
kaf24@2954 285 /* {EIP, CS, EFLAGS, [ESP, SS]} */
kaf24@2954 286 /* %edx == guest_trap_bounce, %ebx == task_struct */
kaf24@2954 287 /* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
kaf24@1710 288 create_bounce_frame:
kaf24@2954 289 mov XREGS_cs+4(%esp),%cl
kaf24@1710 290 test $2,%cl
kaf24@1710 291 jz 1f /* jump if returning to an existing ring-1 activation */
kaf24@1710 292 /* obtain ss/esp from TSS -- no current ring-1 activations */
kaf24@2954 293 movl DOMAIN_processor(%ebx),%eax
kaf24@1710 294 /* next 4 lines multiply %eax by 8320 (= 2^13 + 2^7), which is sizeof(tss_struct) */
kaf24@1710 295 movl %eax, %ecx
kaf24@1710 296 shll $7, %ecx
kaf24@1710 297 shll $13, %eax
kaf24@1710 298 addl %ecx,%eax
kaf24@1710 299 addl $init_tss + 12,%eax
kaf24@1710 300 movl (%eax),%esi /* tss->esp1 */
kaf24@2954 301 FAULT6: movl 4(%eax),%gs /* tss->ss1 */
kaf24@1710 302 /* base of stack frame must contain ss/esp (inter-priv iret) */
kaf24@1710 303 subl $8,%esi
kaf24@2954 304 movl XREGS_esp+4(%esp),%eax
kaf24@2954 305 FAULT7: movl %eax,%gs:(%esi)
kaf24@2954 306 movl XREGS_ss+4(%esp),%eax
kaf24@2954 307 FAULT8: movl %eax,%gs:4(%esi)
kaf24@1710 308 jmp 2f
kaf24@1710 309 1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
kaf24@2954 310 movl XREGS_esp+4(%esp),%esi
kaf24@2954 311 FAULT9: movl XREGS_ss+4(%esp),%gs
kaf24@1710 312 2: /* Construct a stack frame: EFLAGS, CS/EIP */
kaf24@1710 313 subl $12,%esi
kaf24@2954 314 movl XREGS_eip+4(%esp),%eax
kaf24@2954 315 FAULT10:movl %eax,%gs:(%esi)
kaf24@2954 316 movl XREGS_cs+4(%esp),%eax
kaf24@2954 317 FAULT11:movl %eax,%gs:4(%esi)
kaf24@2954 318 movl XREGS_eflags+4(%esp),%eax
kaf24@2954 319 FAULT12:movl %eax,%gs:8(%esi)
kaf24@1710 320 /* Rewrite our stack frame and return to ring 1. */
kaf24@1710 321 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
kaf24@1710 322 andl $0xfffcbeff,%eax
kaf24@2954 323 movl %eax,XREGS_eflags+4(%esp)
kaf24@2954 324 movl %gs,XREGS_ss+4(%esp)
kaf24@2954 325 movl %esi,XREGS_esp+4(%esp)
kaf24@2954 326 movzwl GTB_cs(%edx),%eax
kaf24@2954 327 movl %eax,XREGS_cs+4(%esp)
kaf24@2954 328 movl GTB_eip(%edx),%eax
kaf24@2954 329 movl %eax,XREGS_eip+4(%esp)
kaf24@1710 330 ret
kaf24@2954 331
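For reference, create_bounce_frame above behaves roughly like the C below; the names are hypothetical, and the real code writes through %gs with an exception-table fixup on every store. The EFLAGS mask 0xfffcbeff clears TF (bit 8), NT (bit 14), RF (bit 16) and VM (bit 17), matching the cited manual behaviour.

    /* 'regs' stands for the saved XREGS_* frame on Xen's stack; tss_esp1/tss_ss1
     * are the ring-1 stack fields read from the TSS; gtb_cs/gtb_eip come from
     * the guest_trap_bounce slot addressed by %edx. */
    struct bounce_regs_sketch { unsigned long eip, cs, eflags, esp, ss; };

    static void build_guest_frame_sketch(struct bounce_regs_sketch *regs,
                                         unsigned long tss_esp1, unsigned long tss_ss1,
                                         unsigned int gtb_cs, unsigned long gtb_eip)
    {
        unsigned long *frame;

        if ( regs->cs & 2 )
        {
            /* Ring 2/3: no ring-1 activation yet, so start from the TSS's ring-1
             * stack and include ESP/SS for an inter-privilege IRET. */
            frame    = (unsigned long *)tss_esp1 - 2;
            frame[0] = regs->esp;
            frame[1] = regs->ss;
            regs->ss = tss_ss1;
        }
        else
            frame = (unsigned long *)regs->esp;      /* existing ring-1 stack   */

        frame   -= 3;                                /* EIP, CS, EFLAGS         */
        frame[0] = regs->eip;
        frame[1] = regs->cs;
        frame[2] = regs->eflags;                     /* guest sees unmasked flags */

        /* Rewrite our own saved frame so restore_all_guest enters the handler. */
        regs->eflags &= 0xfffcbeff;                  /* clear TF, NT, RF, VM    */
        regs->esp     = (unsigned long)frame;
        regs->cs      = gtb_cs;
        regs->eip     = gtb_eip;
    }
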
kaf24@1710 332 .section __ex_table,"a"
kaf24@1710 333 .align 4
kaf24@1710 334 .long FAULT1, crash_domain_fixup3 # Fault writing to ring-1 stack
kaf24@1710 335 .long FAULT2, crash_domain_fixup3 # Fault writing to ring-1 stack
kaf24@1710 336 .long FAULT3, crash_domain_fixup3 # Fault writing to ring-1 stack
kaf24@1710 337 .long FAULT4, crash_domain_fixup3 # Fault writing to ring-1 stack
kaf24@1710 338 .long FAULT5, crash_domain_fixup1 # Fault executing failsafe iret
kaf24@1710 339 .long FAULT6, crash_domain_fixup2 # Fault loading ring-1 stack selector
kaf24@1710 340 .long FAULT7, crash_domain_fixup2 # Fault writing to ring-1 stack
kaf24@1710 341 .long FAULT8, crash_domain_fixup2 # Fault writing to ring-1 stack
kaf24@1710 342 .long FAULT9, crash_domain_fixup2 # Fault loading ring-1 stack selector
kaf24@1710 343 .long FAULT10,crash_domain_fixup2 # Fault writing to ring-1 stack
kaf24@1710 344 .long FAULT11,crash_domain_fixup2 # Fault writing to ring-1 stack
kaf24@1710 345 .long FAULT12,crash_domain_fixup2 # Fault writing to ring-1 stack
kaf24@1710 346 .long FAULT13,crash_domain_fixup3 # Fault writing to ring-1 stack
kaf24@1710 347 .long FAULT14,crash_domain_fixup3 # Fault writing to ring-1 stack
kaf24@1710 348 .previous
kaf24@1710 349
kaf24@1710 350 # This handler kills domains which experience unrecoverable faults.
kaf24@1710 351 .section .fixup,"ax"
kaf24@1710 352 crash_domain_fixup1:
kaf24@1710 353 subl $4,%esp
kaf24@1710 354 SAVE_ALL
kaf24@2954 355 sti
kaf24@1710 356 jmp domain_crash
kaf24@1710 357 crash_domain_fixup2:
kaf24@1710 358 addl $4,%esp
kaf24@1710 359 crash_domain_fixup3:
kaf24@1710 360 jmp domain_crash
kaf24@1710 361 .previous
kaf24@1710 362
kaf24@1710 363 ALIGN
kaf24@1710 364 process_guest_exception_and_events:
kaf24@2954 365 movl DOMAIN_processor(%ebx),%eax
kaf24@1710 366 shl $4,%eax
kaf24@1710 367 lea guest_trap_bounce(%eax),%edx
kaf24@2954 368 testb $~0,GTB_flags(%edx)
kaf24@1710 369 jz test_all_events
kaf24@1710 370 call create_bounce_frame # just the basic frame
kaf24@2954 371 mov GTB_flags(%edx),%cl
kaf24@1710 372 test $GTBF_TRAP_NOCODE,%cl
kaf24@1710 373 jnz 2f
kaf24@1710 374 subl $4,%esi # push error_code onto guest frame
kaf24@2954 375 movl GTB_error_code(%edx),%eax
kaf24@2954 376 FAULT13:movl %eax,%gs:(%esi)
kaf24@1710 377 test $GTBF_TRAP_CR2,%cl
kaf24@1710 378 jz 1f
kaf24@1710 379 subl $4,%esi # push %cr2 onto guest frame
kaf24@2954 380 movl GTB_cr2(%edx),%eax
kaf24@2954 381 FAULT14:movl %eax,%gs:(%esi)
kaf24@2954 382 1: movl %esi,XREGS_esp(%esp)
kaf24@2954 383 2: movb $0,GTB_flags(%edx)
kaf24@1710 384 jmp test_all_events
kaf24@1710 385
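The flag handling above corresponds roughly to the following C. GTBF_TRAP_NOCODE and GTBF_TRAP_CR2 are the flags the code actually tests; the struct layout, names and flag values below are illustrative (the real guest_trap_bounce is 16 bytes, per the 'shl $4' used elsewhere in this file).

    enum { GTBF_TRAP_NOCODE_SKETCH = 1, GTBF_TRAP_CR2_SKETCH = 2 };  /* values hypothetical */

    struct guest_trap_bounce_sketch {
        unsigned long  error_code;                   /* GTB_error_code */
        unsigned long  cr2;                          /* GTB_cr2        */
        unsigned short flags;                        /* GTB_flags      */
        unsigned short cs;                           /* GTB_cs         */
        unsigned long  eip;                          /* GTB_eip        */
    };

    static void push_trap_details_sketch(struct guest_trap_bounce_sketch *gtb,
                                         unsigned long **guest_sp /* new ring-1 esp */)
    {
        if ( gtb->flags == 0 )
            return;                                  /* nothing pending            */
        /* The asm calls create_bounce_frame here, building {EIP,CS,EFLAGS[,ESP,SS]}. */
        if ( !(gtb->flags & GTBF_TRAP_NOCODE_SKETCH) )
        {
            *--(*guest_sp) = gtb->error_code;        /* push error code            */
            if ( gtb->flags & GTBF_TRAP_CR2_SKETCH )
                *--(*guest_sp) = gtb->cr2;           /* push faulting address      */
        }
        gtb->flags = 0;
    }
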
kaf24@1710 386 ALIGN
kaf24@1710 387 ENTRY(ret_from_intr)
kaf24@1710 388 GET_CURRENT(%ebx)
kaf24@2954 389 movb XREGS_cs(%esp),%al
kaf24@1710 390 testb $3,%al # return to non-supervisor?
kaf24@1710 391 jne test_all_events
kaf24@1710 392 jmp restore_all_xen
kaf24@1710 393
kaf24@1710 394 ENTRY(divide_error)
kaf24@1710 395 pushl $0 # no error code
kaf24@1710 396 pushl $ SYMBOL_NAME(do_divide_error)
kaf24@1710 397 ALIGN
kaf24@1710 398 error_code:
kaf24@2954 399 cld
kaf24@2954 400 pushl %ebp
kaf24@1710 401 pushl %edi
kaf24@1710 402 pushl %esi
kaf24@1710 403 pushl %edx
kaf24@1710 404 pushl %ecx
kaf24@1710 405 pushl %ebx
kaf24@2954 406 movb XREGS_cs(%esp),%bl
kaf24@2954 407 testb $3,%bl
kaf24@2954 408 je 1f
kaf24@2954 409 movl %ds,XREGS_ds(%esp)
kaf24@2954 410 movl %es,XREGS_es(%esp)
kaf24@2954 411 movl %fs,XREGS_fs(%esp)
kaf24@2954 412 movl %gs,XREGS_gs(%esp)
kaf24@2954 413 1: SET_XEN_SEGMENTS
kaf24@2954 414 movl XREGS_orig_eax(%esp),%esi # get the error code
kaf24@2954 415 movl XREGS_eax(%esp),%edi # get the function address
kaf24@2954 416 movl %eax,XREGS_eax(%esp)
kaf24@1710 417 movl %esp,%edx
kaf24@1710 418 pushl %esi # push the error code
ach61@2843 419 pushl %edx # push the xen_regs pointer
kaf24@1710 420 GET_CURRENT(%ebx)
kaf24@2954 421 call *%edi
kaf24@1710 422 addl $8,%esp
kaf24@2954 423 movb XREGS_cs(%esp),%al
kaf24@1710 424 testb $3,%al
kaf24@1710 425 je restore_all_xen
kaf24@1710 426 jmp process_guest_exception_and_events
kaf24@1710 427
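Each trap stub (divide_error above, and the rest below) pushes an error code (or 0) and the address of its do_* routine; the shared error_code path then amounts to roughly the following, with hypothetical names:

    struct trap_regs_sketch { unsigned long cs; /* plus the other saved registers */ };

    typedef void (*trap_handler_sketch_t)(struct trap_regs_sketch *regs,
                                          unsigned long error_code);

    extern void process_guest_exception_and_events_sketch(struct trap_regs_sketch *regs);

    static void error_code_sketch(struct trap_regs_sketch *regs,
                                  trap_handler_sketch_t handler,  /* pushed by the stub  */
                                  unsigned long error_code)       /* pushed by stub/CPU  */
    {
        handler(regs, error_code);                   /* the 'call *%edi' above           */
        if ( (regs->cs & 3) == 0 )
            return;                                  /* interrupted Xen: restore_all_xen */
        process_guest_exception_and_events_sketch(regs);
    }
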
kaf24@1710 428 ENTRY(coprocessor_error)
kaf24@1710 429 pushl $0
kaf24@1710 430 pushl $ SYMBOL_NAME(do_coprocessor_error)
kaf24@1710 431 jmp error_code
kaf24@1710 432
kaf24@1710 433 ENTRY(simd_coprocessor_error)
kaf24@1710 434 pushl $0
kaf24@1710 435 pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
kaf24@1710 436 jmp error_code
kaf24@1710 437
kaf24@1710 438 ENTRY(device_not_available)
kaf24@1710 439 pushl $0
kaf24@1710 440 pushl $SYMBOL_NAME(math_state_restore)
kaf24@1710 441 jmp error_code
kaf24@1710 442
kaf24@1710 443 ENTRY(debug)
kaf24@1710 444 pushl $0
kaf24@1710 445 pushl $ SYMBOL_NAME(do_debug)
kaf24@1710 446 jmp error_code
kaf24@1710 447
kaf24@1710 448 ENTRY(int3)
kaf24@1710 449 pushl $0
kaf24@1710 450 pushl $ SYMBOL_NAME(do_int3)
kaf24@1710 451 jmp error_code
kaf24@1710 452
kaf24@1710 453 ENTRY(overflow)
kaf24@1710 454 pushl $0
kaf24@1710 455 pushl $ SYMBOL_NAME(do_overflow)
kaf24@1710 456 jmp error_code
kaf24@1710 457
kaf24@1710 458 ENTRY(bounds)
kaf24@1710 459 pushl $0
kaf24@1710 460 pushl $ SYMBOL_NAME(do_bounds)
kaf24@1710 461 jmp error_code
kaf24@1710 462
kaf24@1710 463 ENTRY(invalid_op)
kaf24@1710 464 pushl $0
kaf24@1710 465 pushl $ SYMBOL_NAME(do_invalid_op)
kaf24@1710 466 jmp error_code
kaf24@1710 467
kaf24@1710 468 ENTRY(coprocessor_segment_overrun)
kaf24@1710 469 pushl $0
kaf24@1710 470 pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
kaf24@1710 471 jmp error_code
kaf24@1710 472
kaf24@1710 473 ENTRY(invalid_TSS)
kaf24@1710 474 pushl $ SYMBOL_NAME(do_invalid_TSS)
kaf24@1710 475 jmp error_code
kaf24@1710 476
kaf24@1710 477 ENTRY(segment_not_present)
kaf24@1710 478 pushl $ SYMBOL_NAME(do_segment_not_present)
kaf24@1710 479 jmp error_code
kaf24@1710 480
kaf24@1710 481 ENTRY(stack_segment)
kaf24@1710 482 pushl $ SYMBOL_NAME(do_stack_segment)
kaf24@1710 483 jmp error_code
kaf24@1710 484
kaf24@1710 485 ENTRY(general_protection)
kaf24@1710 486 pushl $ SYMBOL_NAME(do_general_protection)
kaf24@1710 487 jmp error_code
kaf24@1710 488
kaf24@1710 489 ENTRY(alignment_check)
kaf24@1710 490 pushl $ SYMBOL_NAME(do_alignment_check)
kaf24@1710 491 jmp error_code
kaf24@1710 492
kaf24@1710 493 ENTRY(page_fault)
kaf24@1710 494 pushl $ SYMBOL_NAME(do_page_fault)
kaf24@1710 495 jmp error_code
kaf24@1710 496
kaf24@1710 497 ENTRY(machine_check)
kaf24@1710 498 pushl $0
kaf24@1710 499 pushl $ SYMBOL_NAME(do_machine_check)
kaf24@1710 500 jmp error_code
kaf24@1710 501
kaf24@1710 502 ENTRY(spurious_interrupt_bug)
kaf24@1710 503 pushl $0
kaf24@1710 504 pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
kaf24@1710 505 jmp error_code
kaf24@1710 506
kaf24@1710 507 ENTRY(nmi)
kaf24@1710 508 # Save state but do not trash the segment registers!
kaf24@1710 509 # We may otherwise be unable to reload them or copy them to ring 1.
kaf24@1710 510 pushl %eax
kaf24@1710 511 SAVE_ALL_NOSEGREGS
kaf24@1710 512
kaf24@2085 513 # Check for hardware problems.
kaf24@1710 514 inb $0x61,%al
kaf24@1710 515 testb $0x80,%al
kaf24@2080 516 jne nmi_parity_err
kaf24@1710 517 testb $0x40,%al
kaf24@1710 518 jne nmi_io_err
kaf24@1710 519 movl %eax,%ebx
kaf24@1710 520
kaf24@1710 521 # Okay, it's almost a normal NMI tick. We can only process it if:
kaf24@1710 522 # A. We are the outermost Xen activation (in which case we have
kaf24@1710 523 # the selectors safely saved on our stack)
kaf24@1710 524 # B. DS-GS all contain sane Xen values.
kaf24@1710 525 # In all other cases we bail without touching DS-GS, as we have
kaf24@1710 526 # interrupted an enclosing Xen activation in tricky prologue or
kaf24@1710 527 # epilogue code.
kaf24@2954 528 movb XREGS_cs(%esp),%al
kaf24@1710 529 testb $3,%al
kaf24@1710 530 jne do_watchdog_tick
kaf24@2954 531 movl XREGS_ds(%esp),%eax
kaf24@1710 532 cmpw $(__HYPERVISOR_DS),%ax
kaf24@2954 533 jne restore_all_xen
kaf24@2954 534 movl XREGS_es(%esp),%eax
kaf24@1710 535 cmpw $(__HYPERVISOR_DS),%ax
kaf24@2954 536 jne restore_all_xen
kaf24@2954 537 movl XREGS_fs(%esp),%eax
kaf24@1710 538 cmpw $(__HYPERVISOR_DS),%ax
kaf24@2954 539 jne restore_all_xen
kaf24@2954 540 movl XREGS_gs(%esp),%eax
kaf24@1710 541 cmpw $(__HYPERVISOR_DS),%ax
kaf24@2954 542 jne restore_all_xen
kaf24@1710 543
kaf24@1710 544 do_watchdog_tick:
kaf24@1710 545 movl $(__HYPERVISOR_DS),%edx
kaf24@1710 546 movl %edx,%ds
kaf24@1710 547 movl %edx,%es
kaf24@1710 548 movl %esp,%edx
kaf24@1710 549 pushl %ebx # reason
kaf24@1710 550 pushl %edx # regs
kaf24@1710 551 call SYMBOL_NAME(do_nmi)
kaf24@1710 552 addl $8,%esp
kaf24@2954 553 movb XREGS_cs(%esp),%al
kaf24@1710 554 testb $3,%al
kaf24@1710 555 je restore_all_xen
kaf24@1710 556 GET_CURRENT(%ebx)
kaf24@1710 557 jmp restore_all_guest
kaf24@1710 558
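Summarising the NMI path above in C: port 0x61 distinguishes memory-parity and I/O-check NMIs, and an ordinary watchdog tick is handed to do_nmi() only when it is safe to load Xen's data segments. The port bits and the __HYPERVISOR_DS comparisons come from the code; the helper names are illustrative.

    extern unsigned char inb_sketch(unsigned short port);
    extern void nmi_parity_err_sketch(unsigned char status);
    extern void nmi_io_err_sketch(unsigned char status);
    extern void do_watchdog_tick_sketch(void);       /* loads Xen DS/ES, calls do_nmi() */

    static void nmi_sketch(unsigned int saved_cs, unsigned int ds, unsigned int es,
                           unsigned int fs, unsigned int gs, unsigned int xen_ds)
    {
        unsigned char status = inb_sketch(0x61);

        if ( status & 0x80 ) { nmi_parity_err_sketch(status); return; }
        if ( status & 0x40 ) { nmi_io_err_sketch(status);     return; }

        /* Safe to proceed if we interrupted a guest (outermost Xen activation),
         * or if DS-GS already hold sane Xen selectors (xen_ds == __HYPERVISOR_DS). */
        if ( (saved_cs & 3) != 0 ||
             (ds == xen_ds && es == xen_ds && fs == xen_ds && gs == xen_ds) )
            do_watchdog_tick_sketch();
        /* otherwise bail without touching DS-GS: restore_all_xen */
    }
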
kaf24@2085 559 nmi_parity_err:
kaf24@2085 560 # Clear and disable the parity-error line
kaf24@2085 561 andb $0xf,%al
kaf24@2085 562 orb $0x4,%al
kaf24@2085 563 outb %al,$0x61
kaf24@2085 564 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
kaf24@2954 565 je restore_all_xen
kaf24@2085 566 bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
kaf24@2085 567 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
kaf24@2085 568 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
kaf24@2954 569 je restore_all_xen
kaf24@2085 570 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
kaf24@1710 571 movl %edx,%ds
kaf24@1710 572 movl %edx,%es
kaf24@2079 573 movl %esp,%edx
kaf24@2079 574 push %edx
kaf24@2079 575 call SYMBOL_NAME(mem_parity_error)
kaf24@2085 576 addl $4,%esp
kaf24@2085 577 jmp ret_from_intr
kaf24@2085 578
kaf24@1710 579 nmi_io_err:
kaf24@2085 580 # Clear and disable the I/O-error line
kaf24@2085 581 andb $0xf,%al
kaf24@2085 582 orb $0x8,%al
kaf24@2085 583 outb %al,$0x61
kaf24@2085 584 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
kaf24@2954 585 je restore_all_xen
kaf24@2085 586 bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
kaf24@2085 587 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
kaf24@2085 588 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
kaf24@2954 589 je restore_all_xen
kaf24@2085 590 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
kaf24@1710 591 movl %edx,%ds
kaf24@1710 592 movl %edx,%es
kaf24@2079 593 movl %esp,%edx
kaf24@2079 594 push %edx
kaf24@2079 595 call SYMBOL_NAME(io_check_error)
kaf24@2085 596 addl $4,%esp
kaf24@2085 597 jmp ret_from_intr
kaf24@2079 598
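Both hardware-error paths above follow the same pattern, keyed off the opt_nmi boot option: 'i' (ignore) drops the error, 'd' (dom0) only latches it for the NMI softirq, and anything else treats it as fatal and reports it immediately. A rough C rendering, with illustrative names:

    /* 'reason_bit' is 0 for parity errors, 1 for I/O-check errors;
     * 'clear_bit' is 0x4 or 0x8 written back to port 0x61. */
    extern void outb_sketch(unsigned char val, unsigned short port);
    extern void set_bit_sketch(int nr, volatile unsigned long *addr);
    extern unsigned long nmi_softirq_reason_sketch, irq_stat_sketch;
    extern int  nmi_softirq_nr_sketch;               /* NMI_SOFTIRQ                      */
    extern void report_fatal_nmi_sketch(int reason_bit);  /* mem_parity/io_check_error   */

    static void nmi_hw_err_sketch(unsigned char status, unsigned char clear_bit,
                                  int reason_bit, char opt_nmi)
    {
        /* Clear and disable the offending error line. */
        outb_sketch((status & 0x0f) | clear_bit, 0x61);

        if ( opt_nmi == 'i' )                        /* nmi=ignore */
            return;

        /* Latch the reason and raise the NMI softirq so dom0 can be informed. */
        set_bit_sketch(reason_bit, &nmi_softirq_reason_sketch);
        set_bit_sketch(nmi_softirq_nr_sketch, &irq_stat_sketch);
        if ( opt_nmi == 'd' )                        /* nmi=dom0 */
            return;

        report_fatal_nmi_sketch(reason_bit);         /* nmi=fatal */
    }
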
kaf24@1710 599 .data
kaf24@1710 600 ENTRY(hypercall_table)
kaf24@1710 601 .long SYMBOL_NAME(do_set_trap_table) /* 0 */
kaf24@1710 602 .long SYMBOL_NAME(do_mmu_update)
kaf24@1710 603 .long SYMBOL_NAME(do_set_gdt)
kaf24@1710 604 .long SYMBOL_NAME(do_stack_switch)
kaf24@1710 605 .long SYMBOL_NAME(do_set_callbacks)
kaf24@1710 606 .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
kaf24@1710 607 .long SYMBOL_NAME(do_sched_op)
kaf24@1710 608 .long SYMBOL_NAME(do_dom0_op)
kaf24@1710 609 .long SYMBOL_NAME(do_set_debugreg)
kaf24@1710 610 .long SYMBOL_NAME(do_get_debugreg)
kaf24@1710 611 .long SYMBOL_NAME(do_update_descriptor) /* 10 */
kaf24@1710 612 .long SYMBOL_NAME(do_set_fast_trap)
kaf24@1710 613 .long SYMBOL_NAME(do_dom_mem_op)
kaf24@1710 614 .long SYMBOL_NAME(do_multicall)
kaf24@1710 615 .long SYMBOL_NAME(do_update_va_mapping)
kaf24@1710 616 .long SYMBOL_NAME(do_set_timer_op) /* 15 */
kaf24@1710 617 .long SYMBOL_NAME(do_event_channel_op)
kaf24@1710 618 .long SYMBOL_NAME(do_xen_version)
kaf24@1710 619 .long SYMBOL_NAME(do_console_io)
kaf24@1710 620 .long SYMBOL_NAME(do_physdev_op)
kaf24@2375 621 .long SYMBOL_NAME(do_grant_table_op) /* 20 */
kaf24@2111 622 .long SYMBOL_NAME(do_vm_assist)
kaf24@2375 623 .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
kaf24@1710 624 .rept NR_hypercalls-((.-hypercall_table)/4)
kaf24@1710 625 .long SYMBOL_NAME(do_ni_hypercall)
kaf24@1710 626 .endr