debuggers.hg

annotate xen/arch/x86/x86_32/entry.S @ 3207:861d3cdc1dc5

bitkeeper revision 1.1159.187.22 (41a89729VHxMK8Tp4qcLrOPCoZrdog)

First cut for VM86 support, based on Stephan Diestelhorst's patches. It
doesn't actually work yet -- it's possible that e.g., signal delivery is
broken.
author kaf24@scramble.cl.cam.ac.uk
date Sat Nov 27 15:03:05 2004 +0000 (2004-11-27)
parents a46548db5e52
children 08ca2c180189 4580e96f30e1
rev   line source
kaf24@1710 1 /*
kaf24@1710 2 * Hypercall and fault low-level handling routines.
kaf24@1710 3 *
kaf24@1710 4 * Copyright (c) 2002-2004, K A Fraser
kaf24@1710 5 * Copyright (c) 1991, 1992 Linus Torvalds
kaf24@3127 6 *
kaf24@3127 7 * Calling back to a guest OS:
kaf24@3127 8 * ===========================
kaf24@3127 9 *
kaf24@1710 10 * First, we require that all callbacks (either via a supplied
kaf24@1710 11 * interrupt-descriptor-table, or via the special event or failsafe callbacks
kaf24@1710 12 * in the shared-info-structure) are to ring 1. This just makes life easier,
kaf24@1710 13 * in that it means we don't have to do messy GDT/LDT lookups to find
kaf24@1710 14 * out the privilege level of the return code-selector. That code
kaf24@1710 15 * would just be a hassle to write, and would need to account for running
kaf24@1710 16 * off the end of the GDT/LDT, for example. For all callbacks we check
kaf24@3127 17 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
kaf24@3127 18 * we're safe, as we don't allow a guest OS to install ring-0 privileges into the
kaf24@3127 19 * GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are to
kaf24@3127 20 * ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
kaf24@3127 21 * rather than the correct ring) and bad things are bound to ensue -- IRET is
kaf24@1710 22 * likely to fault, and we may end up killing the domain (no harm can
kaf24@1710 23 * come to Xen, though).
kaf24@1710 24 *
kaf24@1710 25 * When doing a callback, we check if the return CS is in ring 0. If so,
kaf24@1710 26 * the callback is delayed until the next return to ring != 0.
kaf24@1710 27 * If return CS is in ring 1, then we create a callback frame
kaf24@1710 28 * starting at return SS/ESP. The base of the frame does an intra-privilege
kaf24@1710 29 * interrupt-return.
kaf24@1710 30 * If return CS is in ring > 1, we create a callback frame starting
kaf24@1710 31 * at SS/ESP taken from the appropriate section of the current TSS. The base
kaf24@1710 32 * of the frame does an inter-privilege interrupt-return.
kaf24@1710 33 *
kaf24@1710 34 * Note that the "failsafe callback" uses a special stack frame:
kaf24@1710 35 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
kaf24@1710 36 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
kaf24@1710 37 * That is, original values for DS/ES/FS/GS are placed on stack rather than
kaf24@1710 38 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
kaf24@1710 39 * saved/restored by the guest OS. Furthermore, if we load them we may cause
kaf24@1710 40 * a fault if they are invalid, which is a hassle to deal with. We avoid
kaf24@1710 41 * that problem if we don't load them :-) This property allows us to use
kaf24@1710 42 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
kaf24@1710 43 * on return to ring != 0, we can simply package it up as a return via
kaf24@1710 44 * the failsafe callback, and let the guest OS sort it out (perhaps by
kaf24@1710 45 * killing an application process). Note that we also do this for any
kaf24@1710 46 * faulting IRET -- just let the guest OS handle it via the event
kaf24@1710 47 * callback.
kaf24@1710 48 *
kaf24@1710 49 * We terminate a domain in the following cases:
kaf24@1710 50 * - a fault while creating a callback stack frame (due to a bad ring-1 stack).
kaf24@1710 51 * - a faulting IRET on entry to the failsafe callback handler.
kaf24@1710 52 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
kaf24@1710 53 * handler in good order (absolutely no faults allowed!).
kaf24@1710 54 */
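
As a reading aid, the failsafe stack frame described above can be pictured as the
following C layout. This is a sketch only: the struct and field names are
illustrative and do not come from the Xen headers, and the trailing ESP/SS pair
is present only when the return is to ring > 1.

    #include <stdint.h>

    /* Lowest address (top of the guest stack) first, matching the order in
     * which create_bounce_frame (below) writes the failsafe frame. */
    struct failsafe_callback_frame {
        uint32_t ds, es, fs, gs;   /* original data-segment selectors      */
        uint32_t eip;              /* return EIP                           */
        uint32_t cs;               /* return CS (never __HYPERVISOR_CS)    */
        uint32_t eflags;           /* return EFLAGS                        */
        uint32_t esp, ss;          /* only for returns to ring > 1         */
    };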
kaf24@1710 55
kaf24@1710 56 #include <xen/config.h>
kaf24@1710 57 #include <xen/errno.h>
kaf24@2085 58 #include <xen/softirq.h>
kaf24@2954 59 #include <asm/x86_32/asm_defns.h>
kaf24@2827 60 #include <public/xen.h>
kaf24@1710 61
kaf24@1710 62 #define GET_CURRENT(reg) \
kaf24@3010 63 movl $8192-4, reg; \
kaf24@1710 64 orl %esp, reg; \
kaf24@1710 65 andl $~3,reg; \
kaf24@1710 66 movl (reg),reg;
kaf24@1710 67
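The GET_CURRENT macro recovers the current task pointer from the word stored at
the top of the current 8KB, 8KB-aligned CPU stack. A minimal C rendering of the
same pointer arithmetic (a sketch; the helper name is made up):

    #include <stdint.h>

    /* (esp | 0x1FFC) & ~3 rounds ESP up to the last 32-bit word of the
     * 8KB-aligned stack; that word holds a pointer to the current task. */
    static inline void *get_current_from_esp(uintptr_t esp)
    {
        uintptr_t slot = (esp | (8192 - 4)) & ~(uintptr_t)3;
        return *(void **)slot;
    }
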
kaf24@1710 68 ENTRY(continue_nonidle_task)
kaf24@1710 69 GET_CURRENT(%ebx)
kaf24@1710 70 jmp test_all_events
kaf24@1710 71
kaf24@1710 72 ALIGN
kaf24@1710 73 restore_all_guest:
kaf24@3127 74 testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
kaf24@3127 75 jnz failsafe_callback
kaf24@3207 76 testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
kaf24@3207 77 jnz restore_all_vm86
kaf24@3127 78 FLT1: movl XREGS_ds(%esp),%ds
kaf24@3127 79 FLT2: movl XREGS_es(%esp),%es
kaf24@3127 80 FLT3: movl XREGS_fs(%esp),%fs
kaf24@3127 81 FLT4: movl XREGS_gs(%esp),%gs
kaf24@3207 82 restore_all_vm86:
kaf24@1710 83 popl %ebx
kaf24@1710 84 popl %ecx
kaf24@1710 85 popl %edx
kaf24@1710 86 popl %esi
kaf24@1710 87 popl %edi
kaf24@1710 88 popl %ebp
kaf24@1710 89 popl %eax
kaf24@2954 90 addl $4,%esp # skip error_code/entry_vector slot
kaf24@3127 91 FLT5: iret
kaf24@3127 92 .section .fixup,"ax"
kaf24@3127 93 FIX5: subl $28,%esp
kaf24@3127 94 pushl 28(%esp) # error_code/entry_vector
kaf24@3127 95 movl %eax,XREGS_eax+4(%esp)
kaf24@3127 96 movl %ebp,XREGS_ebp+4(%esp)
kaf24@3127 97 movl %edi,XREGS_edi+4(%esp)
kaf24@3127 98 movl %esi,XREGS_esi+4(%esp)
kaf24@3127 99 movl %edx,XREGS_edx+4(%esp)
kaf24@3127 100 movl %ecx,XREGS_ecx+4(%esp)
kaf24@3127 101 movl %ebx,XREGS_ebx+4(%esp)
kaf24@3127 102 FIX1: SET_XEN_SEGMENTS(a)
kaf24@3127 103 movl %eax,%fs
kaf24@3127 104 movl %eax,%gs
kaf24@3127 105 sti
kaf24@3127 106 popl %esi
kaf24@3127 107 pushfl # EFLAGS
kaf24@3127 108 movl $__HYPERVISOR_CS,%eax
kaf24@3127 109 pushl %eax # CS
kaf24@3127 110 movl $DBLFLT1,%eax
kaf24@3127 111 pushl %eax # EIP
kaf24@3127 112 pushl %esi # error_code/entry_vector
kaf24@3127 113 jmp error_code
kaf24@3127 114 DBLFLT1:GET_CURRENT(%ebx)
kaf24@3127 115 jmp test_all_events
kaf24@3127 116 DBLFIX1:GET_CURRENT(%ebx)
kaf24@3127 117 testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
kaf24@3127 118 jnz domain_crash # cannot reenter failsafe code
kaf24@3127 119 orb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
kaf24@3127 120 jmp test_all_events # will return via failsafe code
kaf24@3127 121 .previous
kaf24@3127 122 .section __pre_ex_table,"a"
kaf24@3127 123 .long FLT1,FIX1
kaf24@3127 124 .long FLT2,FIX1
kaf24@3127 125 .long FLT3,FIX1
kaf24@3127 126 .long FLT4,FIX1
kaf24@3127 127 .long FLT5,FIX5
kaf24@3127 128 .previous
kaf24@3127 129 .section __ex_table,"a"
kaf24@3127 130 .long DBLFLT1,DBLFIX1
kaf24@3127 131 .previous
kaf24@3127 132
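The FLTn/FIXn pairs emitted into __pre_ex_table above associate each instruction
that may fault while restoring guest state with the code that recovers from that
fault. A sketch of the entry layout and a simple linear lookup, assuming the
two-word format that the ".long FLTn,FIXn" directives produce (the real lookup,
search_pre_exception_table, lives in the C part of Xen and may differ):

    struct pre_ex_entry {
        unsigned long insn;    /* EIP of the instruction that may fault (FLTn) */
        unsigned long fixup;   /* EIP of the recovery code (FIXn)              */
    };

    /* Return the fixup address for a faulting EIP, or 0 if none is known. */
    static unsigned long find_pre_ex_fixup(const struct pre_ex_entry *tbl,
                                           unsigned int nr, unsigned long eip)
    {
        for (unsigned int i = 0; i < nr; i++)
            if (tbl[i].insn == eip)
                return tbl[i].fixup;
        return 0;
    }
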
kaf24@3127 133 /* No special register assumptions */
kaf24@3127 134 failsafe_callback:
kaf24@3127 135 GET_CURRENT(%ebx)
kaf24@3127 136 andb $~TF_failsafe_return,DOMAIN_thread_flags(%ebx)
kaf24@3127 137 leal DOMAIN_trap_bounce(%ebx),%edx
kaf24@3127 138 movl DOMAIN_failsafe_addr(%ebx),%eax
kaf24@3127 139 movl %eax,TRAPBOUNCE_eip(%edx)
kaf24@3127 140 movl DOMAIN_failsafe_sel(%ebx),%eax
kaf24@3127 141 movw %ax,TRAPBOUNCE_cs(%edx)
kaf24@3127 142 movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
kaf24@3127 143 call create_bounce_frame
kaf24@3127 144 popl %ebx
kaf24@3127 145 popl %ecx
kaf24@3127 146 popl %edx
kaf24@3127 147 popl %esi
kaf24@3127 148 popl %edi
kaf24@3127 149 popl %ebp
kaf24@3127 150 popl %eax
kaf24@3127 151 addl $4,%esp
kaf24@3127 152 FLT6: iret
kaf24@3127 153 .section .fixup,"ax"
kaf24@3127 154 FIX6: pushl %ebx
kaf24@3127 155 GET_CURRENT(%ebx)
kaf24@3127 156 orb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
kaf24@3127 157 pop %ebx
kaf24@3127 158 jmp FIX5
kaf24@3127 159 .section __pre_ex_table,"a"
kaf24@3127 160 .long FLT6,FIX6
kaf24@3127 161 .previous
kaf24@1710 162
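failsafe_callback above (and the event-delivery path below) hands work to
create_bounce_frame through the per-domain trap_bounce area, filling in a target
EIP/CS and a flags word. A hypothetical C view of the fields that the
TRAPBOUNCE_* offsets refer to; field order, widths and the flag values here are
assumptions for illustration, not the real Xen definitions:

    #include <stdint.h>

    #define TBF_EXCEPTION          1   /* illustrative values only */
    #define TBF_EXCEPTION_ERRCODE  2
    #define TBF_EXCEPTION_CR2      4
    #define TBF_INTERRUPT          8
    #define TBF_FAILSAFE          16

    struct trap_bounce_sketch {
        uint32_t error_code;   /* pushed if TBF_EXCEPTION_ERRCODE is set */
        uint32_t cr2;          /* pushed if TBF_EXCEPTION_CR2 is set     */
        uint16_t flags;        /* TBF_* bits                             */
        uint16_t cs;           /* guest code selector to bounce to       */
        uint32_t eip;          /* guest entry point to bounce to         */
    };
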
kaf24@1710 163 ALIGN
kaf24@1710 164 restore_all_xen:
kaf24@1710 165 popl %ebx
kaf24@1710 166 popl %ecx
kaf24@1710 167 popl %edx
kaf24@1710 168 popl %esi
kaf24@1710 169 popl %edi
kaf24@1710 170 popl %ebp
kaf24@1710 171 popl %eax
kaf24@1710 172 addl $4,%esp
kaf24@1710 173 iret
kaf24@1710 174
kaf24@1710 175 ALIGN
kaf24@1710 176 ENTRY(hypercall)
kaf24@3127 177 subl $4,%esp # reserve error_code/entry_vector slot
kaf24@2955 178 SAVE_ALL(b)
kaf24@2954 179 sti
kaf24@2954 180 GET_CURRENT(%ebx)
kaf24@1710 181 andl $(NR_hypercalls-1),%eax
kaf24@1710 182 call *SYMBOL_NAME(hypercall_table)(,%eax,4)
kaf24@1710 183
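The two instructions above are the hypercall dispatch: the hypercall number in
EAX is masked to the table size and used to index a table of handler pointers.
In C this is roughly the following sketch; NR_HYPERCALLS is assumed to be a
power of two (which the masking relies on) and the argument passing is
simplified:

    typedef long (*hypercall_fn_t)(unsigned long, unsigned long,
                                   unsigned long, unsigned long);

    #define NR_HYPERCALLS 32          /* must be a power of two for the mask */
    extern hypercall_fn_t hypercall_table[NR_HYPERCALLS];

    static long dispatch_hypercall(unsigned long nr,
                                   unsigned long a1, unsigned long a2,
                                   unsigned long a3, unsigned long a4)
    {
        /* andl $(NR_hypercalls-1),%eax ; call *hypercall_table(,%eax,4) */
        return hypercall_table[nr & (NR_HYPERCALLS - 1)](a1, a2, a3, a4);
    }
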
kaf24@1710 184 ret_from_hypercall:
kaf24@3127 185 movl %eax,XREGS_eax(%esp) # save the return value
kaf24@1710 186
kaf24@1710 187 test_all_events:
kaf24@1710 188 xorl %ecx,%ecx
kaf24@1710 189 notl %ecx
kaf24@1710 190 cli # tests must not race interrupts
kaf24@1710 191 /*test_softirqs:*/
kaf24@2954 192 movl DOMAIN_processor(%ebx),%eax
kaf24@1710 193 shl $6,%eax # sizeof(irq_cpustat) == 64
kaf24@1710 194 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
kaf24@1710 195 jnz process_softirqs
kaf24@1710 196 /*test_guest_events:*/
kaf24@2954 197 movl DOMAIN_shared_info(%ebx),%eax
kaf24@2954 198 testb $0xFF,SHINFO_upcall_mask(%eax)
kaf24@1710 199 jnz restore_all_guest
kaf24@2954 200 testb $0xFF,SHINFO_upcall_pending(%eax)
kaf24@1710 201 jz restore_all_guest
kaf24@1710 202 /*process_guest_events:*/
kaf24@3081 203 leal DOMAIN_trap_bounce(%ebx),%edx
kaf24@2954 204 movl DOMAIN_event_addr(%ebx),%eax
kaf24@3081 205 movl %eax,TRAPBOUNCE_eip(%edx)
kaf24@2954 206 movl DOMAIN_event_sel(%ebx),%eax
kaf24@3081 207 movw %ax,TRAPBOUNCE_cs(%edx)
kaf24@3127 208 movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
kaf24@1710 209 call create_bounce_frame
kaf24@3127 210 movl DOMAIN_shared_info(%ebx),%eax
kaf24@3127 211 movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
kaf24@1710 212 jmp restore_all_guest
kaf24@1710 213
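test_all_events above first checks this CPU's softirq bits, then decides whether
to bounce an event callback into the guest: an upcall is delivered only if one
is pending and upcalls are not masked, and delivery itself sets the mask (the
guest clears it again). A C sketch of that decision; the struct here names only
the two shared-info fields tested, the full structure lives in public/xen.h:

    #include <stdint.h>
    #include <stdbool.h>

    struct shared_info_sketch {
        uint8_t upcall_pending;
        uint8_t upcall_mask;
    };

    static bool should_deliver_upcall(struct shared_info_sketch *s)
    {
        if (s->upcall_mask || !s->upcall_pending)
            return false;
        s->upcall_mask = 1;   /* "Upcalls are masked during delivery" */
        return true;
    }
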
kaf24@1710 214 ALIGN
kaf24@1710 215 process_softirqs:
kaf24@1710 216 sti
kaf24@1710 217 call SYMBOL_NAME(do_softirq)
kaf24@1710 218 jmp test_all_events
kaf24@1710 219
kaf24@2954 220 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
kaf24@2954 221 /* {EIP, CS, EFLAGS, [ESP, SS]} */
kaf24@3081 222 /* %edx == trap_bounce, %ebx == task_struct */
kaf24@2954 223 /* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
kaf24@3207 224 create_bounce_frame:
kaf24@3207 225 movl XREGS_eflags+4(%esp),%ecx
kaf24@3127 226 movb XREGS_cs+4(%esp),%cl
kaf24@3207 227 testl $(2|X86_EFLAGS_VM),%ecx
kaf24@3207 228 jz ring1 /* jump if returning to an existing ring-1 activation */
kaf24@1710 229 /* obtain ss/esp from TSS -- no current ring-1 activations */
kaf24@2954 230 movl DOMAIN_processor(%ebx),%eax
kaf24@1710 231 /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
kaf24@1710 232 movl %eax, %ecx
kaf24@1710 233 shll $7, %ecx
kaf24@1710 234 shll $13, %eax
kaf24@1710 235 addl %ecx,%eax
kaf24@1710 236 addl $init_tss + 12,%eax
kaf24@1710 237 movl (%eax),%esi /* tss->esp1 */
kaf24@3127 238 FLT7: movl 4(%eax),%gs /* tss->ss1 */
kaf24@3207 239 testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
kaf24@3207 240 jz nvm86_1
kaf24@3207 241 subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
kaf24@3207 242 movl XREGS_es+4(%esp),%eax
kaf24@3207 243 FLT8: movl %eax,%gs:(%esi)
kaf24@3207 244 movl XREGS_ds+4(%esp),%eax
kaf24@3207 245 FLT9: movl %eax,%gs:4(%esi)
kaf24@3207 246 movl XREGS_fs+4(%esp),%eax
kaf24@3207 247 FLT10: movl %eax,%gs:8(%esi)
kaf24@3207 248 movl XREGS_gs+4(%esp),%eax
kaf24@3207 249 FLT11: movl %eax,%gs:12(%esi)
kaf24@3207 250 nvm86_1:subl $8,%esi /* push SS/ESP (inter-priv iret) */
kaf24@2954 251 movl XREGS_esp+4(%esp),%eax
kaf24@3207 252 FLT12: movl %eax,%gs:(%esi)
kaf24@2954 253 movl XREGS_ss+4(%esp),%eax
kaf24@3207 254 FLT13: movl %eax,%gs:4(%esi)
kaf24@3207 255 jmp 1f
kaf24@3207 256 ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
kaf24@2954 257 movl XREGS_esp+4(%esp),%esi
kaf24@3207 258 FLT14: movl XREGS_ss+4(%esp),%gs
kaf24@3207 259 1: /* Construct a stack frame: EFLAGS, CS/EIP */
kaf24@1710 260 subl $12,%esi
kaf24@2954 261 movl XREGS_eip+4(%esp),%eax
kaf24@3207 262 FLT15: movl %eax,%gs:(%esi)
kaf24@2954 263 movl XREGS_cs+4(%esp),%eax
kaf24@3207 264 FLT16: movl %eax,%gs:4(%esi)
kaf24@2954 265 movl XREGS_eflags+4(%esp),%eax
kaf24@3207 266 FLT17: movl %eax,%gs:8(%esi)
kaf24@3127 267 movb TRAPBOUNCE_flags(%edx),%cl
kaf24@3127 268 test $TBF_EXCEPTION_ERRCODE,%cl
kaf24@3127 269 jz 1f
kaf24@3127 270 subl $4,%esi # push error_code onto guest frame
kaf24@3127 271 movl TRAPBOUNCE_error_code(%edx),%eax
kaf24@3207 272 FLT18: movl %eax,%gs:(%esi)
kaf24@3127 273 testb $TBF_EXCEPTION_CR2,%cl
kaf24@3127 274 jz 2f
kaf24@3127 275 subl $4,%esi # push %cr2 onto guest frame
kaf24@3127 276 movl TRAPBOUNCE_cr2(%edx),%eax
kaf24@3207 277 FLT19: movl %eax,%gs:(%esi)
kaf24@3127 278 1: testb $TBF_FAILSAFE,%cl
kaf24@3127 279 jz 2f
kaf24@3127 280 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
kaf24@3207 281 testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
kaf24@3207 282 jz nvm86_2
kaf24@3207 283 xorl %eax,%eax # VM86: we write zero selector values
kaf24@3207 284 FLT20: movl %eax,%gs:(%esi)
kaf24@3207 285 FLT21: movl %eax,%gs:4(%esi)
kaf24@3207 286 FLT22: movl %eax,%gs:8(%esi)
kaf24@3207 287 FLT23: movl %eax,%gs:12(%esi)
kaf24@3207 288 jmp 2f
kaf24@3207 289 nvm86_2:movl XREGS_ds+4(%esp),%eax # non-VM86: write real selector values
kaf24@3207 290 FLT24: movl %eax,%gs:(%esi)
kaf24@3127 291 movl XREGS_es+4(%esp),%eax
kaf24@3207 292 FLT25: movl %eax,%gs:4(%esi)
kaf24@3127 293 movl XREGS_fs+4(%esp),%eax
kaf24@3207 294 FLT26: movl %eax,%gs:8(%esi)
kaf24@3127 295 movl XREGS_gs+4(%esp),%eax
kaf24@3207 296 FLT27: movl %eax,%gs:12(%esi)
kaf24@3127 297 2: movb $0,TRAPBOUNCE_flags(%edx)
kaf24@3207 298 testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
kaf24@3207 299 jz nvm86_3
kaf24@3207 300 xorl %eax,%eax /* zero DS-GS, just as a real CPU would */
kaf24@3207 301 movl %eax,XREGS_ds+4(%esp)
kaf24@3207 302 movl %eax,XREGS_es+4(%esp)
kaf24@3207 303 movl %eax,XREGS_fs+4(%esp)
kaf24@3207 304 movl %eax,XREGS_gs+4(%esp)
kaf24@3207 305 nvm86_3:/* Rewrite our stack frame and return to ring 1. */
kaf24@1710 306 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
kaf24@3127 307 andl $0xfffcbeff,XREGS_eflags+4(%esp)
kaf24@2954 308 movl %gs,XREGS_ss+4(%esp)
kaf24@2954 309 movl %esi,XREGS_esp+4(%esp)
kaf24@3081 310 movzwl TRAPBOUNCE_cs(%edx),%eax
kaf24@2954 311 movl %eax,XREGS_cs+4(%esp)
kaf24@3081 312 movl TRAPBOUNCE_eip(%edx),%eax
kaf24@2954 313 movl %eax,XREGS_eip+4(%esp)
kaf24@1710 314 ret
kaf24@3127 315 .section .fixup,"ax"
kaf24@3127 316 FIX7: sti
kaf24@3127 317 popl %esi
kaf24@3127 318 addl $4,%esp # Discard create_bounce_frame return address
kaf24@3127 319 pushfl # EFLAGS
kaf24@3127 320 movl $__HYPERVISOR_CS,%eax
kaf24@3127 321 pushl %eax # CS
kaf24@3127 322 movl $DBLFLT2,%eax
kaf24@3127 323 pushl %eax # EIP
kaf24@3127 324 pushl %esi # error_code/entry_vector
kaf24@3127 325 jmp error_code
kaf24@3127 326 DBLFLT2:jmp process_guest_exception_and_events
kaf24@3127 327 .previous
kaf24@3127 328 .section __pre_ex_table,"a"
kaf24@3207 329 .long FLT7,FIX7 , FLT8,FIX7 , FLT9,FIX7 , FLT10,FIX7
kaf24@3207 330 .long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
kaf24@3207 331 .long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
kaf24@3207 332 .long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
kaf24@3207 333 .long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
kaf24@3127 334 .previous
kaf24@1710 335 .section __ex_table,"a"
kaf24@3127 336 .long DBLFLT2,domain_crash
kaf24@1710 337 .previous
kaf24@1710 338
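The shift-and-add sequence near the top of create_bounce_frame computes
cpu * 8320 because sizeof(tss_struct) is 8320 = 8192 + 128, so the multiply
decomposes into two shifts; the ring-1 ESP/SS are then read from offsets 12 and
16 of that CPU's TSS. A C rendering of the same arithmetic; the partial struct
is a sketch of the standard i386 TSS layout, padded to the size quoted in the
comment:

    #include <stdint.h>

    struct tss_sketch {
        uint32_t back_link;               /* offset  0                    */
        uint32_t esp0, ss0;               /* offsets 4, 8: ring-0 stack   */
        uint32_t esp1, ss1;               /* offsets 12, 16: ring-1 stack */
        uint8_t  rest[8320 - 20];         /* remainder, incl. I/O bitmap  */
    };
    _Static_assert(sizeof(struct tss_sketch) == 8320, "tss size");

    extern struct tss_sketch init_tss[];

    static void get_ring1_stack(unsigned int cpu,
                                uint32_t *ss1, uint32_t *esp1)
    {
        /* (cpu << 13) + (cpu << 7) == cpu * (8192 + 128) == cpu * 8320 */
        struct tss_sketch *t = &init_tss[cpu];
        *esp1 = t->esp1;
        *ss1  = t->ss1;
    }
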
kaf24@1710 339 ALIGN
kaf24@3127 340 process_guest_exception_and_events:
kaf24@3081 341 leal DOMAIN_trap_bounce(%ebx),%edx
kaf24@3127 342 testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
kaf24@1710 343 jz test_all_events
kaf24@3127 344 call create_bounce_frame
kaf24@1710 345 jmp test_all_events
kaf24@1710 346
kaf24@1710 347 ALIGN
kaf24@1710 348 ENTRY(ret_from_intr)
kaf24@3207 349 GET_CURRENT(%ebx)
kaf24@3207 350 movl XREGS_eflags(%esp),%eax
kaf24@3207 351 movb XREGS_cs(%esp),%al
kaf24@3207 352 testl $(3|X86_EFLAGS_VM),%eax
kaf24@3207 353 jnz test_all_events
kaf24@3207 354 jmp restore_all_xen
kaf24@1710 355
kaf24@1710 356 ENTRY(divide_error)
kaf24@3127 357 pushl $TRAP_divide_error<<16
kaf24@1710 358 ALIGN
kaf24@1710 359 error_code:
kaf24@3127 360 SAVE_ALL_NOSEGREGS(a)
kaf24@3127 361 SET_XEN_SEGMENTS(a)
kaf24@3127 362 testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
kaf24@3127 363 jz exception_with_ints_disabled
kaf24@3198 364 1: sti # re-enable interrupts
kaf24@3127 365 xorl %eax,%eax
kaf24@3127 366 movw XREGS_entry_vector(%esp),%ax
kaf24@3127 367 movl %esp,%edx
ach61@2843 368 pushl %edx # push the xen_regs pointer
kaf24@1710 369 GET_CURRENT(%ebx)
kaf24@3127 370 call *SYMBOL_NAME(exception_table)(,%eax,4)
kaf24@3127 371 addl $4,%esp
kaf24@3207 372 movl XREGS_eflags(%esp),%eax
kaf24@2954 373 movb XREGS_cs(%esp),%al
kaf24@3207 374 testl $(3|X86_EFLAGS_VM),%eax
kaf24@3127 375 jz restore_all_xen
kaf24@1710 376 jmp process_guest_exception_and_events
kaf24@1710 377
kaf24@3127 378 exception_with_ints_disabled:
kaf24@3207 379 movl XREGS_eflags(%esp),%eax
kaf24@3127 380 movb XREGS_cs(%esp),%al
kaf24@3207 381 testl $(3|X86_EFLAGS_VM),%eax # interrupts disabled outside Xen?
kaf24@3207 382 jnz 1b # it really does happen!
kaf24@3207 383 # (e.g., DOM0 X server)
kaf24@3127 384 pushl XREGS_eip(%esp)
kaf24@3127 385 call search_pre_exception_table
kaf24@3127 386 addl $4,%esp
kaf24@3127 387 testl %eax,%eax # no fixup code for faulting EIP?
kaf24@3127 388 jz FATAL_exception_with_ints_disabled
kaf24@3127 389 movl %eax,XREGS_eip(%esp)
kaf24@3127 390 movl %esp,%esi
kaf24@3127 391 subl $4,%esp
kaf24@3127 392 movl %esp,%edi
kaf24@3127 393 movl $XREGS_kernel_sizeof/4,%ecx
kaf24@3127 394 rep; movsl # make room for error_code/entry_vector
kaf24@3127 395 movl XREGS_error_code(%esp),%eax # error_code/entry_vector
kaf24@3127 396 movl %eax,XREGS_kernel_sizeof(%esp)
kaf24@3127 397 jmp restore_all_xen # return to fixup code
kaf24@3127 398
kaf24@3127 399 FATAL_exception_with_ints_disabled:
kaf24@3127 400 xorl %esi,%esi
kaf24@3127 401 movw XREGS_entry_vector(%esp),%si
kaf24@3127 402 movl %esp,%edx
kaf24@3127 403 pushl %edx # push the xen_regs pointer
kaf24@3127 404 pushl %esi # push the trapnr (entry vector)
kaf24@3127 405 call SYMBOL_NAME(fatal_trap)
kaf24@3127 406 ud2
kaf24@3127 407
kaf24@1710 408 ENTRY(coprocessor_error)
kaf24@3127 409 pushl $TRAP_copro_error<<16
kaf24@1710 410 jmp error_code
kaf24@1710 411
kaf24@1710 412 ENTRY(simd_coprocessor_error)
kaf24@3127 413 pushl $TRAP_simd_error<<16
kaf24@1710 414 jmp error_code
kaf24@1710 415
kaf24@1710 416 ENTRY(device_not_available)
kaf24@3127 417 pushl $TRAP_no_device<<16
kaf24@1710 418 jmp error_code
kaf24@1710 419
kaf24@1710 420 ENTRY(debug)
kaf24@3127 421 pushl $TRAP_debug<<16
kaf24@1710 422 jmp error_code
kaf24@1710 423
kaf24@1710 424 ENTRY(int3)
kaf24@3127 425 pushl $TRAP_int3<<16
kaf24@1710 426 jmp error_code
kaf24@1710 427
kaf24@1710 428 ENTRY(overflow)
kaf24@3127 429 pushl $TRAP_overflow<<16
kaf24@1710 430 jmp error_code
kaf24@1710 431
kaf24@1710 432 ENTRY(bounds)
kaf24@3127 433 pushl $TRAP_bounds<<16
kaf24@1710 434 jmp error_code
kaf24@1710 435
kaf24@1710 436 ENTRY(invalid_op)
kaf24@3127 437 pushl $TRAP_invalid_op<<16
kaf24@1710 438 jmp error_code
kaf24@1710 439
kaf24@1710 440 ENTRY(coprocessor_segment_overrun)
kaf24@3127 441 pushl $TRAP_copro_seg<<16
kaf24@1710 442 jmp error_code
kaf24@1710 443
kaf24@1710 444 ENTRY(invalid_TSS)
kaf24@3127 445 movw $TRAP_invalid_tss,2(%esp)
kaf24@1710 446 jmp error_code
kaf24@1710 447
kaf24@1710 448 ENTRY(segment_not_present)
kaf24@3127 449 movw $TRAP_no_segment,2(%esp)
kaf24@1710 450 jmp error_code
kaf24@1710 451
kaf24@1710 452 ENTRY(stack_segment)
kaf24@3127 453 movw $TRAP_stack_error,2(%esp)
kaf24@1710 454 jmp error_code
kaf24@1710 455
kaf24@1710 456 ENTRY(general_protection)
kaf24@3127 457 movw $TRAP_gp_fault,2(%esp)
kaf24@1710 458 jmp error_code
kaf24@1710 459
kaf24@1710 460 ENTRY(alignment_check)
kaf24@3127 461 movw $TRAP_alignment_check,2(%esp)
kaf24@1710 462 jmp error_code
kaf24@1710 463
kaf24@1710 464 ENTRY(page_fault)
kaf24@3127 465 movw $TRAP_page_fault,2(%esp)
kaf24@1710 466 jmp error_code
kaf24@1710 467
kaf24@1710 468 ENTRY(machine_check)
kaf24@3127 469 pushl $TRAP_machine_check<<16
kaf24@1710 470 jmp error_code
kaf24@1710 471
kaf24@1710 472 ENTRY(spurious_interrupt_bug)
kaf24@3127 473 pushl $TRAP_spurious_int<<16
kaf24@1710 474 jmp error_code
kaf24@1710 475
kaf24@1710 476 ENTRY(nmi)
kaf24@1710 477 # Save state but do not trash the segment registers!
kaf24@1710 478 # We may otherwise be unable to reload them or copy them to ring 1.
kaf24@1710 479 pushl %eax
kaf24@2955 480 SAVE_ALL_NOSEGREGS(a)
kaf24@1710 481
kaf24@2085 482 # Check for hardware problems.
kaf24@1710 483 inb $0x61,%al
kaf24@1710 484 testb $0x80,%al
kaf24@2080 485 jne nmi_parity_err
kaf24@1710 486 testb $0x40,%al
kaf24@1710 487 jne nmi_io_err
kaf24@1710 488 movl %eax,%ebx
kaf24@1710 489
kaf24@1710 490 # Okay, it's almost a normal NMI tick. We can only process it if:
kaf24@1710 491 # A. We are the outermost Xen activation (in which case we have
kaf24@1710 492 # the selectors safely saved on our stack)
kaf24@1710 493 # B. DS-GS all contain sane Xen values.
kaf24@1710 494 # In all other cases we bail without touching DS-GS, as we have
kaf24@1710 495 # interrupted an enclosing Xen activation in tricky prologue or
kaf24@1710 496 # epilogue code.
kaf24@3207 497 movl XREGS_eflags(%esp),%eax
kaf24@2954 498 movb XREGS_cs(%esp),%al
kaf24@3207 499 testl $(3|X86_EFLAGS_VM),%eax
kaf24@3127 500 jnz do_watchdog_tick
kaf24@2954 501 movl XREGS_ds(%esp),%eax
kaf24@1710 502 cmpw $(__HYPERVISOR_DS),%ax
kaf24@2954 503 jne restore_all_xen
kaf24@2954 504 movl XREGS_es(%esp),%eax
kaf24@1710 505 cmpw $(__HYPERVISOR_DS),%ax
kaf24@2954 506 jne restore_all_xen
kaf24@2954 507 movl XREGS_fs(%esp),%eax
kaf24@1710 508 cmpw $(__HYPERVISOR_DS),%ax
kaf24@2954 509 jne restore_all_xen
kaf24@2954 510 movl XREGS_gs(%esp),%eax
kaf24@1710 511 cmpw $(__HYPERVISOR_DS),%ax
kaf24@2954 512 jne restore_all_xen
kaf24@1710 513
kaf24@1710 514 do_watchdog_tick:
kaf24@1710 515 movl $(__HYPERVISOR_DS),%edx
kaf24@1710 516 movl %edx,%ds
kaf24@1710 517 movl %edx,%es
kaf24@1710 518 movl %esp,%edx
kaf24@1710 519 pushl %ebx # reason
kaf24@1710 520 pushl %edx # regs
kaf24@1710 521 call SYMBOL_NAME(do_nmi)
kaf24@1710 522 addl $8,%esp
kaf24@3207 523 movl XREGS_eflags(%esp),%eax
kaf24@2954 524 movb XREGS_cs(%esp),%al
kaf24@3207 525 testl $(3|X86_EFLAGS_VM),%eax
kaf24@3127 526 jz restore_all_xen
kaf24@1710 527 GET_CURRENT(%ebx)
kaf24@1710 528 jmp restore_all_guest
kaf24@1710 529
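The inb from port 0x61 near the top of the NMI handler classifies the NMI: bit 7
indicates a memory parity/SERR error, bit 6 an I/O channel check, and anything
else is treated as a watchdog tick. A small C sketch of that decode (names
invented here):

    #include <stdint.h>

    enum nmi_kind { NMI_WATCHDOG_TICK, NMI_PARITY_ERROR, NMI_IO_ERROR };

    static enum nmi_kind classify_nmi(uint8_t port61_status)
    {
        if (port61_status & 0x80)      /* testb $0x80,%al */
            return NMI_PARITY_ERROR;
        if (port61_status & 0x40)      /* testb $0x40,%al */
            return NMI_IO_ERROR;
        return NMI_WATCHDOG_TICK;
    }
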
kaf24@2085 530 nmi_parity_err:
kaf24@2085 531 # Clear and disable the parity-error line
kaf24@2085 532 andb $0xf,%al
kaf24@2085 533 orb $0x4,%al
kaf24@2085 534 outb %al,$0x61
kaf24@2085 535 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
kaf24@2954 536 je restore_all_xen
kaf24@2085 537 bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
kaf24@2085 538 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
kaf24@2085 539 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
kaf24@2954 540 je restore_all_xen
kaf24@2085 541 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
kaf24@1710 542 movl %edx,%ds
kaf24@1710 543 movl %edx,%es
kaf24@2079 544 movl %esp,%edx
kaf24@2079 545 push %edx
kaf24@2079 546 call SYMBOL_NAME(mem_parity_error)
kaf24@2085 547 addl $4,%esp
kaf24@2085 548 jmp ret_from_intr
kaf24@2085 549
kaf24@1710 550 nmi_io_err:
kaf24@2085 551 # Clear and disable the I/O-error line
kaf24@2085 552 andb $0xf,%al
kaf24@2085 553 orb $0x8,%al
kaf24@2085 554 outb %al,$0x61
kaf24@2085 555 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
kaf24@2954 556 je restore_all_xen
kaf24@2085 557 bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
kaf24@2085 558 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
kaf24@2085 559 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
kaf24@2954 560 je restore_all_xen
kaf24@2085 561 movl $(__HYPERVISOR_DS),%edx # nmi=fatal
kaf24@1710 562 movl %edx,%ds
kaf24@1710 563 movl %edx,%es
kaf24@2079 564 movl %esp,%edx
kaf24@2079 565 push %edx
kaf24@2079 566 call SYMBOL_NAME(io_check_error)
kaf24@2085 567 addl $4,%esp
kaf24@2085 568 jmp ret_from_intr
kaf24@3207 569
kaf24@3207 570
kaf24@3207 571 ENTRY(setup_vm86_frame)
kaf24@3207 572 # Copies the entire stack frame forwards by 16 bytes.
kaf24@3207 573 .macro copy_vm86_words count=18
kaf24@3207 574 .if \count
kaf24@3207 575 pushl ((\count-1)*4)(%esp)
kaf24@3207 576 popl ((\count-1)*4)+16(%esp)
kaf24@3207 577 copy_vm86_words "(\count-1)"
kaf24@3207 578 .endif
kaf24@3207 579 .endm
kaf24@3207 580 copy_vm86_words
kaf24@3207 581 addl $16,%esp
kaf24@3207 582 ret
kaf24@3207 583
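setup_vm86_frame's recursive macro expands into eighteen push/pop pairs that,
because of how PUSH and POP compute ESP-relative addresses, copy an 18-word
stack frame to an address 16 bytes higher; the final addl then discards the
vacated space. A C equivalent of the data movement (a sketch; the 18-word count
is taken from the macro's default argument):

    #include <stdint.h>
    #include <string.h>

    /* Move an 18-word frame 16 bytes (4 words) towards higher addresses and
     * return the new frame base, mirroring "copy ... forwards by 16 bytes"
     * followed by addl $16,%esp. */
    static uint32_t *shift_frame_forwards(uint32_t *frame)
    {
        memmove(frame + 4, frame, 18 * sizeof(uint32_t));
        return frame + 4;
    }
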
kaf24@3207 584 do_switch_vm86:
kaf24@3207 585 # Discard the return address
kaf24@3207 586 addl $4,%esp
kaf24@3207 587
kaf24@3207 588 movl XREGS_eflags(%esp),%ecx
kaf24@3207 589
kaf24@3207 590 # GS:ESI == Ring-1 stack activation
kaf24@3207 591 movl XREGS_esp(%esp),%esi
kaf24@3207 592 VFLT1: movl XREGS_ss(%esp),%gs
kaf24@3207 593
kaf24@3207 594 # ES:EDI == Ring-0 stack activation
kaf24@3207 595 leal XREGS_eip(%esp),%edi
kaf24@3207 596
kaf24@3207 597 # Restore the hypercall-number-clobbered EAX on our stack frame
kaf24@3207 598 VFLT2: movl %gs:(%esi),%eax
kaf24@3207 599 movl %eax,XREGS_eax(%esp)
kaf24@3207 600 addl $4,%esi
kaf24@3207 601
kaf24@3207 602 # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
kaf24@3207 603 movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
kaf24@3207 604 VFLT3: movl %gs:(%esi),%eax
kaf24@3207 605 stosl
kaf24@3207 606 addl $4,%esi
kaf24@3207 607 loop VFLT3
kaf24@3207 608
kaf24@3207 609 # Fix up EFLAGS
kaf24@3207 610 andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
kaf24@3207 611 andl $X86_EFLAGS_IOPL,%ecx # Ignore attempts to change EFLAGS.IOPL
kaf24@3207 612 jnz 1f
kaf24@3207 613 orl $X86_EFLAGS_IF,%ecx # EFLAGS.IOPL=0 => no messing with EFLAGS.IF
kaf24@3207 614 1: orl $X86_EFLAGS_VM,%ecx # Force EFLAGS.VM
kaf24@3207 615 orl %ecx,XREGS_eflags(%esp)
kaf24@3207 616
kaf24@3207 617 jmp test_all_events
kaf24@3207 618
kaf24@3207 619 .section __ex_table,"a"
kaf24@3207 620 .long VFLT1,domain_crash
kaf24@3207 621 .long VFLT2,domain_crash
kaf24@3207 622 .long VFLT3,domain_crash
kaf24@3207 623 .previous
kaf24@3207 624
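The EFLAGS fix-up at the end of do_switch_vm86 keeps the caller's IOPL (ignoring
the value in the requested VM86 frame), forces IF on when IOPL is 0 so the guest
cannot leave interrupts disabled, and always sets VM. In C, roughly the
following sketch, using the standard EFLAGS bit values:

    #include <stdint.h>

    #define X86_EFLAGS_IF    0x00000200u
    #define X86_EFLAGS_IOPL  0x00003000u
    #define X86_EFLAGS_VM    0x00020000u

    static uint32_t vm86_eflags(uint32_t requested, uint32_t caller)
    {
        uint32_t e    = requested & ~X86_EFLAGS_IOPL; /* ignore requested IOPL  */
        uint32_t iopl = caller & X86_EFLAGS_IOPL;     /* keep the caller's IOPL */
        if (iopl == 0)
            iopl |= X86_EFLAGS_IF;                    /* IOPL=0: IF stays set   */
        return e | iopl | X86_EFLAGS_VM;              /* always enter VM86 mode */
    }
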
kaf24@1710 625 .data
kaf24@3127 626
kaf24@3127 627 ENTRY(exception_table)
kaf24@3127 628 .long SYMBOL_NAME(do_divide_error)
kaf24@3127 629 .long SYMBOL_NAME(do_debug)
kaf24@3127 630 .long 0 # nmi
kaf24@3127 631 .long SYMBOL_NAME(do_int3)
kaf24@3127 632 .long SYMBOL_NAME(do_overflow)
kaf24@3127 633 .long SYMBOL_NAME(do_bounds)
kaf24@3127 634 .long SYMBOL_NAME(do_invalid_op)
kaf24@3127 635 .long SYMBOL_NAME(math_state_restore)
kaf24@3127 636 .long 0 # double fault
kaf24@3127 637 .long SYMBOL_NAME(do_coprocessor_segment_overrun)
kaf24@3127 638 .long SYMBOL_NAME(do_invalid_TSS)
kaf24@3127 639 .long SYMBOL_NAME(do_segment_not_present)
kaf24@3127 640 .long SYMBOL_NAME(do_stack_segment)
kaf24@3127 641 .long SYMBOL_NAME(do_general_protection)
kaf24@3127 642 .long SYMBOL_NAME(do_page_fault)
kaf24@3127 643 .long SYMBOL_NAME(do_spurious_interrupt_bug)
kaf24@3127 644 .long SYMBOL_NAME(do_coprocessor_error)
kaf24@3127 645 .long SYMBOL_NAME(do_alignment_check)
kaf24@3127 646 .long SYMBOL_NAME(do_machine_check)
kaf24@3127 647 .long SYMBOL_NAME(do_simd_coprocessor_error)
kaf24@3127 648
kaf24@1710 649 ENTRY(hypercall_table)
kaf24@1710 650 .long SYMBOL_NAME(do_set_trap_table) /* 0 */
kaf24@1710 651 .long SYMBOL_NAME(do_mmu_update)
kaf24@1710 652 .long SYMBOL_NAME(do_set_gdt)
kaf24@1710 653 .long SYMBOL_NAME(do_stack_switch)
kaf24@1710 654 .long SYMBOL_NAME(do_set_callbacks)
kaf24@1710 655 .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
kaf24@1710 656 .long SYMBOL_NAME(do_sched_op)
kaf24@1710 657 .long SYMBOL_NAME(do_dom0_op)
kaf24@1710 658 .long SYMBOL_NAME(do_set_debugreg)
kaf24@1710 659 .long SYMBOL_NAME(do_get_debugreg)
kaf24@1710 660 .long SYMBOL_NAME(do_update_descriptor) /* 10 */
kaf24@1710 661 .long SYMBOL_NAME(do_set_fast_trap)
kaf24@1710 662 .long SYMBOL_NAME(do_dom_mem_op)
kaf24@1710 663 .long SYMBOL_NAME(do_multicall)
kaf24@1710 664 .long SYMBOL_NAME(do_update_va_mapping)
kaf24@1710 665 .long SYMBOL_NAME(do_set_timer_op) /* 15 */
kaf24@1710 666 .long SYMBOL_NAME(do_event_channel_op)
kaf24@1710 667 .long SYMBOL_NAME(do_xen_version)
kaf24@1710 668 .long SYMBOL_NAME(do_console_io)
kaf24@1710 669 .long SYMBOL_NAME(do_physdev_op)
kaf24@2375 670 .long SYMBOL_NAME(do_grant_table_op) /* 20 */
kaf24@2111 671 .long SYMBOL_NAME(do_vm_assist)
kaf24@2375 672 .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
kaf24@3207 673 .long SYMBOL_NAME(do_switch_vm86)
kaf24@1710 674 .rept NR_hypercalls-((.-hypercall_table)/4)
kaf24@1710 675 .long SYMBOL_NAME(do_ni_hypercall)
kaf24@1710 676 .endr