debuggers.hg: annotate xen/arch/x86/x86_32/entry.S @ 22906:700ac6445812

changeset: "Now add KDB to the non-kdb tree"
author:    Mukesh Rathor
date:      Thu Feb 03 15:42:41 2011 -0800
parent:    b9017fdaad4d
kaf24@1710 1 /*
kaf24@1710 2 * Hypercall and fault low-level handling routines.
kaf24@1710 3 *
kaf24@1710 4 * Copyright (c) 2002-2004, K A Fraser
kaf24@1710 5 * Copyright (c) 1991, 1992 Linus Torvalds
kaf24@3127 6 *
kaf24@3127 7 * Calling back to a guest OS:
kaf24@3127 8 * ===========================
kaf24@3127 9 *
kaf24@1710 10 * First, we require that all callbacks (either via a supplied
kaf24@1710 11 * interrupt-descriptor-table, or via the special event or failsafe callbacks
kaf24@1710 12 * in the shared-info-structure) are to ring 1. This just makes life easier,
kaf24@1710 13 * in that it means we don't have to do messy GDT/LDT lookups to find
kaf24@1710 14 * out the privilege level of the return code-selector. That code
kaf24@1710 15 * would just be a hassle to write, and would need to account for running
kaf24@1710 16 * off the end of the GDT/LDT, for example. For all callbacks we check
kaf24@3127 17 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
kaf24@3127 18 * we're safe as we don't allow a guest OS to install ring-0 privileges into the
kaf24@3127 19 * GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are to
kaf24@3127 20 * ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
kaf24@3127 21 * rather than the correct ring) and bad things are bound to ensue -- IRET is
kaf24@1710 22 * likely to fault, and we may end up killing the domain (no harm can
kaf24@1710 23 * come to Xen, though).
kaf24@1710 24 *
kaf24@1710 25 * When doing a callback, we check if the return CS is in ring 0. If so,
kaf24@1710 26 * the callback is delayed until the next return to ring != 0.
kaf24@1710 27 * If return CS is in ring 1, then we create a callback frame
kaf24@1710 28 * starting at return SS/ESP. The base of the frame does an intra-privilege
kaf24@1710 29 * interrupt-return.
kaf24@1710 30 * If return CS is in ring > 1, we create a callback frame starting
kaf24@1710 31 * at SS/ESP taken from appropriate section of the current TSS. The base
kaf24@1710 32 * of the frame does an inter-privilege interrupt-return.
kaf24@1710 33 *
kaf24@1710 34 * Note that the "failsafe callback" uses a special stackframe:
kaf24@1710 35 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
kaf24@1710 36 * return_CS, return_EFLAGS[, return_ESP, return_SS] }
kaf24@1710 37 * That is, original values for DS/ES/FS/GS are placed on stack rather than
kaf24@1710 38 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
kaf24@1710 39 * saved/restored in the guest OS. Furthermore, if we load them we may cause
kaf24@1710 40 * a fault if they are invalid, which is a hassle to deal with. We avoid
kaf24@1710 41 * that problem if we don't load them :-) This property allows us to use
kaf24@1710 42 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
kaf24@1710 43 * on return to ring != 0, we can simply package it up as a return via
kaf24@1710 44 * the failsafe callback, and let the guest OS sort it out (perhaps by
kaf24@1710 45 * killing an application process). Note that we also do this for any
kaf24@1710 46 * faulting IRET -- just let the guest OS handle it via the event
kaf24@1710 47 * callback.
kaf24@1710 48 *
kaf24@1710 49 * We terminate a domain in the following cases:
kaf24@1710 50 * - a fault while creating a callback stack frame (due to a bad ring-1 stack).
kaf24@1710 51 * - a faulting IRET on entry to the failsafe callback handler.
kaf24@1710 52 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
kaf24@1710 53 * handler in good order (absolutely no faults allowed!).
kaf24@1710 54 */
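/*
 * For orientation, the failsafe frame enumerated above can be pictured
 * as a C struct. This is a sketch only: Xen defines no such type, and
 * create_bounce_frame below builds the frame word by word at the new
 * guest %esp (lowest address first).
 *
 *   struct failsafe_frame {
 *       uint32_t ds, es, fs, gs;   // original data segment selectors
 *       uint32_t eip, cs, eflags;  // ordinary bounce-frame part
 *       uint32_t esp, ss;          // present only for inter-privilege return
 *   };
 */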
kaf24@1710 55
kaf24@1710 56 #include <xen/config.h>
kaf24@1710 57 #include <xen/errno.h>
kaf24@2085 58 #include <xen/softirq.h>
kaf24@3314 59 #include <asm/asm_defns.h>
kaf24@3633 60 #include <asm/apicdef.h>
kaf24@3792 61 #include <asm/page.h>
kaf24@2827 62 #include <public/xen.h>
kaf24@1710 63
kaf24@1710 64 ALIGN
kaf24@1710 65 restore_all_guest:
kfraser@14968 66 ASSERT_INTERRUPTS_DISABLED
kaf24@4721 67 testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
kfraser@15450 68 popl %ebx
kfraser@15450 69 popl %ecx
kfraser@15450 70 popl %edx
kfraser@15450 71 popl %esi
kfraser@15450 72 popl %edi
kfraser@15450 73 popl %ebp
kfraser@15450 74 popl %eax
kfraser@15450 75 leal 4(%esp),%esp # skip error_code/entry_vector (leal, unlike addl, preserves the flags set by testl above)
kfraser@15450 76 jnz .Lrestore_iret_guest
kaf24@9029 77 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
kfraser@15450 78 testb $2,UREGS_cs-UREGS_eip(%esp)
kfraser@15450 79 jnz .Lrestore_sregs_guest
kaf24@9029 80 call restore_ring0_guest
kfraser@15450 81 jmp .Lrestore_iret_guest
kaf24@9029 82 #endif
kfraser@15450 83 .Lrestore_sregs_guest:
kfraser@15450 84 .Lft1: mov UREGS_ds-UREGS_eip(%esp),%ds
kfraser@15450 85 .Lft2: mov UREGS_es-UREGS_eip(%esp),%es
kfraser@15450 86 .Lft3: mov UREGS_fs-UREGS_eip(%esp),%fs
kfraser@15450 87 .Lft4: mov UREGS_gs-UREGS_eip(%esp),%gs
kfraser@15450 88 .Lrestore_iret_guest:
keir@13930 89 .Lft5: iret
kaf24@3127 90 .section .fixup,"ax"
kfraser@15455 91 .Lfx1: sti
kfraser@15455 92 SAVE_ALL_GPRS
kfraser@15455 93 mov UREGS_error_code(%esp),%esi
kaf24@3127 94 pushfl # EFLAGS
kaf24@3127 95 movl $__HYPERVISOR_CS,%eax
kaf24@3127 96 pushl %eax # CS
keir@13930 97 movl $.Ldf1,%eax
kaf24@3127 98 pushl %eax # EIP
kaf24@3127 99 pushl %esi # error_code/entry_vector
kaf24@9604 100 jmp handle_exception
keir@13930 101 .Ldf1: GET_CURRENT(%ebx)
kaf24@3127 102 jmp test_all_events
kaf24@4176 103 failsafe_callback:
kaf24@4176 104 GET_CURRENT(%ebx)
kaf24@5327 105 leal VCPU_trap_bounce(%ebx),%edx
kaf24@5327 106 movl VCPU_failsafe_addr(%ebx),%eax
kaf24@4388 107 movl %eax,TRAPBOUNCE_eip(%edx)
kaf24@5327 108 movl VCPU_failsafe_sel(%ebx),%eax
kaf24@4176 109 movw %ax,TRAPBOUNCE_cs(%edx)
kfraser@14968 110 movb $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
kaf24@10305 111 bt $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
kaf24@10305 112 jnc 1f
kfraser@14968 113 orb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
kaf24@10305 114 1: call create_bounce_frame
kaf24@4176 115 xorl %eax,%eax
kaf24@4721 116 movl %eax,UREGS_ds(%esp)
kaf24@4721 117 movl %eax,UREGS_es(%esp)
kaf24@4721 118 movl %eax,UREGS_fs(%esp)
kaf24@4721 119 movl %eax,UREGS_gs(%esp)
kaf24@4176 120 jmp test_all_events
kaf24@3127 121 .previous
keir@22674 122 _ASM_PRE_EXTABLE(.Lft1, .Lfx1)
keir@22674 123 _ASM_PRE_EXTABLE(.Lft2, .Lfx1)
keir@22674 124 _ASM_PRE_EXTABLE(.Lft3, .Lfx1)
keir@22674 125 _ASM_PRE_EXTABLE(.Lft4, .Lfx1)
keir@22674 126 _ASM_PRE_EXTABLE(.Lft5, .Lfx1)
keir@22674 127 _ASM_EXTABLE(.Ldf1, failsafe_callback)
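/*
 * The _ASM_EXTABLE/_ASM_PRE_EXTABLE entries above pair each potentially
 * faulting instruction with a fixup address. In rough C (a sketch; the
 * helper lives in xen/arch/x86/extable.c), the fault handler consults
 * the table like this:
 *
 *   unsigned long fixup = search_exception_table(regs->eip);
 *   if ( fixup )
 *       regs->eip = fixup;   // resume at .Lfx1 etc. rather than refault
 */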
kaf24@1710 128
kaf24@1710 129 ALIGN
kaf24@1710 130 restore_all_xen:
kfraser@11221 131 popl %ebx
kfraser@11221 132 popl %ecx
kfraser@11221 133 popl %edx
kfraser@11221 134 popl %esi
kfraser@11221 135 popl %edi
kfraser@11221 136 popl %ebp
kfraser@11221 137 popl %eax
kaf24@1710 138 addl $4,%esp
kaf24@1710 139 iret
kaf24@1710 140
kaf24@1710 141 ALIGN
kaf24@1710 142 ENTRY(hypercall)
kaf24@3127 143 subl $4,%esp # reserve the error_code/entry_vector slot (hypercalls push none)
kaf24@9029 144 FIXUP_RING0_GUEST_STACK
keir@22605 145 SAVE_ALL(,1f)
kfraser@15450 146 1: sti
kaf24@2954 147 GET_CURRENT(%ebx)
kaf24@10370 148 cmpl $NR_hypercalls,%eax
kaf24@10370 149 jae bad_hypercall
keir@22860 150 PERFC_INCR(hypercalls, %eax, %ebx)
kaf24@6490 151 #ifndef NDEBUG
kfraser@11676 152 /* Create shadow parameters and corrupt those not used by this call. */
kaf24@6490 153 pushl %eax
kaf24@6490 154 pushl UREGS_eip+4(%esp)
kaf24@6490 155 pushl 28(%esp) # EBP
kaf24@6490 156 pushl 28(%esp) # EDI
kaf24@6490 157 pushl 28(%esp) # ESI
kaf24@6490 158 pushl 28(%esp) # EDX
kaf24@6490 159 pushl 28(%esp) # ECX
kaf24@6490 160 pushl 28(%esp) # EBX
kaf24@6490 161 movzb hypercall_args_table(,%eax,1),%ecx
kaf24@6490 162 leal (%esp,%ecx,4),%edi
kaf24@6490 163 subl $6,%ecx # %ecx = nargs - 6
kaf24@6490 164 negl %ecx # %ecx = 6 - nargs = number of unused slots to poison
kaf24@6490 165 movl %eax,%esi
kaf24@6490 166 movl $0xDEADBEEF,%eax
kaf24@6490 167 rep stosl
kaf24@6490 168 movl %esi,%eax
keir@16197 169 #define SHADOW_BYTES 32 /* 6 shadow parameters + EIP + hypercall # */
kfraser@11676 170 #else
kfraser@11676 171 /*
kfraser@11676 172 * We need shadow parameters even on non-debug builds. We depend on the
kfraser@11676 173 * original versions not being clobbered (needed to create a hypercall
kfraser@11676 174 * continuation). But that isn't guaranteed by the function-call ABI.
kfraser@11676 175 */
kfraser@11676 176 pushl 20(%esp) # EBP
kfraser@11676 177 pushl 20(%esp) # EDI
kfraser@11676 178 pushl 20(%esp) # ESI
kfraser@11676 179 pushl 20(%esp) # EDX
kfraser@11676 180 pushl 20(%esp) # ECX
kfraser@11676 181 pushl 20(%esp) # EBX
keir@16197 182 #define SHADOW_BYTES 24 /* 6 shadow parameters */
kaf24@6490 183 #endif
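/*
 * The hypercall continuation the comment above refers to works roughly
 * as follows (a sketch; cf. hypercall_create_continuation() in
 * xen/arch/x86/domain.c): a preempted hypercall writes its remaining
 * operands back into the pristine frame copies and rewinds the guest
 * EIP so that the trapping instruction is re-executed on next entry.
 *
 *   regs->eax  = op;      // hypercall number back in %eax
 *   regs->ebx  = arg1;    // arguments back in their ABI registers
 *   regs->eip -= 2;       // length of "int $0x82", replayed on return
 */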
keir@16142 184 cmpb $0,tb_init_done
keir@22605 185 UNLIKELY_START(ne, trace)
keir@16142 186 call trace_hypercall
keir@16142 187 /* Now restore all the registers that trace_hypercall clobbered */
keir@16197 188 movl UREGS_eax+SHADOW_BYTES(%esp),%eax /* Hypercall # */
keir@22605 189 UNLIKELY_END(trace)
keir@22605 190 call *hypercall_table(,%eax,4)
keir@22605 191 movl %eax,UREGS_eax+SHADOW_BYTES(%esp) # save the return value
keir@16197 192 #undef SHADOW_BYTES
kfraser@11676 193 addl $24,%esp # Discard the shadow parameters
kaf24@6490 194 #ifndef NDEBUG
kfraser@11676 195 /* Deliberately corrupt real parameter regs used by this hypercall. */
kaf24@6490 196 popl %ecx # Shadow EIP
kfraser@11577 197 cmpl %ecx,UREGS_eip+4(%esp)
kaf24@6490 198 popl %ecx # Shadow hypercall index
kaf24@6490 199 jne skip_clobber # If EIP has changed then don't clobber
kaf24@6490 200 movzb hypercall_args_table(,%ecx,1),%ecx
kaf24@6490 201 movl %esp,%edi
kaf24@6490 202 movl $0xDEADBEEF,%eax
kaf24@6490 203 rep stosl
kaf24@6490 204 skip_clobber:
kaf24@6490 205 #endif
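/*
 * In C terms, the dispatch sequence above amounts to (a sketch of the
 * assembly, not separate Xen source):
 *
 *   if ( regs->eax >= NR_hypercalls )
 *       regs->eax = -ENOSYS;                       // see bad_hypercall
 *   else
 *       regs->eax = hypercall_table[regs->eax](ebx, ecx, edx,
 *                                              esi, edi, ebp);
 */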
kaf24@1710 206
kaf24@1710 207 test_all_events:
kaf24@1710 208 xorl %ecx,%ecx
kaf24@1710 209 notl %ecx
kaf24@1710 210 cli # tests must not race interrupts
kaf24@1710 211 /*test_softirqs:*/
kaf24@5327 212 movl VCPU_processor(%ebx),%eax
kaf24@4631 213 shl $IRQSTAT_shift,%eax
kaf24@4738 214 test %ecx,irq_stat(%eax,1)
kaf24@1710 215 jnz process_softirqs
keir@18006 216 testb $1,VCPU_mce_pending(%ebx)
keir@18006 217 jnz process_mce
kfraser@14696 218 testb $1,VCPU_nmi_pending(%ebx)
kfraser@14696 219 jnz process_nmi
Ian@8590 220 test_guest_events:
kaf24@5327 221 movl VCPU_vcpu_info(%ebx),%eax
cl349@2959 222 testb $0xFF,VCPUINFO_upcall_mask(%eax)
kaf24@1710 223 jnz restore_all_guest
cl349@2959 224 testb $0xFF,VCPUINFO_upcall_pending(%eax)
kaf24@1710 225 jz restore_all_guest
kaf24@1710 226 /*process_guest_events:*/
kaf24@4176 227 sti
kaf24@5327 228 leal VCPU_trap_bounce(%ebx),%edx
kaf24@5327 229 movl VCPU_event_addr(%ebx),%eax
kaf24@3081 230 movl %eax,TRAPBOUNCE_eip(%edx)
kaf24@5327 231 movl VCPU_event_sel(%ebx),%eax
kaf24@3081 232 movw %ax,TRAPBOUNCE_cs(%edx)
kfraser@14968 233 movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
kaf24@1710 234 call create_bounce_frame
kaf24@4176 235 jmp test_all_events
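/*
 * The TRAPBOUNCE_* slots filled in above describe one pending bounce in
 * a small per-VCPU descriptor, approximately (cf. struct trap_bounce in
 * xen/include/asm-x86/domain.h; a sketch from memory):
 *
 *   struct trap_bounce {
 *       uint32_t      error_code;  // TRAPBOUNCE_error_code
 *       uint8_t       flags;       // TBF_* (TRAPBOUNCE_flags)
 *       uint16_t      cs;          // guest handler selector
 *       unsigned long eip;         // guest handler entry point
 *   };
 */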
kaf24@1710 236
kaf24@1710 237 ALIGN
kaf24@1710 238 process_softirqs:
kaf24@1710 239 sti
kaf24@4738 240 call do_softirq
kaf24@1710 241 jmp test_all_events
kfraser@11221 242
kfraser@11221 243 ALIGN
keir@18006 244 /* %ebx: struct vcpu */
keir@18006 245 process_mce:
keir@20593 246 testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
keir@20593 247 jnz test_guest_events
keir@18006 248 sti
keir@18006 249 movb $0,VCPU_mce_pending(%ebx)
keir@18006 250 call set_guest_machinecheck_trapbounce
keir@18006 251 test %eax,%eax
keir@18006 252 jz test_all_events
keir@20593 253 movzbl VCPU_async_exception_mask(%ebx),%edx # save mask for the
keir@20593 254 movb %dl,VCPU_mce_old_mask(%ebx) # iret hypercall
keir@20593 255 orl $1 << VCPU_TRAP_MCE,%edx
keir@20593 256 movb %dl,VCPU_async_exception_mask(%ebx)
keir@18006 257 jmp process_trap
keir@18006 258
keir@18006 259 ALIGN
keir@18006 260 /* %ebx: struct vcpu */
Ian@8590 261 process_nmi:
keir@21972 262 testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%ebx)
keir@20593 263 jnz test_guest_events
keir@16293 264 sti
kfraser@14696 265 movb $0,VCPU_nmi_pending(%ebx)
keir@16293 266 call set_guest_nmi_trapbounce
Ian@8590 267 test %eax,%eax
keir@16293 268 jz test_all_events
keir@20593 269 movzbl VCPU_async_exception_mask(%ebx),%edx # save mask for the
keir@20593 270 movb %dl,VCPU_nmi_old_mask(%ebx) # iret hypercall
keir@20593 271 orl $1 << VCPU_TRAP_NMI,%edx
keir@20593 272 movb %dl,VCPU_async_exception_mask(%ebx)
keir@18006 273 /* FALLTHROUGH */
keir@18006 274 process_trap:
Ian@8590 275 leal VCPU_trap_bounce(%ebx),%edx
Ian@8590 276 call create_bounce_frame
Ian@8590 277 jmp test_all_events
Ian@8590 278
kaf24@10370 279 bad_hypercall:
kaf24@10370 280 movl $-ENOSYS,UREGS_eax(%esp)
kaf24@10370 281 jmp test_all_events
kaf24@10370 282
kaf24@2954 283 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
kaf24@2954 284 /* {EIP, CS, EFLAGS, [ESP, SS]} */
kaf24@5327 285 /* %edx == trap_bounce, %ebx == struct vcpu */
kaf24@4721 286 /* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
kaf24@3207 287 create_bounce_frame:
keir@14799 288 ASSERT_INTERRUPTS_ENABLED
kaf24@4721 289 movl UREGS_eflags+4(%esp),%ecx
kaf24@4721 290 movb UREGS_cs+4(%esp),%cl
kaf24@3207 291 testl $(2|X86_EFLAGS_VM),%ecx
kaf24@3207 292 jz ring1 /* jump if returning to an existing ring-1 activation */
kaf24@5327 293 movl VCPU_kernel_sp(%ebx),%esi
keir@13930 294 .Lft6: mov VCPU_kernel_ss(%ebx),%gs
keir@22605 295 testl $X86_EFLAGS_VM,%ecx
keir@22605 296 UNLIKELY_START(nz, bounce_vm86_1)
kaf24@4176 297 subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
kaf24@4721 298 movl UREGS_es+4(%esp),%eax
keir@13930 299 .Lft7: movl %eax,%gs:(%esi)
kaf24@4721 300 movl UREGS_ds+4(%esp),%eax
keir@13930 301 .Lft8: movl %eax,%gs:4(%esi)
kaf24@4721 302 movl UREGS_fs+4(%esp),%eax
keir@13930 303 .Lft9: movl %eax,%gs:8(%esi)
kaf24@4721 304 movl UREGS_gs+4(%esp),%eax
keir@13930 305 .Lft10: movl %eax,%gs:12(%esi)
keir@22605 306 UNLIKELY_END(bounce_vm86_1)
keir@13930 307 subl $8,%esi /* push SS/ESP (inter-priv iret) */
kaf24@4721 308 movl UREGS_esp+4(%esp),%eax
keir@13930 309 .Lft11: movl %eax,%gs:(%esi)
kaf24@4721 310 movl UREGS_ss+4(%esp),%eax
keir@13930 311 .Lft12: movl %eax,%gs:4(%esi)
kaf24@3207 312 jmp 1f
kaf24@3207 313 ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
kaf24@4721 314 movl UREGS_esp+4(%esp),%esi
keir@13930 315 .Lft13: mov UREGS_ss+4(%esp),%gs
kaf24@3207 316 1: /* Construct a stack frame: EFLAGS, CS/EIP */
kaf24@4987 317 movb TRAPBOUNCE_flags(%edx),%cl
kaf24@1710 318 subl $12,%esi
kaf24@4721 319 movl UREGS_eip+4(%esp),%eax
keir@13930 320 .Lft14: movl %eax,%gs:(%esi)
kaf24@5327 321 movl VCPU_vcpu_info(%ebx),%eax
kaf24@4987 322 pushl VCPUINFO_upcall_mask(%eax)
kaf24@4987 323 testb $TBF_INTERRUPT,%cl
kaf24@6055 324 setnz %ch # TBF_INTERRUPT -> set upcall mask
kaf24@6055 325 orb %ch,VCPUINFO_upcall_mask(%eax)
kaf24@4987 326 popl %eax
kaf24@4987 327 shll $16,%eax # Bits 16-23: saved_upcall_mask
kaf24@4987 328 movw UREGS_cs+4(%esp),%ax # Bits 0-15: CS
kaf24@9029 329 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
kaf24@9029 330 testw $2,%ax
keir@13930 331 jnz .Lft15
kaf24@9029 332 and $~3,%ax # RPL 1 -> RPL 0
kaf24@9029 333 #endif
keir@13930 334 .Lft15: movl %eax,%gs:4(%esi)
kaf24@7774 335 test $0x00FF0000,%eax # Bits 16-23: saved_upcall_mask
kaf24@7774 336 setz %ch # %ch == !saved_upcall_mask
kaf24@4721 337 movl UREGS_eflags+4(%esp),%eax
kaf24@7774 338 andl $~X86_EFLAGS_IF,%eax
kaf24@7774 339 shlb $1,%ch # Bit 9 (EFLAGS.IF)
kaf24@7774 340 orb %ch,%ah # Fold EFLAGS.IF into %eax
keir@13930 341 .Lft16: movl %eax,%gs:8(%esi)
kaf24@3127 342 test $TBF_EXCEPTION_ERRCODE,%cl
kaf24@3127 343 jz 1f
kaf24@3127 344 subl $4,%esi # push error_code onto guest frame
kaf24@3127 345 movl TRAPBOUNCE_error_code(%edx),%eax
keir@13930 346 .Lft17: movl %eax,%gs:(%esi)
kaf24@3127 347 1: testb $TBF_FAILSAFE,%cl
keir@22605 348 UNLIKELY_START(nz, bounce_failsafe)
kaf24@3127 349 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
kaf24@4721 350 testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
keir@22605 351 jnz .Lvm86_2
keir@13930 352 movl UREGS_ds+4(%esp),%eax # non-VM86: write real selector values
keir@13930 353 .Lft22: movl %eax,%gs:(%esi)
kaf24@4721 354 movl UREGS_es+4(%esp),%eax
keir@13930 355 .Lft23: movl %eax,%gs:4(%esi)
kaf24@4721 356 movl UREGS_fs+4(%esp),%eax
keir@13930 357 .Lft24: movl %eax,%gs:8(%esi)
kaf24@4721 358 movl UREGS_gs+4(%esp),%eax
keir@13930 359 .Lft25: movl %eax,%gs:12(%esi)
keir@22605 360 jmp .Lnvm86_3
keir@22605 361 .Lvm86_2:
keir@22605 362 xorl %eax,%eax # VM86: we write zero selector values
keir@22605 363 .Lft18: movl %eax,%gs:(%esi)
keir@22605 364 .Lft19: movl %eax,%gs:4(%esi)
keir@22605 365 .Lft20: movl %eax,%gs:8(%esi)
keir@22605 366 .Lft21: movl %eax,%gs:12(%esi)
keir@22605 367 UNLIKELY_END(bounce_failsafe)
keir@22605 368 testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
keir@22605 369 UNLIKELY_START(nz, bounce_vm86_3)
kaf24@3207 370 xorl %eax,%eax /* zero DS-GS, just as a real CPU would */
kaf24@4721 371 movl %eax,UREGS_ds+4(%esp)
kaf24@4721 372 movl %eax,UREGS_es+4(%esp)
kaf24@4721 373 movl %eax,UREGS_fs+4(%esp)
kaf24@4721 374 movl %eax,UREGS_gs+4(%esp)
keir@22605 375 UNLIKELY_END(bounce_vm86_3)
keir@13930 376 .Lnvm86_3:
keir@13930 377 /* Rewrite our stack frame and return to ring 1. */
kaf24@1710 378 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
kfraser@11201 379 andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
kfraser@11201 380 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+4(%esp)
kaf24@5469 381 mov %gs,UREGS_ss+4(%esp)
kaf24@4721 382 movl %esi,UREGS_esp+4(%esp)
kaf24@3081 383 movzwl TRAPBOUNCE_cs(%edx),%eax
kfraser@12493 384 /* Null selectors (0-3) are not allowed. */
kfraser@12493 385 testl $~3,%eax
kfraser@12493 386 jz domain_crash_synchronous
kaf24@4721 387 movl %eax,UREGS_cs+4(%esp)
kaf24@3081 388 movl TRAPBOUNCE_eip(%edx),%eax
kaf24@4721 389 movl %eax,UREGS_eip+4(%esp)
kaf24@1710 390 ret
keir@22674 391 _ASM_EXTABLE(.Lft6, domain_crash_synchronous)
keir@22674 392 _ASM_EXTABLE(.Lft7, domain_crash_synchronous)
keir@22674 393 _ASM_EXTABLE(.Lft8, domain_crash_synchronous)
keir@22674 394 _ASM_EXTABLE(.Lft9, domain_crash_synchronous)
keir@22674 395 _ASM_EXTABLE(.Lft10, domain_crash_synchronous)
keir@22674 396 _ASM_EXTABLE(.Lft11, domain_crash_synchronous)
keir@22674 397 _ASM_EXTABLE(.Lft12, domain_crash_synchronous)
keir@22674 398 _ASM_EXTABLE(.Lft13, domain_crash_synchronous)
keir@22674 399 _ASM_EXTABLE(.Lft14, domain_crash_synchronous)
keir@22674 400 _ASM_EXTABLE(.Lft15, domain_crash_synchronous)
keir@22674 401 _ASM_EXTABLE(.Lft16, domain_crash_synchronous)
keir@22674 402 _ASM_EXTABLE(.Lft17, domain_crash_synchronous)
keir@22674 403 _ASM_EXTABLE(.Lft18, domain_crash_synchronous)
keir@22674 404 _ASM_EXTABLE(.Lft19, domain_crash_synchronous)
keir@22674 405 _ASM_EXTABLE(.Lft20, domain_crash_synchronous)
keir@22674 406 _ASM_EXTABLE(.Lft21, domain_crash_synchronous)
keir@22674 407 _ASM_EXTABLE(.Lft22, domain_crash_synchronous)
keir@22674 408 _ASM_EXTABLE(.Lft23, domain_crash_synchronous)
keir@22674 409 _ASM_EXTABLE(.Lft24, domain_crash_synchronous)
keir@22674 410 _ASM_EXTABLE(.Lft25, domain_crash_synchronous)
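/*
 * One non-obvious detail of the frame built above: the guest-visible CS
 * slot is a full 32-bit word whose bits 16-23 carry saved_upcall_mask,
 * and the pushed EFLAGS.IF is forced to reflect !saved_upcall_mask. As
 * a sketch:
 *
 *   frame->cs     = (saved_upcall_mask << 16) | return_cs;
 *   frame->eflags = (regs->eflags & ~X86_EFLAGS_IF) |
 *                   (saved_upcall_mask ? 0 : X86_EFLAGS_IF);
 */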
kaf24@1710 411
sos22@8698 412 domain_crash_synchronous_string:
kaf24@8701 413 .asciz "domain_crash_sync called from entry.S (%lx)\n"
kaf24@8701 414
sos22@8698 415 domain_crash_synchronous:
kaf24@8701 416 pushl $domain_crash_synchronous_string
kfraser@11908 417 call printk
kaf24@8701 418 jmp __domain_crash_synchronous
kaf24@8701 419
kaf24@1710 420 ALIGN
kaf24@1710 421 ENTRY(ret_from_intr)
kaf24@3207 422 GET_CURRENT(%ebx)
kaf24@4721 423 movl UREGS_eflags(%esp),%eax
kaf24@4721 424 movb UREGS_cs(%esp),%al
kaf24@3207 425 testl $(3|X86_EFLAGS_VM),%eax
kaf24@3207 426 jnz test_all_events
kaf24@3207 427 jmp restore_all_xen
kaf24@1710 428
kaf24@1710 429 ENTRY(divide_error)
kfraser@11221 430 pushl $TRAP_divide_error<<16
kfraser@11221 431 ALIGN
kaf24@9604 432 handle_exception:
kaf24@9029 433 FIXUP_RING0_GUEST_STACK
kfraser@15450 434 SAVE_ALL(1f,2f)
kfraser@15450 435 .text 1
kfraser@15450 436 /* Exception within Xen: make sure we have valid %ds,%es. */
kfraser@15450 437 1: mov %ecx,%ds
kfraser@15450 438 mov %ecx,%es
kfraser@15450 439 jmp 2f
kfraser@15450 440 .previous
kfraser@15450 441 2: testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
kaf24@3127 442 jz exception_with_ints_disabled
kaf24@4141 443 sti # re-enable interrupts
kfraser@14069 444 1: xorl %eax,%eax
kaf24@4721 445 movw UREGS_entry_vector(%esp),%ax
kaf24@3127 446 movl %esp,%edx
kfraser@11221 447 pushl %edx # push the cpu_user_regs pointer
kfraser@11221 448 GET_CURRENT(%ebx)
keir@22860 449 PERFC_INCR(exceptions, %eax, %ebx)
kfraser@11221 450 call *exception_table(,%eax,4)
kaf24@3127 451 addl $4,%esp
kaf24@4721 452 movl UREGS_eflags(%esp),%eax
kaf24@4721 453 movb UREGS_cs(%esp),%al
kaf24@3207 454 testl $(3|X86_EFLAGS_VM),%eax
kfraser@11221 455 jz restore_all_xen
kaf24@9604 456 leal VCPU_trap_bounce(%ebx),%edx
kaf24@9604 457 testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
kaf24@9604 458 jz test_all_events
kaf24@9604 459 call create_bounce_frame
kfraser@14968 460 movb $0,TRAPBOUNCE_flags(%edx)
kaf24@9604 461 jmp test_all_events
kaf24@1710 462
kaf24@3127 463 exception_with_ints_disabled:
kaf24@4721 464 movl UREGS_eflags(%esp),%eax
kaf24@4721 465 movb UREGS_cs(%esp),%al
kaf24@3207 466 testl $(3|X86_EFLAGS_VM),%eax # interrupts disabled outside Xen?
kaf24@4141 467 jnz FATAL_exception_with_ints_disabled
cl349@3832 468 pushl %esp
kaf24@3127 469 call search_pre_exception_table
kaf24@3127 470 addl $4,%esp
kaf24@3127 471 testl %eax,%eax # no fixup code for faulting EIP?
kfraser@14069 472 jz 1b
kaf24@4721 473 movl %eax,UREGS_eip(%esp)
kaf24@3127 474 movl %esp,%esi
kaf24@3127 475 subl $4,%esp
kaf24@3127 476 movl %esp,%edi
kaf24@4721 477 movl $UREGS_kernel_sizeof/4,%ecx
kaf24@3127 478 rep; movsl # make room for error_code/entry_vector
kaf24@4721 479 movl UREGS_error_code(%esp),%eax # error_code/entry_vector
kaf24@4721 480 movl %eax,UREGS_kernel_sizeof(%esp)
kaf24@3127 481 jmp restore_all_xen # return to fixup code
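/*
 * The pre-exception table consulted above is populated by the
 * _ASM_PRE_EXTABLE entries registered earlier. In rough C (a sketch;
 * the helper lives in xen/arch/x86/extable.c):
 *
 *   fixup = search_pre_exception_table(regs);
 *   if ( fixup )
 *       regs->eip = fixup;  // frame is then shifted to keep error_code
 */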
kaf24@3127 482
kaf24@3127 483 FATAL_exception_with_ints_disabled:
kaf24@3127 484 xorl %esi,%esi
kaf24@4721 485 movw UREGS_entry_vector(%esp),%si
kaf24@3127 486 movl %esp,%edx
kfraser@11221 487 pushl %edx # push the cpu_user_regs pointer
kaf24@3127 488 pushl %esi # push the trapnr (entry vector)
kaf24@4738 489 call fatal_trap
kaf24@3127 490 ud2
kaf24@3127 491
kaf24@1710 492 ENTRY(coprocessor_error)
kfraser@11221 493 pushl $TRAP_copro_error<<16
kfraser@11221 494 jmp handle_exception
kaf24@1710 495
kaf24@1710 496 ENTRY(simd_coprocessor_error)
kfraser@11221 497 pushl $TRAP_simd_error<<16
kfraser@11221 498 jmp handle_exception
kaf24@1710 499
kaf24@1710 500 ENTRY(device_not_available)
kfraser@11221 501 pushl $TRAP_no_device<<16
kaf24@9604 502 jmp handle_exception
kaf24@1710 503
kaf24@1710 504 ENTRY(debug)
kfraser@11221 505 pushl $TRAP_debug<<16
kfraser@11221 506 jmp handle_exception
kaf24@1710 507
kaf24@1710 508 ENTRY(int3)
kfraser@11221 509 pushl $TRAP_int3<<16
kfraser@11221 510 jmp handle_exception
kaf24@1710 511
kaf24@1710 512 ENTRY(overflow)
kfraser@11221 513 pushl $TRAP_overflow<<16
kfraser@11221 514 jmp handle_exception
kaf24@1710 515
kaf24@1710 516 ENTRY(bounds)
kfraser@11221 517 pushl $TRAP_bounds<<16
kfraser@11221 518 jmp handle_exception
kaf24@1710 519
kaf24@1710 520 ENTRY(invalid_op)
kfraser@11221 521 pushl $TRAP_invalid_op<<16
kfraser@11221 522 jmp handle_exception
kaf24@1710 523
kaf24@1710 524 ENTRY(coprocessor_segment_overrun)
kfraser@11221 525 pushl $TRAP_copro_seg<<16
kfraser@11221 526 jmp handle_exception
kaf24@1710 527
kaf24@1710 528 ENTRY(invalid_TSS)
kaf24@9604 529 movw $TRAP_invalid_tss,2(%esp)
kfraser@11221 530 jmp handle_exception
kaf24@1710 531
kaf24@1710 532 ENTRY(segment_not_present)
kaf24@9604 533 movw $TRAP_no_segment,2(%esp)
kfraser@11221 534 jmp handle_exception
kaf24@1710 535
kaf24@1710 536 ENTRY(stack_segment)
kaf24@9604 537 movw $TRAP_stack_error,2(%esp)
kfraser@11221 538 jmp handle_exception
kaf24@1710 539
kaf24@1710 540 ENTRY(general_protection)
kaf24@9604 541 movw $TRAP_gp_fault,2(%esp)
kfraser@11221 542 jmp handle_exception
kaf24@1710 543
kaf24@1710 544 ENTRY(alignment_check)
kaf24@9604 545 movw $TRAP_alignment_check,2(%esp)
kfraser@11221 546 jmp handle_exception
kaf24@1710 547
kaf24@1710 548 ENTRY(page_fault)
kaf24@9604 549 movw $TRAP_page_fault,2(%esp)
kfraser@11221 550 jmp handle_exception
kaf24@1710 551
kaf24@1710 552 ENTRY(spurious_interrupt_bug)
kaf24@3127 553 pushl $TRAP_spurious_int<<16
kfraser@11221 554 jmp handle_exception
kaf24@1710 555
keir@22606 556 .pushsection .init.text, "ax", @progbits
kfraser@12822 557 ENTRY(early_page_fault)
kfraser@15450 558 SAVE_ALL(1f,1f)
kfraser@15450 559 1: movl %esp,%eax
kfraser@15450 560 pushl %eax
kfraser@12822 561 call do_early_page_fault
kfraser@12822 562 addl $4,%esp
kfraser@12822 563 jmp restore_all_xen
keir@22606 564 .popsection
kfraser@12822 565
kfraser@15452 566 handle_nmi_mce:
kaf24@9029 567 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
kfraser@15452 568 # NMI/MCE entry protocol is incompatible with guest kernel in ring 0.
kfraser@15452 569 addl $4,%esp
kaf24@9029 570 iret
kaf24@9029 571 #else
kaf24@1710 572 # Save state but do not trash the segment registers!
kfraser@15452 573 SAVE_ALL(.Lnmi_mce_xen,.Lnmi_mce_common)
kfraser@15452 574 .Lnmi_mce_common:
kfraser@15452 575 xorl %eax,%eax
kfraser@15452 576 movw UREGS_entry_vector(%esp),%ax
kfraser@15452 577 movl %esp,%edx
kfraser@15452 578 pushl %edx
kfraser@15452 579 call *exception_table(,%eax,4)
kaf24@8438 580 addl $4,%esp
kfraser@15450 581 /*
kfraser@15450 582 * NB. We may return to Xen context with polluted %ds/%es. But in such
kfraser@15450 583 * cases we have put guest DS/ES on the guest stack frame, which will
kfraser@15450 584 * be detected by SAVE_ALL(), or we have rolled back restore_guest.
kfraser@15450 585 */
kaf24@4543 586 jmp ret_from_intr
kfraser@15452 587 .Lnmi_mce_xen:
kfraser@15450 588 /* Check the outer (guest) context for %ds/%es state validity. */
keir@21831 589 GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%ebx)
kfraser@15450 590 testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
kfraser@15450 591 mov %ds,%eax
kfraser@15450 592 mov %es,%edx
kfraser@15452 593 jnz .Lnmi_mce_vm86
kfraser@15450 594 /* We may have interrupted Xen while messing with %ds/%es... */
kfraser@15450 595 cmpw %ax,%cx
kfraser@15450 596 mov %ecx,%ds /* Ensure %ds is valid */
kfraser@15450 597 cmove UREGS_ds(%ebx),%eax /* Grab guest DS if it wasn't in %ds */
kfraser@15450 598 cmpw %dx,%cx
kfraser@15450 599 movl %eax,UREGS_ds(%ebx) /* Ensure guest frame contains guest DS */
kfraser@15450 600 cmove UREGS_es(%ebx),%edx /* Grab guest ES if it wasn't in %es */
kfraser@15450 601 mov %ecx,%es /* Ensure %es is valid */
kfraser@15450 602 movl $.Lrestore_sregs_guest,%ecx
kfraser@15450 603 movl %edx,UREGS_es(%ebx) /* Ensure guest frame contains guest ES */
kfraser@15450 604 cmpl %ecx,UREGS_eip(%esp)
kfraser@15452 605 jbe .Lnmi_mce_common
kfraser@15450 606 cmpl $.Lrestore_iret_guest,UREGS_eip(%esp)
kfraser@15452 607 ja .Lnmi_mce_common
kfraser@15450 608 /* Roll outer context restore_guest back to restoring %ds/%es. */
kfraser@15450 609 movl %ecx,UREGS_eip(%esp)
kfraser@15452 610 jmp .Lnmi_mce_common
kfraser@15452 611 .Lnmi_mce_vm86:
kfraser@15450 612 /* vm86 is easy: the CPU saved %ds/%es so we can safely stomp them. */
kfraser@15450 613 mov %ecx,%ds
kfraser@15450 614 mov %ecx,%es
kfraser@15452 615 jmp .Lnmi_mce_common
kaf24@9029 616 #endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
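/*
 * The %ds/%es juggling above guards against an NMI/MCE arriving inside
 * restore_all_guest after the guest's %ds/%es have been loaded. The
 * rollback step, as a sketch (eip_in_range() is a hypothetical helper
 * for illustration only):
 *
 *   if ( !(outer->eflags & X86_EFLAGS_VM) &&
 *        eip_in_range(regs->eip, .Lrestore_sregs_guest,
 *                     .Lrestore_iret_guest) )
 *       regs->eip = .Lrestore_sregs_guest;  // redo the segment loads
 */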
kaf24@3633 617
kfraser@15452 618 ENTRY(nmi)
kfraser@15452 619 pushl $TRAP_nmi<<16
kfraser@15452 620 jmp handle_nmi_mce
kfraser@15452 621
kfraser@15452 622 ENTRY(machine_check)
kfraser@15452 623 pushl $TRAP_machine_check<<16
kfraser@15452 624 jmp handle_nmi_mce
kfraser@15452 625
kaf24@3207 626 ENTRY(setup_vm86_frame)
kfraser@15450 627 mov %ecx,%ds
kfraser@15450 628 mov %ecx,%es
kaf24@3207 629 # Copies the entire stack frame forwards by 16 bytes.
kaf24@3207 630 .macro copy_vm86_words count=18
kaf24@3207 631 .if \count
kaf24@3207 632 pushl ((\count-1)*4)(%esp)
kaf24@3207 633 popl ((\count-1)*4)+16(%esp)
kaf24@3207 634 copy_vm86_words "(\count-1)"
kaf24@3207 635 .endif
kaf24@3207 636 .endm
kaf24@3207 637 copy_vm86_words
kaf24@3207 638 addl $16,%esp
kaf24@3207 639 ret
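/*
 * copy_vm86_words above unrolls, via recursion, into 18 push/pop pairs.
 * Its effect in C terms (a sketch): slide the 18-dword frame up by 16
 * bytes, highest offset first.
 *
 *   for ( i = 17; i >= 0; i-- )
 *       stack[i + 4] = stack[i];   // 16 bytes == 4 dwords
 */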
kaf24@3207 640
keir@20420 641 .section .rodata, "a", @progbits
kaf24@3127 642
kaf24@3127 643 ENTRY(exception_table)
kaf24@4738 644 .long do_divide_error
kaf24@4738 645 .long do_debug
kfraser@15452 646 .long do_nmi
kaf24@4738 647 .long do_int3
kaf24@4738 648 .long do_overflow
kaf24@4738 649 .long do_bounds
kaf24@4738 650 .long do_invalid_op
keir@16025 651 .long do_device_not_available
kaf24@3127 652 .long 0 # double fault
kaf24@4738 653 .long do_coprocessor_segment_overrun
kaf24@4738 654 .long do_invalid_TSS
kaf24@4738 655 .long do_segment_not_present
kaf24@4738 656 .long do_stack_segment
kaf24@4738 657 .long do_general_protection
kaf24@4738 658 .long do_page_fault
kaf24@4738 659 .long do_spurious_interrupt_bug
kaf24@4738 660 .long do_coprocessor_error
kaf24@4738 661 .long do_alignment_check
kaf24@4738 662 .long do_machine_check
kaf24@4738 663 .long do_simd_coprocessor_error
kaf24@3127 664
kaf24@1710 665 ENTRY(hypercall_table)
kaf24@4738 666 .long do_set_trap_table /* 0 */
kaf24@4738 667 .long do_mmu_update
kaf24@4738 668 .long do_set_gdt
kaf24@4738 669 .long do_stack_switch
kaf24@4738 670 .long do_set_callbacks
kaf24@4738 671 .long do_fpu_taskswitch /* 5 */
kfraser@12509 672 .long do_sched_op_compat
kfraser@11295 673 .long do_platform_op
kaf24@4738 674 .long do_set_debugreg
kaf24@4738 675 .long do_get_debugreg
kaf24@4738 676 .long do_update_descriptor /* 10 */
kaf24@4968 677 .long do_ni_hypercall
kaf24@6506 678 .long do_memory_op
kaf24@4738 679 .long do_multicall
kaf24@4738 680 .long do_update_va_mapping
kaf24@4738 681 .long do_set_timer_op /* 15 */
kaf24@9927 682 .long do_event_channel_op_compat
kaf24@4738 683 .long do_xen_version
kaf24@4738 684 .long do_console_io
kaf24@9927 685 .long do_physdev_op_compat
kaf24@4738 686 .long do_grant_table_op /* 20 */
kaf24@4738 687 .long do_vm_assist
kaf24@4738 688 .long do_update_va_mapping_otherdomain
Ian@8590 689 .long do_iret
kaf24@7199 690 .long do_vcpu_op
kaf24@4738 691 .long do_ni_hypercall /* 25 */
kaf24@4738 692 .long do_mmuext_op
kfraser@15849 693 .long do_xsm_op
Ian@8590 694 .long do_nmi_op
kfraser@12509 695 .long do_sched_op
Ian@9595 696 .long do_callback_op /* 30 */
ack@9610 697 .long do_xenoprof_op
kaf24@9927 698 .long do_event_channel_op
kaf24@9927 699 .long do_physdev_op
kfraser@11295 700 .long do_hvm_op
kfraser@11295 701 .long do_sysctl /* 35 */
kfraser@11295 702 .long do_domctl
ian@12663 703 .long do_kexec_op
keir@19440 704 .long do_tmem_op
keir@18006 705 .rept __HYPERVISOR_arch_0-((.-hypercall_table)/4)
keir@18006 706 .long do_ni_hypercall
keir@18006 707 .endr
keir@18006 708 .long do_mca /* 48 */
kaf24@1710 709 .rept NR_hypercalls-((.-hypercall_table)/4)
kaf24@4738 710 .long do_ni_hypercall
kaf24@1710 711 .endr
kaf24@6490 712
kaf24@6490 713 ENTRY(hypercall_args_table)
kaf24@6490 714 .byte 1 /* do_set_trap_table */ /* 0 */
kaf24@6490 715 .byte 4 /* do_mmu_update */
kaf24@6490 716 .byte 2 /* do_set_gdt */
kaf24@6490 717 .byte 2 /* do_stack_switch */
kaf24@6490 718 .byte 4 /* do_set_callbacks */
kaf24@6490 719 .byte 1 /* do_fpu_taskswitch */ /* 5 */
kfraser@12509 720 .byte 2 /* do_sched_op_compat */
kfraser@11295 721 .byte 1 /* do_platform_op */
kaf24@6490 722 .byte 2 /* do_set_debugreg */
kaf24@6490 723 .byte 1 /* do_get_debugreg */
kaf24@6490 724 .byte 4 /* do_update_descriptor */ /* 10 */
kaf24@6490 725 .byte 0 /* do_ni_hypercall */
kaf24@6506 726 .byte 2 /* do_memory_op */
kaf24@6490 727 .byte 2 /* do_multicall */
kaf24@6490 728 .byte 4 /* do_update_va_mapping */
kaf24@6490 729 .byte 2 /* do_set_timer_op */ /* 15 */
kaf24@9927 730 .byte 1 /* do_event_channel_op_compat */
kaf24@6772 731 .byte 2 /* do_xen_version */
kaf24@6490 732 .byte 3 /* do_console_io */
kaf24@9927 733 .byte 1 /* do_physdev_op_compat */
kaf24@6490 734 .byte 3 /* do_grant_table_op */ /* 20 */
kaf24@6490 735 .byte 2 /* do_vm_assist */
kaf24@6490 736 .byte 5 /* do_update_va_mapping_otherdomain */
Ian@8590 737 .byte 0 /* do_iret */
kaf24@7199 738 .byte 3 /* do_vcpu_op */
kaf24@6490 739 .byte 0 /* do_ni_hypercall */ /* 25 */
kaf24@6490 740 .byte 4 /* do_mmuext_op */
kfraser@15849 741 .byte 1 /* do_xsm_op */
Ian@8590 742 .byte 2 /* do_nmi_op */
kfraser@12509 743 .byte 2 /* do_sched_op */
Ian@9595 744 .byte 2 /* do_callback_op */ /* 30 */
kaf24@9712 745 .byte 2 /* do_xenoprof_op */
kaf24@9927 746 .byte 2 /* do_event_channel_op */
kaf24@9927 747 .byte 2 /* do_physdev_op */
kfraser@11295 748 .byte 2 /* do_hvm_op */
kfraser@11295 749 .byte 1 /* do_sysctl */ /* 35 */
kfraser@11295 750 .byte 1 /* do_domctl */
ian@12663 751 .byte 2 /* do_kexec_op */
keir@19440 752 .byte 1 /* do_tmem_op */
keir@18006 753 .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
keir@18006 754 .byte 0 /* do_ni_hypercall */
keir@18006 755 .endr
keir@18006 756 .byte 1 /* do_mca */ /* 48 */
kaf24@6490 757 .rept NR_hypercalls-(.-hypercall_args_table)
kaf24@6490 758 .byte 0 /* do_ni_hypercall */
kaf24@6490 759 .endr
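/*
 * Usage note: hypercall_args_table[nr] is the argument count for
 * hypercall nr. The debug build reads it twice: once to decide how many
 * of the six shadow slots to poison with 0xDEADBEEF before the call,
 * and once afterwards to poison the argument registers the call
 * consumed. The pre-call poisoning, as a sketch:
 *
 *   unsigned int nargs = hypercall_args_table[nr];
 *   for ( i = nargs; i < 6; i++ )
 *       shadow_args[i] = 0xDEADBEEF;
 */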