debuggers.hg
annotate xen/arch/x86/x86_32/entry.S @ 3081:ceacd1c23c6c
bitkeeper revision 1.1159.179.3 (419cc874fJd3ljoD3_vTHS4nWtM2lg)
Move event/failsafe callback addresses into thread_info (arch-dep).
Make the trap_bounce per-domain rather than per-cpu.
author | kaf24@scramble.cl.cam.ac.uk |
date | Thu Nov 18 16:06:12 2004 +0000 (2004-11-18) |
parents | 3c505be01ff1 |
children | 81e74f5bf690 |
rev | line source |
---|---|
kaf24@1710 | 1 /* |
kaf24@1710 | 2 * Hypercall and fault low-level handling routines. |
kaf24@1710 | 3 * |
kaf24@1710 | 4 * Copyright (c) 2002-2004, K A Fraser |
kaf24@1710 | 5 * Copyright (c) 1991, 1992 Linus Torvalds |
kaf24@1710 | 6 */ |
kaf24@1710 | 7 |
kaf24@1710 | 8 /* |
kaf24@1710 | 9 * The idea for callbacks to guest OSes |
kaf24@1710 | 10 * ==================================== |
kaf24@1710 | 11 * |
kaf24@1710 | 12 * First, we require that all callbacks (either via a supplied |
kaf24@1710 | 13 * interrupt-descriptor-table, or via the special event or failsafe callbacks |
kaf24@1710 | 14 * in the shared-info-structure) are to ring 1. This just makes life easier, |
kaf24@1710 | 15 * in that it means we don't have to do messy GDT/LDT lookups to find |
kaf24@1710 | 16 * out what the privilege level of the return code-selector is. That code |
kaf24@1710 | 17 * would just be a hassle to write, and would need to account for running |
kaf24@1710 | 18 * off the end of the GDT/LDT, for example. For all callbacks we check |
kaf24@1710 | 19 * that the provided |
kaf24@1710 | 20 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe, as we |
kaf24@1710 | 21 * don't allow a guest OS to install ring-0 privileges into the GDT/LDT. |
kaf24@1710 | 22 * It's up to the guest OS to ensure all returns via the IDT are to ring 1. |
kaf24@1710 | 23 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather |
kaf24@1710 | 24 * than the correct ring) and bad things are bound to ensue -- IRET is |
kaf24@1710 | 25 * likely to fault, and we may end up killing the domain (no harm can |
kaf24@1710 | 26 * come to Xen, though). |
kaf24@1710 | 27 * |
kaf24@1710 | 28 * When doing a callback, we check if the return CS is in ring 0. If so, |
kaf24@1710 | 29 * the callback is delayed until the next return to ring != 0. |
kaf24@1710 | 30 * If return CS is in ring 1, then we create a callback frame |
kaf24@1710 | 31 * starting at return SS/ESP. The base of the frame does an intra-privilege |
kaf24@1710 | 32 * interrupt-return. |
kaf24@1710 | 33 * If return CS is in ring > 1, we create a callback frame starting |
kaf24@1710 | 34 * at SS/ESP taken from appropriate section of the current TSS. The base |
kaf24@1710 | 35 * of the frame does an inter-privilege interrupt-return. |
kaf24@1710 | 36 * |
kaf24@1710 | 37 * Note that the "failsafe callback" uses a special stackframe: |
kaf24@1710 | 38 * { return_DS, return_ES, return_FS, return_GS, return_EIP, |
kaf24@1710 | 39 * return_CS, return_EFLAGS[, return_ESP, return_SS] } |
kaf24@1710 | 40 * That is, original values for DS/ES/FS/GS are placed on stack rather than |
kaf24@1710 | 41 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them |
kaf24@1710 | 42 * saved/restored in guest OS. Furthermore, if we load them we may cause |
kaf24@1710 | 43 * a fault if they are invalid, which is a hassle to deal with. We avoid |
kaf24@1710 | 44 * that problem if we don't load them :-) This property allows us to use |
kaf24@1710 | 45 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS |
kaf24@1710 | 46 * on return to ring != 0, we can simply package it up as a return via |
kaf24@1710 | 47 * the failsafe callback, and let the guest OS sort it out (perhaps by |
kaf24@1710 | 48 * killing an application process). Note that we also do this for any |
kaf24@1710 | 49 * faulting IRET -- just let the guest OS handle it via the event |
kaf24@1710 | 50 * callback. |
kaf24@1710 | 51 * |
kaf24@1710 | 52 * We terminate a domain in the following cases: |
kaf24@1710 | 53 * - a fault while creating a callback stack frame (due to a bad ring-1 stack). |
kaf24@1710 | 54 * - faulting IRET on entry to failsafe callback handler. |
kaf24@1710 | 55 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback |
kaf24@1710 | 56 * handler in good order (absolutely no faults allowed!). |
kaf24@1710 | 57 */ |
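
Editorial aside: as a reading aid, the two callback frame layouts described in the comment
above can be written down as C structures. These are illustrative only -- Xen builds the
frames by hand in create_bounce_frame and never defines structs like these:

    /* Ordinary callback frame pushed on the guest's ring-1 stack (fields in
     * ascending address order).  The ESP/SS pair is present only when the
     * interrupted code was running below ring 1 (inter-privilege return). */
    struct callback_frame_sketch {
        unsigned long eip;       /* return EIP                              */
        unsigned long cs;        /* return CS                               */
        unsigned long eflags;    /* return EFLAGS (TF/NT/RF/VM cleared)     */
        unsigned long esp, ss;   /* only for inter-privilege returns        */
    };

    /* Failsafe-callback frame: the original data segment selectors are
     * passed on the stack rather than reloaded into DS/ES/FS/GS. */
    struct failsafe_frame_sketch {
        unsigned long ds, es, fs, gs;
        unsigned long eip, cs, eflags;
        unsigned long esp, ss;   /* again, only for inter-privilege returns */
    };
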
kaf24@1710 | 58 |
kaf24@1710 | 59 #include <xen/config.h> |
kaf24@1710 | 60 #include <xen/errno.h> |
kaf24@2085 | 61 #include <xen/softirq.h> |
kaf24@2954 | 62 #include <asm/x86_32/asm_defns.h> |
kaf24@2827 | 63 #include <public/xen.h> |
kaf24@1710 | 64 |
kaf24@1710 | 65 #define GET_CURRENT(reg) \ |
kaf24@3010 | 66 movl $8192-4, reg; \ |
kaf24@1710 | 67 orl %esp, reg; \ |
kaf24@1710 | 68 andl $~3,reg; \ |
kaf24@1710 | 69 movl (reg),reg; |
kaf24@1710 | 70 |
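
Editorial aside: GET_CURRENT recovers the current task pointer from %esp alone. A rough C
equivalent (illustrative only; this helper does not exist in the tree) is:

    struct task_struct;                 /* opaque here */

    /* Each CPU's hypervisor stack is 8KB and 8KB-aligned, and the pointer to
     * the task currently running on that CPU sits in the stack's top word. */
    static inline struct task_struct *get_current_sketch(unsigned long esp)
    {
        unsigned long p = esp;
        p |= 8192 - 4;      /* address of the last 32-bit word of the stack */
        p &= ~3UL;          /* force 4-byte alignment                       */
        return *(struct task_struct **)p;
    }
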
kaf24@1710 | 71 ENTRY(continue_nonidle_task) |
kaf24@1710 | 72 GET_CURRENT(%ebx) |
kaf24@1710 | 73 jmp test_all_events |
kaf24@1710 | 74 |
kaf24@1710 | 75 ALIGN |
kaf24@1710 | 76 /* |
kaf24@1710 | 77 * HYPERVISOR_multicall(call_list, nr_calls) |
kaf24@1710 | 78 * Execute a list of 'nr_calls' hypercalls, pointed at by 'call_list'. |
kaf24@1710 | 79 * This is fairly easy except that: |
kaf24@1710 | 80 * 1. We may fault reading the call list, and must patch that up; and |
kaf24@1710 | 81 * 2. We cannot recursively call HYPERVISOR_multicall, or a malicious |
kaf24@1710 | 82 * caller could cause our stack to blow up. |
kaf24@1710 | 83 */ |
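
Editorial aside: a C sketch of the loop below. The entry layout is inferred from the
32-byte stride and the offsets used here (NR_hypercalls and the authoritative definition
come from public/xen.h), and the sketch names are made up:

    struct multicall_entry_sketch {
        unsigned long op;           /* offset 0                                  */
        unsigned long args[7];      /* offsets 4..28; args[5] receives the result */
    };                              /* 1 << MULTICALL_ENTRY_ORDER == 32 bytes     */

    extern unsigned long (*hypercall_table_sketch[])(unsigned long, unsigned long,
                                                     unsigned long, unsigned long,
                                                     unsigned long);

    static long do_multicall_sketch(struct multicall_entry_sketch *list,
                                    unsigned long nr_calls)
    {
        for (unsigned long i = 0; i < nr_calls; i++) {
            struct multicall_entry_sketch *e = &list[i];
            /* A fault on any of these guest-memory accesses is caught by the
             * __ex_table fixups below and fails the whole call with -EFAULT. */
            e->args[5] = hypercall_table_sketch[e->op & (NR_hypercalls - 1)](
                e->args[0], e->args[1], e->args[2], e->args[3], e->args[4]);
        }
        return 0;
    }
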
kaf24@2446 | 84 #define MULTICALL_ENTRY_ORDER 5 |
kaf24@1710 | 85 do_multicall: |
kaf24@1710 | 86 popl %eax |
kaf24@1710 | 87 cmpl $SYMBOL_NAME(multicall_return_from_call),%eax |
kaf24@1710 | 88 je multicall_return_from_call |
kaf24@1710 | 89 pushl %ebx |
kaf24@1710 | 90 movl 4(%esp),%ebx /* EBX == call_list */ |
kaf24@1710 | 91 movl 8(%esp),%ecx /* ECX == nr_calls */ |
kaf24@2446 | 92 /* Ensure the entire multicall list is below HYPERVISOR_VIRT_START. */ |
kaf24@2446 | 93 movl %ecx,%eax |
kaf24@2446 | 94 shll $MULTICALL_ENTRY_ORDER,%eax |
kaf24@2446 | 95 addl %ebx,%eax /* EAX == end of multicall list */ |
kaf24@2446 | 96 jc bad_multicall_address |
kaf24@2446 | 97 cmpl $__HYPERVISOR_VIRT_START,%eax |
kaf24@2446 | 98 jnc bad_multicall_address |
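
Editorial aside: the four instructions above implement the address check named in the
comment; restated in C (fragment only; __HYPERVISOR_VIRT_START comes from the headers):

    unsigned long start = (unsigned long)call_list;
    unsigned long end   = start + (nr_calls << MULTICALL_ENTRY_ORDER);
    if (end < start ||                      /* wrapped past 4GB: the 'jc' case   */
        end >= __HYPERVISOR_VIRT_START)     /* reaches Xen space: the 'jnc' case */
        return -EFAULT;
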
kaf24@1710 | 99 multicall_loop: |
kaf24@1710 | 100 pushl %ecx |
kaf24@1710 | 101 multicall_fault1: |
kaf24@1710 | 102 pushl 20(%ebx) # args[4] |
kaf24@1710 | 103 multicall_fault2: |
kaf24@1710 | 104 pushl 16(%ebx) # args[3] |
kaf24@1710 | 105 multicall_fault3: |
kaf24@1710 | 106 pushl 12(%ebx) # args[2] |
kaf24@1710 | 107 multicall_fault4: |
kaf24@1710 | 108 pushl 8(%ebx) # args[1] |
kaf24@1710 | 109 multicall_fault5: |
kaf24@1710 | 110 pushl 4(%ebx) # args[0] |
kaf24@1710 | 111 multicall_fault6: |
kaf24@1710 | 112 movl (%ebx),%eax # op |
kaf24@1710 | 113 andl $(NR_hypercalls-1),%eax |
kaf24@1710 | 114 call *SYMBOL_NAME(hypercall_table)(,%eax,4) |
kaf24@1710 | 115 multicall_return_from_call: |
kaf24@1710 | 116 multicall_fault7: |
kaf24@1710 | 117 movl %eax,24(%ebx) # args[5] == result |
kaf24@1710 | 118 addl $20,%esp |
kaf24@1710 | 119 popl %ecx |
kaf24@2446 | 120 addl $(1<<MULTICALL_ENTRY_ORDER),%ebx |
kaf24@1710 | 121 loop multicall_loop |
kaf24@1710 | 122 popl %ebx |
kaf24@1710 | 123 xorl %eax,%eax |
kaf24@1710 | 124 jmp ret_from_hypercall |
kaf24@1710 | 125 |
kaf24@2446 | 126 bad_multicall_address: |
kaf24@2446 | 127 popl %ebx |
kaf24@2446 | 128 movl $-EFAULT,%eax |
kaf24@2446 | 129 jmp ret_from_hypercall |
kaf24@2446 | 130 |
kaf24@1710 | 131 .section __ex_table,"a" |
kaf24@1710 | 132 .align 4 |
kaf24@1710 | 133 .long multicall_fault1, multicall_fixup1 |
kaf24@1710 | 134 .long multicall_fault2, multicall_fixup2 |
kaf24@1710 | 135 .long multicall_fault3, multicall_fixup3 |
kaf24@1710 | 136 .long multicall_fault4, multicall_fixup4 |
kaf24@1710 | 137 .long multicall_fault5, multicall_fixup5 |
kaf24@1710 | 138 .long multicall_fault6, multicall_fixup6 |
cl349@2570 | 139 .long multicall_fault7, multicall_fixup6 |
kaf24@1710 | 140 .previous |
kaf24@1710 | 141 |
kaf24@1710 | 142 .section .fixup,"ax" |
kaf24@1710 | 143 multicall_fixup6: |
kaf24@1710 | 144 addl $4,%esp |
kaf24@1710 | 145 multicall_fixup5: |
kaf24@1710 | 146 addl $4,%esp |
kaf24@1710 | 147 multicall_fixup4: |
kaf24@1710 | 148 addl $4,%esp |
kaf24@1710 | 149 multicall_fixup3: |
kaf24@1710 | 150 addl $4,%esp |
kaf24@1710 | 151 multicall_fixup2: |
kaf24@1710 | 152 addl $4,%esp |
kaf24@1710 | 153 multicall_fixup1: |
kaf24@1710 | 154 addl $4,%esp |
kaf24@1710 | 155 popl %ebx |
kaf24@1710 | 156 movl $-EFAULT,%eax |
kaf24@1710 | 157 jmp ret_from_hypercall |
kaf24@1710 | 158 .previous |
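
Editorial aside: the fixup labels above form a fall-through chain. A fault at
multicall_faultN is routed to multicall_fixupN, which discards the arguments already
pushed for the current entry plus the saved loop counter (one addl $4,%esp per level),
restores %ebx, and fails the whole multicall with -EFAULT. multicall_fault7 shares
multicall_fixup6 because the stack depth is the same at both points.
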
kaf24@1710 | 159 |
kaf24@1710 | 160 ALIGN |
kaf24@1710 | 161 restore_all_guest: |
kaf24@2954 | 162 1: movl XREGS_ds(%esp),%ds |
kaf24@2954 | 163 2: movl XREGS_es(%esp),%es |
kaf24@2954 | 164 3: movl XREGS_fs(%esp),%fs |
kaf24@2954 | 165 4: movl XREGS_gs(%esp),%gs |
kaf24@1710 | 166 popl %ebx |
kaf24@1710 | 167 popl %ecx |
kaf24@1710 | 168 popl %edx |
kaf24@1710 | 169 popl %esi |
kaf24@1710 | 170 popl %edi |
kaf24@1710 | 171 popl %ebp |
kaf24@1710 | 172 popl %eax |
kaf24@1710 | 173 addl $4,%esp |
kaf24@1710 | 174 5: iret |
kaf24@1710 | 175 .section .fixup,"ax" |
kaf24@2954 | 176 6: subl $4,%esp |
kaf24@2954 | 177 pushl %eax |
kaf24@1710 | 178 pushl %ebp |
kaf24@1710 | 179 pushl %edi |
kaf24@1710 | 180 pushl %esi |
kaf24@1710 | 181 pushl %edx |
kaf24@1710 | 182 pushl %ecx |
kaf24@1710 | 183 pushl %ebx |
kaf24@2955 | 184 7: SET_XEN_SEGMENTS(a) |
kaf24@2954 | 185 jmp failsafe_callback |
kaf24@1710 | 186 .previous |
kaf24@1710 | 187 .section __ex_table,"a" |
kaf24@1710 | 188 .align 4 |
kaf24@2954 | 189 .long 1b,7b |
kaf24@1710 | 190 .long 2b,7b |
kaf24@2954 | 191 .long 3b,7b |
kaf24@2954 | 192 .long 4b,7b |
kaf24@2954 | 193 .long 5b,6b |
kaf24@1710 | 194 .previous |
kaf24@1710 | 195 |
kaf24@1710 | 196 /* No special register assumptions */ |
kaf24@1710 | 197 failsafe_callback: |
kaf24@1710 | 198 GET_CURRENT(%ebx) |
kaf24@3081 | 199 leal DOMAIN_trap_bounce(%ebx),%edx |
kaf24@2954 | 200 movl DOMAIN_failsafe_addr(%ebx),%eax |
kaf24@3081 | 201 movl %eax,TRAPBOUNCE_eip(%edx) |
kaf24@2954 | 202 movl DOMAIN_failsafe_sel(%ebx),%eax |
kaf24@3081 | 203 movw %ax,TRAPBOUNCE_cs(%edx) |
kaf24@1710 | 204 call create_bounce_frame |
kaf24@1710 | 205 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame |
kaf24@2954 | 206 movl XREGS_ds(%esp),%eax |
kaf24@2954 | 207 FAULT1: movl %eax,%gs:(%esi) |
kaf24@2954 | 208 movl XREGS_es(%esp),%eax |
kaf24@2954 | 209 FAULT2: movl %eax,%gs:4(%esi) |
kaf24@2954 | 210 movl XREGS_fs(%esp),%eax |
kaf24@2954 | 211 FAULT3: movl %eax,%gs:8(%esi) |
kaf24@2954 | 212 movl XREGS_gs(%esp),%eax |
kaf24@2954 | 213 FAULT4: movl %eax,%gs:12(%esi) |
kaf24@2954 | 214 movl %esi,XREGS_esp(%esp) |
kaf24@1710 | 215 popl %ebx |
kaf24@1710 | 216 popl %ecx |
kaf24@1710 | 217 popl %edx |
kaf24@1710 | 218 popl %esi |
kaf24@1710 | 219 popl %edi |
kaf24@1710 | 220 popl %ebp |
kaf24@1710 | 221 popl %eax |
kaf24@2954 | 222 addl $4,%esp |
kaf24@1710 | 223 FAULT5: iret |
kaf24@1710 | 224 |
kaf24@1710 | 225 ALIGN |
kaf24@1710 | 226 restore_all_xen: |
kaf24@1710 | 227 popl %ebx |
kaf24@1710 | 228 popl %ecx |
kaf24@1710 | 229 popl %edx |
kaf24@1710 | 230 popl %esi |
kaf24@1710 | 231 popl %edi |
kaf24@1710 | 232 popl %ebp |
kaf24@1710 | 233 popl %eax |
kaf24@1710 | 234 addl $4,%esp |
kaf24@1710 | 235 iret |
kaf24@1710 | 236 |
kaf24@1710 | 237 ALIGN |
kaf24@1710 | 238 ENTRY(hypercall) |
kaf24@1710 | 239 pushl %eax # save orig_eax |
kaf24@2955 | 240 SAVE_ALL(b) |
kaf24@2954 | 241 sti |
kaf24@2954 | 242 GET_CURRENT(%ebx) |
kaf24@1710 | 243 andl $(NR_hypercalls-1),%eax |
kaf24@1710 | 244 call *SYMBOL_NAME(hypercall_table)(,%eax,4) |
kaf24@1710 | 245 |
kaf24@1710 | 246 ret_from_hypercall: |
kaf24@2954 | 247 movl %eax,XREGS_eax(%esp) # save the return value |
kaf24@1710 | 248 |
kaf24@1710 | 249 test_all_events: |
kaf24@1710 | 250 xorl %ecx,%ecx |
kaf24@1710 | 251 notl %ecx |
kaf24@1710 | 252 cli # tests must not race interrupts |
kaf24@1710 | 253 /*test_softirqs:*/ |
kaf24@2954 | 254 movl DOMAIN_processor(%ebx),%eax |
kaf24@1710 | 255 shl $6,%eax # sizeof(irq_cpustat) == 64 |
kaf24@1710 | 256 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1) |
kaf24@1710 | 257 jnz process_softirqs |
kaf24@1710 | 258 /*test_guest_events:*/ |
kaf24@2954 | 259 movl DOMAIN_shared_info(%ebx),%eax |
kaf24@2954 | 260 testb $0xFF,SHINFO_upcall_mask(%eax) |
kaf24@1710 | 261 jnz restore_all_guest |
kaf24@2954 | 262 testb $0xFF,SHINFO_upcall_pending(%eax) |
kaf24@1710 | 263 jz restore_all_guest |
kaf24@2954 | 264 movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery |
kaf24@1710 | 265 /*process_guest_events:*/ |
kaf24@3081 | 266 leal DOMAIN_trap_bounce(%ebx),%edx |
kaf24@2954 | 267 movl DOMAIN_event_addr(%ebx),%eax |
kaf24@3081 | 268 movl %eax,TRAPBOUNCE_eip(%edx) |
kaf24@2954 | 269 movl DOMAIN_event_sel(%ebx),%eax |
kaf24@3081 | 270 movw %ax,TRAPBOUNCE_cs(%edx) |
kaf24@1710 | 271 call create_bounce_frame |
kaf24@1710 | 272 jmp restore_all_guest |
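
Editorial aside: a C sketch of the test_all_events path above (illustrative only; every
name below is made up -- the real code works on the DOMAIN_ and SHINFO_ offsets):

    struct shinfo_sketch { unsigned char upcall_pending, upcall_mask; };
    struct domain_sketch {
        int processor;
        struct shinfo_sketch *shared_info;
        unsigned long event_addr;       /* guest event-callback entry point */
        unsigned int  event_sel;        /* and its code selector            */
    };

    extern void local_irq_disable(void), local_irq_enable(void), do_softirq(void);
    extern int  softirq_pending(int cpu);                 /* the irq_stat test */
    extern void bounce_to(unsigned long eip, unsigned int cs); /* create_bounce_frame */

    static void test_all_events_sketch(struct domain_sketch *d)
    {
        for (;;) {
            local_irq_disable();        /* the tests must not race interrupts */
            if (softirq_pending(d->processor)) {
                local_irq_enable();
                do_softirq();
                continue;               /* then run all the tests again       */
            }
            if (!d->shared_info->upcall_mask && d->shared_info->upcall_pending) {
                d->shared_info->upcall_mask = 1;  /* masked during delivery    */
                bounce_to(d->event_addr, d->event_sel);
            }
            return;                     /* fall through to restore_all_guest  */
        }
    }
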
kaf24@1710 | 273 |
kaf24@1710 | 274 ALIGN |
kaf24@1710 | 275 process_softirqs: |
kaf24@1710 | 276 sti |
kaf24@1710 | 277 call SYMBOL_NAME(do_softirq) |
kaf24@1710 | 278 jmp test_all_events |
kaf24@1710 | 279 |
kaf24@2954 | 280 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */ |
kaf24@2954 | 281 /* {EIP, CS, EFLAGS, [ESP, SS]} */ |
kaf24@3081 | 282 /* %edx == trap_bounce, %ebx == task_struct */ |
kaf24@2954 | 283 /* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */ |
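
Editorial aside: pseudo-C for the routine below (illustrative only: the real code is the
assembly that follows, writes through the %gs segment, also rewrites the saved SS to the
ring-1 stack selector, and uses XREGS_* offsets rather than a C struct; the field and
helper names here are made up):

    struct cpu_regs_sketch {              /* mirrors the saved xen_regs frame */
        unsigned long eip, cs, eflags, esp, ss;
    };
    struct bounce_target { unsigned long eip; unsigned int cs; };

    extern unsigned long guest_ring1_esp(void);     /* tss->esp1 for this CPU */

    static void create_bounce_frame_sketch(struct cpu_regs_sketch *regs,
                                           struct bounce_target *tb)
    {
        unsigned long *frame;

        if (regs->cs & 2) {
            /* Interrupted ring 2/3: start a fresh frame on the ring-1 stack
             * taken from the TSS, and include the outgoing ESP/SS pair. */
            frame = (unsigned long *)guest_ring1_esp();
            *--frame = regs->ss;
            *--frame = regs->esp;
        } else {
            /* Interrupted an existing ring-1 activation: extend its stack. */
            frame = (unsigned long *)regs->esp;
        }

        *--frame = regs->eflags;
        *--frame = regs->cs;
        *--frame = regs->eip;

        /* Redirect the saved context at the registered guest handler,
         * clearing TF/NT/RF/VM as a hardware trap would. */
        regs->eflags &= 0xfffcbeff;
        regs->esp    = (unsigned long)frame;
        regs->cs     = tb->cs;
        regs->eip    = tb->eip;
    }
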
kaf24@1710 | 284 create_bounce_frame: |
kaf24@2954 | 285 mov XREGS_cs+4(%esp),%cl |
kaf24@1710 | 286 test $2,%cl |
kaf24@1710 | 287 jz 1f /* jump if returning to an existing ring-1 activation */ |
kaf24@1710 | 288 /* obtain ss/esp from TSS -- no current ring-1 activations */ |
kaf24@2954 | 289 movl DOMAIN_processor(%ebx),%eax |
kaf24@1710 | 290 /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */ |
kaf24@1710 | 291 movl %eax, %ecx |
kaf24@1710 | 292 shll $7, %ecx |
kaf24@1710 | 293 shll $13, %eax |
kaf24@1710 | 294 addl %ecx,%eax |
kaf24@1710 | 295 addl $init_tss + 12,%eax |
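
Editorial aside: 8320 = 8192 + 128 = (1 << 13) + (1 << 7), so the two shifts and the add
compute processor * sizeof(tss_struct) without a multiply; the final +12 is the offset of
the esp1 field within the TSS, with ss1 in the word that follows it.
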
kaf24@1710 | 296 movl (%eax),%esi /* tss->esp1 */ |
kaf24@2954 | 297 FAULT6: movl 4(%eax),%gs /* tss->ss1 */ |
kaf24@1710 | 298 /* base of stack frame must contain ss/esp (inter-priv iret) */ |
kaf24@1710 | 299 subl $8,%esi |
kaf24@2954 | 300 movl XREGS_esp+4(%esp),%eax |
kaf24@2954 | 301 FAULT7: movl %eax,%gs:(%esi) |
kaf24@2954 | 302 movl XREGS_ss+4(%esp),%eax |
kaf24@2954 | 303 FAULT8: movl %eax,%gs:4(%esi) |
kaf24@1710 | 304 jmp 2f |
kaf24@1710 | 305 1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */ |
kaf24@2954 | 306 movl XREGS_esp+4(%esp),%esi |
kaf24@2954 | 307 FAULT9: movl XREGS_ss+4(%esp),%gs |
kaf24@1710 | 308 2: /* Construct a stack frame: EFLAGS, CS/EIP */ |
kaf24@1710 | 309 subl $12,%esi |
kaf24@2954 | 310 movl XREGS_eip+4(%esp),%eax |
kaf24@2954 | 311 FAULT10:movl %eax,%gs:(%esi) |
kaf24@2954 | 312 movl XREGS_cs+4(%esp),%eax |
kaf24@2954 | 313 FAULT11:movl %eax,%gs:4(%esi) |
kaf24@2954 | 314 movl XREGS_eflags+4(%esp),%eax |
kaf24@2954 | 315 FAULT12:movl %eax,%gs:8(%esi) |
kaf24@1710 | 316 /* Rewrite our stack frame and return to ring 1. */ |
kaf24@1710 | 317 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */ |
kaf24@1710 | 318 andl $0xfffcbeff,%eax |
kaf24@2954 | 319 movl %eax,XREGS_eflags+4(%esp) |
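
Editorial aside: 0xfffcbeff is ~(TF | NT | RF | VM) = ~(0x00000100 | 0x00004000 |
0x00010000 | 0x00020000) = ~0x00034100, i.e. exactly the four flags named in the comment
above are cleared.
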
kaf24@2954 | 320 movl %gs,XREGS_ss+4(%esp) |
kaf24@2954 | 321 movl %esi,XREGS_esp+4(%esp) |
kaf24@3081 | 322 movzwl TRAPBOUNCE_cs(%edx),%eax |
kaf24@2954 | 323 movl %eax,XREGS_cs+4(%esp) |
kaf24@3081 | 324 movl TRAPBOUNCE_eip(%edx),%eax |
kaf24@2954 | 325 movl %eax,XREGS_eip+4(%esp) |
kaf24@1710 | 326 ret |
kaf24@2954 | 327 |
kaf24@1710 | 328 .section __ex_table,"a" |
kaf24@1710 | 329 .align 4 |
kaf24@1710 | 330 .long FAULT1, crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 331 .long FAULT2, crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 332 .long FAULT3, crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 333 .long FAULT4, crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 334 .long FAULT5, crash_domain_fixup1 # Fault executing failsafe iret |
kaf24@1710 | 335 .long FAULT6, crash_domain_fixup2 # Fault loading ring-1 stack selector |
kaf24@1710 | 336 .long FAULT7, crash_domain_fixup2 # Fault writing to ring-1 stack |
kaf24@1710 | 337 .long FAULT8, crash_domain_fixup2 # Fault writing to ring-1 stack |
kaf24@1710 | 338 .long FAULT9, crash_domain_fixup2 # Fault loading ring-1 stack selector |
kaf24@1710 | 339 .long FAULT10,crash_domain_fixup2 # Fault writing to ring-1 stack |
kaf24@1710 | 340 .long FAULT11,crash_domain_fixup2 # Fault writing to ring-1 stack |
kaf24@1710 | 341 .long FAULT12,crash_domain_fixup2 # Fault writing to ring-1 stack |
kaf24@1710 | 342 .long FAULT13,crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 343 .long FAULT14,crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 344 .previous |
kaf24@1710 | 345 |
kaf24@1710 | 346 # This handler kills domains which experience unrecoverable faults. |
kaf24@1710 | 347 .section .fixup,"ax" |
kaf24@1710 | 348 crash_domain_fixup1: |
kaf24@1710 | 349 subl $4,%esp |
kaf24@2955 | 350 SAVE_ALL(a) |
kaf24@2954 | 351 sti |
kaf24@1710 | 352 jmp domain_crash |
kaf24@1710 | 353 crash_domain_fixup2: |
kaf24@1710 | 354 addl $4,%esp |
kaf24@1710 | 355 crash_domain_fixup3: |
kaf24@1710 | 356 jmp domain_crash |
kaf24@1710 | 357 .previous |
kaf24@1710 | 358 |
kaf24@1710 | 359 ALIGN |
kaf24@1710 | 360 process_guest_exception_and_events: |
kaf24@3081 | 361 leal DOMAIN_trap_bounce(%ebx),%edx |
kaf24@3081 | 362 testb $~0,TRAPBOUNCE_flags(%edx) |
kaf24@1710 | 363 jz test_all_events |
kaf24@1710 | 364 call create_bounce_frame # just the basic frame |
kaf24@3081 | 365 mov TRAPBOUNCE_flags(%edx),%cl |
kaf24@3081 | 366 test $TBF_TRAP_NOCODE,%cl |
kaf24@1710 | 367 jnz 2f |
kaf24@1710 | 368 subl $4,%esi # push error_code onto guest frame |
kaf24@3081 | 369 movl TRAPBOUNCE_error_code(%edx),%eax |
kaf24@2954 | 370 FAULT13:movl %eax,%gs:(%esi) |
kaf24@3081 | 371 test $TBF_TRAP_CR2,%cl |
kaf24@1710 | 372 jz 1f |
kaf24@1710 | 373 subl $4,%esi # push %cr2 onto guest frame |
kaf24@3081 | 374 movl TRAPBOUNCE_cr2(%edx),%eax |
kaf24@2954 | 375 FAULT14:movl %eax,%gs:(%esi) |
kaf24@2954 | 376 1: movl %esi,XREGS_esp(%esp) |
kaf24@3081 | 377 2: movb $0,TRAPBOUNCE_flags(%edx) |
kaf24@1710 | 378 jmp test_all_events |
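
Editorial aside: process_guest_exception_and_events builds the basic bounce frame, then
optionally pushes the pending error code and %cr2 as directed by the TBF_* flags, and
finally clears the flags so the bounce is delivered only once. The TRAPBOUNCE_* offsets
and access widths used above suggest a per-domain record along the following lines; this
is a hypothetical reconstruction, and the authoritative definition in the C headers may
differ:

    struct trap_bounce_sketch {
        unsigned long  error_code;  /* pushed unless TBF_TRAP_NOCODE is set    */
        unsigned long  cr2;         /* pushed as well when TBF_TRAP_CR2 is set */
        unsigned char  flags;       /* TBF_* flags; zero means nothing pending */
        unsigned short cs;          /* guest handler code segment selector     */
        unsigned long  eip;         /* guest handler entry point               */
    };
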
kaf24@1710 | 379 |
kaf24@1710 | 380 ALIGN |
kaf24@1710 | 381 ENTRY(ret_from_intr) |
kaf24@1710 | 382 GET_CURRENT(%ebx) |
kaf24@2954 | 383 movb XREGS_cs(%esp),%al |
kaf24@1710 | 384 testb $3,%al # return to non-supervisor? |
kaf24@1710 | 385 jne test_all_events |
kaf24@1710 | 386 jmp restore_all_xen |
kaf24@1710 | 387 |
kaf24@1710 | 388 ENTRY(divide_error) |
kaf24@1710 | 389 pushl $0 # no error code |
kaf24@1710 | 390 pushl $ SYMBOL_NAME(do_divide_error) |
kaf24@1710 | 391 ALIGN |
kaf24@1710 | 392 error_code: |
kaf24@2954 | 393 cld |
kaf24@2954 | 394 pushl %ebp |
kaf24@1710 | 395 pushl %edi |
kaf24@1710 | 396 pushl %esi |
kaf24@1710 | 397 pushl %edx |
kaf24@1710 | 398 pushl %ecx |
kaf24@1710 | 399 pushl %ebx |
kaf24@2954 | 400 movb XREGS_cs(%esp),%bl |
kaf24@2954 | 401 testb $3,%bl |
kaf24@2954 | 402 je 1f |
kaf24@2954 | 403 movl %ds,XREGS_ds(%esp) |
kaf24@2954 | 404 movl %es,XREGS_es(%esp) |
kaf24@2954 | 405 movl %fs,XREGS_fs(%esp) |
kaf24@2954 | 406 movl %gs,XREGS_gs(%esp) |
kaf24@2955 | 407 1: SET_XEN_SEGMENTS(b) |
kaf24@2954 | 408 movl XREGS_orig_eax(%esp),%esi # get the error code |
kaf24@2954 | 409 movl XREGS_eax(%esp),%edi # get the function address |
kaf24@2954 | 410 movl %eax,XREGS_eax(%esp) |
kaf24@1710 | 411 movl %esp,%edx |
kaf24@1710 | 412 pushl %esi # push the error code |
ach61@2843 | 413 pushl %edx # push the xen_regs pointer |
kaf24@1710 | 414 GET_CURRENT(%ebx) |
kaf24@2954 | 415 call *%edi |
kaf24@1710 | 416 addl $8,%esp |
kaf24@2954 | 417 movb XREGS_cs(%esp),%al |
kaf24@1710 | 418 testb $3,%al |
kaf24@1710 | 419 je restore_all_xen |
kaf24@1710 | 420 jmp process_guest_exception_and_events |
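
Editorial aside: error_code dispatches each exception to its C handler with the saved
register frame and the error code as arguments, i.e. roughly
void do_handler(struct xen_regs *regs, unsigned long error_code) (illustrative prototype).
The stubs below push a literal $0 where the CPU supplies no error code; invalid_TSS,
segment_not_present, stack_segment, general_protection, alignment_check and page_fault
omit it because the CPU pushes a real error code for those exceptions.
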
kaf24@1710 | 421 |
kaf24@1710 | 422 ENTRY(coprocessor_error) |
kaf24@1710 | 423 pushl $0 |
kaf24@1710 | 424 pushl $ SYMBOL_NAME(do_coprocessor_error) |
kaf24@1710 | 425 jmp error_code |
kaf24@1710 | 426 |
kaf24@1710 | 427 ENTRY(simd_coprocessor_error) |
kaf24@1710 | 428 pushl $0 |
kaf24@1710 | 429 pushl $ SYMBOL_NAME(do_simd_coprocessor_error) |
kaf24@1710 | 430 jmp error_code |
kaf24@1710 | 431 |
kaf24@1710 | 432 ENTRY(device_not_available) |
kaf24@1710 | 433 pushl $0 |
kaf24@1710 | 434 pushl $SYMBOL_NAME(math_state_restore) |
kaf24@1710 | 435 jmp error_code |
kaf24@1710 | 436 |
kaf24@1710 | 437 ENTRY(debug) |
kaf24@1710 | 438 pushl $0 |
kaf24@1710 | 439 pushl $ SYMBOL_NAME(do_debug) |
kaf24@1710 | 440 jmp error_code |
kaf24@1710 | 441 |
kaf24@1710 | 442 ENTRY(int3) |
kaf24@1710 | 443 pushl $0 |
kaf24@1710 | 444 pushl $ SYMBOL_NAME(do_int3) |
kaf24@1710 | 445 jmp error_code |
kaf24@1710 | 446 |
kaf24@1710 | 447 ENTRY(overflow) |
kaf24@1710 | 448 pushl $0 |
kaf24@1710 | 449 pushl $ SYMBOL_NAME(do_overflow) |
kaf24@1710 | 450 jmp error_code |
kaf24@1710 | 451 |
kaf24@1710 | 452 ENTRY(bounds) |
kaf24@1710 | 453 pushl $0 |
kaf24@1710 | 454 pushl $ SYMBOL_NAME(do_bounds) |
kaf24@1710 | 455 jmp error_code |
kaf24@1710 | 456 |
kaf24@1710 | 457 ENTRY(invalid_op) |
kaf24@1710 | 458 pushl $0 |
kaf24@1710 | 459 pushl $ SYMBOL_NAME(do_invalid_op) |
kaf24@1710 | 460 jmp error_code |
kaf24@1710 | 461 |
kaf24@1710 | 462 ENTRY(coprocessor_segment_overrun) |
kaf24@1710 | 463 pushl $0 |
kaf24@1710 | 464 pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun) |
kaf24@1710 | 465 jmp error_code |
kaf24@1710 | 466 |
kaf24@1710 | 467 ENTRY(invalid_TSS) |
kaf24@1710 | 468 pushl $ SYMBOL_NAME(do_invalid_TSS) |
kaf24@1710 | 469 jmp error_code |
kaf24@1710 | 470 |
kaf24@1710 | 471 ENTRY(segment_not_present) |
kaf24@1710 | 472 pushl $ SYMBOL_NAME(do_segment_not_present) |
kaf24@1710 | 473 jmp error_code |
kaf24@1710 | 474 |
kaf24@1710 | 475 ENTRY(stack_segment) |
kaf24@1710 | 476 pushl $ SYMBOL_NAME(do_stack_segment) |
kaf24@1710 | 477 jmp error_code |
kaf24@1710 | 478 |
kaf24@1710 | 479 ENTRY(general_protection) |
kaf24@1710 | 480 pushl $ SYMBOL_NAME(do_general_protection) |
kaf24@1710 | 481 jmp error_code |
kaf24@1710 | 482 |
kaf24@1710 | 483 ENTRY(alignment_check) |
kaf24@1710 | 484 pushl $ SYMBOL_NAME(do_alignment_check) |
kaf24@1710 | 485 jmp error_code |
kaf24@1710 | 486 |
kaf24@1710 | 487 ENTRY(page_fault) |
kaf24@1710 | 488 pushl $ SYMBOL_NAME(do_page_fault) |
kaf24@1710 | 489 jmp error_code |
kaf24@1710 | 490 |
kaf24@1710 | 491 ENTRY(machine_check) |
kaf24@1710 | 492 pushl $0 |
kaf24@1710 | 493 pushl $ SYMBOL_NAME(do_machine_check) |
kaf24@1710 | 494 jmp error_code |
kaf24@1710 | 495 |
kaf24@1710 | 496 ENTRY(spurious_interrupt_bug) |
kaf24@1710 | 497 pushl $0 |
kaf24@1710 | 498 pushl $ SYMBOL_NAME(do_spurious_interrupt_bug) |
kaf24@1710 | 499 jmp error_code |
kaf24@1710 | 500 |
kaf24@1710 | 501 ENTRY(nmi) |
kaf24@1710 | 502 # Save state but do not trash the segment registers! |
kaf24@1710 | 503 # We may otherwise be unable to reload them or copy them to ring 1. |
kaf24@1710 | 504 pushl %eax |
kaf24@2955 | 505 SAVE_ALL_NOSEGREGS(a) |
kaf24@1710 | 506 |
kaf24@2085 | 507 # Check for hardware problems. |
kaf24@1710 | 508 inb $0x61,%al |
kaf24@1710 | 509 testb $0x80,%al |
kaf24@2080 | 510 jne nmi_parity_err |
kaf24@1710 | 511 testb $0x40,%al |
kaf24@1710 | 512 jne nmi_io_err |
kaf24@1710 | 513 movl %eax,%ebx |
kaf24@1710 | 514 |
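
Editorial aside: port 0x61 is the legacy NMI status/control port. On read, bit 7 reports
a memory parity error and bit 6 an I/O channel check, which is what the two testb
instructions above look for; the nmi_parity_err and nmi_io_err paths below acknowledge
them by writing bits 2 and 3 back, as their comments describe.
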
kaf24@1710 | 515 # Okay, it's almost a normal NMI tick. We can only process it if: |
kaf24@1710 | 516 # A. We are the outermost Xen activation (in which case we have |
kaf24@1710 | 517 # the selectors safely saved on our stack) |
kaf24@1710 | 518 # B. DS-GS all contain sane Xen values. |
kaf24@1710 | 519 # In all other cases we bail without touching DS-GS, as we have |
kaf24@1710 | 520 # interrupted an enclosing Xen activation in tricky prologue or |
kaf24@1710 | 521 # epilogue code. |
kaf24@2954 | 522 movb XREGS_cs(%esp),%al |
kaf24@1710 | 523 testb $3,%al |
kaf24@1710 | 524 jne do_watchdog_tick |
kaf24@2954 | 525 movl XREGS_ds(%esp),%eax |
kaf24@1710 | 526 cmpw $(__HYPERVISOR_DS),%ax |
kaf24@2954 | 527 jne restore_all_xen |
kaf24@2954 | 528 movl XREGS_es(%esp),%eax |
kaf24@1710 | 529 cmpw $(__HYPERVISOR_DS),%ax |
kaf24@2954 | 530 jne restore_all_xen |
kaf24@2954 | 531 movl XREGS_fs(%esp),%eax |
kaf24@1710 | 532 cmpw $(__HYPERVISOR_DS),%ax |
kaf24@2954 | 533 jne restore_all_xen |
kaf24@2954 | 534 movl XREGS_gs(%esp),%eax |
kaf24@1710 | 535 cmpw $(__HYPERVISOR_DS),%ax |
kaf24@2954 | 536 jne restore_all_xen |
kaf24@1710 | 537 |
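
Editorial aside: the selector checks above can be summarised in C (illustrative sketch;
the struct and names are made up):

    struct nmi_frame_sketch { unsigned long cs, ds, es, fs, gs; };

    /* Process the NMI tick only if it interrupted guest code (the outermost
     * Xen activation, with the selectors safely saved on the stack) or if it
     * interrupted Xen at a point where DS-GS already hold sane Xen values;
     * otherwise bail out without touching any selector. */
    static int nmi_can_process_sketch(const struct nmi_frame_sketch *r)
    {
        if (r->cs & 3)
            return 1;                   /* came from ring 1/3 */
        return r->ds == __HYPERVISOR_DS && r->es == __HYPERVISOR_DS &&
               r->fs == __HYPERVISOR_DS && r->gs == __HYPERVISOR_DS;
    }
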
kaf24@1710 | 538 do_watchdog_tick: |
kaf24@1710 | 539 movl $(__HYPERVISOR_DS),%edx |
kaf24@1710 | 540 movl %edx,%ds |
kaf24@1710 | 541 movl %edx,%es |
kaf24@1710 | 542 movl %esp,%edx |
kaf24@1710 | 543 pushl %ebx # reason |
kaf24@1710 | 544 pushl %edx # regs |
kaf24@1710 | 545 call SYMBOL_NAME(do_nmi) |
kaf24@1710 | 546 addl $8,%esp |
kaf24@2954 | 547 movb XREGS_cs(%esp),%al |
kaf24@1710 | 548 testb $3,%al |
kaf24@1710 | 549 je restore_all_xen |
kaf24@1710 | 550 GET_CURRENT(%ebx) |
kaf24@1710 | 551 jmp restore_all_guest |
kaf24@1710 | 552 |
kaf24@2085 | 553 nmi_parity_err: |
kaf24@2085 | 554 # Clear and disable the parity-error line |
kaf24@2085 | 555 andb $0xf,%al |
kaf24@2085 | 556 orb $0x4,%al |
kaf24@2085 | 557 outb %al,$0x61 |
kaf24@2085 | 558 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore |
kaf24@2954 | 559 je restore_all_xen |
kaf24@2085 | 560 bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason) |
kaf24@2085 | 561 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat) |
kaf24@2085 | 562 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0 |
kaf24@2954 | 563 je restore_all_xen |
kaf24@2085 | 564 movl $(__HYPERVISOR_DS),%edx # nmi=fatal |
kaf24@1710 | 565 movl %edx,%ds |
kaf24@1710 | 566 movl %edx,%es |
kaf24@2079 | 567 movl %esp,%edx |
kaf24@2079 | 568 push %edx |
kaf24@2079 | 569 call SYMBOL_NAME(mem_parity_error) |
kaf24@2085 | 570 addl $4,%esp |
kaf24@2085 | 571 jmp ret_from_intr |
kaf24@2085 | 572 |
kaf24@1710 | 573 nmi_io_err: |
kaf24@2085 | 574 # Clear and disable the I/O-error line |
kaf24@2085 | 575 andb $0xf,%al |
kaf24@2085 | 576 orb $0x8,%al |
kaf24@2085 | 577 outb %al,$0x61 |
kaf24@2085 | 578 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore |
kaf24@2954 | 579 je restore_all_xen |
kaf24@2085 | 580 bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason) |
kaf24@2085 | 581 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat) |
kaf24@2085 | 582 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0 |
kaf24@2954 | 583 je restore_all_xen |
kaf24@2085 | 584 movl $(__HYPERVISOR_DS),%edx # nmi=fatal |
kaf24@1710 | 585 movl %edx,%ds |
kaf24@1710 | 586 movl %edx,%es |
kaf24@2079 | 587 movl %esp,%edx |
kaf24@2079 | 588 push %edx |
kaf24@2079 | 589 call SYMBOL_NAME(io_check_error) |
kaf24@2085 | 590 addl $4,%esp |
kaf24@2085 | 591 jmp ret_from_intr |
kaf24@2079 | 592 |
kaf24@1710 | 593 .data |
kaf24@1710 | 594 ENTRY(hypercall_table) |
kaf24@1710 | 595 .long SYMBOL_NAME(do_set_trap_table) /* 0 */ |
kaf24@1710 | 596 .long SYMBOL_NAME(do_mmu_update) |
kaf24@1710 | 597 .long SYMBOL_NAME(do_set_gdt) |
kaf24@1710 | 598 .long SYMBOL_NAME(do_stack_switch) |
kaf24@1710 | 599 .long SYMBOL_NAME(do_set_callbacks) |
kaf24@1710 | 600 .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */ |
kaf24@1710 | 601 .long SYMBOL_NAME(do_sched_op) |
kaf24@1710 | 602 .long SYMBOL_NAME(do_dom0_op) |
kaf24@1710 | 603 .long SYMBOL_NAME(do_set_debugreg) |
kaf24@1710 | 604 .long SYMBOL_NAME(do_get_debugreg) |
kaf24@1710 | 605 .long SYMBOL_NAME(do_update_descriptor) /* 10 */ |
kaf24@1710 | 606 .long SYMBOL_NAME(do_set_fast_trap) |
kaf24@1710 | 607 .long SYMBOL_NAME(do_dom_mem_op) |
kaf24@1710 | 608 .long SYMBOL_NAME(do_multicall) |
kaf24@1710 | 609 .long SYMBOL_NAME(do_update_va_mapping) |
kaf24@1710 | 610 .long SYMBOL_NAME(do_set_timer_op) /* 15 */ |
kaf24@1710 | 611 .long SYMBOL_NAME(do_event_channel_op) |
kaf24@1710 | 612 .long SYMBOL_NAME(do_xen_version) |
kaf24@1710 | 613 .long SYMBOL_NAME(do_console_io) |
kaf24@1710 | 614 .long SYMBOL_NAME(do_physdev_op) |
kaf24@2375 | 615 .long SYMBOL_NAME(do_grant_table_op) /* 20 */ |
kaf24@2111 | 616 .long SYMBOL_NAME(do_vm_assist) |
kaf24@2375 | 617 .long SYMBOL_NAME(do_update_va_mapping_otherdomain) |
kaf24@1710 | 618 .rept NR_hypercalls-((.-hypercall_table)/4) |
kaf24@1710 | 619 .long SYMBOL_NAME(do_ni_hypercall) |
kaf24@1710 | 620 .endr |