debuggers.hg
annotate xen/arch/x86/x86_32/entry.S @ 2570:c627fa2b0dc3
bitkeeper revision 1.1159.84.2 (4153f09dU0R_tGAbFp1ucJ_ns3SaXw)
Add missing fixup entry.
Add missing fixup entry.
author | cl349@freefall.cl.cam.ac.uk |
---|---|
date | Fri Sep 24 10:02:05 2004 +0000 (2004-09-24) |
parents | 7ed44d755dda |
children | 7d618b439da4 |
rev | line source |
---|---|
kaf24@1710 | 1 /* |
kaf24@1710 | 2 * Hypercall and fault low-level handling routines. |
kaf24@1710 | 3 * |
kaf24@1710 | 4 * Copyright (c) 2002-2004, K A Fraser |
kaf24@1710 | 5 * Copyright (c) 1991, 1992 Linus Torvalds |
kaf24@1710 | 6 */ |
kaf24@1710 | 7 |
kaf24@1710 | 8 /* |
kaf24@1710 | 9 * The idea for callbacks to guest OSes |
kaf24@1710 | 10 * ==================================== |
kaf24@1710 | 11 * |
kaf24@1710 | 12 * First, we require that all callbacks (either via a supplied |
kaf24@1710 | 13 * interrupt-descriptor-table, or via the special event or failsafe callbacks |
kaf24@1710 | 14 * in the shared-info-structure) are to ring 1. This just makes life easier, |
kaf24@1710 | 15 * in that it means we don't have to do messy GDT/LDT lookups to find |
kaf24@1710 | 16 * out what the privilege-level of the return code-selector is. That code |
kaf24@1710 | 17 * would just be a hassle to write, and would need to account for running |
kaf24@1710 | 18 * off the end of the GDT/LDT, for example. For all callbacks we check |
kaf24@1710 | 19 * that the provided |
kaf24@1710 | 20 * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as |
kaf24@1710 | 21 * don't allow a guest OS to install ring-0 privileges into the GDT/LDT. |
kaf24@1710 | 22 * It's up to the guest OS to ensure all returns via the IDT are to ring 1. |
kaf24@1710 | 23 * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather |
kaf24@1710 | 24 * than the correct ring) and bad things are bound to ensue -- IRET is |
kaf24@1710 | 25 * likely to fault, and we may end up killing the domain (no harm can |
kaf24@1710 | 26 * come to Xen, though). |
kaf24@1710 | 27 * |
kaf24@1710 | 28 * When doing a callback, we check if the return CS is in ring 0. If so, |
kaf24@1710 | 29 * callback is delayed until next return to ring != 0. |
kaf24@1710 | 30 * If return CS is in ring 1, then we create a callback frame |
kaf24@1710 | 31 * starting at return SS/ESP. The base of the frame does an intra-privilege |
kaf24@1710 | 32 * interrupt-return. |
kaf24@1710 | 33 * If return CS is in ring > 1, we create a callback frame starting |
kaf24@1710 | 34 * at SS/ESP taken from appropriate section of the current TSS. The base |
kaf24@1710 | 35 * of the frame does an inter-privilege interrupt-return. |
kaf24@1710 | 36 * |
kaf24@1710 | 37 * Note that the "failsafe callback" uses a special stackframe: |
kaf24@1710 | 38 * { return_DS, return_ES, return_FS, return_GS, return_EIP, |
kaf24@1710 | 39 * return_CS, return_EFLAGS[, return_ESP, return_SS] } |
kaf24@1710 | 40 * That is, original values for DS/ES/FS/GS are placed on stack rather than |
kaf24@1710 | 41 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them |
kaf24@1710 | 42 * saved/restored in guest OS. Furthermore, if we load them we may cause |
kaf24@1710 | 43 * a fault if they are invalid, which is a hassle to deal with. We avoid |
kaf24@1710 | 44 * that problem if we don't load them :-) This property allows us to use |
kaf24@1710 | 45 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS |
kaf24@1710 | 46 * on return to ring != 0, we can simply package it up as a return via |
kaf24@1710 | 47 * the failsafe callback, and let the guest OS sort it out (perhaps by |
kaf24@1710 | 48 * killing an application process). Note that we also do this for any |
kaf24@1710 | 49 * faulting IRET -- just let the guest OS handle it via the event |
kaf24@1710 | 50 * callback. |
kaf24@1710 | 51 * |
kaf24@1710 | 52 * We terminate a domain in the following cases: |
kaf24@1710 | 53 * - creating a callback stack frame (due to bad ring-1 stack). |
kaf24@1710 | 54 * - faulting IRET on entry to failsafe callback handler. |
kaf24@1710 | 55 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback |
kaf24@1710 | 56 * handler in good order (absolutely no faults allowed!). |
kaf24@1710 | 57 */ |
kaf24@1710 | 58 |
kaf24@1710 | 59 #include <xen/config.h> |
kaf24@1710 | 60 #include <xen/errno.h> |
kaf24@2085 | 61 #include <xen/softirq.h> |
kaf24@1710 | 62 #include <hypervisor-ifs/hypervisor-if.h> |
kaf24@1710 | 63 |
# (review) Byte offsets of each saved register within the stack frame built by
# SAVE_ALL / the hardware exception frame (pt_regs layout for 32-bit Xen).
kaf24@1710 | 64 EBX = 0x00 |
kaf24@1710 | 65 ECX = 0x04 |
kaf24@1710 | 66 EDX = 0x08 |
kaf24@1710 | 67 ESI = 0x0C |
kaf24@1710 | 68 EDI = 0x10 |
kaf24@1710 | 69 EBP = 0x14 |
kaf24@1710 | 70 EAX = 0x18 |
kaf24@1710 | 71 DS = 0x1C |
kaf24@1710 | 72 ES = 0x20 |
kaf24@1710 | 73 FS = 0x24 |
kaf24@1710 | 74 GS = 0x28 |
kaf24@1710 | 75 ORIG_EAX = 0x2C |
# (review) EIP..OLDSS are pushed by the CPU itself; OLDESP/OLDSS exist only
# when the interrupted context was at ring > 0 (inter-privilege transition).
kaf24@1710 | 76 EIP = 0x30 |
kaf24@1710 | 77 CS = 0x34 |
kaf24@1710 | 78 EFLAGS = 0x38 |
kaf24@1710 | 79 OLDESP = 0x3C |
kaf24@1710 | 80 OLDSS = 0x40 |
kaf24@1710 | 81 |
kaf24@1710 | 82 /* Offsets in domain structure */ |
kaf24@1710 | 83 PROCESSOR = 0 |
kaf24@1710 | 84 SHARED_INFO = 4 |
kaf24@1710 | 85 EVENT_SEL = 8 |
kaf24@1710 | 86 EVENT_ADDR = 12 |
kaf24@1710 | 87 FAILSAFE_BUFFER = 16 |
kaf24@1710 | 88 FAILSAFE_SEL = 32 |
kaf24@1710 | 89 FAILSAFE_ADDR = 36 |
kaf24@1710 | 90 |
kaf24@1710 | 91 /* Offsets in shared_info_t */ |
# (review) UPCALL_PENDING deliberately expands to nothing, so
# "UPCALL_PENDING(%eax)" becomes "(%eax)", i.e. offset 0 — intentional trick.
kaf24@1710 | 92 #define UPCALL_PENDING /* 0 */ |
kaf24@1710 | 93 #define UPCALL_MASK 1 |
kaf24@1710 | 94 |
kaf24@1710 | 95 /* Offsets in guest_trap_bounce */ |
kaf24@1710 | 96 GTB_ERROR_CODE = 0 |
kaf24@1710 | 97 GTB_CR2 = 4 |
kaf24@1710 | 98 GTB_FLAGS = 8 |
kaf24@1710 | 99 GTB_CS = 10 |
kaf24@1710 | 100 GTB_EIP = 12 |
# (review) GTBF_* are bit flags tested in process_guest_exception_and_events.
kaf24@1710 | 101 GTBF_TRAP = 1 |
kaf24@1710 | 102 GTBF_TRAP_NOCODE = 2 |
kaf24@1710 | 103 GTBF_TRAP_CR2 = 4 |
kaf24@1710 | 104 |
# (review) EFLAGS bit masks (carry, interrupt-enable, nested-task).
kaf24@1710 | 105 CF_MASK = 0x00000001 |
kaf24@1710 | 106 IF_MASK = 0x00000200 |
kaf24@1710 | 107 NT_MASK = 0x00004000 |
# (review) SAVE_ALL_NOSEGREGS: build the register frame matching the EBX..GS
# offsets above, WITHOUT reloading segment registers (needed by the NMI path,
# which must not touch DS-GS). Push order is the reverse of the offset table.
kaf24@1710 | 109 #define SAVE_ALL_NOSEGREGS \ |
kaf24@1710 | 110 cld; \ |
kaf24@1710 | 111 pushl %gs; \ |
kaf24@1710 | 112 pushl %fs; \ |
kaf24@1710 | 113 pushl %es; \ |
kaf24@1710 | 114 pushl %ds; \ |
kaf24@1710 | 115 pushl %eax; \ |
kaf24@1710 | 116 pushl %ebp; \ |
kaf24@1710 | 117 pushl %edi; \ |
kaf24@1710 | 118 pushl %esi; \ |
kaf24@1710 | 119 pushl %edx; \ |
kaf24@1710 | 120 pushl %ecx; \ |
kaf24@1710 | 121 pushl %ebx; \ |
kaf24@1710 | 122 |
# (review) SAVE_ALL: same frame, then switch all data segments to the
# hypervisor flat DS and re-enable interrupts. Clobbers %edx.
kaf24@1710 | 123 #define SAVE_ALL \ |
kaf24@1710 | 124 SAVE_ALL_NOSEGREGS \ |
kaf24@1710 | 125 movl $(__HYPERVISOR_DS),%edx; \ |
kaf24@1710 | 126 movl %edx,%ds; \ |
kaf24@1710 | 127 movl %edx,%es; \ |
kaf24@1710 | 128 movl %edx,%fs; \ |
kaf24@1710 | 129 movl %edx,%gs; \ |
kaf24@1710 | 130 sti; |
kaf24@1710 | 131 |
# (review) GET_CURRENT: the current task pointer is stored in the topmost
# word of the (4KB-aligned, 4KB-sized) per-CPU stack. Round %esp up to that
# word (or with 4096-4), mask the low bits, and dereference.
kaf24@1710 | 132 #define GET_CURRENT(reg) \ |
kaf24@1710 | 133 movl $4096-4, reg; \ |
kaf24@1710 | 134 orl %esp, reg; \ |
kaf24@1710 | 135 andl $~3,reg; \ |
kaf24@1710 | 136 movl (reg),reg; |
# (review) Entry point for a freshly-scheduled non-idle task: load the
# current task pointer into %ebx (the register test_all_events expects)
# and fall into the event-delivery/return path.
kaf24@1710 | 138 ENTRY(continue_nonidle_task) |
kaf24@1710 | 139 GET_CURRENT(%ebx) |
kaf24@1710 | 140 jmp test_all_events |
kaf24@1710 | 141 |
kaf24@1710 | 142 ALIGN |
kaf24@1710 | 143 /* |
kaf24@1710 | 144 * HYPERVISOR_multicall(call_list, nr_calls) |
kaf24@1710 | 145 * Execute a list of 'nr_calls' hypercalls, pointed at by 'call_list'. |
kaf24@1710 | 146 * This is fairly easy except that: |
kaf24@1710 | 147 * 1. We may fault reading the call list, and must patch that up; and |
kaf24@1710 | 148 * 2. We cannot recursively call HYPERVISOR_multicall, or a malicious |
kaf24@1710 | 149 * caller could cause our stack to blow up. |
kaf24@1710 | 150 */ |
# (review) Each multicall_entry is 1<<5 = 32 bytes: op, args[0..4], result,
# plus padding — TODO confirm against hypervisor-if.h.
kaf24@2446 | 151 #define MULTICALL_ENTRY_ORDER 5 |
kaf24@1710 | 152 do_multicall: |
# (review) Recursion guard: if our caller's return address is
# multicall_return_from_call, we were invoked from within the multicall loop
# itself (a guest passed the multicall hypercall number in the list); resume
# the loop rather than recursing and letting the guest blow the Xen stack.
kaf24@1710 | 153 popl %eax |
kaf24@1710 | 154 cmpl $SYMBOL_NAME(multicall_return_from_call),%eax |
kaf24@1710 | 155 je multicall_return_from_call |
kaf24@1710 | 156 pushl %ebx |
kaf24@1710 | 157 movl 4(%esp),%ebx /* EBX == call_list */ |
kaf24@1710 | 158 movl 8(%esp),%ecx /* ECX == nr_calls */ |
kaf24@2446 | 159 /* Ensure the entire multicall list is below HYPERVISOR_VIRT_START. */ |
kaf24@2446 | 160 movl %ecx,%eax |
kaf24@2446 | 161 shll $MULTICALL_ENTRY_ORDER,%eax |
kaf24@2446 | 162 addl %ebx,%eax /* EAX == end of multicall list */ |
# (review) jc catches 32-bit wrap of base+size; jnc (i.e. jae) catches a
# list whose end reaches hypervisor space. Both are guest-supplied values.
kaf24@2446 | 163 jc bad_multicall_address |
kaf24@2446 | 164 cmpl $__HYPERVISOR_VIRT_START,%eax |
kaf24@2446 | 165 jnc bad_multicall_address |
kaf24@1710 | 166 multicall_loop: |
kaf24@1710 | 167 pushl %ecx |
# (review) Every access through %ebx may fault (guest memory); each push
# has an __ex_table entry mapping it to a fixup that unwinds exactly the
# stack state live at that point.
kaf24@1710 | 168 multicall_fault1: |
kaf24@1710 | 169 pushl 20(%ebx) # args[4] |
kaf24@1710 | 170 multicall_fault2: |
kaf24@1710 | 171 pushl 16(%ebx) # args[3] |
kaf24@1710 | 172 multicall_fault3: |
kaf24@1710 | 173 pushl 12(%ebx) # args[2] |
kaf24@1710 | 174 multicall_fault4: |
kaf24@1710 | 175 pushl 8(%ebx) # args[1] |
kaf24@1710 | 176 multicall_fault5: |
kaf24@1710 | 177 pushl 4(%ebx) # args[0] |
kaf24@1710 | 178 multicall_fault6: |
kaf24@1710 | 179 movl (%ebx),%eax # op |
kaf24@1710 | 180 andl $(NR_hypercalls-1),%eax |
kaf24@1710 | 181 call *SYMBOL_NAME(hypercall_table)(,%eax,4) |
kaf24@1710 | 182 multicall_return_from_call: |
kaf24@1710 | 183 multicall_fault7: |
kaf24@1710 | 184 movl %eax,24(%ebx) # args[5] == result |
kaf24@1710 | 185 addl $20,%esp |
kaf24@1710 | 186 popl %ecx |
kaf24@2446 | 187 addl $(1<<MULTICALL_ENTRY_ORDER),%ebx |
kaf24@1710 | 188 loop multicall_loop |
kaf24@1710 | 189 popl %ebx |
kaf24@1710 | 190 xorl %eax,%eax |
kaf24@1710 | 191 jmp ret_from_hypercall |
kaf24@1710 | 192 |
kaf24@2446 | 193 bad_multicall_address: |
kaf24@2446 | 194 popl %ebx |
kaf24@2446 | 195 movl $-EFAULT,%eax |
kaf24@2446 | 196 jmp ret_from_hypercall |
kaf24@2446 | 197 |
kaf24@1710 | 198 .section __ex_table,"a" |
kaf24@1710 | 199 .align 4 |
kaf24@1710 | 200 .long multicall_fault1, multicall_fixup1 |
kaf24@1710 | 201 .long multicall_fault2, multicall_fixup2 |
kaf24@1710 | 202 .long multicall_fault3, multicall_fixup3 |
kaf24@1710 | 203 .long multicall_fault4, multicall_fixup4 |
kaf24@1710 | 204 .long multicall_fault5, multicall_fixup5 |
kaf24@1710 | 205 .long multicall_fault6, multicall_fixup6 |
# (review) fault7 (storing the result) happens with the same stack depth as
# fault6 (5 args + saved %ecx), so it correctly shares multicall_fixup6.
# This is the "missing fixup entry" added by this changeset.
cl349@2570 | 206 .long multicall_fault7, multicall_fixup6 |
kaf24@1710 | 207 .previous |
kaf24@1710 | 208 |
# (review) Fall-through ladder: entering at fixupN discards the N-1 argument
# words pushed before faultN, plus the saved loop counter, then fails the
# whole multicall with -EFAULT.
kaf24@1710 | 209 .section .fixup,"ax" |
kaf24@1710 | 210 multicall_fixup6: |
kaf24@1710 | 211 addl $4,%esp |
kaf24@1710 | 212 multicall_fixup5: |
kaf24@1710 | 213 addl $4,%esp |
kaf24@1710 | 214 multicall_fixup4: |
kaf24@1710 | 215 addl $4,%esp |
kaf24@1710 | 216 multicall_fixup3: |
kaf24@1710 | 217 addl $4,%esp |
kaf24@1710 | 218 multicall_fixup2: |
kaf24@1710 | 219 addl $4,%esp |
kaf24@1710 | 220 multicall_fixup1: |
kaf24@1710 | 221 addl $4,%esp |
kaf24@1710 | 222 popl %ebx |
kaf24@1710 | 223 movl $-EFAULT,%eax |
kaf24@1710 | 224 jmp ret_from_hypercall |
kaf24@1710 | 225 .previous |
kaf24@1710 | 226 |
kaf24@1710 | 227 ALIGN |
# (review) Return-to-guest path. Restores the full register frame; any fault
# while popping segment registers or on the final iret is converted (via the
# __ex_table entries below) into a failsafe callback into the guest.
kaf24@1710 | 228 restore_all_guest: |
kaf24@1710 | 229 # First, may need to restore %ds if clobbered by create_bounce_frame |
kaf24@1710 | 230 pushl %ss |
kaf24@1710 | 231 popl %ds |
kaf24@1710 | 232 # Second, create a failsafe copy of DS,ES,FS,GS in case any are bad |
# (review) %ebx must hold the current task pointer here (FAILSAFE_BUFFER is
# a domain-structure offset).
kaf24@1710 | 233 leal DS(%esp),%esi |
kaf24@1710 | 234 leal FAILSAFE_BUFFER(%ebx),%edi |
kaf24@1710 | 235 movsl |
kaf24@1710 | 236 movsl |
kaf24@1710 | 237 movsl |
kaf24@1710 | 238 movsl |
kaf24@1710 | 239 # Finally, restore guest registers -- faults will cause failsafe |
kaf24@1710 | 240 popl %ebx |
kaf24@1710 | 241 popl %ecx |
kaf24@1710 | 242 popl %edx |
kaf24@1710 | 243 popl %esi |
kaf24@1710 | 244 popl %edi |
kaf24@1710 | 245 popl %ebp |
kaf24@1710 | 246 popl %eax |
kaf24@1710 | 247 1: popl %ds |
kaf24@1710 | 248 2: popl %es |
kaf24@1710 | 249 3: popl %fs |
kaf24@1710 | 250 4: popl %gs |
kaf24@1710 | 251 addl $4,%esp |
kaf24@1710 | 252 5: iret |
# (review) Fixups 6-10 rebuild the full frame (re-pushing everything that
# was already popped at the corresponding fault point), reload sane DS/ES
# from %ss, and divert to the guest's failsafe callback instead of crashing.
kaf24@1710 | 253 .section .fixup,"ax" |
kaf24@1710 | 254 10: subl $4,%esp |
kaf24@1710 | 255 pushl %gs |
kaf24@1710 | 256 9: pushl %fs |
kaf24@1710 | 257 8: pushl %es |
kaf24@1710 | 258 7: pushl %ds |
kaf24@1710 | 259 6: pushl %eax |
kaf24@1710 | 260 pushl %ebp |
kaf24@1710 | 261 pushl %edi |
kaf24@1710 | 262 pushl %esi |
kaf24@1710 | 263 pushl %edx |
kaf24@1710 | 264 pushl %ecx |
kaf24@1710 | 265 pushl %ebx |
kaf24@1710 | 266 pushl %ss |
kaf24@1710 | 267 popl %ds |
kaf24@1710 | 268 pushl %ss |
kaf24@1710 | 269 popl %es |
kaf24@1710 | 270 jmp failsafe_callback |
kaf24@1710 | 271 .previous |
kaf24@1710 | 272 .section __ex_table,"a" |
kaf24@1710 | 273 .align 4 |
kaf24@1710 | 274 .long 1b,6b |
kaf24@1710 | 275 .long 2b,7b |
kaf24@1710 | 276 .long 3b,8b |
kaf24@1710 | 277 .long 4b,9b |
kaf24@1710 | 278 .long 5b,10b |
kaf24@1710 | 279 .previous |
kaf24@1710 | 280 |
kaf24@1710 | 281 /* No special register assumptions */ |
# (review) Bounce to the guest's registered failsafe handler: build a basic
# bounce frame (via create_bounce_frame), then widen it by 16 bytes to carry
# the saved DS/ES/FS/GS copies made in restore_all_guest. See the stackframe
# description in the header comment.
kaf24@1710 | 282 failsafe_callback: |
kaf24@1710 | 283 GET_CURRENT(%ebx) |
# (review) sizeof(guest_trap_bounce) == 16, hence shl $4 to index the
# per-CPU guest_trap_bounce array.
kaf24@1710 | 284 movl PROCESSOR(%ebx),%eax |
kaf24@1710 | 285 shl $4,%eax |
kaf24@1710 | 286 lea guest_trap_bounce(%eax),%edx |
kaf24@1710 | 287 movl FAILSAFE_ADDR(%ebx),%eax |
kaf24@1710 | 288 movl %eax,GTB_EIP(%edx) |
kaf24@1710 | 289 movl FAILSAFE_SEL(%ebx),%eax |
kaf24@1710 | 290 movw %ax,GTB_CS(%edx) |
kaf24@1710 | 291 call create_bounce_frame |
# (review) %ds:%esi points at the guest-stack frame on return; FAULT1-4
# write guest memory and are mapped to domain-crash fixups below.
kaf24@1710 | 292 subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame |
kaf24@1710 | 293 leal FAILSAFE_BUFFER(%ebx),%ebp |
kaf24@1710 | 294 movl 0(%ebp),%eax # DS |
kaf24@1710 | 295 FAULT1: movl %eax,(%esi) |
kaf24@1710 | 296 movl 4(%ebp),%eax # ES |
kaf24@1710 | 297 FAULT2: movl %eax,4(%esi) |
kaf24@1710 | 298 movl 8(%ebp),%eax # FS |
kaf24@1710 | 299 FAULT3: movl %eax,8(%esi) |
kaf24@1710 | 300 movl 12(%ebp),%eax # GS |
kaf24@1710 | 301 FAULT4: movl %eax,12(%esi) |
kaf24@1710 | 302 movl %esi,OLDESP(%esp) |
kaf24@1710 | 303 popl %ebx |
kaf24@1710 | 304 popl %ecx |
kaf24@1710 | 305 popl %edx |
kaf24@1710 | 306 popl %esi |
kaf24@1710 | 307 popl %edi |
kaf24@1710 | 308 popl %ebp |
kaf24@1710 | 309 popl %eax |
kaf24@1710 | 310 addl $20,%esp # skip DS/ES/FS/GS/ORIG_EAX |
# (review) If this iret faults the domain is killed (crash_domain_fixup1) --
# faulting on entry to the failsafe handler itself is unrecoverable.
kaf24@1710 | 311 FAULT5: iret |
kaf24@1710 | 312 |
kaf24@1710 | 313 |
kaf24@1710 | 314 ALIGN |
kaf24@1710 | 315 # Simple restore -- we should never fault as we we will only interrupt ring 0 |
kaf24@1710 | 316 # when sane values have been placed in all registers. The only exception is |
kaf24@1710 | 317 # NMI, which may interrupt before good values have been placed in DS-GS. |
kaf24@1710 | 318 # The NMI return code deals with this problem itself. |
# (review) Return-to-Xen path: no failsafe machinery, plain pops + iret.
kaf24@1710 | 319 restore_all_xen: |
kaf24@1710 | 320 popl %ebx |
kaf24@1710 | 321 popl %ecx |
kaf24@1710 | 322 popl %edx |
kaf24@1710 | 323 popl %esi |
kaf24@1710 | 324 popl %edi |
kaf24@1710 | 325 popl %ebp |
kaf24@1710 | 326 popl %eax |
kaf24@1710 | 327 popl %ds |
kaf24@1710 | 328 popl %es |
kaf24@1710 | 329 popl %fs |
kaf24@1710 | 330 popl %gs |
# (review) addl $4 skips ORIG_EAX (the error-code/orig-eax slot).
kaf24@1710 | 331 addl $4,%esp |
kaf24@1710 | 332 iret |
kaf24@1710 | 333 |
kaf24@1710 | 334 ALIGN |
# (review) Main hypercall entry (from the guest's int/trap gate): save all
# registers, dispatch via hypercall_table (index masked to table size, so an
# out-of-range op lands on do_ni_hypercall), then deliver pending work.
kaf24@1710 | 335 ENTRY(hypercall) |
kaf24@1710 | 336 pushl %eax # save orig_eax |
kaf24@1710 | 337 SAVE_ALL |
kaf24@1710 | 338 GET_CURRENT(%ebx) |
kaf24@1710 | 339 andl $(NR_hypercalls-1),%eax |
kaf24@1710 | 340 call *SYMBOL_NAME(hypercall_table)(,%eax,4) |
kaf24@1710 | 341 |
kaf24@1710 | 342 ret_from_hypercall: |
kaf24@1710 | 343 movl %eax,EAX(%esp) # save the return value |
kaf24@1710 | 344 |
# (review) test_all_events expects %ebx == current task. With interrupts
# disabled: first drain softirqs, then deliver a guest event upcall if one
# is pending and not masked, else return straight to the guest.
kaf24@1710 | 345 test_all_events: |
kaf24@1710 | 346 xorl %ecx,%ecx |
kaf24@1710 | 347 notl %ecx |
kaf24@1710 | 348 cli # tests must not race interrupts |
kaf24@1710 | 349 /*test_softirqs:*/ |
kaf24@1710 | 350 movl PROCESSOR(%ebx),%eax |
kaf24@1710 | 351 shl $6,%eax # sizeof(irq_cpustat) == 64 |
kaf24@1710 | 352 test %ecx,SYMBOL_NAME(irq_stat)(%eax,1) |
kaf24@1710 | 353 jnz process_softirqs |
kaf24@1710 | 354 /*test_guest_events:*/ |
kaf24@1710 | 355 movl SHARED_INFO(%ebx),%eax |
kaf24@1710 | 356 testb $0xFF,UPCALL_MASK(%eax) |
kaf24@1710 | 357 jnz restore_all_guest |
kaf24@1710 | 358 testb $0xFF,UPCALL_PENDING(%eax) |
kaf24@1710 | 359 jz restore_all_guest |
kaf24@1710 | 360 movb $1,UPCALL_MASK(%eax) # Upcalls are masked during delivery |
kaf24@1710 | 361 /*process_guest_events:*/ |
kaf24@1710 | 362 movl PROCESSOR(%ebx),%edx |
kaf24@1710 | 363 shl $4,%edx # sizeof(guest_trap_bounce) == 16 |
kaf24@1710 | 364 lea guest_trap_bounce(%edx),%edx |
kaf24@1710 | 365 movl EVENT_ADDR(%ebx),%eax |
kaf24@1710 | 366 movl %eax,GTB_EIP(%edx) |
kaf24@1710 | 367 movl EVENT_SEL(%ebx),%eax |
kaf24@1710 | 368 movw %ax,GTB_CS(%edx) |
kaf24@1710 | 369 call create_bounce_frame |
kaf24@1710 | 370 jmp restore_all_guest |
kaf24@1710 | 371 |
kaf24@1710 | 372 ALIGN |
# (review) Softirqs run with interrupts on, then re-test everything.
kaf24@1710 | 373 process_softirqs: |
kaf24@1710 | 374 sti |
kaf24@1710 | 375 call SYMBOL_NAME(do_softirq) |
kaf24@1710 | 376 jmp test_all_events |
kaf24@1710 | 377 |
kaf24@1710 | 378 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */ |
kaf24@1710 | 379 /* {EIP, CS, EFLAGS, [ESP, SS]} */ |
kaf24@1710 | 380 /* %edx == guest_trap_bounce, %ebx == task_struct */ |
kaf24@1710 | 381 /* %eax,%ecx are clobbered. %ds:%esi contain new OLDSS/OLDESP. */ |
kaf24@1710 | 382 create_bounce_frame: |
# (review) CS+4: the extra 4 accounts for this function's own return address
# on top of the saved frame. Low 2 bits of CS = RPL of interrupted context;
# test $2 distinguishes ring-1 (re-use existing stack) from ring>1
# (fetch ring-1 SS/ESP from the TSS).
kaf24@1710 | 383 mov CS+4(%esp),%cl |
kaf24@1710 | 384 test $2,%cl |
kaf24@1710 | 385 jz 1f /* jump if returning to an existing ring-1 activation */ |
kaf24@1710 | 386 /* obtain ss/esp from TSS -- no current ring-1 activations */ |
kaf24@1710 | 387 movl PROCESSOR(%ebx),%eax |
kaf24@1710 | 388 /* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */ |
# (review) 8320 = 128 + 8192 = (1<<7) + (1<<13), hence the two shifts.
kaf24@1710 | 389 movl %eax, %ecx |
kaf24@1710 | 390 shll $7, %ecx |
kaf24@1710 | 391 shll $13, %eax |
kaf24@1710 | 392 addl %ecx,%eax |
# (review) +12 = offsetof esp1 within the TSS; ss1 is the following word.
kaf24@1710 | 393 addl $init_tss + 12,%eax |
kaf24@1710 | 394 movl (%eax),%esi /* tss->esp1 */ |
kaf24@1710 | 395 FAULT6: movl 4(%eax),%ds /* tss->ss1 */ |
kaf24@1710 | 396 /* base of stack frame must contain ss/esp (inter-priv iret) */ |
kaf24@1710 | 397 subl $8,%esi |
kaf24@1710 | 398 movl OLDESP+4(%esp),%eax |
kaf24@1710 | 399 FAULT7: movl %eax,(%esi) |
kaf24@1710 | 400 movl OLDSS+4(%esp),%eax |
kaf24@1710 | 401 FAULT8: movl %eax,4(%esi) |
kaf24@1710 | 402 jmp 2f |
kaf24@1710 | 403 1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */ |
kaf24@1710 | 404 movl OLDESP+4(%esp),%esi |
kaf24@1710 | 405 FAULT9: movl OLDSS+4(%esp),%ds |
kaf24@1710 | 406 2: /* Construct a stack frame: EFLAGS, CS/EIP */ |
kaf24@1710 | 407 subl $12,%esi |
kaf24@1710 | 408 movl EIP+4(%esp),%eax |
kaf24@1710 | 409 FAULT10:movl %eax,(%esi) |
kaf24@1710 | 410 movl CS+4(%esp),%eax |
kaf24@1710 | 411 FAULT11:movl %eax,4(%esi) |
kaf24@1710 | 412 movl EFLAGS+4(%esp),%eax |
kaf24@1710 | 413 FAULT12:movl %eax,8(%esi) |
kaf24@1710 | 414 /* Rewrite our stack frame and return to ring 1. */ |
kaf24@1710 | 415 /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */ |
kaf24@1710 | 416 andl $0xfffcbeff,%eax |
kaf24@1710 | 417 movl %eax,EFLAGS+4(%esp) |
kaf24@1710 | 418 movl %ds,OLDSS+4(%esp) |
kaf24@1710 | 419 movl %esi,OLDESP+4(%esp) |
# (review) %es still holds __HYPERVISOR_DS (set by SAVE_ALL); %ds may now be
# a guest selector, so the bounce structure is read via %es.
kaf24@1710 | 420 movzwl %es:GTB_CS(%edx),%eax |
kaf24@1710 | 421 movl %eax,CS+4(%esp) |
kaf24@1710 | 422 movl %es:GTB_EIP(%edx),%eax |
kaf24@1710 | 423 movl %eax,EIP+4(%esp) |
kaf24@1710 | 424 ret |
kaf24@1710 | 425 |
kaf24@1710 | 426 |
# (review) All FAULT1-14 sites touch guest-controlled stacks/selectors; a
# fault at any of them means the domain kept a bad ring-1 stack or failsafe
# handler, which per the header comment is terminal for the domain.
kaf24@1710 | 427 .section __ex_table,"a" |
kaf24@1710 | 428 .align 4 |
kaf24@1710 | 429 .long FAULT1, crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 430 .long FAULT2, crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 431 .long FAULT3, crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 432 .long FAULT4, crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 433 .long FAULT5, crash_domain_fixup1 # Fault executing failsafe iret |
kaf24@1710 | 434 .long FAULT6, crash_domain_fixup2 # Fault loading ring-1 stack selector |
kaf24@1710 | 435 .long FAULT7, crash_domain_fixup2 # Fault writing to ring-1 stack |
kaf24@1710 | 436 .long FAULT8, crash_domain_fixup2 # Fault writing to ring-1 stack |
kaf24@1710 | 437 .long FAULT9, crash_domain_fixup2 # Fault loading ring-1 stack selector |
kaf24@1710 | 438 .long FAULT10,crash_domain_fixup2 # Fault writing to ring-1 stack |
kaf24@1710 | 439 .long FAULT11,crash_domain_fixup2 # Fault writing to ring-1 stack |
kaf24@1710 | 440 .long FAULT12,crash_domain_fixup2 # Fault writing to ring-1 stack |
kaf24@1710 | 441 .long FAULT13,crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 442 .long FAULT14,crash_domain_fixup3 # Fault writing to ring-1 stack |
kaf24@1710 | 443 .previous |
kaf24@1710 | 444 |
kaf24@1710 | 445 # This handler kills domains which experience unrecoverable faults. |
kaf24@1710 | 446 .section .fixup,"ax" |
# (review) fixup1: came from a bare iret (no frame) -- rebuild one.
# fixup2: discard create_bounce_frame's return address, then fall through.
# fixup3: just restore a sane %ds before crashing the domain.
kaf24@1710 | 447 crash_domain_fixup1: |
kaf24@1710 | 448 subl $4,%esp |
kaf24@1710 | 449 SAVE_ALL |
kaf24@1710 | 450 jmp domain_crash |
kaf24@1710 | 451 crash_domain_fixup2: |
kaf24@1710 | 452 addl $4,%esp |
kaf24@1710 | 453 crash_domain_fixup3: |
kaf24@1710 | 454 pushl %ss |
kaf24@1710 | 455 popl %ds |
kaf24@1710 | 456 jmp domain_crash |
kaf24@1710 | 457 .previous |
kaf24@1710 | 458 |
kaf24@1710 | 459 ALIGN |
# (review) After a guest-mode exception handler (do_*) returns: if the C
# handler armed guest_trap_bounce (GTB_FLAGS != 0), build a bounce frame and
# optionally append the error code and %cr2 to the guest stack, then fall
# into the normal event-delivery path.
kaf24@1710 | 460 process_guest_exception_and_events: |
kaf24@1710 | 461 movl PROCESSOR(%ebx),%eax |
kaf24@1710 | 462 shl $4,%eax |
kaf24@1710 | 463 lea guest_trap_bounce(%eax),%edx |
kaf24@1710 | 464 testb $~0,GTB_FLAGS(%edx) |
kaf24@1710 | 465 jz test_all_events |
kaf24@1710 | 466 call create_bounce_frame # just the basic frame |
# (review) %ds may be a guest selector after create_bounce_frame; Xen data
# is accessed through %es until %ds is restored at label 2.
kaf24@1710 | 467 mov %es:GTB_FLAGS(%edx),%cl |
kaf24@1710 | 468 test $GTBF_TRAP_NOCODE,%cl |
kaf24@1710 | 469 jnz 2f |
kaf24@1710 | 470 subl $4,%esi # push error_code onto guest frame |
kaf24@1710 | 471 movl %es:GTB_ERROR_CODE(%edx),%eax |
kaf24@1710 | 472 FAULT13:movl %eax,(%esi) |
kaf24@1710 | 473 test $GTBF_TRAP_CR2,%cl |
kaf24@1710 | 474 jz 1f |
kaf24@1710 | 475 subl $4,%esi # push %cr2 onto guest frame |
kaf24@1710 | 476 movl %es:GTB_CR2(%edx),%eax |
kaf24@1710 | 477 FAULT14:movl %eax,(%esi) |
kaf24@1710 | 478 1: movl %esi,OLDESP(%esp) |
kaf24@1710 | 479 2: push %es # unclobber %ds |
kaf24@1710 | 480 pop %ds |
kaf24@1710 | 481 movb $0,GTB_FLAGS(%edx) |
kaf24@1710 | 482 jmp test_all_events |
kaf24@1710 | 483 |
kaf24@1710 | 484 ALIGN |
# (review) Interrupt exit: low 2 bits of saved CS != 0 means the interrupt
# arrived from guest context, so run the event-delivery path; otherwise it
# interrupted Xen itself and we restore directly.
kaf24@1710 | 485 ENTRY(ret_from_intr) |
kaf24@1710 | 486 GET_CURRENT(%ebx) |
kaf24@1710 | 487 movb CS(%esp),%al |
kaf24@1710 | 488 testb $3,%al # return to non-supervisor? |
kaf24@1710 | 489 jne test_all_events |
kaf24@1710 | 490 jmp restore_all_xen |
kaf24@1710 | 491 |
# (review) divide_error falls straight into error_code with a dummy error
# code (0) and the C handler address pushed; all other stubs jmp here.
kaf24@1710 | 492 ENTRY(divide_error) |
kaf24@1710 | 493 pushl $0 # no error code |
kaf24@1710 | 494 pushl $ SYMBOL_NAME(do_divide_error) |
kaf24@1710 | 495 ALIGN |
# (review) Common exception body. On entry the stack holds (top down):
# handler address, error code, then the CPU iret frame. The pushes below
# complete the pt_regs layout; the handler address/error code slots are then
# swapped into %edi/%esi and overwritten with GS / -1 respectively, so the
# final frame matches the GS/ORIG_EAX offsets defined at the top of file.
kaf24@1710 | 496 error_code: |
kaf24@1710 | 497 pushl %fs |
kaf24@1710 | 498 pushl %es |
kaf24@1710 | 499 pushl %ds |
kaf24@1710 | 500 pushl %eax |
kaf24@1710 | 501 xorl %eax,%eax |
kaf24@1710 | 502 pushl %ebp |
kaf24@1710 | 503 pushl %edi |
kaf24@1710 | 504 pushl %esi |
kaf24@1710 | 505 pushl %edx |
kaf24@1710 | 506 decl %eax # eax = -1 |
kaf24@1710 | 507 pushl %ecx |
kaf24@1710 | 508 pushl %ebx |
kaf24@1710 | 509 cld |
kaf24@1710 | 510 movl %gs,%ecx |
kaf24@1710 | 511 movl ORIG_EAX(%esp), %esi # get the error code |
kaf24@1710 | 512 movl GS(%esp), %edi # get the function address |
kaf24@1710 | 513 movl %eax, ORIG_EAX(%esp) |
kaf24@1710 | 514 movl %ecx, GS(%esp) |
kaf24@1710 | 515 movl $(__HYPERVISOR_DS),%edx |
kaf24@1710 | 516 movl %edx,%ds |
kaf24@1710 | 517 movl %edx,%es |
kaf24@1710 | 518 movl %edx,%fs |
kaf24@1710 | 519 movl %edx,%gs |
kaf24@1710 | 520 movl %esp,%edx |
kaf24@1710 | 521 pushl %esi # push the error code |
kaf24@1710 | 522 pushl %edx # push the pt_regs pointer |
kaf24@1710 | 523 GET_CURRENT(%ebx) |
# (review) Call do_<exception>(regs, error_code); afterwards pick the return
# path based on the interrupted privilege level, as in ret_from_intr.
kaf24@1710 | 524 call *%edi |
kaf24@1710 | 525 addl $8,%esp |
kaf24@1710 | 526 movb CS(%esp),%al |
kaf24@1710 | 527 testb $3,%al |
kaf24@1710 | 528 je restore_all_xen |
kaf24@1710 | 529 jmp process_guest_exception_and_events |
kaf24@1710 | 530 |
# (review) Exception entry stubs. Pattern: exceptions for which the CPU does
# NOT push an error code push a dummy $0 first; those that DO (invalid_TSS
# through alignment_check, page_fault) push only the handler address.
kaf24@1710 | 531 ENTRY(coprocessor_error) |
kaf24@1710 | 532 pushl $0 |
kaf24@1710 | 533 pushl $ SYMBOL_NAME(do_coprocessor_error) |
kaf24@1710 | 534 jmp error_code |
kaf24@1710 | 535 |
kaf24@1710 | 536 ENTRY(simd_coprocessor_error) |
kaf24@1710 | 537 pushl $0 |
kaf24@1710 | 538 pushl $ SYMBOL_NAME(do_simd_coprocessor_error) |
kaf24@1710 | 539 jmp error_code |
kaf24@1710 | 540 |
kaf24@1710 | 541 ENTRY(device_not_available) |
kaf24@1710 | 542 pushl $0 |
kaf24@1710 | 543 pushl $SYMBOL_NAME(math_state_restore) |
kaf24@1710 | 544 jmp error_code |
kaf24@1710 | 545 |
kaf24@1710 | 546 ENTRY(debug) |
kaf24@1710 | 547 pushl $0 |
kaf24@1710 | 548 pushl $ SYMBOL_NAME(do_debug) |
kaf24@1710 | 549 jmp error_code |
kaf24@1710 | 550 |
kaf24@1710 | 551 ENTRY(int3) |
kaf24@1710 | 552 pushl $0 |
kaf24@1710 | 553 pushl $ SYMBOL_NAME(do_int3) |
kaf24@1710 | 554 jmp error_code |
kaf24@1710 | 555 |
kaf24@1710 | 556 ENTRY(overflow) |
kaf24@1710 | 557 pushl $0 |
kaf24@1710 | 558 pushl $ SYMBOL_NAME(do_overflow) |
kaf24@1710 | 559 jmp error_code |
kaf24@1710 | 560 |
kaf24@1710 | 561 ENTRY(bounds) |
kaf24@1710 | 562 pushl $0 |
kaf24@1710 | 563 pushl $ SYMBOL_NAME(do_bounds) |
kaf24@1710 | 564 jmp error_code |
kaf24@1710 | 565 |
kaf24@1710 | 566 ENTRY(invalid_op) |
kaf24@1710 | 567 pushl $0 |
kaf24@1710 | 568 pushl $ SYMBOL_NAME(do_invalid_op) |
kaf24@1710 | 569 jmp error_code |
kaf24@1710 | 570 |
kaf24@1710 | 571 ENTRY(coprocessor_segment_overrun) |
kaf24@1710 | 572 pushl $0 |
kaf24@1710 | 573 pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun) |
kaf24@1710 | 574 jmp error_code |
kaf24@1710 | 575 |
# (review) From here down the CPU supplies the error code itself.
kaf24@1710 | 576 ENTRY(invalid_TSS) |
kaf24@1710 | 577 pushl $ SYMBOL_NAME(do_invalid_TSS) |
kaf24@1710 | 578 jmp error_code |
kaf24@1710 | 579 |
kaf24@1710 | 580 ENTRY(segment_not_present) |
kaf24@1710 | 581 pushl $ SYMBOL_NAME(do_segment_not_present) |
kaf24@1710 | 582 jmp error_code |
kaf24@1710 | 583 |
kaf24@1710 | 584 ENTRY(stack_segment) |
kaf24@1710 | 585 pushl $ SYMBOL_NAME(do_stack_segment) |
kaf24@1710 | 586 jmp error_code |
kaf24@1710 | 587 |
kaf24@1710 | 588 ENTRY(general_protection) |
kaf24@1710 | 589 pushl $ SYMBOL_NAME(do_general_protection) |
kaf24@1710 | 590 jmp error_code |
kaf24@1710 | 591 |
kaf24@1710 | 592 ENTRY(alignment_check) |
kaf24@1710 | 593 pushl $ SYMBOL_NAME(do_alignment_check) |
kaf24@1710 | 594 jmp error_code |
kaf24@1710 | 595 |
kaf24@1710 | 596 ENTRY(page_fault) |
kaf24@1710 | 597 pushl $ SYMBOL_NAME(do_page_fault) |
kaf24@1710 | 598 jmp error_code |
kaf24@1710 | 599 |
kaf24@1710 | 600 ENTRY(machine_check) |
kaf24@1710 | 601 pushl $0 |
kaf24@1710 | 602 pushl $ SYMBOL_NAME(do_machine_check) |
kaf24@1710 | 603 jmp error_code |
kaf24@1710 | 604 |
kaf24@1710 | 605 ENTRY(spurious_interrupt_bug) |
kaf24@1710 | 606 pushl $0 |
kaf24@1710 | 607 pushl $ SYMBOL_NAME(do_spurious_interrupt_bug) |
kaf24@1710 | 608 jmp error_code |
kaf24@1710 | 609 |
# (review) NMI entry. Must not reload DS-GS (they may hold guest values or
# be mid-restore); port 0x61 bits distinguish watchdog tick vs parity (0x80)
# vs I/O-check (0x40) errors.
kaf24@1710 | 610 ENTRY(nmi) |
kaf24@1710 | 611 # Save state but do not trash the segment registers! |
kaf24@1710 | 612 # We may otherwise be unable to reload them or copy them to ring 1. |
kaf24@1710 | 613 pushl %eax |
kaf24@1710 | 614 SAVE_ALL_NOSEGREGS |
kaf24@1710 | 615 |
kaf24@2085 | 616 # Check for hardware problems. |
kaf24@1710 | 617 inb $0x61,%al |
kaf24@1710 | 618 testb $0x80,%al |
kaf24@2080 | 619 jne nmi_parity_err |
kaf24@1710 | 620 testb $0x40,%al |
kaf24@1710 | 621 jne nmi_io_err |
kaf24@1710 | 622 movl %eax,%ebx |
kaf24@1710 | 623 |
kaf24@1710 | 624 # Okay, its almost a normal NMI tick. We can only process it if: |
kaf24@1710 | 625 # A. We are the outermost Xen activation (in which case we have |
kaf24@1710 | 626 # the selectors safely saved on our stack) |
kaf24@1710 | 627 # B. DS-GS all contain sane Xen values. |
kaf24@1710 | 628 # In all other cases we bail without touching DS-GS, as we have |
kaf24@1710 | 629 # interrupted an enclosing Xen activation in tricky prologue or |
kaf24@1710 | 630 # epilogue code. |
kaf24@1710 | 631 movb CS(%esp),%al |
kaf24@1710 | 632 testb $3,%al |
kaf24@1710 | 633 jne do_watchdog_tick |
kaf24@1710 | 634 movl DS(%esp),%eax |
kaf24@1710 | 635 cmpw $(__HYPERVISOR_DS),%ax |
kaf24@1710 | 636 jne nmi_badseg |
kaf24@1710 | 637 movl ES(%esp),%eax |
kaf24@1710 | 638 cmpw $(__HYPERVISOR_DS),%ax |
kaf24@1710 | 639 jne nmi_badseg |
kaf24@1710 | 640 movl FS(%esp),%eax |
kaf24@1710 | 641 cmpw $(__HYPERVISOR_DS),%ax |
kaf24@1710 | 642 jne nmi_badseg |
kaf24@1710 | 643 movl GS(%esp),%eax |
kaf24@1710 | 644 cmpw $(__HYPERVISOR_DS),%ax |
kaf24@1710 | 645 jne nmi_badseg |
kaf24@1710 | 646 |
# (review) Watchdog path: call do_nmi(regs, reason) with Xen DS/ES loaded;
# return path chosen by interrupted privilege level as elsewhere.
kaf24@1710 | 647 do_watchdog_tick: |
kaf24@1710 | 648 movl $(__HYPERVISOR_DS),%edx |
kaf24@1710 | 649 movl %edx,%ds |
kaf24@1710 | 650 movl %edx,%es |
kaf24@1710 | 651 movl %esp,%edx |
kaf24@1710 | 652 pushl %ebx # reason |
kaf24@1710 | 653 pushl %edx # regs |
kaf24@1710 | 654 call SYMBOL_NAME(do_nmi) |
kaf24@1710 | 655 addl $8,%esp |
kaf24@1710 | 656 movb CS(%esp),%al |
kaf24@1710 | 657 testb $3,%al |
kaf24@1710 | 658 je restore_all_xen |
kaf24@1710 | 659 GET_CURRENT(%ebx) |
kaf24@1710 | 660 jmp restore_all_guest |
kaf24@1710 | 661 |
# (review) Bail-out: unwind the NOSEGREGS frame without touching DS-GS.
# addl $20 skips the four saved segment slots plus ORIG_EAX.
kaf24@1710 | 662 nmi_badseg: |
kaf24@1710 | 663 popl %ebx |
kaf24@1710 | 664 popl %ecx |
kaf24@1710 | 665 popl %edx |
kaf24@1710 | 666 popl %esi |
kaf24@1710 | 667 popl %edi |
kaf24@1710 | 668 popl %ebp |
kaf24@1710 | 669 popl %eax |
kaf24@1710 | 670 addl $20,%esp |
kaf24@1710 | 671 iret |
kaf24@1710 | 672 |
# (review) Hardware-error paths honour the 'nmi=' boot option: ignore,
# dom0 (raise NMI softirq for dom0), or fatal (report and continue via
# ret_from_intr). %ss: overrides are used because %ds is not yet sane.
kaf24@2085 | 673 nmi_parity_err: |
kaf24@2085 | 674 # Clear and disable the parity-error line |
kaf24@2085 | 675 andb $0xf,%al |
kaf24@2085 | 676 orb $0x4,%al |
kaf24@2085 | 677 outb %al,$0x61 |
kaf24@2085 | 678 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore |
kaf24@2085 | 679 je nmi_badseg |
kaf24@2085 | 680 bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason) |
kaf24@2085 | 681 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat) |
kaf24@2085 | 682 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0 |
kaf24@2085 | 683 je nmi_badseg |
kaf24@2085 | 684 movl $(__HYPERVISOR_DS),%edx # nmi=fatal |
kaf24@1710 | 685 movl %edx,%ds |
kaf24@1710 | 686 movl %edx,%es |
kaf24@2079 | 687 movl %esp,%edx |
kaf24@2079 | 688 push %edx |
kaf24@2079 | 689 call SYMBOL_NAME(mem_parity_error) |
kaf24@2085 | 690 addl $4,%esp |
kaf24@2085 | 691 jmp ret_from_intr |
kaf24@2085 | 692 |
kaf24@1710 | 693 nmi_io_err: |
kaf24@2085 | 694 # Clear and disable the I/O-error line |
kaf24@2085 | 695 andb $0xf,%al |
kaf24@2085 | 696 orb $0x8,%al |
kaf24@2085 | 697 outb %al,$0x61 |
kaf24@2085 | 698 cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore |
kaf24@2085 | 699 je nmi_badseg |
kaf24@2085 | 700 bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason) |
kaf24@2085 | 701 bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat) |
kaf24@2085 | 702 cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0 |
kaf24@2085 | 703 je nmi_badseg |
kaf24@2085 | 704 movl $(__HYPERVISOR_DS),%edx # nmi=fatal |
kaf24@1710 | 705 movl %edx,%ds |
kaf24@1710 | 706 movl %edx,%es |
kaf24@2079 | 707 movl %esp,%edx |
kaf24@2079 | 708 push %edx |
kaf24@2079 | 709 call SYMBOL_NAME(io_check_error) |
kaf24@2085 | 710 addl $4,%esp |
kaf24@2085 | 711 jmp ret_from_intr |
kaf24@2079 | 712 |
kaf24@1710 | 713 .data |
# (review) Hypercall dispatch table; index order must match the hypercall
# numbers in hypervisor-if.h. Unused slots are padded with do_ni_hypercall
# up to NR_hypercalls so the masked index in the dispatchers is always safe.
kaf24@1710 | 714 ENTRY(hypercall_table) |
kaf24@1710 | 715 .long SYMBOL_NAME(do_set_trap_table) /* 0 */ |
kaf24@1710 | 716 .long SYMBOL_NAME(do_mmu_update) |
kaf24@1710 | 717 .long SYMBOL_NAME(do_set_gdt) |
kaf24@1710 | 718 .long SYMBOL_NAME(do_stack_switch) |
kaf24@1710 | 719 .long SYMBOL_NAME(do_set_callbacks) |
kaf24@1710 | 720 .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */ |
kaf24@1710 | 721 .long SYMBOL_NAME(do_sched_op) |
kaf24@1710 | 722 .long SYMBOL_NAME(do_dom0_op) |
kaf24@1710 | 723 .long SYMBOL_NAME(do_set_debugreg) |
kaf24@1710 | 724 .long SYMBOL_NAME(do_get_debugreg) |
kaf24@1710 | 725 .long SYMBOL_NAME(do_update_descriptor) /* 10 */ |
kaf24@1710 | 726 .long SYMBOL_NAME(do_set_fast_trap) |
kaf24@1710 | 727 .long SYMBOL_NAME(do_dom_mem_op) |
kaf24@1710 | 728 .long SYMBOL_NAME(do_multicall) |
kaf24@1710 | 729 .long SYMBOL_NAME(do_update_va_mapping) |
kaf24@1710 | 730 .long SYMBOL_NAME(do_set_timer_op) /* 15 */ |
kaf24@1710 | 731 .long SYMBOL_NAME(do_event_channel_op) |
kaf24@1710 | 732 .long SYMBOL_NAME(do_xen_version) |
kaf24@1710 | 733 .long SYMBOL_NAME(do_console_io) |
kaf24@1710 | 734 .long SYMBOL_NAME(do_physdev_op) |
kaf24@2375 | 735 .long SYMBOL_NAME(do_grant_table_op) /* 20 */ |
kaf24@2111 | 736 .long SYMBOL_NAME(do_vm_assist) |
kaf24@2375 | 737 .long SYMBOL_NAME(do_update_va_mapping_otherdomain) |
kaf24@1710 | 738 .rept NR_hypercalls-((.-hypercall_table)/4) |
kaf24@1710 | 739 .long SYMBOL_NAME(do_ni_hypercall) |
kaf24@1710 | 740 .endr |