
view xen/arch/x86/x86_32/entry.S @ 10949:b33c08de3d98

[HVM] Add a concept of HVM parameters to the hypervisor.

Each HVM domain has a space of HVM parameters associated with it,
and these can be manipulated via a new hvm_op hypercall. This means
that the hypervisor no longer needs to parse the hvm_info table, so
remove that code.

Signed-off-by: Steven Smith <ssmith@xensource.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Aug 03 13:53:33 2006 +0100 (2006-08-03)
parents 833d05bdb4a4
children b9af81884b99
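
As a rough illustration of the interface this changeset describes (not part of entry.S below): a caller manipulates a domain's HVM parameters through the new hvm_op hypercall, passing a small get/set structure. The sketch below is a minimal example under stated assumptions -- the sub-op numbers, the structure layout and the HYPERVISOR_hvm_op() wrapper name are assumptions for illustration; the public headers added by this changeset are the authoritative definitions.

    /* Minimal sketch: set an HVM parameter and read it back via hvm_op. */
    #include <stdint.h>

    #define HVMOP_set_param  0             /* assumed sub-op numbers */
    #define HVMOP_get_param  1

    struct xen_hvm_param {
        uint16_t domid;                    /* which HVM domain (domid_t)    */
        uint32_t index;                    /* which parameter               */
        uint64_t value;                    /* in for set, out for get       */
    };

    /* Hypothetical hypercall wrapper; the real one depends on the caller. */
    extern long HYPERVISOR_hvm_op(unsigned int op, void *arg);

    static long hvm_param_set_and_check(uint16_t domid, uint32_t index,
                                        uint64_t value)
    {
        struct xen_hvm_param p = { .domid = domid, .index = index, .value = value };
        long rc = HYPERVISOR_hvm_op(HVMOP_set_param, &p);

        if ( rc != 0 )
            return rc;

        p.value = 0;
        rc = HYPERVISOR_hvm_op(HVMOP_get_param, &p);
        if ( rc != 0 )
            return rc;

        return (p.value == value) ? 0 : -1;  /* -1: readback mismatch */
    }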

/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from
 * that we're safe, as we don't allow a guest OS to install ring-0 privileges
 * into the GDT/LDT. It's up to the guest OS to ensure all returns via the
 * IDT are to ring 1. If not, we load incorrect SS/ESP values from the TSS
 * (for ring 1 rather than the correct ring) and bad things are bound to
 * ensue -- IRET is likely to fault, and we may end up killing the domain
 * (no harm can come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, the original values for DS/ES/FS/GS are placed on the stack
 * rather than in DS/ES/FS/GS themselves. Why? It saves us loading them,
 * only to have them saved/restored in the guest OS. Furthermore, if we load
 * them we may cause a fault if they are invalid, which is a hassle to deal
 * with. We avoid that problem if we don't load them :-) This property allows
 * us to use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - a fault while creating a callback stack frame (due to a bad ring-1
 *    stack).
 *  - a faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
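
/*
 * Illustration (descriptive sketch only, not consumed by the code below):
 * the guest-visible frame that create_bounce_frame builds on the guest
 * stack, viewed as a C structure listed from the new guest %esp upwards.
 * The field names are assumptions for readability; the guest simply sees
 * raw 32-bit words. VM86 frames add further selector words above SS; see
 * the code.
 *
 *     struct bounce_frame {
 *         uint32_t ds, es, fs, gs;   -- failsafe callback only (TBF_FAILSAFE)
 *         uint32_t error_code;       -- only if TBF_EXCEPTION_ERRCODE is set
 *         uint32_t eip;
 *         uint32_t cs;               -- bits 16-23 carry saved_upcall_mask
 *         uint32_t eflags;
 *         uint32_t esp, ss;          -- only for an inter-privilege return
 *     };
 */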

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movl $~(STACK_SIZE-1),reg;              \
        andl %esp,reg;                          \
        orl  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)         \
        movl $STACK_SIZE-4, reg; \
        orl  %esp, reg;          \
        andl $~3,reg;            \
        movl (reg),reg;
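
/*
 * The two macros above rely on the per-CPU stack layout: each hypervisor
 * stack is STACK_SIZE bytes and STACK_SIZE-aligned, the guest's saved
 * cpu_user_regs live at the top of the stack (within the CPUINFO_sizeof
 * region), and the final word of the stack holds the pointer to the vcpu
 * currently running on that CPU. GET_GUEST_REGS masks %esp down to the
 * stack base and steps up to the saved register frame; GET_CURRENT rounds
 * %esp up to the last aligned word of the stack and loads the vcpu pointer
 * stored there.
 */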

        ALIGN
restore_all_guest:
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        jnz   restore_all_vm86
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testl $2,UREGS_cs(%esp)
        jnz   1f
        call  restore_ring0_guest
        jmp   restore_all_vm86
1:
#endif
FLT1:   mov  UREGS_ds(%esp),%ds
FLT2:   mov  UREGS_es(%esp),%es
FLT3:   mov  UREGS_fs(%esp),%fs
FLT4:   mov  UREGS_gs(%esp),%gs
restore_all_vm86:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
FLT5:   iret
.section .fixup,"ax"
FIX5:   subl  $28,%esp
        pushl 28(%esp)                 # error_code/entry_vector
        movl  %eax,UREGS_eax+4(%esp)
        movl  %ebp,UREGS_ebp+4(%esp)
        movl  %edi,UREGS_edi+4(%esp)
        movl  %esi,UREGS_esi+4(%esp)
        movl  %edx,UREGS_edx+4(%esp)
        movl  %ecx,UREGS_ecx+4(%esp)
        movl  %ebx,UREGS_ebx+4(%esp)
FIX1:   SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $DBLFLT1,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   handle_exception
DBLFLT1:GET_CURRENT(%ebx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%ebx)
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
        jnc   1f
        orw   $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
1:      call  create_bounce_frame
        xorl  %eax,%eax
        movl  %eax,UREGS_ds(%esp)
        movl  %eax,UREGS_es(%esp)
        movl  %eax,UREGS_fs(%esp)
        movl  %eax,UREGS_gs(%esp)
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .long FLT1,FIX1
        .long FLT2,FIX1
        .long FLT3,FIX1
        .long FLT4,FIX1
        .long FLT5,FIX5
.previous
.section __ex_table,"a"
        .long DBLFLT1,failsafe_callback
.previous

        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret

        ALIGN
ENTRY(hypercall)
        subl  $4,%esp
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        cmpl  $NR_hypercalls,%eax
        jae   bad_hypercall
        PERFC_INCR(PERFC_hypercalls, %eax)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushl %eax
        pushl UREGS_eip+4(%esp)
        pushl 28(%esp) # EBP
        pushl 28(%esp) # EDI
        pushl 28(%esp) # ESI
        pushl 28(%esp) # EDX
        pushl 28(%esp) # ECX
        pushl 28(%esp) # EBX
        movzb hypercall_args_table(,%eax,1),%ecx
        leal  (%esp,%ecx,4),%edi
        subl  $6,%ecx
        negl  %ecx
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
#endif
        call  *hypercall_table(,%eax,4)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        addl  $24,%esp                 # Shadow parameters
        popl  %ecx                     # Shadow EIP
        cmpl  %ecx,UREGS_eip(%esp)
        popl  %ecx                     # Shadow hypercall index
        jne   skip_clobber             # If EIP has changed then don't clobber
        movzb hypercall_args_table(,%ecx,1),%ecx
        movl  %esp,%edi
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
skip_clobber:
#endif
        movl  %eax,UREGS_eax(%esp)     # save the return value

test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                            # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%ebx),%eax
        shl   $IRQSTAT_shift,%eax
        test  %ecx,irq_stat(%eax,1)
        jnz   process_softirqs
        btr   $_VCPUF_nmi_pending,VCPU_flags(%ebx)
        jc    process_nmi
test_guest_events:
        movl  VCPU_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
process_softirqs:
        sti
        call  do_softirq
        jmp   test_all_events

        ALIGN
process_nmi:
        movl  VCPU_nmi_addr(%ebx),%eax
        test  %eax,%eax
        jz    test_all_events
        bts   $_VCPUF_nmi_masked,VCPU_flags(%ebx)
        jc    1f
        sti
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movw  $FLAT_KERNEL_CS,TRAPBOUNCE_cs(%edx)
        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        jmp   test_all_events
1:      bts   $_VCPUF_nmi_pending,VCPU_flags(%ebx)
        jmp   test_guest_events

bad_hypercall:
        movl  $-ENOSYS,UREGS_eax(%esp)
        jmp   test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:           */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                       */
/* %edx == trap_bounce, %ebx == struct vcpu                             */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp.    */
create_bounce_frame:
        movl  UREGS_eflags+4(%esp),%ecx
        movb  UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz    ring1 /* jump if returning to an existing ring-1 activation */
        movl  VCPU_kernel_sp(%ebx),%esi
FLT6:   mov   VCPU_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_1
        subl  $16,%esi        /* push ES/DS/FS/GS (VM86 stack frame) */
        movl  UREGS_es+4(%esp),%eax
FLT7:   movl  %eax,%gs:(%esi)
        movl  UREGS_ds+4(%esp),%eax
FLT8:   movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
FLT9:   movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
FLT10:  movl  %eax,%gs:12(%esi)
nvm86_1:subl  $8,%esi         /* push SS/ESP (inter-priv iret) */
        movl  UREGS_esp+4(%esp),%eax
FLT11:  movl  %eax,%gs:(%esi)
        movl  UREGS_ss+4(%esp),%eax
FLT12:  movl  %eax,%gs:4(%esi)
        jmp   1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  UREGS_esp+4(%esp),%esi
FLT13:  mov   UREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        movb  TRAPBOUNCE_flags(%edx),%cl
        subl  $12,%esi
        movl  UREGS_eip+4(%esp),%eax
FLT14:  movl  %eax,%gs:(%esi)
        movl  VCPU_vcpu_info(%ebx),%eax
        pushl VCPUINFO_upcall_mask(%eax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                      # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%eax)
        popl  %eax
        shll  $16,%eax                 # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+4(%esp),%ax     # Bits 0-15: CS
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testw $2,%ax
        jnz   FLT15
        and   $~3,%ax                  # RPL 1 -> RPL 0
#endif
FLT15:  movl  %eax,%gs:4(%esi)
        test  $0x00FF0000,%eax         # Bits 16-23: saved_upcall_mask
        setz  %ch                      # %ch == !saved_upcall_mask
        movl  UREGS_eflags+4(%esp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        shlb  $1,%ch                   # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                  # Fold EFLAGS.IF into %eax
FLT16:  movl  %eax,%gs:8(%esi)
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                  # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
FLT17:  movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                 # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_2
        xorl  %eax,%eax                # VM86: we write zero selector values
FLT18:  movl  %eax,%gs:(%esi)
FLT19:  movl  %eax,%gs:4(%esi)
FLT20:  movl  %eax,%gs:8(%esi)
FLT21:  movl  %eax,%gs:12(%esi)
        jmp   2f
nvm86_2:movl  UREGS_ds+4(%esp),%eax    # non-VM86: write real selector values
FLT22:  movl  %eax,%gs:(%esi)
        movl  UREGS_es+4(%esp),%eax
FLT23:  movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
FLT24:  movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
FLT25:  movl  %eax,%gs:12(%esi)
2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    nvm86_3
        xorl  %eax,%eax                /* zero DS-GS, just as a real CPU would */
        movl  %eax,UREGS_ds+4(%esp)
        movl  %eax,UREGS_es+4(%esp)
        movl  %eax,UREGS_fs+4(%esp)
        movl  %eax,UREGS_gs+4(%esp)
nvm86_3:/* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $0xfffcbeff,UREGS_eflags+4(%esp)
        mov   %gs,UREGS_ss+4(%esp)
        movl  %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        movl  %eax,UREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        test  %eax,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_eip+4(%esp)
        movb  $0,TRAPBOUNCE_flags(%edx)
        ret
.section __ex_table,"a"
        .long FLT6,domain_crash_synchronous , FLT7,domain_crash_synchronous
        .long FLT8,domain_crash_synchronous , FLT9,domain_crash_synchronous
        .long FLT10,domain_crash_synchronous , FLT11,domain_crash_synchronous
        .long FLT12,domain_crash_synchronous , FLT13,domain_crash_synchronous
        .long FLT14,domain_crash_synchronous , FLT15,domain_crash_synchronous
        .long FLT16,domain_crash_synchronous , FLT17,domain_crash_synchronous
        .long FLT18,domain_crash_synchronous , FLT19,domain_crash_synchronous
        .long FLT20,domain_crash_synchronous , FLT21,domain_crash_synchronous
        .long FLT22,domain_crash_synchronous , FLT23,domain_crash_synchronous
        .long FLT24,domain_crash_synchronous , FLT25,domain_crash_synchronous
.previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S (%lx)\n"

domain_crash_synchronous:
        pushl $domain_crash_synchronous_string
        call  printf
        jmp   __domain_crash_synchronous

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
handle_exception:
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
        sti                            # re-enable interrupts
        xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                     # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(PERFC_exceptions, %eax)
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        leal  VCPU_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        jmp   test_all_events

exception_with_ints_disabled:
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax  # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        pushl %esp
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,UREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $UREGS_kernel_sizeof/4,%ecx
        rep;  movsl                    # make room for error_code/entry_vector
        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,UREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen          # return to fixup code

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  UREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                     # push the cpu_user_regs pointer
        pushl %esi                     # push the trapnr (entry vector)
        call  fatal_trap
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   handle_exception

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   handle_exception

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   handle_exception

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   handle_exception

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   handle_exception

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   handle_exception

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   handle_exception

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   handle_exception

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   handle_exception

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   handle_exception

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   handle_exception

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   handle_exception

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   handle_exception

ENTRY(nmi)
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        # NMI entry protocol is incompatible with guest kernel in ring 0.
        iret
#else
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # We can only process the NMI if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS and ES contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   continue_nmi
        movl  %ds,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi
        movl  %es,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi

continue_nmi:
        SET_XEN_SEGMENTS(d)
        movl  %esp,%edx
        pushl %edx
        call  do_nmi
        addl  $4,%esp
        jmp   ret_from_intr

defer_nmi:
        movl  $FIXMAP_apic_base,%eax
        # apic_wait_icr_idle()
1:      movl  %ss:APIC_ICR(%eax),%ebx
        testl $APIC_ICR_BUSY,%ebx
        jnz   1b
        # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_PHYSICAL | \
                TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
        jmp   restore_all_xen
#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */

ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl  $16,%esp
        ret

do_arch_sched_op_compat:
        # Ensure we return success even if we return via schedule_tail()
        xorl  %eax,%eax
        GET_GUEST_REGS(%ecx)
        movl  %eax,UREGS_eax(%ecx)
        jmp   do_sched_op_compat

do_arch_sched_op:
        # Ensure we return success even if we return via schedule_tail()
        xorl  %eax,%eax
        GET_GUEST_REGS(%ecx)
        movl  %eax,UREGS_eax(%ecx)
        jmp   do_sched_op

.data

ENTRY(exception_table)
        .long do_divide_error
        .long do_debug
        .long 0 # nmi
        .long do_int3
        .long do_overflow
        .long do_bounds
        .long do_invalid_op
        .long math_state_restore
        .long 0 # double fault
        .long do_coprocessor_segment_overrun
        .long do_invalid_TSS
        .long do_segment_not_present
        .long do_stack_segment
        .long do_general_protection
        .long do_page_fault
        .long do_spurious_interrupt_bug
        .long do_coprocessor_error
        .long do_alignment_check
        .long do_machine_check
        .long do_simd_coprocessor_error

ENTRY(hypercall_table)
        .long do_set_trap_table     /*  0 */
        .long do_mmu_update
        .long do_set_gdt
        .long do_stack_switch
        .long do_set_callbacks
        .long do_fpu_taskswitch     /*  5 */
        .long do_arch_sched_op_compat
        .long do_dom0_op
        .long do_set_debugreg
        .long do_get_debugreg
        .long do_update_descriptor  /* 10 */
        .long do_ni_hypercall
        .long do_memory_op
        .long do_multicall
        .long do_update_va_mapping
        .long do_set_timer_op       /* 15 */
        .long do_event_channel_op_compat
        .long do_xen_version
        .long do_console_io
        .long do_physdev_op_compat
        .long do_grant_table_op     /* 20 */
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_iret
        .long do_vcpu_op
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_acm_op
        .long do_nmi_op
        .long do_arch_sched_op
        .long do_callback_op        /* 30 */
        .long do_xenoprof_op
        .long do_event_channel_op
        .long do_physdev_op
        .long do_hvm_op             /* 34 */
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */    /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 4 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */    /*  5 */
        .byte 2 /* do_arch_sched_op_compat */
        .byte 1 /* do_dom0_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 4 /* do_update_descriptor */ /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 4 /* do_update_va_mapping */
        .byte 2 /* do_set_timer_op */      /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op */    /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 5 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret */
        .byte 3 /* do_vcpu_op */
        .byte 0 /* do_ni_hypercall */      /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_acm_op */
        .byte 2 /* do_nmi_op */
        .byte 2 /* do_arch_sched_op */
        .byte 2 /* do_callback_op */       /* 30 */
        .byte 2 /* do_xenoprof_op */
        .byte 2 /* do_event_channel_op */
        .byte 2 /* do_physdev_op */
        .byte 2 /* do_hvm_op */            /* 34 */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr