#include <arch/processor.h>
#include <arch/segment.h>
#include <xtf/asm_macros.h>
|               Xen | Hardware          | Notes                |
|-------------------+-------------------+----------------------|
|               %ss | %ss               | only on stack switch |
|              %esp | %esp              | only on stack switch |
|            eflags | eflags            |                      |
| upcall_mask / %cs | %cs               |                      |
|              %eip | %eip              |                      |
| %esp-> error_code | %esp-> error_code | if applicable        |
These stubs push an error_code of zero (for vectors which don't have a
hardware one) to give the frame a common layout.  A further word of metadata
is then pushed, currently just containing the entry vector.
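
For illustration, the frame as the common handler first sees it (a sketch for
this 32-bit build; the offsets follow directly from the layout above):

    %esp+ 8: %eip, %cs, eflags (and %esp/%ss on a stack switch)
    %esp+ 4: error_code (real, or the dummy zero pushed by the stub)
    %esp+ 0: entry_vector (metadata pushed by the stub)

This is why the exit paths below `add $8, %esp` to discard error_code and
entry_vector before returning.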
.macro env_IRET                 /* Environment specific version of `iret`. */
#if defined(CONFIG_PV)
        jmp HYPERCALL_iret      /* PV guests use the 'iret' hypercall. */
#else
        iretl                   /* HVM guests use a real 'iret' instruction. */
#endif
.endm
.macro exception_entry sym vec
        .if !((1 << \vec) & X86_EXC_HAVE_EC)
        /* Push dummy error code (if needed) to align stack. */
        push $0
        .endif

        /* Push metadata (entry vector). */
        push $\vec
exception_entry DE  X86_EXC_DE
exception_entry DB  X86_EXC_DB
exception_entry NMI X86_EXC_NMI
exception_entry BP  X86_EXC_BP
exception_entry OF  X86_EXC_OF
exception_entry BR  X86_EXC_BR
exception_entry UD  X86_EXC_UD
exception_entry NM  X86_EXC_NM
exception_entry DF  X86_EXC_DF
exception_entry TS  X86_EXC_TS
exception_entry NP  X86_EXC_NP
exception_entry SS  X86_EXC_SS
exception_entry GP  X86_EXC_GP
exception_entry PF  X86_EXC_PF
exception_entry MF  X86_EXC_MF
exception_entry AC  X86_EXC_AC
exception_entry MC  X86_EXC_MC
exception_entry XM  X86_EXC_XM
exception_entry VE  X86_EXC_VE
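
/*
 * For illustration, a sketch of what one instantiation expands to, assuming
 * the elided tail of exception_entry wraps the pushes in the usual
 * ENTRY()/ENDFUNC() pair and tail-jumps to the common handler:
 *
 *   ENTRY(entry_DE)
 *           push $0               (#DE carries no hardware error code)
 *           push $X86_EXC_DE      (metadata: entry vector)
 *           jmp handle_exception
 *   ENDFUNC(entry_DE)
 *
 * Vectors in X86_EXC_HAVE_EC (e.g. #GP, #PF) skip the dummy push, as
 * hardware/Xen already supplied a real error code.
 */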
        mov $__KERN_DS, %eax    /* Restore data segments. */
        mov %esp, %eax          /* struct cpu_regs * */
        add $8, %esp            /* Pop error_code/entry_vector. */
ENDFUNC(handle_exception)
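
/*
 * For reference, a sketch of the C handler this glue appears to hand off to;
 * the name and prototype are assumptions (the call itself is not shown here)
 * and the real declaration lives in the C sources:
 *
 *   void do_exception(struct cpu_regs *regs);
 *
 * Per the comment above, %eax carries the struct cpu_regs pointer built from
 * %esp as the parameter.
 */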
ENTRY(entry_ret_to_kernel)      /* int $X86_VEC_RET2KERN */
        mov %ebp, %esp          /* Restore %esp to exec_user_param()'s context. */
        ret                     /* Return via the fake address pushed by exec_user_param(). */
ENDFUNC(entry_ret_to_kernel)
ENTRY(exec_user_param)          /* %eax = ulong (*fn)(ulong p1), %edx = ulong p1 */
        push %ebp

        /* Prepare to "call" exec_user_stub(). */
        push $1f                /* Fake return addr as if we'd called exec_user_stub(). */
        mov %esp, %ebp          /* Stash %esp for entry_ret_to_kernel(). */

        /* Prepare an IRET frame. */
        push exec_user_ss       /* SS */
        push $user_stack + PAGE_SIZE /* ESP */
        pushf                   /* EFLAGS */

        /* Apply and/or masks to eflags. */
        mov exec_user_efl_and_mask, %ecx
        and %ecx, (%esp)
        mov exec_user_efl_or_mask, %ecx
        or %ecx, (%esp)

        push exec_user_cs       /* CS */
        push $exec_user_stub    /* EIP */

        env_IRET                /* Drop to user privilege. */

1:      /* entry_ret_to_kernel() returns here with a sensible stack. */
        pop %ebp
        ret
ENDFUNC(exec_user_param)
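
/*
 * C-side usage, as a sketch.  The prototype below is an assumption inferred
 * from the register comments above (fn in %eax, p1 in %edx, result back in
 * %eax); the real declaration lives in the C headers:
 *
 *   unsigned long exec_user_param(unsigned long (*fn)(unsigned long),
 *                                 unsigned long p1);
 *
 *   static unsigned long user_fn(unsigned long p1) { return p1 + 1; }
 *
 *   unsigned long res = exec_user_param(user_fn, 41); // runs user_fn(41) at
 *                                                     // user privilege; res == 42
 */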
.pushsection .text.user, "ax", @progbits
ENTRY(exec_user_stub)           /* %eax = ulong (*fn)(ulong p1), %edx = ulong p1 */
        xchg %eax, %edx         /* Swap p1 to be first parameter to fn(). */
        call *%edx              /* fn(p1) */

        int $X86_VEC_RET2KERN   /* Return to kernel privilege. */
ENDFUNC(exec_user_stub)
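
/*
 * Round trip, as a sketch of how the pieces above fit together:
 *
 *   exec_user_param():     pushes a fake return address, builds an IRET frame
 *                          targeting exec_user_stub(), and drops to user mode.
 *   exec_user_stub():      calls fn(p1) at user privilege, then issues
 *                          `int $X86_VEC_RET2KERN`.
 *   entry_ret_to_kernel(): back at kernel privilege, restores %esp from the
 *                          stashed %ebp and returns through the fake return
 *                          address to the `1:` label in exec_user_param().
 */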
        mov $__KERN_DS, %eax    /* Restore data segments. */
        mov %esp, %eax          /* struct cpu_regs * */
        add $8, %esp            /* Pop error_code/entry_vector. */
#if defined(CONFIG_PV)
        mov $__KERN_DS, %eax    /* Restore data segments. */
        mov %esp, %eax          /* struct cpu_regs * */
        add $8, %esp            /* Pop error_code/entry_vector. */
ENDFUNC(entry_SYSCALL)
        mov $__KERN_DS, %eax    /* Restore data segments. */
        mov %esp, %eax          /* struct cpu_regs * */
        add $8, %esp            /* Pop error_code/entry_vector. */
ENDFUNC(entry_SYSENTER)
 * indent-tabs-mode: nil