#include <arch/processor.h>
#include <arch/segment.h>
#include <xtf/asm_macros.h>
Stack frame layout: (first aligned to a 16 byte boundary)

|               Xen |          Hardware | Notes         |
|-------------------+-------------------+---------------|
|               %ss |               %ss |               |
|              %rsp |              %rsp |               |
|            rflags |            rflags |               |
| upcall_mask / %cs |               %cs |               |
|              %rip |              %rip |               |
|        error_code | %rsp-> error_code | if applicable |
|              %r11 |                   |               |
|       %rsp-> %rcx |                   |               |
The %rcx and %r11 slots are present because Xen will typically SYSRET to
the entry point, clobbering both registers; they should be restored
promptly.

The stubs then push an error_code (if required) to make a common layout
for the frame, then use the upper 32 bits of the error_code to stash
additional metadata, currently just the entry vector.
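For example, a #PF (vector 14) with a hardware error code of 0x2 leaves
0x0000000e00000002 in the error_code slot: the low 32 bits as delivered,
the high 32 bits written by the entry stub.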
.macro env_ADJUST_FRAME /* Environment specific exception entry. */

    /* Restore results of Xen SYSRET'ing to this point. */
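    /*
     * A minimal sketch of that restore, assuming the frame layout above:
     * Xen leaves the original %rcx at the top of the stack with %r11 just
     * above it, so pop both back before continuing.  (Only relevant to PV
     * guests; HVM exception delivery does not involve SYSRET.)
     */
    pop %rcx
    pop %r11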
.macro env_IRETQ /* Environment specific version of `iretq`. */

    push $0            /* Indicate that this isn't a SYSRET'able   */
    jmp HYPERCALL_iret /* situation, and use the 'iret' hypercall. */

    iretq              /* HVM guests use a real 'iretq' instruction. */
.macro exception_entry sym vec

    .if !((1 << \vec) & X86_EXC_HAVE_EC)
    /* Push dummy error code (if needed) to align stack. */
    push $0
    .endif

    /* Store entry vector in the top 32 bits of error_code. */
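    /*
     * A sketch of that stash, assuming the common frame above with the
     * 64-bit error_code slot now at the top of the stack: the vector is
     * written into the slot's high half.
     */
    movl $\vec, 4(%rsp)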
exception_entry DE  X86_EXC_DE
exception_entry DB  X86_EXC_DB
exception_entry NMI X86_EXC_NMI
exception_entry BP  X86_EXC_BP
exception_entry OF  X86_EXC_OF
exception_entry BR  X86_EXC_BR
exception_entry UD  X86_EXC_UD
exception_entry NM  X86_EXC_NM
exception_entry DF  X86_EXC_DF
exception_entry TS  X86_EXC_TS
exception_entry NP  X86_EXC_NP
exception_entry SS  X86_EXC_SS
exception_entry GP  X86_EXC_GP
exception_entry PF  X86_EXC_PF
exception_entry MF  X86_EXC_MF
exception_entry AC  X86_EXC_AC
exception_entry MC  X86_EXC_MC
exception_entry XM  X86_EXC_XM
exception_entry VE  X86_EXC_VE
    mov %rsp, %rdi /* struct cpu_regs * */

    add $8, %rsp   /* Pop error_code/entry_vector. */

ENDFUNC(handle_exception)
ENTRY(entry_ret_to_kernel) /* int $X86_VEC_RET2KERN */

    mov %rbp, %rsp /* Restore %rsp to exec_user_param()'s context. */

ENDFUNC(entry_ret_to_kernel)
ENTRY(exec_user_param) /* ulong (*fn)(ulong), ulong p1 */

    /* Prepare to "call" exec_user_stub(). */
    push $1f       /* Fake return addr as if we'd called exec_user_stub(). */
    mov %rsp, %rbp /* Stash %rsp for entry_ret_to_kernel(). */

    /* Prepare an IRET frame. */
    push exec_user_ss(%rip)      /* SS */
    push $user_stack + PAGE_SIZE /* RSP */
    /* Apply and/or masks to eflags. */
    mov exec_user_efl_and_mask(%rip), %rdx
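    /*
     * Sketch, assuming RFLAGS was pushed just above as the middle slot of
     * the IRET frame: clear any flag bits outside the and-mask in place.
     */
    and %rdx, (%rsp)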
    mov exec_user_efl_or_mask(%rip), %rdx
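    /* Sketch continued: set any flag bits required by the or-mask. */
    or %rdx, (%rsp)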
    push exec_user_cs(%rip) /* CS */
    push $exec_user_stub    /* RIP */

    env_IRETQ /* Drop to user privilege. */

1:  /* entry_ret_to_kernel() returns here with a sensible stack. */

ENDFUNC(exec_user_param)
.pushsection .text.user, "ax", @progbits
ENTRY(exec_user_stub)

    xchg %rdi, %rsi /* Swap p1 to be first parameter to fn(). */
    call *%rsi      /* fn(p1) */

    int $X86_VEC_RET2KERN /* Return to kernel privilege. */

ENDFUNC(exec_user_stub)
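/*
 * Illustrative usage sketch only (not part of the framework): a caller
 * passes fn in %rdi and p1 in %rsi per the SysV ABI and gets fn(p1) back
 * in %rax once entry_ret_to_kernel() has switched back.  Both example_*
 * symbols below are hypothetical stand-ins.
 */
.pushsection .text.user, "ax", @progbits
ENTRY(example_user_fn)  /* Hypothetical: ulong example_user_fn(ulong p1) */
    lea 1(%rdi), %rax   /* Return p1 + 1. */
    ret
ENDFUNC(example_user_fn)
.popsection

ENTRY(example_run_user) /* Hypothetical: run example_user_fn(41) at user privilege. */
    lea example_user_fn(%rip), %rdi /* fn */
    mov $41, %rsi                   /* p1 */
    call exec_user_param            /* Returns with %rax == 42. */
    ret
ENDFUNC(example_run_user)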
    mov %rsp, %rdi /* struct cpu_regs * */

    add $8, %rsp   /* Pop error_code/entry_vector. */
#if defined(CONFIG_PV)

    mov %rsp, %rdi /* struct cpu_regs * */

    movq $VGCF_in_syscall, (%rsp) /* Clobber error_code/entry_vector */
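    /*
     * Assumption for context: VGCF_in_syscall presumably marks this as a
     * syscall-style frame for the iret hypercall, telling Xen it need not
     * restore %rcx/%r11 on the way back out.
     */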
ENDFUNC(entry_SYSCALL)

    mov %rsp, %rdi /* struct cpu_regs * */

    movq $0, (%rsp) /* Clobber error_code/entry_vector */

ENDFUNC(entry_SYSENTER)
#endif /* CONFIG_PV */
 * indent-tabs-mode: nil