/* Xen Test Framework: entry_64.S */
#include <arch/idt.h>
#include <arch/page.h>
#include <arch/processor.h>
#include <arch/segment.h>
#include <xtf/asm_macros.h>

/*

Stack frame layout: (first aligned to 16 byte boundary)

|               Xen |          Hardware | Notes         |
|-------------------+-------------------+---------------|
|               <r> |               <r> | <l>           |
|-------------------+-------------------+---------------|
|               %ss |               %ss |               |
|              %rsp |              %rsp |               |
|            rflags |            rflags |               |
| upcall_mask / %cs |               %cs |               |
|              %rip |              %rip |               |
|        error_code | %rsp-> error_code | if applicable |
|              %r11 |                   |               |
|       %rsp-> %rcx |                   |               |

The %rcx and %r11 values are on the stack because Xen will typically SYSRET
to the entry point; they should be restored promptly.

The stubs then push an error_code (if required) to make a common layout for
the frame, then use the upper 32 bits of the error_code to stash additional
metadata (currently just the entry vector).

*/
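
/*
 * For reference, a C sketch of how a handler can split the combined 64-bit
 * error_code slot back apart (names are illustrative; the framework's real
 * register structure lives in its headers):
 *
 *   uint64_t slot = regs->raw_error_code;     // hypothetical field
 *   uint32_t error_code   = (uint32_t)slot;   // hardware error code, or 0
 *   uint32_t entry_vector = slot >> 32;       // stashed by the stubs below
 */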

.macro env_ADJUST_FRAME /* Environment specific exception entry. */
#if defined(CONFIG_PV)
    /* Restore results of Xen SYSRET'ing to this point. */
    pop %rcx
    pop %r11
#endif
.endm

.macro env_IRETQ /* Environment specific version of `iretq`. */
#if defined(CONFIG_PV)

    push $0                 /* Indicate that this isn't a SYSRET'able */
    jmp HYPERCALL_iret      /* situation, and use the 'iret' hypercall. */

#else
    iretq                   /* HVM guests use a real 'iretq' instruction. */
#endif
.endm

.macro exception_entry sym vec

ENTRY(entry_\sym)
    env_ADJUST_FRAME

    .if !((1 << \vec) & X86_EXC_HAVE_EC)
        /* Push dummy error code (if needed) to align stack. */
        push $0
    .endif

    /* Store entry vector in the top 32 bits of error_code. */
    movl $\vec, 4(%rsp)

    jmp handle_exception

ENDFUNC(entry_\sym)
.endm
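
/*
 * Expansion sketch: in an HVM build (where env_ADJUST_FRAME is empty),
 * 'exception_entry GP X86_EXC_GP' expands to approximately:
 *
 *   entry_GP:                         # X86_EXC_GP is in X86_EXC_HAVE_EC,
 *       movl $X86_EXC_GP, 4(%rsp)     # so no dummy push; the vector lands
 *       jmp handle_exception          # above the hardware error code.
 */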

exception_entry DE  X86_EXC_DE
exception_entry DB  X86_EXC_DB
exception_entry NMI X86_EXC_NMI
exception_entry BP  X86_EXC_BP
exception_entry OF  X86_EXC_OF
exception_entry BR  X86_EXC_BR
exception_entry UD  X86_EXC_UD
exception_entry NM  X86_EXC_NM
exception_entry DF  X86_EXC_DF
exception_entry TS  X86_EXC_TS
exception_entry NP  X86_EXC_NP
exception_entry SS  X86_EXC_SS
exception_entry GP  X86_EXC_GP
exception_entry PF  X86_EXC_PF
exception_entry MF  X86_EXC_MF
exception_entry AC  X86_EXC_AC
exception_entry MC  X86_EXC_MC
exception_entry XM  X86_EXC_XM
exception_entry VE  X86_EXC_VE

    .align 16
handle_exception:

    SAVE_ALL

    mov %rsp, %rdi          /* struct cpu_regs * */
    call do_exception

    RESTORE_ALL
    add $8, %rsp            /* Pop error_code/entry_vector. */

    env_IRETQ
ENDFUNC(handle_exception)
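
/*
 * A sketch of the C-side contract assumed above: do_exception() receives a
 * pointer to the registers saved by SAVE_ALL, with the error_code/vector
 * metadata on top (the exact 'struct cpu_regs' layout lives in the
 * framework headers):
 *
 *   void do_exception(struct cpu_regs *regs);
 */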

ENTRY(entry_ret_to_kernel) /* int $X86_VEC_RET2KERN */
    env_ADJUST_FRAME

    mov %rbp, %rsp          /* Restore %rsp to exec_user_param()'s context. */
    ret
ENDFUNC(entry_ret_to_kernel)
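
/*
 * Stack sketch at the 'ret' above, as prepared by exec_user_param() below
 * (%rbp was left pointing at the fake return address):
 *
 *   %rsp -> [ $1f of exec_user_param() ]   <- consumed by 'ret'
 *           [ caller's saved %rbp      ]   <- popped at the 1: label
 */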

ENTRY(exec_user_param) /* ulong (*fn)(ulong), ulong p1 */
    push %rbp

    /* Prepare to "call" exec_user_stub(). */
    push $1f                /* Fake return addr as if we'd called exec_user_stub(). */
    mov %rsp, %rbp          /* Stash %rsp for entry_ret_to_kernel(). */

    /* Prepare an IRET frame. */
    push exec_user_ss(%rip)         /* SS */
                                    /* RSP */
    push $user_stack + PAGE_SIZE
    pushf                           /* RFLAGS */

    /* Apply and/or masks to eflags. */
    mov exec_user_efl_and_mask(%rip), %rdx
    and %rdx, (%rsp)
    mov exec_user_efl_or_mask(%rip), %rdx
    or %rdx, (%rsp)

    push exec_user_cs(%rip)         /* CS */
    push $exec_user_stub            /* RIP */

    env_IRETQ               /* Drop to user privilege. */

1:  /* entry_ret_to_kernel() returns here with a sensible stack. */
    pop %rbp
    ret

ENDFUNC(exec_user_param)
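
/*
 * Typical use from C, as a sketch ('user_fn' is an illustrative test
 * function, not part of the framework):
 *
 *   static unsigned long user_fn(unsigned long p1) { return p1 + 1; }
 *
 *   unsigned long rc = exec_user_param(user_fn, 41);   // rc == 42
 */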

.pushsection .text.user, "ax", @progbits
ENTRY(exec_user_stub)
    xchg %rdi, %rsi         /* Swap p1 to be first parameter to fn(). */
    call *%rsi              /* fn(p1) */

    int $X86_VEC_RET2KERN   /* Return to kernel privilege. */
ENDFUNC(exec_user_stub)
.popsection

ENTRY(entry_EVTCHN)
    env_ADJUST_FRAME

    push $0                 /* No hardware error code. */
    movl $0x200, 4(%rsp)    /* Synthetic entry vector. */

    SAVE_ALL

    mov %rsp, %rdi          /* struct cpu_regs * */
    call do_evtchn

    RESTORE_ALL
    add $8, %rsp            /* Pop error_code/entry_vector. */

    env_IRETQ
ENDFUNC(entry_EVTCHN)

#if defined(CONFIG_PV)
ENTRY(entry_SYSCALL)
    env_ADJUST_FRAME

    push $0                 /* No hardware error code. */
    movl $0x100, 4(%rsp)    /* Synthetic entry vector. */

    SAVE_ALL

    mov %rsp, %rdi          /* struct cpu_regs * */
    call do_syscall

    RESTORE_ALL

    movq $VGCF_in_syscall, (%rsp) /* Clobber error_code/entry_vector. */
    jmp HYPERCALL_iret

ENDFUNC(entry_SYSCALL)

ENTRY(entry_SYSENTER)
    env_ADJUST_FRAME

    push $0                 /* No hardware error code. */
    movl $0x200, 4(%rsp)    /* Synthetic entry vector. */

    SAVE_ALL

    mov %rsp, %rdi          /* struct cpu_regs * */
    call do_sysenter

    RESTORE_ALL

    movq $0, (%rsp)         /* Clobber error_code/entry_vector. */
    jmp HYPERCALL_iret

ENDFUNC(entry_SYSENTER)
#endif /* CONFIG_PV */

/*
 * Local variables:
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 */