/root/src/xen/xen/arch/x86/pv/traps.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * arch/x86/pv/traps.c |
3 | | * |
4 | | * PV low level entry points. |
5 | | * |
6 | | * This program is free software; you can redistribute it and/or modify |
7 | | * it under the terms of the GNU General Public License as published by |
8 | | * the Free Software Foundation; either version 2 of the License, or |
9 | | * (at your option) any later version. |
10 | | * |
11 | | * This program is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | | * GNU General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU General Public License |
17 | | * along with this program; If not, see <http://www.gnu.org/licenses/>. |
18 | | * |
19 | | * Copyright (c) 2017 Citrix Systems Ltd. |
20 | | */ |
21 | | |
22 | | #include <xen/event.h> |
23 | | #include <xen/hypercall.h> |
24 | | #include <xen/lib.h> |
25 | | #include <xen/trace.h> |
26 | | #include <xen/softirq.h> |
27 | | |
28 | | #include <asm/apic.h> |
29 | | #include <asm/shared.h> |
30 | | #include <asm/traps.h> |
31 | | |
32 | | /* Override macros from asm/page.h to make them work with mfn_t */ |
33 | | #undef mfn_to_page |
34 | | #define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn)) |
35 | | #undef page_to_mfn |
36 | | #define page_to_mfn(pg) _mfn(__page_to_mfn(pg)) |
37 | | |
38 | | void do_entry_int82(struct cpu_user_regs *regs) |
39 | 0 | { |
40 | 0 | if ( unlikely(untrusted_msi) ) |
41 | 0 | check_for_unexpected_msi((uint8_t)regs->entry_vector); |
42 | 0 |
|
43 | 0 | pv_hypercall(regs); |
44 | 0 | } |
45 | | |
/*
 * Inject the event described by @event into the current PV vcpu by filling
 * in its trap bounce structure; the actual frame is built on the next exit
 * to guest context.  Handles hardware exceptions and software interrupts;
 * page faults additionally update the guest's virtual %cr2 and have their
 * error code's user-mode bit recomputed for the guest's CPL.
 */
void pv_inject_event(const struct x86_event *event)
{
    struct vcpu *curr = current;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct trap_bounce *tb;
    const struct trap_info *ti;
    const uint8_t vector = event->vector;
    unsigned int error_code = event->error_code;
    bool use_error_code;

    ASSERT(vector == event->vector); /* Confirm no truncation. */
    if ( event->type == X86_EVENTTYPE_HW_EXCEPTION )
    {
        ASSERT(vector < 32);
        /* Only architecturally-defined vectors carry an error code. */
        use_error_code = TRAP_HAVE_EC & (1u << vector);
    }
    else
    {
        ASSERT(event->type == X86_EVENTTYPE_SW_INTERRUPT);
        use_error_code = false;
    }
    /* The caller-supplied error code must match the vector's semantics. */
    if ( use_error_code )
        ASSERT(error_code != X86_EVENT_NO_EC);
    else
        ASSERT(error_code == X86_EVENT_NO_EC);

    tb = &curr->arch.pv_vcpu.trap_bounce;
    ti = &curr->arch.pv_vcpu.trap_ctxt[vector];

    /* Route the event to the guest's registered handler for @vector. */
    tb->flags = TBF_EXCEPTION;
    tb->cs    = ti->cs;
    tb->eip   = ti->address;

    if ( event->type == X86_EVENTTYPE_HW_EXCEPTION &&
         vector == TRAP_page_fault )
    {
        /* Expose the faulting address via the guest's virtual %cr2. */
        curr->arch.pv_vcpu.ctrlreg[2] = event->cr2;
        arch_set_cr2(curr, event->cr2);

        /* Re-set error_code.user flag appropriately for the guest. */
        error_code &= ~PFEC_user_mode;
        if ( !guest_kernel_mode(curr, regs) )
            error_code |= PFEC_user_mode;

        trace_pv_page_fault(event->cr2, error_code);
    }
    else
        trace_pv_trap(vector, regs->rip, use_error_code, error_code);

    if ( use_error_code )
    {
        tb->flags |= TBF_EXCEPTION_ERRCODE;
        tb->error_code = error_code;
    }

    if ( TI_GET_IF(ti) )
        tb->flags |= TBF_INTERRUPT;

    /* No handler registered by the guest: log, as the event is lost. */
    if ( unlikely(null_trap_bounce(curr, tb)) )
    {
        gprintk(XENLOG_WARNING,
                "Unhandled %s fault/trap [#%d, ec=%04x]\n",
                trapstr(vector), vector, error_code);

        if ( vector == TRAP_page_fault )
            show_page_walk(event->cr2);
    }
}
114 | | |
115 | | /* |
116 | | * Called from asm to set up the MCE trapbounce info. |
 117 |               | * Returns false if no callback is set up, else true. |
118 | | */ |
119 | | bool set_guest_machinecheck_trapbounce(void) |
120 | 0 | { |
121 | 0 | struct vcpu *curr = current; |
122 | 0 | struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce; |
123 | 0 |
|
124 | 0 | pv_inject_hw_exception(TRAP_machine_check, X86_EVENT_NO_EC); |
125 | 0 | tb->flags &= ~TBF_EXCEPTION; /* not needed for MCE delivery path */ |
126 | 0 |
|
127 | 0 | return !null_trap_bounce(curr, tb); |
128 | 0 | } |
129 | | |
130 | | /* |
131 | | * Called from asm to set up the NMI trapbounce info. |
132 | | * Returns false if no callback is set up, else true. |
133 | | */ |
134 | | bool set_guest_nmi_trapbounce(void) |
135 | 0 | { |
136 | 0 | struct vcpu *curr = current; |
137 | 0 | struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce; |
138 | 0 |
|
139 | 0 | pv_inject_hw_exception(TRAP_nmi, X86_EVENT_NO_EC); |
140 | 0 | tb->flags &= ~TBF_EXCEPTION; /* not needed for NMI delivery path */ |
141 | 0 |
|
142 | 0 | return !null_trap_bounce(curr, tb); |
143 | 0 | } |
144 | | |
145 | | void init_int80_direct_trap(struct vcpu *v) |
146 | 0 | { |
147 | 0 | struct trap_info *ti = &v->arch.pv_vcpu.trap_ctxt[0x80]; |
148 | 0 | struct trap_bounce *tb = &v->arch.pv_vcpu.int80_bounce; |
149 | 0 |
|
150 | 0 | tb->cs = ti->cs; |
151 | 0 | tb->eip = ti->address; |
152 | 0 |
|
153 | 0 | if ( null_trap_bounce(v, tb) ) |
154 | 0 | tb->flags = 0; |
155 | 0 | else |
156 | 0 | tb->flags = TBF_EXCEPTION | (TI_GET_IF(ti) ? TBF_INTERRUPT : 0); |
157 | 0 | } |
158 | | |
/*
 * Per-CPU record of a deferred NMI/MCE injection: which vcpu should be
 * woken, and on which physical cpu.  Claimed in pv_raise_interrupt() and
 * consumed (then cleared) by nmi_mce_softirq().
 */
struct softirq_trap {
    struct domain *domain;   /* domain to inject trap */
    struct vcpu *vcpu;       /* vcpu to inject trap */
    unsigned int processor;  /* physical cpu to inject trap */
};

static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
166 | | |
/*
 * NMI_MCE_SOFTIRQ handler: complete an NMI/MCE injection requested by
 * pv_raise_interrupt() from a context where waking a vcpu was unsafe.
 * May pin the target vcpu to a specific physical cpu; the original
 * affinity is restored later by the iret hypercall.
 */
static void nmi_mce_softirq(void)
{
    unsigned int cpu = smp_processor_id();
    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);

    /* pv_raise_interrupt() must have claimed this cpu's slot. */
    BUG_ON(st->vcpu == NULL);

    /*
     * Set the tmp value unconditionally, so that the check in the iret
     * hypercall works.
     */
    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
                 st->vcpu->cpu_hard_affinity);

    if ( (cpu != st->processor) ||
         (st->processor != st->vcpu->processor) )
    {

        /*
         * We are on a different physical cpu.  Make sure to wakeup the vcpu on
         * the specified processor.
         */
        vcpu_set_hard_affinity(st->vcpu, cpumask_of(st->processor));

        /* Affinity is restored in the iret hypercall. */
    }

    /*
     * Only used to defer wakeup of domain/vcpu to a safe (non-NMI/MCE)
     * context.
     */
    vcpu_kick(st->vcpu);
    /* Release the slot for the next injection request. */
    st->vcpu = NULL;
}
201 | | |
/*
 * One-time boot setup of the PV-specific IDT entries and the deferred
 * NMI/MCE injection softirq.
 */
void __init pv_trap_init(void)
{
    /* The 32-on-64 hypercall vector is only accessible from ring 1. */
    _set_gate(idt_table + HYPERCALL_VECTOR,
              SYS_DESC_trap_gate, 1, entry_int82);

    /* Fast trap for int80 (faster than taking the #GP-fixup path). */
    /* DPL 3: the gate must be reachable directly from guest userspace. */
    _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_trap_gate, 3,
              &int80_direct_trap);

    open_softirq(NMI_MCE_SOFTIRQ, nmi_mce_softirq);
}
214 | | |
215 | | int pv_raise_interrupt(struct vcpu *v, uint8_t vector) |
216 | 0 | { |
217 | 0 | struct softirq_trap *st = &per_cpu(softirq_trap, smp_processor_id()); |
218 | 0 |
|
219 | 0 | switch ( vector ) |
220 | 0 | { |
221 | 0 | case TRAP_nmi: |
222 | 0 | if ( cmpxchgptr(&st->vcpu, NULL, v) ) |
223 | 0 | return -EBUSY; |
224 | 0 | if ( !test_and_set_bool(v->nmi_pending) ) |
225 | 0 | { |
226 | 0 | st->domain = v->domain; |
227 | 0 | st->processor = v->processor; |
228 | 0 |
|
229 | 0 | /* Not safe to wake up a vcpu here */ |
230 | 0 | raise_softirq(NMI_MCE_SOFTIRQ); |
231 | 0 | return 0; |
232 | 0 | } |
233 | 0 | st->vcpu = NULL; |
234 | 0 | break; |
235 | 0 |
|
236 | 0 | case TRAP_machine_check: |
237 | 0 | if ( cmpxchgptr(&st->vcpu, NULL, v) ) |
238 | 0 | return -EBUSY; |
239 | 0 |
|
240 | 0 | /* |
241 | 0 | * We are called by the machine check (exception or polling) handlers |
242 | 0 | * on the physical CPU that reported a machine check error. |
243 | 0 | */ |
244 | 0 | if ( !test_and_set_bool(v->mce_pending) ) |
245 | 0 | { |
246 | 0 | st->domain = v->domain; |
247 | 0 | st->processor = v->processor; |
248 | 0 |
|
249 | 0 | /* not safe to wake up a vcpu here */ |
250 | 0 | raise_softirq(NMI_MCE_SOFTIRQ); |
251 | 0 | return 0; |
252 | 0 | } |
253 | 0 | st->vcpu = NULL; |
254 | 0 | break; |
255 | 0 | } |
256 | 0 |
|
257 | 0 | /* Delivery failed */ |
258 | 0 | return -EIO; |
259 | 0 | } |
260 | | |
261 | | /* |
262 | | * Local variables: |
263 | | * mode: C |
264 | | * c-file-style: "BSD" |
265 | | * c-basic-offset: 4 |
266 | | * tab-width: 4 |
267 | | * indent-tabs-mode: nil |
268 | | * End: |
269 | | */ |