/root/src/xen/xen/arch/x86/vm_event.c
/*
 * arch/x86/vm_event.c
 *
 * Architecture-specific vm_event handling routines
 *
 * Copyright (c) 2015 Tamas K Lengyel (tamas@tklengyel.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/sched.h>
#include <xen/mem_access.h>
#include <asm/vm_event.h>

/*
 * Allocate the per-vCPU vm_event state for every vCPU of the domain,
 * skipping vCPUs that already have it.  Implicitly serialized by the
 * domctl lock.
 */
int vm_event_init_domain(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
    {
        if ( v->arch.vm_event )
            continue;

        v->arch.vm_event = xzalloc(struct arch_vm_event);

        if ( !v->arch.vm_event )
            return -ENOMEM;
    }

    return 0;
}

/*
 * Free the per-vCPU vm_event state and reset the per-domain emulation
 * setting.  Implicitly serialized by the domctl lock, or used on
 * domain cleanup paths only.
 */
void vm_event_cleanup_domain(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
    {
        xfree(v->arch.vm_event);
        v->arch.vm_event = NULL;
    }

    d->arch.mem_access_emulate_each_rep = 0;
}

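/*
 * Act on VM_EVENT_FLAG_TOGGLE_SINGLESTEP in a vm_event response: toggle
 * singlestepping on the vCPU.  Only HVM domains are supported, and the
 * vCPU is expected to be paused while the response is handled.
 */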
void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
                                vm_event_response_t *rsp)
{
    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP) )
        return;

    if ( !is_hvm_domain(d) )
        return;

    ASSERT(atomic_read(&v->vm_event_pause_count));

    hvm_toggle_singlestep(v);
}

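/*
 * Act on VM_EVENT_FLAG_DENY in a vm_event response: cancel the pending
 * MSR or control register write which triggered the event, so the
 * guest-visible value is left unchanged.
 */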
void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
{
    if ( rsp->flags & VM_EVENT_FLAG_DENY )
    {
        struct monitor_write_data *w;

        ASSERT(v->arch.vm_event);

        /* The deny flag requires the vCPU to be paused. */
        if ( !atomic_read(&v->vm_event_pause_count) )
            return;

        w = &v->arch.vm_event->write_data;

        switch ( rsp->reason )
        {
        case VM_EVENT_REASON_MOV_TO_MSR:
            w->do_write.msr = 0;
            break;
        case VM_EVENT_REASON_WRITE_CTRLREG:
            switch ( rsp->u.write_ctrlreg.index )
            {
            case VM_EVENT_X86_CR0:
                w->do_write.cr0 = 0;
                break;
            case VM_EVENT_X86_CR3:
                w->do_write.cr3 = 0;
                break;
            case VM_EVENT_X86_CR4:
                w->do_write.cr4 = 0;
                break;
            }
            break;
        }
    }
}

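/*
 * Stash the general purpose registers supplied in a vm_event response;
 * the set_gprs flag marks them for application when the vCPU is
 * resumed.
 */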
void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
{
    ASSERT(atomic_read(&v->vm_event_pause_count));

    v->arch.vm_event->gprs = rsp->data.regs.x86;
    v->arch.vm_event->set_gprs = true;
}

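/* Request that a monitor event be raised for the next interrupt on this vCPU. */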
void vm_event_monitor_next_interrupt(struct vcpu *v)
{
    v->arch.monitor.next_interrupt_enabled = true;
}

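/*
 * Fill the register block of a vm_event request from the current vCPU:
 * GPRs from the active guest frame, control register and MSR state from
 * the saved HVM context, and segment bases via hvm_get_segment_register().
 */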
void vm_event_fill_regs(vm_event_request_t *req)
{
    const struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct segment_register seg;
    struct hvm_hw_cpu ctxt;
    struct vcpu *curr = current;

    ASSERT(is_hvm_vcpu(curr));

    /* Architecture-specific vmcs/vmcb bits */
    hvm_funcs.save_cpu_ctxt(curr, &ctxt);

    req->data.regs.x86.rax = regs->rax;
    req->data.regs.x86.rcx = regs->rcx;
    req->data.regs.x86.rdx = regs->rdx;
    req->data.regs.x86.rbx = regs->rbx;
    req->data.regs.x86.rsp = regs->rsp;
    req->data.regs.x86.rbp = regs->rbp;
    req->data.regs.x86.rsi = regs->rsi;
    req->data.regs.x86.rdi = regs->rdi;

    req->data.regs.x86.r8  = regs->r8;
    req->data.regs.x86.r9  = regs->r9;
    req->data.regs.x86.r10 = regs->r10;
    req->data.regs.x86.r11 = regs->r11;
    req->data.regs.x86.r12 = regs->r12;
    req->data.regs.x86.r13 = regs->r13;
    req->data.regs.x86.r14 = regs->r14;
    req->data.regs.x86.r15 = regs->r15;

    req->data.regs.x86.rflags = regs->rflags;
    req->data.regs.x86.rip    = regs->rip;

    req->data.regs.x86.dr7 = curr->arch.debugreg[7];
    req->data.regs.x86.cr0 = ctxt.cr0;
    req->data.regs.x86.cr2 = ctxt.cr2;
    req->data.regs.x86.cr3 = ctxt.cr3;
    req->data.regs.x86.cr4 = ctxt.cr4;

    req->data.regs.x86.sysenter_cs = ctxt.sysenter_cs;
    req->data.regs.x86.sysenter_esp = ctxt.sysenter_esp;
    req->data.regs.x86.sysenter_eip = ctxt.sysenter_eip;

    req->data.regs.x86.msr_efer = ctxt.msr_efer;
    req->data.regs.x86.msr_star = ctxt.msr_star;
    req->data.regs.x86.msr_lstar = ctxt.msr_lstar;

    hvm_get_segment_register(curr, x86_seg_fs, &seg);
    req->data.regs.x86.fs_base = seg.base;

    hvm_get_segment_register(curr, x86_seg_gs, &seg);
    req->data.regs.x86.gs_base = seg.base;

    hvm_get_segment_register(curr, x86_seg_cs, &seg);
    req->data.regs.x86.cs_arbytes = seg.attr;
}

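/*
 * Decide, based on a vm_event response, whether the faulting instruction
 * should be emulated on resume, and latch any emulation data supplied
 * with the response.
 */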
void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
{
    if ( !(rsp->flags & VM_EVENT_FLAG_EMULATE) )
    {
        v->arch.vm_event->emulate_flags = 0;
        return;
    }

    switch ( rsp->reason )
    {
    case VM_EVENT_REASON_MEM_ACCESS:
        /*
         * Emulate iff this is a response to a mem_access violation and
         * there are still conflicting mem_access permissions in place.
         */
        if ( p2m_mem_access_emulate_check(v, rsp) )
        {
            if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
                v->arch.vm_event->emul.read = rsp->data.emul.read;

            v->arch.vm_event->emulate_flags = rsp->flags;
        }
        break;

    case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
        if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_INSN_DATA )
        {
            v->arch.vm_event->emul.insn = rsp->data.emul.insn;
            v->arch.vm_event->emulate_flags = rsp->flags;
        }
        break;

    case VM_EVENT_REASON_DESCRIPTOR_ACCESS:
        if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
            v->arch.vm_event->emul.read = rsp->data.emul.read;
        v->arch.vm_event->emulate_flags = rsp->flags;
        break;

    default:
        break;
    }
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */