/root/src/xen/xen/arch/x86/hvm/dm.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2016 Citrix Systems Inc. |
3 | | * |
4 | | * This program is free software; you can redistribute it and/or modify it |
5 | | * under the terms and conditions of the GNU General Public License, |
6 | | * version 2, as published by the Free Software Foundation. |
7 | | * |
8 | | * This program is distributed in the hope it will be useful, but WITHOUT |
9 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
10 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
11 | | * more details. |
12 | | * |
13 | | * You should have received a copy of the GNU General Public License along with |
14 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
15 | | */ |
16 | | |
17 | | #include <xen/event.h> |
18 | | #include <xen/guest_access.h> |
19 | | #include <xen/hypercall.h> |
20 | | #include <xen/sched.h> |
21 | | |
22 | | #include <asm/hap.h> |
23 | | #include <asm/hvm/ioreq.h> |
24 | | #include <asm/shadow.h> |
25 | | |
26 | | #include <xsm/xsm.h> |
27 | | |
28 | | struct dmop_args { |
29 | | domid_t domid; |
30 | | unsigned int nr_bufs; |
31 | | /* Reserve enough buf elements for all current hypercalls. */ |
32 | | struct xen_dm_op_buf buf[2]; |
33 | | }; |
34 | | |
35 | | static bool _raw_copy_from_guest_buf_offset(void *dst, |
36 | | const struct dmop_args *args, |
37 | | unsigned int buf_idx, |
38 | | size_t offset_bytes, |
39 | | size_t dst_bytes) |
40 | 0 | { |
41 | 0 | size_t buf_bytes; |
42 | 0 |
43 | 0 | if ( buf_idx >= args->nr_bufs ) |
44 | 0 | return false; |
45 | 0 |
46 | 0 | buf_bytes = args->buf[buf_idx].size; |
47 | 0 |
48 | 0 | if ( (offset_bytes + dst_bytes) < offset_bytes || |
49 | 0 | (offset_bytes + dst_bytes) > buf_bytes ) |
50 | 0 | return false; |
51 | 0 |
52 | 0 | return !copy_from_guest_offset(dst, args->buf[buf_idx].h, |
53 | 0 | offset_bytes, dst_bytes); |
54 | 0 | } |
55 | | |
56 | | static bool _raw_copy_to_guest_buf_offset(const struct dmop_args *args, |
57 | | unsigned int buf_idx, |
58 | | size_t offset_bytes, |
59 | | const void *src, |
60 | | size_t src_bytes) |
61 | 0 | { |
62 | 0 | size_t buf_bytes; |
63 | 0 |
64 | 0 | if ( buf_idx >= args->nr_bufs ) |
65 | 0 | return false; |
66 | 0 |
67 | 0 | buf_bytes = args->buf[buf_idx].size; |
68 | 0 |
69 | 0 |
70 | 0 | if ( (offset_bytes + src_bytes) < offset_bytes || |
71 | 0 | (offset_bytes + src_bytes) > buf_bytes ) |
72 | 0 | return false; |
73 | 0 |
74 | 0 | return !copy_to_guest_offset(args->buf[buf_idx].h, offset_bytes, |
75 | 0 | src, src_bytes); |
76 | 0 | } |
77 | | |
78 | | #define COPY_FROM_GUEST_BUF_OFFSET(dst, bufs, buf_idx, offset_bytes) \ |
79 | 0 | _raw_copy_from_guest_buf_offset(&(dst), bufs, buf_idx, offset_bytes, \ |
80 | 0 | sizeof(dst)) |
81 | | |
82 | | #define COPY_TO_GUEST_BUF_OFFSET(bufs, buf_idx, offset_bytes, src) \ |
83 | 0 | _raw_copy_to_guest_buf_offset(bufs, buf_idx, offset_bytes, \ |
84 | 0 | &(src), sizeof(src)) |
85 | | |
86 | | #define COPY_FROM_GUEST_BUF(dst, bufs, buf_idx) \ |
87 | 0 | COPY_FROM_GUEST_BUF_OFFSET(dst, bufs, buf_idx, 0) |
88 | | |
89 | | #define COPY_TO_GUEST_BUF(bufs, buf_idx, src) \ |
90 | 0 | COPY_TO_GUEST_BUF_OFFSET(bufs, buf_idx, 0, src) |
91 | | |
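The two _raw_copy_* helpers above bound every guest access by the size the guest itself supplied in xen_dm_op_buf, rejecting both an out-of-range request and a size_t wrap-around of offset + length. A minimal standalone sketch of that check (not Xen code; plain C for illustration):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Reject accesses that overflow size_t or run past the buffer end. */
    static bool range_ok(size_t buf_size, size_t offset, size_t len)
    {
        return !(offset + len < offset ||   /* wrap-around */
                 offset + len > buf_size);  /* out of bounds */
    }

    int main(void)
    {
        assert(range_ok(16, 8, 8));           /* exactly fits */
        assert(!range_ok(16, 8, 9));          /* runs past the end */
        assert(!range_ok(16, (size_t)-4, 8)); /* offset + len wraps */
        return 0;
    }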
92 | | static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn, |
93 | | unsigned int nr, const struct xen_dm_op_buf *buf) |
94 | 0 | { |
95 | 0 | if ( nr > (GB(1) >> PAGE_SHIFT) ) |
96 | 0 | return -EINVAL; |
97 | 0 |
98 | 0 | if ( d->is_dying ) |
99 | 0 | return -ESRCH; |
100 | 0 |
101 | 0 | if ( !d->max_vcpus || !d->vcpu[0] ) |
102 | 0 | return -EINVAL; |
103 | 0 |
104 | 0 | if ( ((nr + 7) / 8) > buf->size ) |
105 | 0 | return -EINVAL; |
106 | 0 |
107 | 0 | return shadow_mode_enabled(d) ? |
108 | 0 | shadow_track_dirty_vram(d, first_pfn, nr, buf->h) : |
109 | 0 | hap_track_dirty_vram(d, first_pfn, nr, buf->h); |
110 | 0 | } |
111 | | |
112 | | static int set_pci_intx_level(struct domain *d, uint16_t domain, |
113 | | uint8_t bus, uint8_t device, |
114 | | uint8_t intx, uint8_t level) |
115 | 0 | { |
116 | 0 | if ( domain != 0 || bus != 0 || device > 0x1f || intx > 3 ) |
117 | 0 | return -EINVAL; |
118 | 0 |
119 | 0 | switch ( level ) |
120 | 0 | { |
121 | 0 | case 0: |
122 | 0 | hvm_pci_intx_deassert(d, device, intx); |
123 | 0 | break; |
124 | 0 | case 1: |
125 | 0 | hvm_pci_intx_assert(d, device, intx); |
126 | 0 | break; |
127 | 0 | default: |
128 | 0 | return -EINVAL; |
129 | 0 | } |
130 | 0 |
131 | 0 | return 0; |
132 | 0 | } |
133 | | |
134 | | static int set_isa_irq_level(struct domain *d, uint8_t isa_irq, |
135 | | uint8_t level) |
136 | 0 | { |
137 | 0 | if ( isa_irq > 15 ) |
138 | 0 | return -EINVAL; |
139 | 0 |
140 | 0 | switch ( level ) |
141 | 0 | { |
142 | 0 | case 0: |
143 | 0 | hvm_isa_irq_deassert(d, isa_irq); |
144 | 0 | break; |
145 | 0 | case 1: |
146 | 0 | hvm_isa_irq_assert(d, isa_irq); |
147 | 0 | break; |
148 | 0 | default: |
149 | 0 | return -EINVAL; |
150 | 0 | } |
151 | 0 |
152 | 0 | return 0; |
153 | 0 | } |
154 | | |
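For context, the usual caller of these two level setters is an external device model going through libxendevicemodel rather than issuing the raw hypercall. A caller-side sketch, assuming the xendevicemodel_open / xendevicemodel_set_isa_irq_level / xendevicemodel_close wrappers with their usual prototypes (check xendevicemodel.h before relying on the exact signatures):

    #include <stdint.h>
    #include <xendevicemodel.h>

    /* Pulse ISA IRQ <irq> in domain <domid>: assert, then deassert. */
    static int pulse_isa_irq(domid_t domid, uint8_t irq)
    {
        xendevicemodel_handle *dmod = xendevicemodel_open(NULL, 0);
        int rc;

        if ( !dmod )
            return -1;

        rc = xendevicemodel_set_isa_irq_level(dmod, domid, irq, 1);
        if ( !rc )
            rc = xendevicemodel_set_isa_irq_level(dmod, domid, irq, 0);

        xendevicemodel_close(dmod);
        return rc;
    }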
155 | | static int modified_memory(struct domain *d, |
156 | | const struct dmop_args *bufs, |
157 | | struct xen_dm_op_modified_memory *header) |
158 | 0 | { |
159 | 0 | #define EXTENTS_BUFFER 1 |
160 | 0 |
161 | 0 | /* Process maximum of 256 pfns before checking for continuation. */ |
162 | 0 | const unsigned int cont_check_interval = 0x100; |
163 | 0 | unsigned int *rem_extents = &header->nr_extents; |
164 | 0 | unsigned int batch_rem_pfns = cont_check_interval; |
165 | 0 | /* Used for continuation. */ |
166 | 0 | unsigned int *pfns_done = &header->opaque; |
167 | 0 |
168 | 0 | if ( !paging_mode_log_dirty(d) ) |
169 | 0 | return 0; |
170 | 0 |
171 | 0 | if ( (bufs->buf[EXTENTS_BUFFER].size / |
172 | 0 | sizeof(struct xen_dm_op_modified_memory_extent)) < |
173 | 0 | *rem_extents ) |
174 | 0 | return -EINVAL; |
175 | 0 |
176 | 0 | while ( *rem_extents > 0 ) |
177 | 0 | { |
178 | 0 | struct xen_dm_op_modified_memory_extent extent; |
179 | 0 | unsigned int batch_nr; |
180 | 0 | xen_pfn_t pfn, end_pfn; |
181 | 0 |
182 | 0 | if ( !COPY_FROM_GUEST_BUF_OFFSET(extent, bufs, EXTENTS_BUFFER, |
183 | 0 | (*rem_extents - 1) * sizeof(extent)) ) |
184 | 0 | return -EFAULT; |
185 | 0 |
186 | 0 | if ( extent.pad ) |
187 | 0 | return -EINVAL; |
188 | 0 |
189 | 0 | end_pfn = extent.first_pfn + extent.nr; |
190 | 0 |
191 | 0 | if ( end_pfn <= extent.first_pfn || |
192 | 0 | end_pfn > domain_get_maximum_gpfn(d) ) |
193 | 0 | return -EINVAL; |
194 | 0 |
195 | 0 | if ( *pfns_done >= extent.nr ) |
196 | 0 | return -EINVAL; |
197 | 0 |
198 | 0 | pfn = extent.first_pfn + *pfns_done; |
199 | 0 | batch_nr = extent.nr - *pfns_done; |
200 | 0 |
201 | 0 | if ( batch_nr > batch_rem_pfns ) |
202 | 0 | { |
203 | 0 | batch_nr = batch_rem_pfns; |
204 | 0 | *pfns_done += batch_nr; |
205 | 0 | end_pfn = pfn + batch_nr; |
206 | 0 | } |
207 | 0 | else |
208 | 0 | { |
209 | 0 | (*rem_extents)--; |
210 | 0 | *pfns_done = 0; |
211 | 0 | } |
212 | 0 |
213 | 0 | batch_rem_pfns -= batch_nr; |
214 | 0 |
215 | 0 | for ( ; pfn < end_pfn; pfn++ ) |
216 | 0 | { |
217 | 0 | struct page_info *page; |
218 | 0 |
219 | 0 | page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE); |
220 | 0 | if ( page ) |
221 | 0 | { |
222 | 0 | mfn_t gmfn = _mfn(page_to_mfn(page)); |
223 | 0 |
224 | 0 | paging_mark_dirty(d, gmfn); |
225 | 0 | /* |
226 | 0 | * These are most probably not page tables any more, so |
227 | 0 | * don't take a long time and don't die either. |
228 | 0 | */ |
229 | 0 | sh_remove_shadows(d, gmfn, 1, 0); |
230 | 0 | put_page(page); |
231 | 0 | } |
232 | 0 | } |
233 | 0 |
234 | 0 | /* |
235 | 0 | * After a full batch of cont_check_interval pfns |
236 | 0 | * have been processed, and there are still extents |
237 | 0 | * remaining to process, check for continuation. |
238 | 0 | */ |
239 | 0 | if ( (batch_rem_pfns == 0) && (*rem_extents > 0) ) |
240 | 0 | { |
241 | 0 | if ( hypercall_preempt_check() ) |
242 | 0 | return -ERESTART; |
243 | 0 |
244 | 0 | batch_rem_pfns = cont_check_interval; |
245 | 0 | } |
246 | 0 | } |
247 | 0 | return 0; |
248 | 0 |
249 | 0 | #undef EXTENTS_BUFFER |
250 | 0 | } |
251 | | |
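modified_memory() is also a compact example of Xen's hypercall-continuation idiom: do at most a fixed batch of work, keep the progress cursor in state the caller hands back on retry (header->opaque here), and return -ERESTART when preemption is wanted. A generic sketch of the same shape (not Xen code; preempt_wanted() and process_one() are stand-ins for hypercall_preempt_check() and the per-pfn work, and -1 stands in for -ERESTART):

    #define BATCH 256

    struct batch_state {
        unsigned long next;   /* progress cursor, like header->opaque */
        unsigned long end;
    };

    extern int preempt_wanted(void);            /* assumed helper */
    extern void process_one(unsigned long idx); /* assumed per-item work */

    static int run_batches(struct batch_state *s)
    {
        while ( s->next < s->end )
        {
            unsigned long stop = s->next + BATCH;

            if ( stop > s->end )
                stop = s->end;

            for ( ; s->next < stop; s->next++ )
                process_one(s->next);

            /* More work left: offer to be restarted rather than hog the CPU. */
            if ( s->next < s->end && preempt_wanted() )
                return -1;
        }
        return 0;
    }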
252 | | static bool allow_p2m_type_change(p2m_type_t old, p2m_type_t new) |
253 | 0 | { |
254 | 0 | if ( new == p2m_ioreq_server ) |
255 | 0 | return old == p2m_ram_rw; |
256 | 0 |
257 | 0 | if ( old == p2m_ioreq_server ) |
258 | 0 | return new == p2m_ram_rw; |
259 | 0 |
260 | 0 | return p2m_is_ram(old) || |
261 | 0 | (p2m_is_hole(old) && new == p2m_mmio_dm); |
262 | 0 | } |
263 | | |
264 | | static int set_mem_type(struct domain *d, |
265 | | struct xen_dm_op_set_mem_type *data) |
266 | 0 | { |
267 | 0 | xen_pfn_t last_pfn = data->first_pfn + data->nr - 1; |
268 | 0 | unsigned int iter = 0; |
269 | 0 | int rc = 0; |
270 | 0 |
271 | 0 | /* Interface types to internal p2m types */ |
272 | 0 | static const p2m_type_t memtype[] = { |
273 | 0 | [HVMMEM_ram_rw] = p2m_ram_rw, |
274 | 0 | [HVMMEM_ram_ro] = p2m_ram_ro, |
275 | 0 | [HVMMEM_mmio_dm] = p2m_mmio_dm, |
276 | 0 | [HVMMEM_unused] = p2m_invalid, |
277 | 0 | [HVMMEM_ioreq_server] = p2m_ioreq_server, |
278 | 0 | }; |
279 | 0 |
280 | 0 | if ( (data->first_pfn > last_pfn) || |
281 | 0 | (last_pfn > domain_get_maximum_gpfn(d)) ) |
282 | 0 | return -EINVAL; |
283 | 0 |
284 | 0 | if ( data->mem_type >= ARRAY_SIZE(memtype) || |
285 | 0 | unlikely(data->mem_type == HVMMEM_unused) ) |
286 | 0 | return -EINVAL; |
287 | 0 |
288 | 0 | if ( data->mem_type == HVMMEM_ioreq_server ) |
289 | 0 | { |
290 | 0 | unsigned int flags; |
291 | 0 |
292 | 0 | if ( !hap_enabled(d) ) |
293 | 0 | return -EOPNOTSUPP; |
294 | 0 |
295 | 0 | /* Do not change to HVMMEM_ioreq_server if no ioreq server mapped. */ |
296 | 0 | if ( !p2m_get_ioreq_server(d, &flags) ) |
297 | 0 | return -EINVAL; |
298 | 0 | } |
299 | 0 |
300 | 0 | while ( iter < data->nr ) |
301 | 0 | { |
302 | 0 | unsigned long pfn = data->first_pfn + iter; |
303 | 0 | p2m_type_t t; |
304 | 0 |
305 | 0 | get_gfn_unshare(d, pfn, &t); |
306 | 0 | if ( p2m_is_paging(t) ) |
307 | 0 | { |
308 | 0 | put_gfn(d, pfn); |
309 | 0 | p2m_mem_paging_populate(d, pfn); |
310 | 0 | return -EAGAIN; |
311 | 0 | } |
312 | 0 |
313 | 0 | if ( p2m_is_shared(t) ) |
314 | 0 | rc = -EAGAIN; |
315 | 0 | else if ( !allow_p2m_type_change(t, memtype[data->mem_type]) ) |
316 | 0 | rc = -EINVAL; |
317 | 0 | else |
318 | 0 | rc = p2m_change_type_one(d, pfn, t, memtype[data->mem_type]); |
319 | 0 |
320 | 0 | put_gfn(d, pfn); |
321 | 0 |
322 | 0 | if ( rc ) |
323 | 0 | break; |
324 | 0 |
325 | 0 | iter++; |
326 | 0 |
327 | 0 | /* |
328 | 0 | * Check for continuation every 256th iteration and if the |
329 | 0 | * iteration is not the last. |
330 | 0 | */ |
331 | 0 | if ( (iter < data->nr) && ((iter & 0xff) == 0) && |
332 | 0 | hypercall_preempt_check() ) |
333 | 0 | { |
334 | 0 | data->first_pfn += iter; |
335 | 0 | data->nr -= iter; |
336 | 0 |
337 | 0 | rc = -ERESTART; |
338 | 0 | break; |
339 | 0 | } |
340 | 0 | } |
341 | 0 |
342 | 0 | return rc; |
343 | 0 | } |
344 | | |
345 | | static int inject_event(struct domain *d, |
346 | | const struct xen_dm_op_inject_event *data) |
347 | 0 | { |
348 | 0 | struct vcpu *v; |
349 | 0 |
350 | 0 | if ( data->vcpuid >= d->max_vcpus || !(v = d->vcpu[data->vcpuid]) ) |
351 | 0 | return -EINVAL; |
352 | 0 |
353 | 0 | if ( cmpxchg(&v->arch.hvm_vcpu.inject_event.vector, |
354 | 0 | HVM_EVENT_VECTOR_UNSET, HVM_EVENT_VECTOR_UPDATING) != |
355 | 0 | HVM_EVENT_VECTOR_UNSET ) |
356 | 0 | return -EBUSY; |
357 | 0 |
358 | 0 | v->arch.hvm_vcpu.inject_event.type = data->type; |
359 | 0 | v->arch.hvm_vcpu.inject_event.insn_len = data->insn_len; |
360 | 0 | v->arch.hvm_vcpu.inject_event.error_code = data->error_code; |
361 | 0 | v->arch.hvm_vcpu.inject_event.cr2 = data->cr2; |
362 | 0 | smp_wmb(); |
363 | 0 | v->arch.hvm_vcpu.inject_event.vector = data->vector; |
364 | 0 |
365 | 0 | return 0; |
366 | 0 | } |
367 | | |
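inject_event() claims the per-vCPU slot with cmpxchg(), fills the payload, and only publishes the vector after smp_wmb(), so a consumer never observes a vector paired with stale payload fields. The same claim/fill/publish sequence expressed with portable C11 atomics (a sketch, not Xen code; the sentinel values are illustrative, not the HVM_EVENT_VECTOR_* constants):

    #include <stdatomic.h>
    #include <stdint.h>

    #define VECTOR_UNSET    (-1)
    #define VECTOR_UPDATING (-2)

    struct pending_event {
        _Atomic int vector;            /* published last */
        uint32_t type, error_code;
        uint64_t cr2;
    };

    /* Returns 0 on success, -1 if an event is already pending or in flight. */
    static int publish_event(struct pending_event *ev, int vector,
                             uint32_t type, uint32_t error_code, uint64_t cr2)
    {
        int expected = VECTOR_UNSET;

        /* Claim the slot; fail if another writer or a pending event owns it. */
        if ( !atomic_compare_exchange_strong(&ev->vector, &expected,
                                             VECTOR_UPDATING) )
            return -1;

        ev->type = type;
        ev->error_code = error_code;
        ev->cr2 = cr2;

        /* The release store orders the payload before the new vector becomes
         * visible, playing the role of smp_wmb() in the hypervisor code. */
        atomic_store_explicit(&ev->vector, vector, memory_order_release);
        return 0;
    }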
368 | | static int dm_op(const struct dmop_args *op_args) |
369 | 0 | { |
370 | 0 | struct domain *d; |
371 | 0 | struct xen_dm_op op; |
372 | 0 | bool const_op = true; |
373 | 0 | long rc; |
374 | 0 |
375 | 0 | rc = rcu_lock_remote_domain_by_id(op_args->domid, &d); |
376 | 0 | if ( rc ) |
377 | 0 | return rc; |
378 | 0 |
379 | 0 | if ( !is_hvm_domain(d) ) |
380 | 0 | goto out; |
381 | 0 |
382 | 0 | rc = xsm_dm_op(XSM_DM_PRIV, d); |
383 | 0 | if ( rc ) |
384 | 0 | goto out; |
385 | 0 |
386 | 0 | if ( !COPY_FROM_GUEST_BUF(op, op_args, 0) ) |
387 | 0 | { |
388 | 0 | rc = -EFAULT; |
389 | 0 | goto out; |
390 | 0 | } |
391 | 0 |
392 | 0 | rc = -EINVAL; |
393 | 0 | if ( op.pad ) |
394 | 0 | goto out; |
395 | 0 |
396 | 0 | switch ( op.op ) |
397 | 0 | { |
398 | 0 | case XEN_DMOP_create_ioreq_server: |
399 | 0 | { |
400 | 0 | struct domain *curr_d = current->domain; |
401 | 0 | struct xen_dm_op_create_ioreq_server *data = |
402 | 0 | &op.u.create_ioreq_server; |
403 | 0 |
404 | 0 | const_op = false; |
405 | 0 |
406 | 0 | rc = -EINVAL; |
407 | 0 | if ( data->pad[0] || data->pad[1] || data->pad[2] ) |
408 | 0 | break; |
409 | 0 |
410 | 0 | rc = hvm_create_ioreq_server(d, curr_d->domain_id, false, |
411 | 0 | data->handle_bufioreq, &data->id); |
412 | 0 | break; |
413 | 0 | } |
414 | 0 |
415 | 0 | case XEN_DMOP_get_ioreq_server_info: |
416 | 0 | { |
417 | 0 | struct xen_dm_op_get_ioreq_server_info *data = |
418 | 0 | &op.u.get_ioreq_server_info; |
419 | 0 |
420 | 0 | const_op = false; |
421 | 0 |
422 | 0 | rc = -EINVAL; |
423 | 0 | if ( data->pad ) |
424 | 0 | break; |
425 | 0 |
426 | 0 | rc = hvm_get_ioreq_server_info(d, data->id, |
427 | 0 | &data->ioreq_gfn, |
428 | 0 | &data->bufioreq_gfn, |
429 | 0 | &data->bufioreq_port); |
430 | 0 | break; |
431 | 0 | } |
432 | 0 |
433 | 0 | case XEN_DMOP_map_io_range_to_ioreq_server: |
434 | 0 | { |
435 | 0 | const struct xen_dm_op_ioreq_server_range *data = |
436 | 0 | &op.u.map_io_range_to_ioreq_server; |
437 | 0 |
438 | 0 | rc = -EINVAL; |
439 | 0 | if ( data->pad ) |
440 | 0 | break; |
441 | 0 |
442 | 0 | rc = hvm_map_io_range_to_ioreq_server(d, data->id, data->type, |
443 | 0 | data->start, data->end); |
444 | 0 | break; |
445 | 0 | } |
446 | 0 |
447 | 0 | case XEN_DMOP_unmap_io_range_from_ioreq_server: |
448 | 0 | { |
449 | 0 | const struct xen_dm_op_ioreq_server_range *data = |
450 | 0 | &op.u.unmap_io_range_from_ioreq_server; |
451 | 0 |
452 | 0 | rc = -EINVAL; |
453 | 0 | if ( data->pad ) |
454 | 0 | break; |
455 | 0 |
456 | 0 | rc = hvm_unmap_io_range_from_ioreq_server(d, data->id, data->type, |
457 | 0 | data->start, data->end); |
458 | 0 | break; |
459 | 0 | } |
460 | 0 |
461 | 0 | case XEN_DMOP_map_mem_type_to_ioreq_server: |
462 | 0 | { |
463 | 0 | struct xen_dm_op_map_mem_type_to_ioreq_server *data = |
464 | 0 | &op.u.map_mem_type_to_ioreq_server; |
465 | 0 | unsigned long first_gfn = data->opaque; |
466 | 0 |
467 | 0 | const_op = false; |
468 | 0 |
469 | 0 | rc = -EOPNOTSUPP; |
470 | 0 | if ( !hap_enabled(d) ) |
471 | 0 | break; |
472 | 0 |
473 | 0 | if ( first_gfn == 0 ) |
474 | 0 | rc = hvm_map_mem_type_to_ioreq_server(d, data->id, |
475 | 0 | data->type, data->flags); |
476 | 0 | else |
477 | 0 | rc = 0; |
478 | 0 |
479 | 0 | /* |
480 | 0 | * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server, |
481 | 0 | * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw. |
482 | 0 | */ |
483 | 0 | if ( rc == 0 && data->flags == 0 ) |
484 | 0 | { |
485 | 0 | struct p2m_domain *p2m = p2m_get_hostp2m(d); |
486 | 0 |
487 | 0 | while ( read_atomic(&p2m->ioreq.entry_count) && |
488 | 0 | first_gfn <= p2m->max_mapped_pfn ) |
489 | 0 | { |
490 | 0 | /* Iterate p2m table for 256 gfns each time. */ |
491 | 0 | rc = p2m_finish_type_change(d, _gfn(first_gfn), 256); |
492 | 0 | if ( rc < 0 ) |
493 | 0 | break; |
494 | 0 |
495 | 0 | first_gfn += 256; |
496 | 0 |
497 | 0 | /* Check for continuation if it's not the last iteration. */ |
498 | 0 | if ( first_gfn <= p2m->max_mapped_pfn && |
499 | 0 | hypercall_preempt_check() ) |
500 | 0 | { |
501 | 0 | rc = -ERESTART; |
502 | 0 | data->opaque = first_gfn; |
503 | 0 | break; |
504 | 0 | } |
505 | 0 | } |
506 | 0 | } |
507 | 0 |
508 | 0 | break; |
509 | 0 | } |
510 | 0 |
511 | 0 | case XEN_DMOP_set_ioreq_server_state: |
512 | 0 | { |
513 | 0 | const struct xen_dm_op_set_ioreq_server_state *data = |
514 | 0 | &op.u.set_ioreq_server_state; |
515 | 0 |
516 | 0 | rc = -EINVAL; |
517 | 0 | if ( data->pad ) |
518 | 0 | break; |
519 | 0 |
520 | 0 | rc = hvm_set_ioreq_server_state(d, data->id, !!data->enabled); |
521 | 0 | break; |
522 | 0 | } |
523 | 0 |
524 | 0 | case XEN_DMOP_destroy_ioreq_server: |
525 | 0 | { |
526 | 0 | const struct xen_dm_op_destroy_ioreq_server *data = |
527 | 0 | &op.u.destroy_ioreq_server; |
528 | 0 |
529 | 0 | rc = -EINVAL; |
530 | 0 | if ( data->pad ) |
531 | 0 | break; |
532 | 0 |
533 | 0 | rc = hvm_destroy_ioreq_server(d, data->id); |
534 | 0 | break; |
535 | 0 | } |
536 | 0 |
537 | 0 | case XEN_DMOP_track_dirty_vram: |
538 | 0 | { |
539 | 0 | const struct xen_dm_op_track_dirty_vram *data = |
540 | 0 | &op.u.track_dirty_vram; |
541 | 0 |
542 | 0 | rc = -EINVAL; |
543 | 0 | if ( data->pad ) |
544 | 0 | break; |
545 | 0 |
546 | 0 | if ( op_args->nr_bufs < 2 ) |
547 | 0 | break; |
548 | 0 |
549 | 0 | rc = track_dirty_vram(d, data->first_pfn, data->nr, &op_args->buf[1]); |
550 | 0 | break; |
551 | 0 | } |
552 | 0 |
553 | 0 | case XEN_DMOP_set_pci_intx_level: |
554 | 0 | { |
555 | 0 | const struct xen_dm_op_set_pci_intx_level *data = |
556 | 0 | &op.u.set_pci_intx_level; |
557 | 0 |
558 | 0 | rc = set_pci_intx_level(d, data->domain, data->bus, |
559 | 0 | data->device, data->intx, |
560 | 0 | data->level); |
561 | 0 | break; |
562 | 0 | } |
563 | 0 |
564 | 0 | case XEN_DMOP_set_isa_irq_level: |
565 | 0 | { |
566 | 0 | const struct xen_dm_op_set_isa_irq_level *data = |
567 | 0 | &op.u.set_isa_irq_level; |
568 | 0 |
569 | 0 | rc = set_isa_irq_level(d, data->isa_irq, data->level); |
570 | 0 | break; |
571 | 0 | } |
572 | 0 |
573 | 0 | case XEN_DMOP_set_pci_link_route: |
574 | 0 | { |
575 | 0 | const struct xen_dm_op_set_pci_link_route *data = |
576 | 0 | &op.u.set_pci_link_route; |
577 | 0 |
578 | 0 | rc = hvm_set_pci_link_route(d, data->link, data->isa_irq); |
579 | 0 | break; |
580 | 0 | } |
581 | 0 |
582 | 0 | case XEN_DMOP_modified_memory: |
583 | 0 | { |
584 | 0 | struct xen_dm_op_modified_memory *data = |
585 | 0 | &op.u.modified_memory; |
586 | 0 |
587 | 0 | rc = modified_memory(d, op_args, data); |
588 | 0 | const_op = !rc; |
589 | 0 | break; |
590 | 0 | } |
591 | 0 |
592 | 0 | case XEN_DMOP_set_mem_type: |
593 | 0 | { |
594 | 0 | struct xen_dm_op_set_mem_type *data = |
595 | 0 | &op.u.set_mem_type; |
596 | 0 |
597 | 0 | const_op = false; |
598 | 0 |
599 | 0 | rc = -EINVAL; |
600 | 0 | if ( data->pad ) |
601 | 0 | break; |
602 | 0 |
603 | 0 | rc = set_mem_type(d, data); |
604 | 0 | break; |
605 | 0 | } |
606 | 0 |
607 | 0 | case XEN_DMOP_inject_event: |
608 | 0 | { |
609 | 0 | const struct xen_dm_op_inject_event *data = |
610 | 0 | &op.u.inject_event; |
611 | 0 |
612 | 0 | rc = -EINVAL; |
613 | 0 | if ( data->pad0 || data->pad1 ) |
614 | 0 | break; |
615 | 0 |
616 | 0 | rc = inject_event(d, data); |
617 | 0 | break; |
618 | 0 | } |
619 | 0 |
620 | 0 | case XEN_DMOP_inject_msi: |
621 | 0 | { |
622 | 0 | const struct xen_dm_op_inject_msi *data = |
623 | 0 | &op.u.inject_msi; |
624 | 0 |
625 | 0 | rc = -EINVAL; |
626 | 0 | if ( data->pad ) |
627 | 0 | break; |
628 | 0 |
629 | 0 | rc = hvm_inject_msi(d, data->addr, data->data); |
630 | 0 | break; |
631 | 0 | } |
632 | 0 |
633 | 0 | case XEN_DMOP_remote_shutdown: |
634 | 0 | { |
635 | 0 | const struct xen_dm_op_remote_shutdown *data = |
636 | 0 | &op.u.remote_shutdown; |
637 | 0 |
638 | 0 | domain_shutdown(d, data->reason); |
639 | 0 | rc = 0; |
640 | 0 | break; |
641 | 0 | } |
642 | 0 |
643 | 0 | default: |
644 | 0 | rc = -EOPNOTSUPP; |
645 | 0 | break; |
646 | 0 | } |
647 | 0 |
648 | 0 | if ( (!rc || rc == -ERESTART) && |
649 | 0 | !const_op && !COPY_TO_GUEST_BUF(op_args, 0, op) ) |
650 | 0 | rc = -EFAULT; |
651 | 0 |
652 | 0 | out: |
653 | 0 | rcu_unlock_domain(d); |
654 | 0 |
655 | 0 | return rc; |
656 | 0 | } |
657 | | |
658 | | CHECK_dm_op_create_ioreq_server; |
659 | | CHECK_dm_op_get_ioreq_server_info; |
660 | | CHECK_dm_op_ioreq_server_range; |
661 | | CHECK_dm_op_set_ioreq_server_state; |
662 | | CHECK_dm_op_destroy_ioreq_server; |
663 | | CHECK_dm_op_track_dirty_vram; |
664 | | CHECK_dm_op_set_pci_intx_level; |
665 | | CHECK_dm_op_set_isa_irq_level; |
666 | | CHECK_dm_op_set_pci_link_route; |
667 | | CHECK_dm_op_modified_memory; |
668 | | CHECK_dm_op_set_mem_type; |
669 | | CHECK_dm_op_inject_event; |
670 | | CHECK_dm_op_inject_msi; |
671 | | CHECK_dm_op_remote_shutdown; |
672 | | |
673 | | int compat_dm_op(domid_t domid, |
674 | | unsigned int nr_bufs, |
675 | | XEN_GUEST_HANDLE_PARAM(void) bufs) |
676 | 0 | { |
677 | 0 | struct dmop_args args; |
678 | 0 | unsigned int i; |
679 | 0 | int rc; |
680 | 0 |
681 | 0 | if ( nr_bufs > ARRAY_SIZE(args.buf) ) |
682 | 0 | return -E2BIG; |
683 | 0 |
684 | 0 | args.domid = domid; |
685 | 0 | args.nr_bufs = nr_bufs; |
686 | 0 |
687 | 0 | for ( i = 0; i < args.nr_bufs; i++ ) |
688 | 0 | { |
689 | 0 | struct compat_dm_op_buf cmp; |
690 | 0 |
691 | 0 | if ( copy_from_guest_offset(&cmp, bufs, i, 1) ) |
692 | 0 | return -EFAULT; |
693 | 0 |
694 | 0 | #define XLAT_dm_op_buf_HNDL_h(_d_, _s_) \ |
695 | 0 | guest_from_compat_handle((_d_)->h, (_s_)->h) |
696 | 0 |
697 | 0 | XLAT_dm_op_buf(&args.buf[i], &cmp); |
698 | 0 |
699 | 0 | #undef XLAT_dm_op_buf_HNDL_h |
700 | 0 | } |
701 | 0 |
702 | 0 | rc = dm_op(&args); |
703 | 0 |
704 | 0 | if ( rc == -ERESTART ) |
705 | 0 | rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih", |
706 | 0 | domid, nr_bufs, bufs); |
707 | 0 |
708 | 0 | return rc; |
709 | 0 | } |
710 | | |
711 | | long do_dm_op(domid_t domid, |
712 | | unsigned int nr_bufs, |
713 | | XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs) |
714 | 0 | { |
715 | 0 | struct dmop_args args; |
716 | 0 | int rc; |
717 | 0 |
718 | 0 | if ( nr_bufs > ARRAY_SIZE(args.buf) ) |
719 | 0 | return -E2BIG; |
720 | 0 |
721 | 0 | args.domid = domid; |
722 | 0 | args.nr_bufs = nr_bufs; |
723 | 0 |
724 | 0 | if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) ) |
725 | 0 | return -EFAULT; |
726 | 0 |
727 | 0 | rc = dm_op(&args); |
728 | 0 |
729 | 0 | if ( rc == -ERESTART ) |
730 | 0 | rc = hypercall_create_continuation(__HYPERVISOR_dm_op, "iih", |
731 | 0 | domid, nr_bufs, bufs); |
732 | 0 |
733 | 0 | return rc; |
734 | 0 | } |
735 | | |
736 | | /* |
737 | | * Local variables: |
738 | | * mode: C |
739 | | * c-file-style: "BSD" |
740 | | * c-basic-offset: 4 |
741 | | * tab-width: 4 |
742 | | * indent-tabs-mode: nil |
743 | | * End: |
744 | | */ |