/root/src/xen/xen/common/compat/memory.c
Line | Count | Source (jump to first uncovered line) |
1 | | asm(".file \"" __FILE__ "\""); |
2 | | |
3 | | #include <xen/types.h> |
4 | | #include <xen/hypercall.h> |
5 | | #include <xen/guest_access.h> |
6 | | #include <xen/sched.h> |
7 | | #include <xen/event.h> |
8 | | #include <xen/mem_access.h> |
9 | | #include <asm/current.h> |
10 | | #include <compat/memory.h> |
11 | | |
12 | | #define xen_domid_t domid_t |
13 | | #define compat_domid_t domid_compat_t |
14 | | CHECK_TYPE(domid); |
15 | | #undef compat_domid_t |
16 | | #undef xen_domid_t |
17 | | |
18 | | CHECK_vmemrange; |
19 | | |
20 | | #ifdef CONFIG_HAS_PASSTHROUGH |
     |       | /* |
     |       |  * Callback context for iommu_get_reserved_device_memory(): the compat |
     |       |  * copy of the guest's map request plus a running count of the entries |
     |       |  * produced so far. |
     |       |  */ |
21 | | struct get_reserved_device_memory { |
22 | | struct compat_reserved_device_memory_map map; |
23 | | unsigned int used_entries; |
24 | | }; |
25 | | |
     |       | /* |
     |       |  * Per-region callback, invoked once per reserved device memory range. |
     |       |  * Ranges not matching the requested seg/bus/devfn are skipped unless the |
     |       |  * guest asked for XENMEM_RDM_ALL.  Matching ranges are copied into the |
     |       |  * guest's compat buffer while space remains, and are always counted. |
     |       |  * Returns 1 when the range was counted, 0 when filtered out, or |
     |       |  * -ERANGE/-EFAULT on failure. |
     |       |  */ |
26 | | static int get_reserved_device_memory(xen_pfn_t start, xen_ulong_t nr, |
27 | | u32 id, void *ctxt) |
28 | | { |
29 | | struct get_reserved_device_memory *grdm = ctxt; |
30 | | u32 sbdf = PCI_SBDF3(grdm->map.dev.pci.seg, grdm->map.dev.pci.bus, |
31 | | grdm->map.dev.pci.devfn); |
32 | | |
33 | | if ( !(grdm->map.flags & XENMEM_RDM_ALL) && (sbdf != id) ) |
34 | | return 0; |
35 | | |
36 | | if ( grdm->used_entries < grdm->map.nr_entries ) |
37 | | { |
38 | | struct compat_reserved_device_memory rdm = { |
39 | | .start_pfn = start, .nr_pages = nr |
40 | | }; |
41 | | |
     |       | /* |
     |       |  * The compat fields may be narrower than xen_pfn_t/xen_ulong_t: |
     |       |  * reject values that did not survive the assignment unmodified. |
     |       |  */ |
42 | | if ( rdm.start_pfn != start || rdm.nr_pages != nr ) |
43 | | return -ERANGE; |
44 | | |
45 | | if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries, |
46 | | &rdm, 1) ) |
47 | | return -EFAULT; |
48 | | } |
49 | | |
     |       | /* |
     |       |  * Counted even when the buffer is already full, so the caller can |
     |       |  * report the total number of entries required via nr_entries. |
     |       |  */ |
50 | | ++grdm->used_entries; |
51 | | |
52 | | return 1; |
53 | | } |
54 | | #endif |
55 | | |
     |       | /* |
     |       |  * Compat (32-bit guest) entry point for the memory_op hypercall. |
     |       |  * |
     |       |  * Copies the guest's compat argument structure in, translates it into the |
     |       |  * native layout inside the per-vCPU translation area (COMPAT_ARG_XLAT), |
     |       |  * invokes the native do_memory_op(), then translates results back to the |
     |       |  * compat layout.  Batched operations whose extent arrays do not fit in the |
     |       |  * translation area are processed in chunks: 'split' > 0 means another |
     |       |  * chunk remains (loop again), 'split' < 0 means a continuation was (or |
     |       |  * must be) created, and the chunk position travels in the upper bits of |
     |       |  * 'cmd' (start_extent = cmd >> MEMOP_EXTENT_SHIFT). |
     |       |  * |
     |       |  * Returns the native handler's result clamped to int range, or a negative |
     |       |  * errno / encoded continuation value on failure. |
     |       |  */ |
56 | | int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat) |
57 | 0 | { |
58 | 0 | int split, op = cmd & MEMOP_CMD_MASK; |
59 | 0 | long rc; |
60 | 0 | unsigned int start_extent = cmd >> MEMOP_EXTENT_SHIFT; |
61 | 0 |
|
62 | 0 | do |
63 | 0 | { |
64 | 0 | unsigned int i, end_extent = 0; |
     |       | /* Native view of the argument in the translation area. */ |
65 | 0 | union { |
66 | 0 | XEN_GUEST_HANDLE_PARAM(void) hnd; |
67 | 0 | struct xen_memory_reservation *rsrv; |
68 | 0 | struct xen_memory_exchange *xchg; |
69 | 0 | struct xen_add_to_physmap *atp; |
70 | 0 | struct xen_add_to_physmap_batch *atpb; |
71 | 0 | struct xen_remove_from_physmap *xrfp; |
72 | 0 | struct xen_vnuma_topology_info *vnuma; |
73 | 0 | struct xen_mem_access_op *mao; |
74 | 0 | } nat; |
     |       | /* On-stack copy of the guest's compat argument. */ |
75 | 0 | union { |
76 | 0 | struct compat_memory_reservation rsrv; |
77 | 0 | struct compat_memory_exchange xchg; |
78 | 0 | struct compat_add_to_physmap atp; |
79 | 0 | struct compat_add_to_physmap_batch atpb; |
80 | 0 | struct compat_vnuma_topology_info vnuma; |
81 | 0 | struct compat_mem_access_op mao; |
82 | 0 | } cmp; |
83 | 0 |
|
84 | 0 | set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE); |
85 | 0 | split = 0; |
     |       | /* Phase 1: translate the compat request into the native layout. */ |
86 | 0 | switch ( op ) |
87 | 0 | { |
88 | 0 | xen_pfn_t *space; |
89 | 0 |
|
90 | 0 | case XENMEM_increase_reservation: |
91 | 0 | case XENMEM_decrease_reservation: |
92 | 0 | case XENMEM_populate_physmap: |
93 | 0 | if ( copy_from_guest(&cmp.rsrv, compat, 1) ) |
94 | 0 | return start_extent; |
95 | 0 |
|
96 | 0 | /* Is size too large for us to encode a continuation? */ |
97 | 0 | if ( cmp.rsrv.nr_extents > (UINT_MAX >> MEMOP_EXTENT_SHIFT) ) |
98 | 0 | return start_extent; |
99 | 0 |
|
100 | 0 | if ( !compat_handle_is_null(cmp.rsrv.extent_start) && |
101 | 0 | !compat_handle_okay(cmp.rsrv.extent_start, cmp.rsrv.nr_extents) ) |
102 | 0 | return start_extent; |
103 | 0 |
|
     |       | /* Limit this chunk to the PFNs that fit after the native struct. */ |
104 | 0 | end_extent = start_extent + (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.rsrv)) / |
105 | 0 | sizeof(*space); |
106 | 0 | if ( end_extent > cmp.rsrv.nr_extents ) |
107 | 0 | end_extent = cmp.rsrv.nr_extents; |
108 | 0 |
|
109 | 0 | space = (xen_pfn_t *)(nat.rsrv + 1); |
     |       | /* |
     |       |  * Override the generated handle translation for extent_start: |
     |       |  * point it at the widened PFN array placed after the native |
     |       |  * struct, and (for ops that consume guest-supplied PFNs) copy |
     |       |  * the compat PFNs of this chunk in, widening each entry. |
     |       |  */ |
110 | 0 | #define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \ |
111 | 0 | do \ |
112 | 0 | { \ |
113 | 0 | if ( !compat_handle_is_null((_s_)->extent_start) ) \ |
114 | 0 | { \ |
115 | 0 | set_xen_guest_handle((_d_)->extent_start, space - start_extent); \ |
116 | 0 | if ( op != XENMEM_increase_reservation ) \ |
117 | 0 | { \ |
118 | 0 | for ( i = start_extent; i < end_extent; ++i ) \ |
119 | 0 | { \ |
120 | 0 | compat_pfn_t pfn; \ |
121 | 0 | if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \ |
122 | 0 | { \ |
123 | 0 | end_extent = i; \ |
124 | 0 | split = -1; \ |
125 | 0 | break; \ |
126 | 0 | } \ |
127 | 0 | *space++ = pfn; \ |
128 | 0 | } \ |
129 | 0 | } \ |
130 | 0 | } \ |
131 | 0 | else \ |
132 | 0 | { \ |
133 | 0 | set_xen_guest_handle((_d_)->extent_start, NULL); \ |
134 | 0 | end_extent = cmp.rsrv.nr_extents; \ |
135 | 0 | } \ |
136 | 0 | } while (0) |
137 | 0 | XLAT_memory_reservation(nat.rsrv, &cmp.rsrv); |
138 | 0 | #undef XLAT_memory_reservation_HNDL_extent_start |
139 | 0 |
|
     |       | /* Not everything fitted: mark that another chunk is needed. */ |
140 | 0 | if ( end_extent < cmp.rsrv.nr_extents ) |
141 | 0 | { |
142 | 0 | nat.rsrv->nr_extents = end_extent; |
143 | 0 | ++split; |
144 | 0 | } |
145 | 0 |
|
146 | 0 | break; |
147 | 0 |
|
148 | 0 | case XENMEM_exchange: |
149 | 0 | { |
150 | 0 | int order_delta; |
151 | 0 |
|
152 | 0 | if ( copy_from_guest(&cmp.xchg, compat, 1) ) |
153 | 0 | return -EFAULT; |
154 | 0 |
|
155 | 0 | order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order; |
156 | 0 | /* Various sanity checks. */ |
157 | 0 | if ( (cmp.xchg.nr_exchanged > cmp.xchg.in.nr_extents) || |
158 | 0 | (order_delta > 0 && (cmp.xchg.nr_exchanged & ((1U << order_delta) - 1))) || |
159 | 0 | /* Sizes of input and output lists do not overflow an int? */ |
160 | 0 | ((~0U >> cmp.xchg.in.extent_order) < cmp.xchg.in.nr_extents) || |
161 | 0 | ((~0U >> cmp.xchg.out.extent_order) < cmp.xchg.out.nr_extents) || |
162 | 0 | /* Sizes of input and output lists match? */ |
163 | 0 | ((cmp.xchg.in.nr_extents << cmp.xchg.in.extent_order) != |
164 | 0 | (cmp.xchg.out.nr_extents << cmp.xchg.out.extent_order)) ) |
165 | 0 | return -EINVAL; |
166 | 0 |
|
167 | 0 | if ( !compat_handle_okay(cmp.xchg.in.extent_start, |
168 | 0 | cmp.xchg.in.nr_extents) || |
169 | 0 | !compat_handle_okay(cmp.xchg.out.extent_start, |
170 | 0 | cmp.xchg.out.nr_extents) ) |
171 | 0 | return -EFAULT; |
172 | 0 |
|
     |       | /* |
     |       |  * Resume from the already-exchanged count; size the chunk so |
     |       |  * that both the input and output PFN arrays fit in the |
     |       |  * translation area. |
     |       |  */ |
173 | 0 | start_extent = cmp.xchg.nr_exchanged; |
174 | 0 | end_extent = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.xchg)) / |
175 | 0 | (((1U << ABS(order_delta)) + 1) * |
176 | 0 | sizeof(*space)); |
177 | 0 | if ( end_extent == 0 ) |
178 | 0 | { |
179 | 0 | printk("Cannot translate compatibility mode XENMEM_exchange extents (%u,%u)\n", |
180 | 0 | cmp.xchg.in.extent_order, cmp.xchg.out.extent_order); |
181 | 0 | return -E2BIG; |
182 | 0 | } |
183 | 0 | if ( order_delta > 0 ) |
184 | 0 | end_extent <<= order_delta; |
185 | 0 | end_extent += start_extent; |
186 | 0 | if ( end_extent > cmp.xchg.in.nr_extents ) |
187 | 0 | end_extent = cmp.xchg.in.nr_extents; |
188 | 0 |
|
189 | 0 | space = (xen_pfn_t *)(nat.xchg + 1); |
190 | 0 | /* Code below depends upon .in preceding .out. */ |
191 | 0 | BUILD_BUG_ON(offsetof(xen_memory_exchange_t, in) > offsetof(xen_memory_exchange_t, out)); |
     |       | /* |
     |       |  * This override runs twice (once for .in, then .out); each pass |
     |       |  * copies its chunk of compat PFNs in, then rescales the extent |
     |       |  * window and flips order_delta for the other direction. |
     |       |  */ |
192 | 0 | #define XLAT_memory_reservation_HNDL_extent_start(_d_, _s_) \ |
193 | 0 | do \ |
194 | 0 | { \ |
195 | 0 | set_xen_guest_handle((_d_)->extent_start, space - start_extent); \ |
196 | 0 | for ( i = start_extent; i < end_extent; ++i ) \ |
197 | 0 | { \ |
198 | 0 | compat_pfn_t pfn; \ |
199 | 0 | if ( __copy_from_compat_offset(&pfn, (_s_)->extent_start, i, 1) ) \ |
200 | 0 | return -EFAULT; \ |
201 | 0 | *space++ = pfn; \ |
202 | 0 | } \ |
203 | 0 | if ( order_delta > 0 ) \ |
204 | 0 | { \ |
205 | 0 | start_extent >>= order_delta; \ |
206 | 0 | end_extent >>= order_delta; \ |
207 | 0 | } \ |
208 | 0 | else \ |
209 | 0 | { \ |
210 | 0 | start_extent <<= -order_delta; \ |
211 | 0 | end_extent <<= -order_delta; \ |
212 | 0 | } \ |
213 | 0 | order_delta = -order_delta; \ |
214 | 0 | } while (0) |
215 | 0 | XLAT_memory_exchange(nat.xchg, &cmp.xchg); |
216 | 0 | #undef XLAT_memory_reservation_HNDL_extent_start |
217 | 0 |
|
218 | 0 | if ( end_extent < cmp.xchg.in.nr_extents ) |
219 | 0 | { |
220 | 0 | nat.xchg->in.nr_extents = end_extent; |
221 | 0 | if ( order_delta >= 0 ) |
222 | 0 | nat.xchg->out.nr_extents = end_extent >> order_delta; |
223 | 0 | else |
224 | 0 | nat.xchg->out.nr_extents = end_extent << -order_delta; |
225 | 0 | ++split; |
226 | 0 | } |
227 | 0 |
|
228 | 0 | break; |
229 | 0 | } |
230 | 0 |
|
     |       | /* Ops whose argument needs no layout translation: pass it through. */ |
231 | 0 | case XENMEM_current_reservation: |
232 | 0 | case XENMEM_maximum_reservation: |
233 | 0 | case XENMEM_maximum_gpfn: |
234 | 0 | case XENMEM_maximum_ram_page: |
235 | 0 | nat.hnd = compat; |
236 | 0 | break; |
237 | 0 |
|
238 | 0 | case XENMEM_add_to_physmap: |
239 | 0 | BUILD_BUG_ON((typeof(cmp.atp.size))-1 > |
240 | 0 | (UINT_MAX >> MEMOP_EXTENT_SHIFT)); |
241 | 0 |
|
242 | 0 | if ( copy_from_guest(&cmp.atp, compat, 1) ) |
243 | 0 | return -EFAULT; |
244 | 0 |
|
245 | 0 | XLAT_add_to_physmap(nat.atp, &cmp.atp); |
246 | 0 |
|
247 | 0 | break; |
248 | 0 |
|
249 | 0 | case XENMEM_add_to_physmap_batch: |
250 | 0 | { |
     |       | /* How many idx/gpfn pairs fit after the native struct. */ |
251 | 0 | unsigned int limit = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.atpb)) |
252 | 0 | / (sizeof(nat.atpb->idxs.p) + sizeof(nat.atpb->gpfns.p)); |
253 | 0 | /* Use an intermediate variable to suppress warnings on old gcc: */ |
254 | 0 | unsigned int size; |
255 | 0 | xen_ulong_t *idxs = (void *)(nat.atpb + 1); |
256 | 0 | xen_pfn_t *gpfns = (void *)(idxs + limit); |
257 | 0 | /* |
258 | 0 | * The union will always be 16-bit width. So it is not |
259 | 0 | * necessary to have the exact field which correspond to the |
260 | 0 | * space. |
261 | 0 | */ |
262 | 0 | enum XLAT_add_to_physmap_batch_u u = |
263 | 0 | XLAT_add_to_physmap_batch_u_res0; |
264 | 0 |
|
265 | 0 | if ( copy_from_guest(&cmp.atpb, compat, 1) ) |
266 | 0 | return -EFAULT; |
267 | 0 | size = cmp.atpb.size; |
268 | 0 | if ( !compat_handle_okay(cmp.atpb.idxs, size) || |
269 | 0 | !compat_handle_okay(cmp.atpb.gpfns, size) || |
270 | 0 | !compat_handle_okay(cmp.atpb.errs, size) ) |
271 | 0 | return -EFAULT; |
272 | 0 |
|
273 | 0 | end_extent = start_extent + limit; |
274 | 0 | if ( end_extent > size ) |
275 | 0 | end_extent = size; |
276 | 0 |
|
     |       | /* Bias the arrays so they can be indexed by absolute extent number. */ |
277 | 0 | idxs -= start_extent; |
278 | 0 | gpfns -= start_extent; |
279 | 0 |
|
280 | 0 | for ( i = start_extent; i < end_extent; ++i ) |
281 | 0 | { |
282 | 0 | compat_ulong_t idx; |
283 | 0 | compat_pfn_t gpfn; |
284 | 0 |
|
285 | 0 | if ( __copy_from_compat_offset(&idx, cmp.atpb.idxs, i, 1) || |
286 | 0 | __copy_from_compat_offset(&gpfn, cmp.atpb.gpfns, i, 1) ) |
287 | 0 | return -EFAULT; |
288 | 0 | idxs[i] = idx; |
289 | 0 | gpfns[i] = gpfn; |
290 | 0 | } |
291 | 0 |
|
292 | 0 | #define XLAT_add_to_physmap_batch_HNDL_idxs(_d_, _s_) \ |
293 | 0 | set_xen_guest_handle((_d_)->idxs, idxs) |
294 | 0 | #define XLAT_add_to_physmap_batch_HNDL_gpfns(_d_, _s_) \ |
295 | 0 | set_xen_guest_handle((_d_)->gpfns, gpfns) |
296 | 0 | #define XLAT_add_to_physmap_batch_HNDL_errs(_d_, _s_) \ |
297 | 0 | guest_from_compat_handle((_d_)->errs, (_s_)->errs) |
298 | 0 |
|
299 | 0 | XLAT_add_to_physmap_batch(nat.atpb, &cmp.atpb); |
300 | 0 |
|
301 | 0 | #undef XLAT_add_to_physmap_batch_HNDL_errs |
302 | 0 | #undef XLAT_add_to_physmap_batch_HNDL_gpfns |
303 | 0 | #undef XLAT_add_to_physmap_batch_HNDL_idxs |
304 | 0 |
|
305 | 0 | if ( end_extent < cmp.atpb.size ) |
306 | 0 | { |
307 | 0 | nat.atpb->size = end_extent; |
308 | 0 | ++split; |
309 | 0 | } |
310 | 0 |
|
311 | 0 | break; |
312 | 0 | } |
313 | 0 |
|
314 | 0 | case XENMEM_remove_from_physmap: |
315 | 0 | { |
316 | 0 | struct compat_remove_from_physmap cmp; |
317 | 0 |
|
318 | 0 | if ( copy_from_guest(&cmp, compat, 1) ) |
319 | 0 | return -EFAULT; |
320 | 0 |
|
321 | 0 | XLAT_remove_from_physmap(nat.xrfp, &cmp); |
322 | 0 |
|
323 | 0 | break; |
324 | 0 | } |
325 | 0 |
|
326 | 0 | case XENMEM_access_op: |
327 | 0 | if ( copy_from_guest(&cmp.mao, compat, 1) ) |
328 | 0 | return -EFAULT; |
329 | 0 | |
330 | 0 | #define XLAT_mem_access_op_HNDL_pfn_list(_d_, _s_) \ |
331 | 0 | guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list) |
332 | 0 | #define XLAT_mem_access_op_HNDL_access_list(_d_, _s_) \ |
333 | 0 | guest_from_compat_handle((_d_)->access_list, (_s_)->access_list) |
334 | 0 | |
335 | 0 | XLAT_mem_access_op(nat.mao, &cmp.mao); |
336 | 0 | |
337 | 0 | #undef XLAT_mem_access_op_HNDL_pfn_list |
338 | 0 | #undef XLAT_mem_access_op_HNDL_access_list |
339 | 0 | |
340 | 0 | break; |
341 | 0 |
|
342 | 0 | case XENMEM_get_vnumainfo: |
343 | 0 | { |
344 | 0 | enum XLAT_vnuma_topology_info_vdistance vdistance = |
345 | 0 | XLAT_vnuma_topology_info_vdistance_h; |
346 | 0 | enum XLAT_vnuma_topology_info_vcpu_to_vnode vcpu_to_vnode = |
347 | 0 | XLAT_vnuma_topology_info_vcpu_to_vnode_h; |
348 | 0 | enum XLAT_vnuma_topology_info_vmemrange vmemrange = |
349 | 0 | XLAT_vnuma_topology_info_vmemrange_h; |
350 | 0 |
|
351 | 0 | if ( copy_from_guest(&cmp.vnuma, compat, 1) ) |
352 | 0 | return -EFAULT; |
353 | 0 |
|
354 | 0 | #define XLAT_vnuma_topology_info_HNDL_vdistance_h(_d_, _s_) \ |
355 | 0 | guest_from_compat_handle((_d_)->vdistance.h, (_s_)->vdistance.h) |
356 | 0 | #define XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h(_d_, _s_) \ |
357 | 0 | guest_from_compat_handle((_d_)->vcpu_to_vnode.h, (_s_)->vcpu_to_vnode.h) |
358 | 0 | #define XLAT_vnuma_topology_info_HNDL_vmemrange_h(_d_, _s_) \ |
359 | 0 | guest_from_compat_handle((_d_)->vmemrange.h, (_s_)->vmemrange.h) |
360 | 0 |
|
361 | 0 | XLAT_vnuma_topology_info(nat.vnuma, &cmp.vnuma); |
362 | 0 |
|
363 | 0 | #undef XLAT_vnuma_topology_info_HNDL_vdistance_h |
364 | 0 | #undef XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h |
365 | 0 | #undef XLAT_vnuma_topology_info_HNDL_vmemrange_h |
366 | 0 | break; |
367 | 0 | } |
368 | 0 |
|
369 | 0 | #ifdef CONFIG_HAS_PASSTHROUGH |
     |       | /* Fully handled here (no native call): returns directly below. */ |
370 | 0 | case XENMEM_reserved_device_memory_map: |
371 | 0 | { |
372 | 0 | struct get_reserved_device_memory grdm; |
373 | 0 |
|
374 | 0 | if ( unlikely(start_extent) ) |
375 | 0 | return -EINVAL; |
376 | 0 |
|
377 | 0 | if ( copy_from_guest(&grdm.map, compat, 1) || |
378 | 0 | !compat_handle_okay(grdm.map.buffer, grdm.map.nr_entries) ) |
379 | 0 | return -EFAULT; |
380 | 0 |
|
381 | 0 | if ( grdm.map.flags & ~XENMEM_RDM_ALL ) |
382 | 0 | return -EINVAL; |
383 | 0 |
|
384 | 0 | grdm.used_entries = 0; |
385 | 0 | rc = iommu_get_reserved_device_memory(get_reserved_device_memory, |
386 | 0 | &grdm); |
387 | 0 |
|
388 | 0 | if ( !rc && grdm.map.nr_entries < grdm.used_entries ) |
389 | 0 | rc = -ENOBUFS; |
390 | 0 | grdm.map.nr_entries = grdm.used_entries; |
391 | 0 | if ( __copy_to_guest(compat, &grdm.map, 1) ) |
392 | 0 | rc = -EFAULT; |
393 | 0 |
|
394 | 0 | return rc; |
395 | 0 | } |
396 | 0 | #endif |
397 | 0 |
|
398 | 0 | default: |
399 | 0 | return compat_arch_memory_op(cmd, compat); |
400 | 0 | } |
401 | 0 |
|
     |       | /* Phase 2: invoke the native handler on the translated arguments. */ |
402 | 0 | rc = do_memory_op(cmd, nat.hnd); |
403 | 0 | if ( rc < 0 ) |
404 | 0 | { |
405 | 0 | if ( rc == -ENOBUFS && op == XENMEM_get_vnumainfo ) |
406 | 0 | { |
     |       | /* Report back the sizes the native handler filled in. */ |
407 | 0 | cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes; |
408 | 0 | cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus; |
409 | 0 | cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges; |
410 | 0 | if ( __copy_to_guest(compat, &cmp.vnuma, 1) ) |
411 | 0 | rc = -EFAULT; |
412 | 0 | } |
413 | 0 | break; |
414 | 0 | } |
415 | 0 |
|
     |       | /* |
     |       |  * Detect whether do_memory_op() created a continuation: if so, |
     |       |  * rc holds __HYPERVISOR_memory_op and 'cmd' receives the encoded |
     |       |  * op/start_extent (see the BUG_ONs); record that via split = -1. |
     |       |  */ |
416 | 0 | cmd = 0; |
417 | 0 | if ( hypercall_xlat_continuation(&cmd, 2, 0x02, nat.hnd, compat) ) |
418 | 0 | { |
419 | 0 | BUG_ON(rc != __HYPERVISOR_memory_op); |
420 | 0 | BUG_ON((cmd & MEMOP_CMD_MASK) != op); |
421 | 0 | split = -1; |
422 | 0 | } |
423 | 0 |
|
     |       | /* Phase 3: translate results back and advance start_extent. */ |
424 | 0 | switch ( op ) |
425 | 0 | { |
426 | 0 | case XENMEM_increase_reservation: |
427 | 0 | case XENMEM_decrease_reservation: |
428 | 0 | case XENMEM_populate_physmap: |
429 | 0 | end_extent = split >= 0 ? rc : cmd >> MEMOP_EXTENT_SHIFT; |
430 | 0 | if ( (op != XENMEM_decrease_reservation) && |
431 | 0 | !guest_handle_is_null(nat.rsrv->extent_start) ) |
432 | 0 | { |
     |       | /* Narrow each output PFN back to compat width and copy out. */ |
433 | 0 | for ( ; start_extent < end_extent; ++start_extent ) |
434 | 0 | { |
435 | 0 | compat_pfn_t pfn = nat.rsrv->extent_start.p[start_extent]; |
436 | 0 |
|
437 | 0 | BUG_ON(pfn != nat.rsrv->extent_start.p[start_extent]); |
438 | 0 | if ( __copy_to_compat_offset(cmp.rsrv.extent_start, |
439 | 0 | start_extent, &pfn, 1) ) |
440 | 0 | { |
441 | 0 | if ( split >= 0 ) |
442 | 0 | { |
443 | 0 | rc = start_extent; |
444 | 0 | split = 0; |
445 | 0 | } |
446 | 0 | else |
447 | 0 | /* |
448 | 0 | * Short of being able to cancel the continuation, |
449 | 0 | * force it to restart here; eventually we shall |
450 | 0 | * get out of this state. |
451 | 0 | */ |
452 | 0 | rc = (start_extent << MEMOP_EXTENT_SHIFT) | op; |
453 | 0 | break; |
454 | 0 | } |
455 | 0 | } |
456 | 0 | } |
457 | 0 | else |
458 | 0 | { |
459 | 0 | start_extent = end_extent; |
460 | 0 | } |
461 | 0 | /* Bail if there was an error. */ |
462 | 0 | if ( (split >= 0) && (end_extent != nat.rsrv->nr_extents) ) |
463 | 0 | split = 0; |
464 | 0 | break; |
465 | 0 |
|
466 | 0 | case XENMEM_exchange: |
467 | 0 | { |
468 | 0 | DEFINE_XEN_GUEST_HANDLE(compat_memory_exchange_t); |
469 | 0 | int order_delta; |
470 | 0 |
|
471 | 0 | BUG_ON(split >= 0 && rc); |
472 | 0 | BUG_ON(end_extent < nat.xchg->nr_exchanged); |
473 | 0 | end_extent = nat.xchg->nr_exchanged; |
474 | 0 |
|
     |       | /* Rescale the window from input-extent to output-extent units. */ |
475 | 0 | order_delta = cmp.xchg.out.extent_order - cmp.xchg.in.extent_order; |
476 | 0 | if ( order_delta > 0 ) |
477 | 0 | { |
478 | 0 | start_extent >>= order_delta; |
479 | 0 | BUG_ON(end_extent & ((1U << order_delta) - 1)); |
480 | 0 | end_extent >>= order_delta; |
481 | 0 | } |
482 | 0 | else |
483 | 0 | { |
484 | 0 | start_extent <<= -order_delta; |
485 | 0 | end_extent <<= -order_delta; |
486 | 0 | } |
487 | 0 |
|
488 | 0 | for ( ; start_extent < end_extent; ++start_extent ) |
489 | 0 | { |
490 | 0 | compat_pfn_t pfn = nat.xchg->out.extent_start.p[start_extent]; |
491 | 0 |
|
492 | 0 | BUG_ON(pfn != nat.xchg->out.extent_start.p[start_extent]); |
493 | 0 | if ( __copy_to_compat_offset(cmp.xchg.out.extent_start, |
494 | 0 | start_extent, &pfn, 1) ) |
495 | 0 | { |
496 | 0 | rc = -EFAULT; |
497 | 0 | break; |
498 | 0 | } |
499 | 0 | } |
500 | 0 |
|
     |       | /* Publish the progress counter back to the guest's struct. */ |
501 | 0 | cmp.xchg.nr_exchanged = nat.xchg->nr_exchanged; |
502 | 0 | if ( __copy_field_to_guest(guest_handle_cast(compat, |
503 | 0 | compat_memory_exchange_t), |
504 | 0 | &cmp.xchg, nr_exchanged) ) |
505 | 0 | rc = -EFAULT; |
506 | 0 |
|
507 | 0 | if ( rc < 0 ) |
508 | 0 | { |
509 | 0 | if ( split < 0 ) |
510 | 0 | /* Cannot cancel the continuation... */ |
511 | 0 | domain_crash(current->domain); |
512 | 0 | return rc; |
513 | 0 | } |
514 | 0 | break; |
515 | 0 | } |
516 | 0 |
|
517 | 0 | case XENMEM_add_to_physmap_batch: |
518 | 0 | start_extent = end_extent; |
519 | 0 | break; |
520 | 0 |
|
     |       | /* Nothing to translate back for these. */ |
521 | 0 | case XENMEM_maximum_ram_page: |
522 | 0 | case XENMEM_current_reservation: |
523 | 0 | case XENMEM_maximum_reservation: |
524 | 0 | case XENMEM_maximum_gpfn: |
525 | 0 | case XENMEM_add_to_physmap: |
526 | 0 | case XENMEM_remove_from_physmap: |
527 | 0 | case XENMEM_access_op: |
528 | 0 | break; |
529 | 0 |
|
530 | 0 | case XENMEM_get_vnumainfo: |
531 | 0 | cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes; |
532 | 0 | cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus; |
533 | 0 | cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges; |
534 | 0 | if ( __copy_to_guest(compat, &cmp.vnuma, 1) ) |
535 | 0 | rc = -EFAULT; |
536 | 0 | break; |
537 | 0 |
|
538 | 0 | default: |
539 | 0 | domain_crash(current->domain); |
540 | 0 | split = 0; |
541 | 0 | break; |
542 | 0 | } |
543 | 0 |
|
     |       | /* |
     |       |  * Re-encode the (possibly advanced) start_extent into cmd for the |
     |       |  * next chunk, or hand it to a preemption continuation. |
     |       |  */ |
544 | 0 | cmd = op | (start_extent << MEMOP_EXTENT_SHIFT); |
545 | 0 | if ( split > 0 && hypercall_preempt_check() ) |
546 | 0 | return hypercall_create_continuation( |
547 | 0 | __HYPERVISOR_memory_op, "ih", cmd, compat); |
548 | 0 | } while ( split > 0 ); |
549 | 0 |
|
     |       | /* rc is long but the hypercall return value is int: clamp. */ |
550 | 0 | if ( unlikely(rc > INT_MAX) ) |
551 | 0 | return INT_MAX; |
552 | 0 |
|
553 | 0 | if ( unlikely(rc < INT_MIN) ) |
554 | 0 | return INT_MIN; |
555 | 0 |
|
556 | 0 | return rc; |
557 | 0 | } |