/root/src/xen/xen/arch/x86/x86_64/compat/mm.c
#include <xen/event.h>
#include <xen/mem_access.h>
#include <xen/multicall.h>
#include <compat/memory.h>
#include <compat/xen.h>
#include <asm/mem_paging.h>
#include <asm/mem_sharing.h>

#include <asm/pv/mm.h>

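/*
 * Compat (32-bit guest) entry points for the x86-64 memory management
 * hypercalls: arguments are copied in, expanded to the native layout in
 * the per-vCPU translation area, handed to the native handlers, and
 * translated back on the way out.
 */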
int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    struct compat_machphys_mfn_list xmml;
    l2_pgentry_t l2e;
    unsigned long v;
    compat_pfn_t mfn;
    unsigned int i;
    int rc = 0;

    switch ( cmd )
    {
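    /*
     * COMPAT_ARG_XLAT_VIRT_BASE is per-vCPU scratch space; the generated
     * XLAT_* helpers expand compat structures into their native
     * equivalents there before the native handler runs.
     */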
    case XENMEM_set_memory_map:
    {
        struct compat_foreign_memory_map cmp;
        struct xen_foreign_memory_map *nat = COMPAT_ARG_XLAT_VIRT_BASE;

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_foreign_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));

        break;
    }

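    /*
     * These two return data, so the translation also has to be undone on
     * the way out; the no-op HNDL override keeps the guest's buffer
     * handle untouched during the reverse XLAT.
     */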
    case XENMEM_memory_map:
    case XENMEM_machine_memory_map:
    {
        struct compat_memory_map cmp;
        struct xen_memory_map *nat = COMPAT_ARG_XLAT_VIRT_BASE;

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
        if ( rc < 0 )
            break;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) ((void)0)
        XLAT_memory_map(&cmp, nat);
#undef XLAT_memory_map_HNDL_buffer
        if ( __copy_to_guest(arg, &cmp, 1) )
            rc = -EFAULT;

        break;
    }

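    /*
     * A return value equal to __HYPERVISOR_memory_op means the native
     * handler created a continuation whose argument still points at the
     * translation area; hypercall_xlat_continuation() (mask 0x2, i.e.
     * argument #1) swaps the original guest handle back in.
     */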
    case XENMEM_set_pod_target:
    case XENMEM_get_pod_target:
    {
        struct compat_pod_target cmp;
        struct xen_pod_target *nat = COMPAT_ARG_XLAT_VIRT_BASE;

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

        XLAT_pod_target(nat, &cmp);

        rc = arch_memory_op(cmd, guest_handle_from_ptr(nat, void));
        if ( rc < 0 )
            break;

        if ( rc == __HYPERVISOR_memory_op )
            hypercall_xlat_continuation(NULL, 2, 0x2, nat, arg);

        XLAT_pod_target(&cmp, nat);

        if ( __copy_to_guest(arg, &cmp, 1) )
        {
            if ( rc == __HYPERVISOR_memory_op )
                hypercall_cancel_continuation(current);
            rc = -EFAULT;
        }

        break;
    }

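    /*
     * Report the compat machine-to-phys window as seen in the 32-bit
     * guest's virtual address space.
     */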
    case XENMEM_machphys_mapping:
    {
        struct domain *d = current->domain;
        struct compat_machphys_mapping mapping = {
            .v_start = MACH2PHYS_COMPAT_VIRT_START(d),
            .v_end   = MACH2PHYS_COMPAT_VIRT_END,
            .max_mfn = MACH2PHYS_COMPAT_NR_ENTRIES(d) - 1
        };

        if ( copy_to_guest(arg, &mapping, 1) )
            rc = -EFAULT;

        break;
    }

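    /*
     * Walk the L2 entries backing the compat M2P range and hand the
     * guest one MFN per 2Mb superpage.  Non-present entries (holes)
     * repeat the previous MFN, so the first entry must be present.
     */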
    case XENMEM_machphys_mfn_list:
    case XENMEM_machphys_compat_mfn_list:
    {
        unsigned long limit;
        compat_pfn_t last_mfn;

        if ( copy_from_guest(&xmml, arg, 1) )
            return -EFAULT;

        limit = (unsigned long)(compat_machine_to_phys_mapping + max_page);
        if ( limit > RDWR_COMPAT_MPT_VIRT_END )
            limit = RDWR_COMPAT_MPT_VIRT_END;
        for ( i = 0, v = RDWR_COMPAT_MPT_VIRT_START, last_mfn = 0;
              (i != xmml.max_extents) && (v < limit);
              i++, v += 1 << L2_PAGETABLE_SHIFT )
        {
            l2e = compat_idle_pg_table_l2[l2_table_offset(v)];
            if ( l2e_get_flags(l2e) & _PAGE_PRESENT )
                mfn = l2e_get_pfn(l2e);
            else
                mfn = last_mfn;
            ASSERT(mfn);
            if ( copy_to_compat_offset(xmml.extent_start, i, &mfn, 1) )
                return -EFAULT;
            last_mfn = mfn;
        }

        xmml.nr_extents = i;
        if ( __copy_to_guest(arg, &xmml, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_get_sharing_freed_pages:
        return mem_sharing_get_nr_saved_mfns();

    case XENMEM_get_sharing_shared_pages:
        return mem_sharing_get_nr_shared_mfns();

    case XENMEM_paging_op:
        return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));

    case XENMEM_sharing_op:
        return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

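/*
 * A 32-bit guest cannot pass a 64-bit PTE in a single register, so the
 * new value arrives split into low and high halves and is reassembled
 * before calling the native handler.
 */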
int compat_update_va_mapping(unsigned int va, u32 lo, u32 hi,
                             unsigned int flags)
{
    return do_update_va_mapping(va, lo | ((u64)hi << 32), flags);
}

int compat_update_va_mapping_otherdomain(unsigned long va, u32 lo, u32 hi,
                                         unsigned long flags,
                                         domid_t domid)
{
    return do_update_va_mapping_otherdomain(va, lo | ((u64)hi << 32), flags,
                                            domid);
}

DEFINE_XEN_GUEST_HANDLE(mmuext_op_compat_t);

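/*
 * Translate a batch of compat mmuext ops into the native layout, one
 * translation-area-sized chunk at a time, feeding each chunk to
 * do_mmuext_op().  The MMU_UPDATE_PREEMPTED bit of @count marks the
 * continuation of an earlier, preempted call.
 */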
int compat_mmuext_op(XEN_GUEST_HANDLE_PARAM(void) arg,
                     unsigned int count,
                     XEN_GUEST_HANDLE_PARAM(uint) pdone,
                     unsigned int foreigndom)
{
    unsigned int i, preempt_mask;
    int rc = 0;
    XEN_GUEST_HANDLE_PARAM(mmuext_op_compat_t) cmp_uops =
        guest_handle_cast(arg, mmuext_op_compat_t);
    XEN_GUEST_HANDLE_PARAM(mmuext_op_t) nat_ops;

    if ( unlikely(count == MMU_UPDATE_PREEMPTED) &&
         likely(guest_handle_is_null(cmp_uops)) )
    {
        set_xen_guest_handle(nat_ops, NULL);
        return do_mmuext_op(nat_ops, count, pdone, foreigndom);
    }

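    /* Strip the continuation flag so @count is the number of ops left. */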
    preempt_mask = count & MMU_UPDATE_PREEMPTED;
    count ^= preempt_mask;

    if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
        return -EFAULT;

    set_xen_guest_handle(nat_ops, COMPAT_ARG_XLAT_VIRT_BASE);

    for ( ; count; count -= i )
    {
        mmuext_op_t *nat_op = nat_ops.p;
        unsigned int limit = COMPAT_ARG_XLAT_SIZE / sizeof(*nat_op);
        int err;

        for ( i = 0; i < min(limit, count); ++i )
        {
            mmuext_op_compat_t cmp_op;
            enum XLAT_mmuext_op_arg1 arg1;
            enum XLAT_mmuext_op_arg2 arg2;

            if ( unlikely(__copy_from_guest(&cmp_op, cmp_uops, 1) != 0) )
            {
                rc = -EFAULT;
                break;
            }

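            /*
             * arg1/arg2 are unions; the locals tell the generated
             * XLAT_mmuext_op() which member to translate for this
             * command.
             */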
            switch ( cmp_op.cmd )
            {
            case MMUEXT_PIN_L1_TABLE:
            case MMUEXT_PIN_L2_TABLE:
            case MMUEXT_PIN_L3_TABLE:
            case MMUEXT_PIN_L4_TABLE:
            case MMUEXT_UNPIN_TABLE:
            case MMUEXT_NEW_BASEPTR:
            case MMUEXT_CLEAR_PAGE:
            case MMUEXT_COPY_PAGE:
                arg1 = XLAT_mmuext_op_arg1_mfn;
                break;
            default:
                arg1 = XLAT_mmuext_op_arg1_linear_addr;
                break;
            case MMUEXT_NEW_USER_BASEPTR:
                rc = -EINVAL;
                /* fallthrough */
            case MMUEXT_TLB_FLUSH_LOCAL:
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_TLB_FLUSH_ALL:
            case MMUEXT_FLUSH_CACHE:
                arg1 = -1;
                break;
            }

            if ( rc )
                break;

            switch ( cmp_op.cmd )
            {
            case MMUEXT_SET_LDT:
                arg2 = XLAT_mmuext_op_arg2_nr_ents;
                break;
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_INVLPG_MULTI:
                arg2 = XLAT_mmuext_op_arg2_vcpumask;
                break;
            case MMUEXT_COPY_PAGE:
                arg2 = XLAT_mmuext_op_arg2_src_mfn;
                break;
            default:
                arg2 = -1;
                break;
            }

#define XLAT_mmuext_op_HNDL_arg2_vcpumask(_d_, _s_) \
        guest_from_compat_handle((_d_)->arg2.vcpumask, (_s_)->arg2.vcpumask)
            XLAT_mmuext_op(nat_op, &cmp_op);
#undef XLAT_mmuext_op_HNDL_arg2_vcpumask

            if ( rc || i >= limit )
                break;

            guest_handle_add_offset(cmp_uops, 1);
            ++nat_op;
        }

        err = do_mmuext_op(nat_ops, i | preempt_mask, pdone, foreigndom);

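        /*
         * If do_mmuext_op() was preempted, err is the hypercall number
         * and the continuation's arguments still reference the
         * translation area: rewind both cursors to the first
         * unprocessed op and patch the continuation to use the compat
         * handle and the full remaining count.
         */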
        if ( err )
        {
            BUILD_BUG_ON(__HYPERVISOR_mmuext_op <= 0);
            if ( err == __HYPERVISOR_mmuext_op )
            {
                struct cpu_user_regs *regs = guest_cpu_user_regs();
                struct mc_state *mcs = &current->mc_state;
                unsigned int arg1 = !(mcs->flags & MCSF_in_multicall)
                                    ? regs->ecx
                                    : mcs->call.args[1];
                unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;

                BUG_ON(left == arg1 && left != i);
                BUG_ON(left > count);
                guest_handle_add_offset(nat_ops, i - left);
                guest_handle_subtract_offset(cmp_uops, left);
                left = 1;
                if ( arg1 != MMU_UPDATE_PREEMPTED )
                {
                    BUG_ON(!hypercall_xlat_continuation(&left, 4, 0x01,
                                                        nat_ops, cmp_uops));
                    if ( !(mcs->flags & MCSF_in_multicall) )
                        regs->ecx += count - i;
                    else
                        mcs->compat_call.args[1] += count - i;
                }
                else
                    BUG_ON(hypercall_xlat_continuation(&left, 4, 0));
                BUG_ON(left != arg1);
            }
            else
                BUG_ON(err > 0);
            rc = err;
        }

        if ( rc )
            break;

        /* Force do_mmuext_op() to not start counting from zero again. */
        preempt_mask = MMU_UPDATE_PREEMPTED;
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */