/root/src/xen/xen/arch/x86/mm/mem_access.c
Line | Count | Source |
1 | | /****************************************************************************** |
2 | | * arch/x86/mm/mem_access.c |
3 | | * |
4 | | * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp) |
5 | | * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices. |
6 | | * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc. |
7 | | * Parts of this code are Copyright (c) 2006 by Michael A Fetterman |
8 | | * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. |
9 | | * |
10 | | * This program is free software; you can redistribute it and/or modify |
11 | | * it under the terms of the GNU General Public License as published by |
12 | | * the Free Software Foundation; either version 2 of the License, or |
13 | | * (at your option) any later version. |
14 | | * |
15 | | * This program is distributed in the hope that it will be useful, |
16 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
18 | | * GNU General Public License for more details. |
19 | | * |
20 | | * You should have received a copy of the GNU General Public License |
21 | | * along with this program; If not, see <http://www.gnu.org/licenses/>. |
22 | | */ |
23 | | |
24 | | #include <xen/guest_access.h> /* copy_from_guest() */ |
25 | | #include <xen/mem_access.h> |
26 | | #include <xen/vm_event.h> |
27 | | #include <xen/event.h> |
28 | | #include <public/vm_event.h> |
29 | | #include <asm/p2m.h> |
30 | | #include <asm/altp2m.h> |
31 | | #include <asm/vm_event.h> |
32 | | |
33 | | #include "mm-locks.h" |
34 | | |
35 | | /* |
36 | | * Get access type for a gfn. |
37 | | * If gfn == INVALID_GFN, gets the default access type. |
38 | | */ |
39 | | static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn, |
40 | | xenmem_access_t *access) |
41 | 0 | { |
42 | 0 | p2m_type_t t; |
43 | 0 | p2m_access_t a; |
44 | 0 | mfn_t mfn; |
45 | 0 |
46 | 0 | static const xenmem_access_t memaccess[] = { |
47 | 0 | #define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac |
48 | 0 | ACCESS(n), |
49 | 0 | ACCESS(r), |
50 | 0 | ACCESS(w), |
51 | 0 | ACCESS(rw), |
52 | 0 | ACCESS(x), |
53 | 0 | ACCESS(rx), |
54 | 0 | ACCESS(wx), |
55 | 0 | ACCESS(rwx), |
56 | 0 | ACCESS(rx2rw), |
57 | 0 | ACCESS(n2rwx), |
58 | 0 | #undef ACCESS |
59 | 0 | }; |
60 | 0 |
61 | 0 | /* If this is a request to get the default access. */ |
62 | 0 | if ( gfn_eq(gfn, INVALID_GFN) ) |
63 | 0 | { |
64 | 0 | *access = memaccess[p2m->default_access]; |
65 | 0 | return 0; |
66 | 0 | } |
67 | 0 |
68 | 0 | gfn_lock(p2m, gfn, 0); |
69 | 0 | mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL); |
70 | 0 | gfn_unlock(p2m, gfn, 0); |
71 | 0 |
72 | 0 | if ( mfn_eq(mfn, INVALID_MFN) ) |
73 | 0 | return -ESRCH; |
74 | 0 |
75 | 0 | if ( (unsigned int)a >= ARRAY_SIZE(memaccess) ) |
76 | 0 | return -ERANGE; |
77 | 0 |
78 | 0 | *access = memaccess[a]; |
79 | 0 | return 0; |
80 | 0 | } |
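
The ACCESS() macro above builds a designated-initializer array that translates Xen's internal p2m_access_t values into the public xenmem_access_t encoding, so the mapping stays correct even if enum ordering changes. A minimal standalone sketch of the same table pattern, using stand-in enums rather than the real Xen definitions:

#include <stdio.h>

/* Stand-in enums; the real types live in Xen's p2m and public headers. */
typedef enum { p2m_access_n, p2m_access_r, p2m_access_w, p2m_access_rw } p2m_access_t;
typedef enum { XENMEM_access_n, XENMEM_access_r, XENMEM_access_w, XENMEM_access_rw } xenmem_access_t;

static const xenmem_access_t memaccess[] = {
#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
    ACCESS(n),
    ACCESS(r),
    ACCESS(w),
    ACCESS(rw),
#undef ACCESS
};

int main(void)
{
    /* Indexing by the internal value yields the public value. */
    printf("p2m_access_w -> XENMEM_access_%d\n", memaccess[p2m_access_w]);
    return 0;
}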
81 | | |
82 | | bool p2m_mem_access_emulate_check(struct vcpu *v, |
83 | | const vm_event_response_t *rsp) |
84 | 0 | { |
85 | 0 | xenmem_access_t access; |
86 | 0 | bool violation = true; |
87 | 0 | const struct vm_event_mem_access *data = &rsp->u.mem_access; |
88 | 0 | struct domain *d = v->domain; |
89 | 0 | struct p2m_domain *p2m = NULL; |
90 | 0 |
91 | 0 | if ( altp2m_active(d) ) |
92 | 0 | p2m = p2m_get_altp2m(v); |
93 | 0 | if ( !p2m ) |
94 | 0 | p2m = p2m_get_hostp2m(d); |
95 | 0 |
96 | 0 | if ( _p2m_get_mem_access(p2m, _gfn(data->gfn), &access) == 0 ) |
97 | 0 | { |
98 | 0 | switch ( access ) |
99 | 0 | { |
100 | 0 | case XENMEM_access_n: |
101 | 0 | case XENMEM_access_n2rwx: |
102 | 0 | default: |
103 | 0 | violation = data->flags & MEM_ACCESS_RWX; |
104 | 0 | break; |
105 | 0 |
106 | 0 | case XENMEM_access_r: |
107 | 0 | violation = data->flags & MEM_ACCESS_WX; |
108 | 0 | break; |
109 | 0 |
110 | 0 | case XENMEM_access_w: |
111 | 0 | violation = data->flags & MEM_ACCESS_RX; |
112 | 0 | break; |
113 | 0 |
114 | 0 | case XENMEM_access_x: |
115 | 0 | violation = data->flags & MEM_ACCESS_RW; |
116 | 0 | break; |
117 | 0 |
118 | 0 | case XENMEM_access_rx: |
119 | 0 | case XENMEM_access_rx2rw: |
120 | 0 | violation = data->flags & MEM_ACCESS_W; |
121 | 0 | break; |
122 | 0 |
123 | 0 | case XENMEM_access_wx: |
124 | 0 | violation = data->flags & MEM_ACCESS_R; |
125 | 0 | break; |
126 | 0 |
127 | 0 | case XENMEM_access_rw: |
128 | 0 | violation = data->flags & MEM_ACCESS_X; |
129 | 0 | break; |
130 | 0 |
131 | 0 | case XENMEM_access_rwx: |
132 | 0 | violation = false; |
133 | 0 | break; |
134 | 0 | } |
135 | 0 | } |
136 | 0 |
137 | 0 | return violation; |
138 | 0 | } |
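
Each case above encodes which rwx bits are not permitted for one access type; a violation is simply an attempted access outside the permitted set (rx2rw is judged by its pre-conversion rights rx, n2rwx by n). A compact standalone restatement of that truth table, with illustrative flag values standing in for MEM_ACCESS_R/W/X:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag bits standing in for MEM_ACCESS_R/W/X. */
#define ACC_R (1u << 0)
#define ACC_W (1u << 1)
#define ACC_X (1u << 2)

/*
 * A violation occurs iff the attempted access uses a right outside the
 * permitted set -- the same predicate the switch above spells out per case.
 */
static bool is_violation(unsigned int attempted, unsigned int permitted)
{
    return (attempted & ~permitted) != 0;
}

int main(void)
{
    printf("%d\n", is_violation(ACC_W, ACC_R | ACC_X)); /* write to rx page: 1 */
    printf("%d\n", is_violation(ACC_R, ACC_R | ACC_W)); /* read of rw page: 0 */
    return 0;
}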
139 | | |
140 | | bool p2m_mem_access_check(paddr_t gpa, unsigned long gla, |
141 | | struct npfec npfec, |
142 | | vm_event_request_t **req_ptr) |
143 | 0 | { |
144 | 0 | struct vcpu *v = current; |
145 | 0 | gfn_t gfn = gaddr_to_gfn(gpa); |
146 | 0 | struct domain *d = v->domain; |
147 | 0 | struct p2m_domain *p2m = NULL; |
148 | 0 | mfn_t mfn; |
149 | 0 | p2m_type_t p2mt; |
150 | 0 | p2m_access_t p2ma; |
151 | 0 | vm_event_request_t *req; |
152 | 0 | int rc; |
153 | 0 |
154 | 0 | if ( altp2m_active(d) ) |
155 | 0 | p2m = p2m_get_altp2m(v); |
156 | 0 | if ( !p2m ) |
157 | 0 | p2m = p2m_get_hostp2m(d); |
158 | 0 |
159 | 0 | /* First, handle rx2rw conversion automatically. |
160 | 0 | * These calls to p2m->set_entry() must succeed: we have the gfn |
161 | 0 | * locked and just did a successful get_entry(). */ |
162 | 0 | gfn_lock(p2m, gfn, 0); |
163 | 0 | mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL); |
164 | 0 |
165 | 0 | if ( npfec.write_access && p2ma == p2m_access_rx2rw ) |
166 | 0 | { |
167 | 0 | rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, -1); |
168 | 0 | ASSERT(rc == 0); |
169 | 0 | gfn_unlock(p2m, gfn, 0); |
170 | 0 | return true; |
171 | 0 | } |
172 | 0 | else if ( p2ma == p2m_access_n2rwx ) |
173 | 0 | { |
174 | 0 | ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch); |
175 | 0 | rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, |
176 | 0 | p2mt, p2m_access_rwx, -1); |
177 | 0 | ASSERT(rc == 0); |
178 | 0 | } |
179 | 0 | gfn_unlock(p2m, gfn, 0); |
180 | 0 |
181 | 0 | /* Otherwise, check if there is a memory event listener, and send the message along */ |
182 | 0 | if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr ) |
183 | 0 | { |
184 | 0 | /* No listener */ |
185 | 0 | if ( p2m->access_required ) |
186 | 0 | { |
187 | 0 | gdprintk(XENLOG_INFO, "Memory access permissions failure, " |
188 | 0 | "no vm_event listener VCPU %d, dom %d\n", |
189 | 0 | v->vcpu_id, d->domain_id); |
190 | 0 | domain_crash(v->domain); |
191 | 0 | return false; |
192 | 0 | } |
193 | 0 | else |
194 | 0 | { |
195 | 0 | gfn_lock(p2m, gfn, 0); |
196 | 0 | mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL); |
197 | 0 | if ( p2ma != p2m_access_n2rwx ) |
198 | 0 | { |
199 | 0 | /* A listener is not required, so clear the access |
200 | 0 | * restrictions. This set must succeed: we have the |
201 | 0 | * gfn locked and just did a successful get_entry(). */ |
202 | 0 | rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, |
203 | 0 | p2mt, p2m_access_rwx, -1); |
204 | 0 | ASSERT(rc == 0); |
205 | 0 | } |
206 | 0 | gfn_unlock(p2m, gfn, 0); |
207 | 0 | return true; |
208 | 0 | } |
209 | 0 | } |
210 | 0 |
211 | 0 | *req_ptr = NULL; |
212 | 0 | req = xzalloc(vm_event_request_t); |
213 | 0 | if ( req ) |
214 | 0 | { |
215 | 0 | *req_ptr = req; |
216 | 0 |
217 | 0 | req->reason = VM_EVENT_REASON_MEM_ACCESS; |
218 | 0 | req->u.mem_access.gfn = gfn_x(gfn); |
219 | 0 | req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1); |
220 | 0 | if ( npfec.gla_valid ) |
221 | 0 | { |
222 | 0 | req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID; |
223 | 0 | req->u.mem_access.gla = gla; |
224 | 0 |
225 | 0 | if ( npfec.kind == npfec_kind_with_gla ) |
226 | 0 | req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA; |
227 | 0 | else if ( npfec.kind == npfec_kind_in_gpt ) |
228 | 0 | req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT; |
229 | 0 | } |
230 | 0 | req->u.mem_access.flags |= npfec.read_access ? MEM_ACCESS_R : 0; |
231 | 0 | req->u.mem_access.flags |= npfec.write_access ? MEM_ACCESS_W : 0; |
232 | 0 | req->u.mem_access.flags |= npfec.insn_fetch ? MEM_ACCESS_X : 0; |
233 | 0 | } |
234 | 0 |
235 | 0 | /* Return whether vCPU pause is required (a.k.a. a sync event) */ |
236 | 0 | return (p2ma != p2m_access_n2rwx); |
237 | 0 | } |
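
Before a request is filled in, the faulting guest-physical address is split into a frame number (gfn) and an in-page offset, and the nested-page-fault bits (npfec) are folded into MEM_ACCESS_* flags. A standalone sketch of the address arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12 on x86):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* 4 KiB pages on x86 */

int main(void)
{
    uint64_t gpa = 0x12345678ULL; /* example faulting guest-physical address */

    /* Same arithmetic as gaddr_to_gfn() and the offset computation above. */
    uint64_t gfn    = gpa >> PAGE_SHIFT;
    uint64_t offset = gpa & ((1ULL << PAGE_SHIFT) - 1);

    printf("gfn=%#llx offset=%#llx\n",
           (unsigned long long)gfn, (unsigned long long)offset);
    return 0;
}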
238 | | |
239 | | int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m, |
240 | | struct p2m_domain *ap2m, p2m_access_t a, |
241 | | gfn_t gfn) |
242 | 0 | { |
243 | 0 | mfn_t mfn; |
244 | 0 | p2m_type_t t; |
245 | 0 | p2m_access_t old_a; |
246 | 0 | unsigned int page_order; |
247 | 0 | unsigned long gfn_l = gfn_x(gfn); |
248 | 0 | int rc; |
249 | 0 |
250 | 0 | mfn = ap2m->get_entry(ap2m, gfn, &t, &old_a, 0, NULL, NULL); |
251 | 0 |
252 | 0 | /* Check host p2m if no valid entry in alternate */ |
253 | 0 | if ( !mfn_valid(mfn) ) |
254 | 0 | { |
255 | 0 |
256 | 0 | mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a, |
257 | 0 | P2M_ALLOC | P2M_UNSHARE, &page_order, 0); |
258 | 0 |
259 | 0 | rc = -ESRCH; |
260 | 0 | if ( !mfn_valid(mfn) || t != p2m_ram_rw ) |
261 | 0 | return rc; |
262 | 0 |
263 | 0 | /* If this is a superpage, copy that first */ |
264 | 0 | if ( page_order != PAGE_ORDER_4K ) |
265 | 0 | { |
266 | 0 | unsigned long mask = ~((1UL << page_order) - 1); |
267 | 0 | gfn_t gfn2 = _gfn(gfn_l & mask); |
268 | 0 | mfn_t mfn2 = _mfn(mfn_x(mfn) & mask); |
269 | 0 |
270 | 0 | rc = ap2m->set_entry(ap2m, gfn2, mfn2, page_order, t, old_a, 1); |
271 | 0 | if ( rc ) |
272 | 0 | return rc; |
273 | 0 | } |
274 | 0 | } |
275 | 0 |
276 | 0 | return ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a, |
277 | 0 | current->domain != d); |
278 | 0 | } |
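
When the host p2m maps the gfn with a superpage, the function first mirrors the whole superpage into the altp2m, masking both gfn and mfn down to the superpage boundary before resetting the single 4K entry. The mask arithmetic in isolation (order 9 is the x86 2 MiB case):

#include <stdio.h>

int main(void)
{
    unsigned int page_order = 9; /* 2 MiB superpage = 2^9 4 KiB pages */
    unsigned long gfn  = 0x123456UL;
    unsigned long mask = ~((1UL << page_order) - 1);

    /* 0x123456 & ~0x1ff == 0x123400: first frame of the enclosing superpage.
     * The same mask is applied to the mfn so both stay aligned. */
    printf("superpage start gfn = %#lx\n", gfn & mask);
    return 0;
}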
279 | | |
280 | | static int set_mem_access(struct domain *d, struct p2m_domain *p2m, |
281 | | struct p2m_domain *ap2m, p2m_access_t a, |
282 | | gfn_t gfn) |
283 | 0 | { |
284 | 0 | int rc = 0; |
285 | 0 |
286 | 0 | if ( ap2m ) |
287 | 0 | { |
288 | 0 | rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn); |
289 | 0 | /* If the corresponding mfn is invalid, we just want to skip it */ |
290 | 0 | if ( rc == -ESRCH ) |
291 | 0 | rc = 0; |
292 | 0 | } |
293 | 0 | else |
294 | 0 | { |
295 | 0 | mfn_t mfn; |
296 | 0 | p2m_access_t _a; |
297 | 0 | p2m_type_t t; |
298 | 0 |
299 | 0 | mfn = p2m->get_entry(p2m, gfn, &t, &_a, 0, NULL, NULL); |
300 | 0 | rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1); |
301 | 0 | } |
302 | 0 |
303 | 0 | return rc; |
304 | 0 | } |
305 | | |
306 | | static bool xenmem_access_to_p2m_access(struct p2m_domain *p2m, |
307 | | xenmem_access_t xaccess, |
308 | | p2m_access_t *paccess) |
309 | 0 | { |
310 | 0 | static const p2m_access_t memaccess[] = { |
311 | 0 | #define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac |
312 | 0 | ACCESS(n), |
313 | 0 | ACCESS(r), |
314 | 0 | ACCESS(w), |
315 | 0 | ACCESS(rw), |
316 | 0 | ACCESS(x), |
317 | 0 | ACCESS(rx), |
318 | 0 | ACCESS(wx), |
319 | 0 | ACCESS(rwx), |
320 | 0 | ACCESS(rx2rw), |
321 | 0 | ACCESS(n2rwx), |
322 | 0 | #undef ACCESS |
323 | 0 | }; |
324 | 0 |
325 | 0 | switch ( xaccess ) |
326 | 0 | { |
327 | 0 | case 0 ... ARRAY_SIZE(memaccess) - 1: |
328 | 0 | *paccess = memaccess[xaccess]; |
329 | 0 | break; |
330 | 0 | case XENMEM_access_default: |
331 | 0 | *paccess = p2m->default_access; |
332 | 0 | break; |
333 | 0 | default: |
334 | 0 | return false; |
335 | 0 | } |
336 | 0 |
337 | 0 | return true; |
338 | 0 | } |
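
The `case 0 ... ARRAY_SIZE(memaccess) - 1:` label above is a GCC/Clang case-range that bounds-checks the guest-supplied value before it is used as an array index; anything outside the table (other than XENMEM_access_default) falls through to `return false`. The same idiom in a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

static const int table[] = { 10, 20, 30 };

static bool lookup(unsigned int idx, int *out)
{
    switch ( idx )
    {
    /* GCC/Clang case-range: only indexes that actually exist in the table. */
    case 0 ... sizeof(table) / sizeof(table[0]) - 1:
        *out = table[idx];
        return true;
    default:
        return false;
    }
}

int main(void)
{
    int v = 0;

    if ( lookup(1, &v) )
        printf("table[1] = %d\n", v);            /* table[1] = 20 */
    printf("lookup(7) ok? %d\n", lookup(7, &v)); /* 0 */
    return 0;
}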
339 | | |
340 | | /* |
341 | | * Set access type for a region of gfns. |
342 | | * If gfn == INVALID_GFN, sets the default access type. |
343 | | */ |
344 | | long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr, |
345 | | uint32_t start, uint32_t mask, xenmem_access_t access, |
346 | | unsigned int altp2m_idx) |
347 | 0 | { |
348 | 0 | struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL; |
349 | 0 | p2m_access_t a; |
350 | 0 | unsigned long gfn_l; |
351 | 0 | long rc = 0; |
352 | 0 |
353 | 0 | /* altp2m view 0 is treated as the hostp2m */ |
354 | 0 | if ( altp2m_idx ) |
355 | 0 | { |
356 | 0 | if ( altp2m_idx >= MAX_ALTP2M || |
357 | 0 | d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) ) |
358 | 0 | return -EINVAL; |
359 | 0 |
360 | 0 | ap2m = d->arch.altp2m_p2m[altp2m_idx]; |
361 | 0 | } |
362 | 0 |
363 | 0 | if ( !xenmem_access_to_p2m_access(p2m, access, &a) ) |
364 | 0 | return -EINVAL; |
365 | 0 |
366 | 0 | /* If this is a request to set the default access. */ |
367 | 0 | if ( gfn_eq(gfn, INVALID_GFN) ) |
368 | 0 | { |
369 | 0 | p2m->default_access = a; |
370 | 0 | return 0; |
371 | 0 | } |
372 | 0 |
373 | 0 | p2m_lock(p2m); |
374 | 0 | if ( ap2m ) |
375 | 0 | p2m_lock(ap2m); |
376 | 0 |
377 | 0 | for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l ) |
378 | 0 | { |
379 | 0 | rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l)); |
380 | 0 |
381 | 0 | if ( rc ) |
382 | 0 | break; |
383 | 0 |
384 | 0 | /* Check for continuation if it's not the last iteration. */ |
385 | 0 | if ( nr > ++start && !(start & mask) && hypercall_preempt_check() ) |
386 | 0 | { |
387 | 0 | rc = start; |
388 | 0 | break; |
389 | 0 | } |
390 | 0 | } |
391 | 0 |
392 | 0 | if ( ap2m ) |
393 | 0 | p2m_unlock(ap2m); |
394 | 0 | p2m_unlock(p2m); |
395 | 0 |
396 | 0 | return rc; |
397 | 0 | } |
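
The loop above follows Xen's usual hypercall-continuation pattern: handle one gfn per iteration, poll for preemption every `mask + 1` items (mask is typically of the form 2^k - 1), and return the positive index at which the caller should re-issue the hypercall. A stubbed standalone sketch of that control flow, with preempt_check() standing in for hypercall_preempt_check():

#include <stdio.h>

/* Stand-in for hypercall_preempt_check(): pretend preemption at item 32. */
static int preempt_check(unsigned int done) { return done == 32; }

static long process_range(unsigned int start, unsigned int nr,
                          unsigned int mask)
{
    long rc = 0;

    while ( start < nr )
    {
        /* ... do one unit of work for item 'start' here ... */

        /* Check for continuation if it's not the last iteration. */
        if ( nr > ++start && !(start & mask) && preempt_check(start) )
        {
            rc = start; /* positive: caller restarts with this 'start' */
            break;
        }
    }

    return rc;          /* 0: finished; <0 would be an error */
}

int main(void)
{
    /* mask 0x1f => poll every 32 items; expect a continuation at 32. */
    printf("%ld\n", process_range(0, 100, 0x1f));
    return 0;
}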
398 | | |
399 | | long p2m_set_mem_access_multi(struct domain *d, |
400 | | const XEN_GUEST_HANDLE(const_uint64) pfn_list, |
401 | | const XEN_GUEST_HANDLE(const_uint8) access_list, |
402 | | uint32_t nr, uint32_t start, uint32_t mask, |
403 | | unsigned int altp2m_idx) |
404 | 0 | { |
405 | 0 | struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL; |
406 | 0 | long rc = 0; |
407 | 0 |
408 | 0 | /* altp2m view 0 is treated as the hostp2m */ |
409 | 0 | if ( altp2m_idx ) |
410 | 0 | { |
411 | 0 | if ( altp2m_idx >= MAX_ALTP2M || |
412 | 0 | d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) ) |
413 | 0 | return -EINVAL; |
414 | 0 |
415 | 0 | ap2m = d->arch.altp2m_p2m[altp2m_idx]; |
416 | 0 | } |
417 | 0 |
418 | 0 | p2m_lock(p2m); |
419 | 0 | if ( ap2m ) |
420 | 0 | p2m_lock(ap2m); |
421 | 0 |
422 | 0 | while ( start < nr ) |
423 | 0 | { |
424 | 0 | p2m_access_t a; |
425 | 0 | uint8_t access; |
426 | 0 | uint64_t gfn_l; |
427 | 0 |
428 | 0 | if ( copy_from_guest_offset(&gfn_l, pfn_list, start, 1) || |
429 | 0 | copy_from_guest_offset(&access, access_list, start, 1) ) |
430 | 0 | { |
431 | 0 | rc = -EFAULT; |
432 | 0 | break; |
433 | 0 | } |
434 | 0 |
435 | 0 | if ( !xenmem_access_to_p2m_access(p2m, access, &a) ) |
436 | 0 | { |
437 | 0 | rc = -EINVAL; |
438 | 0 | break; |
439 | 0 | } |
440 | 0 |
441 | 0 | rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l)); |
442 | 0 |
443 | 0 | if ( rc ) |
444 | 0 | break; |
445 | 0 |
446 | 0 | /* Check for continuation if it's not the last iteration. */ |
447 | 0 | if ( nr > ++start && !(start & mask) && hypercall_preempt_check() ) |
448 | 0 | { |
449 | 0 | rc = start; |
450 | 0 | break; |
451 | 0 | } |
452 | 0 | } |
453 | 0 |
454 | 0 | if ( ap2m ) |
455 | 0 | p2m_unlock(ap2m); |
456 | 0 | p2m_unlock(p2m); |
457 | 0 |
458 | 0 | return rc; |
459 | 0 | } |
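
From the toolstack side these paths are normally reached through libxc. A hedged usage sketch, assuming the xc_set_mem_access() and xc_set_mem_access_multi() prototypes from xenctrl.h; verify them against your Xen version's header before relying on this:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h> /* xc_interface, xenmem_access_t (assumed prototypes) */

int main(int argc, char **argv)
{
    uint32_t domid = argc > 1 ? (uint32_t)strtoul(argv[1], NULL, 0) : 1;
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);

    if ( !xch )
        return 1;

    /* Host-p2m path: mark gfns 0x1000..0x100f read/execute-only. */
    if ( xc_set_mem_access(xch, domid, XENMEM_access_rx, 0x1000, 16) )
        perror("xc_set_mem_access");

    /* Per-gfn settings in one call; backed by p2m_set_mem_access_multi(). */
    uint64_t pages[2]  = { 0x2000, 0x2001 };
    uint8_t  access[2] = { XENMEM_access_r, XENMEM_access_rw };

    if ( xc_set_mem_access_multi(xch, domid, access, pages, 2) )
        perror("xc_set_mem_access_multi");

    xc_interface_close(xch);
    return 0;
}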
460 | | |
461 | | int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access) |
462 | 0 | { |
463 | 0 | struct p2m_domain *p2m = p2m_get_hostp2m(d); |
464 | 0 |
465 | 0 | return _p2m_get_mem_access(p2m, gfn, access); |
466 | 0 | } |
467 | | |
468 | | /* |
469 | | * Local variables: |
470 | | * mode: C |
471 | | * c-file-style: "BSD" |
472 | | * c-basic-offset: 4 |
473 | | * indent-tabs-mode: nil |
474 | | * End: |
475 | | */ |