debuggers.hg

view xen/common/grant_table.c @ 16369:ff2edb1fd9f2

x86: Change cache attributes of Xen 1:1 page mappings in response to
guest mapping requests.
Based on a patch by Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Wed Nov 07 11:44:05 2007 +0000 (2007-11-07)
parents 062fe1c7b09f
children 5b8730c78454
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005-2006 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/config.h>
28 #include <xen/iocap.h>
29 #include <xen/lib.h>
30 #include <xen/sched.h>
31 #include <xen/mm.h>
32 #include <xen/trace.h>
33 #include <xen/guest_access.h>
34 #include <xen/domain_page.h>
35 #include <xsm/xsm.h>
/*
 * Maximum number of shared grant-table frames per domain.  Unless the
 * architecture supplies its own definition, default to
 * DEFAULT_MAX_NR_GRANT_FRAMES, tunable via the "gnttab_max_nr_frames"
 * boot parameter.
 */
37 #ifndef max_nr_grant_frames
38 unsigned int max_nr_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
39 integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
40 #endif
42 /* The maximum number of grant mappings is defined as a multiplier of the
43 * maximum number of grant table entries. This defines the multiplier used.
44 * Pretty arbitrary. [POLICY]
45 */
46 #define MAX_MAPTRACK_TO_GRANTS_RATIO 8
48 /*
49 * The first two members of a grant entry are updated as a combined pair.
50 * The following union allows that to happen in an endian-neutral fashion.
51 */
52 union grant_combo {
/* Both halves viewed as one 32-bit word, suitable for cmpxchg updates. */
53 uint32_t word;
54 struct {
55 uint16_t flags;
56 domid_t domid;
57 } shorts;
58 };
60 /* Used to share code between unmap_grant_ref and unmap_and_replace. */
61 struct gnttab_unmap_common {
62 /* Input */
63 uint64_t host_addr;
64 uint64_t dev_bus_addr;
65 uint64_t new_addr;
66 grant_handle_t handle;
68 /* Return */
69 int16_t status;
71 /* Shared state between *_unmap and *_unmap_complete */
72 u16 flags;
73 unsigned long frame;
74 struct grant_mapping *map;
75 struct domain *rd;
76 };
78 /* Number of unmap operations that are done between each tlb flush */
79 #define GNTTAB_UNMAP_BATCH_SIZE 32
/*
 * PIN_FAIL: log a warning, set the local variable 'rc' to @_rc and jump
 * to label @_lbl.  Relies on 'rc' and the label existing in the
 * enclosing function.
 */
82 #define PIN_FAIL(_lbl, _rc, _f, _a...) \
83 do { \
84 gdprintk(XENLOG_WARNING, _f, ## _a ); \
85 rc = (_rc); \
86 goto _lbl; \
87 } while ( 0 )
/* Maptrack entries per page, and 2-D lookup of maptrack entry @e in table @t. */
89 #define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
90 #define maptrack_entry(t, e) \
91 ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
93 static inline unsigned int
94 nr_maptrack_frames(struct grant_table *t)
95 {
96 return t->maptrack_limit / MAPTRACK_PER_PAGE;
97 }
99 static unsigned inline int max_nr_maptrack_frames(void)
100 {
101 return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
102 }
/* Shared grant entries per page, and lookup of shared entry @e in table @t. */
105 #define SHGNT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_t))
106 #define shared_entry(t, e) \
107 ((t)->shared[(e)/SHGNT_PER_PAGE][(e)%SHGNT_PER_PAGE])
/* Active grant entries per page, and lookup of active entry @e in table @t. */
108 #define ACGNT_PER_PAGE (PAGE_SIZE / sizeof(struct active_grant_entry))
109 #define active_entry(t, e) \
110 ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
/*
 * Pop one entry off the maptrack free list of grant table @t.
 * Returns the entry index, or -1 when the head has reached the final
 * (maptrack_limit - 1) entry, i.e. the list is exhausted.  Locking of
 * @t is the caller's responsibility (see get_maptrack_handle()).
 */
112 static inline int
113 __get_maptrack_handle(
114 struct grant_table *t)
115 {
116 unsigned int h;
117 if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
118 return -1;
/* Free entries are chained through their .ref fields. */
119 t->maptrack_head = maptrack_entry(t, h).ref;
120 t->map_count++;
121 return h;
122 }
/*
 * Push maptrack entry @handle back onto the head of @t's free list and
 * decrement the live-mapping count.  Statement order matters: the
 * entry's .ref must link to the old head before the head is updated.
 */
124 static inline void
125 put_maptrack_handle(
126 struct grant_table *t, int handle)
127 {
128 maptrack_entry(t, handle).ref = t->maptrack_head;
129 t->maptrack_head = handle;
130 t->map_count--;
131 }
/*
 * Allocate a maptrack handle from the local grant table @lgt, growing
 * the maptrack array by one xenheap page (under lgt->lock) when the
 * free list is empty.  Returns the handle, or -1 if the configured
 * frame limit has been reached or page allocation fails.
 */
133 static inline int
134 get_maptrack_handle(
135 struct grant_table *lgt)
136 {
137 int i;
138 grant_handle_t handle;
139 struct grant_mapping *new_mt;
140 unsigned int new_mt_limit, nr_frames;
/* Fast path: lock-free pop; fall back to growing the table under the lock. */
142 if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
143 {
144 spin_lock(&lgt->lock);
/* Re-check under the lock: another vcpu may have grown the table already. */
146 if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
147 {
148 nr_frames = nr_maptrack_frames(lgt);
149 if ( nr_frames >= max_nr_maptrack_frames() )
150 {
151 spin_unlock(&lgt->lock);
152 return -1;
153 }
155 new_mt = alloc_xenheap_page();
156 if ( new_mt == NULL )
157 {
158 spin_unlock(&lgt->lock);
159 return -1;
160 }
162 clear_page(new_mt);
164 new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;
/* Chain the new entries into a free list via their .ref fields. */
166 for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
167 {
168 new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
169 new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
170 }
172 lgt->maptrack[nr_frames] = new_mt;
173 lgt->maptrack_limit = new_mt_limit;
175 gdprintk(XENLOG_INFO,
176 "Increased maptrack size to %u frames.\n", nr_frames + 1);
177 handle = __get_maptrack_handle(lgt);
178 }
180 spin_unlock(&lgt->lock);
181 }
182 return handle;
183 }
185 /*
186 * Returns 0 if TLB flush / invalidate required by caller.
187 * va will indicate the address to be invalidated.
188 *
189 * addr is _either_ a host virtual address, or the address of the pte to
190 * update, as indicated by the GNTMAP_contains_pte flag.
191 */
/*
 * NOTE(review): the comment above looks stale for the following
 * __gnttab_map_grant_ref(), which returns void and reports results via
 * op->status — confirm against the revision history before relying on it.
 */
/*
 * Core of GNTTABOP_map_grant_ref: map the foreign frame named by
 * (op->dom, op->ref) into the calling domain, for device and/or host
 * use according to op->flags.  Results (status, handle, dev_bus_addr)
 * are written back into *op.  On failure after the grant entry has
 * been pinned, the undo_out path reverses the pin/flag updates.
 */
192 static void
193 __gnttab_map_grant_ref(
194 struct gnttab_map_grant_ref *op)
195 {
196 struct domain *ld, *rd;
197 struct vcpu *led;
198 int handle;
199 unsigned long frame = 0;
200 int rc = GNTST_okay;
201 int is_iomem = 0;
202 struct active_grant_entry *act;
203 struct grant_mapping *mt;
204 grant_entry_t *sha;
205 union grant_combo scombo, prev_scombo, new_scombo;
207 /*
208 * We bound the number of times we retry CMPXCHG on memory locations that
209 * we share with a guest OS. The reason is that the guest can modify that
210 * location at a higher rate than we can read-modify-CMPXCHG, so the guest
211 * could cause us to livelock. There are a few cases where it is valid for
212 * the guest to race our updates (e.g., to change the GTF_readonly flag),
213 * so we allow a few retries before failing.
214 */
215 int retries = 0;
217 led = current;
218 ld = led->domain;
/* At least one of device_map / host_map must be requested. */
220 if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
221 {
222 gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
223 op->status = GNTST_bad_gntref;
224 return;
225 }
227 if ( unlikely((rd = rcu_lock_domain_by_id(op->dom)) == NULL) )
228 {
229 gdprintk(XENLOG_INFO, "Could not find domain %d\n", op->dom);
230 op->status = GNTST_bad_domain;
231 return;
232 }
234 rc = xsm_grant_mapref(ld, rd, op->flags);
235 if ( rc )
236 {
237 rcu_unlock_domain(rd);
238 op->status = GNTST_permission_denied;
239 return;
240 }
/* Reserve a local maptrack slot before touching the remote table. */
242 if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
243 {
244 rcu_unlock_domain(rd);
245 gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
246 op->status = GNTST_no_device_space;
247 return;
248 }
/* All grant-entry inspection/update below is under the remote table lock. */
250 spin_lock(&rd->grant_table->lock);
252 /* Bounds check on the grant ref */
253 if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
254 PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);
256 act = &active_entry(rd->grant_table, op->ref);
257 sha = &shared_entry(rd->grant_table, op->ref);
259 /* If already pinned, check the active domid and avoid refcnt overflow. */
260 if ( act->pin &&
261 ((act->domid != ld->domain_id) ||
262 (act->pin & 0x80808080U) != 0) )
263 PIN_FAIL(unlock_out, GNTST_general_error,
264 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
265 act->domid, ld->domain_id, act->pin);
267 if ( !act->pin ||
268 (!(op->flags & GNTMAP_readonly) &&
269 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
270 {
271 scombo.word = *(u32 *)&sha->flags;
273 /*
274 * This loop attempts to set the access (reading/writing) flags
275 * in the grant table entry. It tries a cmpxchg on the field
276 * up to five times, and then fails under the assumption that
277 * the guest is misbehaving.
278 */
279 for ( ; ; )
280 {
281 /* If not already pinned, check the grant domid and type. */
282 if ( !act->pin &&
283 (((scombo.shorts.flags & GTF_type_mask) !=
284 GTF_permit_access) ||
285 (scombo.shorts.domid != ld->domain_id)) )
286 PIN_FAIL(unlock_out, GNTST_general_error,
287 "Bad flags (%x) or dom (%d). (expected dom %d)\n",
288 scombo.shorts.flags, scombo.shorts.domid,
289 ld->domain_id);
291 new_scombo = scombo;
292 new_scombo.shorts.flags |= GTF_reading;
294 if ( !(op->flags & GNTMAP_readonly) )
295 {
296 new_scombo.shorts.flags |= GTF_writing;
297 if ( unlikely(scombo.shorts.flags & GTF_readonly) )
298 PIN_FAIL(unlock_out, GNTST_general_error,
299 "Attempt to write-pin a r/o grant entry.\n");
300 }
302 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
303 scombo.word, new_scombo.word);
304 if ( likely(prev_scombo.word == scombo.word) )
305 break;
307 if ( retries++ == 4 )
308 PIN_FAIL(unlock_out, GNTST_general_error,
309 "Shared grant entry is unstable.\n");
311 scombo = prev_scombo;
312 }
/* First pin: latch the granting domid and translate the granted frame. */
314 if ( !act->pin )
315 {
316 act->domid = scombo.shorts.domid;
317 act->frame = gmfn_to_mfn(rd, sha->frame);
318 }
319 }
321 if ( op->flags & GNTMAP_device_map )
322 act->pin += (op->flags & GNTMAP_readonly) ?
323 GNTPIN_devr_inc : GNTPIN_devw_inc;
324 if ( op->flags & GNTMAP_host_map )
325 act->pin += (op->flags & GNTMAP_readonly) ?
326 GNTPIN_hstr_inc : GNTPIN_hstw_inc;
328 frame = act->frame;
330 spin_unlock(&rd->grant_table->lock);
332 if ( op->flags & GNTMAP_host_map )
333 {
334 /* Could be an iomem page for setting up permission */
335 if ( is_iomem_page(frame) )
336 {
337 is_iomem = 1;
338 if ( iomem_permit_access(ld, frame, frame) )
339 {
340 gdprintk(XENLOG_WARNING,
341 "Could not permit access to grant frame "
342 "%lx as iomem\n", frame);
343 rc = GNTST_general_error;
344 goto undo_out;
345 }
346 }
347 }
/* RAM frames: take page (and, for writable maps, type) references. */
349 if ( !is_iomem )
350 {
351 if ( unlikely(!mfn_valid(frame)) ||
352 unlikely(!((op->flags & GNTMAP_readonly) ?
353 get_page(mfn_to_page(frame), rd) :
354 get_page_and_type(mfn_to_page(frame), rd,
355 PGT_writable_page))))
356 {
357 if ( !rd->is_dying )
358 gdprintk(XENLOG_WARNING,
359 "Could not pin grant frame %lx\n", frame);
360 rc = GNTST_general_error;
361 goto undo_out;
362 }
364 if ( op->flags & GNTMAP_host_map )
365 {
366 rc = create_grant_host_mapping(op->host_addr, frame, op->flags);
367 if ( rc != GNTST_okay )
368 {
369 if ( !(op->flags & GNTMAP_readonly) )
370 put_page_type(mfn_to_page(frame));
371 put_page(mfn_to_page(frame));
372 goto undo_out;
373 }
/* Device maps take their own page/type references on top of the host map's. */
375 if ( op->flags & GNTMAP_device_map )
376 {
377 (void)get_page(mfn_to_page(frame), rd);
378 if ( !(op->flags & GNTMAP_readonly) )
379 get_page_type(mfn_to_page(frame), PGT_writable_page);
380 }
381 }
382 }
384 TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);
/* Record the mapping in the local maptrack entry reserved earlier. */
386 mt = &maptrack_entry(ld->grant_table, handle);
387 mt->domid = op->dom;
388 mt->ref = op->ref;
389 mt->flags = op->flags;
391 op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
392 op->handle = handle;
393 op->status = GNTST_okay;
395 rcu_unlock_domain(rd);
396 return;
/* Failure path: drop the pins taken above, clear entry flags if now unused. */
398 undo_out:
399 spin_lock(&rd->grant_table->lock);
401 act = &active_entry(rd->grant_table, op->ref);
402 sha = &shared_entry(rd->grant_table, op->ref);
404 if ( op->flags & GNTMAP_device_map )
405 act->pin -= (op->flags & GNTMAP_readonly) ?
406 GNTPIN_devr_inc : GNTPIN_devw_inc;
407 if ( op->flags & GNTMAP_host_map )
408 act->pin -= (op->flags & GNTMAP_readonly) ?
409 GNTPIN_hstr_inc : GNTPIN_hstw_inc;
411 if ( !(op->flags & GNTMAP_readonly) &&
412 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
413 gnttab_clear_flag(_GTF_writing, &sha->flags);
415 if ( !act->pin )
416 gnttab_clear_flag(_GTF_reading, &sha->flags);
418 unlock_out:
419 spin_unlock(&rd->grant_table->lock);
420 op->status = rc;
421 put_maptrack_handle(ld->grant_table, handle);
422 rcu_unlock_domain(rd);
423 }
425 static long
426 gnttab_map_grant_ref(
427 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
428 {
429 int i;
430 struct gnttab_map_grant_ref op;
432 for ( i = 0; i < count; i++ )
433 {
434 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
435 return -EFAULT;
436 __gnttab_map_grant_ref(&op);
437 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
438 return -EFAULT;
439 }
441 return 0;
442 }
/*
 * Shared first phase of unmap_grant_ref / unmap_and_replace: validate
 * the maptrack handle, drop the device/host pin counts, and tear down
 * the host mapping (or iomem access permission).  Page type/reference
 * counts are released later by __gnttab_unmap_common_complete(), after
 * the caller has flushed TLBs.
 */
444 static void
445 __gnttab_unmap_common(
446 struct gnttab_unmap_common *op)
447 {
448 domid_t dom;
449 struct domain *ld, *rd;
450 struct active_grant_entry *act;
451 grant_entry_t *sha;
452 s16 rc = 0;
454 ld = current->domain;
456 op->frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);
458 if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
459 {
460 gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
461 op->status = GNTST_bad_handle;
462 return;
463 }
465 op->map = &maptrack_entry(ld->grant_table, op->handle);
467 if ( unlikely(!op->map->flags) )
468 {
469 gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
470 op->status = GNTST_bad_handle;
471 return;
472 }
474 dom = op->map->domid;
475 op->flags = op->map->flags;
477 if ( unlikely((op->rd = rd = rcu_lock_domain_by_id(dom)) == NULL) )
478 {
479 /* This can happen when a grant is implicitly unmapped. */
480 gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
481 domain_crash(ld); /* naughty... */
482 return;
483 }
485 rc = xsm_grant_unmapref(ld, rd);
486 if ( rc )
487 {
488 rcu_unlock_domain(rd);
489 op->status = GNTST_permission_denied;
490 return;
491 }
493 TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);
495 spin_lock(&rd->grant_table->lock);
497 act = &active_entry(rd->grant_table, op->map->ref);
498 sha = &shared_entry(rd->grant_table, op->map->ref);
/* frame == 0 means: take the frame from the active entry. */
500 if ( op->frame == 0 )
501 {
502 op->frame = act->frame;
503 }
504 else
505 {
506 if ( unlikely(op->frame != act->frame) )
507 PIN_FAIL(unmap_out, GNTST_general_error,
508 "Bad frame number doesn't match gntref. (%lx != %lx)\n",
509 op->frame, act->frame);
510 if ( op->flags & GNTMAP_device_map )
511 {
512 ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
513 op->map->flags &= ~GNTMAP_device_map;
514 if ( op->flags & GNTMAP_readonly )
515 act->pin -= GNTPIN_devr_inc;
516 else
517 act->pin -= GNTPIN_devw_inc;
518 }
519 }
521 if ( op->flags & GNTMAP_host_map )
522 {
523 if ( (op->host_addr != 0) )
524 {
525 if ( (rc = replace_grant_host_mapping(op->host_addr,
526 op->frame, op->new_addr,
527 op->flags)) < 0 )
528 goto unmap_out;
529 }
/* An iomem frame has no host PTE to replace; revoke the I/O permission. */
530 else if ( is_iomem_page(op->frame) &&
531 iomem_access_permitted(ld, op->frame, op->frame) )
532 {
533 if ( (rc = iomem_deny_access(ld, op->frame, op->frame)) < 0 )
534 goto unmap_out;
535 }
537 ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
538 op->map->flags &= ~GNTMAP_host_map;
539 if ( op->flags & GNTMAP_readonly )
540 act->pin -= GNTPIN_hstr_inc;
541 else
542 act->pin -= GNTPIN_hstw_inc;
543 }
545 /* If just unmapped a writable mapping, mark as dirtied */
546 if ( !(op->flags & GNTMAP_readonly) )
547 gnttab_mark_dirty(rd, op->frame);
549 unmap_out:
550 op->status = rc;
551 spin_unlock(&rd->grant_table->lock);
552 rcu_unlock_domain(rd);
553 }
/*
 * Second phase of unmap, run after the batch TLB flush: release the
 * page (and type) references taken at map time, free the maptrack
 * entry once both map types are gone, and clear GTF_writing /
 * GTF_reading in the shared entry when the corresponding pins drop
 * to zero.
 */
555 static void
556 __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
557 {
558 struct domain *ld, *rd;
559 struct active_grant_entry *act;
560 grant_entry_t *sha;
562 rd = op->rd;
564 if ( rd == NULL )
565 {
566 /*
567 * Suggests that __gnttab_unmap_common failed in
568 * rcu_lock_domain_by_id() or earlier, and so we have nothing
569 * to complete
570 */
571 return;
572 }
574 ld = current->domain;
576 rcu_lock_domain(rd);
577 spin_lock(&rd->grant_table->lock);
579 act = &active_entry(rd->grant_table, op->map->ref);
580 sha = &shared_entry(rd->grant_table, op->map->ref);
582 if ( unlikely(op->frame != act->frame) )
583 {
584 /*
585 * Suggests that __gnttab_unmap_common failed early and so
586 * nothing further to do
587 */
588 goto unmap_out;
589 }
591 if ( op->flags & GNTMAP_device_map )
592 {
593 if ( op->flags & GNTMAP_readonly )
594 put_page(mfn_to_page(op->frame));
595 else
596 put_page_and_type(mfn_to_page(op->frame));
597 }
599 if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
600 {
601 if ( op->status != 0 )
602 {
603 /*
604 * Suggests that __gnttab_unmap_common failed in
605 * replace_grant_host_mapping() so nothing further to do
606 */
607 goto unmap_out;
608 }
610 if ( op->flags & GNTMAP_readonly )
611 put_page(mfn_to_page(op->frame));
612 else
613 put_page_and_type(mfn_to_page(op->frame));
614 }
/* Both map types gone: recycle the local maptrack entry. */
616 if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
617 {
618 op->map->flags = 0;
619 put_maptrack_handle(ld->grant_table, op->handle);
620 }
622 if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
623 !(op->flags & GNTMAP_readonly) )
624 gnttab_clear_flag(_GTF_writing, &sha->flags);
626 if ( act->pin == 0 )
627 gnttab_clear_flag(_GTF_reading, &sha->flags);
629 unmap_out:
630 spin_unlock(&rd->grant_table->lock);
631 rcu_unlock_domain(rd);
632 }
/*
 * Translate a gnttab_unmap_grant_ref request into the common unmap
 * structure and run the first unmap phase; the caller later runs
 * __gnttab_unmap_common_complete() on the same structure.
 */
634 static void
635 __gnttab_unmap_grant_ref(
636 struct gnttab_unmap_grant_ref *op,
637 struct gnttab_unmap_common *common)
638 {
639 common->host_addr = op->host_addr;
640 common->dev_bus_addr = op->dev_bus_addr;
641 common->handle = op->handle;
643 /* Initialise these in case common contains old state */
644 common->new_addr = 0;
645 common->rd = NULL;
647 __gnttab_unmap_common(common);
648 op->status = common->status;
649 }
/*
 * GNTTABOP_unmap_grant_ref batch handler.  Requests are processed in
 * batches of GNTTAB_UNMAP_BATCH_SIZE: run phase one per entry, do a
 * single TLB flush for the batch, then run the completion phase per
 * entry.  The fault path still flushes and completes the entries that
 * were already started before returning -EFAULT.
 */
652 static long
653 gnttab_unmap_grant_ref(
654 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
655 {
656 int i, c, partial_done, done = 0;
657 struct gnttab_unmap_grant_ref op;
658 struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
660 while ( count != 0 )
661 {
662 c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
663 partial_done = 0;
665 for ( i = 0; i < c; i++ )
666 {
667 if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
668 goto fault;
669 __gnttab_unmap_grant_ref(&op, &(common[i]));
670 ++partial_done;
671 if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
672 goto fault;
673 }
675 flush_tlb_mask(current->domain->domain_dirty_cpumask);
677 for ( i = 0; i < partial_done; i++ )
678 __gnttab_unmap_common_complete(&(common[i]));
680 count -= c;
681 done += c;
682 }
684 return 0;
686 fault:
687 flush_tlb_mask(current->domain->domain_dirty_cpumask);
689 for ( i = 0; i < partial_done; i++ )
690 __gnttab_unmap_common_complete(&(common[i]));
691 return -EFAULT;
692 }
/*
 * Translate a gnttab_unmap_and_replace request into the common unmap
 * structure (here new_addr is meaningful and dev_bus_addr is not) and
 * run the first unmap phase.
 */
694 static void
695 __gnttab_unmap_and_replace(
696 struct gnttab_unmap_and_replace *op,
697 struct gnttab_unmap_common *common)
698 {
699 common->host_addr = op->host_addr;
700 common->new_addr = op->new_addr;
701 common->handle = op->handle;
703 /* Initialise these in case common contains old state */
704 common->dev_bus_addr = 0;
705 common->rd = NULL;
707 __gnttab_unmap_common(common);
708 op->status = common->status;
709 }
/*
 * GNTTABOP_unmap_and_replace batch handler.  Same batching structure
 * as gnttab_unmap_grant_ref(): phase one per entry, one TLB flush per
 * batch, then completion per entry; the fault path completes what was
 * started before returning -EFAULT.
 */
711 static long
712 gnttab_unmap_and_replace(
713 XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) uop, unsigned int count)
714 {
715 int i, c, partial_done, done = 0;
716 struct gnttab_unmap_and_replace op;
717 struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
719 while ( count != 0 )
720 {
721 c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
722 partial_done = 0;
724 for ( i = 0; i < c; i++ )
725 {
726 if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
727 goto fault;
728 __gnttab_unmap_and_replace(&op, &(common[i]));
729 ++partial_done;
730 if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
731 goto fault;
732 }
734 flush_tlb_mask(current->domain->domain_dirty_cpumask);
736 for ( i = 0; i < partial_done; i++ )
737 __gnttab_unmap_common_complete(&(common[i]));
739 count -= c;
740 done += c;
741 }
743 return 0;
745 fault:
746 flush_tlb_mask(current->domain->domain_dirty_cpumask);
748 for ( i = 0; i < partial_done; i++ )
749 __gnttab_unmap_common_complete(&(common[i]));
750 return -EFAULT;
751 }
/*
 * Grow domain @d's grant table to @req_nr_frames shared frames,
 * allocating the corresponding active frames first.  Returns 1 on
 * success, 0 on allocation failure (after freeing the frames this
 * call allocated).
 */
753 int
754 gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
755 {
756 /* d's grant table lock must be held by the caller */
758 struct grant_table *gt = d->grant_table;
759 unsigned int i;
761 ASSERT(req_nr_frames <= max_nr_grant_frames);
763 gdprintk(XENLOG_INFO,
764 "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
765 d->domain_id, nr_grant_frames(gt), req_nr_frames);
767 /* Active */
768 for ( i = nr_active_grant_frames(gt);
769 i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
770 {
771 if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
772 goto active_alloc_failed;
773 clear_page(gt->active[i]);
774 }
776 /* Shared */
777 for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
778 {
779 if ( (gt->shared[i] = alloc_xenheap_page()) == NULL )
780 goto shared_alloc_failed;
781 clear_page(gt->shared[i]);
782 }
784 /* Share the new shared frames with the recipient domain */
785 for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
786 gnttab_create_shared_page(d, gt, i);
788 gt->nr_grant_frames = req_nr_frames;
790 return 1;
/*
 * Error unwind.  NOTE(review): both loops walk the whole requested
 * range, so entries past the failed allocation are freed too —
 * presumably those slots are NULL and free_xenheap_page() tolerates
 * NULL; confirm against the allocator before changing this.
 */
792 shared_alloc_failed:
793 for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
794 {
795 free_xenheap_page(gt->shared[i]);
796 gt->shared[i] = NULL;
797 }
798 active_alloc_failed:
799 for ( i = nr_active_grant_frames(gt);
800 i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
801 {
802 free_xenheap_page(gt->active[i]);
803 gt->active[i] = NULL;
804 }
805 gdprintk(XENLOG_INFO, "Allocation failure when expanding grant table.\n");
806 return 0;
807 }
/*
 * GNTTABOP_setup_table: ensure the target domain's grant table covers
 * op.nr_frames frames (growing it if needed) and return the list of
 * shared-frame GMFNs to the guest.  Only a privileged domain may set
 * up another domain's table.  Per-op status goes in op.status; the
 * hypercall returns -EFAULT only on guest-memory copy failures.
 */
809 static long
810 gnttab_setup_table(
811 XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
812 {
813 struct gnttab_setup_table op;
814 struct domain *d;
815 int i;
816 unsigned long gmfn;
817 domid_t dom;
819 if ( count != 1 )
820 return -EINVAL;
822 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
823 {
824 gdprintk(XENLOG_INFO, "Fault while reading gnttab_setup_table_t.\n");
825 return -EFAULT;
826 }
828 if ( unlikely(op.nr_frames > max_nr_grant_frames) )
829 {
830 gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
831 " per domain.\n",
832 max_nr_grant_frames);
833 op.status = GNTST_general_error;
834 goto out;
835 }
837 dom = op.dom;
838 if ( dom == DOMID_SELF )
839 {
840 dom = current->domain->domain_id;
841 }
842 else if ( unlikely(!IS_PRIV(current->domain)) )
843 {
844 op.status = GNTST_permission_denied;
845 goto out;
846 }
848 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
849 {
850 gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
851 op.status = GNTST_bad_domain;
852 goto out;
853 }
855 if ( xsm_grant_setup(current->domain, d) )
856 {
857 rcu_unlock_domain(d);
858 op.status = GNTST_permission_denied;
859 goto out;
860 }
862 spin_lock(&d->grant_table->lock);
/* Grow the table under its lock if the request exceeds the current size. */
864 if ( (op.nr_frames > nr_grant_frames(d->grant_table)) &&
865 !gnttab_grow_table(d, op.nr_frames) )
866 {
867 gdprintk(XENLOG_INFO,
868 "Expand grant table to %d failed. Current: %d Max: %d.\n",
869 op.nr_frames,
870 nr_grant_frames(d->grant_table),
871 max_nr_grant_frames);
872 op.status = GNTST_general_error;
873 goto setup_unlock_out;
874 }
876 op.status = GNTST_okay;
877 for ( i = 0; i < op.nr_frames; i++ )
878 {
879 gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
880 (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
881 }
883 setup_unlock_out:
884 spin_unlock(&d->grant_table->lock);
886 rcu_unlock_domain(d);
888 out:
889 if ( unlikely(copy_to_guest(uop, &op, 1)) )
890 return -EFAULT;
892 return 0;
893 }
/*
 * GNTTABOP_query_size: report the target domain's current and maximum
 * number of grant-table frames.  Mirrors gnttab_setup_table()'s
 * privilege checks and locking.
 */
895 static long
896 gnttab_query_size(
897 XEN_GUEST_HANDLE(gnttab_query_size_t) uop, unsigned int count)
898 {
899 struct gnttab_query_size op;
900 struct domain *d;
901 domid_t dom;
902 int rc;
904 if ( count != 1 )
905 return -EINVAL;
907 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
908 {
909 gdprintk(XENLOG_INFO, "Fault while reading gnttab_query_size_t.\n");
910 return -EFAULT;
911 }
913 dom = op.dom;
914 if ( dom == DOMID_SELF )
915 {
916 dom = current->domain->domain_id;
917 }
918 else if ( unlikely(!IS_PRIV(current->domain)) )
919 {
920 op.status = GNTST_permission_denied;
921 goto query_out;
922 }
924 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
925 {
926 gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
927 op.status = GNTST_bad_domain;
928 goto query_out;
929 }
931 rc = xsm_grant_query_size(current->domain, d);
932 if ( rc )
933 {
934 rcu_unlock_domain(d);
935 op.status = GNTST_permission_denied;
936 goto query_out;
937 }
939 spin_lock(&d->grant_table->lock);
941 op.nr_frames = nr_grant_frames(d->grant_table);
942 op.max_nr_frames = max_nr_grant_frames;
943 op.status = GNTST_okay;
945 spin_unlock(&d->grant_table->lock);
947 rcu_unlock_domain(d);
949 query_out:
950 if ( unlikely(copy_to_guest(uop, &op, 1)) )
951 return -EFAULT;
953 return 0;
954 }
956 /*
957 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
958 * ownership of a page frame. If so, lock down the grant entry.
959 */
/*
 * Marks the entry GTF_transfer_committed via cmpxchg with bounded
 * retries, as elsewhere in this file.  Returns 1 on success, 0 on
 * failure.
 *
 * NOTE(review): this web extraction dropped some brace-only lines
 * below (hg line numbers jump over 1010, 1013, 1016 and the closing
 * brace 1024), so the retry branch and loop body read as unbraced
 * here.  Text preserved verbatim — consult the original revision
 * before editing.
 */
960 static int
961 gnttab_prepare_for_transfer(
962 struct domain *rd, struct domain *ld, grant_ref_t ref)
963 {
964 struct grant_table *rgt;
965 struct grant_entry *sha;
966 union grant_combo scombo, prev_scombo, new_scombo;
967 int retries = 0;
969 if ( unlikely((rgt = rd->grant_table) == NULL) )
970 {
971 gdprintk(XENLOG_INFO, "Dom %d has no grant table.\n", rd->domain_id);
972 return 0;
973 }
975 spin_lock(&rgt->lock);
977 if ( unlikely(ref >= nr_grant_entries(rd->grant_table)) )
978 {
979 gdprintk(XENLOG_INFO,
980 "Bad grant reference (%d) for transfer to domain(%d).\n",
981 ref, rd->domain_id);
982 goto fail;
983 }
985 sha = &shared_entry(rgt, ref);
987 scombo.word = *(u32 *)&sha->flags;
989 for ( ; ; )
990 {
991 if ( unlikely(scombo.shorts.flags != GTF_accept_transfer) ||
992 unlikely(scombo.shorts.domid != ld->domain_id) )
993 {
994 gdprintk(XENLOG_INFO, "Bad flags (%x) or dom (%d). "
995 "(NB. expected dom %d)\n",
996 scombo.shorts.flags, scombo.shorts.domid,
997 ld->domain_id);
998 goto fail;
999 }
1001 new_scombo = scombo;
1002 new_scombo.shorts.flags |= GTF_transfer_committed;
1004 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
1005 scombo.word, new_scombo.word);
1006 if ( likely(prev_scombo.word == scombo.word) )
1007 break;
1009 if ( retries++ == 4 )
1011 gdprintk(XENLOG_WARNING, "Shared grant entry is unstable.\n");
1012 goto fail;
1015 scombo = prev_scombo;
1018 spin_unlock(&rgt->lock);
1019 return 1;
1021 fail:
1022 spin_unlock(&rgt->lock);
1023 return 0;
/*
 * GNTTABOP_transfer: for each request, steal the named page from the
 * caller, hand ownership to the target domain (subject to its
 * reservation headroom and a committed grant entry), then complete
 * the shared entry with the new frame and GTF_transfer_completed.
 *
 * NOTE(review): brace-only lines were lost from this function in the
 * web extraction (hg numbering jumps, e.g. over 1029, 1039, 1042,
 * 1052, ...).  Text preserved verbatim — consult the original
 * revision before editing.
 */
1026 static long
1027 gnttab_transfer(
1028 XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
1030 struct domain *d = current->domain;
1031 struct domain *e;
1032 struct page_info *page;
1033 int i;
1034 grant_entry_t *sha;
1035 struct gnttab_transfer gop;
1036 unsigned long mfn;
1038 for ( i = 0; i < count; i++ )
1040 /* Read from caller address space. */
1041 if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
1043 gdprintk(XENLOG_INFO, "gnttab_transfer: error reading req %d/%d\n",
1044 i, count);
1045 return -EFAULT;
1048 mfn = gmfn_to_mfn(d, gop.mfn);
1050 /* Check the passed page frame for basic validity. */
1051 if ( unlikely(!mfn_valid(mfn)) )
1053 gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
1054 (unsigned long)gop.mfn);
1055 gop.status = GNTST_bad_page;
1056 goto copyback;
1059 page = mfn_to_page(mfn);
1060 if ( unlikely(is_xen_heap_frame(page)) )
1062 gdprintk(XENLOG_INFO, "gnttab_transfer: xen frame %lx\n",
1063 (unsigned long)gop.mfn);
1064 gop.status = GNTST_bad_page;
1065 goto copyback;
1068 if ( steal_page(d, page, 0) < 0 )
1070 gop.status = GNTST_bad_page;
1071 goto copyback;
1074 /* Find the target domain. */
1075 if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
1077 gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
1078 gop.domid);
1079 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1080 free_domheap_page(page);
1081 gop.status = GNTST_bad_domain;
1082 goto copyback;
1085 if ( xsm_grant_transfer(d, e) )
1087 rcu_unlock_domain(e);
1088 gop.status = GNTST_permission_denied;
1089 goto copyback;
1092 spin_lock(&e->page_alloc_lock);
1094 /*
1095 * Check that 'e' will accept the page and has reservation
1096 * headroom. Also, a domain mustn't have PGC_allocated
1097 * pages when it is dying.
1098 */
1099 if ( unlikely(e->is_dying) ||
1100 unlikely(e->tot_pages >= e->max_pages) ||
1101 unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
1103 if ( !e->is_dying )
1104 gdprintk(XENLOG_INFO, "gnttab_transfer: "
1105 "Transferee has no reservation "
1106 "headroom (%d,%d) or provided a bad grant ref (%08x) "
1107 "or is dying (%d)\n",
1108 e->tot_pages, e->max_pages, gop.ref, e->is_dying);
1109 spin_unlock(&e->page_alloc_lock);
1110 rcu_unlock_domain(e);
1111 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1112 free_domheap_page(page);
1113 gop.status = GNTST_general_error;
1114 goto copyback;
1117 /* Okay, add the page to 'e'. */
1118 if ( unlikely(e->tot_pages++ == 0) )
1119 get_knownalive_domain(e);
1120 list_add_tail(&page->list, &e->page_list);
1121 page_set_owner(page, e);
1123 spin_unlock(&e->page_alloc_lock);
1125 TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
1127 /* Tell the guest about its new page frame. */
1128 spin_lock(&e->grant_table->lock);
1130 sha = &shared_entry(e->grant_table, gop.ref);
1131 guest_physmap_add_page(e, sha->frame, mfn);
/* wmb() orders the frame update before GTF_transfer_completed is visible. */
1132 sha->frame = mfn;
1133 wmb();
1134 sha->flags |= GTF_transfer_completed;
1136 spin_unlock(&e->grant_table->lock);
1138 rcu_unlock_domain(e);
1140 gop.status = GNTST_okay;
1142 copyback:
1143 if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
1145 gdprintk(XENLOG_INFO, "gnttab_transfer: error writing resp "
1146 "%d/%d\n", i, count);
1147 return -EFAULT;
1151 return 0;
/*
 * NOTE(review): brace-only lines were lost from this function in the
 * web extraction (hg numbering jumps over 1159, 1171, 1173, 1175,
 * 1181/1182, 1187).  Text preserved verbatim — consult the original
 * revision before editing.
 */
1154 /* Undo __acquire_grant_for_copy. Again, this has no effect on page
1155 type and reference counts. */
1156 static void
1157 __release_grant_for_copy(
1158 struct domain *rd, unsigned long gref, int readonly)
1160 grant_entry_t *sha;
1161 struct active_grant_entry *act;
1162 unsigned long r_frame;
1164 spin_lock(&rd->grant_table->lock);
1166 act = &active_entry(rd->grant_table, gref);
1167 sha = &shared_entry(rd->grant_table, gref);
1168 r_frame = act->frame;
1170 if ( readonly )
1172 act->pin -= GNTPIN_hstr_inc;
1174 else
1176 gnttab_mark_dirty(rd, r_frame);
1178 act->pin -= GNTPIN_hstw_inc;
1179 if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) )
1180 gnttab_clear_flag(_GTF_writing, &sha->flags);
1183 if ( !act->pin )
1184 gnttab_clear_flag(_GTF_reading, &sha->flags);
1186 spin_unlock(&rd->grant_table->lock);
/*
 * NOTE(review): brace-only lines were lost from this function in the
 * web extraction (hg numbering jumps, e.g. over 1197, 1223, 1227,
 * 1242, 1262, 1265...).  Text preserved verbatim — consult the
 * original revision before editing.  The pinning logic parallels
 * __gnttab_map_grant_ref()'s cmpxchg loop above.
 */
1189 /* Grab a frame number from a grant entry and update the flags and pin
1190 count as appropriate. Note that this does *not* update the page
1191 type or reference counts, and does not check that the mfn is
1192 actually valid. */
1193 static int
1194 __acquire_grant_for_copy(
1195 struct domain *rd, unsigned long gref, int readonly,
1196 unsigned long *frame)
1198 grant_entry_t *sha;
1199 struct active_grant_entry *act;
1200 s16 rc = GNTST_okay;
1201 int retries = 0;
1202 union grant_combo scombo, prev_scombo, new_scombo;
1204 spin_lock(&rd->grant_table->lock);
1206 if ( unlikely(gref >= nr_grant_entries(rd->grant_table)) )
1207 PIN_FAIL(unlock_out, GNTST_bad_gntref,
1208 "Bad grant reference %ld\n", gref);
1210 act = &active_entry(rd->grant_table, gref);
1211 sha = &shared_entry(rd->grant_table, gref);
1213 /* If already pinned, check the active domid and avoid refcnt overflow. */
1214 if ( act->pin &&
1215 ((act->domid != current->domain->domain_id) ||
1216 (act->pin & 0x80808080U) != 0) )
1217 PIN_FAIL(unlock_out, GNTST_general_error,
1218 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
1219 act->domid, current->domain->domain_id, act->pin);
1221 if ( !act->pin ||
1222 (!readonly && !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask))) )
1224 scombo.word = *(u32 *)&sha->flags;
1226 for ( ; ; )
1228 /* If not already pinned, check the grant domid and type. */
1229 if ( !act->pin &&
1230 (((scombo.shorts.flags & GTF_type_mask) !=
1231 GTF_permit_access) ||
1232 (scombo.shorts.domid != current->domain->domain_id)) )
1233 PIN_FAIL(unlock_out, GNTST_general_error,
1234 "Bad flags (%x) or dom (%d). (expected dom %d)\n",
1235 scombo.shorts.flags, scombo.shorts.domid,
1236 current->domain->domain_id);
1238 new_scombo = scombo;
1239 new_scombo.shorts.flags |= GTF_reading;
1241 if ( !readonly )
1243 new_scombo.shorts.flags |= GTF_writing;
1244 if ( unlikely(scombo.shorts.flags & GTF_readonly) )
1245 PIN_FAIL(unlock_out, GNTST_general_error,
1246 "Attempt to write-pin a r/o grant entry.\n");
1249 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
1250 scombo.word, new_scombo.word);
1251 if ( likely(prev_scombo.word == scombo.word) )
1252 break;
1254 if ( retries++ == 4 )
1255 PIN_FAIL(unlock_out, GNTST_general_error,
1256 "Shared grant entry is unstable.\n");
1258 scombo = prev_scombo;
1261 if ( !act->pin )
1263 act->domid = scombo.shorts.domid;
1264 act->frame = gmfn_to_mfn(rd, sha->frame);
1268 act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;
1270 *frame = act->frame;
1272 unlock_out:
1273 spin_unlock(&rd->grant_table->lock);
1274 return rc;
1277 static void
1278 __gnttab_copy(
1279 struct gnttab_copy *op)
1281 struct domain *sd = NULL, *dd = NULL;
1282 unsigned long s_frame, d_frame;
1283 char *sp, *dp;
1284 s16 rc = GNTST_okay;
1285 int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
1286 int src_is_gref, dest_is_gref;
1288 if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
1289 ((op->dest.offset + op->len) > PAGE_SIZE) )
1290 PIN_FAIL(error_out, GNTST_bad_copy_arg, "copy beyond page area.\n");
1292 src_is_gref = op->flags & GNTCOPY_source_gref;
1293 dest_is_gref = op->flags & GNTCOPY_dest_gref;
1295 if ( (op->source.domid != DOMID_SELF && !src_is_gref ) ||
1296 (op->dest.domid != DOMID_SELF && !dest_is_gref) )
1297 PIN_FAIL(error_out, GNTST_permission_denied,
1298 "only allow copy-by-mfn for DOMID_SELF.\n");
1300 if ( op->source.domid == DOMID_SELF )
1301 sd = rcu_lock_current_domain();
1302 else if ( (sd = rcu_lock_domain_by_id(op->source.domid)) == NULL )
1303 PIN_FAIL(error_out, GNTST_bad_domain,
1304 "couldn't find %d\n", op->source.domid);
1306 if ( op->dest.domid == DOMID_SELF )
1307 dd = rcu_lock_current_domain();
1308 else if ( (dd = rcu_lock_domain_by_id(op->dest.domid)) == NULL )
1309 PIN_FAIL(error_out, GNTST_bad_domain,
1310 "couldn't find %d\n", op->dest.domid);
1312 rc = xsm_grant_copy(sd, dd);
1313 if ( rc )
1315 rc = GNTST_permission_denied;
1316 goto error_out;
1319 if ( src_is_gref )
1321 rc = __acquire_grant_for_copy(sd, op->source.u.ref, 1, &s_frame);
1322 if ( rc != GNTST_okay )
1323 goto error_out;
1324 have_s_grant = 1;
1326 else
1328 s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
1330 if ( unlikely(!mfn_valid(s_frame)) )
1331 PIN_FAIL(error_out, GNTST_general_error,
1332 "source frame %lx invalid.\n", s_frame);
1333 if ( !get_page(mfn_to_page(s_frame), sd) )
1335 if ( !sd->is_dying )
1336 gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
1337 rc = GNTST_general_error;
1338 goto error_out;
1340 have_s_ref = 1;
1342 if ( dest_is_gref )
1344 rc = __acquire_grant_for_copy(dd, op->dest.u.ref, 0, &d_frame);
1345 if ( rc != GNTST_okay )
1346 goto error_out;
1347 have_d_grant = 1;
1349 else
1351 d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
1353 if ( unlikely(!mfn_valid(d_frame)) )
1354 PIN_FAIL(error_out, GNTST_general_error,
1355 "destination frame %lx invalid.\n", d_frame);
1356 if ( !get_page_and_type(mfn_to_page(d_frame), dd, PGT_writable_page) )
1358 if ( !dd->is_dying )
1359 gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
1360 rc = GNTST_general_error;
1361 goto error_out;
1364 sp = map_domain_page(s_frame);
1365 dp = map_domain_page(d_frame);
1367 memcpy(dp + op->dest.offset, sp + op->source.offset, op->len);
1369 unmap_domain_page(dp);
1370 unmap_domain_page(sp);
1372 gnttab_mark_dirty(dd, d_frame);
1374 put_page_and_type(mfn_to_page(d_frame));
1375 error_out:
1376 if ( have_s_ref )
1377 put_page(mfn_to_page(s_frame));
1378 if ( have_s_grant )
1379 __release_grant_for_copy(sd, op->source.u.ref, 1);
1380 if ( have_d_grant )
1381 __release_grant_for_copy(dd, op->dest.u.ref, 0);
1382 if ( sd )
1383 rcu_unlock_domain(sd);
1384 if ( dd )
1385 rcu_unlock_domain(dd);
1386 op->status = rc;
1389 static long
1390 gnttab_copy(
1391 XEN_GUEST_HANDLE(gnttab_copy_t) uop, unsigned int count)
1393 int i;
1394 struct gnttab_copy op;
1396 for ( i = 0; i < count; i++ )
1398 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
1399 return -EFAULT;
1400 __gnttab_copy(&op);
1401 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
1402 return -EFAULT;
1404 return 0;
1407 long
1408 do_grant_table_op(
1409 unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
1411 long rc;
1412 struct domain *d = current->domain;
1414 if ( count > 512 )
1415 return -EINVAL;
1417 LOCK_BIGLOCK(d);
1419 rc = -EFAULT;
1420 switch ( cmd )
1422 case GNTTABOP_map_grant_ref:
1424 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
1425 guest_handle_cast(uop, gnttab_map_grant_ref_t);
1426 if ( unlikely(!guest_handle_okay(map, count)) )
1427 goto out;
1428 rc = gnttab_map_grant_ref(map, count);
1429 break;
1431 case GNTTABOP_unmap_grant_ref:
1433 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
1434 guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
1435 if ( unlikely(!guest_handle_okay(unmap, count)) )
1436 goto out;
1437 rc = gnttab_unmap_grant_ref(unmap, count);
1438 break;
1440 case GNTTABOP_unmap_and_replace:
1442 XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) unmap =
1443 guest_handle_cast(uop, gnttab_unmap_and_replace_t);
1444 if ( unlikely(!guest_handle_okay(unmap, count)) )
1445 goto out;
1446 rc = -ENOSYS;
1447 if ( unlikely(!replace_grant_supported()) )
1448 goto out;
1449 rc = gnttab_unmap_and_replace(unmap, count);
1450 break;
1452 case GNTTABOP_setup_table:
1454 rc = gnttab_setup_table(
1455 guest_handle_cast(uop, gnttab_setup_table_t), count);
1456 break;
1458 case GNTTABOP_transfer:
1460 XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
1461 guest_handle_cast(uop, gnttab_transfer_t);
1462 if ( unlikely(!guest_handle_okay(transfer, count)) )
1463 goto out;
1464 rc = gnttab_transfer(transfer, count);
1465 break;
1467 case GNTTABOP_copy:
1469 XEN_GUEST_HANDLE(gnttab_copy_t) copy =
1470 guest_handle_cast(uop, gnttab_copy_t);
1471 if ( unlikely(!guest_handle_okay(copy, count)) )
1472 goto out;
1473 rc = gnttab_copy(copy, count);
1474 break;
1476 case GNTTABOP_query_size:
1478 rc = gnttab_query_size(
1479 guest_handle_cast(uop, gnttab_query_size_t), count);
1480 break;
1482 default:
1483 rc = -ENOSYS;
1484 break;
1487 out:
1488 UNLOCK_BIGLOCK(d);
1490 return rc;
1493 #ifdef CONFIG_COMPAT
1494 #include "compat/grant_table.c"
1495 #endif
1497 static unsigned int max_nr_active_grant_frames(void)
1499 return (((max_nr_grant_frames * (PAGE_SIZE / sizeof(grant_entry_t))) +
1500 ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
1501 / (PAGE_SIZE / sizeof(struct active_grant_entry)));
1504 int
1505 grant_table_create(
1506 struct domain *d)
1508 struct grant_table *t;
1509 int i;
1511 /* If this sizeof assertion fails, fix the function: shared_index */
1512 ASSERT(sizeof(grant_entry_t) == 8);
1514 if ( (t = xmalloc(struct grant_table)) == NULL )
1515 goto no_mem_0;
1517 /* Simple stuff. */
1518 memset(t, 0, sizeof(*t));
1519 spin_lock_init(&t->lock);
1520 t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
1522 /* Active grant table. */
1523 if ( (t->active = xmalloc_array(struct active_grant_entry *,
1524 max_nr_active_grant_frames())) == NULL )
1525 goto no_mem_1;
1526 memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
1527 for ( i = 0;
1528 i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
1530 if ( (t->active[i] = alloc_xenheap_page()) == NULL )
1531 goto no_mem_2;
1532 clear_page(t->active[i]);
1535 /* Tracking of mapped foreign frames table */
1536 if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
1537 max_nr_maptrack_frames())) == NULL )
1538 goto no_mem_2;
1539 memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
1540 if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
1541 goto no_mem_3;
1542 clear_page(t->maptrack[0]);
1543 t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
1544 for ( i = 0; i < t->maptrack_limit; i++ )
1545 t->maptrack[0][i].ref = i+1;
1547 /* Shared grant table. */
1548 if ( (t->shared = xmalloc_array(struct grant_entry *,
1549 max_nr_grant_frames)) == NULL )
1550 goto no_mem_3;
1551 memset(t->shared, 0, max_nr_grant_frames * sizeof(t->shared[0]));
1552 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1554 if ( (t->shared[i] = alloc_xenheap_page()) == NULL )
1555 goto no_mem_4;
1556 clear_page(t->shared[i]);
1559 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1560 gnttab_create_shared_page(d, t, i);
1562 /* Okay, install the structure. */
1563 d->grant_table = t;
1564 return 0;
1566 no_mem_4:
1567 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1568 free_xenheap_page(t->shared[i]);
1569 xfree(t->shared);
1570 no_mem_3:
1571 free_xenheap_page(t->maptrack[0]);
1572 xfree(t->maptrack);
1573 no_mem_2:
1574 for ( i = 0;
1575 i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
1576 free_xenheap_page(t->active[i]);
1577 xfree(t->active);
1578 no_mem_1:
1579 xfree(t);
1580 no_mem_0:
1581 return -ENOMEM;
1584 void
1585 gnttab_release_mappings(
1586 struct domain *d)
1588 struct grant_table *gt = d->grant_table;
1589 struct grant_mapping *map;
1590 grant_ref_t ref;
1591 grant_handle_t handle;
1592 struct domain *rd;
1593 struct active_grant_entry *act;
1594 struct grant_entry *sha;
1595 int rc;
1597 BUG_ON(!d->is_dying);
1599 for ( handle = 0; handle < gt->maptrack_limit; handle++ )
1601 map = &maptrack_entry(gt, handle);
1602 if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
1603 continue;
1605 ref = map->ref;
1607 gdprintk(XENLOG_INFO, "Grant release (%hu) ref:(%hu) "
1608 "flags:(%x) dom:(%hu)\n",
1609 handle, ref, map->flags, map->domid);
1611 rd = rcu_lock_domain_by_id(map->domid);
1612 if ( rd == NULL )
1614 /* Nothing to clear up... */
1615 map->flags = 0;
1616 continue;
1619 spin_lock(&rd->grant_table->lock);
1621 act = &active_entry(rd->grant_table, ref);
1622 sha = &shared_entry(rd->grant_table, ref);
1624 if ( map->flags & GNTMAP_readonly )
1626 if ( map->flags & GNTMAP_device_map )
1628 BUG_ON(!(act->pin & GNTPIN_devr_mask));
1629 act->pin -= GNTPIN_devr_inc;
1630 put_page(mfn_to_page(act->frame));
1633 if ( map->flags & GNTMAP_host_map )
1635 BUG_ON(!(act->pin & GNTPIN_hstr_mask));
1636 act->pin -= GNTPIN_hstr_inc;
1637 gnttab_release_put_page(mfn_to_page(act->frame));
1640 else
1642 if ( map->flags & GNTMAP_device_map )
1644 BUG_ON(!(act->pin & GNTPIN_devw_mask));
1645 act->pin -= GNTPIN_devw_inc;
1646 put_page_and_type(mfn_to_page(act->frame));
1649 if ( map->flags & GNTMAP_host_map )
1651 BUG_ON(!(act->pin & GNTPIN_hstw_mask));
1652 act->pin -= GNTPIN_hstw_inc;
1654 if ( is_iomem_page(act->frame) &&
1655 iomem_access_permitted(rd, act->frame, act->frame) )
1656 rc = iomem_deny_access(rd, act->frame, act->frame);
1657 else
1658 gnttab_release_put_page_and_type(mfn_to_page(act->frame));
1661 if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
1662 gnttab_clear_flag(_GTF_writing, &sha->flags);
1665 if ( act->pin == 0 )
1666 gnttab_clear_flag(_GTF_reading, &sha->flags);
1668 spin_unlock(&rd->grant_table->lock);
1670 rcu_unlock_domain(rd);
1672 map->flags = 0;
1677 void
1678 grant_table_destroy(
1679 struct domain *d)
1681 struct grant_table *t = d->grant_table;
1682 int i;
1684 if ( t == NULL )
1685 return;
1687 for ( i = 0; i < nr_grant_frames(t); i++ )
1688 free_xenheap_page(t->shared[i]);
1689 xfree(t->shared);
1691 for ( i = 0; i < nr_maptrack_frames(t); i++ )
1692 free_xenheap_page(t->maptrack[i]);
1693 xfree(t->maptrack);
1695 for ( i = 0; i < nr_active_grant_frames(t); i++ )
1696 free_xenheap_page(t->active[i]);
1697 xfree(t->active);
1699 xfree(t);
1700 d->grant_table = NULL;
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */