debuggers.hg

view xen/common/grant_table.c @ 16410:37be0bb60518

Revert 16067:9f9f9b68cd08a03fc8cfad9f5ab702e50b6b6463.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Tue Nov 13 17:28:44 2007 +0000 (2007-11-13)
parents 5b8730c78454
children 2e5d922b7ee3
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005-2006 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/config.h>
28 #include <xen/iocap.h>
29 #include <xen/lib.h>
30 #include <xen/sched.h>
31 #include <xen/mm.h>
32 #include <xen/trace.h>
33 #include <xen/guest_access.h>
34 #include <xen/domain_page.h>
35 #include <xsm/xsm.h>
37 #ifndef max_nr_grant_frames
38 unsigned int max_nr_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
39 integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
40 #endif
42 /* The maximum number of grant mappings is defined as a multiplier of the
43 * maximum number of grant table entries. This defines the multiplier used.
44 * Pretty arbitrary. [POLICY]
45 */
46 #define MAX_MAPTRACK_TO_GRANTS_RATIO 8
48 /*
49 * The first two members of a grant entry are updated as a combined pair.
50 * The following union allows that to happen in an endian-neutral fashion.
51 */
union grant_combo {
    uint32_t word;      /* Both halves together, for one 32-bit cmpxchg. */
    struct {
        uint16_t flags; /* GTF_* flags half of the shared grant entry. */
        domid_t domid;  /* Granted-domain half of the shared grant entry. */
    } shorts;
};
/* Used to share code between unmap_grant_ref and unmap_and_replace. */
struct gnttab_unmap_common {
    /* Input */
    uint64_t host_addr;     /* Host virtual address (or pte address). */
    uint64_t dev_bus_addr;  /* Device bus address of the mapped frame. */
    uint64_t new_addr;      /* Replacement address (unmap_and_replace only). */
    grant_handle_t handle;  /* Maptrack handle identifying the mapping. */

    /* Return */
    int16_t status;         /* GNTST_* result code. */

    /* Shared state between *_unmap and *_unmap_complete */
    u16 flags;              /* Snapshot of the maptrack entry's GNTMAP_* flags. */
    unsigned long frame;    /* MFN being unmapped. */
    struct grant_mapping *map; /* The maptrack entry for this mapping. */
    struct domain *rd;      /* Granting (remote) domain; NULL if lookup failed. */
};
78 /* Number of unmap operations that are done between each tlb flush */
79 #define GNTTAB_UNMAP_BATCH_SIZE 32
/*
 * Log a warning, set 'rc' (which must be in scope at the call site) to the
 * given GNTST_* error code, and jump to the given cleanup label.
 */
#define PIN_FAIL(_lbl, _rc, _f, _a...)          \
    do {                                        \
        gdprintk(XENLOG_WARNING, _f, ## _a );   \
        rc = (_rc);                             \
        goto _lbl;                              \
    } while ( 0 )

/* Number of maptrack entries that fit in one page. */
#define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
/* Index entry 'e' in the two-level (page-indirect) maptrack table of 't'. */
#define maptrack_entry(t, e) \
    ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
93 static inline unsigned int
94 nr_maptrack_frames(struct grant_table *t)
95 {
96 return t->maptrack_limit / MAPTRACK_PER_PAGE;
97 }
99 static unsigned inline int max_nr_maptrack_frames(void)
100 {
101 return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
102 }
/* Number of grant_entry_t's per shared-table page. */
#define SHGNT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_t))
/* Index shared grant entry 'e' in the two-level shared table of 't'. */
#define shared_entry(t, e) \
    ((t)->shared[(e)/SHGNT_PER_PAGE][(e)%SHGNT_PER_PAGE])
/* Number of active_grant_entry's per active-table page. */
#define ACGNT_PER_PAGE (PAGE_SIZE / sizeof(struct active_grant_entry))
/* Index Xen-private active entry 'e' in the two-level active table of 't'. */
#define active_entry(t, e) \
    ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
/*
 * Pop a free entry off 't's maptrack free list.  Returns the entry index,
 * or -1 if the list is exhausted (caller may then grow the table).
 */
static inline int
__get_maptrack_handle(
    struct grant_table *t)
{
    unsigned int h;
    /* The final entry acts as the free-list terminator and is never used. */
    if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
        return -1;
    /* Free entries are chained through their 'ref' field. */
    t->maptrack_head = maptrack_entry(t, h).ref;
    t->map_count++;
    return h;
}
124 static inline void
125 put_maptrack_handle(
126 struct grant_table *t, int handle)
127 {
128 maptrack_entry(t, handle).ref = t->maptrack_head;
129 t->maptrack_head = handle;
130 t->map_count--;
131 }
/*
 * Allocate a maptrack handle from the local grant table 'lgt', growing the
 * maptrack table by one page (under lgt->lock) when the free list is empty.
 * Returns the handle, or -1 if the table is at its limit or allocation fails.
 */
static inline int
get_maptrack_handle(
    struct grant_table *lgt)
{
    int i;
    grant_handle_t handle;
    struct grant_mapping *new_mt;
    unsigned int new_mt_limit, nr_frames;

    if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
    {
        spin_lock(&lgt->lock);

        /* Re-check under the lock: another CPU may have grown the table. */
        if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
        {
            nr_frames = nr_maptrack_frames(lgt);
            if ( nr_frames >= max_nr_maptrack_frames() )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            new_mt = alloc_xenheap_page();
            if ( new_mt == NULL )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            clear_page(new_mt);

            new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;

            /* Chain the new entries together as an extension of the free list. */
            for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
            {
                new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
                new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
            }

            lgt->maptrack[nr_frames] = new_mt;
            lgt->maptrack_limit = new_mt_limit;

            gdprintk(XENLOG_INFO,
                     "Increased maptrack size to %u frames.\n", nr_frames + 1);
            handle = __get_maptrack_handle(lgt);
        }

        spin_unlock(&lgt->lock);
    }
    return handle;
}
/*
 * Map the grant entry described by 'op' for the calling domain, reporting
 * the result in op->status.
 *
 * op->host_addr is _either_ a host virtual address, or the address of the
 * pte to update, as indicated by the GNTMAP_contains_pte flag.
 *
 * NOTE(review): an earlier version of this comment described a return value
 * and 'va'/'addr' parameters that no longer exist in this void signature.
 */
static void
__gnttab_map_grant_ref(
    struct gnttab_map_grant_ref *op)
{
    struct domain *ld, *rd;   /* local (mapping) and remote (granting) domains */
    struct vcpu *led;
    int handle;
    unsigned long frame = 0;
    int rc = GNTST_okay;
    struct active_grant_entry *act;
    struct grant_mapping *mt;
    grant_entry_t *sha;
    union grant_combo scombo, prev_scombo, new_scombo;

    /*
     * We bound the number of times we retry CMPXCHG on memory locations that
     * we share with a guest OS. The reason is that the guest can modify that
     * location at a higher rate than we can read-modify-CMPXCHG, so the guest
     * could cause us to livelock. There are a few cases where it is valid for
     * the guest to race our updates (e.g., to change the GTF_readonly flag),
     * so we allow a few retries before failing.
     */
    int retries = 0;

    led = current;
    ld = led->domain;

    /* At least one of device/host mapping must be requested. */
    if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
    {
        gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
        op->status = GNTST_bad_gntref;
        return;
    }

    if ( unlikely((rd = rcu_lock_domain_by_id(op->dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", op->dom);
        op->status = GNTST_bad_domain;
        return;
    }

    rc = xsm_grant_mapref(ld, rd, op->flags);
    if ( rc )
    {
        rcu_unlock_domain(rd);
        op->status = GNTST_permission_denied;
        return;
    }

    /* Reserve a local maptrack slot before touching the remote table. */
    if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
    {
        rcu_unlock_domain(rd);
        gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
        op->status = GNTST_no_device_space;
        return;
    }

    spin_lock(&rd->grant_table->lock);

    /* Bounds check on the grant ref */
    if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
        PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    /* If already pinned, check the active domid and avoid refcnt overflow. */
    if ( act->pin &&
         ((act->domid != ld->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, ld->domain_id, act->pin);

    /*
     * The shared flags only need updating on the first pin, or when adding
     * write access to an entry so far pinned read-only.
     */
    if ( !act->pin ||
         (!(op->flags & GNTMAP_readonly) &&
          !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /*
         * This loop attempts to set the access (reading/writing) flags
         * in the grant table entry.  It tries a cmpxchg on the field
         * up to five times, and then fails under the assumption that
         * the guest is misbehaving.
         */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != ld->domain_id)) )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                         scombo.shorts.flags, scombo.shorts.domid,
                         ld->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !(op->flags & GNTMAP_readonly) )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        /* First pin: latch the granting domid and translated frame. */
        if ( !act->pin )
        {
            act->domid = scombo.shorts.domid;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    if ( op->flags & GNTMAP_device_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin += (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    frame = act->frame;

    spin_unlock(&rd->grant_table->lock);

    /* Take page (and type, for writable maps) references on the frame. */
    if ( unlikely(!mfn_valid(frame)) ||
         unlikely(!((op->flags & GNTMAP_readonly) ?
                    get_page(mfn_to_page(frame), rd) :
                    get_page_and_type(mfn_to_page(frame), rd,
                                      PGT_writable_page))) )
    {
        if ( !rd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not pin grant frame %lx\n", frame);
        rc = GNTST_general_error;
        goto undo_out;
    }

    if ( op->flags & GNTMAP_host_map )
    {
        rc = create_grant_host_mapping(op->host_addr, frame, op->flags);
        if ( rc != GNTST_okay )
        {
            if ( !(op->flags & GNTMAP_readonly) )
                put_page_type(mfn_to_page(frame));
            put_page(mfn_to_page(frame));
            goto undo_out;
        }

        /* A combined device+host map carries a second set of references. */
        if ( op->flags & GNTMAP_device_map )
        {
            (void)get_page(mfn_to_page(frame), rd);
            if ( !(op->flags & GNTMAP_readonly) )
                get_page_type(mfn_to_page(frame), PGT_writable_page);
        }
    }

    TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);

    /* Record the mapping in the reserved maptrack slot. */
    mt = &maptrack_entry(ld->grant_table, handle);
    mt->domid = op->dom;
    mt->ref = op->ref;
    mt->flags = op->flags;

    op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
    op->handle = handle;
    op->status = GNTST_okay;

    rcu_unlock_domain(rd);
    return;

 undo_out:
    /* Roll back the pin counts and shared flags taken above. */
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->ref);
    sha = &shared_entry(rd->grant_table, op->ref);

    if ( op->flags & GNTMAP_device_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_devr_inc : GNTPIN_devw_inc;
    if ( op->flags & GNTMAP_host_map )
        act->pin -= (op->flags & GNTMAP_readonly) ?
            GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    if ( !(op->flags & GNTMAP_readonly) &&
         !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    op->status = rc;
    put_maptrack_handle(ld->grant_table, handle);
    rcu_unlock_domain(rd);
}
403 static long
404 gnttab_map_grant_ref(
405 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
406 {
407 int i;
408 struct gnttab_map_grant_ref op;
410 for ( i = 0; i < count; i++ )
411 {
412 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
413 return -EFAULT;
414 __gnttab_map_grant_ref(&op);
415 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
416 return -EFAULT;
417 }
419 return 0;
420 }
422 static void
423 __gnttab_unmap_common(
424 struct gnttab_unmap_common *op)
425 {
426 domid_t dom;
427 struct domain *ld, *rd;
428 struct active_grant_entry *act;
429 grant_entry_t *sha;
430 s16 rc = 0;
432 ld = current->domain;
434 op->frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);
436 if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
437 {
438 gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
439 op->status = GNTST_bad_handle;
440 return;
441 }
443 op->map = &maptrack_entry(ld->grant_table, op->handle);
445 if ( unlikely(!op->map->flags) )
446 {
447 gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
448 op->status = GNTST_bad_handle;
449 return;
450 }
452 dom = op->map->domid;
453 op->flags = op->map->flags;
455 if ( unlikely((op->rd = rd = rcu_lock_domain_by_id(dom)) == NULL) )
456 {
457 /* This can happen when a grant is implicitly unmapped. */
458 gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
459 domain_crash(ld); /* naughty... */
460 return;
461 }
463 rc = xsm_grant_unmapref(ld, rd);
464 if ( rc )
465 {
466 rcu_unlock_domain(rd);
467 op->status = GNTST_permission_denied;
468 return;
469 }
471 TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);
473 spin_lock(&rd->grant_table->lock);
475 act = &active_entry(rd->grant_table, op->map->ref);
476 sha = &shared_entry(rd->grant_table, op->map->ref);
478 if ( op->frame == 0 )
479 {
480 op->frame = act->frame;
481 }
482 else
483 {
484 if ( unlikely(op->frame != act->frame) )
485 PIN_FAIL(unmap_out, GNTST_general_error,
486 "Bad frame number doesn't match gntref. (%lx != %lx)\n",
487 op->frame, act->frame);
488 if ( op->flags & GNTMAP_device_map )
489 {
490 ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
491 op->map->flags &= ~GNTMAP_device_map;
492 if ( op->flags & GNTMAP_readonly )
493 act->pin -= GNTPIN_devr_inc;
494 else
495 act->pin -= GNTPIN_devw_inc;
496 }
497 }
499 if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
500 {
501 if ( (rc = replace_grant_host_mapping(op->host_addr,
502 op->frame, op->new_addr,
503 op->flags)) < 0 )
504 goto unmap_out;
506 ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
507 op->map->flags &= ~GNTMAP_host_map;
508 if ( op->flags & GNTMAP_readonly )
509 act->pin -= GNTPIN_hstr_inc;
510 else
511 act->pin -= GNTPIN_hstw_inc;
512 }
514 /* If just unmapped a writable mapping, mark as dirtied */
515 if ( !(op->flags & GNTMAP_readonly) )
516 gnttab_mark_dirty(rd, op->frame);
518 unmap_out:
519 op->status = rc;
520 spin_unlock(&rd->grant_table->lock);
521 rcu_unlock_domain(rd);
522 }
/*
 * Second phase of an unmap, run after the TLB flush: drop the page (and
 * type) references taken at map time, free the maptrack slot if both map
 * types are gone, and clear the shared-entry flags when fully unpinned.
 */
static void
__gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
{
    struct domain *ld, *rd;
    struct active_grant_entry *act;
    grant_entry_t *sha;

    rd = op->rd;

    if ( rd == NULL )
    {
        /*
         * Suggests that __gnttab_unmap_common failed in
         * rcu_lock_domain_by_id() or earlier, and so we have nothing
         * to complete
         */
        return;
    }

    ld = current->domain;

    rcu_lock_domain(rd);
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, op->map->ref);
    sha = &shared_entry(rd->grant_table, op->map->ref);

    if ( unlikely(op->frame != act->frame) )
    {
        /*
         * Suggests that __gnttab_unmap_common failed early and so
         * nothing further to do
         */
        goto unmap_out;
    }

    if ( op->flags & GNTMAP_device_map )
    {
        if ( op->flags & GNTMAP_readonly )
            put_page(mfn_to_page(op->frame));
        else
            put_page_and_type(mfn_to_page(op->frame));
    }

    if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
    {
        if ( op->status != 0 )
        {
            /*
             * Suggests that __gnttab_unmap_common failed in
             * replace_grant_host_mapping() so nothing further to do
             */
            goto unmap_out;
        }

        if ( op->flags & GNTMAP_readonly )
            put_page(mfn_to_page(op->frame));
        else
            put_page_and_type(mfn_to_page(op->frame));
    }

    /* Both mapping types gone: recycle the maptrack slot. */
    if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
    {
        op->map->flags = 0;
        put_maptrack_handle(ld->grant_table, op->handle);
    }

    /* Clear shared flags once the last writable / last pin goes away. */
    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
         !(op->flags & GNTMAP_readonly) )
        gnttab_clear_flag(_GTF_writing, &sha->flags);

    if ( act->pin == 0 )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

 unmap_out:
    spin_unlock(&rd->grant_table->lock);
    rcu_unlock_domain(rd);
}
/*
 * Translate a GNTTABOP_unmap_grant_ref request into the shared
 * gnttab_unmap_common form and run the first unmap phase.
 */
static void
__gnttab_unmap_grant_ref(
    struct gnttab_unmap_grant_ref *op,
    struct gnttab_unmap_common *common)
{
    common->host_addr = op->host_addr;
    common->dev_bus_addr = op->dev_bus_addr;
    common->handle = op->handle;

    /* Initialise these in case common contains old state */
    common->new_addr = 0;
    common->rd = NULL;

    __gnttab_unmap_common(common);
    op->status = common->status;
}
/*
 * GNTTABOP_unmap_grant_ref: process 'count' unmap requests in batches of
 * GNTTAB_UNMAP_BATCH_SIZE.  Each batch does the pin/mapping teardown, then
 * one TLB flush, then the deferred reference release, so stale TLB entries
 * never outlive the page references.  Returns 0 or -EFAULT.
 */
static long
gnttab_unmap_grant_ref(
    XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
{
    int i, c, partial_done, done = 0;
    struct gnttab_unmap_grant_ref op;
    struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];

    while ( count != 0 )
    {
        c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
        partial_done = 0;

        for ( i = 0; i < c; i++ )
        {
            if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
                goto fault;
            __gnttab_unmap_grant_ref(&op, &(common[i]));
            ++partial_done;
            if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
                goto fault;
        }

        flush_tlb_mask(current->domain->domain_dirty_cpumask);

        for ( i = 0; i < partial_done; i++ )
            __gnttab_unmap_common_complete(&(common[i]));

        count -= c;
        done += c;
    }

    return 0;

fault:
    /* Still flush and complete the entries unmapped before the fault. */
    flush_tlb_mask(current->domain->domain_dirty_cpumask);

    for ( i = 0; i < partial_done; i++ )
        __gnttab_unmap_common_complete(&(common[i]));
    return -EFAULT;
}
/*
 * Translate a GNTTABOP_unmap_and_replace request into the shared
 * gnttab_unmap_common form and run the first unmap phase.
 */
static void
__gnttab_unmap_and_replace(
    struct gnttab_unmap_and_replace *op,
    struct gnttab_unmap_common *common)
{
    common->host_addr = op->host_addr;
    common->new_addr = op->new_addr;
    common->handle = op->handle;

    /* Initialise these in case common contains old state */
    common->dev_bus_addr = 0;
    common->rd = NULL;

    __gnttab_unmap_common(common);
    op->status = common->status;
}
/*
 * GNTTABOP_unmap_and_replace: batched like gnttab_unmap_grant_ref() —
 * teardown, one TLB flush per batch, then deferred reference release.
 * Returns 0 or -EFAULT.
 */
static long
gnttab_unmap_and_replace(
    XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) uop, unsigned int count)
{
    int i, c, partial_done, done = 0;
    struct gnttab_unmap_and_replace op;
    struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];

    while ( count != 0 )
    {
        c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
        partial_done = 0;

        for ( i = 0; i < c; i++ )
        {
            if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
                goto fault;
            __gnttab_unmap_and_replace(&op, &(common[i]));
            ++partial_done;
            if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
                goto fault;
        }

        flush_tlb_mask(current->domain->domain_dirty_cpumask);

        for ( i = 0; i < partial_done; i++ )
            __gnttab_unmap_common_complete(&(common[i]));

        count -= c;
        done += c;
    }

    return 0;

fault:
    /* Still flush and complete the entries unmapped before the fault. */
    flush_tlb_mask(current->domain->domain_dirty_cpumask);

    for ( i = 0; i < partial_done; i++ )
        __gnttab_unmap_common_complete(&(common[i]));
    return -EFAULT;
}
/*
 * Grow domain 'd's grant table to 'req_nr_frames' shared frames (and the
 * matching number of active frames).  Returns 1 on success, 0 on allocation
 * failure (with any partially allocated frames freed again).
 */
int
gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
{
    /* d's grant table lock must be held by the caller */

    struct grant_table *gt = d->grant_table;
    unsigned int i;

    ASSERT(req_nr_frames <= max_nr_grant_frames);

    gdprintk(XENLOG_INFO,
             "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
             d->domain_id, nr_grant_frames(gt), req_nr_frames);

    /* Active */
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
            goto active_alloc_failed;
        clear_page(gt->active[i]);
    }

    /* Shared */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        if ( (gt->shared[i] = alloc_xenheap_page()) == NULL )
            goto shared_alloc_failed;
        clear_page(gt->shared[i]);
    }

    /* Share the new shared frames with the recipient domain */
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
        gnttab_create_shared_page(d, gt, i);

    gt->nr_grant_frames = req_nr_frames;

    return 1;

    /*
     * NOTE(review): both cleanup loops walk the full requested range, which
     * includes slots that were never allocated on this call — this assumes
     * unallocated slots are NULL and free_xenheap_page(NULL) is a no-op;
     * confirm against the xenheap allocator.
     */
shared_alloc_failed:
    for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
    {
        free_xenheap_page(gt->shared[i]);
        gt->shared[i] = NULL;
    }
active_alloc_failed:
    for ( i = nr_active_grant_frames(gt);
          i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
    {
        free_xenheap_page(gt->active[i]);
        gt->active[i] = NULL;
    }
    gdprintk(XENLOG_INFO, "Allocation failure when expanding grant table.\n");
    return 0;
}
/*
 * GNTTABOP_setup_table: grow the target domain's grant table to
 * op.nr_frames frames if needed and return the GMFN of each shared frame
 * in op.frame_list.  'count' must be 1.  DOMID_SELF targets the caller;
 * other targets require a privileged caller.  Returns 0 or -EINVAL/-EFAULT;
 * per-operation errors go in op.status.
 */
static long
gnttab_setup_table(
    XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
{
    struct gnttab_setup_table op;
    struct domain *d;
    int i;
    unsigned long gmfn;
    domid_t dom;

    if ( count != 1 )
        return -EINVAL;

    if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
    {
        gdprintk(XENLOG_INFO, "Fault while reading gnttab_setup_table_t.\n");
        return -EFAULT;
    }

    if ( unlikely(op.nr_frames > max_nr_grant_frames) )
    {
        gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
                 " per domain.\n",
                 max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto out;
    }

    dom = op.dom;
    if ( dom == DOMID_SELF )
    {
        dom = current->domain->domain_id;
    }
    else if ( unlikely(!IS_PRIV(current->domain)) )
    {
        op.status = GNTST_permission_denied;
        goto out;
    }

    if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
        op.status = GNTST_bad_domain;
        goto out;
    }

    if ( xsm_grant_setup(current->domain, d) )
    {
        rcu_unlock_domain(d);
        op.status = GNTST_permission_denied;
        goto out;
    }

    spin_lock(&d->grant_table->lock);

    if ( (op.nr_frames > nr_grant_frames(d->grant_table)) &&
         !gnttab_grow_table(d, op.nr_frames) )
    {
        gdprintk(XENLOG_INFO,
                 "Expand grant table to %d failed. Current: %d Max: %d.\n",
                 op.nr_frames,
                 nr_grant_frames(d->grant_table),
                 max_nr_grant_frames);
        op.status = GNTST_general_error;
        goto setup_unlock_out;
    }

    op.status = GNTST_okay;
    /* Copy-to-guest faults here are deliberately ignored (best effort). */
    for ( i = 0; i < op.nr_frames; i++ )
    {
        gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
        (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
    }

 setup_unlock_out:
    spin_unlock(&d->grant_table->lock);

    rcu_unlock_domain(d);

 out:
    if ( unlikely(copy_to_guest(uop, &op, 1)) )
        return -EFAULT;

    return 0;
}
864 static long
865 gnttab_query_size(
866 XEN_GUEST_HANDLE(gnttab_query_size_t) uop, unsigned int count)
867 {
868 struct gnttab_query_size op;
869 struct domain *d;
870 domid_t dom;
871 int rc;
873 if ( count != 1 )
874 return -EINVAL;
876 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
877 {
878 gdprintk(XENLOG_INFO, "Fault while reading gnttab_query_size_t.\n");
879 return -EFAULT;
880 }
882 dom = op.dom;
883 if ( dom == DOMID_SELF )
884 {
885 dom = current->domain->domain_id;
886 }
887 else if ( unlikely(!IS_PRIV(current->domain)) )
888 {
889 op.status = GNTST_permission_denied;
890 goto query_out;
891 }
893 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
894 {
895 gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
896 op.status = GNTST_bad_domain;
897 goto query_out;
898 }
900 rc = xsm_grant_query_size(current->domain, d);
901 if ( rc )
902 {
903 rcu_unlock_domain(d);
904 op.status = GNTST_permission_denied;
905 goto query_out;
906 }
908 spin_lock(&d->grant_table->lock);
910 op.nr_frames = nr_grant_frames(d->grant_table);
911 op.max_nr_frames = max_nr_grant_frames;
912 op.status = GNTST_okay;
914 spin_unlock(&d->grant_table->lock);
916 rcu_unlock_domain(d);
918 query_out:
919 if ( unlikely(copy_to_guest(uop, &op, 1)) )
920 return -EFAULT;
922 return 0;
923 }
925 /*
926 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
927 * ownership of a page frame. If so, lock down the grant entry.
928 */
static int
gnttab_prepare_for_transfer(
    struct domain *rd, struct domain *ld, grant_ref_t ref)
{
    struct grant_table *rgt;
    struct grant_entry *sha;
    union grant_combo scombo, prev_scombo, new_scombo;
    int retries = 0;   /* bounded cmpxchg retries against a racing guest */

    if ( unlikely((rgt = rd->grant_table) == NULL) )
    {
        gdprintk(XENLOG_INFO, "Dom %d has no grant table.\n", rd->domain_id);
        return 0;
    }

    spin_lock(&rgt->lock);

    if ( unlikely(ref >= nr_grant_entries(rd->grant_table)) )
    {
        gdprintk(XENLOG_INFO,
                 "Bad grant reference (%d) for transfer to domain(%d).\n",
                 ref, rd->domain_id);
        goto fail;
    }

    sha = &shared_entry(rgt, ref);

    scombo.word = *(u32 *)&sha->flags;

    /*
     * Atomically verify the entry accepts a transfer from 'ld' and latch
     * GTF_transfer_committed, retrying a few times if the guest races us.
     */
    for ( ; ; )
    {
        if ( unlikely(scombo.shorts.flags != GTF_accept_transfer) ||
             unlikely(scombo.shorts.domid != ld->domain_id) )
        {
            gdprintk(XENLOG_INFO, "Bad flags (%x) or dom (%d). "
                     "(NB. expected dom %d)\n",
                     scombo.shorts.flags, scombo.shorts.domid,
                     ld->domain_id);
            goto fail;
        }

        new_scombo = scombo;
        new_scombo.shorts.flags |= GTF_transfer_committed;

        prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                   scombo.word, new_scombo.word);
        if ( likely(prev_scombo.word == scombo.word) )
            break;

        if ( retries++ == 4 )
        {
            gdprintk(XENLOG_WARNING, "Shared grant entry is unstable.\n");
            goto fail;
        }

        scombo = prev_scombo;
    }

    spin_unlock(&rgt->lock);
    return 1;

 fail:
    spin_unlock(&rgt->lock);
    return 0;
}
995 static long
996 gnttab_transfer(
997 XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
998 {
999 struct domain *d = current->domain;
1000 struct domain *e;
1001 struct page_info *page;
1002 int i;
1003 grant_entry_t *sha;
1004 struct gnttab_transfer gop;
1005 unsigned long mfn;
1007 for ( i = 0; i < count; i++ )
1009 /* Read from caller address space. */
1010 if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
1012 gdprintk(XENLOG_INFO, "gnttab_transfer: error reading req %d/%d\n",
1013 i, count);
1014 return -EFAULT;
1017 mfn = gmfn_to_mfn(d, gop.mfn);
1019 /* Check the passed page frame for basic validity. */
1020 if ( unlikely(!mfn_valid(mfn)) )
1022 gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
1023 (unsigned long)gop.mfn);
1024 gop.status = GNTST_bad_page;
1025 goto copyback;
1028 page = mfn_to_page(mfn);
1029 if ( unlikely(is_xen_heap_page(page)) )
1031 gdprintk(XENLOG_INFO, "gnttab_transfer: xen frame %lx\n",
1032 (unsigned long)gop.mfn);
1033 gop.status = GNTST_bad_page;
1034 goto copyback;
1037 if ( steal_page(d, page, 0) < 0 )
1039 gop.status = GNTST_bad_page;
1040 goto copyback;
1043 /* Find the target domain. */
1044 if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
1046 gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
1047 gop.domid);
1048 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1049 free_domheap_page(page);
1050 gop.status = GNTST_bad_domain;
1051 goto copyback;
1054 if ( xsm_grant_transfer(d, e) )
1056 rcu_unlock_domain(e);
1057 gop.status = GNTST_permission_denied;
1058 goto copyback;
1061 spin_lock(&e->page_alloc_lock);
1063 /*
1064 * Check that 'e' will accept the page and has reservation
1065 * headroom. Also, a domain mustn't have PGC_allocated
1066 * pages when it is dying.
1067 */
1068 if ( unlikely(e->is_dying) ||
1069 unlikely(e->tot_pages >= e->max_pages) ||
1070 unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
1072 if ( !e->is_dying )
1073 gdprintk(XENLOG_INFO, "gnttab_transfer: "
1074 "Transferee has no reservation "
1075 "headroom (%d,%d) or provided a bad grant ref (%08x) "
1076 "or is dying (%d)\n",
1077 e->tot_pages, e->max_pages, gop.ref, e->is_dying);
1078 spin_unlock(&e->page_alloc_lock);
1079 rcu_unlock_domain(e);
1080 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1081 free_domheap_page(page);
1082 gop.status = GNTST_general_error;
1083 goto copyback;
1086 /* Okay, add the page to 'e'. */
1087 if ( unlikely(e->tot_pages++ == 0) )
1088 get_knownalive_domain(e);
1089 list_add_tail(&page->list, &e->page_list);
1090 page_set_owner(page, e);
1092 spin_unlock(&e->page_alloc_lock);
1094 TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
1096 /* Tell the guest about its new page frame. */
1097 spin_lock(&e->grant_table->lock);
1099 sha = &shared_entry(e->grant_table, gop.ref);
1100 guest_physmap_add_page(e, sha->frame, mfn);
1101 sha->frame = mfn;
1102 wmb();
1103 sha->flags |= GTF_transfer_completed;
1105 spin_unlock(&e->grant_table->lock);
1107 rcu_unlock_domain(e);
1109 gop.status = GNTST_okay;
1111 copyback:
1112 if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
1114 gdprintk(XENLOG_INFO, "gnttab_transfer: error writing resp "
1115 "%d/%d\n", i, count);
1116 return -EFAULT;
1120 return 0;
1123 /* Undo __acquire_grant_for_copy. Again, this has no effect on page
1124 type and reference counts. */
1125 static void
1126 __release_grant_for_copy(
1127 struct domain *rd, unsigned long gref, int readonly)
1129 grant_entry_t *sha;
1130 struct active_grant_entry *act;
1131 unsigned long r_frame;
1133 spin_lock(&rd->grant_table->lock);
1135 act = &active_entry(rd->grant_table, gref);
1136 sha = &shared_entry(rd->grant_table, gref);
1137 r_frame = act->frame;
1139 if ( readonly )
1141 act->pin -= GNTPIN_hstr_inc;
1143 else
1145 gnttab_mark_dirty(rd, r_frame);
1147 act->pin -= GNTPIN_hstw_inc;
1148 if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) )
1149 gnttab_clear_flag(_GTF_writing, &sha->flags);
1152 if ( !act->pin )
1153 gnttab_clear_flag(_GTF_reading, &sha->flags);
1155 spin_unlock(&rd->grant_table->lock);
1158 /* Grab a frame number from a grant entry and update the flags and pin
1159 count as appropriate. Note that this does *not* update the page
1160 type or reference counts, and does not check that the mfn is
1161 actually valid. */
1162 static int
1163 __acquire_grant_for_copy(
1164 struct domain *rd, unsigned long gref, int readonly,
1165 unsigned long *frame)
1167 grant_entry_t *sha;
1168 struct active_grant_entry *act;
1169 s16 rc = GNTST_okay;
1170 int retries = 0;
1171 union grant_combo scombo, prev_scombo, new_scombo;
1173 spin_lock(&rd->grant_table->lock);
1175 if ( unlikely(gref >= nr_grant_entries(rd->grant_table)) )
1176 PIN_FAIL(unlock_out, GNTST_bad_gntref,
1177 "Bad grant reference %ld\n", gref);
1179 act = &active_entry(rd->grant_table, gref);
1180 sha = &shared_entry(rd->grant_table, gref);
1182 /* If already pinned, check the active domid and avoid refcnt overflow. */
1183 if ( act->pin &&
1184 ((act->domid != current->domain->domain_id) ||
1185 (act->pin & 0x80808080U) != 0) )
1186 PIN_FAIL(unlock_out, GNTST_general_error,
1187 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
1188 act->domid, current->domain->domain_id, act->pin);
1190 if ( !act->pin ||
1191 (!readonly && !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask))) )
1193 scombo.word = *(u32 *)&sha->flags;
1195 for ( ; ; )
1197 /* If not already pinned, check the grant domid and type. */
1198 if ( !act->pin &&
1199 (((scombo.shorts.flags & GTF_type_mask) !=
1200 GTF_permit_access) ||
1201 (scombo.shorts.domid != current->domain->domain_id)) )
1202 PIN_FAIL(unlock_out, GNTST_general_error,
1203 "Bad flags (%x) or dom (%d). (expected dom %d)\n",
1204 scombo.shorts.flags, scombo.shorts.domid,
1205 current->domain->domain_id);
1207 new_scombo = scombo;
1208 new_scombo.shorts.flags |= GTF_reading;
1210 if ( !readonly )
1212 new_scombo.shorts.flags |= GTF_writing;
1213 if ( unlikely(scombo.shorts.flags & GTF_readonly) )
1214 PIN_FAIL(unlock_out, GNTST_general_error,
1215 "Attempt to write-pin a r/o grant entry.\n");
1218 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
1219 scombo.word, new_scombo.word);
1220 if ( likely(prev_scombo.word == scombo.word) )
1221 break;
1223 if ( retries++ == 4 )
1224 PIN_FAIL(unlock_out, GNTST_general_error,
1225 "Shared grant entry is unstable.\n");
1227 scombo = prev_scombo;
1230 if ( !act->pin )
1232 act->domid = scombo.shorts.domid;
1233 act->frame = gmfn_to_mfn(rd, sha->frame);
1237 act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;
1239 *frame = act->frame;
1241 unlock_out:
1242 spin_unlock(&rd->grant_table->lock);
1243 return rc;
1246 static void
1247 __gnttab_copy(
1248 struct gnttab_copy *op)
1250 struct domain *sd = NULL, *dd = NULL;
1251 unsigned long s_frame, d_frame;
1252 char *sp, *dp;
1253 s16 rc = GNTST_okay;
1254 int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
1255 int src_is_gref, dest_is_gref;
1257 if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
1258 ((op->dest.offset + op->len) > PAGE_SIZE) )
1259 PIN_FAIL(error_out, GNTST_bad_copy_arg, "copy beyond page area.\n");
1261 src_is_gref = op->flags & GNTCOPY_source_gref;
1262 dest_is_gref = op->flags & GNTCOPY_dest_gref;
1264 if ( (op->source.domid != DOMID_SELF && !src_is_gref ) ||
1265 (op->dest.domid != DOMID_SELF && !dest_is_gref) )
1266 PIN_FAIL(error_out, GNTST_permission_denied,
1267 "only allow copy-by-mfn for DOMID_SELF.\n");
1269 if ( op->source.domid == DOMID_SELF )
1270 sd = rcu_lock_current_domain();
1271 else if ( (sd = rcu_lock_domain_by_id(op->source.domid)) == NULL )
1272 PIN_FAIL(error_out, GNTST_bad_domain,
1273 "couldn't find %d\n", op->source.domid);
1275 if ( op->dest.domid == DOMID_SELF )
1276 dd = rcu_lock_current_domain();
1277 else if ( (dd = rcu_lock_domain_by_id(op->dest.domid)) == NULL )
1278 PIN_FAIL(error_out, GNTST_bad_domain,
1279 "couldn't find %d\n", op->dest.domid);
1281 rc = xsm_grant_copy(sd, dd);
1282 if ( rc )
1284 rc = GNTST_permission_denied;
1285 goto error_out;
1288 if ( src_is_gref )
1290 rc = __acquire_grant_for_copy(sd, op->source.u.ref, 1, &s_frame);
1291 if ( rc != GNTST_okay )
1292 goto error_out;
1293 have_s_grant = 1;
1295 else
1297 s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
1299 if ( unlikely(!mfn_valid(s_frame)) )
1300 PIN_FAIL(error_out, GNTST_general_error,
1301 "source frame %lx invalid.\n", s_frame);
1302 if ( !get_page(mfn_to_page(s_frame), sd) )
1304 if ( !sd->is_dying )
1305 gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
1306 rc = GNTST_general_error;
1307 goto error_out;
1309 have_s_ref = 1;
1311 if ( dest_is_gref )
1313 rc = __acquire_grant_for_copy(dd, op->dest.u.ref, 0, &d_frame);
1314 if ( rc != GNTST_okay )
1315 goto error_out;
1316 have_d_grant = 1;
1318 else
1320 d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
1322 if ( unlikely(!mfn_valid(d_frame)) )
1323 PIN_FAIL(error_out, GNTST_general_error,
1324 "destination frame %lx invalid.\n", d_frame);
1325 if ( !get_page_and_type(mfn_to_page(d_frame), dd, PGT_writable_page) )
1327 if ( !dd->is_dying )
1328 gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
1329 rc = GNTST_general_error;
1330 goto error_out;
1333 sp = map_domain_page(s_frame);
1334 dp = map_domain_page(d_frame);
1336 memcpy(dp + op->dest.offset, sp + op->source.offset, op->len);
1338 unmap_domain_page(dp);
1339 unmap_domain_page(sp);
1341 gnttab_mark_dirty(dd, d_frame);
1343 put_page_and_type(mfn_to_page(d_frame));
1344 error_out:
1345 if ( have_s_ref )
1346 put_page(mfn_to_page(s_frame));
1347 if ( have_s_grant )
1348 __release_grant_for_copy(sd, op->source.u.ref, 1);
1349 if ( have_d_grant )
1350 __release_grant_for_copy(dd, op->dest.u.ref, 0);
1351 if ( sd )
1352 rcu_unlock_domain(sd);
1353 if ( dd )
1354 rcu_unlock_domain(dd);
1355 op->status = rc;
1358 static long
1359 gnttab_copy(
1360 XEN_GUEST_HANDLE(gnttab_copy_t) uop, unsigned int count)
1362 int i;
1363 struct gnttab_copy op;
1365 for ( i = 0; i < count; i++ )
1367 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
1368 return -EFAULT;
1369 __gnttab_copy(&op);
1370 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
1371 return -EFAULT;
1373 return 0;
1376 long
1377 do_grant_table_op(
1378 unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
1380 long rc;
1381 struct domain *d = current->domain;
1383 if ( count > 512 )
1384 return -EINVAL;
1386 LOCK_BIGLOCK(d);
1388 rc = -EFAULT;
1389 switch ( cmd )
1391 case GNTTABOP_map_grant_ref:
1393 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
1394 guest_handle_cast(uop, gnttab_map_grant_ref_t);
1395 if ( unlikely(!guest_handle_okay(map, count)) )
1396 goto out;
1397 rc = gnttab_map_grant_ref(map, count);
1398 break;
1400 case GNTTABOP_unmap_grant_ref:
1402 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
1403 guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
1404 if ( unlikely(!guest_handle_okay(unmap, count)) )
1405 goto out;
1406 rc = gnttab_unmap_grant_ref(unmap, count);
1407 break;
1409 case GNTTABOP_unmap_and_replace:
1411 XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) unmap =
1412 guest_handle_cast(uop, gnttab_unmap_and_replace_t);
1413 if ( unlikely(!guest_handle_okay(unmap, count)) )
1414 goto out;
1415 rc = -ENOSYS;
1416 if ( unlikely(!replace_grant_supported()) )
1417 goto out;
1418 rc = gnttab_unmap_and_replace(unmap, count);
1419 break;
1421 case GNTTABOP_setup_table:
1423 rc = gnttab_setup_table(
1424 guest_handle_cast(uop, gnttab_setup_table_t), count);
1425 break;
1427 case GNTTABOP_transfer:
1429 XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
1430 guest_handle_cast(uop, gnttab_transfer_t);
1431 if ( unlikely(!guest_handle_okay(transfer, count)) )
1432 goto out;
1433 rc = gnttab_transfer(transfer, count);
1434 break;
1436 case GNTTABOP_copy:
1438 XEN_GUEST_HANDLE(gnttab_copy_t) copy =
1439 guest_handle_cast(uop, gnttab_copy_t);
1440 if ( unlikely(!guest_handle_okay(copy, count)) )
1441 goto out;
1442 rc = gnttab_copy(copy, count);
1443 break;
1445 case GNTTABOP_query_size:
1447 rc = gnttab_query_size(
1448 guest_handle_cast(uop, gnttab_query_size_t), count);
1449 break;
1451 default:
1452 rc = -ENOSYS;
1453 break;
1456 out:
1457 UNLOCK_BIGLOCK(d);
1459 return rc;
1462 #ifdef CONFIG_COMPAT
1463 #include "compat/grant_table.c"
1464 #endif
1466 static unsigned int max_nr_active_grant_frames(void)
1468 return (((max_nr_grant_frames * (PAGE_SIZE / sizeof(grant_entry_t))) +
1469 ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
1470 / (PAGE_SIZE / sizeof(struct active_grant_entry)));
1473 int
1474 grant_table_create(
1475 struct domain *d)
1477 struct grant_table *t;
1478 int i;
1480 /* If this sizeof assertion fails, fix the function: shared_index */
1481 ASSERT(sizeof(grant_entry_t) == 8);
1483 if ( (t = xmalloc(struct grant_table)) == NULL )
1484 goto no_mem_0;
1486 /* Simple stuff. */
1487 memset(t, 0, sizeof(*t));
1488 spin_lock_init(&t->lock);
1489 t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
1491 /* Active grant table. */
1492 if ( (t->active = xmalloc_array(struct active_grant_entry *,
1493 max_nr_active_grant_frames())) == NULL )
1494 goto no_mem_1;
1495 memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
1496 for ( i = 0;
1497 i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
1499 if ( (t->active[i] = alloc_xenheap_page()) == NULL )
1500 goto no_mem_2;
1501 clear_page(t->active[i]);
1504 /* Tracking of mapped foreign frames table */
1505 if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
1506 max_nr_maptrack_frames())) == NULL )
1507 goto no_mem_2;
1508 memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
1509 if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
1510 goto no_mem_3;
1511 clear_page(t->maptrack[0]);
1512 t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
1513 for ( i = 0; i < t->maptrack_limit; i++ )
1514 t->maptrack[0][i].ref = i+1;
1516 /* Shared grant table. */
1517 if ( (t->shared = xmalloc_array(struct grant_entry *,
1518 max_nr_grant_frames)) == NULL )
1519 goto no_mem_3;
1520 memset(t->shared, 0, max_nr_grant_frames * sizeof(t->shared[0]));
1521 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1523 if ( (t->shared[i] = alloc_xenheap_page()) == NULL )
1524 goto no_mem_4;
1525 clear_page(t->shared[i]);
1528 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1529 gnttab_create_shared_page(d, t, i);
1531 /* Okay, install the structure. */
1532 d->grant_table = t;
1533 return 0;
1535 no_mem_4:
1536 for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
1537 free_xenheap_page(t->shared[i]);
1538 xfree(t->shared);
1539 no_mem_3:
1540 free_xenheap_page(t->maptrack[0]);
1541 xfree(t->maptrack);
1542 no_mem_2:
1543 for ( i = 0;
1544 i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
1545 free_xenheap_page(t->active[i]);
1546 xfree(t->active);
1547 no_mem_1:
1548 xfree(t);
1549 no_mem_0:
1550 return -ENOMEM;
1553 void
1554 gnttab_release_mappings(
1555 struct domain *d)
1557 struct grant_table *gt = d->grant_table;
1558 struct grant_mapping *map;
1559 grant_ref_t ref;
1560 grant_handle_t handle;
1561 struct domain *rd;
1562 struct active_grant_entry *act;
1563 struct grant_entry *sha;
1565 BUG_ON(!d->is_dying);
1567 for ( handle = 0; handle < gt->maptrack_limit; handle++ )
1569 map = &maptrack_entry(gt, handle);
1570 if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
1571 continue;
1573 ref = map->ref;
1575 gdprintk(XENLOG_INFO, "Grant release (%hu) ref:(%hu) "
1576 "flags:(%x) dom:(%hu)\n",
1577 handle, ref, map->flags, map->domid);
1579 rd = rcu_lock_domain_by_id(map->domid);
1580 if ( rd == NULL )
1582 /* Nothing to clear up... */
1583 map->flags = 0;
1584 continue;
1587 spin_lock(&rd->grant_table->lock);
1589 act = &active_entry(rd->grant_table, ref);
1590 sha = &shared_entry(rd->grant_table, ref);
1592 if ( map->flags & GNTMAP_readonly )
1594 if ( map->flags & GNTMAP_device_map )
1596 BUG_ON(!(act->pin & GNTPIN_devr_mask));
1597 act->pin -= GNTPIN_devr_inc;
1598 put_page(mfn_to_page(act->frame));
1601 if ( map->flags & GNTMAP_host_map )
1603 BUG_ON(!(act->pin & GNTPIN_hstr_mask));
1604 act->pin -= GNTPIN_hstr_inc;
1605 gnttab_release_put_page(mfn_to_page(act->frame));
1608 else
1610 if ( map->flags & GNTMAP_device_map )
1612 BUG_ON(!(act->pin & GNTPIN_devw_mask));
1613 act->pin -= GNTPIN_devw_inc;
1614 put_page_and_type(mfn_to_page(act->frame));
1617 if ( map->flags & GNTMAP_host_map )
1619 BUG_ON(!(act->pin & GNTPIN_hstw_mask));
1620 act->pin -= GNTPIN_hstw_inc;
1621 gnttab_release_put_page_and_type(mfn_to_page(act->frame));
1624 if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
1625 gnttab_clear_flag(_GTF_writing, &sha->flags);
1628 if ( act->pin == 0 )
1629 gnttab_clear_flag(_GTF_reading, &sha->flags);
1631 spin_unlock(&rd->grant_table->lock);
1633 rcu_unlock_domain(rd);
1635 map->flags = 0;
1640 void
1641 grant_table_destroy(
1642 struct domain *d)
1644 struct grant_table *t = d->grant_table;
1645 int i;
1647 if ( t == NULL )
1648 return;
1650 for ( i = 0; i < nr_grant_frames(t); i++ )
1651 free_xenheap_page(t->shared[i]);
1652 xfree(t->shared);
1654 for ( i = 0; i < nr_maptrack_frames(t); i++ )
1655 free_xenheap_page(t->maptrack[i]);
1656 xfree(t->maptrack);
1658 for ( i = 0; i < nr_active_grant_frames(t); i++ )
1659 free_xenheap_page(t->active[i]);
1660 xfree(t->active);
1662 xfree(t);
1663 d->grant_table = NULL;
1666 /*
1667 * Local variables:
1668 * mode: C
1669 * c-set-style: "BSD"
1670 * c-basic-offset: 4
1671 * tab-width: 4
1672 * indent-tabs-mode: nil
1673 * End:
1674 */