debuggers.hg

view xen/common/grant_table.c @ 19969:09dbdf12c33d

Eliminate grant_table_op restriction

Eliminate the hard-coded, arbitrarily chosen limit of 512 grant table
ops a domain may submit at a time, and instead check for necessary
preemption after each individual element got processed, invoking the
hypercall continuation logic when necessary.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jul 13 12:18:04 2009 +0100 (2009-07-13)
parents 2e83c670f680
children c0cb307d927f
line source
1 /******************************************************************************
2 * common/grant_table.c
3 *
4 * Mechanism for granting foreign access to page frames, and receiving
5 * page-ownership transfers.
6 *
7 * Copyright (c) 2005-2006 Christopher Clark
8 * Copyright (c) 2004 K A Fraser
9 * Copyright (c) 2005 Andrew Warfield
10 * Modifications by Geoffrey Lefebvre are (c) Intel Research Cambridge
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
27 #include <xen/config.h>
28 #include <xen/iocap.h>
29 #include <xen/lib.h>
30 #include <xen/sched.h>
31 #include <xen/mm.h>
32 #include <xen/event.h>
33 #include <xen/trace.h>
34 #include <xen/guest_access.h>
35 #include <xen/domain_page.h>
36 #include <xen/iommu.h>
37 #include <xen/paging.h>
38 #include <xsm/xsm.h>
/*
 * Per-domain cap on the number of shared grant-table frames.  Overridable
 * at boot via the "gnttab_max_nr_frames" command-line parameter, unless the
 * architecture defines max_nr_grant_frames as a compile-time constant.
 */
40 #ifndef max_nr_grant_frames
41 unsigned int max_nr_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
42 integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
43 #endif
45 /* The maximum number of grant mappings is defined as a multiplier of the
46 * maximum number of grant table entries. This defines the multiplier used.
47 * Pretty arbitrary. [POLICY]
48 */
49 #define MAX_MAPTRACK_TO_GRANTS_RATIO 8
51 /*
52 * The first two members of a grant entry are updated as a combined pair.
53 * The following union allows that to happen in an endian-neutral fashion.
54 */
55 union grant_combo {
56 uint32_t word; /* flags+domid viewed as one 32-bit value, for cmpxchg */
57 struct {
58 uint16_t flags; /* GTF_* permission/status flags */
59 domid_t domid; /* domain this entry grants access to */
60 } shorts;
61 };
63 /* Used to share code between unmap_grant_ref and unmap_and_replace. */
64 struct gnttab_unmap_common {
65 /* Input */
66 uint64_t host_addr; /* host virtual address (or pte address) to unmap */
67 uint64_t dev_bus_addr; /* device bus address, if device-mapped */
68 uint64_t new_addr; /* replacement mapping (unmap_and_replace only) */
69 grant_handle_t handle; /* maptrack handle returned by the map op */
71 /* Return */
72 int16_t status; /* GNTST_* result code */
74 /* Shared state between *_unmap and *_unmap_complete */
75 u16 flags; /* GNTMAP_* flags copied from the maptrack entry */
76 unsigned long frame; /* machine frame that was mapped */
77 struct grant_mapping *map; /* local domain's maptrack entry */
78 struct domain *rd; /* remote (granting) domain */
79 };
81 /* Number of unmap operations that are done between each tlb flush */
82 #define GNTTAB_UNMAP_BATCH_SIZE 32
/*
 * Log a warning, set the (in-scope) variable 'rc' to the given GNTST_*
 * code, and jump to the given cleanup label.
 */
85 #define PIN_FAIL(_lbl, _rc, _f, _a...) \
86 do { \
87 gdprintk(XENLOG_WARNING, _f, ## _a ); \
88 rc = (_rc); \
89 goto _lbl; \
90 } while ( 0 )
/*
 * The maptrack table is a two-level array of page-sized chunks: index
 * first by frame, then by slot within the frame.
 */
92 #define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
93 #define maptrack_entry(t, e) \
94 ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
/* Number of maptrack frames currently allocated for grant table 't'. */
96 static inline unsigned int
97 nr_maptrack_frames(struct grant_table *t)
98 {
99 return t->maptrack_limit / MAPTRACK_PER_PAGE;
100 }
102 static unsigned inline int max_nr_maptrack_frames(void)
103 {
104 return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
105 }
/*
 * Per-page slot counts and two-level accessors for the shared and active
 * grant entry tables (same layout scheme as maptrack_entry above).
 */
108 #define SHGNT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_t))
109 #define shared_entry(t, e) \
110 ((t)->shared[(e)/SHGNT_PER_PAGE][(e)%SHGNT_PER_PAGE])
111 #define ACGNT_PER_PAGE (PAGE_SIZE / sizeof(struct active_grant_entry))
112 #define active_entry(t, e) \
113 ((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
/*
 * Pop a free maptrack handle off t's free list, or return -1 when the list
 * is exhausted (the final entry acts as the list terminator and is never
 * handed out).
 * NOTE(review): get_maptrack_handle() makes its first call to this without
 * holding lgt->lock — confirm that lock-free pop is intentional/safe here.
 */
115 static inline int
116 __get_maptrack_handle(
117 struct grant_table *t)
118 {
119 unsigned int h;
120 if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
121 return -1;
/* Free entries are chained through their 'ref' field. */
122 t->maptrack_head = maptrack_entry(t, h).ref;
123 return h;
124 }
/* Return a maptrack handle to the free list by pushing it onto the head. */
126 static inline void
127 put_maptrack_handle(
128 struct grant_table *t, int handle)
129 {
/* Link the released entry to the old head, then make it the new head. */
130 maptrack_entry(t, handle).ref = t->maptrack_head;
131 t->maptrack_head = handle;
132 }
/*
 * Allocate a maptrack handle from the local grant table 'lgt', growing the
 * maptrack array by one xenheap page (up to max_nr_maptrack_frames()) when
 * the free list is empty.  Returns the handle, or -1 on exhaustion or
 * allocation failure.
 */
134 static inline int
135 get_maptrack_handle(
136 struct grant_table *lgt)
137 {
138 int i;
139 grant_handle_t handle;
140 struct grant_mapping *new_mt;
141 unsigned int new_mt_limit, nr_frames;
143 if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
144 {
145 spin_lock(&lgt->lock);
/* Re-check under the lock: another vcpu may have grown the table. */
147 if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
148 {
149 nr_frames = nr_maptrack_frames(lgt);
150 if ( nr_frames >= max_nr_maptrack_frames() )
151 {
152 spin_unlock(&lgt->lock);
153 return -1;
154 }
156 new_mt = alloc_xenheap_page();
157 if ( new_mt == NULL )
158 {
159 spin_unlock(&lgt->lock);
160 return -1;
161 }
163 clear_page(new_mt);
165 new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;
/* Chain the new entries onto the free list via their 'ref' fields. */
167 for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
168 {
169 new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
170 new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
171 }
173 lgt->maptrack[nr_frames] = new_mt;
174 lgt->maptrack_limit = new_mt_limit;
176 gdprintk(XENLOG_INFO,
177 "Increased maptrack size to %u frames.\n", nr_frames + 1);
178 handle = __get_maptrack_handle(lgt);
179 }
181 spin_unlock(&lgt->lock);
182 }
183 return handle;
184 }
186 /*
187 * Returns 0 if TLB flush / invalidate required by caller.
188 * va will indicate the address to be invalidated.
189 *
190 * addr is _either_ a host virtual address, or the address of the pte to
191 * update, as indicated by the GNTMAP_contains_pte flag.
192 */
/*
 * Map the grant entry (op->dom, op->ref) into the current domain as a host
 * and/or device mapping according to op->flags.  On success fills in
 * op->handle and op->dev_bus_addr and sets op->status = GNTST_okay; on
 * failure op->status carries a GNTST_* error and all partial state (pin
 * counts, page references, maptrack handle) is rolled back.
 */
193 static void
194 __gnttab_map_grant_ref(
195 struct gnttab_map_grant_ref *op)
196 {
197 struct domain *ld, *rd, *owner;
198 struct vcpu *led;
199 int handle;
200 unsigned long frame = 0, nr_gets = 0;
201 int rc = GNTST_okay;
202 u32 old_pin;
203 u32 act_pin;
204 unsigned int cache_flags;
205 struct active_grant_entry *act;
206 struct grant_mapping *mt;
207 grant_entry_t *sha;
208 union grant_combo scombo, prev_scombo, new_scombo;
210 /*
211 * We bound the number of times we retry CMPXCHG on memory locations that
212 * we share with a guest OS. The reason is that the guest can modify that
213 * location at a higher rate than we can read-modify-CMPXCHG, so the guest
214 * could cause us to livelock. There are a few cases where it is valid for
215 * the guest to race our updates (e.g., to change the GTF_readonly flag),
216 * so we allow a few retries before failing.
217 */
218 int retries = 0;
220 led = current;
221 ld = led->domain;
/* At least one of host-map / device-map must be requested. */
223 if ( unlikely((op->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0) )
224 {
225 gdprintk(XENLOG_INFO, "Bad flags in grant map op (%x).\n", op->flags);
226 op->status = GNTST_bad_gntref;
227 return;
228 }
230 if ( unlikely((rd = rcu_lock_domain_by_id(op->dom)) == NULL) )
231 {
232 gdprintk(XENLOG_INFO, "Could not find domain %d\n", op->dom);
233 op->status = GNTST_bad_domain;
234 return;
235 }
237 rc = xsm_grant_mapref(ld, rd, op->flags);
238 if ( rc )
239 {
240 rcu_unlock_domain(rd);
241 op->status = GNTST_permission_denied;
242 return;
243 }
245 if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
246 {
247 rcu_unlock_domain(rd);
248 gdprintk(XENLOG_INFO, "Failed to obtain maptrack handle.\n");
249 op->status = GNTST_no_device_space;
250 return;
251 }
253 spin_lock(&rd->grant_table->lock);
255 /* Bounds check on the grant ref */
256 if ( unlikely(op->ref >= nr_grant_entries(rd->grant_table)))
257 PIN_FAIL(unlock_out, GNTST_bad_gntref, "Bad ref (%d).\n", op->ref);
259 act = &active_entry(rd->grant_table, op->ref);
260 sha = &shared_entry(rd->grant_table, op->ref);
262 /* If already pinned, check the active domid and avoid refcnt overflow. */
263 if ( act->pin &&
264 ((act->domid != ld->domain_id) ||
265 (act->pin & 0x80808080U) != 0) )
266 PIN_FAIL(unlock_out, GNTST_general_error,
267 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
268 act->domid, ld->domain_id, act->pin);
/* First pin, or first writable pin: must update the shared entry flags. */
270 if ( !act->pin ||
271 (!(op->flags & GNTMAP_readonly) &&
272 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask))) )
273 {
274 scombo.word = *(u32 *)&sha->flags;
276 /*
277 * This loop attempts to set the access (reading/writing) flags
278 * in the grant table entry. It tries a cmpxchg on the field
279 * up to five times, and then fails under the assumption that
280 * the guest is misbehaving.
281 */
282 for ( ; ; )
283 {
284 /* If not already pinned, check the grant domid and type. */
285 if ( !act->pin &&
286 (((scombo.shorts.flags & GTF_type_mask) !=
287 GTF_permit_access) ||
288 (scombo.shorts.domid != ld->domain_id)) )
289 PIN_FAIL(unlock_out, GNTST_general_error,
290 "Bad flags (%x) or dom (%d). (expected dom %d)\n",
291 scombo.shorts.flags, scombo.shorts.domid,
292 ld->domain_id);
294 new_scombo = scombo;
295 new_scombo.shorts.flags |= GTF_reading;
297 if ( !(op->flags & GNTMAP_readonly) )
298 {
299 new_scombo.shorts.flags |= GTF_writing;
300 if ( unlikely(scombo.shorts.flags & GTF_readonly) )
301 PIN_FAIL(unlock_out, GNTST_general_error,
302 "Attempt to write-pin a r/o grant entry.\n");
303 }
305 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
306 scombo.word, new_scombo.word);
307 if ( likely(prev_scombo.word == scombo.word) )
308 break;
310 if ( retries++ == 4 )
311 PIN_FAIL(unlock_out, GNTST_general_error,
312 "Shared grant entry is unstable.\n");
314 scombo = prev_scombo;
315 }
/* First pin: latch domid/gfn/mfn into the active entry. */
317 if ( !act->pin )
318 {
319 act->domid = scombo.shorts.domid;
320 act->gfn = sha->frame;
321 act->frame = gmfn_to_mfn(rd, sha->frame);
322 }
323 }
/* Account the new pin; old_pin/act_pin feed the IOMMU decision below. */
325 old_pin = act->pin;
326 if ( op->flags & GNTMAP_device_map )
327 act->pin += (op->flags & GNTMAP_readonly) ?
328 GNTPIN_devr_inc : GNTPIN_devw_inc;
329 if ( op->flags & GNTMAP_host_map )
330 act->pin += (op->flags & GNTMAP_readonly) ?
331 GNTPIN_hstr_inc : GNTPIN_hstw_inc;
333 frame = act->frame;
334 act_pin = act->pin;
336 cache_flags = (sha->flags & (GTF_PAT | GTF_PWT | GTF_PCD) );
338 spin_unlock(&rd->grant_table->lock);
/* Invalid MFN or dom_io-owned frame: treat as an iomem mapping. */
340 if ( !mfn_valid(frame) ||
341 (owner = page_get_owner_and_reference(mfn_to_page(frame))) == dom_io )
342 {
343 /* Only needed the reference to confirm dom_io ownership. */
344 if ( mfn_valid(frame) )
345 put_page(mfn_to_page(frame));
347 if ( !iomem_access_permitted(rd, frame, frame) )
348 {
349 gdprintk(XENLOG_WARNING,
350 "Iomem mapping not permitted %lx (domain %d)\n",
351 frame, rd->domain_id);
352 rc = GNTST_general_error;
353 goto undo_out;
354 }
356 rc = create_grant_host_mapping(
357 op->host_addr, frame, op->flags, cache_flags);
358 if ( rc != GNTST_okay )
359 goto undo_out;
360 }
361 else if ( owner == rd )
362 {
363 if ( gnttab_host_mapping_get_page_type(op, ld, rd) &&
364 !get_page_type(mfn_to_page(frame), PGT_writable_page) )
365 goto could_not_pin;
/* nr_gets tracks how many page refs we hold, for the undo path. */
367 nr_gets++;
368 if ( op->flags & GNTMAP_host_map )
369 {
370 rc = create_grant_host_mapping(op->host_addr, frame, op->flags, 0);
371 if ( rc != GNTST_okay )
372 goto undo_out;
374 if ( op->flags & GNTMAP_device_map )
375 {
376 nr_gets++;
377 (void)get_page(mfn_to_page(frame), rd);
378 if ( !(op->flags & GNTMAP_readonly) )
379 get_page_type(mfn_to_page(frame), PGT_writable_page);
380 }
381 }
382 }
383 else
384 {
385 could_not_pin:
386 if ( !rd->is_dying )
387 gdprintk(XENLOG_WARNING, "Could not pin grant frame %lx\n",
388 frame);
389 if ( owner != NULL )
390 put_page(mfn_to_page(frame));
391 rc = GNTST_general_error;
392 goto undo_out;
393 }
/* First writable pin appeared: install an IOMMU mapping for 'ld'. */
395 if ( need_iommu(ld) &&
396 !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
397 (act_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
398 {
399 if ( iommu_map_page(ld, mfn_to_gmfn(ld, frame), frame) )
400 {
401 rc = GNTST_general_error;
402 goto undo_out;
403 }
404 }
406 TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);
/* Record the mapping in the local maptrack so it can be unmapped later. */
408 mt = &maptrack_entry(ld->grant_table, handle);
409 mt->domid = op->dom;
410 mt->ref = op->ref;
411 mt->flags = op->flags;
413 op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
414 op->handle = handle;
415 op->status = GNTST_okay;
417 rcu_unlock_domain(rd);
418 return;
/* Failure path: release page refs, then undo the pin accounting. */
420 undo_out:
421 if ( nr_gets > 1 )
422 {
423 if ( !(op->flags & GNTMAP_readonly) )
424 put_page_type(mfn_to_page(frame));
425 put_page(mfn_to_page(frame));
426 }
427 if ( nr_gets > 0 )
428 {
429 if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
430 put_page_type(mfn_to_page(frame));
431 put_page(mfn_to_page(frame));
432 }
434 spin_lock(&rd->grant_table->lock);
436 act = &active_entry(rd->grant_table, op->ref);
437 sha = &shared_entry(rd->grant_table, op->ref);
439 if ( op->flags & GNTMAP_device_map )
440 act->pin -= (op->flags & GNTMAP_readonly) ?
441 GNTPIN_devr_inc : GNTPIN_devw_inc;
442 if ( op->flags & GNTMAP_host_map )
443 act->pin -= (op->flags & GNTMAP_readonly) ?
444 GNTPIN_hstr_inc : GNTPIN_hstw_inc;
446 if ( !(op->flags & GNTMAP_readonly) &&
447 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
448 gnttab_clear_flag(_GTF_writing, &sha->flags);
450 if ( !act->pin )
451 gnttab_clear_flag(_GTF_reading, &sha->flags);
453 unlock_out:
454 spin_unlock(&rd->grant_table->lock);
455 op->status = rc;
456 put_maptrack_handle(ld->grant_table, handle);
457 rcu_unlock_domain(rd);
458 }
460 static long
461 gnttab_map_grant_ref(
462 XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) uop, unsigned int count)
463 {
464 int i;
465 struct gnttab_map_grant_ref op;
467 for ( i = 0; i < count; i++ )
468 {
469 if (i && hypercall_preempt_check())
470 return i;
471 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
472 return -EFAULT;
473 __gnttab_map_grant_ref(&op);
474 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
475 return -EFAULT;
476 }
478 return 0;
479 }
/*
 * Common bulk of unmap_grant_ref / unmap_and_replace: validate the handle,
 * drop pin counts, and detach the host/device mapping.  TLB flushing and
 * page reference release are deferred to __gnttab_unmap_common_complete()
 * so a whole batch can share one TLB flush.
 */
481 static void
482 __gnttab_unmap_common(
483 struct gnttab_unmap_common *op)
484 {
485 domid_t dom;
486 struct domain *ld, *rd;
487 struct active_grant_entry *act;
488 grant_entry_t *sha;
489 s16 rc = 0;
490 u32 old_pin;
492 ld = current->domain;
494 op->frame = (unsigned long)(op->dev_bus_addr >> PAGE_SHIFT);
496 if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) )
497 {
498 gdprintk(XENLOG_INFO, "Bad handle (%d).\n", op->handle);
499 op->status = GNTST_bad_handle;
500 return;
501 }
503 op->map = &maptrack_entry(ld->grant_table, op->handle);
/* A zero flags field marks a free (never-mapped or already-unmapped) slot. */
505 if ( unlikely(!op->map->flags) )
506 {
507 gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
508 op->status = GNTST_bad_handle;
509 return;
510 }
512 dom = op->map->domid;
513 op->flags = op->map->flags;
515 if ( unlikely((op->rd = rd = rcu_lock_domain_by_id(dom)) == NULL) )
516 {
517 /* This can happen when a grant is implicitly unmapped. */
518 gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
519 domain_crash(ld); /* naughty... */
520 return;
521 }
523 rc = xsm_grant_unmapref(ld, rd);
524 if ( rc )
525 {
526 rcu_unlock_domain(rd);
527 op->status = GNTST_permission_denied;
528 return;
529 }
531 TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);
533 spin_lock(&rd->grant_table->lock);
535 act = &active_entry(rd->grant_table, op->map->ref);
536 sha = &shared_entry(rd->grant_table, op->map->ref);
537 old_pin = act->pin;
/* dev_bus_addr of 0 means "whatever frame the grant refers to". */
539 if ( op->frame == 0 )
540 {
541 op->frame = act->frame;
542 }
543 else
544 {
545 if ( unlikely(op->frame != act->frame) )
546 PIN_FAIL(unmap_out, GNTST_general_error,
547 "Bad frame number doesn't match gntref. (%lx != %lx)\n",
548 op->frame, act->frame);
549 if ( op->flags & GNTMAP_device_map )
550 {
551 ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
552 op->map->flags &= ~GNTMAP_device_map;
553 if ( op->flags & GNTMAP_readonly )
554 act->pin -= GNTPIN_devr_inc;
555 else
556 act->pin -= GNTPIN_devw_inc;
557 }
558 }
560 if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
561 {
562 if ( (rc = replace_grant_host_mapping(op->host_addr,
563 op->frame, op->new_addr,
564 op->flags)) < 0 )
565 goto unmap_out;
567 ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
568 op->map->flags &= ~GNTMAP_host_map;
569 if ( op->flags & GNTMAP_readonly )
570 act->pin -= GNTPIN_hstr_inc;
571 else
572 act->pin -= GNTPIN_hstw_inc;
573 }
/* Last writable pin went away: tear down the IOMMU mapping for 'ld'. */
575 if ( need_iommu(ld) &&
576 (old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
577 !(act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
578 {
579 if ( iommu_unmap_page(ld, mfn_to_gmfn(ld, op->frame)) )
580 {
581 rc = GNTST_general_error;
582 goto unmap_out;
583 }
584 }
586 /* If just unmapped a writable mapping, mark as dirtied */
587 if ( !(op->flags & GNTMAP_readonly) )
588 gnttab_mark_dirty(rd, op->frame);
590 unmap_out:
591 op->status = rc;
592 spin_unlock(&rd->grant_table->lock);
593 rcu_unlock_domain(rd);
594 }
/*
 * Second half of an unmap: runs after the batched TLB flush.  Drops the
 * page type/reference counts taken at map time, releases the maptrack
 * handle once the entry is fully unmapped, and clears the GTF_writing /
 * GTF_reading flags in the shared entry when the last pin goes away.
 */
596 static void
597 __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
598 {
599 struct domain *ld, *rd;
600 struct active_grant_entry *act;
601 grant_entry_t *sha;
603 rd = op->rd;
605 if ( rd == NULL )
606 {
607 /*
608 * Suggests that __gntab_unmap_common failed in
609 * rcu_lock_domain_by_id() or earlier, and so we have nothing
610 * to complete
611 */
612 return;
613 }
615 ld = current->domain;
617 rcu_lock_domain(rd);
618 spin_lock(&rd->grant_table->lock);
620 act = &active_entry(rd->grant_table, op->map->ref);
621 sha = &shared_entry(rd->grant_table, op->map->ref);
623 if ( unlikely(op->frame != act->frame) )
624 {
625 /*
626 * Suggests that __gntab_unmap_common failed early and so
627 * nothing further to do
628 */
629 goto unmap_out;
630 }
/* Release the device-map references taken by __gnttab_map_grant_ref(). */
632 if ( op->flags & GNTMAP_device_map )
633 {
634 if ( !is_iomem_page(act->frame) )
635 {
636 if ( op->flags & GNTMAP_readonly )
637 put_page(mfn_to_page(op->frame));
638 else
639 put_page_and_type(mfn_to_page(op->frame));
640 }
641 }
/* Release the host-map references, unless the unmap itself failed. */
643 if ( (op->host_addr != 0) && (op->flags & GNTMAP_host_map) )
644 {
645 if ( op->status != 0 )
646 {
647 /*
648 * Suggests that __gntab_unmap_common failed in
649 * replace_grant_host_mapping() so nothing further to do
650 */
651 goto unmap_out;
652 }
654 if ( !is_iomem_page(op->frame) )
655 {
656 if ( gnttab_host_mapping_get_page_type(op, ld, rd) )
657 put_page_type(mfn_to_page(op->frame));
658 put_page(mfn_to_page(op->frame));
659 }
660 }
/* Both mapping kinds gone: the maptrack slot can be recycled. */
662 if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
663 {
664 op->map->flags = 0;
665 put_maptrack_handle(ld->grant_table, op->handle);
666 }
668 if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
669 !(op->flags & GNTMAP_readonly) )
670 gnttab_clear_flag(_GTF_writing, &sha->flags);
672 if ( act->pin == 0 )
673 gnttab_clear_flag(_GTF_reading, &sha->flags);
675 unmap_out:
676 spin_unlock(&rd->grant_table->lock);
677 rcu_unlock_domain(rd);
678 }
680 static void
681 __gnttab_unmap_grant_ref(
682 struct gnttab_unmap_grant_ref *op,
683 struct gnttab_unmap_common *common)
684 {
685 common->host_addr = op->host_addr;
686 common->dev_bus_addr = op->dev_bus_addr;
687 common->handle = op->handle;
689 /* Intialise these in case common contains old state */
690 common->new_addr = 0;
691 common->rd = NULL;
693 __gnttab_unmap_common(common);
694 op->status = common->status;
695 }
/*
 * GNTTABOP_unmap_grant_ref handler: processes elements in batches of
 * GNTTAB_UNMAP_BATCH_SIZE so that each batch shares one TLB flush before
 * the completion pass releases page references.  Returns 0 when done, the
 * number of elements processed when preempted (for the hypercall
 * continuation), or -EFAULT on guest memory access failure.
 */
698 static long
699 gnttab_unmap_grant_ref(
700 XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) uop, unsigned int count)
701 {
702 int i, c, partial_done, done = 0;
703 struct gnttab_unmap_grant_ref op;
704 struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
706 while ( count != 0 )
707 {
708 c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
709 partial_done = 0;
/* Phase 1: detach the mappings (no page refs dropped yet). */
711 for ( i = 0; i < c; i++ )
712 {
713 if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
714 goto fault;
715 __gnttab_unmap_grant_ref(&op, &(common[i]));
716 ++partial_done;
717 if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
718 goto fault;
719 }
/* Phase 2: one TLB flush for the whole batch, then drop references. */
721 flush_tlb_mask(&current->domain->domain_dirty_cpumask);
723 for ( i = 0; i < partial_done; i++ )
724 __gnttab_unmap_common_complete(&(common[i]));
726 count -= c;
727 done += c;
729 if (count && hypercall_preempt_check())
730 return done;
731 }
733 return 0;
/* Fault path: still must flush and complete the already-detached entries. */
735 fault:
736 flush_tlb_mask(&current->domain->domain_dirty_cpumask);
738 for ( i = 0; i < partial_done; i++ )
739 __gnttab_unmap_common_complete(&(common[i]));
740 return -EFAULT;
741 }
743 static void
744 __gnttab_unmap_and_replace(
745 struct gnttab_unmap_and_replace *op,
746 struct gnttab_unmap_common *common)
747 {
748 common->host_addr = op->host_addr;
749 common->new_addr = op->new_addr;
750 common->handle = op->handle;
752 /* Intialise these in case common contains old state */
753 common->dev_bus_addr = 0;
754 common->rd = NULL;
756 __gnttab_unmap_common(common);
757 op->status = common->status;
758 }
/*
 * GNTTABOP_unmap_and_replace handler: identical batching scheme to
 * gnttab_unmap_grant_ref() — detach a batch, flush the TLB once, then
 * complete (drop references).  Returns 0 when done, the element count when
 * preempted, or -EFAULT on guest memory access failure.
 */
760 static long
761 gnttab_unmap_and_replace(
762 XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) uop, unsigned int count)
763 {
764 int i, c, partial_done, done = 0;
765 struct gnttab_unmap_and_replace op;
766 struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
768 while ( count != 0 )
769 {
770 c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
771 partial_done = 0;
/* Phase 1: detach the mappings (no page refs dropped yet). */
773 for ( i = 0; i < c; i++ )
774 {
775 if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
776 goto fault;
777 __gnttab_unmap_and_replace(&op, &(common[i]));
778 ++partial_done;
779 if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
780 goto fault;
781 }
/* Phase 2: one TLB flush for the whole batch, then drop references. */
783 flush_tlb_mask(&current->domain->domain_dirty_cpumask);
785 for ( i = 0; i < partial_done; i++ )
786 __gnttab_unmap_common_complete(&(common[i]));
788 count -= c;
789 done += c;
791 if (count && hypercall_preempt_check())
792 return done;
793 }
795 return 0;
/* Fault path: still must flush and complete the already-detached entries. */
797 fault:
798 flush_tlb_mask(&current->domain->domain_dirty_cpumask);
800 for ( i = 0; i < partial_done; i++ )
801 __gnttab_unmap_common_complete(&(common[i]));
802 return -EFAULT;
803 }
/*
 * Grow domain d's grant table (active and shared frame arrays) to
 * req_nr_frames.  Returns 1 on success, 0 on allocation failure after
 * freeing anything it allocated.  Caller must hold d's grant table lock.
 */
805 int
806 gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
807 {
808 /* d's grant table lock must be held by the caller */
810 struct grant_table *gt = d->grant_table;
811 unsigned int i;
813 ASSERT(req_nr_frames <= max_nr_grant_frames);
815 gdprintk(XENLOG_INFO,
816 "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
817 d->domain_id, nr_grant_frames(gt), req_nr_frames);
819 /* Active */
820 for ( i = nr_active_grant_frames(gt);
821 i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
822 {
823 if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
824 goto active_alloc_failed;
825 clear_page(gt->active[i]);
826 }
828 /* Shared */
829 for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
830 {
831 if ( (gt->shared[i] = alloc_xenheap_page()) == NULL )
832 goto shared_alloc_failed;
833 clear_page(gt->shared[i]);
834 }
836 /* Share the new shared frames with the recipient domain */
837 for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
838 gnttab_create_shared_page(d, gt, i);
/* Only bump the published frame count once everything is in place. */
840 gt->nr_grant_frames = req_nr_frames;
842 return 1;
/*
 * Unwind paths free every slot in the attempted range; presumably
 * free_xenheap_page() tolerates the NULL slot that failed to allocate —
 * TODO(review): confirm.
 */
844 shared_alloc_failed:
845 for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
846 {
847 free_xenheap_page(gt->shared[i]);
848 gt->shared[i] = NULL;
849 }
850 active_alloc_failed:
851 for ( i = nr_active_grant_frames(gt);
852 i < num_act_frames_from_sha_frames(req_nr_frames); i++ )
853 {
854 free_xenheap_page(gt->active[i]);
855 gt->active[i] = NULL;
856 }
857 gdprintk(XENLOG_INFO, "Allocation failure when expanding grant table.\n");
858 return 0;
859 }
/*
 * GNTTABOP_setup_table handler: (re)size the target domain's grant table
 * to op.nr_frames and return the list of GMFNs backing the shared frames.
 * Only a single element (count == 1) is accepted for this operation.
 */
861 static long
862 gnttab_setup_table(
863 XEN_GUEST_HANDLE(gnttab_setup_table_t) uop, unsigned int count)
864 {
865 struct gnttab_setup_table op;
866 struct domain *d;
867 int i;
868 unsigned long gmfn;
869 domid_t dom;
871 if ( count != 1 )
872 return -EINVAL;
874 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
875 {
876 gdprintk(XENLOG_INFO, "Fault while reading gnttab_setup_table_t.\n");
877 return -EFAULT;
878 }
880 if ( unlikely(op.nr_frames > max_nr_grant_frames) )
881 {
882 gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
883 " per domain.\n",
884 max_nr_grant_frames);
885 op.status = GNTST_general_error;
886 goto out1;
887 }
/* Setting up another domain's table requires privilege over it. */
889 dom = op.dom;
890 if ( dom == DOMID_SELF )
891 {
892 d = rcu_lock_current_domain();
893 }
894 else
895 {
896 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
897 {
898 gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
899 op.status = GNTST_bad_domain;
900 goto out1;
901 }
903 if ( unlikely(!IS_PRIV_FOR(current->domain, d)) )
904 {
905 op.status = GNTST_permission_denied;
906 goto out2;
907 }
908 }
910 if ( xsm_grant_setup(current->domain, d) )
911 {
912 op.status = GNTST_permission_denied;
913 goto out2;
914 }
916 spin_lock(&d->grant_table->lock);
/* Grow the table if the request exceeds the current size. */
918 if ( (op.nr_frames > nr_grant_frames(d->grant_table)) &&
919 !gnttab_grow_table(d, op.nr_frames) )
920 {
921 gdprintk(XENLOG_INFO,
922 "Expand grant table to %d failed. Current: %d Max: %d.\n",
923 op.nr_frames,
924 nr_grant_frames(d->grant_table),
925 max_nr_grant_frames);
926 op.status = GNTST_general_error;
927 goto out3;
928 }
930 op.status = GNTST_okay;
931 for ( i = 0; i < op.nr_frames; i++ )
932 {
933 gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
934 (void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
935 }
937 out3:
938 spin_unlock(&d->grant_table->lock);
939 out2:
940 rcu_unlock_domain(d);
941 out1:
942 if ( unlikely(copy_to_guest(uop, &op, 1)) )
943 return -EFAULT;
945 return 0;
946 }
948 static long
949 gnttab_query_size(
950 XEN_GUEST_HANDLE(gnttab_query_size_t) uop, unsigned int count)
951 {
952 struct gnttab_query_size op;
953 struct domain *d;
954 domid_t dom;
955 int rc;
957 if ( count != 1 )
958 return -EINVAL;
960 if ( unlikely(copy_from_guest(&op, uop, 1) != 0) )
961 {
962 gdprintk(XENLOG_INFO, "Fault while reading gnttab_query_size_t.\n");
963 return -EFAULT;
964 }
966 dom = op.dom;
967 if ( dom == DOMID_SELF )
968 {
969 d = rcu_lock_current_domain();
970 }
971 else
972 {
973 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
974 {
975 gdprintk(XENLOG_INFO, "Bad domid %d.\n", dom);
976 op.status = GNTST_bad_domain;
977 goto query_out;
978 }
980 if ( unlikely(!IS_PRIV_FOR(current->domain, d)) )
981 {
982 op.status = GNTST_permission_denied;
983 goto query_out_unlock;
984 }
985 }
987 rc = xsm_grant_query_size(current->domain, d);
988 if ( rc )
989 {
990 op.status = GNTST_permission_denied;
991 goto query_out_unlock;
992 }
994 spin_lock(&d->grant_table->lock);
996 op.nr_frames = nr_grant_frames(d->grant_table);
997 op.max_nr_frames = max_nr_grant_frames;
998 op.status = GNTST_okay;
1000 spin_unlock(&d->grant_table->lock);
1003 query_out_unlock:
1004 rcu_unlock_domain(d);
1006 query_out:
1007 if ( unlikely(copy_to_guest(uop, &op, 1)) )
1008 return -EFAULT;
1010 return 0;
1013 /*
1014 * Check that the given grant reference (rd,ref) allows 'ld' to transfer
1015 * ownership of a page frame. If so, lock down the grant entry.
1016 */
1017 static int
1018 gnttab_prepare_for_transfer(
1019 struct domain *rd, struct domain *ld, grant_ref_t ref)
1021 struct grant_table *rgt;
1022 struct grant_entry *sha;
1023 union grant_combo scombo, prev_scombo, new_scombo;
1024 int retries = 0;
1026 if ( unlikely((rgt = rd->grant_table) == NULL) )
1028 gdprintk(XENLOG_INFO, "Dom %d has no grant table.\n", rd->domain_id);
1029 return 0;
1032 spin_lock(&rgt->lock);
1034 if ( unlikely(ref >= nr_grant_entries(rd->grant_table)) )
1036 gdprintk(XENLOG_INFO,
1037 "Bad grant reference (%d) for transfer to domain(%d).\n",
1038 ref, rd->domain_id);
1039 goto fail;
1042 sha = &shared_entry(rgt, ref);
1044 scombo.word = *(u32 *)&sha->flags;
1046 for ( ; ; )
1048 if ( unlikely(scombo.shorts.flags != GTF_accept_transfer) ||
1049 unlikely(scombo.shorts.domid != ld->domain_id) )
1051 gdprintk(XENLOG_INFO, "Bad flags (%x) or dom (%d). "
1052 "(NB. expected dom %d)\n",
1053 scombo.shorts.flags, scombo.shorts.domid,
1054 ld->domain_id);
1055 goto fail;
1058 new_scombo = scombo;
1059 new_scombo.shorts.flags |= GTF_transfer_committed;
1061 prev_scombo.word = cmpxchg((u32 *)&sha->flags,
1062 scombo.word, new_scombo.word);
1063 if ( likely(prev_scombo.word == scombo.word) )
1064 break;
1066 if ( retries++ == 4 )
1068 gdprintk(XENLOG_WARNING, "Shared grant entry is unstable.\n");
1069 goto fail;
1072 scombo = prev_scombo;
1075 spin_unlock(&rgt->lock);
1076 return 1;
1078 fail:
1079 spin_unlock(&rgt->lock);
1080 return 0;
1083 static long
1084 gnttab_transfer(
1085 XEN_GUEST_HANDLE(gnttab_transfer_t) uop, unsigned int count)
1087 struct domain *d = current->domain;
1088 struct domain *e;
1089 struct page_info *page;
1090 int i;
1091 grant_entry_t *sha;
1092 struct gnttab_transfer gop;
1093 unsigned long mfn;
1094 unsigned int max_bitsize;
1096 for ( i = 0; i < count; i++ )
1098 if (i && hypercall_preempt_check())
1099 return i;
1101 /* Read from caller address space. */
1102 if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
1104 gdprintk(XENLOG_INFO, "gnttab_transfer: error reading req %d/%d\n",
1105 i, count);
1106 return -EFAULT;
1109 mfn = gmfn_to_mfn(d, gop.mfn);
1111 /* Check the passed page frame for basic validity. */
1112 if ( unlikely(!mfn_valid(mfn)) )
1114 gdprintk(XENLOG_INFO, "gnttab_transfer: out-of-range %lx\n",
1115 (unsigned long)gop.mfn);
1116 gop.status = GNTST_bad_page;
1117 goto copyback;
1120 page = mfn_to_page(mfn);
1121 if ( unlikely(is_xen_heap_page(page)) )
1123 gdprintk(XENLOG_INFO, "gnttab_transfer: xen frame %lx\n",
1124 (unsigned long)gop.mfn);
1125 gop.status = GNTST_bad_page;
1126 goto copyback;
1129 if ( steal_page(d, page, 0) < 0 )
1131 gop.status = GNTST_bad_page;
1132 goto copyback;
1135 #ifndef __ia64__ /* IA64 implicitly replaces the old page in steal_page(). */
1136 guest_physmap_remove_page(d, gop.mfn, mfn, 0);
1137 #endif
1138 flush_tlb_mask(&d->domain_dirty_cpumask);
1140 /* Find the target domain. */
1141 if ( unlikely((e = rcu_lock_domain_by_id(gop.domid)) == NULL) )
1143 gdprintk(XENLOG_INFO, "gnttab_transfer: can't find domain %d\n",
1144 gop.domid);
1145 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1146 free_domheap_page(page);
1147 gop.status = GNTST_bad_domain;
1148 goto copyback;
1151 if ( xsm_grant_transfer(d, e) )
1153 gop.status = GNTST_permission_denied;
1154 unlock_and_copyback:
1155 rcu_unlock_domain(e);
1156 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1157 free_domheap_page(page);
1158 goto copyback;
1161 max_bitsize = domain_clamp_alloc_bitsize(
1162 e, BITS_PER_LONG+PAGE_SHIFT-1);
1163 if ( (1UL << (max_bitsize - PAGE_SHIFT)) <= mfn )
1165 struct page_info *new_page;
1166 void *sp, *dp;
1168 new_page = alloc_domheap_page(NULL, MEMF_bits(max_bitsize));
1169 if ( new_page == NULL )
1171 gop.status = GNTST_address_too_big;
1172 goto unlock_and_copyback;
1175 sp = map_domain_page(mfn);
1176 dp = map_domain_page(page_to_mfn(new_page));
1177 memcpy(dp, sp, PAGE_SIZE);
1178 unmap_domain_page(dp);
1179 unmap_domain_page(sp);
1181 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1182 free_domheap_page(page);
1183 page = new_page;
1186 spin_lock(&e->page_alloc_lock);
1188 /*
1189 * Check that 'e' will accept the page and has reservation
1190 * headroom. Also, a domain mustn't have PGC_allocated
1191 * pages when it is dying.
1192 */
1193 if ( unlikely(e->is_dying) ||
1194 unlikely(e->tot_pages >= e->max_pages) ||
1195 unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
1197 if ( !e->is_dying )
1198 gdprintk(XENLOG_INFO, "gnttab_transfer: "
1199 "Transferee has no reservation "
1200 "headroom (%d,%d) or provided a bad grant ref (%08x) "
1201 "or is dying (%d)\n",
1202 e->tot_pages, e->max_pages, gop.ref, e->is_dying);
1203 spin_unlock(&e->page_alloc_lock);
1204 rcu_unlock_domain(e);
1205 page->count_info &= ~(PGC_count_mask|PGC_allocated);
1206 free_domheap_page(page);
1207 gop.status = GNTST_general_error;
1208 goto copyback;
1211 /* Okay, add the page to 'e'. */
1212 if ( unlikely(e->tot_pages++ == 0) )
1213 get_knownalive_domain(e);
1214 page_list_add_tail(page, &e->page_list);
1215 page_set_owner(page, e);
1217 spin_unlock(&e->page_alloc_lock);
1219 TRACE_1D(TRC_MEM_PAGE_GRANT_TRANSFER, e->domain_id);
1221 /* Tell the guest about its new page frame. */
1222 spin_lock(&e->grant_table->lock);
1224 sha = &shared_entry(e->grant_table, gop.ref);
1225 guest_physmap_add_page(e, sha->frame, mfn, 0);
1226 sha->frame = mfn;
1227 wmb();
1228 sha->flags |= GTF_transfer_completed;
1230 spin_unlock(&e->grant_table->lock);
1232 rcu_unlock_domain(e);
1234 gop.status = GNTST_okay;
1236 copyback:
1237 if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
1239 gdprintk(XENLOG_INFO, "gnttab_transfer: error writing resp "
1240 "%d/%d\n", i, count);
1241 return -EFAULT;
1245 return 0;
/* Undo __acquire_grant_for_copy. Again, this has no effect on page
   type and reference counts. */
static void
__release_grant_for_copy(
    struct domain *rd, unsigned long gref, int readonly)
{
    grant_entry_t *sha;                /* shared entry, visible to granter */
    struct active_grant_entry *act;    /* Xen-private pin bookkeeping */
    unsigned long r_frame;

    /* rd's grant table lock protects both the active and shared entries. */
    spin_lock(&rd->grant_table->lock);

    act = &active_entry(rd->grant_table, gref);
    sha = &shared_entry(rd->grant_table, gref);
    r_frame = act->frame;

    if ( readonly )
    {
        act->pin -= GNTPIN_hstr_inc;
    }
    else
    {
        /* A writable pin may have dirtied the frame; tell the log-dirty code. */
        gnttab_mark_dirty(rd, r_frame);

        act->pin -= GNTPIN_hstw_inc;
        /* Last writable pin gone: the granter may now see GTF_writing clear. */
        if ( !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) )
            gnttab_clear_flag(_GTF_writing, &sha->flags);
    }

    /* Entry fully unpinned: clear GTF_reading as well. */
    if ( !act->pin )
        gnttab_clear_flag(_GTF_reading, &sha->flags);

    spin_unlock(&rd->grant_table->lock);
}
/* Grab a frame number from a grant entry and update the flags and pin
   count as appropriate. Note that this does *not* update the page
   type or reference counts, and does not check that the mfn is
   actually valid. */
static int
__acquire_grant_for_copy(
    struct domain *rd, unsigned long gref, int readonly,
    unsigned long *frame)
{
    grant_entry_t *sha;
    struct active_grant_entry *act;
    s16 rc = GNTST_okay;
    int retries = 0;
    /* flags+domid halves of the shared entry, updated atomically as a u32. */
    union grant_combo scombo, prev_scombo, new_scombo;

    spin_lock(&rd->grant_table->lock);

    if ( unlikely(gref >= nr_grant_entries(rd->grant_table)) )
        PIN_FAIL(unlock_out, GNTST_bad_gntref,
                 "Bad grant reference %ld\n", gref);

    act = &active_entry(rd->grant_table, gref);
    sha = &shared_entry(rd->grant_table, gref);

    /* If already pinned, check the active domid and avoid refcnt overflow. */
    if ( act->pin &&
         ((act->domid != current->domain->domain_id) ||
          (act->pin & 0x80808080U) != 0) )
        PIN_FAIL(unlock_out, GNTST_general_error,
                 "Bad domain (%d != %d), or risk of counter overflow %08x\n",
                 act->domid, current->domain->domain_id, act->pin);

    /*
     * Only touch the shared entry's flags when this is the first pin, or
     * when we are adding the first *writable* pin to a read-pinned entry.
     */
    if ( !act->pin ||
         (!readonly && !(act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask))) )
    {
        scombo.word = *(u32 *)&sha->flags;

        /* cmpxchg loop: the granting guest may race with us on its flags. */
        for ( ; ; )
        {
            /* If not already pinned, check the grant domid and type. */
            if ( !act->pin &&
                 (((scombo.shorts.flags & GTF_type_mask) !=
                   GTF_permit_access) ||
                  (scombo.shorts.domid != current->domain->domain_id)) )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Bad flags (%x) or dom (%d). (expected dom %d)\n",
                         scombo.shorts.flags, scombo.shorts.domid,
                         current->domain->domain_id);

            new_scombo = scombo;
            new_scombo.shorts.flags |= GTF_reading;

            if ( !readonly )
            {
                new_scombo.shorts.flags |= GTF_writing;
                if ( unlikely(scombo.shorts.flags & GTF_readonly) )
                    PIN_FAIL(unlock_out, GNTST_general_error,
                             "Attempt to write-pin a r/o grant entry.\n");
            }

            prev_scombo.word = cmpxchg((u32 *)&sha->flags,
                                       scombo.word, new_scombo.word);
            if ( likely(prev_scombo.word == scombo.word) )
                break;

            /* Give up if the guest keeps flipping its entry under us. */
            if ( retries++ == 4 )
                PIN_FAIL(unlock_out, GNTST_general_error,
                         "Shared grant entry is unstable.\n");

            scombo = prev_scombo;
        }

        /* First pin: latch the granter's details into the active entry. */
        if ( !act->pin )
        {
            act->domid = scombo.shorts.domid;
            act->gfn = sha->frame;
            act->frame = gmfn_to_mfn(rd, sha->frame);
        }
    }

    act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;

    *frame = act->frame;

 unlock_out:
    spin_unlock(&rd->grant_table->lock);
    return rc;
}
/*
 * Perform a single grant-copy operation: copy op->len bytes between a
 * source and a destination frame at the given offsets.  Either side may
 * be specified by grant reference; specifying a raw gmfn is permitted
 * for DOMID_SELF only.  The result is reported in op->status, and every
 * reference/pin taken here is dropped again before returning.
 */
static void
__gnttab_copy(
    struct gnttab_copy *op)
{
    struct domain *sd = NULL, *dd = NULL;  /* source / destination domains */
    unsigned long s_frame, d_frame;        /* source / destination mfns */
    char *sp, *dp;
    s16 rc = GNTST_okay;
    /* What the error_out path must undo (set as each resource is taken). */
    int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
    int src_is_gref, dest_is_gref;

    /* Each end of the copy must lie entirely within one page. */
    if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
         ((op->dest.offset + op->len) > PAGE_SIZE) )
        PIN_FAIL(error_out, GNTST_bad_copy_arg, "copy beyond page area.\n");

    src_is_gref = op->flags & GNTCOPY_source_gref;
    dest_is_gref = op->flags & GNTCOPY_dest_gref;

    /* Raw-mfn addressing is only allowed for the caller's own pages. */
    if ( (op->source.domid != DOMID_SELF && !src_is_gref ) ||
         (op->dest.domid != DOMID_SELF && !dest_is_gref) )
        PIN_FAIL(error_out, GNTST_permission_denied,
                 "only allow copy-by-mfn for DOMID_SELF.\n");

    if ( op->source.domid == DOMID_SELF )
        sd = rcu_lock_current_domain();
    else if ( (sd = rcu_lock_domain_by_id(op->source.domid)) == NULL )
        PIN_FAIL(error_out, GNTST_bad_domain,
                 "couldn't find %d\n", op->source.domid);

    if ( op->dest.domid == DOMID_SELF )
        dd = rcu_lock_current_domain();
    else if ( (dd = rcu_lock_domain_by_id(op->dest.domid)) == NULL )
        PIN_FAIL(error_out, GNTST_bad_domain,
                 "couldn't find %d\n", op->dest.domid);

    rc = xsm_grant_copy(sd, dd);
    if ( rc )
    {
        rc = GNTST_permission_denied;
        goto error_out;
    }

    /* Resolve the source frame (read-only grant pin in the gref case). */
    if ( src_is_gref )
    {
        rc = __acquire_grant_for_copy(sd, op->source.u.ref, 1, &s_frame);
        if ( rc != GNTST_okay )
            goto error_out;
        have_s_grant = 1;
    }
    else
    {
        s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
    }
    if ( unlikely(!mfn_valid(s_frame)) )
        PIN_FAIL(error_out, GNTST_general_error,
                 "source frame %lx invalid.\n", s_frame);
    if ( !get_page(mfn_to_page(s_frame), sd) )
    {
        if ( !sd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
        rc = GNTST_general_error;
        goto error_out;
    }
    have_s_ref = 1;

    /* Resolve the destination frame (writable grant pin in the gref case). */
    if ( dest_is_gref )
    {
        rc = __acquire_grant_for_copy(dd, op->dest.u.ref, 0, &d_frame);
        if ( rc != GNTST_okay )
            goto error_out;
        have_d_grant = 1;
    }
    else
    {
        d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
    }
    if ( unlikely(!mfn_valid(d_frame)) )
        PIN_FAIL(error_out, GNTST_general_error,
                 "destination frame %lx invalid.\n", d_frame);
    if ( !get_page_and_type(mfn_to_page(d_frame), dd, PGT_writable_page) )
    {
        if ( !dd->is_dying )
            gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
        rc = GNTST_general_error;
        goto error_out;
    }

    sp = map_domain_page(s_frame);
    dp = map_domain_page(d_frame);

    memcpy(dp + op->dest.offset, sp + op->source.offset, op->len);

    unmap_domain_page(dp);
    unmap_domain_page(sp);

    gnttab_mark_dirty(dd, d_frame);

    /* Matches the successful get_page_and_type() just above. */
    put_page_and_type(mfn_to_page(d_frame));
 error_out:
    if ( have_s_ref )
        put_page(mfn_to_page(s_frame));
    if ( have_s_grant )
        __release_grant_for_copy(sd, op->source.u.ref, 1);
    if ( have_d_grant )
        __release_grant_for_copy(dd, op->dest.u.ref, 0);
    if ( sd )
        rcu_unlock_domain(sd);
    if ( dd )
        rcu_unlock_domain(dd);
    op->status = rc;
}
1484 static long
1485 gnttab_copy(
1486 XEN_GUEST_HANDLE(gnttab_copy_t) uop, unsigned int count)
1488 int i;
1489 struct gnttab_copy op;
1491 for ( i = 0; i < count; i++ )
1493 if (i && hypercall_preempt_check())
1494 return i;
1495 if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
1496 return -EFAULT;
1497 __gnttab_copy(&op);
1498 if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
1499 return -EFAULT;
1501 return 0;
/*
 * GNTTABOP hypercall entry point: dispatch a batch of 'count' elements
 * at 'uop' according to 'cmd'.  The batched sub-ops (map, unmap,
 * unmap-and-replace, transfer, copy) return the number of elements
 * already processed when preempted; we then advance the guest handle
 * past those and create a continuation covering the remainder.
 */
long
do_grant_table_op(
    unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
{
    long rc;
    struct domain *d = current->domain;

    /* Bound 'count' so it is also representable as a non-negative int. */
    if ( (int)count < 0 )
        return -EINVAL;

    domain_lock(d);

    rc = -EFAULT;
    switch ( cmd )
    {
    case GNTTABOP_map_grant_ref:
    {
        XEN_GUEST_HANDLE(gnttab_map_grant_ref_t) map =
            guest_handle_cast(uop, gnttab_map_grant_ref_t);
        if ( unlikely(!guest_handle_okay(map, count)) )
            goto out;
        rc = gnttab_map_grant_ref(map, count);
        if ( rc > 0 )
        {
            /* Preempted after rc elements: step the handle past them. */
            guest_handle_add_offset(map, rc);
            uop = guest_handle_cast(map, void);
        }
        break;
    }
    case GNTTABOP_unmap_grant_ref:
    {
        XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t) unmap =
            guest_handle_cast(uop, gnttab_unmap_grant_ref_t);
        if ( unlikely(!guest_handle_okay(unmap, count)) )
            goto out;
        rc = gnttab_unmap_grant_ref(unmap, count);
        if ( rc > 0 )
        {
            guest_handle_add_offset(unmap, rc);
            uop = guest_handle_cast(unmap, void);
        }
        break;
    }
    case GNTTABOP_unmap_and_replace:
    {
        XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t) unmap =
            guest_handle_cast(uop, gnttab_unmap_and_replace_t);
        if ( unlikely(!guest_handle_okay(unmap, count)) )
            goto out;
        rc = -ENOSYS;
        if ( unlikely(!replace_grant_supported()) )
            goto out;
        rc = gnttab_unmap_and_replace(unmap, count);
        if ( rc > 0 )
        {
            guest_handle_add_offset(unmap, rc);
            uop = guest_handle_cast(unmap, void);
        }
        break;
    }
    case GNTTABOP_setup_table:
    {
        rc = gnttab_setup_table(
            guest_handle_cast(uop, gnttab_setup_table_t), count);
        /* setup_table is never preempted, so never returns > 0. */
        ASSERT(rc <= 0);
        break;
    }
    case GNTTABOP_transfer:
    {
        XEN_GUEST_HANDLE(gnttab_transfer_t) transfer =
            guest_handle_cast(uop, gnttab_transfer_t);
        if ( unlikely(!guest_handle_okay(transfer, count)) )
            goto out;
        rc = gnttab_transfer(transfer, count);
        if ( rc > 0 )
        {
            guest_handle_add_offset(transfer, rc);
            uop = guest_handle_cast(transfer, void);
        }
        break;
    }
    case GNTTABOP_copy:
    {
        XEN_GUEST_HANDLE(gnttab_copy_t) copy =
            guest_handle_cast(uop, gnttab_copy_t);
        if ( unlikely(!guest_handle_okay(copy, count)) )
            goto out;
        rc = gnttab_copy(copy, count);
        if ( rc > 0 )
        {
            guest_handle_add_offset(copy, rc);
            uop = guest_handle_cast(copy, void);
        }
        break;
    }
    case GNTTABOP_query_size:
    {
        rc = gnttab_query_size(
            guest_handle_cast(uop, gnttab_query_size_t), count);
        ASSERT(rc <= 0);
        break;
    }
    default:
        rc = -ENOSYS;
        break;
    }

  out:
    domain_unlock(d);

    if ( rc > 0 )
    {
        /* rc elements done, count - rc remain; re-enter for the tail. */
        ASSERT(rc < count);
        rc = hypercall_create_continuation(__HYPERVISOR_grant_table_op,
                                           "ihi", cmd, uop, count - rc);
    }

    return rc;
}
1624 #ifdef CONFIG_COMPAT
1625 #include "compat/grant_table.c"
1626 #endif
1628 static unsigned int max_nr_active_grant_frames(void)
1630 return (((max_nr_grant_frames * (PAGE_SIZE / sizeof(grant_entry_t))) +
1631 ((PAGE_SIZE / sizeof(struct active_grant_entry))-1))
1632 / (PAGE_SIZE / sizeof(struct active_grant_entry)));
/*
 * Allocate and initialise a domain's grant table: the active-entry
 * array, the maptrack table (with its free list threaded through the
 * 'ref' fields), and the shared frames exposed to the guest.  On any
 * allocation failure everything already allocated is torn down via the
 * cascading no_mem_* labels and -ENOMEM is returned.
 */
int
grant_table_create(
    struct domain *d)
{
    struct grant_table *t;
    int i;

    /* If this sizeof assertion fails, fix the function: shared_index */
    ASSERT(sizeof(grant_entry_t) == 8);

    if ( (t = xmalloc(struct grant_table)) == NULL )
        goto no_mem_0;

    /* Simple stuff. */
    memset(t, 0, sizeof(*t));
    spin_lock_init(&t->lock);
    t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;

    /* Active grant table: pointer array sized for the maximum, but only
     * the initial frames are populated now (the rest grow on demand). */
    if ( (t->active = xmalloc_array(struct active_grant_entry *,
                                    max_nr_active_grant_frames())) == NULL )
        goto no_mem_1;
    memset(t->active, 0, max_nr_active_grant_frames() * sizeof(t->active[0]));
    for ( i = 0;
          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
    {
        if ( (t->active[i] = alloc_xenheap_page()) == NULL )
            goto no_mem_2;
        clear_page(t->active[i]);
    }

    /* Tracking of mapped foreign frames table */
    if ( (t->maptrack = xmalloc_array(struct grant_mapping *,
                                      max_nr_maptrack_frames())) == NULL )
        goto no_mem_2;
    memset(t->maptrack, 0, max_nr_maptrack_frames() * sizeof(t->maptrack[0]));
    if ( (t->maptrack[0] = alloc_xenheap_page()) == NULL )
        goto no_mem_3;
    clear_page(t->maptrack[0]);
    t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
    /* Thread the maptrack free list: each entry points at the next. */
    for ( i = 0; i < t->maptrack_limit; i++ )
        t->maptrack[0][i].ref = i+1;

    /* Shared grant table. */
    if ( (t->shared = xmalloc_array(struct grant_entry *,
                                    max_nr_grant_frames)) == NULL )
        goto no_mem_3;
    memset(t->shared, 0, max_nr_grant_frames * sizeof(t->shared[0]));
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
    {
        if ( (t->shared[i] = alloc_xenheap_page()) == NULL )
            goto no_mem_4;
        clear_page(t->shared[i]);
    }

    /* Expose the initial shared frames to the guest. */
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
        gnttab_create_shared_page(d, t, i);

    /* Okay, install the structure. */
    d->grant_table = t;
    return 0;

    /* Unwind in reverse order of construction.  The arrays were zeroed
     * up front, so freeing not-yet-allocated slots passes NULL, which
     * the xenheap free routine tolerates. */
 no_mem_4:
    for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
        free_xenheap_page(t->shared[i]);
    xfree(t->shared);
 no_mem_3:
    free_xenheap_page(t->maptrack[0]);
    xfree(t->maptrack);
 no_mem_2:
    for ( i = 0;
          i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
        free_xenheap_page(t->active[i]);
    xfree(t->active);
 no_mem_1:
    xfree(t);
 no_mem_0:
    return -ENOMEM;
}
/*
 * Called while domain 'd' is dying: walk d's maptrack table and undo
 * every outstanding grant mapping it still holds of other domains'
 * frames, dropping the pins and page references the original map
 * operation took and clearing the granter's GTF_writing/GTF_reading
 * flags once the entry becomes unpinned.
 */
void
gnttab_release_mappings(
    struct domain *d)
{
    struct grant_table *gt = d->grant_table;
    struct grant_mapping *map;
    grant_ref_t ref;
    grant_handle_t handle;
    struct domain *rd;                 /* granting (remote) domain */
    struct active_grant_entry *act;
    struct grant_entry *sha;

    BUG_ON(!d->is_dying);

    for ( handle = 0; handle < gt->maptrack_limit; handle++ )
    {
        map = &maptrack_entry(gt, handle);
        /* Skip maptrack slots that hold no live mapping. */
        if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
            continue;

        ref = map->ref;

        gdprintk(XENLOG_INFO, "Grant release (%hu) ref:(%hu) "
                 "flags:(%x) dom:(%hu)\n",
                 handle, ref, map->flags, map->domid);

        rd = rcu_lock_domain_by_id(map->domid);
        if ( rd == NULL )
        {
            /* Nothing to clear up... */
            map->flags = 0;
            continue;
        }

        spin_lock(&rd->grant_table->lock);

        act = &active_entry(rd->grant_table, ref);
        sha = &shared_entry(rd->grant_table, ref);

        if ( map->flags & GNTMAP_readonly )
        {
            /* Read-only mappings took plain page refs on map. */
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devr_mask));
                act->pin -= GNTPIN_devr_inc;
                if ( !is_iomem_page(act->frame) )
                    put_page(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstr_mask));
                act->pin -= GNTPIN_hstr_inc;
                if ( gnttab_release_host_mappings &&
                     !is_iomem_page(act->frame) )
                    put_page(mfn_to_page(act->frame));
            }
        }
        else
        {
            /* Writable mappings additionally took a type count on map. */
            if ( map->flags & GNTMAP_device_map )
            {
                BUG_ON(!(act->pin & GNTPIN_devw_mask));
                act->pin -= GNTPIN_devw_inc;
                if ( !is_iomem_page(act->frame) )
                    put_page_and_type(mfn_to_page(act->frame));
            }

            if ( map->flags & GNTMAP_host_map )
            {
                BUG_ON(!(act->pin & GNTPIN_hstw_mask));
                act->pin -= GNTPIN_hstw_inc;
                if ( gnttab_release_host_mappings &&
                     !is_iomem_page(act->frame) )
                {
                    /* Drop the type count only if the map op took one. */
                    if ( gnttab_host_mapping_get_page_type(map, d, rd) )
                        put_page_type(mfn_to_page(act->frame));
                    put_page(mfn_to_page(act->frame));
                }
            }
        }

        /* Mirror __release_grant_for_copy: update granter-visible flags. */
        if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
            gnttab_clear_flag(_GTF_writing, &sha->flags);

        if ( act->pin == 0 )
            gnttab_clear_flag(_GTF_reading, &sha->flags);

        spin_unlock(&rd->grant_table->lock);

        rcu_unlock_domain(rd);

        map->flags = 0;
    }
}
1812 void
1813 grant_table_destroy(
1814 struct domain *d)
1816 struct grant_table *t = d->grant_table;
1817 int i;
1819 if ( t == NULL )
1820 return;
1822 for ( i = 0; i < nr_grant_frames(t); i++ )
1823 free_xenheap_page(t->shared[i]);
1824 xfree(t->shared);
1826 for ( i = 0; i < nr_maptrack_frames(t); i++ )
1827 free_xenheap_page(t->maptrack[i]);
1828 xfree(t->maptrack);
1830 for ( i = 0; i < nr_active_grant_frames(t); i++ )
1831 free_xenheap_page(t->active[i]);
1832 xfree(t->active);
1834 xfree(t);
1835 d->grant_table = NULL;
1838 /*
1839 * Local variables:
1840 * mode: C
1841 * c-set-style: "BSD"
1842 * c-basic-offset: 4
1843 * tab-width: 4
1844 * indent-tabs-mode: nil
1845 * End:
1846 */