debuggers.hg

changeset 19968:0d4406bc5cb7

Allow XENMEM_exchange to support exchange on foreign domains.

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jul 13 12:17:05 2009 +0100 (2009-07-13)
parents f17c897546bf
children 09dbdf12c33d
files xen/common/memory.c
line diff
     1.1 --- a/xen/common/memory.c	Mon Jul 13 11:52:49 2009 +0100
     1.2 +++ b/xen/common/memory.c	Mon Jul 13 12:17:05 2009 +0100
     1.3 @@ -265,16 +265,22 @@ static long memory_exchange(XEN_GUEST_HA
     1.4          out_chunk_order = exch.in.extent_order - exch.out.extent_order;
     1.5      }
     1.6  
     1.7 -    /*
     1.8 -     * Only support exchange on calling domain right now. Otherwise there are
     1.9 -     * tricky corner cases to consider (e.g., dying domain).
    1.10 -     */
    1.11 -    if ( unlikely(exch.in.domid != DOMID_SELF) )
    1.12 +    if ( likely(exch.in.domid == DOMID_SELF) )
    1.13 +    {
    1.14 +        d = rcu_lock_current_domain();
    1.15 +    }
    1.16 +    else
    1.17      {
    1.18 -        rc = IS_PRIV(current->domain) ? -EINVAL : -EPERM;
    1.19 -        goto fail_early;
    1.20 +        if ( (d = rcu_lock_domain_by_id(exch.in.domid)) == NULL )
    1.21 +            goto fail_early;
    1.22 +
    1.23 +        if ( !IS_PRIV_FOR(current->domain, d) )
    1.24 +        {
    1.25 +            rcu_unlock_domain(d);
    1.26 +            rc = -EPERM;
    1.27 +            goto fail_early;
    1.28 +        }
    1.29      }
    1.30 -    d = current->domain;
    1.31  
    1.32      memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
    1.33          d,
    1.34 @@ -292,6 +298,7 @@ static long memory_exchange(XEN_GUEST_HA
    1.35          if ( hypercall_preempt_check() )
    1.36          {
    1.37              exch.nr_exchanged = i << in_chunk_order;
    1.38 +            rcu_unlock_domain(d);
    1.39              if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
    1.40                  return -EFAULT;
    1.41              return hypercall_create_continuation(
    1.42 @@ -362,7 +369,32 @@ static long memory_exchange(XEN_GUEST_HA
    1.43          {
    1.44              if ( assign_pages(d, page, exch.out.extent_order,
    1.45                                MEMF_no_refcount) )
    1.46 -                BUG();
    1.47 +            {
    1.48 +                unsigned long dec_count;
    1.49 +                bool_t drop_dom_ref;
    1.50 +
    1.51 +                /*
    1.52 +                 * Pages in in_chunk_list are stolen without
    1.53 +                 * decreasing tot_pages. If the domain is dying when we
    1.54 +                 * assign pages, we need to decrease the count. Pages
    1.55 +                 * that have already been assigned are covered by
    1.56 +                 * domain_relinquish_resources().
    1.57 +                 */
    1.58 +                dec_count = (((1UL << exch.in.extent_order) *
    1.59 +                              (1UL << in_chunk_order)) -
    1.60 +                             (j * (1UL << exch.out.extent_order)));
    1.61 +
    1.62 +                spin_lock(&d->page_alloc_lock);
    1.63 +                d->tot_pages -= dec_count;
    1.64 +                drop_dom_ref = (dec_count && !d->tot_pages);
    1.65 +                spin_unlock(&d->page_alloc_lock);
    1.66 +
    1.67 +                if ( drop_dom_ref )
    1.68 +                    put_domain(d);
    1.69 +
    1.70 +                free_domheap_pages(page, exch.out.extent_order);
    1.71 +                goto dying;
    1.72 +            }
    1.73  
    1.74              /* Note that we ignore errors accessing the output extent list. */
    1.75              (void)__copy_from_guest_offset(
    1.76 @@ -378,15 +410,15 @@ static long memory_exchange(XEN_GUEST_HA
    1.77                  (void)__copy_to_guest_offset(
    1.78                      exch.out.extent_start, (i<<out_chunk_order)+j, &mfn, 1);
    1.79              }
    1.80 -
    1.81              j++;
    1.82          }
    1.83 -        BUG_ON(j != (1UL << out_chunk_order));
    1.84 +        BUG_ON( !(d->is_dying) && (j != (1UL << out_chunk_order)) );
    1.85      }
    1.86  
    1.87      exch.nr_exchanged = exch.in.nr_extents;
    1.88      if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
    1.89          rc = -EFAULT;
    1.90 +    rcu_unlock_domain(d);
    1.91      return rc;
    1.92  
    1.93      /*
    1.94 @@ -398,7 +430,8 @@ static long memory_exchange(XEN_GUEST_HA
    1.95      while ( (page = page_list_remove_head(&in_chunk_list)) )
    1.96          if ( assign_pages(d, page, 0, MEMF_no_refcount) )
    1.97              BUG();
    1.98 -
    1.99 + dying:
   1.100 +    rcu_unlock_domain(d);
   1.101      /* Free any output pages we managed to allocate. */
   1.102      while ( (page = page_list_remove_head(&out_chunk_list)) )
   1.103          free_domheap_pages(page, exch.out.extent_order);