
changeset 20648:18342df0f9dc

tmem: reclaim minimal memory proactively

When a single domain is using most/all of tmem memory
for ephemeral pages belonging to the same object, e.g.
when copying a single huge file larger than ephemeral
memory, long lists are traversed looking for a page to
evict that doesn't belong to this object (as pages in
the object for which a page is currently being inserted
are locked and cannot be evicted). This is essentially
a livelock.
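
A minimal, compilable sketch of the failure mode (all names here are
hypothetical stand-ins, not the actual tmem data structures or
eviction code): every page on the simulated ephemeral LRU belongs to
the one object currently receiving a put, so the eviction scan walks
the whole list without ever finding a page it is allowed to evict.

    #include <stdio.h>

    #define NPAGES 8

    /* Hypothetical stand-in, not Xen's pgp_t. */
    struct page {
        int obj_id;          /* object owning this ephemeral page */
    };

    /* Scan the LRU for a page NOT owned by the locked object. */
    static struct page *find_victim(struct page lru[], int n, int locked_obj)
    {
        for (int i = 0; i < n; i++)
            if (lru[i].obj_id != locked_obj)
                return &lru[i];
        return NULL;         /* every page is pinned by the in-flight put */
    }

    int main(void)
    {
        struct page lru[NPAGES];

        /* One huge file copy: every cached page belongs to object 42. */
        for (int i = 0; i < NPAGES; i++)
            lru[i].obj_id = 42;

        /* A put into object 42 needs memory, but all candidate victims
         * belong to object 42 itself, whose pages are locked. */
        if (find_victim(lru, NPAGES, 42) == NULL)
            printf("no victim found: the scan repeats forever -- livelock\n");
        return 0;
    }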

Avoid this by proactively ensuring there is a margin
of available memory (1MB) before locks are taken on
the object.
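
A toy model of the ordering the patch establishes (all names are
hypothetical; ensure_margin() stands in for the real
tmem_ensure_avail_pages() shown in the diff below): reclaim runs while
no object lock is held, so eviction may still pick victims from any
object, and the subsequent insertion is satisfied from the margin.

    #include <stdio.h>

    /* Toy model of the lock ordering, not Xen's locking primitives
     * or the real do_tmem_put() path. */
    struct object { int locked; };

    static void ensure_margin(void)
    {
        /* Stands in for tmem_ensure_avail_pages(): evict until ~1MB is
         * free. Crucially, no object lock is held yet, so eviction may
         * take pages from ANY object, including the one locked next. */
        printf("reclaim with no object lock held\n");
    }

    static void put_page(struct object *obj)
    {
        ensure_margin();   /* 1. make room BEFORE any lock is taken   */
        obj->locked = 1;   /* 2. lock: obj's pages become unevictable */
        printf("insert page; allocation comes out of the margin\n");
        obj->locked = 0;   /* 3. unlock */
    }

    int main(void)
    {
        struct object obj = { 0 };
        put_page(&obj);
        return 0;
    }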

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Dec 09 10:44:56 2009 +0000 (2009-12-09)
parents 2c6a04fdf8fb
children 9e9746e635f9
files xen/common/tmem.c xen/include/xen/tmem_xen.h
--- a/xen/common/tmem.c	Wed Dec 09 10:44:11 2009 +0000
+++ b/xen/common/tmem.c	Wed Dec 09 10:44:56 2009 +0000
@@ -1093,6 +1093,24 @@ static unsigned long tmem_relinquish_npa
     return avail_pages;
 }
 
+/* Under certain conditions (e.g. if each client is putting pages for exactly
+ * one object), once locks are held, freeing up memory may
+ * result in livelocks and very long "put" times, so we try to ensure there
+ * is a minimum amount of memory (1MB) available BEFORE any data structure
+ * locks are held */
+static inline void tmem_ensure_avail_pages(void)
+{
+    int failed_evict = 10;
+
+    while ( !tmh_free_mb() )
+    {
+        if ( tmem_evict() )
+            continue;
+        else if ( failed_evict-- <= 0 )
+            break;
+    }
+}
+
 /************ TMEM CORE OPERATIONS ************************************/
 
 static NOINLINE int do_tmem_put_compress(pgp_t *pgp, tmem_cli_mfn_t cmfn,
@@ -2315,10 +2333,12 @@ EXPORT long do_tmem_op(tmem_cli_op_t uop
                               op.u.new.uuid[0], op.u.new.uuid[1]);
         break;
     case TMEM_NEW_PAGE:
+        tmem_ensure_avail_pages();
         rc = do_tmem_put(pool, op.u.gen.object,
                          op.u.gen.index, op.u.gen.cmfn, 0, 0, 0, NULL);
         break;
    case TMEM_PUT_PAGE:
+        tmem_ensure_avail_pages();
         rc = do_tmem_put(pool, op.u.gen.object,
                     op.u.gen.index, op.u.gen.cmfn, 0, 0, PAGE_SIZE, NULL);
         if (rc == 1) succ_put = 1;
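
A standalone model of the loop added above, where free_mb() and
evict_one() are hypothetical stand-ins for tmh_free_mb() and
tmem_evict(): it keeps evicting until at least 1MB is free, and a
budget of roughly ten failed eviction attempts bounds the loop when
no progress can be made (note that successful evictions do not
consume the budget).

    #include <stdio.h>

    static unsigned long free_pages;                /* simulated free pool */
    static const unsigned long pages_per_mb = 256;  /* assumes 4KiB pages  */

    static unsigned long free_mb(void)
    {
        return free_pages / pages_per_mb;
    }

    static int evict_one(void)
    {
        free_pages++;      /* pretend each eviction frees one page */
        return 1;          /* 1 = a page was evicted, 0 = failure  */
    }

    static void ensure_avail_pages(void)
    {
        int failed_evict = 10;         /* failure budget, as in the patch */

        while (!free_mb())
        {
            if (evict_one())
                continue;              /* progress: keep trying           */
            else if (failed_evict-- <= 0)
                break;                 /* too many failures: give up      */
        }
    }

    int main(void)
    {
        ensure_avail_pages();
        printf("free pages after reclaim: %lu\n", free_pages);  /* 256 */
        return 0;
    }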
--- a/xen/include/xen/tmem_xen.h	Wed Dec 09 10:44:11 2009 +0000
+++ b/xen/include/xen/tmem_xen.h	Wed Dec 09 10:44:56 2009 +0000
@@ -252,6 +252,11 @@ static inline unsigned long tmh_freeable
             (20 - PAGE_SHIFT);
 }
 
+static inline unsigned long tmh_free_mb(void)
+{
+    return (tmh_avail_pages() + total_free_pages()) >> (20 - PAGE_SHIFT);
+}
+
 /*
  * Memory allocation for "infrastructure" data
 */
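
For concreteness, the shift in tmh_free_mb() converts a page count to
whole mebibytes. Assuming 4KiB pages (PAGE_SHIFT == 12 is an
assumption here, though it matches Xen on x86), 20 - PAGE_SHIFT is 8,
so the expression divides by 256: the function returns nonzero once
at least 256 free pages, i.e. 1MiB, are available. A generic demo,
not Xen code:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed: 4KiB pages */

    static unsigned long pages_to_mb(unsigned long npages)
    {
        /* 20 == log2(1MiB); the shift is 20 - 12 == 8, i.e. divide by
         * 256, since 256 pages * 4KiB/page == 1MiB. */
        return npages >> (20 - PAGE_SHIFT);
    }

    int main(void)
    {
        printf("%lu\n", pages_to_mb(255));   /* 0 -> margin not yet met */
        printf("%lu\n", pages_to_mb(256));   /* 1 -> exactly 1MiB free  */
        printf("%lu\n", pages_to_mb(1024));  /* 4 -> 4MiB free          */
        return 0;
    }

Under that 4KiB-page assumption, the !tmh_free_mb() test in
tmem_ensure_avail_pages() fires whenever fewer than 256 pages are
available, which is exactly the 1MB margin the commit message
describes.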