debuggers.hg

changeset 20991:3a0bd7ca6b11

When tmem is enabled, reserve a fraction of memory
for allocations of 0<order<9 to avoid fragmentation
issues.

Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Feb 15 17:54:04 2010 +0000 (2010-02-15)
parents cbb147631e8c
children ae2b7f1c89c8
files xen/common/page_alloc.c
line diff
     1.1 --- a/xen/common/page_alloc.c	Mon Feb 15 17:49:14 2010 +0000
     1.2 +++ b/xen/common/page_alloc.c	Mon Feb 15 17:54:04 2010 +0000
     1.3 @@ -224,6 +224,10 @@ static heap_by_zone_and_order_t *_heap[M
     1.4  static unsigned long *avail[MAX_NUMNODES];
     1.5  static long total_avail_pages;
     1.6  
     1.7 +/* TMEM: Reserve a fraction of memory for mid-size (0<order<9) allocations.*/
     1.8 +static long midsize_alloc_zone_pages;
     1.9 +#define MIDSIZE_ALLOC_FRAC 128
    1.10 +
    1.11  static DEFINE_SPINLOCK(heap_lock);
    1.12  
    1.13  static unsigned long init_node_heap(int node, unsigned long mfn,
    1.14 @@ -304,6 +308,14 @@ static struct page_info *alloc_heap_page
    1.15      spin_lock(&heap_lock);
    1.16  
    1.17      /*
    1.18 +     * TMEM: When available memory is scarce, allow only mid-size allocations
    1.19 +     * to avoid worst of fragmentation issues.
    1.20 +     */
    1.21 +    if ( opt_tmem && ((order == 0) || (order >= 9)) &&
    1.22 +         (total_avail_pages <= midsize_alloc_zone_pages) )
    1.23 +        goto fail;
    1.24 +
    1.25 +    /*
    1.26       * Start with requested node, but exhaust all node memory in requested 
    1.27       * zone before failing, only calc new node value if we fail to find memory 
    1.28       * in target node, this avoids needless computation on fast-path.
    1.29 @@ -336,6 +348,7 @@ static struct page_info *alloc_heap_page
    1.30          return pg;
    1.31      }
    1.32  
    1.33 + fail:
    1.34      /* No suitable memory blocks. Fail the request. */
    1.35      spin_unlock(&heap_lock);
    1.36      return NULL;
    1.37 @@ -504,6 +517,10 @@ static void free_heap_pages(
    1.38      avail[node][zone] += 1 << order;
    1.39      total_avail_pages += 1 << order;
    1.40  
    1.41 +    if ( opt_tmem )
    1.42 +        midsize_alloc_zone_pages = max(
    1.43 +            midsize_alloc_zone_pages, total_avail_pages / MIDSIZE_ALLOC_FRAC);
    1.44 +
    1.45      /* Merge chunks as far as possible. */
    1.46      while ( order < MAX_ORDER )
    1.47      {
    1.48 @@ -842,7 +859,7 @@ static unsigned long avail_heap_pages(
    1.49  
    1.50  unsigned long total_free_pages(void)
    1.51  {
    1.52 -    return total_avail_pages;
    1.53 +    return total_avail_pages - midsize_alloc_zone_pages;
    1.54  }
    1.55  
    1.56  void __init end_boot_allocator(void)