debuggers.hg

changeset 22667:94d47b8b723f

credit2: Different unbalance tolerance for underloaded and overloaded queues

Allow the "unbalance tolerance" -- the amount of load difference
between two runqueues that will be tolerated before rebalancing -- to
differ depending on how busy the runqueue is. If the busier runqueue
is under 100% capacity, default to a tolerance of 1.0; if it is over
100%, default to a tolerance of 0.125.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author Keir Fraser <keir@xen.org>
date Fri Dec 24 08:32:20 2010 +0000 (2010-12-24)
parents 65b63f5af281
children a0228a0f3fd2
files xen/common/sched_credit2.c
line diff
     1.1 --- a/xen/common/sched_credit2.c	Fri Dec 24 08:31:54 2010 +0000
     1.2 +++ b/xen/common/sched_credit2.c	Fri Dec 24 08:32:20 2010 +0000
     1.3 @@ -193,6 +193,10 @@ integer_param("sched_credit2_migrate_res
     1.4  int opt_load_window_shift=18;
     1.5  #define  LOADAVG_WINDOW_SHIFT_MIN 4
     1.6  integer_param("credit2_load_window_shift", opt_load_window_shift);
     1.7 +int opt_underload_balance_tolerance=0;
     1.8 +integer_param("credit2_balance_under", opt_underload_balance_tolerance);
     1.9 +int opt_overload_balance_tolerance=-3;
    1.10 +integer_param("credit2_balance_over", opt_overload_balance_tolerance);
    1.11  
    1.12  /*
    1.13   * Per-runqueue data
    1.14 @@ -1232,14 +1236,34 @@ retry:
    1.15  
    1.16      /* Minimize holding the big lock */
    1.17      spin_unlock(&prv->lock);
    1.18 -
    1.19      if ( max_delta_rqi == -1 )
    1.20          goto out;
    1.21  
    1.22 -    /* Don't bother with load differences less than 25%. */
    1.23 -    if ( load_delta < (1ULL<<(prv->load_window_shift - 2)) )
    1.24 -        goto out;
    1.25 +    {
    1.26 +        s_time_t load_max;
    1.27 +        int cpus_max;
    1.28 +
    1.29 +        
    1.30 +        load_max = lrqd->b_avgload;
    1.31 +        if ( orqd->b_avgload > load_max )
    1.32 +            load_max = orqd->b_avgload;
    1.33 +
    1.34 +        cpus_max=cpus_weight(lrqd->active);
    1.35 +        if ( cpus_weight(orqd->active) > cpus_max )
    1.36 +            cpus_max = cpus_weight(orqd->active);
    1.37  
    1.38 +        /* If we're under 100% capacaty, only shift if load difference
    1.39 +         * is > 1.  otherwise, shift if under 12.5% */
    1.40 +        if ( load_max < (1ULL<<(prv->load_window_shift))*cpus_max )
    1.41 +        {
    1.42 +            if ( load_delta < (1ULL<<(prv->load_window_shift+opt_underload_balance_tolerance) ) )
    1.43 +                 goto out;
    1.44 +        }
    1.45 +        else
    1.46 +            if ( load_delta < (1ULL<<(prv->load_window_shift+opt_overload_balance_tolerance)) )
    1.47 +                goto out;
    1.48 +    }
    1.49 +             
    1.50      /* Try to grab the other runqueue lock; if it's been taken in the
    1.51       * meantime, try the process over again.  This can't deadlock
    1.52       * because if it doesn't get any other rqd locks, it will simply
    1.53 @@ -1982,6 +2006,8 @@ csched_init(struct scheduler *ops)
    1.54             " Use at your own risk.\n");
    1.55  
    1.56      printk(" load_window_shift: %d\n", opt_load_window_shift);
    1.57 +    printk(" underload_balance_tolerance: %d\n", opt_underload_balance_tolerance);
    1.58 +    printk(" overload_balance_tolerance: %d\n", opt_overload_balance_tolerance);
    1.59  
    1.60      if ( opt_load_window_shift < LOADAVG_WINDOW_SHIFT_MIN )
    1.61      {