debuggers.hg

changeset 22659:3e7702cb31db

credit2: Simple cpu picker based on instantaneous load

In preparation for multiple runqueues, add a simple cpu picker that
will look for
the runqueue with the lowest instantaneous load to assign the vcpu to.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author Keir Fraser <keir@xen.org>
date Fri Dec 24 08:29:00 2010 +0000 (2010-12-24)
parents 98f023d7717a
children 597e3fee23bc
files xen/common/sched_credit2.c
line diff
     1.1 --- a/xen/common/sched_credit2.c	Fri Dec 24 08:28:35 2010 +0000
     1.2 +++ b/xen/common/sched_credit2.c	Fri Dec 24 08:29:00 2010 +0000
     1.3 @@ -872,13 +872,75 @@ csched_context_saved(const struct schedu
     1.4      vcpu_schedule_unlock_irq(vc);
     1.5  }
     1.6  
     1.7 +#define MAX_LOAD (1<<30)
     1.8  static int
     1.9  choose_cpu(const struct scheduler *ops, struct vcpu *vc)
    1.10  {
    1.11 -    /* FIXME: Chose a schedule group based on load */
    1.12 -    /* FIXME: Migrate the vcpu to the new runqueue list, updating
    1.13 -       max_weight for each runqueue */
    1.14 -    return 0;
    1.15 +    struct csched_private *prv = CSCHED_PRIV(ops);
    1.16 +    int i, min_load, min_rqi = -1, new_cpu;
    1.17 +    struct csched_vcpu *svc = CSCHED_VCPU(vc);
    1.18 +
    1.19 +    BUG_ON(cpus_empty(prv->active_queues));
    1.20 +
    1.21 +    /* Locking:
    1.22 +     * - vc->processor is already locked
    1.23 +     * - Need to grab prv lock to make sure active runqueues don't
    1.24 +     *   change
    1.25 +     * - Need to grab locks for other runqueues while checking
    1.26 +     *   avgload
    1.27 +     * Locking constraint is:
    1.28 +     * - Lock prv before runqueue locks
    1.29 +     * - Trylock between runqueue locks (no ordering)
    1.30 +     *
    1.31 +     * Since one of the runqueue locks is already held, we can't
    1.32 +     * just grab the prv lock.  Instead, we'll have to trylock, and
    1.33 +     * do something else reasonable if we fail.
    1.34 +     */
    1.35 +
    1.36 +    if ( !spin_trylock(&prv->lock) )
    1.37 +    {
    1.38 +        /* Leave it where it is for now.  When we actually pay attention
    1.39 +         * to affinity we'll have to figure something out... */
    1.40 +        return vc->processor;
    1.41 +    }
    1.42 +
     1.43 +    /* FIXME: Pay attention to cpu affinity */
    1.44 +
    1.45 +    min_load = MAX_LOAD;
    1.46 +
    1.47 +    /* Find the runqueue with the lowest instantaneous load */
    1.48 +    for_each_cpu_mask(i, prv->active_queues)
    1.49 +    {
    1.50 +        struct csched_runqueue_data *rqd;
    1.51 +
    1.52 +        rqd = prv->rqd + i;
    1.53 +
    1.54 +        /* If checking a different runqueue, grab the lock,
    1.55 +         * read the avg, and then release the lock. */
    1.56 +        if ( rqd != svc->rqd
    1.57 +             && ! spin_trylock(&rqd->lock) )
    1.58 +            continue;
    1.59 +        if ( prv->rqd[i].load < min_load )
    1.60 +        {
    1.61 +            min_load=prv->rqd[i].load;
    1.62 +            min_rqi=i;
    1.63 +        }
    1.64 +        if ( rqd != svc->rqd )
    1.65 +            spin_unlock(&rqd->lock);
    1.66 +    }
    1.67 +
    1.68 +    /* We didn't find anyone (most likely because of spinlock contention); leave it where it is */
    1.69 +    if ( min_rqi == -1 )
    1.70 +        new_cpu = vc->processor;
    1.71 +    else
    1.72 +    {
    1.73 +        BUG_ON(cpus_empty(prv->rqd[min_rqi].active));
    1.74 +        new_cpu = first_cpu(prv->rqd[min_rqi].active);
    1.75 +    }
     1.76 +
    1.77 +    spin_unlock(&prv->lock);
    1.78 +
    1.79 +    return new_cpu;
    1.80  }
    1.81  
    1.82  static int
    1.83 @@ -894,13 +956,12 @@ csched_cpu_pick(const struct scheduler *
    1.84       * runqueue is held.  It can't be actively waiting to run.  It
    1.85       * will be added to the new runqueue when it next wakes.
    1.86       *
    1.87 -     * If we want to be able to call pick() separately, we need
    1.88 -     * to add a mechansim to remove a vcpu from an old processor /
    1.89 -     * runqueue before releasing the lock. */
    1.90 +     * If we want to be able to call pick() separately, we need to add
    1.91 +     * a mechansim to remove a vcpu from an old processor / runqueue
    1.92 +     * before releasing the lock. */
    1.93      BUG_ON(__vcpu_on_runq(svc));
    1.94  
    1.95      new_cpu = choose_cpu(ops, vc);
    1.96 -
    1.97      /* If we're suggesting moving to a different runqueue, remove it
    1.98       * from the old runqueue while we have the lock.  It will be added
    1.99       * to the new one when it wakes. */