debuggers.hg
changeset 22665:41d1affef596
credit2: Use loadavg to pick cpus, instead of instantaneous load
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
| field    | value |
|----------|-------|
| author   | Keir Fraser <keir@xen.org> |
| date     | Fri Dec 24 08:31:24 2010 +0000 (2010-12-24) |
| parents  | df310dcd19cb |
| children | 65b63f5af281 |
| files    | xen/common/sched_credit2.c |
line diff
--- a/xen/common/sched_credit2.c	Fri Dec 24 08:31:04 2010 +0000
+++ b/xen/common/sched_credit2.c	Fri Dec 24 08:31:24 2010 +0000
@@ -996,13 +996,14 @@ csched_context_saved(const struct schedu
     vcpu_schedule_unlock_irq(vc);
 }
 
-#define MAX_LOAD (1<<30);
+#define MAX_LOAD (1ULL<<60);
 static int
 choose_cpu(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched_private *prv = CSCHED_PRIV(ops);
-    int i, min_load, min_rqi = -1, new_cpu;
+    int i, min_rqi = -1, new_cpu;
     struct csched_vcpu *svc = CSCHED_VCPU(vc);
+    s_time_t min_avgload;
 
     BUG_ON(cpus_empty(prv->active_queues));
 
@@ -1053,27 +1054,39 @@ choose_cpu(const struct scheduler *ops,
 
     /* FIXME: Pay attention to cpu affinity */
 
-    min_load = MAX_LOAD;
+    min_avgload = MAX_LOAD;
 
     /* Find the runqueue with the lowest instantaneous load */
     for_each_cpu_mask(i, prv->active_queues)
     {
         struct csched_runqueue_data *rqd;
+        s_time_t rqd_avgload;
 
         rqd = prv->rqd + i;
 
         /* If checking a different runqueue, grab the lock,
-         * read the avg, and then release the lock. */
-        if ( rqd != svc->rqd
-             && ! spin_trylock(&rqd->lock) )
+         * read the avg, and then release the lock.
+         *
+         * If on our own runqueue, don't grab or release the lock;
+         * but subtract our own load from the runqueue load to simulate
+         * impartiality */
+        if ( rqd == svc->rqd )
+        {
+            rqd_avgload = rqd->b_avgload - svc->avgload;
+        }
+        else if ( spin_trylock(&rqd->lock) )
+        {
+            rqd_avgload = rqd->b_avgload;
+            spin_unlock(&rqd->lock);
+        }
+        else
             continue;
-        if ( prv->rqd[i].load < min_load )
+
+        if ( rqd_avgload < min_avgload )
         {
-            min_load=prv->rqd[i].load;
+            min_avgload = rqd_avgload;
             min_rqi=i;
         }
-        if ( rqd != svc->rqd )
-            spin_unlock(&rqd->lock);
     }
 
     /* We didn't find anyone (most likely because of spinlock contention); leave it where it is */