debuggers.hg
changeset 22663:6a970abb346f
credit2: Track expected load
As vcpus are migrated, track how we expect the load to change. This
helps smooth migrations when the balancing doesn't take immediate
effect on the load average. In theory, if vcpu activity remains
constant, then the measured avgload should converge to the balanced
avgload.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author | Keir Fraser <keir@xen.org> |
---|---|
date | Fri Dec 24 08:30:42 2010 +0000 (2010-12-24) |
parents | cf1ea603b340 |
children | df310dcd19cb |
files | xen/common/sched_credit2.c |
line diff
1.1 --- a/xen/common/sched_credit2.c Fri Dec 24 08:30:15 2010 +0000 1.2 +++ b/xen/common/sched_credit2.c Fri Dec 24 08:30:42 2010 +0000 1.3 @@ -206,6 +206,7 @@ struct csched_runqueue_data { 1.4 int load; /* Instantaneous load: Length of queue + num non-idle threads */ 1.5 s_time_t load_last_update; /* Last time average was updated */ 1.6 s_time_t avgload; /* Decaying queue load */ 1.7 + s_time_t b_avgload; /* Decaying queue load modified by balancing */ 1.8 }; 1.9 1.10 /* 1.11 @@ -302,6 +303,7 @@ static void 1.12 if ( rqd->load_last_update + (1ULL<<prv->load_window_shift) < now ) 1.13 { 1.14 rqd->avgload = (unsigned long long)rqd->load << prv->load_window_shift; 1.15 + rqd->b_avgload = (unsigned long long)rqd->load << prv->load_window_shift; 1.16 } 1.17 else 1.18 { 1.19 @@ -310,6 +312,10 @@ static void 1.20 rqd->avgload = 1.21 ( ( delta * ( (unsigned long long)rqd->load << prv->load_window_shift ) ) 1.22 + ( ((1ULL<<prv->load_window_shift) - delta) * rqd->avgload ) ) >> prv->load_window_shift; 1.23 + 1.24 + rqd->b_avgload = 1.25 + ( ( delta * ( (unsigned long long)rqd->load << prv->load_window_shift ) ) 1.26 + + ( ((1ULL<<prv->load_window_shift) - delta) * rqd->b_avgload ) ) >> prv->load_window_shift; 1.27 } 1.28 rqd->load += change; 1.29 rqd->load_last_update = now; 1.30 @@ -317,11 +323,12 @@ static void 1.31 { 1.32 struct { 1.33 unsigned rq_load:4, rq_avgload:28; 1.34 - unsigned rq_id:4; 1.35 + unsigned rq_id:4, b_avgload:28; 1.36 } d; 1.37 d.rq_id=rqd->id; 1.38 d.rq_load = rqd->load; 1.39 d.rq_avgload = rqd->avgload; 1.40 + d.b_avgload = rqd->b_avgload; 1.41 trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1, 1.42 sizeof(d), 1.43 (unsigned char *)&d); 1.44 @@ -756,6 +763,9 @@ static void 1.45 1.46 update_max_weight(svc->rqd, svc->weight, 0); 1.47 1.48 + /* Expected new load based on adding this vcpu */ 1.49 + rqd->b_avgload += svc->avgload; 1.50 + 1.51 /* TRACE */ 1.52 { 1.53 struct { 1.54 @@ -790,6 +800,9 @@ static void 1.55 list_del_init(&svc->rqd_elem); 1.56 
update_max_weight(svc->rqd, 0, svc->weight); 1.57 1.58 + /* Expected new load based on removing this vcpu */ 1.59 + svc->rqd->b_avgload -= svc->avgload; 1.60 + 1.61 svc->rqd = NULL; 1.62 } 1.63