debuggers.hg
changeset 22658:98f023d7717a
credit2: Calculate instantaneous runqueue load
Add hooks in the various places to detect vcpus becoming active or
inactive. At the moment, record only instantaneous runqueue load;
but this lays the groundwork for having a load average.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author | Keir Fraser <keir@xen.org>
date | Fri Dec 24 08:28:35 2010 +0000 (2010-12-24)
parents | d93de09aa952 |
children | 3e7702cb31db |
files | xen/common/sched_credit2.c |
line diff
1.1 --- a/xen/common/sched_credit2.c Fri Dec 24 08:28:10 2010 +0000 1.2 +++ b/xen/common/sched_credit2.c Fri Dec 24 08:28:35 2010 +0000 1.3 @@ -42,6 +42,7 @@ 1.4 #define TRC_CSCHED2_TICKLE TRC_SCHED_CLASS + 6 1.5 #define TRC_CSCHED2_CREDIT_RESET TRC_SCHED_CLASS + 7 1.6 #define TRC_CSCHED2_SCHED_TASKLET TRC_SCHED_CLASS + 8 1.7 +#define TRC_CSCHED2_UPDATE_LOAD TRC_SCHED_CLASS + 9 1.8 #define TRC_CSCHED2_RUNQ_ASSIGN TRC_SCHED_CLASS + 10 1.9 1.10 /* 1.11 @@ -187,6 +188,7 @@ struct csched_runqueue_data { 1.12 1.13 cpumask_t idle, /* Currently idle */ 1.14 tickled; /* Another cpu in the queue is already targeted for this one */ 1.15 + int load; /* Instantaneous load: Length of queue + num non-idle threads */ 1.16 }; 1.17 1.18 /* 1.19 @@ -266,6 +268,23 @@ static /*inline*/ struct csched_vcpu * 1.20 return list_entry(elem, struct csched_vcpu, runq_elem); 1.21 } 1.22 1.23 +static void 1.24 +update_load(const struct scheduler *ops, 1.25 + struct csched_runqueue_data *rqd, int change, s_time_t now) 1.26 +{ 1.27 + rqd->load += change; 1.28 + 1.29 + { 1.30 + struct { 1.31 + unsigned load:4; 1.32 + } d; 1.33 + d.load = rqd->load; 1.34 + trace_var(TRC_CSCHED2_UPDATE_LOAD, 0, 1.35 + sizeof(d), 1.36 + (unsigned char *)&d); 1.37 + } 1.38 +} 1.39 + 1.40 static int 1.41 __runq_insert(struct list_head *runq, struct csched_vcpu *svc) 1.42 { 1.43 @@ -756,7 +775,11 @@ csched_vcpu_sleep(const struct scheduler 1.44 if ( per_cpu(schedule_data, vc->processor).curr == vc ) 1.45 cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ); 1.46 else if ( __vcpu_on_runq(svc) ) 1.47 + { 1.48 + BUG_ON(svc->rqd != RQD(ops, vc->processor)); 1.49 + update_load(ops, svc->rqd, -1, NOW()); 1.50 __runq_remove(svc); 1.51 + } 1.52 else if ( test_bit(__CSFLAG_delayed_runq_add, &svc->flags) ) 1.53 clear_bit(__CSFLAG_delayed_runq_add, &svc->flags); 1.54 } 1.55 @@ -803,6 +826,8 @@ csched_vcpu_wake(const struct scheduler 1.56 1.57 now = NOW(); 1.58 1.59 + update_load(ops, svc->rqd, 1, now); 1.60 + 1.61 /* Put the VCPU on 
the runq */ 1.62 runq_insert(ops, vc->processor, svc); 1.63 runq_tickle(ops, vc->processor, svc, now); 1.64 @@ -841,6 +866,8 @@ csched_context_saved(const struct schedu 1.65 runq_insert(ops, vc->processor, svc); 1.66 runq_tickle(ops, vc->processor, svc, now); 1.67 } 1.68 + else if ( !is_idle_vcpu(vc) ) 1.69 + update_load(ops, svc->rqd, -1, now); 1.70 1.71 vcpu_schedule_unlock_irq(vc); 1.72 } 1.73 @@ -1209,6 +1236,9 @@ csched_schedule( 1.74 /* Update the idle mask if necessary */ 1.75 if ( !cpu_isset(cpu, rqd->idle) ) 1.76 cpu_set(cpu, rqd->idle); 1.77 + /* Make sure avgload gets updated periodically even 1.78 + * if there's no activity */ 1.79 + update_load(ops, rqd, 0, now); 1.80 } 1.81 1.82 /* 1.83 @@ -1287,10 +1317,12 @@ csched_dump(const struct scheduler *ops) 1.84 { 1.85 printk("Runqueue %d:\n" 1.86 "\tncpus = %u\n" 1.87 - "\tmax_weight = %d\n", 1.88 + "\tmax_weight = %d\n" 1.89 + "\tload = %d\n", 1.90 i, 1.91 cpus_weight(prv->rqd[i].active), 1.92 - prv->rqd[i].max_weight); 1.93 + prv->rqd[i].max_weight, 1.94 + prv->rqd[i].load); 1.95 1.96 } 1.97 /* FIXME: Locking! */