changeset 22662:cf1ea603b340

credit2: Track average load contributed by a vcpu

Track the amount of load contributed by a particular vcpu, to help us
make informed decisions about what will happen if we move it to
another runqueue.
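
The average is kept as a fixed-point, exponentially decaying mean over
a window of (1 << load_window_shift) time units: each update blends the
instantaneous load into the old average in proportion to how much of
the window has elapsed, and a fully elapsed window simply resets the
average. Below is a minimal standalone sketch of that update; the names
W and decaying_avg and the window size are illustrative, not taken from
the Xen source.

    #include <stdio.h>
    #include <stdint.h>

    #define W 10  /* stands in for load_window_shift: window = 1 << W units */

    /* Blend the instantaneous load (0 or 1 for a vcpu) into the running
     * average, weighting by the fraction of the window that has passed
     * since the last update.  A stale average (a full window or more
     * old) is simply replaced. */
    static uint64_t decaying_avg(uint64_t avgload, unsigned load,
                                 uint64_t delta /* time since last update */)
    {
        if ( delta >= (1ULL << W) )
            return (uint64_t)load << W;

        return ( delta * ((uint64_t)load << W)
                 + ((1ULL << W) - delta) * avgload ) >> W;
    }

    int main(void)
    {
        uint64_t avg = 1ULL << (W - 1);   /* 50%, as csched_alloc_vdata starts */

        avg = decaying_avg(avg, 1, 1ULL << (W - 2));  /* busy quarter-window */
        printf("after busy quarter-window: %llu/%llu\n",
               (unsigned long long)avg, 1ULL << W);

        avg = decaying_avg(avg, 0, 1ULL << (W - 2));  /* idle quarter-window */
        printf("after idle quarter-window: %llu/%llu\n",
               (unsigned long long)avg, 1ULL << W);
        return 0;
    }

Starting each new vcpu at 1ULL << (load_window_shift - 1), i.e. a 50%
load, avoids biasing placement decisions for or against a vcpu before
any history has accumulated.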

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author Keir Fraser <keir@xen.org>
date Fri Dec 24 08:30:15 2010 +0000
parents 00fc33d9f691
children 6a970abb346f
files xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c	Fri Dec 24 08:29:53 2010 +0000
+++ b/xen/common/sched_credit2.c	Fri Dec 24 08:30:15 2010 +0000
@@ -45,6 +45,8 @@
 #define TRC_CSCHED2_SCHED_TASKLET TRC_SCHED_CLASS + 8
 #define TRC_CSCHED2_UPDATE_LOAD   TRC_SCHED_CLASS + 9
 #define TRC_CSCHED2_RUNQ_ASSIGN   TRC_SCHED_CLASS + 10
+#define TRC_CSCHED2_UPDATE_VCPU_LOAD   TRC_SCHED_CLASS + 11
+#define TRC_CSCHED2_UPDATE_RUNQ_LOAD   TRC_SCHED_CLASS + 12
 
 /*
  * WARNING: This is still in an experimental phase.  Status and work can be found at the
@@ -241,6 +243,9 @@ struct csched_vcpu {
     s_time_t start_time; /* When we were scheduled (used for credit) */
     unsigned flags;      /* 16 bits doesn't seem to play well with clear_bit() */
 
+    /* Individual contribution to load */
+    s_time_t load_last_update;  /* Last time average was updated */
+    s_time_t avgload;           /* Decaying queue load */
 };
 
 /*
@@ -286,8 +291,8 @@ static /*inline*/ struct csched_vcpu *
 }
 
 static void
-update_load(const struct scheduler *ops,
-            struct csched_runqueue_data *rqd, int change, s_time_t now)
+__update_runq_load(const struct scheduler *ops,
+                  struct csched_runqueue_data *rqd, int change, s_time_t now)
 {
     struct csched_private *prv = CSCHED_PRIV(ops);
     s_time_t delta=-1;
@@ -296,7 +301,7 @@ update_load(const struct scheduler *ops,
 
     if ( rqd->load_last_update + (1ULL<<prv->load_window_shift) < now )
     {
-        rqd->avgload = rqd->load << (1ULL<prv->load_window_shift);
+        rqd->avgload = (unsigned long long)rqd->load << prv->load_window_shift;
     }
     else
     {
@@ -306,23 +311,78 @@ update_load(const struct scheduler *ops,
             ( ( delta * ( (unsigned long long)rqd->load << prv->load_window_shift ) )
               + ( ((1ULL<<prv->load_window_shift) - delta) * rqd->avgload ) ) >> prv->load_window_shift;
     }
-
     rqd->load += change;
     rqd->load_last_update = now;
+
     {
         struct {
-            unsigned load:4, avgload:28;
-            int delta;
+            unsigned rq_load:4, rq_avgload:28;
+            unsigned rq_id:4;
         } d;
-        d.load = rqd->load;
-        d.avgload = rqd->avgload;
-        d.delta = delta;
-        trace_var(TRC_CSCHED2_UPDATE_LOAD, 0,
+        d.rq_id=rqd->id;
+        d.rq_load = rqd->load;
+        d.rq_avgload = rqd->avgload;
+        trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1,
                   sizeof(d),
                   (unsigned char *)&d);
     }
 }
 
+static void
+__update_svc_load(const struct scheduler *ops,
+                  struct csched_vcpu *svc, int change, s_time_t now)
+{
+    struct csched_private *prv = CSCHED_PRIV(ops);
+    s_time_t delta=-1;
+    int vcpu_load;
+
+    if ( change == -1 )
+        vcpu_load = 1;
+    else if ( change == 1 )
+        vcpu_load = 0;
+    else
+        vcpu_load = vcpu_runnable(svc->vcpu);
+
+    now >>= LOADAVG_GRANULARITY_SHIFT;
+
+    if ( svc->load_last_update + (1ULL<<prv->load_window_shift) < now )
+    {
+        svc->avgload = (unsigned long long)vcpu_load << prv->load_window_shift;
+    }
+    else
+    {
+        delta = now - svc->load_last_update;
+
+        svc->avgload =
+            ( ( delta * ( (unsigned long long)vcpu_load << prv->load_window_shift ) )
+              + ( ((1ULL<<prv->load_window_shift) - delta) * svc->avgload ) ) >> prv->load_window_shift;
+    }
+    svc->load_last_update = now;
+
+    {
+        struct {
+            unsigned dom:16,vcpu:16;
+            unsigned v_avgload:32;
+        } d;
+        d.dom = svc->vcpu->domain->domain_id;
+        d.vcpu = svc->vcpu->vcpu_id;
+        d.v_avgload = svc->avgload;
+        trace_var(TRC_CSCHED2_UPDATE_VCPU_LOAD, 1,
+                  sizeof(d),
+                  (unsigned char *)&d);
+    }
+}
+
+static void
+update_load(const struct scheduler *ops,
+            struct csched_runqueue_data *rqd,
+            struct csched_vcpu *svc, int change, s_time_t now)
+{
+    __update_runq_load(ops, rqd, change, now);
+    if ( svc )
+        __update_svc_load(ops, svc, change, now);
+}
+
 static int
 __runq_insert(struct list_head *runq, struct csched_vcpu *svc)
 {
@@ -672,6 +732,9 @@ csched_alloc_vdata(const struct schedule
 
         svc->credit = CSCHED_CREDIT_INIT;
         svc->weight = svc->sdom->weight;
+        /* Starting load of 50% */
+        svc->avgload = 1ULL << (CSCHED_PRIV(ops)->load_window_shift - 1);
+        svc->load_last_update = NOW();
     }
     else
     {
@@ -817,7 +880,7 @@ csched_vcpu_sleep(const struct scheduler
     else if ( __vcpu_on_runq(svc) )
     {
         BUG_ON(svc->rqd != RQD(ops, vc->processor));
-        update_load(ops, svc->rqd, -1, NOW());
+        update_load(ops, svc->rqd, svc, -1, NOW());
         __runq_remove(svc);
     }
     else if ( test_bit(__CSFLAG_delayed_runq_add, &svc->flags) )
@@ -866,7 +929,7 @@ csched_vcpu_wake(const struct scheduler 
 
     now = NOW();
 
-    update_load(ops, svc->rqd, 1, now);
+    update_load(ops, svc->rqd, svc, 1, now);
 
     /* Put the VCPU on the runq */
     runq_insert(ops, vc->processor, svc);
@@ -907,7 +970,7 @@ csched_context_saved(const struct schedu
         runq_tickle(ops, vc->processor, svc, now);
     }
     else if ( !is_idle_vcpu(vc) )
-        update_load(ops, svc->rqd, -1, now);
+        update_load(ops, svc->rqd, svc, -1, now);
 
     vcpu_schedule_unlock_irq(vc);
 }
@@ -1339,7 +1402,7 @@ csched_schedule(
             cpu_set(cpu, rqd->idle);
         /* Make sure avgload gets updated periodically even
          * if there's no activity */
-        update_load(ops, rqd, 0, now);
+        update_load(ops, rqd, NULL, 0, now);
     }
 
     /*