
changeset 22657:d93de09aa952

credit2: Handle runqueue changes

In preparation for cross-runqueue migration, make the handling of
runqueue assignment more robust.
Changes include:
* An up-pointer from the svc struct to the runqueue it's assigned to
* Explicit runqueue assign/de-assign helpers, with appropriate ASSERTs
* cpu_pick will de-assign a vcpu from its runqueue if it is migrating,
  and wake will re-assign it (see the sketch below)
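
The lifecycle these changes set up, as a minimal illustrative sketch
(not code from this patch): runqueue_of(), pick_cpu_sketch() and
wake_sketch() are hypothetical stand-ins for RQD(ops, cpu),
csched_cpu_pick() and csched_vcpu_wake() in the diff; locking,
tracing, and the active run queue itself are omitted.

    /* Toy versions of the structures involved. */
    struct csched_runqueue_data {
        int id;
    };

    struct csched_vcpu {
        /* Up-pointer; NULL while the vcpu is in flight between runqueues */
        struct csched_runqueue_data *rqd;
    };

    /* Stand-in for RQD(ops, cpu): map a cpu to the runqueue covering it. */
    struct csched_runqueue_data *runqueue_of(int cpu);

    /* cpu_pick: if the chosen cpu belongs to a different runqueue,
     * drop the old assignment while the old runqueue's lock is held. */
    int pick_cpu_sketch(struct csched_vcpu *svc, int new_cpu)
    {
        if ( svc->rqd != NULL && runqueue_of(new_cpu) != svc->rqd )
            svc->rqd = NULL;              /* runq_deassign() in the patch */
        return new_cpu;
    }

    /* wake: a vcpu arriving with no runqueue picks up the runqueue of
     * the cpu it wakes on before being put on the active run queue. */
    void wake_sketch(struct csched_vcpu *svc, int cpu)
    {
        if ( svc->rqd == NULL )
            svc->rqd = runqueue_of(cpu);  /* runq_assign() in the patch */
    }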

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
author Keir Fraser <keir@xen.org>
date Fri Dec 24 08:28:10 2010 +0000 (2010-12-24)
parents f33f7a9f9d40
children 98f023d7717a
files xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c	Fri Dec 24 08:27:42 2010 +0000
+++ b/xen/common/sched_credit2.c	Fri Dec 24 08:28:10 2010 +0000
@@ -42,6 +42,7 @@
 #define TRC_CSCHED2_TICKLE       TRC_SCHED_CLASS + 6
 #define TRC_CSCHED2_CREDIT_RESET TRC_SCHED_CLASS + 7
 #define TRC_CSCHED2_SCHED_TASKLET TRC_SCHED_CLASS + 8
+#define TRC_CSCHED2_RUNQ_ASSIGN   TRC_SCHED_CLASS + 10
 
 /*
  * WARNING: This is still in an experimental phase.  Status and work can be found at the
@@ -209,6 +210,7 @@ struct csched_vcpu {
     struct list_head rqd_elem;  /* On the runqueue data list */
     struct list_head sdom_elem; /* On the domain vcpu list */
     struct list_head runq_elem; /* On the runqueue         */
+    struct csched_runqueue_data *rqd; /* Up-pointer to the runqueue */
 
     /* Up-pointers */
     struct csched_dom *sdom;
@@ -274,6 +276,7 @@ static int
            svc->vcpu->domain->domain_id,
            svc->vcpu->vcpu_id);
 
+    BUG_ON(&svc->rqd->runq != runq);
     /* Idle vcpus not allowed on the runqueue anymore */
     BUG_ON(is_idle_vcpu(svc->vcpu));
     BUG_ON(svc->vcpu->is_running);
@@ -355,6 +358,7 @@ runq_tickle(const struct scheduler *ops,
              current->vcpu_id);
 
     BUG_ON(new->vcpu->processor != cpu);
+    BUG_ON(new->rqd != rqd);
 
     /* Look at the cpu it's running on first */
     cur = CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
@@ -445,15 +449,17 @@ no_tickle:
  */
 static void reset_credit(const struct scheduler *ops, int cpu, s_time_t now)
 {
+    struct csched_runqueue_data *rqd = RQD(ops, cpu);
     struct list_head *iter;
 
-    list_for_each( iter, &RQD(ops, cpu)->svc )
+    list_for_each( iter, &rqd->svc )
     {
         struct csched_vcpu * svc = list_entry(iter, struct csched_vcpu, rqd_elem);
 
         int start_credit;
 
         BUG_ON( is_idle_vcpu(svc->vcpu) );
+        BUG_ON( svc->rqd != rqd );
 
         start_credit = svc->credit;
 
@@ -620,12 +626,69 @@ csched_alloc_vdata(const struct schedule
     return svc;
 }
 
+/* Add and remove from runqueue assignment (not active run queue) */
+static void
+__runq_assign(struct csched_vcpu *svc, struct csched_runqueue_data *rqd)
+{
+
+    svc->rqd = rqd;
+    list_add_tail(&svc->rqd_elem, &svc->rqd->svc);
+
+    update_max_weight(svc->rqd, svc->weight, 0);
+
+    /* TRACE */
+    {
+        struct {
+            unsigned dom:16,vcpu:16;
+            unsigned rqi:16;
+        } d;
+        d.dom = svc->vcpu->domain->domain_id;
+        d.vcpu = svc->vcpu->vcpu_id;
+        d.rqi=rqd->id;
+        trace_var(TRC_CSCHED2_RUNQ_ASSIGN, 1,
+                  sizeof(d),
+                  (unsigned char *)&d);
+    }
+
+}
+
+static void
+runq_assign(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched_vcpu *svc = vc->sched_priv;
+
+    BUG_ON(svc->rqd != NULL);
+
+    __runq_assign(svc, RQD(ops, vc->processor));
+}
+
+static void
+__runq_deassign(struct csched_vcpu *svc)
+{
+    BUG_ON(__vcpu_on_runq(svc));
+
+    list_del_init(&svc->rqd_elem);
+    update_max_weight(svc->rqd, 0, svc->weight);
+
+    svc->rqd = NULL;
+}
+
+static void
+runq_deassign(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched_vcpu *svc = vc->sched_priv;
+
+    BUG_ON(svc->rqd != RQD(ops, vc->processor));
+
+    __runq_deassign(svc);
+}
+
 static void
 csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc)
 {
     struct csched_vcpu *svc = vc->sched_priv;
     struct domain * const dom = vc->domain;
-    struct csched_dom *sdom = CSCHED_DOM(dom);
+    struct csched_dom * const sdom = svc->sdom;
 
     printk("%s: Inserting d%dv%d\n",
            __func__, dom->domain_id, vc->vcpu_id);
@@ -639,8 +702,7 @@ csched_vcpu_insert(const struct schedule
         /* FIXME: Abstract for multiple runqueues */
         vcpu_schedule_lock_irq(vc);
 
-        list_add_tail(&svc->rqd_elem, &RQD(ops, vc->processor)->svc);
-        update_max_weight(RQD(ops, vc->processor), svc->weight, 0);
+        runq_assign(ops, vc);
 
         vcpu_schedule_unlock_irq(vc);
 
@@ -672,8 +734,7 @@ csched_vcpu_remove(const struct schedule
         /* Remove from runqueue */
         vcpu_schedule_lock_irq(vc);
 
-        list_del_init(&svc->rqd_elem);
-        update_max_weight(RQD(ops, vc->processor), 0, svc->weight);
+        runq_deassign(ops, vc);
 
         vcpu_schedule_unlock_irq(vc);
 
@@ -734,6 +795,12 @@ csched_vcpu_wake(const struct scheduler 
         goto out;
     }
 
+    /* Add into the new runqueue if necessary */
+    if ( svc->rqd == NULL )
+        runq_assign(ops, vc);
+    else
+        BUG_ON(RQD(ops, vc->processor) != svc->rqd );
+
    now = NOW();
 
     /* Put the VCPU on the runq */
@@ -753,6 +820,8 @@ csched_context_saved(const struct schedu
 
     vcpu_schedule_lock_irq(vc);
 
+    BUG_ON( !is_idle_vcpu(vc) && svc->rqd != RQD(ops, vc->processor));
+
     /* This vcpu is now eligible to be put on the runqueue again */
     clear_bit(__CSFLAG_scheduled, &svc->flags);
 
@@ -777,7 +846,7 @@ csched_context_saved(const struct schedu
 }
 
 static int
-csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
+choose_cpu(const struct scheduler *ops, struct vcpu *vc)
 {
     /* FIXME: Chose a schedule group based on load */
     /* FIXME: Migrate the vcpu to the new runqueue list, updating
@@ -786,6 +855,36 @@ csched_cpu_pick(const struct scheduler *
 }
 
 static int
+csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    int new_cpu;
+
+    /* The scheduler interface doesn't have an explicit mechanism to
+     * involve the choosable scheduler in the migrate process, so we
+     * infer that a change may happen by the call to cpu_pick, and
+     * remove it from the old runqueue while the lock for the old
+     * runqueue is held.  It can't be actively waiting to run.  It
+     * will be added to the new runqueue when it next wakes.
+     *
+     * If we want to be able to call pick() separately, we need
   1.193 +     * to add a mechansim to remove a vcpu from an old processor /
   1.194 +     * runqueue before releasing the lock. */
   1.195 +    BUG_ON(__vcpu_on_runq(svc));
   1.196 +
   1.197 +    new_cpu = choose_cpu(ops, vc);
   1.198 +
   1.199 +    /* If we're suggesting moving to a different runqueue, remove it
   1.200 +     * from the old runqueue while we have the lock.  It will be added
   1.201 +     * to the new one when it wakes. */
   1.202 +    if ( svc->rqd != NULL
   1.203 +         && RQD(ops, new_cpu) != svc->rqd )
   1.204 +        runq_deassign(ops, vc);
   1.205 +
   1.206 +    return new_cpu;
   1.207 +}
   1.208 +
   1.209 +static int
   1.210  csched_dom_cntl(
   1.211      const struct scheduler *ops,
   1.212      struct domain *d,
   1.213 @@ -826,8 +925,10 @@ csched_dom_cntl(
   1.214                   * lock. */
   1.215                  vcpu_schedule_lock_irq(svc->vcpu);
   1.216  
   1.217 +                BUG_ON(svc->rqd != RQD(ops, svc->vcpu->processor));
   1.218 +
   1.219                  svc->weight = sdom->weight;
   1.220 -                update_max_weight(RQD(ops, svc->vcpu->processor), svc->weight, old_weight);
   1.221 +                update_max_weight(svc->rqd, svc->weight, old_weight);
   1.222  
   1.223                  vcpu_schedule_unlock_irq(svc->vcpu);
   1.224              }
   1.225 @@ -1017,7 +1118,9 @@ csched_schedule(
   1.226      rqd = RQD(ops, cpu);
   1.227      BUG_ON(!cpu_isset(cpu, rqd->active));
   1.228  
   1.229 -    /* Protected by runqueue lock */
   1.230 +    /* Protected by runqueue lock */        
   1.231 +
   1.232 +    BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
   1.233  
   1.234      /* Clear "tickled" bit now that we've been scheduled */
   1.235      if ( cpu_isset(cpu, rqd->tickled) )
   1.236 @@ -1067,6 +1170,8 @@ csched_schedule(
   1.237          /* If switching, remove this from the runqueue and mark it scheduled */
   1.238          if ( snext != scurr )
   1.239          {
   1.240 +            BUG_ON(snext->rqd != rqd);
   1.241 +    
   1.242              __runq_remove(snext);
   1.243              if ( snext->vcpu->is_running )
   1.244              {