xcp-1.6-updates/xen-4.1.hg

changeset 23254:1c0f76eea67d

xen,credit1: Add variable timeslice

Add a Xen command-line parameter, sched_credit_tslice_ms,
to set the timeslice of the credit1 scheduler.
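
For context, sched_credit_tslice_ms is registered with integer_param(), so it is
parsed from the hypervisor boot command line; its default of 30ms reproduces the
old fixed arrangement of three 10ms ticks per timeslice. A minimal, illustrative
boot entry might look like the following (the image paths and the 5ms value are
only examples, not part of this changeset):

    # Hypothetical GRUB stanza: a shorter timeslice trades some throughput
    # for lower scheduling latency under the credit1 scheduler.
    multiboot /boot/xen.gz sched=credit sched_credit_tslice_ms=5
    module /boot/vmlinuz-dom0 console=hvc0 root=/dev/sda1 ro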

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
xen-unstable changeset: 23834:1d40b2793723
xen-unstable date: Tue Sep 13 10:43:43 2011 +0100
author George Dunlap <george.dunlap@eu.citrix.com>
date Wed Mar 07 09:33:45 2012 +0000 (2012-03-07)
parents f1b85192dbd8
children 1f95b55ef427
files xen/common/sched_credit.c
line diff
     1.1 --- a/xen/common/sched_credit.c	Wed Mar 07 09:30:42 2012 +0000
     1.2 +++ b/xen/common/sched_credit.c	Wed Mar 07 09:33:45 2012 +0000
     1.3 @@ -41,15 +41,9 @@
     1.4   */
     1.5  #define CSCHED_DEFAULT_WEIGHT       256
     1.6  #define CSCHED_TICKS_PER_TSLICE     3
     1.7 -#define CSCHED_TICKS_PER_ACCT       3
     1.8 -#define CSCHED_MSECS_PER_TICK       10
     1.9 -#define CSCHED_MSECS_PER_TSLICE     \
    1.10 -    (CSCHED_MSECS_PER_TICK * CSCHED_TICKS_PER_TSLICE)
    1.11 +/* Default timeslice: 30ms */
    1.12 +#define CSCHED_DEFAULT_TSLICE_MS    30
    1.13  #define CSCHED_CREDITS_PER_MSEC     10
    1.14 -#define CSCHED_CREDITS_PER_TSLICE   \
    1.15 -    (CSCHED_CREDITS_PER_MSEC * CSCHED_MSECS_PER_TSLICE)
    1.16 -#define CSCHED_CREDITS_PER_ACCT     \
    1.17 -    (CSCHED_CREDITS_PER_MSEC * CSCHED_MSECS_PER_TICK * CSCHED_TICKS_PER_ACCT)
    1.18  
    1.19  
    1.20  /*
    1.21 @@ -113,6 +107,8 @@
    1.22   */
    1.23  static bool_t __read_mostly sched_credit_default_yield;
    1.24  boolean_param("sched_credit_default_yield", sched_credit_default_yield);
    1.25 +static int __read_mostly sched_credit_tslice_ms = CSCHED_DEFAULT_TSLICE_MS;
    1.26 +integer_param("sched_credit_tslice_ms", sched_credit_tslice_ms);
    1.27  
    1.28  /*
    1.29   * Physical CPU
    1.30 @@ -176,6 +172,9 @@ struct csched_private {
    1.31      uint32_t credit;
    1.32      int credit_balance;
    1.33      uint32_t runq_sort;
     1.34 +    /* Period of master ticker in milliseconds; tick period in microseconds */
    1.35 +    unsigned tslice_ms, tick_period_us, ticks_per_tslice;
    1.36 +    unsigned credits_per_tslice;
    1.37  };
    1.38  
    1.39  static void csched_tick(void *_cpu);
    1.40 @@ -326,7 +325,7 @@ csched_free_pdata(const struct scheduler
    1.41  
    1.42      spin_lock_irqsave(&prv->lock, flags);
    1.43  
    1.44 -    prv->credit -= CSCHED_CREDITS_PER_ACCT;
    1.45 +    prv->credit -= prv->credits_per_tslice;
    1.46      prv->ncpus--;
    1.47      cpu_clear(cpu, prv->idlers);
    1.48      cpu_clear(cpu, prv->cpus);
    1.49 @@ -360,19 +359,19 @@ csched_alloc_pdata(const struct schedule
    1.50      spin_lock_irqsave(&prv->lock, flags);
    1.51  
    1.52      /* Initialize/update system-wide config */
    1.53 -    prv->credit += CSCHED_CREDITS_PER_ACCT;
    1.54 +    prv->credit += prv->credits_per_tslice;
    1.55      prv->ncpus++;
    1.56      cpu_set(cpu, prv->cpus);
    1.57      if ( prv->ncpus == 1 )
    1.58      {
    1.59          prv->master = cpu;
    1.60          init_timer(&prv->master_ticker, csched_acct, prv, cpu);
    1.61 -        set_timer(&prv->master_ticker, NOW() +
    1.62 -                  MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT);
    1.63 +        set_timer(&prv->master_ticker,
    1.64 +                  NOW() + MILLISECS(prv->tslice_ms));
    1.65      }
    1.66  
    1.67      init_timer(&spc->ticker, csched_tick, (void *)(unsigned long)cpu, cpu);
    1.68 -    set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
    1.69 +    set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us) );
    1.70  
    1.71      INIT_LIST_HEAD(&spc->runq);
    1.72      spc->runq_sort_last = prv->runq_sort;
    1.73 @@ -1000,7 +999,7 @@ csched_acct(void* dummy)
    1.74           * for one full accounting period. We allow a domain to earn more
    1.75           * only when the system-wide credit balance is negative.
    1.76           */
    1.77 -        credit_peak = sdom->active_vcpu_count * CSCHED_CREDITS_PER_ACCT;
    1.78 +        credit_peak = sdom->active_vcpu_count * prv->credits_per_tslice;
    1.79          if ( prv->credit_balance < 0 )
    1.80          {
    1.81              credit_peak += ( ( -prv->credit_balance
    1.82 @@ -1012,7 +1011,7 @@ csched_acct(void* dummy)
    1.83  
    1.84          if ( sdom->cap != 0U )
    1.85          {
    1.86 -            credit_cap = ((sdom->cap * CSCHED_CREDITS_PER_ACCT) + 99) / 100;
    1.87 +            credit_cap = ((sdom->cap * prv->credits_per_tslice) + 99) / 100;
    1.88              if ( credit_cap < credit_peak )
    1.89                  credit_peak = credit_cap;
    1.90  
    1.91 @@ -1090,10 +1089,10 @@ csched_acct(void* dummy)
    1.92                  }
    1.93  
    1.94                  /* Lower bound on credits */
    1.95 -                if ( credit < -CSCHED_CREDITS_PER_TSLICE )
    1.96 +                if ( credit < -prv->credits_per_tslice )
    1.97                  {
    1.98                      CSCHED_STAT_CRANK(acct_min_credit);
    1.99 -                    credit = -CSCHED_CREDITS_PER_TSLICE;
   1.100 +                    credit = -prv->credits_per_tslice;
   1.101                      atomic_set(&svc->credit, credit);
   1.102                  }
   1.103              }
   1.104 @@ -1115,7 +1114,7 @@ csched_acct(void* dummy)
   1.105                  }
   1.106  
   1.107                  /* Upper bound on credits means VCPU stops earning */
   1.108 -                if ( credit > CSCHED_CREDITS_PER_TSLICE )
   1.109 +                if ( credit > prv->credits_per_tslice )
   1.110                  {
   1.111                      __csched_vcpu_acct_stop_locked(prv, svc);
   1.112                      /* Divide credits in half, so that when it starts
   1.113 @@ -1139,8 +1138,8 @@ csched_acct(void* dummy)
   1.114      prv->runq_sort++;
   1.115  
   1.116  out:
   1.117 -    set_timer( &prv->master_ticker, NOW() +
   1.118 -            MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT );
   1.119 +    set_timer( &prv->master_ticker,
   1.120 +               NOW() + MILLISECS(prv->tslice_ms));
   1.121  }
   1.122  
   1.123  static void
   1.124 @@ -1167,7 +1166,7 @@ csched_tick(void *_cpu)
   1.125       */
   1.126      csched_runq_sort(prv, cpu);
   1.127  
   1.128 -    set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
   1.129 +    set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us) );
   1.130  }
   1.131  
   1.132  static struct csched_vcpu *
   1.133 @@ -1373,7 +1372,7 @@ csched_schedule(
   1.134       * Return task to run next...
   1.135       */
   1.136      ret.time = (is_idle_vcpu(snext->vcpu) ?
   1.137 -                -1 : MILLISECS(CSCHED_MSECS_PER_TSLICE));
   1.138 +                -1 : MILLISECS(prv->tslice_ms));
   1.139      ret.task = snext->vcpu;
   1.140  
   1.141      CSCHED_VCPU_CHECK(ret.task);
   1.142 @@ -1463,10 +1462,9 @@ csched_dump(const struct scheduler *ops)
   1.143             "\tweight             = %u\n"
   1.144             "\trunq_sort          = %u\n"
   1.145             "\tdefault-weight     = %d\n"
   1.146 -           "\tmsecs per tick     = %dms\n"
   1.147 +           "\ttslice             = %dms\n"
   1.148             "\tcredits per msec   = %d\n"
   1.149             "\tticks per tslice   = %d\n"
   1.150 -           "\tticks per acct     = %d\n"
   1.151             "\tmigration delay    = %uus\n",
   1.152             prv->ncpus,
   1.153             prv->master,
   1.154 @@ -1475,10 +1473,9 @@ csched_dump(const struct scheduler *ops)
   1.155             prv->weight,
   1.156             prv->runq_sort,
   1.157             CSCHED_DEFAULT_WEIGHT,
   1.158 -           CSCHED_MSECS_PER_TICK,
   1.159 +           prv->tslice_ms,
   1.160             CSCHED_CREDITS_PER_MSEC,
   1.161 -           CSCHED_TICKS_PER_TSLICE,
   1.162 -           CSCHED_TICKS_PER_ACCT,
   1.163 +           prv->ticks_per_tslice,
   1.164             vcpu_migration_delay);
   1.165  
   1.166      cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), prv->idlers);
   1.167 @@ -1518,6 +1515,13 @@ csched_init(struct scheduler *ops)
   1.168      INIT_LIST_HEAD(&prv->active_sdom);
   1.169      prv->master = UINT_MAX;
   1.170  
   1.171 +    prv->tslice_ms = sched_credit_tslice_ms;
   1.172 +    prv->ticks_per_tslice = CSCHED_TICKS_PER_TSLICE;
   1.173 +    if ( prv->tslice_ms < prv->ticks_per_tslice )
   1.174 +        prv->ticks_per_tslice = 1;
   1.175 +    prv->tick_period_us = prv->tslice_ms * 1000 / prv->ticks_per_tslice;
   1.176 +    prv->credits_per_tslice = CSCHED_CREDITS_PER_MSEC * prv->tslice_ms;
   1.177 +
   1.178      return 0;
   1.179  }
   1.180  
   1.181 @@ -1542,13 +1546,16 @@ static void csched_tick_suspend(const st
   1.182  
   1.183  static void csched_tick_resume(const struct scheduler *ops, unsigned int cpu)
   1.184  {
   1.185 +    struct csched_private *prv;
   1.186      struct csched_pcpu *spc;
   1.187      uint64_t now = NOW();
   1.188  
   1.189      spc = CSCHED_PCPU(cpu);
   1.190  
   1.191 -    set_timer(&spc->ticker, now + MILLISECS(CSCHED_MSECS_PER_TICK)
   1.192 -            - now % MILLISECS(CSCHED_MSECS_PER_TICK) );
   1.193 +    prv = CSCHED_PRIV(ops);
   1.194 +
   1.195 +    set_timer(&spc->ticker, now + MICROSECS(prv->tick_period_us)
   1.196 +            - now % MICROSECS(prv->tick_period_us) );
   1.197  }
   1.198  
   1.199  static struct csched_private _csched_priv;
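
As a reading aid for the csched_init() hunk above, the following self-contained
sketch (not part of the changeset) mirrors the arithmetic used to derive the new
per-scheduler fields from sched_credit_tslice_ms; the constants are copied from
the patched sched_credit.c, and the main() driver is purely illustrative:

    #include <stdio.h>

    /* Constants copied from the patched xen/common/sched_credit.c */
    #define CSCHED_TICKS_PER_TSLICE     3
    #define CSCHED_CREDITS_PER_MSEC     10

    /* Mirrors the derivation done in csched_init() for a given tslice_ms. */
    static void show_params(unsigned tslice_ms)
    {
        unsigned ticks_per_tslice = CSCHED_TICKS_PER_TSLICE;
        unsigned tick_period_us, credits_per_tslice;

        /* A timeslice shorter than the tick count falls back to a single
         * tick, so each tick still spans at least one millisecond. */
        if ( tslice_ms < ticks_per_tslice )
            ticks_per_tslice = 1;

        tick_period_us     = tslice_ms * 1000 / ticks_per_tslice;
        credits_per_tslice = CSCHED_CREDITS_PER_MSEC * tslice_ms;

        printf("tslice=%ums ticks/tslice=%u tick=%uus credits/tslice=%u\n",
               tslice_ms, ticks_per_tslice, tick_period_us, credits_per_tslice);
    }

    int main(void)
    {
        show_params(30); /* default: 3 ticks of 10000us, 300 credits */
        show_params(5);  /* 3 ticks of 1666us, 50 credits */
        show_params(2);  /* below 3ms: 1 tick of 2000us, 20 credits */
        return 0;
    }

With the default of 30ms the derived values match the old behaviour (three
10000us ticks and 300 credits per accounting period), while a timeslice shorter
than three milliseconds collapses to a single tick per timeslice.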