changeset 22656:f33f7a9f9d40
credit2: Refactor runqueue initialization
Several refactorings:
* Add prv->initialized cpu mask
* Replace prv->runq_count with active_queue mask
* Replace rqd->cpu_min,cpu_mask with active cpu mask
* Put locks in the runqueue structure, rather than borrowing the
existing cpu locks
* init() initializes all runqueues to NULL, inactive, and maps all
  pcpus to runqueue -1
* alloc_pcpu() will add cpus to runqueues, "activating" the runqueue
if necessary. All cpus are currently assigned to runqueue 0.
End-to-end behavior of the system should remain largely the same.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
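
For orientation, here is a minimal, self-contained userspace sketch of the
bookkeeping the message describes. It is a hedged illustration, not the Xen
code: it assumes NR_CPUS fits in one unsigned long so a cpumask can be mocked
as a single word, it elides all locking, and names such as sched_init and
sched_alloc_pcpu are simplified stand-ins for the scheduler hooks in the diff
below.

    #include <stdio.h>

    #define NR_CPUS 8

    typedef unsigned long cpumask_t;           /* bit i set <=> cpu i in mask */
    #define cpu_set(c, m)    ((m) |= 1UL << (c))
    #define cpus_empty(m)    ((m) == 0)

    struct runqueue_data {
        int id;                  /* -1 while the runqueue is inactive */
        cpumask_t active;        /* cpus currently assigned to this runqueue */
    };

    struct private_data {
        cpumask_t initialized;   /* cpus set up for this scheduler instance */
        cpumask_t active_queues; /* replaces the old runq_count scalar */
        int runq_map[NR_CPUS];   /* cpu -> runqueue index; -1 if unmapped */
        struct runqueue_data rqd[NR_CPUS];
    };

    /* init(): every runqueue inactive, every pcpu mapped to runqueue -1. */
    static void sched_init(struct private_data *prv)
    {
        int i;
        for ( i = 0; i < NR_CPUS; i++ )
        {
            prv->runq_map[i] = -1;
            prv->rqd[i].id = -1;
        }
    }

    /* alloc_pcpu(): put a cpu on runqueue 0, activating it if it was empty. */
    static void sched_alloc_pcpu(struct private_data *prv, int cpu)
    {
        int rqi = 0;                    /* all cpus share runqueue 0 for now */

        if ( cpus_empty(prv->rqd[rqi].active) )
        {
            prv->rqd[rqi].id = rqi;     /* "activate" the runqueue */
            cpu_set(rqi, prv->active_queues);
        }
        prv->runq_map[cpu] = rqi;
        cpu_set(cpu, prv->rqd[rqi].active);
        cpu_set(cpu, prv->initialized);
    }

    int main(void)
    {
        static struct private_data prv;
        sched_init(&prv);
        sched_alloc_pcpu(&prv, 0);
        sched_alloc_pcpu(&prv, 1);
        printf("active_queues=%#lx rq0.active=%#lx\n",
               prv.active_queues, prv.rqd[0].active);   /* 0x1 and 0x3 */
        return 0;
    }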
author    Keir Fraser <keir@xen.org>
date      Fri Dec 24 08:27:42 2010 +0000 (2010-12-24)
parents   05377a796952
children  d93de09aa952
files     xen/common/sched_credit2.c
line diff
--- a/xen/common/sched_credit2.c  Fri Dec 24 08:26:59 2010 +0000
+++ b/xen/common/sched_credit2.c  Fri Dec 24 08:27:42 2010 +0000
@@ -176,10 +176,14 @@ integer_param("sched_credit2_migrate_res
  */
 struct csched_runqueue_data {
     int id;
+
+    spinlock_t lock;       /* Lock for this runqueue. */
+    cpumask_t active;      /* CPUs enabled for this runqueue */
+
     struct list_head runq; /* Ordered list of runnable vms */
     struct list_head svc;  /* List of all vcpus assigned to this runqueue */
     int max_weight;
-    int cpu_min, cpu_max;  /* Range of physical cpus this runqueue runs */
+
     cpumask_t idle,        /* Currently idle */
         tickled;           /* Another cpu in the queue is already targeted for this one */
 };
@@ -189,12 +193,12 @@ struct csched_runqueue_data {
  */
 struct csched_private {
     spinlock_t lock;
-    uint32_t ncpus;
-
+    cpumask_t initialized; /* CPU is initialized for this pool */
+
     struct list_head sdom; /* Used mostly for dump keyhandler. */
 
     int runq_map[NR_CPUS];
-    uint32_t runq_count;
+    cpumask_t active_queues; /* Queues which may have active cpus */
     struct csched_runqueue_data rqd[NR_CPUS];
 };
 
@@ -341,7 +345,7 @@ runq_tickle(const struct scheduler *ops,
     int i, ipid=-1;
     s_time_t lowest=(1<<30);
     struct csched_runqueue_data *rqd = RQD(ops, cpu);
-    cpumask_t *online, mask;
+    cpumask_t mask;
     struct csched_vcpu * cur;
 
     d2printk("rqt d%dv%d cd%dv%d\n",
@@ -374,9 +378,7 @@ runq_tickle(const struct scheduler *ops,
 
     /* Otherwise, look for the non-idle cpu with the lowest credit,
      * skipping cpus which have been tickled but not scheduled yet */
-    online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
-
-    cpus_andnot(mask, *online, rqd->idle);
+    cpus_andnot(mask, rqd->active, rqd->idle);
     cpus_andnot(mask, mask, rqd->tickled);
 
     for_each_cpu_mask(i, mask)
@@ -997,7 +999,7 @@ csched_schedule(
     const struct scheduler *ops, s_time_t now, bool_t tasklet_work_scheduled)
 {
     const int cpu = smp_processor_id();
-    struct csched_runqueue_data *rqd = RQD(ops, cpu);
+    struct csched_runqueue_data *rqd;
     struct csched_vcpu * const scurr = CSCHED_VCPU(current);
     struct csched_vcpu *snext = NULL;
     struct task_slice ret;
@@ -1010,6 +1012,10 @@ csched_schedule(
              scurr->vcpu->vcpu_id,
              now);
 
+    BUG_ON(!cpu_isset(cpu, CSCHED_PRIV(ops)->initialized));
+
+    rqd = RQD(ops, cpu);
+    BUG_ON(!cpu_isset(cpu, rqd->active));
 
     /* Protected by runqueue lock */
 
@@ -1166,14 +1172,22 @@ csched_dump(const struct scheduler *ops)
 {
     struct list_head *iter_sdom, *iter_svc;
     struct csched_private *prv = CSCHED_PRIV(ops);
-    int loop;
+    int i, loop;
 
-    printk("info:\n"
-           "\tncpus = %u\n"
+    printk("Active queues: %d\n"
            "\tdefault-weight = %d\n",
-           prv->ncpus,
+           cpus_weight(prv->active_queues),
            CSCHED_DEFAULT_WEIGHT);
+    for_each_cpu_mask(i, prv->active_queues)
+    {
+        printk("Runqueue %d:\n"
+               "\tncpus = %u\n"
+               "\tmax_weight = %d\n",
+               i,
+               cpus_weight(prv->rqd[i].active),
+               prv->rqd[i].max_weight);
 
+    }
     /* FIXME: Locking! */
 
     printk("Domain info:\n");
@@ -1199,16 +1213,82 @@ csched_dump(const struct scheduler *ops)
     }
 }
 
-static void
-csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
+static void activate_runqueue(struct csched_private *prv, int rqi)
 {
-    unsigned long flags;
+    struct csched_runqueue_data *rqd;
+
+    rqd = prv->rqd + rqi;
+
+    BUG_ON(!cpus_empty(rqd->active));
+
+    rqd->max_weight = 1;
+    rqd->id = rqi;
+    INIT_LIST_HEAD(&rqd->svc);
+    INIT_LIST_HEAD(&rqd->runq);
+    spin_lock_init(&rqd->lock);
+
+    cpu_set(rqi, prv->active_queues);
+}
+
+static void deactivate_runqueue(struct csched_private *prv, int rqi)
+{
+    struct csched_runqueue_data *rqd;
+
+    rqd = prv->rqd + rqi;
+
+    BUG_ON(!cpus_empty(rqd->active));
+
+    rqd->id = -1;
+
+    cpu_clear(rqi, prv->active_queues);
+}
+
+static void init_pcpu(const struct scheduler *ops, int cpu)
+{
+    int rqi, old_rqi, flags;
     struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched_runqueue_data *rqd;
+    spinlock_t *old_lock;
 
     spin_lock_irqsave(&prv->lock, flags);
-    prv->ncpus--;
-    cpu_clear(cpu, RQD(ops, cpu)->idle);
-    printk("Removing cpu %d to pool (%d total)\n", cpu, prv->ncpus);
+
+    if ( cpu_isset(cpu, prv->initialized) )
+    {
+        printk("%s: Strange, cpu %d already initialized!\n", __func__, cpu);
+        spin_unlock_irqrestore(&prv->lock, flags);
+        return;
+    }
+
+    old_rqi = prv->runq_map[cpu];
+
+    /* Figure out which runqueue to put it in */
+    rqi = 0;
+
+    rqd=prv->rqd + rqi;
+
+    printk("Adding cpu %d to runqueue %d\n", cpu, rqi);
+    if ( ! cpu_isset(rqi, prv->active_queues) )
+    {
+        printk(" First cpu on runqueue, activating\n");
+        activate_runqueue(prv, rqi);
+    }
+
+    /* IRQs already disabled */
+    old_lock=pcpu_schedule_lock(cpu);
+
+    /* Move spinlock to new runq lock. */
+    per_cpu(schedule_data, cpu).schedule_lock = &rqd->lock;
+
+    /* Set the runqueue map */
+    prv->runq_map[cpu]=rqi;
+
+    cpu_set(cpu, rqd->idle);
+    cpu_set(cpu, rqd->active);
+
+    spin_unlock(old_lock);
+
+    cpu_set(cpu, prv->initialized);
+
     spin_unlock_irqrestore(&prv->lock, flags);
 
     return;
@@ -1217,33 +1297,51 @@ csched_free_pdata(const struct scheduler
 static void *
 csched_alloc_pdata(const struct scheduler *ops, int cpu)
 {
-    spinlock_t *new_lock;
-    spinlock_t *old_lock = per_cpu(schedule_data, cpu).schedule_lock;
-    unsigned long flags;
-    struct csched_private *prv = CSCHED_PRIV(ops);
-
-    spin_lock_irqsave(old_lock, flags);
-    new_lock = &per_cpu(schedule_data, prv->runq_map[cpu])._lock;
-    per_cpu(schedule_data, cpu).schedule_lock = new_lock;
-    spin_unlock_irqrestore(old_lock, flags);
-
-    spin_lock_irqsave(&prv->lock, flags);
-    prv->ncpus++;
-    cpu_set(cpu, RQD(ops, cpu)->idle);
-    printk("Adding cpu %d to pool (%d total)\n", cpu, prv->ncpus);
-    spin_unlock_irqrestore(&prv->lock, flags);
+    init_pcpu(ops, cpu);
 
     return (void *)1;
 }
 
 static void
-make_runq_map(struct csched_private *prv)
+csched_free_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
-    /* FIXME: Read pcpu layout and do this properly */
-    prv->runq_count = 1;
-    prv->rqd[0].cpu_min = 0;
-    prv->rqd[0].cpu_max = NR_CPUS;
-    memset(prv->runq_map, 0, sizeof(prv->runq_map));
+    unsigned long flags;
+    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched_runqueue_data *rqd;
+    int rqi;
+
+    spin_lock_irqsave(&prv->lock, flags);
+
+    BUG_ON( !cpu_isset(cpu, prv->initialized));
+
+    /* Find the old runqueue and remove this cpu from it */
+    rqi = prv->runq_map[cpu];
+
+    rqd = prv->rqd + rqi;
+
+    /* No need to save IRQs here, they're already disabled */
+    spin_lock(&rqd->lock);
+
+    BUG_ON(!cpu_isset(cpu, rqd->idle));
+
+    printk("Removing cpu %d from runqueue %d\n", cpu, rqi);
+
+    cpu_clear(cpu, rqd->idle);
+    cpu_clear(cpu, rqd->active);
+
+    if ( cpus_empty(rqd->active) )
+    {
+        printk(" No cpus left on runqueue, disabling\n");
+        deactivate_runqueue(prv, rqi);
+    }
+
+    spin_unlock(&rqd->lock);
+
+    cpu_clear(cpu, prv->initialized);
+
+    spin_unlock_irqrestore(&prv->lock, flags);
+
+    return;
 }
 
 static int
@@ -1265,18 +1363,11 @@ csched_init(struct scheduler *ops)
     spin_lock_init(&prv->lock);
     INIT_LIST_HEAD(&prv->sdom);
 
-    prv->ncpus = 0;
-
-    make_runq_map(prv);
-
-    for ( i=0; i<prv->runq_count ; i++ )
+    /* But un-initialize all runqueues */
+    for ( i=0; i<NR_CPUS; i++)
     {
-        struct csched_runqueue_data *rqd = prv->rqd + i;
-
-        rqd->max_weight = 1;
-        rqd->id = i;
-        INIT_LIST_HEAD(&rqd->svc);
-        INIT_LIST_HEAD(&rqd->runq);
+        prv->runq_map[i] = -1;
+        prv->rqd[i].id = -1;
    }
 
     return 0;
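
The runq_tickle() hunk above replaces the cpupool-online lookup with the
runqueue's own active mask. A tiny hedged sketch of the resulting candidate
selection, using the same single-word cpumask mock as the earlier sketch
(tickle_candidates is an illustrative name, not a Xen function):

    #include <stdio.h>

    typedef unsigned long cpumask_t;

    /* cpus_andnot(a, b) == a & ~b, mirroring Xen's cpus_andnot() helper. */
    static cpumask_t cpus_andnot(cpumask_t a, cpumask_t b)
    {
        return a & ~b;
    }

    /* Cpus worth tickling: on this runqueue, not idle, not already tickled. */
    static cpumask_t tickle_candidates(cpumask_t active, cpumask_t idle,
                                       cpumask_t tickled)
    {
        return cpus_andnot(cpus_andnot(active, idle), tickled);
    }

    int main(void)
    {
        /* cpus 0-3 active, cpu 0 idle, cpu 1 tickled -> candidates {2,3}. */
        printf("%#lx\n", tickle_candidates(0xf, 0x1, 0x2)); /* prints 0xc */
        return 0;
    }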
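The most delicate step in init_pcpu() is moving a cpu off the lock it was born
with and onto the runqueue's lock. Because per_cpu(schedule_data,
cpu).schedule_lock is a pointer, the hand-over is just a pointer swing done
while holding the old lock. Below is a hedged pthreads sketch of that pattern;
pthread mutexes stand in for Xen spinlocks, and attach_to_runqueue is an
illustrative name, not Xen's API.

    #include <pthread.h>
    #include <stdio.h>

    struct pcpu {
        pthread_mutex_t private_lock;   /* lock used before runqueue assignment */
        pthread_mutex_t *schedule_lock; /* the lock other code actually takes */
    };

    static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER;

    static void attach_to_runqueue(struct pcpu *p)
    {
        /* Take the cpu's current lock, then swing the pointer.  A racing
         * locker either already holds the old lock (we wait for it here)
         * or will re-read the pointer and take the runqueue lock. */
        pthread_mutex_t *old = p->schedule_lock;
        pthread_mutex_lock(old);
        p->schedule_lock = &runq_lock;
        pthread_mutex_unlock(old);
    }

    int main(void)
    {
        struct pcpu p = { PTHREAD_MUTEX_INITIALIZER, NULL };
        p.schedule_lock = &p.private_lock;
        attach_to_runqueue(&p);
        printf("cpu now shares the runqueue lock: %d\n",
               p.schedule_lock == &runq_lock);
        return 0;
    }

Note that the pattern is only safe if lockers re-validate the pointer after
acquiring the lock; in the diff that is the job of pcpu_schedule_lock(), and
the sketch above elides that retry loop.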