debuggers.hg
changeset 22664:df310dcd19cb
credit2: Migrate request infrastructure
Put in infrastructure to allow a vcpu to request migration to a
specific runqueue. This will allow a load balancer to choose running
VMs to migrate, knowing they will go where expected when the VM is
descheduled.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
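This changeset only adds the receiving side of the mechanism: the new flag, the `migrate_rqd` field, and the code in `choose_cpu()` that honours a pending request. As a rough illustration of how the requesting side might look, the sketch below shows how a load balancer could flag a currently-running vcpu for migration. The function name `request_migrate()` and the exact pause-flag handling are assumptions for illustration, not part of this patch; the real balancer arrives in a later changeset.

```c
/*
 * Illustrative sketch only -- not part of this changeset.  A credit2 load
 * balancer, holding the appropriate locks, could ask a currently-running
 * vcpu to move to the runqueue "trqd" roughly like this:
 */
static void request_migrate(struct csched_vcpu *svc,
                            struct csched_runqueue_data *trqd)
{
    /* Remember which runqueue the vcpu should end up on. */
    svc->migrate_rqd = trqd;

    /* Tell choose_cpu() this is a credit2-initiated runq migrate request. */
    set_bit(__CSFLAG_runq_migrate_request, &svc->flags);

    /* Mark the vcpu as migrating so the generic scheduler re-picks a cpu
     * (and hence calls choose_cpu()) once the vcpu comes off its cpu. */
    set_bit(_VPF_migrating, &svc->vcpu->pause_flags);
}
```

Once the vcpu is descheduled, the scheduler's cpu-pick path would call `choose_cpu()`, which (as in the diff below) consumes the flag and returns a cpu from `migrate_rqd->active`.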
| author   | Keir Fraser <keir@xen.org>                  |
|----------|---------------------------------------------|
| date     | Fri Dec 24 08:31:04 2010 +0000 (2010-12-24) |
| parents  | 6a970abb346f                                |
| children | 41d1affef596                                |
| files    | xen/common/sched_credit2.c                  |
line diff
--- a/xen/common/sched_credit2.c	Fri Dec 24 08:30:42 2010 +0000
+++ b/xen/common/sched_credit2.c	Fri Dec 24 08:31:04 2010 +0000
@@ -157,6 +157,12 @@
  */
 #define __CSFLAG_delayed_runq_add 2
 #define CSFLAG_delayed_runq_add (1<<__CSFLAG_delayed_runq_add)
+/* CSFLAG_runq_migrate_request: This vcpu is being migrated as a result of a
+ * credit2-initiated runq migrate request; migrate it to the runqueue indicated
+ * in the svc struct.
+ */
+#define __CSFLAG_runq_migrate_request 3
+#define CSFLAG_runq_migrate_request (1<<__CSFLAG_runq_migrate_request)
 
 
 int opt_migrate_resist=500;
@@ -247,6 +253,8 @@ struct csched_vcpu {
     /* Individual contribution to load */
     s_time_t load_last_update; /* Last time average was updated */
     s_time_t avgload;          /* Decaying queue load */
+
+    struct csched_runqueue_data *migrate_rqd; /* Pre-determined rqd to which to migrate */
 };
 
 /*
@@ -974,10 +982,10 @@ csched_context_saved(const struct schedu
      * it seems a bit pointless; especially as we have plenty of
      * bits free.
      */
-    if ( test_bit(__CSFLAG_delayed_runq_add, &svc->flags) )
+    if ( test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags)
+         && likely(vcpu_runnable(vc)) )
     {
         BUG_ON(__vcpu_on_runq(svc));
-        clear_bit(__CSFLAG_delayed_runq_add, &svc->flags);
 
         runq_insert(ops, vc->processor, svc);
         runq_tickle(ops, vc->processor, svc, now);
@@ -1015,11 +1023,34 @@ choose_cpu(const struct scheduler *ops,
 
     if ( !spin_trylock(&prv->lock) )
     {
+        if ( test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) )
+        {
+            d2printk("d%dv%d -\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id);
+            clear_bit(__CSFLAG_runq_migrate_request, &svc->flags);
+        }
         /* Leave it where it is for now. When we actually pay attention
          * to affinity we'll have to figure something out... */
         return vc->processor;
     }
 
+    /* First check to see if we're here because someone else suggested a place
+     * for us to move. */
+    if ( test_and_clear_bit(__CSFLAG_runq_migrate_request, &svc->flags) )
+    {
+        if ( unlikely(svc->migrate_rqd->id < 0) )
+        {
+            printk("%s: Runqueue migrate aborted because target runqueue disappeared!\n",
+                   __func__);
+            /* Fall-through to normal cpu pick */
+        }
+        else
+        {
+            d2printk("d%dv%d +\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id);
+            new_cpu = first_cpu(svc->migrate_rqd->active);
+            goto out_up;
+        }
+    }
+
     /* FIXME: Pay attention to cpu affinity */
 
     min_load = MAX_LOAD;
@@ -1053,7 +1084,8 @@ choose_cpu(const struct scheduler *ops,
         BUG_ON(cpus_empty(prv->rqd[min_rqi].active));
         new_cpu = first_cpu(prv->rqd[min_rqi].active);
     }
-
+
+out_up:
     spin_unlock(&prv->lock);
 
     return new_cpu;
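Two details in the `csched_context_saved()` hunk above are worth spelling out. First, the separate `test_bit()`/`clear_bit()` pair is collapsed into a single `test_and_clear_bit()`, so the flag is consumed and cleared in one step, even when the vcpu is not re-queued. Second, the new `vcpu_runnable(vc)` check stops a vcpu that blocked while it was being descheduled from being put back on a runqueue. The snippet below simply restates that pattern from the hunk with the reasoning written out as comments; it adds no logic beyond what the patch already contains.

```c
/* Delayed-runq-add pattern after this patch, rationale in comments. */
if ( test_and_clear_bit(__CSFLAG_delayed_runq_add, &svc->flags)
     && likely(vcpu_runnable(vc)) )
{
    /* The flag is consumed atomically and is cleared even when the vcpu
     * turns out not to be runnable, so a stale delayed-add request cannot
     * linger on a vcpu that went to sleep while being descheduled. */
    BUG_ON(__vcpu_on_runq(svc));
    runq_insert(ops, vc->processor, svc);
    runq_tickle(ops, vc->processor, svc, now);
}
```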