debuggers.hg

changeset 21960:49254cab8465

numa: Small tweaks to domain_update_node_affinity() and its callers.

From: Andrew Jones <drjones@redhat.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Aug 04 17:10:46 2010 +0100 (2010-08-04)
parents 581ebaa7e2da
children 8992134dcfd0
files xen/common/domain.c xen/common/schedule.c
--- a/xen/common/domain.c	Wed Aug 04 15:35:28 2010 +0100
+++ b/xen/common/domain.c	Wed Aug 04 17:10:46 2010 +0100
@@ -358,12 +358,8 @@ void domain_update_node_affinity(struct 
         cpus_or(cpumask, cpumask, v->cpu_affinity);
 
     for_each_online_node ( node )
-    {
         if ( cpus_intersects(node_to_cpumask(node), cpumask) )
             node_set(node, nodemask);
-        else
-            node_clear(node, nodemask);
-    }
 
     d->node_affinity = nodemask;
     spin_unlock(&d->node_affinity_lock);
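
The dropped else/node_clear branch is only needed if nodemask could carry
stale bits into the loop. In the surrounding function the mask is a fresh
local (its declaration sits above this hunk and is not shown; it is assumed
here to be initialised empty), so bits only ever need to be set. A minimal
sketch of the loop after this change, under that assumption:

    nodemask_t nodemask = NODE_MASK_NONE;   /* assumed: fresh, empty local */
    unsigned int node;

    for_each_online_node ( node )
        if ( cpus_intersects(node_to_cpumask(node), cpumask) )
            node_set(node, nodemask);

    d->node_affinity = nodemask;            /* only set bits survive */
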
--- a/xen/common/schedule.c	Wed Aug 04 15:35:28 2010 +0100
+++ b/xen/common/schedule.c	Wed Aug 04 17:10:46 2010 +0100
@@ -270,13 +270,13 @@ int sched_move_domain(struct domain *d, 
         SCHED_OP(VCPU2OP(v), destroy_vcpu, v);
 
         cpus_setall(v->cpu_affinity);
-        domain_update_node_affinity(d);
         v->processor = new_p;
         v->sched_priv = vcpu_priv[v->vcpu_id];
         evtchn_move_pirqs(v);
 
         new_p = cycle_cpu(new_p, c->cpu_valid);
     }
+    domain_update_node_affinity(d);
 
     d->cpupool = c;
     SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
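
In sched_move_domain() the call is not removed but hoisted: node affinity is
now recomputed once, after every vCPU's affinity and processor have been
rewritten, instead of once per vCPU inside the loop. A sketch of the
resulting shape (per-vCPU body abbreviated):

    for_each_vcpu ( d, v )
    {
        cpus_setall(v->cpu_affinity);
        v->processor = new_p;
        /* ... remaining per-vCPU setup as in the hunk above ... */
        new_p = cycle_cpu(new_p, c->cpu_valid);
    }
    domain_update_node_affinity(d);   /* hoisted out of the loop */
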
@@ -458,6 +458,7 @@ int cpu_disable_scheduler(unsigned int c
     struct vcpu *v;
     struct cpupool *c;
     int    ret = 0;
+    bool_t affinity_broken;
 
     c = per_cpu(cpupool, cpu);
     if ( c == NULL )
@@ -468,6 +469,8 @@ int cpu_disable_scheduler(unsigned int c
         if ( d->cpupool != c )
             continue;
 
+        affinity_broken = 0;
+
         for_each_vcpu ( d, v )
         {
             vcpu_schedule_lock_irq(v);
@@ -478,7 +481,7 @@ int cpu_disable_scheduler(unsigned int c
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                         v->domain->domain_id, v->vcpu_id);
                 cpus_setall(v->cpu_affinity);
-                domain_update_node_affinity(d);
+                affinity_broken = 1;
             }
 
             if ( v->processor == cpu )
@@ -501,7 +504,11 @@ int cpu_disable_scheduler(unsigned int c
             if ( v->processor == cpu )
                 ret = -EAGAIN;
         }
+
+        if ( affinity_broken )
+            domain_update_node_affinity(d);
     }
+
     return ret;
 }
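
cpu_disable_scheduler() gets the same treatment via a per-domain flag: each
vCPU whose affinity has to be broken only records the fact, and the
node-affinity recomputation runs once per domain after the whole vCPU loop.
A sketch of the pattern; vcpu_affinity_must_break is a hypothetical
placeholder for the check that sits outside these hunks:

    bool_t affinity_broken = 0;

    for_each_vcpu ( d, v )
    {
        if ( vcpu_affinity_must_break )    /* hypothetical name; the real
                                              check is not in the hunk */
        {
            cpus_setall(v->cpu_affinity);
            affinity_broken = 1;           /* defer the node update */
        }
        /* ... per-vCPU migration as in the hunks above ... */
    }

    if ( affinity_broken )
        domain_update_node_affinity(d);    /* once per domain, after the loop */
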