debuggers.hg

changeset   21212:07befd9cf6d3
author      Keir Fraser <keir.fraser@citrix.com>
date        Wed Apr 14 11:29:05 2010 +0100 (2010-04-14)
parents     5057604eeefc
children    ae08db793feb
files       xen/arch/x86/domain.c
            xen/common/domain.c
            xen/common/schedule.c
            xen/include/asm-ia64/linux-xen/asm/ptrace.h
            xen/include/asm-x86/domain.h
            xen/include/asm-x86/regs.h
            xen/include/xen/domain.h
            xen/include/xen/sched.h

Architecture-independent, and tasklet-based, continue_hypercall_on_cpu().

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
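
The old implementation, removed from xen/arch/x86/domain.c below, worked by
temporarily locking the calling vCPU's affinity to the target CPU and
resuming through a swapped-in schedule_tail hook. The replacement in
xen/common/domain.c instead pauses the calling vCPU and runs func(data) in a
tasklet scheduled on the target CPU, writing the result into the guest's
return register via the new per-arch return_reg() macro. A minimal
caller-side sketch (do_my_op() and my_func() are hypothetical names, for
illustration only; the real API is the continue_hypercall_on_cpu()
declaration now exported from xen/include/xen/domain.h):

    static long my_func(void *data)
    {
        /* Runs in tasklet context on the chosen CPU while the calling
         * vCPU is paused. The value returned here is copied into the
         * guest's return register (eax on x86, r8 on ia64) by the
         * tasklet handler. */
        return 0;
    }

    long do_my_op(void *arg)
    {
        /* If CPU 0 is already the current CPU, my_func() runs
         * synchronously. Otherwise the vCPU is paused, a tasklet is
         * scheduled on CPU 0, and the dummy 0 returned below is later
         * overwritten by my_func()'s real return value. */
        return continue_hypercall_on_cpu(0, my_func, arg);
    }
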
--- a/xen/arch/x86/domain.c	Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/arch/x86/domain.c	Wed Apr 14 11:29:05 2010 +0100
@@ -1517,82 +1517,6 @@ void sync_vcpu_execstate(struct vcpu *v)
     flush_tlb_mask(&v->vcpu_dirty_cpumask);
 }
 
-struct migrate_info {
-    long (*func)(void *data);
-    void *data;
-    void (*saved_schedule_tail)(struct vcpu *);
-    cpumask_t saved_affinity;
-    unsigned int nest;
-};
-
-static void continue_hypercall_on_cpu_helper(struct vcpu *v)
-{
-    struct cpu_user_regs *regs = guest_cpu_user_regs();
-    struct migrate_info *info = v->arch.continue_info;
-    cpumask_t mask = info->saved_affinity;
-    void (*saved_schedule_tail)(struct vcpu *) = info->saved_schedule_tail;
-
-    regs->eax = info->func(info->data);
-
-    if ( info->nest-- == 0 )
-    {
-        xfree(info);
-        v->arch.schedule_tail = saved_schedule_tail;
-        v->arch.continue_info = NULL;
-        vcpu_unlock_affinity(v, &mask);
-    }
-
-    (*saved_schedule_tail)(v);
-}
-
-int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data)
-{
-    struct vcpu *v = current;
-    struct migrate_info *info;
-    cpumask_t mask = cpumask_of_cpu(cpu);
-    int rc;
-
-    if ( cpu == smp_processor_id() )
-        return func(data);
-
-    info = v->arch.continue_info;
-    if ( info == NULL )
-    {
-        info = xmalloc(struct migrate_info);
-        if ( info == NULL )
-            return -ENOMEM;
-
-        rc = vcpu_lock_affinity(v, &mask);
-        if ( rc )
-        {
-            xfree(info);
-            return rc;
-        }
-
-        info->saved_schedule_tail = v->arch.schedule_tail;
-        info->saved_affinity = mask;
-        info->nest = 0;
-
-        v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
-        v->arch.continue_info = info;
-    }
-    else
-    {
-        BUG_ON(info->nest != 0);
-        rc = vcpu_locked_change_affinity(v, &mask);
-        if ( rc )
-            return rc;
-        info->nest++;
-    }
-
-    info->func = func;
-    info->data = data;
-
-    /* Dummy return value will be overwritten by new schedule_tail. */
-    BUG_ON(!test_bit(SCHEDULE_SOFTIRQ, &softirq_pending(smp_processor_id())));
-    return 0;
-}
-
 #define next_arg(fmt, args) ({                                              \
     unsigned long __arg;                                                    \
     switch ( *(fmt)++ )                                                     \
--- a/xen/common/domain.c	Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/common/domain.c	Wed Apr 14 11:29:05 2010 +0100
@@ -898,6 +898,73 @@ long vm_assist(struct domain *p, unsigne
     return -ENOSYS;
 }
 
+struct migrate_info {
+    long (*func)(void *data);
+    void *data;
+    struct vcpu *vcpu;
+    unsigned int nest;
+};
+
+static DEFINE_PER_CPU(struct migrate_info *, continue_info);
+
+static void continue_hypercall_tasklet_handler(unsigned long _info)
+{
+    struct migrate_info *info = (struct migrate_info *)_info;
+    struct vcpu *v = info->vcpu;
+
+    vcpu_sleep_sync(v);
+
+    this_cpu(continue_info) = info;
+    return_reg(v) = info->func(info->data);
+    this_cpu(continue_info) = NULL;
+
+    if ( info->nest-- == 0 )
+    {
+        xfree(info);
+        vcpu_unpause(v);
+    }
+}
+
+int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data)
+{
+    struct vcpu *curr = current;
+    struct migrate_info *info;
+
+    if ( cpu == smp_processor_id() )
+        return func(data);
+
+    info = this_cpu(continue_info);
+    if ( info == NULL )
+    {
+        info = xmalloc(struct migrate_info);
+        if ( info == NULL )
+            return -ENOMEM;
+
+        info->vcpu = curr;
+        info->nest = 0;
+
+        tasklet_init(
+            &curr->continue_hypercall_tasklet,
+            continue_hypercall_tasklet_handler,
+            (unsigned long)info);
+
+        vcpu_pause_nosync(curr);
+    }
+    else
+    {
+        BUG_ON(info->nest != 0);
+        info->nest++;
+    }
+
+    info->func = func;
+    info->data = data;
+
+    tasklet_schedule_on_cpu(&curr->continue_hypercall_tasklet, cpu);
+
+    /* Dummy return value will be overwritten by tasklet. */
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
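
Note how the handler publishes the active migrate_info in the per-CPU
continue_info pointer while func() runs. This lets func() itself call
continue_hypercall_on_cpu() again: the 'else' branch reuses the existing
migrate_info, bumps nest to 1 and reschedules the tasklet, and the
post-decrement in the handler ensures the vCPU is unpaused (and the info
freed) only when the final step of the chain returns. A hedged sketch of a
two-step chain (step1/step2 are hypothetical names):

    static long step2(void *data)
    {
        /* Final step, running on CPU 0. nest is 0 again on entry, so
         * after this returns the handler frees the migrate_info and
         * unpauses the vCPU, completing the hypercall. */
        return 0;
    }

    static long step1(void *data)
    {
        /* First step, running in the tasklet handler on CPU 1, where
         * this_cpu(continue_info) is non-NULL. The call below therefore
         * reuses the migrate_info, increments nest and reschedules the
         * tasklet on CPU 0. */
        return continue_hypercall_on_cpu(0, step2, data);
    }

    /* From the original hypercall context: */
    rc = continue_hypercall_on_cpu(1, step1, data);
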
--- a/xen/common/schedule.c	Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/common/schedule.c	Wed Apr 14 11:29:05 2010 +0100
@@ -408,27 +408,19 @@ void cpu_disable_scheduler(void)
     }
 }
 
-static int __vcpu_set_affinity(
-    struct vcpu *v, cpumask_t *affinity,
-    bool_t old_lock_status, bool_t new_lock_status)
+int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
 {
     cpumask_t online_affinity, old_affinity;
 
+    if ( v->domain->is_pinned )
+        return -EINVAL;
+
     cpus_and(online_affinity, *affinity, cpu_online_map);
     if ( cpus_empty(online_affinity) )
         return -EINVAL;
 
     vcpu_schedule_lock_irq(v);
 
-    if ( v->affinity_locked != old_lock_status )
-    {
-        BUG_ON(!v->affinity_locked);
-        vcpu_schedule_unlock_irq(v);
-        return -EBUSY;
-    }
-
-    v->affinity_locked = new_lock_status;
-
     old_affinity = v->cpu_affinity;
     v->cpu_affinity = *affinity;
     *affinity = old_affinity;
@@ -446,36 +438,6 @@ static int __vcpu_set_affinity
     return 0;
 }
 
-int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    if ( v->domain->is_pinned )
-        return -EINVAL;
-    return __vcpu_set_affinity(v, affinity, 0, 0);
-}
-
-int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    return __vcpu_set_affinity(v, affinity, 0, 1);
-}
-
-int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    return __vcpu_set_affinity(v, affinity, 1, 1);
-}
-
-void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    cpumask_t online_affinity;
-
-    /* Do not fail if no CPU in old affinity mask is online. */
-    cpus_and(online_affinity, *affinity, cpu_online_map);
-    if ( cpus_empty(online_affinity) )
-        *affinity = cpu_online_map;
-
-    if ( __vcpu_set_affinity(v, affinity, 1, 0) != 0 )
-        BUG();
-}
-
 /* Block the currently-executing domain until a pertinent event occurs. */
 static long do_block(void)
 {
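
With the continuation no longer implemented by migrating the calling vCPU,
the temporary affinity-locking machinery (vcpu_lock_affinity(),
vcpu_locked_change_affinity(), vcpu_unlock_affinity() and the
affinity_locked flag) loses its only user, so vcpu_set_affinity() collapses
back into a single plain function above.
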
--- a/xen/include/asm-ia64/linux-xen/asm/ptrace.h	Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/ptrace.h	Wed Apr 14 11:29:05 2010 +0100
@@ -198,6 +198,8 @@ static inline struct cpu_user_regs *vcpu
 	return (struct cpu_user_regs *)((unsigned long)v + IA64_STK_OFFSET) - 1;
 }
 
+#define return_reg(v) (vcpu_regs(v)->r8)
+
 struct cpu_user_regs *guest_cpu_user_regs(void);
 
 extern void show_stack(struct task_struct *task, unsigned long *sp);
--- a/xen/include/asm-x86/domain.h	Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/include/asm-x86/domain.h	Wed Apr 14 11:29:05 2010 +0100
@@ -381,9 +381,6 @@ struct arch_vcpu
     void (*ctxt_switch_from) (struct vcpu *);
     void (*ctxt_switch_to) (struct vcpu *);
 
-    /* Record information required to continue execution after migration */
-    void *continue_info;
-
     /* Bounce information for propagating an exception to guest OS. */
     struct trap_bounce trap_bounce;
 
@@ -451,9 +448,6 @@ struct arch_vcpu
 #define hvm_vmx         hvm_vcpu.u.vmx
 #define hvm_svm         hvm_vcpu.u.svm
 
-/* Continue the current hypercall via func(data) on specified cpu. */
-int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);
-
 void vcpu_show_execution_state(struct vcpu *);
 void vcpu_show_registers(const struct vcpu *);
 
--- a/xen/include/asm-x86/regs.h	Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/include/asm-x86/regs.h	Wed Apr 14 11:29:05 2010 +0100
@@ -19,4 +19,6 @@
     (diff == 0);                                                              \
 })
 
+#define return_reg(v) ((v)->arch.guest_context.user_regs.eax)
+
 #endif /* __X86_REGS_H__ */
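
Together with the ia64 definition in ptrace.h above, return_reg() gives
common code a single architecture-neutral spelling for "the guest register
that carries a hypercall's return value":

    /* As used by continue_hypercall_tasklet_handler() in common/domain.c: */
    return_reg(v) = info->func(info->data);    /* x86: eax, ia64: r8 */
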
--- a/xen/include/xen/domain.h	Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/include/xen/domain.h	Wed Apr 14 11:29:05 2010 +0100
@@ -3,6 +3,7 @@
 #define __XEN_DOMAIN_H__
 
 #include <public/xen.h>
+#include <asm/domain.h>
 
 typedef union {
     struct vcpu_guest_context *nat;
@@ -62,6 +63,9 @@ void arch_vcpu_reset(struct vcpu *v);
 bool_t domctl_lock_acquire(void);
 void domctl_lock_release(void);
 
+/* Continue the current hypercall via func(data) on specified cpu. */
+int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);
+
 extern unsigned int xen_processor_pmbits;
 
 #endif /* __XEN_DOMAIN_H__ */
--- a/xen/include/xen/sched.h	Wed Apr 14 10:44:29 2010 +0100
+++ b/xen/include/xen/sched.h	Wed Apr 14 11:29:05 2010 +0100
@@ -15,7 +15,7 @@
 #include <xen/timer.h>
 #include <xen/grant_table.h>
 #include <xen/rangeset.h>
-#include <asm/domain.h>
+#include <xen/domain.h>
 #include <xen/xenoprof.h>
 #include <xen/rcupdate.h>
 #include <xen/irq.h>
@@ -132,8 +132,6 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
-    /* VCPU affinity is temporarily locked from controller changes? */
-    bool_t           affinity_locked;
 
     /*
      * > 0: a single port is being polled;
@@ -157,6 +155,9 @@ struct vcpu
     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_t        vcpu_dirty_cpumask;
 
+    /* Tasklet for continue_hypercall_on_cpu(). */
+    struct tasklet   continue_hypercall_tasklet;
+
     struct arch_vcpu arch;
 };
 
@@ -581,9 +582,6 @@ void cpu_init(void);
 void vcpu_force_reschedule(struct vcpu *v);
 void cpu_disable_scheduler(void);
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
-int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
-int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity);
-void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);