debuggers.hg

changeset 22813:7f6d529b5f4f

Merge
author Ian Jackson <Ian.Jackson@eu.citrix.com>
date Mon Jan 17 17:24:21 2011 +0000 (2011-01-17)
parents b7b29f51205f 75b6287626ee
children f8d801e5573e
files
line diff
     1.1 --- a/tools/firmware/hvmloader/util.c	Mon Jan 17 17:18:38 2011 +0000
     1.2 +++ b/tools/firmware/hvmloader/util.c	Mon Jan 17 17:24:21 2011 +0000
     1.3 @@ -425,10 +425,10 @@ static char *printnum(char *p, unsigned 
     1.4  
     1.5  static void _doprint(void (*put)(char), const char *fmt, va_list ap)
     1.6  {
     1.7 -    register char *str, c;
     1.8 +    char *str, c;
     1.9      int lflag, zflag, nflag;
    1.10      char buffer[17];
    1.11 -    unsigned value;
    1.12 +    unsigned long value;
    1.13      int i, slen, pad;
    1.14  
    1.15      for ( ; *fmt != '\0'; fmt++ )
    1.16 @@ -457,29 +457,40 @@ static void _doprint(void (*put)(char), 
    1.17              lflag = 1;
    1.18              c = *++fmt;
    1.19          }
    1.20 -        if ( (c == 'd') || (c == 'u') || (c == 'o') || (c == 'x') )
    1.21 +        if ( (c == 'd') || (c == 'u') || (c == 'o') ||
    1.22 +             (c == 'x') || (c == 'X') )
    1.23          {
    1.24              if ( lflag )
    1.25 -                value = va_arg(ap, unsigned);
    1.26 +            {
    1.27 +                value = va_arg(ap, unsigned long);
    1.28 +                if ( (c == 'd') && ((long)value < 0) )
    1.29 +                {
    1.30 +                    value = -value;
    1.31 +                    put('-');
    1.32 +                }
    1.33 +            }
    1.34              else
    1.35 -                value = (unsigned) va_arg(ap, unsigned int);
    1.36 +            {
    1.37 +                value = va_arg(ap, unsigned int);
    1.38 +                if ( (c == 'd') && ((int)value < 0) )
    1.39 +                {
    1.40 +                    value = -(int)value;
    1.41 +                    put('-');
    1.42 +                }
    1.43 +            }
    1.44              str = buffer;
    1.45              printnum(str, value,
    1.46 -                     c == 'o' ? 8 : (c == 'x' ? 16 : 10));
    1.47 -            goto printn;
    1.48 -        }
    1.49 -        else if ( (c == 'O') || (c == 'D') || (c == 'X') )
    1.50 -        {
    1.51 -            value = va_arg(ap, unsigned);
    1.52 -            str = buffer;
    1.53 -            printnum(str, value,
    1.54 -                     c == 'O' ? 8 : (c == 'X' ? 16 : 10));
    1.55 -        printn:
    1.56 +                     c == 'o' ? 8 : ((c == 'x') || (c == 'X') ? 16 : 10));
    1.57              slen = strlen(str);
    1.58              for ( i = pad - slen; i > 0; i-- )
    1.59                  put(zflag ? '0' : ' ');
    1.60              while ( *str )
    1.61 -                put(*str++);
    1.62 +            {
    1.63 +                char ch = *str++;
    1.64 +                if ( (ch >= 'a') && (c == 'X') )
    1.65 +                    ch += 'A'-'a';
    1.66 +                put(ch);
    1.67 +            }
    1.68          }
    1.69          else if ( c == 's' )
    1.70          {
     2.1 --- a/tools/libxc/xc_hvm_build.c	Mon Jan 17 17:18:38 2011 +0000
     2.2 +++ b/tools/libxc/xc_hvm_build.c	Mon Jan 17 17:24:21 2011 +0000
     2.3 @@ -431,8 +431,9 @@ int xc_hvm_build(xc_interface *xch,
     2.4  /* xc_hvm_build_target_mem: 
     2.5   * Create a domain for a pre-ballooned virtualized Linux, using
     2.6   * files/filenames.  If target < memsize, domain is created with
     2.7 - * memsize pages marked populate-on-demand, and with a PoD cache size
     2.8 - * of target.  If target == memsize, pages are populated normally.
     2.9 + * memsize pages marked populate-on-demand, 
    2.10 + * calculating pod cache size based on target.
    2.11 + * If target == memsize, pages are populated normally.
    2.12   */
    2.13  int xc_hvm_build_target_mem(xc_interface *xch,
    2.14                             uint32_t domid,
     3.1 --- a/tools/libxc/xenctrl_osdep_ENOSYS.c	Mon Jan 17 17:18:38 2011 +0000
     3.2 +++ b/tools/libxc/xenctrl_osdep_ENOSYS.c	Mon Jan 17 17:24:21 2011 +0000
     3.3 @@ -27,10 +27,10 @@ static int ENOSYS_privcmd_close(xc_inter
     3.4  
     3.5  static int ENOSYS_privcmd_hypercall(xc_interface *xch, xc_osdep_handle h, privcmd_hypercall_t *hypercall)
     3.6  {
     3.7 -    IPRINTF(xch, "ENOSYS_privcmd %p: hypercall: %02lld(%#llx,%#llx,%#llx,%#llx,%#llx,%#llx)\n",
     3.8 +    IPRINTF(xch, "ENOSYS_privcmd %p: hypercall: %02lld(%#llx,%#llx,%#llx,%#llx,%#llx)\n",
     3.9              h, hypercall->op,
    3.10              hypercall->arg[0], hypercall->arg[1], hypercall->arg[2],
    3.11 -            hypercall->arg[3], hypercall->arg[4], hypercall->arg[5]);
    3.12 +            hypercall->arg[3], hypercall->arg[4]);
    3.13      return -ENOSYS;
    3.14  }
    3.15  
     4.1 --- a/tools/misc/xen-hptool.c	Mon Jan 17 17:18:38 2011 +0000
     4.2 +++ b/tools/misc/xen-hptool.c	Mon Jan 17 17:24:21 2011 +0000
     4.3 @@ -2,6 +2,7 @@
     4.4  #include <xc_private.h>
     4.5  #include <xc_core.h>
     4.6  #include <errno.h>
     4.7 +#include <unistd.h>
     4.8  
     4.9  #define ARRAY_SIZE(a) (sizeof (a) / sizeof ((a)[0]))
    4.10  
    4.11 @@ -241,6 +242,20 @@ static int hp_mem_offline_func(int argc,
    4.12      return ret;
    4.13  }
    4.14  
    4.15 +static int exec_cpu_hp_fn(int (*hp_fn)(xc_interface *, int), int cpu)
    4.16 +{
    4.17 +    int ret;
    4.18 +
    4.19 +    for ( ; ; )
    4.20 +    {
    4.21 +        ret = (*hp_fn)(xch, cpu);
    4.22 +        if ( (ret >= 0) || (errno != EBUSY) )
    4.23 +            break;
    4.24 +        usleep(100000); /* 100ms */
    4.25 +    }
    4.26 +
    4.27 +    return ret;
    4.28 +}
    4.29  
    4.30  static int hp_cpu_online_func(int argc, char *argv[])
    4.31  {
    4.32 @@ -254,7 +269,7 @@ static int hp_cpu_online_func(int argc, 
    4.33  
    4.34      cpu = atoi(argv[0]);
    4.35      printf("Prepare to online CPU %d\n", cpu);
    4.36 -    ret = xc_cpu_online(xch, cpu);
    4.37 +    ret = exec_cpu_hp_fn(xc_cpu_online, cpu);
    4.38      if (ret < 0)
    4.39          fprintf(stderr, "CPU %d online failed (error %d: %s)\n",
    4.40                  cpu, errno, strerror(errno));
    4.41 @@ -275,7 +290,7 @@ static int hp_cpu_offline_func(int argc,
    4.42      }
    4.43      cpu = atoi(argv[0]);
    4.44      printf("Prepare to offline CPU %d\n", cpu);
    4.45 -    ret = xc_cpu_offline(xch, cpu);
    4.46 +    ret = exec_cpu_hp_fn(xc_cpu_offline, cpu);
    4.47      if (ret < 0)
    4.48          fprintf(stderr, "CPU %d offline failed (error %d: %s)\n",
    4.49                  cpu, errno, strerror(errno));
     5.1 --- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c	Mon Jan 17 17:18:38 2011 +0000
     5.2 +++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c	Mon Jan 17 17:24:21 2011 +0000
     5.3 @@ -377,18 +377,13 @@ static int __devinit platform_pci_init(s
     5.4  		return -ENOENT;
     5.5  	}
     5.6  
     5.7 -	if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) {
     5.8 -		printk(KERN_ERR ":MEM I/O resource 0x%lx @ 0x%lx busy\n",
     5.9 -		       mmio_addr, mmio_len);
    5.10 -		return -EBUSY;
    5.11 -	}
    5.12 +	ret = pci_request_region(pdev, 1, DRV_NAME);
    5.13 +	if (ret < 0)
    5.14 +		return ret;
    5.15  
    5.16 -	if (request_region(ioaddr, iolen, DRV_NAME) == NULL) {
    5.17 -		printk(KERN_ERR DRV_NAME ":I/O resource 0x%lx @ 0x%lx busy\n",
    5.18 -		       iolen, ioaddr);
    5.19 -		release_mem_region(mmio_addr, mmio_len);
    5.20 -		return -EBUSY;
    5.21 -	}
    5.22 +	ret = pci_request_region(pdev, 0, DRV_NAME);
    5.23 +	if (ret < 0)
    5.24 +		goto mem_out;
    5.25  
    5.26  	platform_mmio = mmio_addr;
    5.27  	platform_mmiolen = mmio_len;
    5.28 @@ -424,8 +419,9 @@ static int __devinit platform_pci_init(s
    5.29  
    5.30   out:
    5.31  	if (ret) {
    5.32 -		release_mem_region(mmio_addr, mmio_len);
    5.33 -		release_region(ioaddr, iolen);
    5.34 +		pci_release_region(pdev, 0);
    5.35 +mem_out:
    5.36 +		pci_release_region(pdev, 1);
    5.37  	}
    5.38  
    5.39  	return ret;
     6.1 --- a/xen/arch/ia64/xen/xensetup.c	Mon Jan 17 17:18:38 2011 +0000
     6.2 +++ b/xen/arch/ia64/xen/xensetup.c	Mon Jan 17 17:24:21 2011 +0000
     6.3 @@ -606,7 +606,11 @@ printk("num_online_cpus=%d, max_cpus=%d\
     6.4          if ( num_online_cpus() >= max_cpus )
     6.5              break;
     6.6          if ( !cpu_online(i) )
     6.7 -            cpu_up(i);
     6.8 +        {
     6.9 +            int ret = cpu_up(i);
    6.10 +            if ( ret != 0 )
    6.11 +                printk("Failed to bring up CPU %u (error %d)\n", i, ret);
    6.12 +        }
    6.13      }
    6.14  
    6.15      local_irq_disable();
     7.1 --- a/xen/arch/x86/acpi/power.c	Mon Jan 17 17:18:38 2011 +0000
     7.2 +++ b/xen/arch/x86/acpi/power.c	Mon Jan 17 17:24:21 2011 +0000
     7.3 @@ -206,6 +206,7 @@ static int enter_state(u32 state)
     7.4   enable_cpu:
     7.5      cpufreq_add_cpu(0);
     7.6      microcode_resume_cpu(0);
     7.7 +    rcu_barrier();
     7.8      mtrr_aps_sync_begin();
     7.9      enable_nonboot_cpus();
    7.10      mtrr_aps_sync_end();
     8.1 --- a/xen/arch/x86/acpi/suspend.c	Mon Jan 17 17:18:38 2011 +0000
     8.2 +++ b/xen/arch/x86/acpi/suspend.c	Mon Jan 17 17:24:21 2011 +0000
     8.3 @@ -24,8 +24,7 @@ static uint16_t saved_segs[4];
     8.4  
     8.5  void save_rest_processor_state(void)
     8.6  {
     8.7 -    if ( !is_idle_vcpu(current) )
     8.8 -        unlazy_fpu(current);
     8.9 +    save_init_fpu(current);
    8.10  
    8.11  #if defined(CONFIG_X86_64)
    8.12      asm volatile (
     9.1 --- a/xen/arch/x86/domain.c	Mon Jan 17 17:18:38 2011 +0000
     9.2 +++ b/xen/arch/x86/domain.c	Mon Jan 17 17:24:21 2011 +0000
     9.3 @@ -1384,7 +1384,7 @@ static void __context_switch(void)
     9.4          memcpy(&p->arch.guest_context.user_regs,
     9.5                 stack_regs,
     9.6                 CTXT_SWITCH_STACK_BYTES);
     9.7 -        unlazy_fpu(p);
     9.8 +        save_init_fpu(p);
     9.9          p->arch.ctxt_switch_from(p);
    9.10      }
    9.11  
    10.1 --- a/xen/arch/x86/hvm/hvm.c	Mon Jan 17 17:18:38 2011 +0000
    10.2 +++ b/xen/arch/x86/hvm/hvm.c	Mon Jan 17 17:24:21 2011 +0000
    10.3 @@ -1994,11 +1994,20 @@ static enum hvm_copy_result __hvm_copy(
    10.4      int count, todo = size;
    10.5  
    10.6      /*
    10.7 +     * XXX Disable for 4.1.0: PV-on-HVM drivers will do grant-table ops
    10.8 +     * such as query_size. Grant-table code currently does copy_to/from_guest
    10.9 +     * accesses under the big per-domain lock, which this test would disallow.
   10.10 +     * The test is not needed until we implement sleeping-on-waitqueue when
   10.11 +     * we access a paged-out frame, and that's post 4.1.0 now.
   10.12 +     */
   10.13 +#if 0
   10.14 +    /*
   10.15       * If the required guest memory is paged out, this function may sleep.
   10.16       * Hence we bail immediately if called from atomic context.
   10.17       */
   10.18      if ( in_atomic() )
   10.19          return HVMCOPY_unhandleable;
   10.20 +#endif
   10.21  
   10.22      while ( todo > 0 )
   10.23      {
    11.1 --- a/xen/arch/x86/i387.c	Mon Jan 17 17:18:38 2011 +0000
    11.2 +++ b/xen/arch/x86/i387.c	Mon Jan 17 17:24:21 2011 +0000
    11.3 @@ -16,18 +16,101 @@
    11.4  #include <asm/i387.h>
    11.5  #include <asm/asm_defns.h>
    11.6  
    11.7 -void init_fpu(void)
    11.8 +static bool_t __read_mostly cpu_has_xsaveopt;
    11.9 +
   11.10 +static void xsave(struct vcpu *v)
   11.11 +{
   11.12 +    struct xsave_struct *ptr = v->arch.xsave_area;
   11.13 +
   11.14 +    asm volatile (
   11.15 +        ".byte " REX_PREFIX "0x0f,0xae,0x27"
   11.16 +        :
   11.17 +        : "a" (-1), "d" (-1), "D"(ptr)
   11.18 +        : "memory" );
   11.19 +}
   11.20 +
   11.21 +static void xsaveopt(struct vcpu *v)
   11.22 +{
   11.23 +    struct xsave_struct *ptr = v->arch.xsave_area;
   11.24 +
   11.25 +    asm volatile (
   11.26 +        ".byte " REX_PREFIX "0x0f,0xae,0x37"
   11.27 +        :
   11.28 +        : "a" (-1), "d" (-1), "D"(ptr)
   11.29 +        : "memory" );
   11.30 +}
   11.31 +
   11.32 +static void xrstor(struct vcpu *v)
   11.33 +{
   11.34 +    struct xsave_struct *ptr = v->arch.xsave_area;
   11.35 +
   11.36 +    asm volatile (
   11.37 +        ".byte " REX_PREFIX "0x0f,0xae,0x2f"
   11.38 +        :
   11.39 +        : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr) );
   11.40 +}
   11.41 +
   11.42 +static void load_mxcsr(unsigned long val)
   11.43 +{
   11.44 +    val &= 0xffbf;
   11.45 +    asm volatile ( "ldmxcsr %0" : : "m" (val) );
   11.46 +}
   11.47 +
   11.48 +static void init_fpu(void);
   11.49 +static void restore_fpu(struct vcpu *v);
   11.50 +
   11.51 +void setup_fpu(struct vcpu *v)
   11.52 +{
   11.53 +    ASSERT(!is_idle_vcpu(v));
   11.54 +
   11.55 +    /* Avoid recursion. */
   11.56 +    clts();
   11.57 +
   11.58 +    if ( v->fpu_dirtied )
   11.59 +        return;
   11.60 +
   11.61 +    if ( cpu_has_xsave )
   11.62 +    {
   11.63 +        /*
   11.64 +         * XCR0 normally represents what guest OS set. In case of Xen itself, 
   11.65 +         * we set all supported feature mask before doing save/restore.
   11.66 +         */
   11.67 +        set_xcr0(v->arch.xcr0_accum);
   11.68 +        xrstor(v);
   11.69 +        set_xcr0(v->arch.xcr0);
   11.70 +    }
   11.71 +    else if ( v->fpu_initialised )
   11.72 +    {
   11.73 +        restore_fpu(v);
   11.74 +    }
   11.75 +    else
   11.76 +    {
   11.77 +        init_fpu();
   11.78 +    }
   11.79 +
   11.80 +    v->fpu_initialised = 1;
   11.81 +    v->fpu_dirtied = 1;
   11.82 +}
   11.83 +
   11.84 +static void init_fpu(void)
   11.85  {
   11.86      asm volatile ( "fninit" );
   11.87      if ( cpu_has_xmm )
   11.88          load_mxcsr(0x1f80);
   11.89 -    current->fpu_initialised = 1;
   11.90  }
   11.91  
   11.92  void save_init_fpu(struct vcpu *v)
   11.93  {
   11.94 -    unsigned long cr0 = read_cr0();
   11.95 -    char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;
   11.96 +    unsigned long cr0;
   11.97 +    char *fpu_ctxt;
   11.98 +
   11.99 +    if ( !v->fpu_dirtied )
  11.100 +        return;
  11.101 +
  11.102 +    ASSERT(!is_idle_vcpu(v));
  11.103 +
  11.104 +    cr0 = read_cr0();
  11.105 +    fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;
  11.106  
  11.107      /* This can happen, if a paravirtualised guest OS has set its CR0.TS. */
  11.108      if ( cr0 & X86_CR0_TS )
  11.109 @@ -91,7 +174,7 @@ void save_init_fpu(struct vcpu *v)
  11.110      write_cr0(cr0|X86_CR0_TS);
  11.111  }
  11.112  
  11.113 -void restore_fpu(struct vcpu *v)
  11.114 +static void restore_fpu(struct vcpu *v)
  11.115  {
  11.116      char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;
  11.117  
  11.118 @@ -138,6 +221,7 @@ void restore_fpu(struct vcpu *v)
  11.119  }
  11.120  
  11.121  #define XSTATE_CPUID 0xd
  11.122 +#define XSAVE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */
  11.123  
  11.124  /*
  11.125   * Maximum size (in byte) of the XSAVE/XRSTOR save area required by all
  11.126 @@ -152,32 +236,24 @@ u64 xfeature_mask;
  11.127  /* Cached xcr0 for fast read */
  11.128  DEFINE_PER_CPU(uint64_t, xcr0);
  11.129  
  11.130 -bool_t __read_mostly cpu_has_xsaveopt;
  11.131 -
  11.132  void xsave_init(void)
  11.133  {
  11.134      u32 eax, ebx, ecx, edx;
  11.135      int cpu = smp_processor_id();
  11.136      u32 min_size;
  11.137  
  11.138 -    if ( boot_cpu_data.cpuid_level < XSTATE_CPUID ) {
  11.139 -        printk(XENLOG_ERR "XSTATE_CPUID missing\n");
  11.140 +    if ( boot_cpu_data.cpuid_level < XSTATE_CPUID )
  11.141          return;
  11.142 -    }
  11.143  
  11.144      cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
  11.145  
  11.146 -    printk("%s: cpu%d: cntxt_max_size: 0x%x and states: %08x:%08x\n",
  11.147 -        __func__, cpu, ecx, edx, eax);
  11.148 -
  11.149 -    if ( ((eax & XSTATE_FP_SSE) != XSTATE_FP_SSE) ||
  11.150 -         ((eax & XSTATE_YMM) && !(eax & XSTATE_SSE)) )
  11.151 -    {
  11.152 -        BUG();
  11.153 -    }
  11.154 +    BUG_ON((eax & XSTATE_FP_SSE) != XSTATE_FP_SSE);
  11.155 +    BUG_ON((eax & XSTATE_YMM) && !(eax & XSTATE_SSE));
  11.156  
  11.157      /* FP/SSE, XSAVE.HEADER, YMM */
  11.158 -    min_size =  512 + 64 + ((eax & XSTATE_YMM) ? XSTATE_YMM_SIZE : 0);
  11.159 +    min_size =  XSAVE_AREA_MIN_SIZE;
  11.160 +    if ( eax & XSTATE_YMM )
  11.161 +        min_size += XSTATE_YMM_SIZE;
  11.162      BUG_ON(ecx < min_size);
  11.163  
  11.164      /*
  11.165 @@ -214,9 +290,11 @@ int xsave_alloc_save_area(struct vcpu *v
  11.166  {
  11.167      void *save_area;
  11.168  
  11.169 -    if ( !cpu_has_xsave )
  11.170 +    if ( !cpu_has_xsave || is_idle_vcpu(v) )
  11.171          return 0;
  11.172  
  11.173 +    BUG_ON(xsave_cntxt_size < XSAVE_AREA_MIN_SIZE);
  11.174 +
  11.175      /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
  11.176      save_area = _xmalloc(xsave_cntxt_size, 64);
  11.177      if ( save_area == NULL )
    12.1 --- a/xen/arch/x86/platform_hypercall.c	Mon Jan 17 17:18:38 2011 +0000
    12.2 +++ b/xen/arch/x86/platform_hypercall.c	Mon Jan 17 17:24:21 2011 +0000
    12.3 @@ -55,11 +55,9 @@ static long cpu_frequency_change_helper(
    12.4      return cpu_frequency_change(this_cpu(freq));
    12.5  }
    12.6  
    12.7 -static long cpu_down_helper(void *data)
    12.8 -{
    12.9 -    int cpu = (unsigned long)data;
   12.10 -    return cpu_down(cpu);
   12.11 -}
   12.12 +/* from sysctl.c */
   12.13 +long cpu_up_helper(void *data);
   12.14 +long cpu_down_helper(void *data);
   12.15  
   12.16  ret_t do_platform_op(XEN_GUEST_HANDLE(xen_platform_op_t) u_xenpf_op)
   12.17  {
   12.18 @@ -443,40 +441,43 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
   12.19  
   12.20      case XENPF_cpu_online:
   12.21      {
   12.22 -        int cpu;
   12.23 +        int cpu = op->u.cpu_ol.cpuid;
   12.24  
   12.25 -        cpu = op->u.cpu_ol.cpuid;
   12.26 -        if (!cpu_present(cpu))
   12.27 +        if ( !cpu_present(cpu) )
   12.28          {
   12.29              ret = -EINVAL;
   12.30              break;
   12.31          }
   12.32 -        else if (cpu_online(cpu))
   12.33 +
   12.34 +        if ( cpu_online(cpu) )
   12.35          {
   12.36              ret = 0;
   12.37              break;
   12.38          }
   12.39  
   12.40 -        ret = cpu_up(cpu);
   12.41 +        ret = continue_hypercall_on_cpu(
   12.42 +            0, cpu_up_helper, (void *)(unsigned long)cpu);
   12.43          break;
   12.44      }
   12.45  
   12.46      case XENPF_cpu_offline:
   12.47      {
   12.48 -        int cpu;
   12.49 +        int cpu = op->u.cpu_ol.cpuid;
   12.50  
   12.51 -        cpu = op->u.cpu_ol.cpuid;
   12.52 -        if (!cpu_present(cpu))
   12.53 +        if ( !cpu_present(cpu) )
   12.54          {
   12.55              ret = -EINVAL;
   12.56              break;
   12.57 -        } else if (!cpu_online(cpu))
   12.58 +        }
   12.59 +
   12.60 +        if ( !cpu_online(cpu) )
   12.61          {
   12.62              ret = 0;
   12.63              break;
   12.64          }
   12.65 +
   12.66          ret = continue_hypercall_on_cpu(
   12.67 -          0, cpu_down_helper, (void *)(unsigned long)cpu);
   12.68 +            0, cpu_down_helper, (void *)(unsigned long)cpu);
   12.69          break;
   12.70      }
   12.71      break;
    13.1 --- a/xen/arch/x86/setup.c	Mon Jan 17 17:18:38 2011 +0000
    13.2 +++ b/xen/arch/x86/setup.c	Mon Jan 17 17:24:21 2011 +0000
    13.3 @@ -1246,7 +1246,11 @@ void __init __start_xen(unsigned long mb
    13.4          numa_add_cpu(i);        
    13.5  
    13.6          if ( (num_online_cpus() < max_cpus) && !cpu_online(i) )
    13.7 -            cpu_up(i);
    13.8 +        {
    13.9 +            int ret = cpu_up(i);
   13.10 +            if ( ret != 0 )
   13.11 +                printk("Failed to bring up CPU %u (error %d)\n", i, ret);
   13.12 +        }
   13.13      }
   13.14  
   13.15      printk("Brought up %ld CPUs\n", (long)num_online_cpus());
    14.1 --- a/xen/arch/x86/sysctl.c	Mon Jan 17 17:18:38 2011 +0000
    14.2 +++ b/xen/arch/x86/sysctl.c	Mon Jan 17 17:24:21 2011 +0000
    14.3 @@ -30,10 +30,30 @@
    14.4  
    14.5  #define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
    14.6  
    14.7 -static long cpu_down_helper(void *data)
    14.8 +long cpu_up_helper(void *data)
    14.9  {
   14.10      int cpu = (unsigned long)data;
   14.11 -    return cpu_down(cpu);
   14.12 +    int ret = cpu_up(cpu);
   14.13 +    if ( ret == -EBUSY )
   14.14 +    {
   14.15 +        /* On EBUSY, flush RCU work and have one more go. */
   14.16 +        rcu_barrier();
   14.17 +        ret = cpu_up(cpu);
   14.18 +    }
   14.19 +    return ret;
   14.20 +}
   14.21 +
   14.22 +long cpu_down_helper(void *data)
   14.23 +{
   14.24 +    int cpu = (unsigned long)data;
   14.25 +    int ret = cpu_down(cpu);
   14.26 +    if ( ret == -EBUSY )
   14.27 +    {
   14.28 +        /* On EBUSY, flush RCU work and have one more go. */
   14.29 +        rcu_barrier();
   14.30 +        ret = cpu_down(cpu);
   14.31 +    }
   14.32 +    return ret;
   14.33  }
   14.34  
   14.35  extern int __node_distance(int a, int b);
   14.36 @@ -41,7 +61,7 @@ extern int __node_distance(int a, int b)
   14.37  long arch_do_sysctl(
   14.38      struct xen_sysctl *sysctl, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
   14.39  {
   14.40 -    long ret = 0, status;
   14.41 +    long ret = 0;
   14.42  
   14.43      switch ( sysctl->cmd )
   14.44      {
   14.45 @@ -167,41 +187,20 @@ long arch_do_sysctl(
   14.46      {
   14.47          unsigned int cpu = sysctl->u.cpu_hotplug.cpu;
   14.48  
   14.49 -        if (cpu_present(cpu)) {
   14.50 -            status = cpu_online(cpu) ? XEN_CPU_HOTPLUG_STATUS_ONLINE :
   14.51 -                XEN_CPU_HOTPLUG_STATUS_OFFLINE;
   14.52 -        } else {
   14.53 -            status = -EINVAL;
   14.54 -        }
   14.55 -
   14.56          switch ( sysctl->u.cpu_hotplug.op )
   14.57          {
   14.58          case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
   14.59 -            ret = cpu_up(cpu);
   14.60 -            /*
   14.61 -             * In the case of a true hotplug, this CPU wasn't present
   14.62 -             * before, so return the 'new' status for it.
   14.63 -             */
   14.64 -            if (ret == 0 && status == -EINVAL)
   14.65 -                status = XEN_CPU_HOTPLUG_STATUS_NEW;
   14.66 +            ret = continue_hypercall_on_cpu(
   14.67 +                0, cpu_up_helper, (void *)(unsigned long)cpu);
   14.68              break;
   14.69          case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
   14.70              ret = continue_hypercall_on_cpu(
   14.71                  0, cpu_down_helper, (void *)(unsigned long)cpu);
   14.72              break;
   14.73 -        case XEN_SYSCTL_CPU_HOTPLUG_STATUS:
   14.74 -            ret = 0;
   14.75 -            break;
   14.76          default:
   14.77              ret = -EINVAL;
   14.78              break;
   14.79          }
   14.80 -
   14.81 -        /*
   14.82 -         * If the operation was successful, return the old status.
   14.83 -         */
   14.84 -        if (ret >= 0)
   14.85 -            ret = status;
   14.86      }
   14.87      break;
   14.88  
    15.1 --- a/xen/arch/x86/x86_64/mmconfig-shared.c	Mon Jan 17 17:18:38 2011 +0000
    15.2 +++ b/xen/arch/x86/x86_64/mmconfig-shared.c	Mon Jan 17 17:24:21 2011 +0000
    15.3 @@ -26,7 +26,7 @@
    15.4  #include "mmconfig.h"
    15.5  
    15.6  static int __initdata known_bridge;
    15.7 -unsigned int __cpuinitdata pci_probe = PCI_PROBE_CONF1 | PCI_PROBE_MMCONF;
    15.8 +unsigned int pci_probe = PCI_PROBE_CONF1 | PCI_PROBE_MMCONF;
    15.9  
   15.10  static void __init parse_mmcfg(char *s)
   15.11  {
    16.1 --- a/xen/common/cpu.c	Mon Jan 17 17:18:38 2011 +0000
    16.2 +++ b/xen/common/cpu.c	Mon Jan 17 17:24:21 2011 +0000
    16.3 @@ -108,7 +108,6 @@ int cpu_down(unsigned int cpu)
    16.4   fail:
    16.5      notifier_rc = notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu, &nb);
    16.6      BUG_ON(notifier_rc != NOTIFY_DONE);
    16.7 -    printk("Failed to take down CPU %u (error %d)\n", cpu, err);
    16.8      cpu_hotplug_done();
    16.9      return err;
   16.10  }
   16.11 @@ -150,7 +149,6 @@ int cpu_up(unsigned int cpu)
   16.12   fail:
   16.13      notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu, &nb);
   16.14      BUG_ON(notifier_rc != NOTIFY_DONE);
   16.15 -    printk("Failed to bring up CPU %u (error %d)\n", cpu, err);
   16.16      cpu_hotplug_done();
   16.17      return err;
   16.18  }
    17.1 --- a/xen/common/rcupdate.c	Mon Jan 17 17:18:38 2011 +0000
    17.2 +++ b/xen/common/rcupdate.c	Mon Jan 17 17:24:21 2011 +0000
    17.3 @@ -44,6 +44,7 @@
    17.4  #include <xen/percpu.h>
    17.5  #include <xen/softirq.h>
    17.6  #include <xen/cpu.h>
    17.7 +#include <xen/stop_machine.h>
    17.8  
    17.9  /* Definition for rcupdate control block. */
   17.10  struct rcu_ctrlblk rcu_ctrlblk = {
   17.11 @@ -60,6 +61,49 @@ static int qhimark = 10000;
   17.12  static int qlowmark = 100;
   17.13  static int rsinterval = 1000;
   17.14  
   17.15 +struct rcu_barrier_data {
   17.16 +    struct rcu_head head;
   17.17 +    atomic_t *cpu_count;
   17.18 +};
   17.19 +
   17.20 +static void rcu_barrier_callback(struct rcu_head *head)
   17.21 +{
   17.22 +    struct rcu_barrier_data *data = container_of(
   17.23 +        head, struct rcu_barrier_data, head);
   17.24 +    atomic_inc(data->cpu_count);
   17.25 +}
   17.26 +
   17.27 +static int rcu_barrier_action(void *_cpu_count)
   17.28 +{
   17.29 +    struct rcu_barrier_data data = { .cpu_count = _cpu_count };
   17.30 +
   17.31 +    ASSERT(!local_irq_is_enabled());
   17.32 +    local_irq_enable();
   17.33 +
   17.34 +    /*
   17.35 +     * When callback is executed, all previously-queued RCU work on this CPU
   17.36 +     * is completed. When all CPUs have executed their callback, data.cpu_count
   17.37 +     * will have been incremented to include every online CPU.
   17.38 +     */
   17.39 +    call_rcu(&data.head, rcu_barrier_callback);
   17.40 +
   17.41 +    while ( atomic_read(data.cpu_count) != cpus_weight(cpu_online_map) )
   17.42 +    {
   17.43 +        process_pending_softirqs();
   17.44 +        cpu_relax();
   17.45 +    }
   17.46 +
   17.47 +    local_irq_disable();
   17.48 +
   17.49 +    return 0;
   17.50 +}
   17.51 +
   17.52 +int rcu_barrier(void)
   17.53 +{
   17.54 +    atomic_t cpu_count = ATOMIC_INIT(0);
   17.55 +    return stop_machine_run(rcu_barrier_action, &cpu_count, NR_CPUS);
   17.56 +}
   17.57 +
   17.58  static void force_quiescent_state(struct rcu_data *rdp,
   17.59                                    struct rcu_ctrlblk *rcp)
   17.60  {
    18.1 --- a/xen/common/stop_machine.c	Mon Jan 17 17:18:38 2011 +0000
    18.2 +++ b/xen/common/stop_machine.c	Mon Jan 17 17:24:21 2011 +0000
    18.3 @@ -61,6 +61,10 @@ static void stopmachine_set_state(enum s
    18.4      atomic_set(&stopmachine_data.done, 0);
    18.5      smp_wmb();
    18.6      stopmachine_data.state = state;
    18.7 +}
    18.8 +
    18.9 +static void stopmachine_wait_state(void)
   18.10 +{
   18.11      while ( atomic_read(&stopmachine_data.done) != stopmachine_data.nr_cpus )
   18.12          cpu_relax();
   18.13  }
   18.14 @@ -101,16 +105,20 @@ int stop_machine_run(int (*fn)(void *), 
   18.15          tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);
   18.16  
   18.17      stopmachine_set_state(STOPMACHINE_PREPARE);
   18.18 +    stopmachine_wait_state();
   18.19  
   18.20      local_irq_disable();
   18.21      stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
   18.22 +    stopmachine_wait_state();
   18.23  
   18.24 -    if ( cpu == smp_processor_id() )
   18.25 +    stopmachine_set_state(STOPMACHINE_INVOKE);
   18.26 +    if ( (cpu == smp_processor_id()) || (cpu == NR_CPUS) )
   18.27          stopmachine_data.fn_result = (*fn)(data);
   18.28 -    stopmachine_set_state(STOPMACHINE_INVOKE);
   18.29 +    stopmachine_wait_state();
   18.30      ret = stopmachine_data.fn_result;
   18.31  
   18.32      stopmachine_set_state(STOPMACHINE_EXIT);
   18.33 +    stopmachine_wait_state();
   18.34      local_irq_enable();
   18.35  
   18.36      spin_unlock(&stopmachine_lock);
   18.37 @@ -140,7 +148,8 @@ static void stopmachine_action(unsigned 
   18.38              local_irq_disable();
   18.39              break;
   18.40          case STOPMACHINE_INVOKE:
   18.41 -            if ( stopmachine_data.fn_cpu == smp_processor_id() )
   18.42 +            if ( (stopmachine_data.fn_cpu == smp_processor_id()) ||
   18.43 +                 (stopmachine_data.fn_cpu == NR_CPUS) )
   18.44                  stopmachine_data.fn_result =
   18.45                      stopmachine_data.fn(stopmachine_data.fn_data);
   18.46              break;
    19.1 --- a/xen/drivers/passthrough/vtd/extern.h	Mon Jan 17 17:18:38 2011 +0000
    19.2 +++ b/xen/drivers/passthrough/vtd/extern.h	Mon Jan 17 17:24:21 2011 +0000
    19.3 @@ -86,5 +86,6 @@ void __init platform_quirks_init(void);
    19.4  void vtd_ops_preamble_quirk(struct iommu* iommu);
    19.5  void vtd_ops_postamble_quirk(struct iommu* iommu);
    19.6  void me_wifi_quirk(struct domain *domain, u8 bus, u8 devfn, int map);
    19.7 +void pci_vtd_quirk(struct pci_dev *pdev);
    19.8  
    19.9  #endif // _VTD_EXTERN_H_
    20.1 --- a/xen/drivers/passthrough/vtd/iommu.c	Mon Jan 17 17:18:38 2011 +0000
    20.2 +++ b/xen/drivers/passthrough/vtd/iommu.c	Mon Jan 17 17:24:21 2011 +0000
    20.3 @@ -1773,9 +1773,13 @@ void iommu_set_pgd(struct domain *d)
    20.4      ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled );
    20.5  
    20.6      iommu_hap_pt_share = vtd_ept_share();
    20.7 +    if ( !iommu_hap_pt_share )
    20.8 +        goto out;
    20.9 +
   20.10      pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
   20.11      hd->pgd_maddr = pagetable_get_paddr(pagetable_from_mfn(pgd_mfn));
   20.12  
   20.13 +out:
   20.14      dprintk(XENLOG_INFO VTDPREFIX,
   20.15              "VT-d page table %s with EPT table\n",
   20.16              iommu_hap_pt_share ? "shares" : "not sharing");
   20.17 @@ -1910,6 +1914,7 @@ static void __init setup_dom0_devices(st
   20.18              list_add(&pdev->domain_list, &d->arch.pdev_list);
   20.19              domain_context_mapping(d, pdev->bus, pdev->devfn);
   20.20              pci_enable_acs(pdev);
   20.21 +            pci_vtd_quirk(pdev);
   20.22          }
   20.23      }
   20.24      spin_unlock(&pcidevs_lock);
    21.1 --- a/xen/drivers/passthrough/vtd/quirks.c	Mon Jan 17 17:18:38 2011 +0000
    21.2 +++ b/xen/drivers/passthrough/vtd/quirks.c	Mon Jan 17 17:24:21 2011 +0000
    21.3 @@ -47,11 +47,13 @@
    21.4  #define IS_CTG(id)    (id == 0x2a408086)
    21.5  #define IS_ILK(id)    (id == 0x00408086 || id == 0x00448086 || id== 0x00628086 || id == 0x006A8086)
    21.6  #define IS_CPT(id)    (id == 0x01008086 || id == 0x01048086)
    21.7 +#define IS_SNB_GFX(id) (id == 0x01068086 || id == 0x01168086 || id == 0x01268086 || id == 0x01028086 || id == 0x01128086 || id == 0x01228086 || id == 0x010A8086)
    21.8  
    21.9  u32 ioh_id;
   21.10  u32 igd_id;
   21.11  bool_t rwbf_quirk;
   21.12  static int is_cantiga_b3;
   21.13 +static int is_snb_gfx;
   21.14  static u8 *igd_reg_va;
   21.15  
   21.16  /*
   21.17 @@ -92,6 +94,12 @@ static void cantiga_b3_errata_init(void)
   21.18          is_cantiga_b3 = 1;
   21.19  }
   21.20  
   21.21 +/* check for Sandybridge IGD device IDs */
   21.22 +static void snb_errata_init(void)
   21.23 +{
   21.24 +    is_snb_gfx = IS_SNB_GFX(igd_id);
   21.25 +}
   21.26 +
   21.27  /*
   21.28   * QUIRK to workaround Cantiga IGD VT-d low power errata.
   21.29   * This errata impacts IGD assignment on Cantiga systems
   21.30 @@ -104,12 +112,15 @@ static void cantiga_b3_errata_init(void)
   21.31  /*
   21.32   * map IGD MMIO+0x2000 page to allow Xen access to IGD 3D register.
   21.33   */
   21.34 -static void map_igd_reg(void)
   21.35 +static void *map_igd_reg(void)
   21.36  {
   21.37      u64 igd_mmio, igd_reg;
   21.38  
   21.39 -    if ( !is_cantiga_b3 || igd_reg_va != NULL )
   21.40 -        return;
   21.41 +    if ( !is_cantiga_b3 && !is_snb_gfx )
   21.42 +        return NULL;
   21.43 +
   21.44 +    if ( igd_reg_va )
   21.45 +        return igd_reg_va;
   21.46  
   21.47      /* get IGD mmio address in PCI BAR */
   21.48      igd_mmio = ((u64)pci_conf_read32(0, IGD_DEV, 0, 0x14) << 32) +
   21.49 @@ -125,6 +136,7 @@ static void map_igd_reg(void)
   21.50  #else
   21.51      igd_reg_va = ioremap_nocache(igd_reg, 0x100);
   21.52  #endif
   21.53 +    return igd_reg_va;
   21.54  }
   21.55  
   21.56  /*
   21.57 @@ -138,6 +150,9 @@ static int cantiga_vtd_ops_preamble(stru
   21.58      if ( !is_igd_drhd(drhd) || !is_cantiga_b3 )
   21.59          return 0;
   21.60  
   21.61 +    if ( !map_igd_reg() )
   21.62 +        return 0;
   21.63 +
   21.64      /*
   21.65       * read IGD register at IGD MMIO + 0x20A4 to force IGD
   21.66       * to exit low power state.  Since map_igd_reg()
   21.67 @@ -148,11 +163,64 @@ static int cantiga_vtd_ops_preamble(stru
   21.68  }
   21.69  
   21.70  /*
   21.71 + * Sandybridge RC6 power management inhibit state erratum.
   21.72 + * This can cause high power consumption.
   21.73 + * Workaround is to prevent graphics from getting into RC6
   21.74 + * state when doing VT-d IOTLB operations, do the VT-d
   21.75 + * IOTLB operation, and then re-enable RC6 state.
   21.76 + */
   21.77 +static void snb_vtd_ops_preamble(struct iommu* iommu)
   21.78 +{
   21.79 +    struct intel_iommu *intel = iommu->intel;
   21.80 +    struct acpi_drhd_unit *drhd = intel ? intel->drhd : NULL;
   21.81 +    s_time_t start_time;
   21.82 +
   21.83 +    if ( !is_igd_drhd(drhd) || !is_snb_gfx )
   21.84 +        return;
   21.85 +
   21.86 +    if ( !map_igd_reg() )
   21.87 +        return;
   21.88 +
   21.89 +    *((volatile u32 *)(igd_reg_va + 0x54)) = 0x000FFFFF;
   21.90 +    *((volatile u32 *)(igd_reg_va + 0x700)) = 0;
   21.91 +
   21.92 +    start_time = NOW();
   21.93 +    while ( (*((volatile u32 *)(igd_reg_va + 0x2AC)) & 0xF) != 0 )
   21.94 +    {
   21.95 +        if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT )
   21.96 +        {
   21.97 +            dprintk(XENLOG_INFO VTDPREFIX,
   21.98 +                    "snb_vtd_ops_preamble: failed to disable idle handshake\n");
   21.99 +            break;
  21.100 +        }
  21.101 +        cpu_relax();
  21.102 +    }
  21.103 +
  21.104 +    *((volatile u32*)(igd_reg_va + 0x50)) = 0x10001;
  21.105 +}
  21.106 +
  21.107 +static void snb_vtd_ops_postamble(struct iommu* iommu)
  21.108 +{
  21.109 +    struct intel_iommu *intel = iommu->intel;
  21.110 +    struct acpi_drhd_unit *drhd = intel ? intel->drhd : NULL;
  21.111 +
  21.112 +    if ( !is_igd_drhd(drhd) || !is_snb_gfx )
  21.113 +        return;
  21.114 +
  21.115 +    if ( !map_igd_reg() )
  21.116 +        return;
  21.117 +
  21.118 +    *((volatile u32 *)(igd_reg_va + 0x54)) = 0xA;
  21.119 +    *((volatile u32 *)(igd_reg_va + 0x50)) = 0x10000;
  21.120 +}
  21.121 +
  21.122 +/*
  21.123   * call before VT-d translation enable and IOTLB flush operations.
  21.124   */
  21.125  void vtd_ops_preamble_quirk(struct iommu* iommu)
  21.126  {
  21.127      cantiga_vtd_ops_preamble(iommu);
  21.128 +    snb_vtd_ops_preamble(iommu);
  21.129  }
  21.130  
  21.131  /*
  21.132 @@ -160,7 +228,7 @@ void vtd_ops_preamble_quirk(struct iommu
  21.133   */
  21.134  void vtd_ops_postamble_quirk(struct iommu* iommu)
  21.135  {
  21.136 -    return;
  21.137 +    snb_vtd_ops_postamble(iommu);
  21.138  }
  21.139  
  21.140  /* initialize platform identification flags */
  21.141 @@ -179,6 +247,8 @@ void __init platform_quirks_init(void)
  21.142      /* initialize cantiga B3 identification */
  21.143      cantiga_b3_errata_init();
  21.144  
  21.145 +    snb_errata_init();
  21.146 +
  21.147      /* ioremap IGD MMIO+0x2000 page */
  21.148      map_igd_reg();
  21.149  }
  21.150 @@ -250,11 +320,14 @@ void me_wifi_quirk(struct domain *domain
  21.151          id = pci_conf_read32(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 0);
  21.152          switch (id)
  21.153          {
  21.154 -            case 0x00878086:
  21.155 +            case 0x00878086:        /* Kilmer Peak */
  21.156              case 0x00898086:
  21.157 -            case 0x00828086:
  21.158 +            case 0x00828086:        /* Taylor Peak */
  21.159              case 0x00858086:
  21.160 -            case 0x42388086:
  21.161 +            case 0x008F8086:        /* Rainbow Peak */
  21.162 +            case 0x00908086:
  21.163 +            case 0x00918086:
  21.164 +            case 0x42388086:        /* Puma Peak */
  21.165              case 0x422b8086:
  21.166              case 0x422c8086:
  21.167                  map_me_phantom_function(domain, 22, map);
  21.168 @@ -262,6 +335,26 @@ void me_wifi_quirk(struct domain *domain
  21.169              default:
  21.170                  break;
  21.171          }
  21.172 -
  21.173      }
  21.174  }
  21.175 +
  21.176 +/*
  21.177 + * Mask reporting Intel VT-d faults to IOH core logic:
  21.178 + *   - Some platforms escalate VT-d faults to platform errors
  21.179 + *   - This can cause system failure upon non-fatal VT-d faults
  21.180 + *   - Potential security issue if a malicious guest triggers VT-d faults
  21.181 + */
  21.182 +void pci_vtd_quirk(struct pci_dev *pdev)
  21.183 +{
  21.184 +    int bus = pdev->bus;
  21.185 +    int dev = PCI_SLOT(pdev->devfn);
  21.186 +    int func = PCI_FUNC(pdev->devfn);
  21.187 +    int id, val;
  21.188 +
  21.189 +    id = pci_conf_read32(bus, dev, func, 0);
  21.190 +    if ( id == 0x342e8086 || id == 0x3c288086 )
  21.191 +    {
  21.192 +        val = pci_conf_read32(bus, dev, func, 0x1AC);
  21.193 +        pci_conf_write32(bus, dev, func, 0x1AC, val | (1 << 31));
  21.194 +    }
  21.195 +}
    22.1 --- a/xen/include/asm-x86/i387.h	Mon Jan 17 17:18:38 2011 +0000
    22.2 +++ b/xen/include/asm-x86/i387.h	Mon Jan 17 17:24:21 2011 +0000
    22.3 @@ -16,7 +16,6 @@
    22.4  
    22.5  extern unsigned int xsave_cntxt_size;
    22.6  extern u64 xfeature_mask;
    22.7 -extern bool_t cpu_has_xsaveopt;
    22.8  
    22.9  void xsave_init(void);
   22.10  int xsave_alloc_save_area(struct vcpu *v);
   22.11 @@ -75,84 +74,7 @@ static inline uint64_t get_xcr0(void)
   22.12      return this_cpu(xcr0);
   22.13  }
   22.14  
   22.15 -static inline void xsave(struct vcpu *v)
   22.16 -{
   22.17 -    struct xsave_struct *ptr;
   22.18 -
   22.19 -    ptr =(struct xsave_struct *)v->arch.xsave_area;
   22.20 -
   22.21 -    asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
   22.22 -        :
   22.23 -        : "a" (-1), "d" (-1), "D"(ptr)
   22.24 -        : "memory");
   22.25 -}
   22.26 -
   22.27 -static inline void xsaveopt(struct vcpu *v)
   22.28 -{
   22.29 -    struct xsave_struct *ptr;
   22.30 -
   22.31 -    ptr =(struct xsave_struct *)v->arch.xsave_area;
   22.32 -
   22.33 -    asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x37"
   22.34 -        :
   22.35 -        : "a" (-1), "d" (-1), "D"(ptr)
   22.36 -        : "memory");
   22.37 -}
   22.38 -
   22.39 -static inline void xrstor(struct vcpu *v)
   22.40 -{
   22.41 -    struct xsave_struct *ptr;
   22.42 -
   22.43 -    ptr =(struct xsave_struct *)v->arch.xsave_area;
   22.44 -
   22.45 -    asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x2f"
   22.46 -        :
   22.47 -        : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr));
   22.48 -}
   22.49 -
   22.50 -extern void init_fpu(void);
   22.51 +extern void setup_fpu(struct vcpu *v);
   22.52  extern void save_init_fpu(struct vcpu *v);
   22.53 -extern void restore_fpu(struct vcpu *v);
   22.54 -
   22.55 -#define unlazy_fpu(v) do {                      \
   22.56 -    if ( (v)->fpu_dirtied )                     \
   22.57 -        save_init_fpu(v);                       \
   22.58 -} while ( 0 )
   22.59 -
   22.60 -#define load_mxcsr(val) do {                                    \
   22.61 -    unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf);    \
   22.62 -    __asm__ __volatile__ ( "ldmxcsr %0" : : "m" (__mxcsr) );    \
   22.63 -} while ( 0 )
   22.64 -
   22.65 -static inline void setup_fpu(struct vcpu *v)
   22.66 -{
   22.67 -    /* Avoid recursion. */
   22.68 -    clts();
   22.69 -
   22.70 -    if ( !v->fpu_dirtied )
   22.71 -    {
   22.72 -        v->fpu_dirtied = 1;
   22.73 -        if ( cpu_has_xsave )
   22.74 -        {
   22.75 -            if ( !v->fpu_initialised )
   22.76 -                v->fpu_initialised = 1;
   22.77 -
   22.78 -            /* XCR0 normally represents what guest OS set. In case of Xen
   22.79 -             * itself, we set all supported feature mask before doing
   22.80 -             * save/restore.
   22.81 -             */
   22.82 -            set_xcr0(v->arch.xcr0_accum);
   22.83 -            xrstor(v);
   22.84 -            set_xcr0(v->arch.xcr0);
   22.85 -        }
   22.86 -        else
   22.87 -        {
   22.88 -            if ( v->fpu_initialised )
   22.89 -                restore_fpu(v);
   22.90 -            else
   22.91 -                init_fpu();
   22.92 -        }
   22.93 -    }
   22.94 -}
   22.95  
   22.96  #endif /* __ASM_I386_I387_H */
    23.1 --- a/xen/include/public/sysctl.h	Mon Jan 17 17:18:38 2011 +0000
    23.2 +++ b/xen/include/public/sysctl.h	Mon Jan 17 17:24:21 2011 +0000
    23.3 @@ -253,21 +253,12 @@ struct xen_sysctl_get_pmstat {
    23.4  typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
    23.5  DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
    23.6  
    23.7 -/*
    23.8 - * Status codes. Must be greater than 0 to avoid confusing
    23.9 - * sysctl callers that see 0 as a plain successful return.
   23.10 - */
   23.11 -#define XEN_CPU_HOTPLUG_STATUS_OFFLINE 1
   23.12 -#define XEN_CPU_HOTPLUG_STATUS_ONLINE  2
   23.13 -#define XEN_CPU_HOTPLUG_STATUS_NEW     3
   23.14 -
   23.15  /* XEN_SYSCTL_cpu_hotplug */
   23.16  struct xen_sysctl_cpu_hotplug {
   23.17      /* IN variables */
   23.18      uint32_t cpu;   /* Physical cpu. */
   23.19  #define XEN_SYSCTL_CPU_HOTPLUG_ONLINE  0
   23.20  #define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
   23.21 -#define XEN_SYSCTL_CPU_HOTPLUG_STATUS 2
   23.22      uint32_t op;    /* hotplug opcode */
   23.23  };
   23.24  typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t;
    24.1 --- a/xen/include/xen/rcupdate.h	Mon Jan 17 17:18:38 2011 +0000
    24.2 +++ b/xen/include/xen/rcupdate.h	Mon Jan 17 17:24:21 2011 +0000
    24.3 @@ -197,4 +197,6 @@ void rcu_check_callbacks(int cpu);
    24.4  void fastcall call_rcu(struct rcu_head *head, 
    24.5                         void (*func)(struct rcu_head *head));
    24.6  
    24.7 +int rcu_barrier(void);
    24.8 +
    24.9  #endif /* __XEN_RCUPDATE_H */
    25.1 --- a/xen/include/xen/sched.h	Mon Jan 17 17:18:38 2011 +0000
    25.2 +++ b/xen/include/xen/sched.h	Mon Jan 17 17:24:21 2011 +0000
    25.3 @@ -209,7 +209,7 @@ struct domain
    25.4      spinlock_t       domain_lock;
    25.5  
    25.6      spinlock_t       page_alloc_lock; /* protects all the following fields  */
    25.7 -    struct page_list_head page_list;  /* linked list, of size tot_pages     */
    25.8 +    struct page_list_head page_list;  /* linked list */
    25.9      struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
   25.10      unsigned int     tot_pages;       /* number of pages currently possesed */
   25.11      unsigned int     max_pages;       /* maximum value for tot_pages        */
    26.1 --- a/xen/include/xen/stop_machine.h	Mon Jan 17 17:18:38 2011 +0000
    26.2 +++ b/xen/include/xen/stop_machine.h	Mon Jan 17 17:24:21 2011 +0000
    26.3 @@ -5,7 +5,7 @@
    26.4   * stop_machine_run: freeze the machine on all CPUs and run this function
    26.5   * @fn: the function to run
    26.6   * @data: the data ptr for the @fn()
    26.7 - * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS).
    26.8 + * @cpu: the cpu to run @fn() on (or all, if @cpu == NR_CPUS).
    26.9   *
   26.10   * Description: This causes every other cpu to enter a safe point, with
   26.11   * each of which disables interrupts, and finally interrupts are disabled