debuggers.hg

changeset 22796:4b7cb21caf0e

x86: Avoid calling xsave_alloc_save_area before xsave_init

Currently, xsave_alloc_save_area() is called on the
init_idle_domain->scheduler_init->alloc_vcpu->vcpu_initialise path
with xsave_cntxt_size=0, before xsave_init() has run in
identify_cpu(). This can cause a buffer overflow in xmem_pool.
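
For illustration, a minimal standalone C sketch of the ordering bug
(not Xen code: xsave_cntxt_size and XSAVE_AREA_MIN_SIZE mirror the Xen
names, everything else is a stub, and malloc stands in for _xmalloc):

    /* Sketch: the save area is allocated before the size is probed. */
    #include <stdio.h>
    #include <stdlib.h>

    static size_t xsave_cntxt_size;         /* 0 until "xsave_init" runs */
    #define XSAVE_AREA_MIN_SIZE (512 + 64)  /* FP/SSE + XSAVE.HEADER */

    static void *alloc_save_area(void)
    {
        /* Allocates whatever the current size says -- here, 0 bytes. */
        return malloc(xsave_cntxt_size);
    }

    int main(void)
    {
        void *area = alloc_save_area();     /* idle-vcpu path: too early */

        xsave_cntxt_size = XSAVE_AREA_MIN_SIZE;  /* probed only afterwards */

        /* A later XSAVE into 'area' would write xsave_cntxt_size bytes
         * into a 0-byte allocation -- the xmem_pool overflow above. */
        printf("allocated 0 bytes; XSAVE would write >= %zu\n",
               xsave_cntxt_size);

        free(area);
        return 0;
    }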

The idle domain doesn't use FPU, SSE, AVX, or any such extended
state, and doesn't need it saved. xsave_{alloc,free}_save_area()
should test-and-exit on is_idle_vcpu(), and the context switch code
should not issue XSAVE when switching out an idle vcpu.
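
A minimal sketch of that guard pattern (struct vcpu, is_idle_vcpu()
and the assertions are stand-ins for Xen's primitives; the real hunks
are in the diff below):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct vcpu { bool idle; };             /* stub for illustration */
    static bool is_idle_vcpu(const struct vcpu *v) { return v->idle; }

    static size_t xsave_cntxt_size = 512 + 64;  /* set by xsave_init() */

    static void *xsave_alloc_save_area(const struct vcpu *v)
    {
        if ( is_idle_vcpu(v) )              /* test-and-exit for idle */
            return NULL;
        assert(xsave_cntxt_size >= 512 + 64);   /* xsave_init() ran first */
        return malloc(xsave_cntxt_size);
    }

    static void save_init_fpu(const struct vcpu *v)
    {
        assert(!is_idle_vcpu(v));   /* context switch never saves idle */
        /* ... actual XSAVE/FXSAVE elided ... */
    }

    int main(void)
    {
        struct vcpu idle = { true }, guest = { false };

        void *a = xsave_alloc_save_area(&idle);   /* NULL: nothing needed */
        void *b = xsave_alloc_save_area(&guest);  /* real save area */
        save_init_fpu(&guest);
        /* save_init_fpu(&idle) would trip the assertion */

        free(a);
        free(b);
        return 0;
    }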

Signed-off-by: Wei Gang <gang.wei@intel.com>
Signed-off-by: Keir Fraser <keir@xen.org>
author Keir Fraser <keir@xen.org>
date Fri Jan 14 08:34:53 2011 +0000 (2011-01-14)
parents 93e7bf0e1845
children 58304c1cc725
files xen/arch/x86/i387.c xen/include/asm-x86/i387.h
line diff
--- a/xen/arch/x86/i387.c	Fri Jan 14 08:11:46 2011 +0000
+++ b/xen/arch/x86/i387.c	Fri Jan 14 08:34:53 2011 +0000
@@ -16,6 +16,39 @@
 #include <asm/i387.h>
 #include <asm/asm_defns.h>
 
+void setup_fpu(struct vcpu *v)
+{
+    ASSERT(!is_idle_vcpu(v));
+
+    /* Avoid recursion. */
+    clts();
+
+    if ( !v->fpu_dirtied )
+    {
+        v->fpu_dirtied = 1;
+        if ( cpu_has_xsave )
+        {
+            if ( !v->fpu_initialised )
+                v->fpu_initialised = 1;
+
+            /* XCR0 normally represents what guest OS set. In case of Xen
+             * itself, we set all supported feature mask before doing
+             * save/restore.
+             */
+            set_xcr0(v->arch.xcr0_accum);
+            xrstor(v);
+            set_xcr0(v->arch.xcr0);
+        }
+        else
+        {
+            if ( v->fpu_initialised )
+                restore_fpu(v);
+            else
+                init_fpu();
+        }
+    }
+}
+
 void init_fpu(void)
 {
     asm volatile ( "fninit" );
@@ -29,6 +62,8 @@ void save_init_fpu(struct vcpu *v)
     unsigned long cr0 = read_cr0();
     char *fpu_ctxt = v->arch.guest_context.fpu_ctxt.x;
 
+    ASSERT(!is_idle_vcpu(v));
+
     /* This can happen, if a paravirtualised guest OS has set its CR0.TS. */
     if ( cr0 & X86_CR0_TS )
         clts();
@@ -138,6 +173,7 @@ void restore_fpu(struct vcpu *v)
 }
 
 #define XSTATE_CPUID 0xd
+#define XSAVE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */
 
 /*
  * Maximum size (in byte) of the XSAVE/XRSTOR save area required by all
@@ -177,7 +213,9 @@ void xsave_init(void)
     }
 
     /* FP/SSE, XSAVE.HEADER, YMM */
-    min_size =  512 + 64 + ((eax & XSTATE_YMM) ? XSTATE_YMM_SIZE : 0);
+    min_size =  XSAVE_AREA_MIN_SIZE;
+    if ( eax & XSTATE_YMM )
+        min_size += XSTATE_YMM_SIZE;
     BUG_ON(ecx < min_size);
 
     /*
@@ -214,9 +252,11 @@ int xsave_alloc_save_area(struct vcpu *v
 {
     void *save_area;
 
-    if ( !cpu_has_xsave )
+    if ( !cpu_has_xsave || is_idle_vcpu(v) )
         return 0;
 
+    BUG_ON(xsave_cntxt_size < XSAVE_AREA_MIN_SIZE);
+
     /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
     save_area = _xmalloc(xsave_cntxt_size, 64);
     if ( save_area == NULL )
--- a/xen/include/asm-x86/i387.h	Fri Jan 14 08:11:46 2011 +0000
+++ b/xen/include/asm-x86/i387.h	Fri Jan 14 08:34:53 2011 +0000
@@ -110,6 +110,7 @@ static inline void xrstor(struct vcpu *v
         : "m" (*ptr), "a" (-1), "d" (-1), "D"(ptr));
 }
 
+extern void setup_fpu(struct vcpu *v);
 extern void init_fpu(void);
 extern void save_init_fpu(struct vcpu *v);
 extern void restore_fpu(struct vcpu *v);
@@ -124,35 +125,4 @@ extern void restore_fpu(struct vcpu *v);
     __asm__ __volatile__ ( "ldmxcsr %0" : : "m" (__mxcsr) );    \
 } while ( 0 )
 
-static inline void setup_fpu(struct vcpu *v)
-{
-    /* Avoid recursion. */
-    clts();
-
-    if ( !v->fpu_dirtied )
-    {
-        v->fpu_dirtied = 1;
-        if ( cpu_has_xsave )
-        {
-            if ( !v->fpu_initialised )
-                v->fpu_initialised = 1;
-
-            /* XCR0 normally represents what guest OS set. In case of Xen
-             * itself, we set all supported feature mask before doing
-             * save/restore.
-             */
-            set_xcr0(v->arch.xcr0_accum);
-            xrstor(v);
-            set_xcr0(v->arch.xcr0);
-        }
-        else
-        {
-            if ( v->fpu_initialised )
-                restore_fpu(v);
-            else
-                init_fpu();
-        }
-    }
-}
-
 #endif /* __ASM_I386_I387_H */