
view xen/include/asm-x86/i387.h @ 22796:4b7cb21caf0e

x86: Avoid calling xsave_alloc_save_area before xsave_init

Currently, xsave_alloc_save_area is called on the
init_idle_domain->scheduler_init->alloc_vcpu->vcpu_initialise path
with xsave_cntxt_size=0, i.e. before xsave_init is called from
identify_cpu(). This may cause a buffer overflow on xmem_pool.

The idle domain does not use FPU, SSE, AVX or any other extended state
and does not need it saved. xsave_{alloc,free}_save_area() should
test for is_idle_vcpu() and exit early, and our context switch code
should not execute XSAVE when switching out an idle vcpu (both points
are sketched below).

Signed-off-by: Wei Gang <gang.wei@intel.com>
Signed-off-by: Keir Fraser <keir@xen.org>
author Keir Fraser <keir@xen.org>
date Fri Jan 14 08:34:53 2011 +0000 (2011-01-14)
parents 26e7e6c6ff7f
children 58304c1cc725
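
For illustration, here is a minimal, hypothetical sketch of the test-and-exit
described in the commit message above. This is not the committed patch body:
the real xsave_alloc_save_area()/xsave_free_save_area() live in the
corresponding .c file, not in this header, and details such as error handling
may differ. is_idle_vcpu(), _xmalloc() and xfree() are existing Xen helpers
assumed here.

/* Hypothetical sketch only -- not the committed implementation. */
int xsave_alloc_save_area(struct vcpu *v)
{
    void *save_area;

    /* The idle vcpu never uses FPU/SSE/AVX state; it is also created
     * before xsave_init() runs, while xsave_cntxt_size is still 0, so
     * allocating here would under-size the xmem_pool allocation and
     * later XSAVEs could overflow it. */
    if ( is_idle_vcpu(v) || xsave_cntxt_size == 0 )
        return 0;

    /* The XSAVE area must be 64-byte aligned. */
    save_area = _xmalloc(xsave_cntxt_size, 64);
    if ( save_area == NULL )
        return -ENOMEM;

    memset(save_area, 0, xsave_cntxt_size);
    v->arch.xsave_area = save_area;

    return 0;
}

void xsave_free_save_area(struct vcpu *v)
{
    xfree(v->arch.xsave_area);   /* xfree(NULL) is a no-op */
    v->arch.xsave_area = NULL;
}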
line source
/*
 * include/asm-i386/i387.h
 *
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

#ifndef __ASM_I386_I387_H
#define __ASM_I386_I387_H

#include <xen/sched.h>
#include <asm/processor.h>

extern unsigned int xsave_cntxt_size;
extern u64 xfeature_mask;
extern bool_t cpu_has_xsaveopt;

void xsave_init(void);
int xsave_alloc_save_area(struct vcpu *v);
void xsave_free_save_area(struct vcpu *v);

#define XSTATE_FP          (1ULL << 0)
#define XSTATE_SSE         (1ULL << 1)
#define XSTATE_YMM         (1ULL << 2)
#define XSTATE_LWP         (1ULL << 62) /* AMD lightweight profiling */
#define XSTATE_FP_SSE      (XSTATE_FP | XSTATE_SSE)
#define XCNTXT_MASK        (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_LWP)
#define XSTATE_YMM_OFFSET  (512 + 64)
#define XSTATE_YMM_SIZE    256
#define XSAVEOPT           (1 << 0)

struct xsave_struct
{
    struct { char x[512]; } fpu_sse;         /* FPU/MMX, SSE */

    struct {
        u64 xstate_bv;
        u64 reserved[7];
    } xsave_hdr;                             /* The 64-byte header */

    struct { char x[XSTATE_YMM_SIZE]; } ymm; /* YMM */
    char data[];                             /* Future new states */
} __attribute__ ((packed, aligned (64)));

#define XCR_XFEATURE_ENABLED_MASK   0

#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
#else
#define REX_PREFIX
#endif

DECLARE_PER_CPU(uint64_t, xcr0);

static inline void xsetbv(u32 index, u64 xfeatures)
{
    u32 hi = xfeatures >> 32;
    u32 lo = (u32)xfeatures;

    asm volatile (".byte 0x0f,0x01,0xd1" :: "c" (index),
                  "a" (lo), "d" (hi));
}

static inline void set_xcr0(u64 xfeatures)
{
    this_cpu(xcr0) = xfeatures;
    xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures);
}

static inline uint64_t get_xcr0(void)
{
    return this_cpu(xcr0);
}

static inline void xsave(struct vcpu *v)
{
    struct xsave_struct *ptr;

    ptr = (struct xsave_struct *)v->arch.xsave_area;

    asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
                  :
                  : "a" (-1), "d" (-1), "D" (ptr)
                  : "memory");
}

static inline void xsaveopt(struct vcpu *v)
{
    struct xsave_struct *ptr;

    ptr = (struct xsave_struct *)v->arch.xsave_area;

    asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x37"
                  :
                  : "a" (-1), "d" (-1), "D" (ptr)
                  : "memory");
}

static inline void xrstor(struct vcpu *v)
{
    struct xsave_struct *ptr;

    ptr = (struct xsave_struct *)v->arch.xsave_area;

    asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x2f"
                  :
                  : "m" (*ptr), "a" (-1), "d" (-1), "D" (ptr));
}

extern void setup_fpu(struct vcpu *v);
extern void init_fpu(void);
extern void save_init_fpu(struct vcpu *v);
extern void restore_fpu(struct vcpu *v);

#define unlazy_fpu(v) do {                      \
    if ( (v)->fpu_dirtied )                     \
        save_init_fpu(v);                       \
} while ( 0 )

#define load_mxcsr(val) do {                                    \
    unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf);    \
    __asm__ __volatile__ ( "ldmxcsr %0" : : "m" (__mxcsr) );    \
} while ( 0 )

#endif /* __ASM_I386_I387_H */
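
Connecting the header back to the second point in the commit message
("context switch code should not execute XSAVE when switching out an idle
vcpu"), here is a hypothetical sketch of such a guard around the
xsave()/xsaveopt() helpers declared above. The real save path lives in the
context-switch / FPU code (save_init_fpu() and friends), not in this header,
and its control flow differs in detail; save_extended_state() is an invented
name for illustration only.

/* Hypothetical sketch only -- not the committed context-switch code. */
static void save_extended_state(struct vcpu *v)
{
    /* Idle vcpus have no xsave_area (see xsave_alloc_save_area()), so
     * issuing XSAVE for them would dereference a NULL pointer. */
    if ( is_idle_vcpu(v) )
        return;

    if ( cpu_has_xsaveopt )
        xsaveopt(v);    /* skips states that are unchanged/in init form */
    else
        xsave(v);       /* saves all enabled states (EDX:EAX mask = -1) */
}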