From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86: Support fully eager FPU context switching

This is controlled on a per-vcpu basis for flexibility.
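
As an illustration of the intended use (a sketch only: opt_eager_fpu is
an assumed policy knob, not introduced by this patch), the flag would be
latched once per vcpu, e.g. during FPU setup:

    /* Hypothetical policy hook; opt_eager_fpu is an assumed boolean. */
    v->arch.fully_eager_fpu = opt_eager_fpu;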

This is part of XSA-267 / CVE-2018-3665

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
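
For context: vcpu_restore_fpu_eager() runs on the __context_switch()
path, while vcpu_restore_fpu_lazy() runs from the #NM
(device-not-available) handling paths.  A sketch of the pre-existing
caller, elided to the relevant call:

    static void __context_switch(void)
    {
        ...
        if ( !is_idle_domain(nd) )
            vcpu_restore_fpu_eager(n);
        ...
    }

A fully eager vcpu must therefore never reach the lazy path; the new
ASSERT() in vcpu_restore_fpu_lazy() enforces this.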

diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 5584535..f9cf059 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -215,8 +215,30 @@ void vcpu_restore_fpu_eager(struct vcpu *v)
 {
     ASSERT(!is_idle_vcpu(v));
     
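+    /*
+     * Unconditionally restore all state, leaving nothing stale behind
+     * for a later #NM-deferred (lazy) restore.  This closes the
+     * speculative register leak of XSA-267 / CVE-2018-3665.
+     */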
+    if ( v->arch.fully_eager_fpu )
+    {
+        /* Avoid recursion */
+        clts();
+
+        if ( cpu_has_xsave )
+            fpu_xrstor(v, XSTATE_ALL);
+        else
+            fpu_fxrstor(v);
+
+        v->fpu_initialised = 1;
+        v->fpu_dirtied = 1;
+
+        /* Xen doesn't need TS set, but the guest might. */
+        if ( is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS) )
+            stts();
+    }
     /* save the nonlazy extended state which is not tracked by CR0.TS bit */
-    if ( v->arch.nonlazy_xstate_used )
+    else if ( v->arch.nonlazy_xstate_used )
     {
         /* Avoid recursion */
         clts();
@@ -238,6 +260,9 @@ void vcpu_restore_fpu_lazy(struct vcpu *v)
     if ( v->fpu_dirtied )
         return;
 
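+    /* Fully eager state is restored on the context switch path instead. */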
+    ASSERT(!v->arch.fully_eager_fpu);
+
     if ( cpu_has_xsave )
         fpu_xrstor(v, XSTATE_LAZY);
     else
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index f917b46..20715e3 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -574,6 +574,9 @@ struct arch_vcpu
      * and thus should be saved/restored. */
     bool_t nonlazy_xstate_used;
 
+    /* Restore all FPU state (lazy and non-lazy) on context switch? */
+    bool_t fully_eager_fpu;
+
     /*
      * The SMAP check policy when updating runstate_guest(v) and the
      * secondary system time.
