debuggers.hg

view xen/arch/x86/acpi/suspend.c @ 22797:58304c1cc725

x86 fpu: Code clean up. Eliminate per-cpu xsave init verbosity.

Signed-off-by: Keir Fraser <keir@xen.org>
author Keir Fraser <keir@xen.org>
date Fri Jan 14 09:11:28 2011 +0000 (2011-01-14)
parents a3a55a6e4761
/*
 * Portions are:
 *  Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 *  Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <xen/config.h>
#include <xen/acpi.h>
#include <xen/smp.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/flushtlb.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/i387.h>
#include <xen/hypercall.h>

#if defined(CONFIG_X86_64)
static unsigned long saved_lstar, saved_cstar;
static unsigned long saved_sysenter_esp, saved_sysenter_eip;
static unsigned long saved_fs_base, saved_gs_base, saved_kernel_gs_base;
static uint16_t saved_segs[4];
#endif
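/*
 * Save the remaining processor state ahead of S3 suspend: the current
 * vcpu's FPU state and, on x86-64, the syscall/sysenter MSRs, the
 * segment base MSRs and the data segment selectors.
 */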
void save_rest_processor_state(void)
{
    save_init_fpu(current);

#if defined(CONFIG_X86_64)
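    /*
     * Stash the data segment selectors; restore_rest_processor_state()
     * puts them back for a non-idle vcpu.
     */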
    asm volatile (
        "mov %%ds,(%0); mov %%es,2(%0); mov %%fs,4(%0); mov %%gs,6(%0)"
        : : "r" (saved_segs) : "memory" );
    rdmsrl(MSR_FS_BASE, saved_fs_base);
    rdmsrl(MSR_GS_BASE, saved_gs_base);
    rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
    rdmsrl(MSR_CSTAR, saved_cstar);
    rdmsrl(MSR_LSTAR, saved_lstar);
    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
    {
        rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
        rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
    }
#endif
}
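/*
 * Resume-side counterpart of save_rest_processor_state(): reload the TR,
 * re-establish Xen's SYSCALL/SYSENTER entry MSRs, restore the saved
 * segment and MSR state, the current vcpu's debug registers, the host
 * PAT and the MTRRs.
 */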
void restore_rest_processor_state(void)
{
    struct vcpu *curr = current;

    load_TR();

#if defined(CONFIG_X86_64)
    /* Recover syscall MSRs */
    wrmsrl(MSR_LSTAR, saved_lstar);
    wrmsrl(MSR_CSTAR, saved_cstar);
    wrmsr(MSR_STAR, 0, (FLAT_RING3_CS32<<16) | __HYPERVISOR_CS);
    wrmsr(MSR_SYSCALL_MASK,
          X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT|
          X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF,
          0U);

    wrmsrl(MSR_FS_BASE, saved_fs_base);
    wrmsrl(MSR_GS_BASE, saved_gs_base);
    wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base);
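    /*
     * SYSENTER/SYSEXIT are not usable in long mode on AMD CPUs, so the
     * SYSENTER MSRs only need restoring on Intel.
     */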
    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
    {
        /* Recover sysenter MSRs */
        wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp);
        wrmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip);
        wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0);
    }

    if ( !is_idle_vcpu(curr) )
    {
        asm volatile (
            "mov (%0),%%ds; mov 2(%0),%%es; mov 4(%0),%%fs"
            : : "r" (saved_segs) : "memory" );
        do_set_segment_base(SEGBASE_GS_USER_SEL, saved_segs[3]);
    }

#else /* !defined(CONFIG_X86_64) */
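    /*
     * 32-bit: SYSENTER_ESP is pointed into this CPU's TSS when a
     * supervisor-mode kernel is in use; re-establish it after resume.
     */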
    if ( supervisor_mode_kernel && cpu_has_sep )
        wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0);
#endif

    /* Maybe load the debug registers. */
    BUG_ON(is_hvm_vcpu(curr));
    if ( !is_idle_vcpu(curr) && curr->arch.guest_context.debugreg[7] )
    {
        write_debugreg(0, curr->arch.guest_context.debugreg[0]);
        write_debugreg(1, curr->arch.guest_context.debugreg[1]);
        write_debugreg(2, curr->arch.guest_context.debugreg[2]);
        write_debugreg(3, curr->arch.guest_context.debugreg[3]);
        write_debugreg(6, curr->arch.guest_context.debugreg[6]);
        write_debugreg(7, curr->arch.guest_context.debugreg[7]);
    }

    /* Reload FPU state on next FPU use. */
    stts();
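    /* Re-establish the host PAT and the boot processor's MTRRs. */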
    if (cpu_has_pat)
        wrmsrl(MSR_IA32_CR_PAT, host_pat);

    mtrr_bp_restore();
}
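For orientation, the two helpers above are intended to bracket the platform sleep: save before entering S3, restore after wakeup. The sketch below shows that call pattern only; s3_sleep() and example_s3_cycle() are hypothetical names, not part of Xen, and the real low-level suspend entry point lives outside this file.

/* Sketch only, under the assumptions stated above. */
extern int s3_sleep(void);              /* hypothetical: enters S3, returns on wakeup */

static int example_s3_cycle(void)
{
    int err;

    save_rest_processor_state();        /* stash FPU, MSR and segment state */
    err = s3_sleep();                   /* platform sleep; execution resumes here */
    restore_rest_processor_state();     /* re-establish the saved state */

    return err;
}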