/root/src/xen/xen/arch/x86/acpi/suspend.c
Line | Count | Source |
1 | | /* |
2 | | * Portions are: |
3 | | * Copyright (c) 2002 Pavel Machek <pavel@suse.cz> |
4 | | * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> |
5 | | */ |
6 | | |
7 | | #include <xen/acpi.h> |
8 | | #include <xen/smp.h> |
9 | | #include <asm/processor.h> |
10 | | #include <asm/msr.h> |
11 | | #include <asm/debugreg.h> |
12 | | #include <asm/hvm/hvm.h> |
13 | | #include <asm/hvm/support.h> |
14 | | #include <asm/i387.h> |
15 | | #include <asm/xstate.h> |
16 | | #include <xen/hypercall.h> |
17 | | |
18 | | static unsigned long saved_lstar, saved_cstar; |
19 | | static unsigned long saved_sysenter_esp, saved_sysenter_eip; |
20 | | static unsigned long saved_fs_base, saved_gs_base, saved_kernel_gs_base; |
21 | | static uint16_t saved_segs[4]; |
22 | | static uint64_t saved_xcr0; |
23 | | |
24 | | void save_rest_processor_state(void) |
25 | 0 | { |
26 | 0 | vcpu_save_fpu(current); |
27 | 0 | |
28 | 0 | asm volatile ( |
29 | 0 | "movw %%ds,(%0); movw %%es,2(%0); movw %%fs,4(%0); movw %%gs,6(%0)" |
30 | 0 | : : "r" (saved_segs) : "memory" ); |
31 | 0 | saved_fs_base = rdfsbase(); |
32 | 0 | saved_gs_base = rdgsbase(); |
33 | 0 | rdmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base); |
34 | 0 | rdmsrl(MSR_CSTAR, saved_cstar); |
35 | 0 | rdmsrl(MSR_LSTAR, saved_lstar); |
36 | 0 | if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || |
37 | 0 | boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ) |
38 | 0 | { |
39 | 0 | rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp); |
40 | 0 | rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip); |
41 | 0 | } |
42 | 0 | if ( cpu_has_xsave ) |
43 | 0 | saved_xcr0 = get_xcr0(); |
44 | 0 | } |
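The inline asm in save_rest_processor_state() stores the %ds, %es, %fs and %gs selectors at byte offsets 0, 2, 4 and 6 of saved_segs[]. The fragment below is an illustration only and is not part of suspend.c: a minimal standalone equivalent of that store, using a named struct whose layout mirrors those offsets and one asm statement per selector.

    /* Illustration only: standalone equivalent of the saved_segs[] store above. */
    #include <stdint.h>

    struct seg_save {
        uint16_t ds, es, fs, gs;   /* offsets 0, 2, 4, 6 -- same layout as saved_segs[] */
    };

    static void save_data_segments(struct seg_save *s)
    {
        /* Each selector register can be stored straight to memory with movw. */
        asm volatile ( "movw %%ds, %0" : "=m" (s->ds) );
        asm volatile ( "movw %%es, %0" : "=m" (s->es) );
        asm volatile ( "movw %%fs, %0" : "=m" (s->fs) );
        asm volatile ( "movw %%gs, %0" : "=m" (s->gs) );
    }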
45 | | |
46 | | |
47 | | void restore_rest_processor_state(void) |
48 | 0 | { |
49 | 0 | struct vcpu *curr = current; |
50 | 0 | |
51 | 0 | load_TR(); |
52 | 0 | |
53 | 0 | /* Recover syscall MSRs */ |
54 | 0 | wrmsrl(MSR_LSTAR, saved_lstar); |
55 | 0 | wrmsrl(MSR_CSTAR, saved_cstar); |
56 | 0 | wrmsrl(MSR_STAR, XEN_MSR_STAR); |
57 | 0 | wrmsrl(MSR_SYSCALL_MASK, XEN_SYSCALL_MASK); |
58 | 0 | |
59 | 0 | wrfsbase(saved_fs_base); |
60 | 0 | wrgsbase(saved_gs_base); |
61 | 0 | wrmsrl(MSR_SHADOW_GS_BASE, saved_kernel_gs_base); |
62 | 0 | |
63 | 0 | if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || |
64 | 0 | boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ) |
65 | 0 | { |
66 | 0 | /* Recover sysenter MSRs */ |
67 | 0 | wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp); |
68 | 0 | wrmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip); |
69 | 0 | wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0); |
70 | 0 | } |
71 | 0 | |
72 | 0 | if ( !is_idle_vcpu(curr) ) |
73 | 0 | { |
74 | 0 | asm volatile ( |
75 | 0 | "movw (%0),%%ds; movw 2(%0),%%es; movw 4(%0),%%fs" |
76 | 0 | : : "r" (saved_segs) : "memory" ); |
77 | 0 | do_set_segment_base(SEGBASE_GS_USER_SEL, saved_segs[3]); |
78 | 0 | } |
79 | 0 | |
80 | 0 | if ( cpu_has_xsave && !set_xcr0(saved_xcr0) ) |
81 | 0 | BUG(); |
82 | 0 | |
83 | 0 | /* Maybe load the debug registers. */ |
84 | 0 | BUG_ON(!is_pv_vcpu(curr)); |
85 | 0 | if ( !is_idle_vcpu(curr) && curr->arch.debugreg[7] ) |
86 | 0 | { |
87 | 0 | write_debugreg(0, curr->arch.debugreg[0]); |
88 | 0 | write_debugreg(1, curr->arch.debugreg[1]); |
89 | 0 | write_debugreg(2, curr->arch.debugreg[2]); |
90 | 0 | write_debugreg(3, curr->arch.debugreg[3]); |
91 | 0 | write_debugreg(6, curr->arch.debugreg[6]); |
92 | 0 | write_debugreg(7, curr->arch.debugreg[7]); |
93 | 0 | } |
94 | 0 | |
95 | 0 | /* Reload FPU state on next FPU use. */ |
96 | 0 | stts(); |
97 | 0 | |
98 | 0 | if ( cpu_has_pat ) |
99 | 0 | wrmsrl(MSR_IA32_CR_PAT, host_pat); |
100 | 0 | |
101 | 0 | mtrr_bp_restore(); |
102 | 0 | } |
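save_rest_processor_state() and restore_rest_processor_state() are meant to be called as a pair around the actual S3 sleep: the first stashes the syscall/sysenter MSRs, SHADOW_GS_BASE, the FS/GS bases, the data-segment selectors and (when XSAVE is present) XCR0, while the second reloads them after wakeup along with TR, STAR/SYSCALL_MASK, the vCPU's debug registers, PAT and the MTRRs, and sets TS so FPU state is lazily reloaded. The sketch below shows that pairing only; enter_s3_sketch() and the elided platform steps are hypothetical and not Xen's actual suspend path.

    /*
     * Hedged sketch of the intended call pairing; only the two
     * *_rest_processor_state() functions come from this file.
     */
    static void enter_s3_sketch(void)
    {
        save_rest_processor_state();      /* MSRs, segment bases, selectors, XCR0 */

        /* ... save GPRs/control registers and invoke the firmware sleep entry ... */

        /* Execution continues here after wakeup. */
        restore_rest_processor_state();   /* TR, MSRs, segments, debug regs, PAT, MTRRs */
    }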