/root/src/xen/xen/arch/x86/msr.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * arch/x86/msr.c |
3 | | * |
4 | | * Policy objects for Model-Specific Registers. |
5 | | * |
6 | | * This program is free software; you can redistribute it and/or modify |
7 | | * it under the terms of the GNU General Public License as published by |
8 | | * the Free Software Foundation; either version 2 of the License, or |
9 | | * (at your option) any later version. |
10 | | * |
11 | | * This program is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | | * GNU General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU General Public License |
17 | | * along with this program; If not, see <http://www.gnu.org/licenses/>. |
18 | | * |
19 | | * Copyright (c) 2017 Citrix Systems Ltd. |
20 | | */ |
21 | | |
22 | | #include <xen/init.h> |
23 | | #include <xen/lib.h> |
24 | | #include <xen/sched.h> |
25 | | #include <asm/msr.h> |
26 | | |
/*
 * Boot-time "max" policies: the most permissive MSR state that can be
 * offered to each guest type (HVM vs PV).  Filled in once during boot by
 * init_guest_msr_policy() and treated as read-only thereafter; per-domain
 * and per-vCPU policies are initialised by copying from these.
 */
struct msr_domain_policy __read_mostly hvm_max_msr_domain_policy,
                         __read_mostly pv_max_msr_domain_policy;

struct msr_vcpu_policy __read_mostly hvm_max_msr_vcpu_policy,
                       __read_mostly pv_max_msr_vcpu_policy;
32 | | |
33 | | static void __init calculate_hvm_max_policy(void) |
34 | 1 | { |
35 | 1 | struct msr_domain_policy *dp = &hvm_max_msr_domain_policy; |
36 | 1 | struct msr_vcpu_policy *vp = &hvm_max_msr_vcpu_policy; |
37 | 1 | |
38 | 1 | if ( !hvm_enabled ) |
39 | 0 | return; |
40 | 1 | |
41 | 1 | /* 0x000000ce MSR_INTEL_PLATFORM_INFO */ |
42 | 1 | if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) |
43 | 1 | { |
44 | 1 | dp->plaform_info.available = true; |
45 | 1 | dp->plaform_info.cpuid_faulting = true; |
46 | 1 | } |
47 | 1 | |
48 | 1 | /* 0x00000140 MSR_INTEL_MISC_FEATURES_ENABLES */ |
49 | 1 | vp->misc_features_enables.available = dp->plaform_info.available; |
50 | 1 | } |
51 | | |
52 | | static void __init calculate_pv_max_policy(void) |
53 | 1 | { |
54 | 1 | struct msr_domain_policy *dp = &pv_max_msr_domain_policy; |
55 | 1 | struct msr_vcpu_policy *vp = &pv_max_msr_vcpu_policy; |
56 | 1 | |
57 | 1 | /* 0x000000ce MSR_INTEL_PLATFORM_INFO */ |
58 | 1 | if ( cpu_has_cpuid_faulting ) |
59 | 1 | { |
60 | 1 | dp->plaform_info.available = true; |
61 | 1 | dp->plaform_info.cpuid_faulting = true; |
62 | 1 | } |
63 | 1 | |
64 | 1 | /* 0x00000140 MSR_INTEL_MISC_FEATURES_ENABLES */ |
65 | 1 | vp->misc_features_enables.available = dp->plaform_info.available; |
66 | 1 | } |
67 | | |
/*
 * Compute the boot-time "max" MSR policies for both guest types.  Called
 * once during boot, after CPU feature detection and HVM availability have
 * been settled, since both helpers consult them.
 */
void __init init_guest_msr_policy(void)
{
    calculate_hvm_max_policy();
    calculate_pv_max_policy();
}
73 | | |
74 | | int init_domain_msr_policy(struct domain *d) |
75 | 1 | { |
76 | 1 | struct msr_domain_policy *dp; |
77 | 1 | |
78 | 1 | dp = xmalloc(struct msr_domain_policy); |
79 | 1 | |
80 | 1 | if ( !dp ) |
81 | 0 | return -ENOMEM; |
82 | 1 | |
83 | 1 | *dp = is_pv_domain(d) ? pv_max_msr_domain_policy : |
84 | 1 | hvm_max_msr_domain_policy; |
85 | 1 | |
86 | 1 | /* See comment in intel_ctxt_switch_levelling() */ |
87 | 1 | if ( is_control_domain(d) ) |
88 | 0 | { |
89 | 0 | dp->plaform_info.available = false; |
90 | 0 | dp->plaform_info.cpuid_faulting = false; |
91 | 0 | } |
92 | 1 | |
93 | 1 | d->arch.msr = dp; |
94 | 1 | |
95 | 1 | return 0; |
96 | 1 | } |
97 | | |
98 | | int init_vcpu_msr_policy(struct vcpu *v) |
99 | 12 | { |
100 | 12 | struct domain *d = v->domain; |
101 | 12 | struct msr_vcpu_policy *vp; |
102 | 12 | |
103 | 12 | vp = xmalloc(struct msr_vcpu_policy); |
104 | 12 | |
105 | 12 | if ( !vp ) |
106 | 0 | return -ENOMEM; |
107 | 12 | |
108 | 12 | *vp = is_pv_domain(d) ? pv_max_msr_vcpu_policy : |
109 | 12 | hvm_max_msr_vcpu_policy; |
110 | 12 | |
111 | 12 | /* See comment in intel_ctxt_switch_levelling() */ |
112 | 12 | if ( is_control_domain(d) ) |
113 | 11 | vp->misc_features_enables.available = false; |
114 | 12 | |
115 | 12 | v->arch.msr = vp; |
116 | 12 | |
117 | 12 | return 0; |
118 | 12 | } |
119 | | |
120 | | int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) |
121 | 271 | { |
122 | 271 | const struct msr_domain_policy *dp = v->domain->arch.msr; |
123 | 271 | const struct msr_vcpu_policy *vp = v->arch.msr; |
124 | 271 | |
125 | 271 | switch ( msr ) |
126 | 271 | { |
127 | 0 | case MSR_INTEL_PLATFORM_INFO: |
128 | 0 | if ( !dp->plaform_info.available ) |
129 | 0 | goto gp_fault; |
130 | 0 | *val = (uint64_t)dp->plaform_info.cpuid_faulting << |
131 | 0 | _MSR_PLATFORM_INFO_CPUID_FAULTING; |
132 | 0 | break; |
133 | 0 |
|
134 | 0 | case MSR_INTEL_MISC_FEATURES_ENABLES: |
135 | 0 | if ( !vp->misc_features_enables.available ) |
136 | 0 | goto gp_fault; |
137 | 0 | *val = (uint64_t)vp->misc_features_enables.cpuid_faulting << |
138 | 0 | _MSR_MISC_FEATURES_CPUID_FAULTING; |
139 | 0 | break; |
140 | 0 |
|
141 | 271 | default: |
142 | 271 | return X86EMUL_UNHANDLEABLE; |
143 | 271 | } |
144 | 271 | |
145 | 0 | return X86EMUL_OKAY; |
146 | 271 | |
147 | 0 | gp_fault: |
148 | 0 | return X86EMUL_EXCEPTION; |
149 | 271 | } |
150 | | |
151 | | int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) |
152 | 562 | { |
153 | 562 | struct domain *d = v->domain; |
154 | 562 | struct msr_domain_policy *dp = d->arch.msr; |
155 | 562 | struct msr_vcpu_policy *vp = v->arch.msr; |
156 | 562 | |
157 | 562 | switch ( msr ) |
158 | 562 | { |
159 | 0 | case MSR_INTEL_PLATFORM_INFO: |
160 | 0 | goto gp_fault; |
161 | 0 |
|
162 | 0 | case MSR_INTEL_MISC_FEATURES_ENABLES: |
163 | 0 | { |
164 | 0 | uint64_t rsvd = ~0ull; |
165 | 0 | bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting; |
166 | 0 |
|
167 | 0 | if ( !vp->misc_features_enables.available ) |
168 | 0 | goto gp_fault; |
169 | 0 |
|
170 | 0 | if ( dp->plaform_info.cpuid_faulting ) |
171 | 0 | rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING; |
172 | 0 |
|
173 | 0 | if ( val & rsvd ) |
174 | 0 | goto gp_fault; |
175 | 0 |
|
176 | 0 | vp->misc_features_enables.cpuid_faulting = |
177 | 0 | val & MSR_MISC_FEATURES_CPUID_FAULTING; |
178 | 0 |
|
179 | 0 | if ( is_hvm_domain(d) && cpu_has_cpuid_faulting && |
180 | 0 | (old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) ) |
181 | 0 | ctxt_switch_levelling(v); |
182 | 0 | break; |
183 | 0 | } |
184 | 0 |
|
185 | 562 | default: |
186 | 562 | return X86EMUL_UNHANDLEABLE; |
187 | 562 | } |
188 | 562 | |
189 | 0 | return X86EMUL_OKAY; |
190 | 562 | |
191 | 0 | gp_fault: |
192 | 0 | return X86EMUL_EXCEPTION; |
193 | 562 | } |
194 | | |
195 | | /* |
196 | | * Local variables: |
197 | | * mode: C |
198 | | * c-file-style: "BSD" |
199 | | * c-basic-offset: 4 |
200 | | * tab-width: 4 |
201 | | * indent-tabs-mode: nil |
202 | | * End: |
203 | | */ |