/root/src/xen/xen/arch/x86/hvm/viridian.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * viridian.c |
3 | | * |
4 | | * An implementation of some Viridian enlightenments. See Microsoft's |
5 | | * Hypervisor Top Level Functional Specification (v5.0a) at: |
6 | | * |
7 | | * https://github.com/Microsoft/Virtualization-Documentation/raw/master/tlfs/Hypervisor%20Top%20Level%20Functional%20Specification%20v5.0.pdf |
8 | | * |
9 | | * for more information. |
10 | | */ |
11 | | |
12 | | #include <xen/sched.h> |
13 | | #include <xen/version.h> |
14 | | #include <xen/perfc.h> |
15 | | #include <xen/hypercall.h> |
16 | | #include <xen/domain_page.h> |
17 | | #include <asm/guest_access.h> |
18 | | #include <asm/paging.h> |
19 | | #include <asm/p2m.h> |
20 | | #include <asm/apic.h> |
21 | | #include <asm/hvm/support.h> |
22 | | #include <public/sched.h> |
23 | | #include <public/hvm/hvm_op.h> |
24 | | |
25 | | /* Viridian MSR numbers. */ |
26 | 0 | #define HV_X64_MSR_GUEST_OS_ID 0x40000000 |
27 | 0 | #define HV_X64_MSR_HYPERCALL 0x40000001 |
28 | 0 | #define HV_X64_MSR_VP_INDEX 0x40000002 |
29 | | #define HV_X64_MSR_RESET 0x40000003 |
30 | | #define HV_X64_MSR_VP_RUNTIME 0x40000010 |
31 | 0 | #define HV_X64_MSR_TIME_REF_COUNT 0x40000020 |
32 | 0 | #define HV_X64_MSR_REFERENCE_TSC 0x40000021 |
33 | 0 | #define HV_X64_MSR_TSC_FREQUENCY 0x40000022 |
34 | 0 | #define HV_X64_MSR_APIC_FREQUENCY 0x40000023 |
35 | 0 | #define HV_X64_MSR_EOI 0x40000070 |
36 | 0 | #define HV_X64_MSR_ICR 0x40000071 |
37 | 0 | #define HV_X64_MSR_TPR 0x40000072 |
38 | 0 | #define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073 |
39 | | #define HV_X64_MSR_SCONTROL 0x40000080 |
40 | | #define HV_X64_MSR_SVERSION 0x40000081 |
41 | | #define HV_X64_MSR_SIEFP 0x40000082 |
42 | | #define HV_X64_MSR_SIMP 0x40000083 |
43 | | #define HV_X64_MSR_EOM 0x40000084 |
44 | | #define HV_X64_MSR_SINT0 0x40000090 |
45 | | #define HV_X64_MSR_SINT1 0x40000091 |
46 | | #define HV_X64_MSR_SINT2 0x40000092 |
47 | | #define HV_X64_MSR_SINT3 0x40000093 |
48 | | #define HV_X64_MSR_SINT4 0x40000094 |
49 | | #define HV_X64_MSR_SINT5 0x40000095 |
50 | | #define HV_X64_MSR_SINT6 0x40000096 |
51 | | #define HV_X64_MSR_SINT7 0x40000097 |
52 | | #define HV_X64_MSR_SINT8 0x40000098 |
53 | | #define HV_X64_MSR_SINT9 0x40000099 |
54 | | #define HV_X64_MSR_SINT10 0x4000009A |
55 | | #define HV_X64_MSR_SINT11 0x4000009B |
56 | | #define HV_X64_MSR_SINT12 0x4000009C |
57 | | #define HV_X64_MSR_SINT13 0x4000009D |
58 | | #define HV_X64_MSR_SINT14 0x4000009E |
59 | | #define HV_X64_MSR_SINT15 0x4000009F |
60 | | #define HV_X64_MSR_STIMER0_CONFIG 0x400000B0 |
61 | | #define HV_X64_MSR_STIMER0_COUNT 0x400000B1 |
62 | | #define HV_X64_MSR_STIMER1_CONFIG 0x400000B2 |
63 | | #define HV_X64_MSR_STIMER1_COUNT 0x400000B3 |
64 | | #define HV_X64_MSR_STIMER2_CONFIG 0x400000B4 |
65 | | #define HV_X64_MSR_STIMER2_COUNT 0x400000B5 |
66 | | #define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 |
67 | | #define HV_X64_MSR_STIMER3_COUNT 0x400000B7 |
68 | | #define HV_X64_MSR_POWER_STATE_TRIGGER_C1 0x400000C1 |
69 | | #define HV_X64_MSR_POWER_STATE_TRIGGER_C2 0x400000C2 |
70 | | #define HV_X64_MSR_POWER_STATE_TRIGGER_C3 0x400000C3 |
71 | | #define HV_X64_MSR_POWER_STATE_CONFIG_C1 0x400000D1 |
72 | | #define HV_X64_MSR_POWER_STATE_CONFIG_C2 0x400000D2 |
73 | | #define HV_X64_MSR_POWER_STATE_CONFIG_C3 0x400000D3 |
74 | | #define HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE 0x400000E0 |
75 | | #define HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE 0x400000E1 |
76 | | #define HV_X64_MSR_STATS_VP_RETAIL_PAGE 0x400000E2 |
77 | | #define HV_X64_MSR_STATS_VP_INTERNAL_PAGE 0x400000E3 |
78 | | #define HV_X64_MSR_GUEST_IDLE 0x400000F0 |
79 | | #define HV_X64_MSR_SYNTH_DEBUG_CONTROL 0x400000F1 |
80 | | #define HV_X64_MSR_SYNTH_DEBUG_STATUS 0x400000F2 |
81 | | #define HV_X64_MSR_SYNTH_DEBUG_SEND_BUFFER 0x400000F3 |
82 | | #define HV_X64_MSR_SYNTH_DEBUG_RECEIVE_BUFFER 0x400000F4 |
83 | | #define HV_X64_MSR_SYNTH_DEBUG_PENDING_BUFFER 0x400000F5 |
84 | 0 | #define HV_X64_MSR_CRASH_P0 0x40000100 |
85 | 0 | #define HV_X64_MSR_CRASH_P1 0x40000101 |
86 | 0 | #define HV_X64_MSR_CRASH_P2 0x40000102 |
87 | 0 | #define HV_X64_MSR_CRASH_P3 0x40000103 |
88 | 0 | #define HV_X64_MSR_CRASH_P4 0x40000104 |
89 | 0 | #define HV_X64_MSR_CRASH_CTL 0x40000105 |
90 | | |
91 | 0 | #define VIRIDIAN_MSR_MIN HV_X64_MSR_GUEST_OS_ID |
92 | 0 | #define VIRIDIAN_MSR_MAX HV_X64_MSR_CRASH_CTL |
93 | | |
94 | | /* Viridian Hypercall Status Codes. */ |
95 | 0 | #define HV_STATUS_SUCCESS 0x0000 |
96 | 0 | #define HV_STATUS_INVALID_HYPERCALL_CODE 0x0002 |
97 | 0 | #define HV_STATUS_INVALID_PARAMETER 0x0005 |
98 | | |
99 | | /* Viridian Hypercall Codes. */ |
100 | 0 | #define HvFlushVirtualAddressSpace 0x0002 |
101 | 0 | #define HvFlushVirtualAddressList 0x0003 |
102 | 0 | #define HvNotifyLongSpinWait 0x0008 |
103 | 0 | #define HvGetPartitionId 0x0046 |
104 | 0 | #define HvExtCallQueryCapabilities 0x8001 |
105 | | |
106 | | /* Viridian Hypercall Flags. */ |
107 | 0 | #define HV_FLUSH_ALL_PROCESSORS 1 |
108 | | |
109 | | /* |
110 | | * Viridian Partition Privilege Flags. |
111 | | * |
112 | | * This is taken from section 4.2.2 of the specification, and fixed for |
113 | | * style and correctness. |
114 | | */ |
/*
 * NOTE: this layout is exposed to the guest through CPUID leaf
 * 0x40000003 (EAX = low 32 bits, EBX = high 32 bits) — see
 * cpuid_viridian_leaves() below.
 */
typedef struct {
    /* Access to virtual MSRs */
    uint64_t AccessVpRunTimeReg:1;
    uint64_t AccessPartitionReferenceCounter:1;
    uint64_t AccessSynicRegs:1;
    uint64_t AccessSyntheticTimerRegs:1;
    uint64_t AccessIntrCtrlRegs:1;
    uint64_t AccessHypercallMsrs:1;
    uint64_t AccessVpIndex:1;
    uint64_t AccessResetReg:1;
    uint64_t AccessStatsReg:1;
    uint64_t AccessPartitionReferenceTsc:1;
    uint64_t AccessGuestIdleReg:1;
    uint64_t AccessFrequencyRegs:1;
    uint64_t AccessDebugRegs:1;
    uint64_t Reserved1:19;

    /* Access to hypercalls */
    uint64_t CreatePartitions:1;
    uint64_t AccessPartitionId:1;
    uint64_t AccessMemoryPool:1;
    uint64_t AdjustMessageBuffers:1;
    uint64_t PostMessages:1;
    uint64_t SignalEvents:1;
    uint64_t CreatePort:1;
    uint64_t ConnectPort:1;
    uint64_t AccessStats:1;
    uint64_t Reserved2:2;
    uint64_t Debugging:1;
    uint64_t CpuManagement:1;
    uint64_t Reserved3:1;
    uint64_t Reserved4:1;
    uint64_t Reserved5:1;
    uint64_t AccessVSM:1;
    uint64_t AccessVpRegisters:1;
    uint64_t Reserved6:1;
    uint64_t Reserved7:1;
    uint64_t EnableExtendedHypercalls:1;
    uint64_t StartVirtualProcessor:1;
    uint64_t Reserved8:10;
} HV_PARTITION_PRIVILEGE_MASK;
156 | | |
/* Layout of HV_X64_MSR_CRASH_CTL: CrashNotify is the top bit (63). */
typedef union _HV_CRASH_CTL_REG_CONTENTS
{
    uint64_t AsUINT64;
    struct
    {
        uint64_t Reserved:63;
        uint64_t CrashNotify:1;
    } u;
} HV_CRASH_CTL_REG_CONTENTS;
166 | | |
167 | | /* Viridian CPUID leaf 3, Hypervisor Feature Indication */ |
168 | 0 | #define CPUID3D_CRASH_MSRS (1 << 10) |
169 | | |
170 | | /* Viridian CPUID leaf 4: Implementation Recommendations. */ |
171 | 0 | #define CPUID4A_HCALL_REMOTE_TLB_FLUSH (1 << 2) |
172 | 0 | #define CPUID4A_MSR_BASED_APIC (1 << 3) |
173 | 0 | #define CPUID4A_RELAX_TIMER_INT (1 << 5) |
174 | | |
175 | | /* Viridian CPUID leaf 6: Implementation HW features detected and in use. */ |
176 | 0 | #define CPUID6A_APIC_OVERLAY (1 << 0) |
177 | 0 | #define CPUID6A_MSR_BITMAPS (1 << 1) |
178 | 0 | #define CPUID6A_NESTED_PAGING (1 << 3) |
179 | | |
/*
 * Version and build number reported by CPUID leaf 2
 *
 * These numbers are chosen to match the version numbers reported by
 * Windows Server 2008.
 */
static uint16_t __read_mostly viridian_major = 6;
static uint16_t __read_mostly viridian_minor = 0;
static uint32_t __read_mostly viridian_build = 0x1772;

/*
 * Maximum number of retries before the guest will notify of failure
 * to acquire a spinlock.
 *
 * Reported to the guest via CPUID leaf 4 (EBX); tunable on the Xen
 * command line via "viridian-spinlock-retry-count".
 */
static uint32_t __read_mostly viridian_spinlock_retry_count = 2047;
integer_param("viridian-spinlock-retry-count",
              viridian_spinlock_retry_count);
197 | | |
198 | | void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf, |
199 | | uint32_t subleaf, struct cpuid_leaf *res) |
200 | 0 | { |
201 | 0 | const struct domain *d = v->domain; |
202 | 0 |
|
203 | 0 | ASSERT(is_viridian_domain(d)); |
204 | 0 | ASSERT(leaf >= 0x40000000 && leaf < 0x40000100); |
205 | 0 |
|
206 | 0 | leaf -= 0x40000000; |
207 | 0 |
|
208 | 0 | switch ( leaf ) |
209 | 0 | { |
210 | 0 | case 0: |
211 | 0 | /* See section 2.4.1 of the specification */ |
212 | 0 | res->a = 0x40000006; /* Maximum leaf */ |
213 | 0 | memcpy(&res->b, "Micr", 4); |
214 | 0 | memcpy(&res->c, "osof", 4); |
215 | 0 | memcpy(&res->d, "t Hv", 4); |
216 | 0 | break; |
217 | 0 |
|
218 | 0 | case 1: |
219 | 0 | /* See section 2.4.2 of the specification */ |
220 | 0 | memcpy(&res->a, "Hv#1", 4); |
221 | 0 | break; |
222 | 0 |
|
223 | 0 | case 2: |
224 | 0 | /* Hypervisor information, but only if the guest has set its |
225 | 0 | own version number. */ |
226 | 0 | if ( d->arch.hvm_domain.viridian.guest_os_id.raw == 0 ) |
227 | 0 | break; |
228 | 0 | res->a = viridian_build; |
229 | 0 | res->b = ((uint32_t)viridian_major << 16) | viridian_minor; |
230 | 0 | res->c = 0; /* SP */ |
231 | 0 | res->d = 0; /* Service branch and number */ |
232 | 0 | break; |
233 | 0 |
|
234 | 0 | case 3: |
235 | 0 | { |
236 | 0 | /* |
237 | 0 | * Section 2.4.4 details this leaf and states that EAX and EBX |
238 | 0 | * are defined to be the low and high parts of the partition |
239 | 0 | * privilege mask respectively. |
240 | 0 | */ |
241 | 0 | HV_PARTITION_PRIVILEGE_MASK mask = { |
242 | 0 | .AccessIntrCtrlRegs = 1, |
243 | 0 | .AccessHypercallMsrs = 1, |
244 | 0 | .AccessVpIndex = 1, |
245 | 0 | }; |
246 | 0 | union { |
247 | 0 | HV_PARTITION_PRIVILEGE_MASK mask; |
248 | 0 | uint32_t lo, hi; |
249 | 0 | } u; |
250 | 0 |
|
251 | 0 | if ( !(viridian_feature_mask(d) & HVMPV_no_freq) ) |
252 | 0 | mask.AccessFrequencyRegs = 1; |
253 | 0 | if ( viridian_feature_mask(d) & HVMPV_time_ref_count ) |
254 | 0 | mask.AccessPartitionReferenceCounter = 1; |
255 | 0 | if ( viridian_feature_mask(d) & HVMPV_reference_tsc ) |
256 | 0 | mask.AccessPartitionReferenceTsc = 1; |
257 | 0 |
|
258 | 0 | u.mask = mask; |
259 | 0 |
|
260 | 0 | res->a = u.lo; |
261 | 0 | res->b = u.hi; |
262 | 0 |
|
263 | 0 | if ( viridian_feature_mask(d) & HVMPV_crash_ctl ) |
264 | 0 | res->d = CPUID3D_CRASH_MSRS; |
265 | 0 |
|
266 | 0 | break; |
267 | 0 | } |
268 | 0 |
|
269 | 0 | case 4: |
270 | 0 | /* Recommended hypercall usage. */ |
271 | 0 | if ( (d->arch.hvm_domain.viridian.guest_os_id.raw == 0) || |
272 | 0 | (d->arch.hvm_domain.viridian.guest_os_id.fields.os < 4) ) |
273 | 0 | break; |
274 | 0 | res->a = CPUID4A_RELAX_TIMER_INT; |
275 | 0 | if ( viridian_feature_mask(d) & HVMPV_hcall_remote_tlb_flush ) |
276 | 0 | res->a |= CPUID4A_HCALL_REMOTE_TLB_FLUSH; |
277 | 0 | if ( !cpu_has_vmx_apic_reg_virt ) |
278 | 0 | res->a |= CPUID4A_MSR_BASED_APIC; |
279 | 0 |
|
280 | 0 | /* |
281 | 0 | * This value is the recommended number of attempts to try to |
282 | 0 | * acquire a spinlock before notifying the hypervisor via the |
283 | 0 | * HvNotifyLongSpinWait hypercall. |
284 | 0 | */ |
285 | 0 | res->b = viridian_spinlock_retry_count; |
286 | 0 | break; |
287 | 0 |
|
288 | 0 | case 6: |
289 | 0 | /* Detected and in use hardware features. */ |
290 | 0 | if ( cpu_has_vmx_virtualize_apic_accesses ) |
291 | 0 | res->a |= CPUID6A_APIC_OVERLAY; |
292 | 0 | if ( cpu_has_vmx_msr_bitmap || (read_efer() & EFER_SVME) ) |
293 | 0 | res->a |= CPUID6A_MSR_BITMAPS; |
294 | 0 | if ( hap_enabled(d) ) |
295 | 0 | res->a |= CPUID6A_NESTED_PAGING; |
296 | 0 | break; |
297 | 0 | } |
298 | 0 | } |
299 | | |
300 | | static void dump_guest_os_id(const struct domain *d) |
301 | 0 | { |
302 | 0 | const union viridian_guest_os_id *goi; |
303 | 0 |
|
304 | 0 | goi = &d->arch.hvm_domain.viridian.guest_os_id; |
305 | 0 |
|
306 | 0 | printk(XENLOG_G_INFO |
307 | 0 | "d%d: VIRIDIAN GUEST_OS_ID: vendor: %x os: %x major: %x minor: %x sp: %x build: %x\n", |
308 | 0 | d->domain_id, |
309 | 0 | goi->fields.vendor, goi->fields.os, |
310 | 0 | goi->fields.major, goi->fields.minor, |
311 | 0 | goi->fields.service_pack, goi->fields.build_number); |
312 | 0 | } |
313 | | |
314 | | static void dump_hypercall(const struct domain *d) |
315 | 0 | { |
316 | 0 | const union viridian_hypercall_gpa *hg; |
317 | 0 |
|
318 | 0 | hg = &d->arch.hvm_domain.viridian.hypercall_gpa; |
319 | 0 |
|
320 | 0 | printk(XENLOG_G_INFO "d%d: VIRIDIAN HYPERCALL: enabled: %x pfn: %lx\n", |
321 | 0 | d->domain_id, |
322 | 0 | hg->fields.enabled, (unsigned long)hg->fields.pfn); |
323 | 0 | } |
324 | | |
325 | | static void dump_vp_assist(const struct vcpu *v) |
326 | 0 | { |
327 | 0 | const union viridian_vp_assist *va; |
328 | 0 |
|
329 | 0 | va = &v->arch.hvm_vcpu.viridian.vp_assist.msr; |
330 | 0 |
|
331 | 0 | printk(XENLOG_G_INFO "%pv: VIRIDIAN VP_ASSIST_PAGE: enabled: %x pfn: %lx\n", |
332 | 0 | v, va->fields.enabled, (unsigned long)va->fields.pfn); |
333 | 0 | } |
334 | | |
335 | | static void dump_reference_tsc(const struct domain *d) |
336 | 0 | { |
337 | 0 | const union viridian_reference_tsc *rt; |
338 | 0 |
|
339 | 0 | rt = &d->arch.hvm_domain.viridian.reference_tsc; |
340 | 0 | |
341 | 0 | printk(XENLOG_G_INFO "d%d: VIRIDIAN REFERENCE_TSC: enabled: %x pfn: %lx\n", |
342 | 0 | d->domain_id, |
343 | 0 | rt->fields.enabled, (unsigned long)rt->fields.pfn); |
344 | 0 | } |
345 | | |
346 | | static void enable_hypercall_page(struct domain *d) |
347 | 0 | { |
348 | 0 | unsigned long gmfn = d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn; |
349 | 0 | struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC); |
350 | 0 | uint8_t *p; |
351 | 0 |
|
352 | 0 | if ( !page || !get_page_type(page, PGT_writable_page) ) |
353 | 0 | { |
354 | 0 | if ( page ) |
355 | 0 | put_page(page); |
356 | 0 | gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", |
357 | 0 | gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN)); |
358 | 0 | return; |
359 | 0 | } |
360 | 0 |
|
361 | 0 | p = __map_domain_page(page); |
362 | 0 |
|
363 | 0 | /* |
364 | 0 | * We set the bit 31 in %eax (reserved field in the Viridian hypercall |
365 | 0 | * calling convention) to differentiate Xen and Viridian hypercalls. |
366 | 0 | */ |
367 | 0 | *(u8 *)(p + 0) = 0x0d; /* orl $0x80000000, %eax */ |
368 | 0 | *(u32 *)(p + 1) = 0x80000000; |
369 | 0 | *(u8 *)(p + 5) = 0x0f; /* vmcall/vmmcall */ |
370 | 0 | *(u8 *)(p + 6) = 0x01; |
371 | 0 | *(u8 *)(p + 7) = (cpu_has_vmx ? 0xc1 : 0xd9); |
372 | 0 | *(u8 *)(p + 8) = 0xc3; /* ret */ |
373 | 0 | memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */ |
374 | 0 |
|
375 | 0 | unmap_domain_page(p); |
376 | 0 |
|
377 | 0 | put_page_and_type(page); |
378 | 0 | } |
379 | | |
380 | | static void initialize_vp_assist(struct vcpu *v) |
381 | 0 | { |
382 | 0 | struct domain *d = v->domain; |
383 | 0 | unsigned long gmfn = v->arch.hvm_vcpu.viridian.vp_assist.msr.fields.pfn; |
384 | 0 | struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC); |
385 | 0 | void *va; |
386 | 0 |
|
387 | 0 | ASSERT(!v->arch.hvm_vcpu.viridian.vp_assist.va); |
388 | 0 |
|
389 | 0 | /* |
390 | 0 | * See section 7.8.7 of the specification for details of this |
391 | 0 | * enlightenment. |
392 | 0 | */ |
393 | 0 |
|
394 | 0 | if ( !page ) |
395 | 0 | goto fail; |
396 | 0 |
|
397 | 0 | if ( !get_page_type(page, PGT_writable_page) ) |
398 | 0 | { |
399 | 0 | put_page(page); |
400 | 0 | goto fail; |
401 | 0 | } |
402 | 0 |
|
403 | 0 | va = __map_domain_page_global(page); |
404 | 0 | if ( !va ) |
405 | 0 | { |
406 | 0 | put_page_and_type(page); |
407 | 0 | goto fail; |
408 | 0 | } |
409 | 0 |
|
410 | 0 | clear_page(va); |
411 | 0 |
|
412 | 0 | v->arch.hvm_vcpu.viridian.vp_assist.va = va; |
413 | 0 | return; |
414 | 0 |
|
415 | 0 | fail: |
416 | 0 | gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", gmfn, |
417 | 0 | page ? page_to_mfn(page) : mfn_x(INVALID_MFN)); |
418 | 0 | } |
419 | | |
420 | | static void teardown_vp_assist(struct vcpu *v) |
421 | 0 | { |
422 | 0 | void *va = v->arch.hvm_vcpu.viridian.vp_assist.va; |
423 | 0 | struct page_info *page; |
424 | 0 |
|
425 | 0 | if ( !va ) |
426 | 0 | return; |
427 | 0 |
|
428 | 0 | v->arch.hvm_vcpu.viridian.vp_assist.va = NULL; |
429 | 0 |
|
430 | 0 | page = mfn_to_page(domain_page_map_to_mfn(va)); |
431 | 0 |
|
432 | 0 | unmap_domain_page_global(va); |
433 | 0 | put_page_and_type(page); |
434 | 0 | } |
435 | | |
436 | | void viridian_start_apic_assist(struct vcpu *v, int vector) |
437 | 0 | { |
438 | 0 | uint32_t *va = v->arch.hvm_vcpu.viridian.vp_assist.va; |
439 | 0 |
|
440 | 0 | if ( !va ) |
441 | 0 | return; |
442 | 0 |
|
443 | 0 | if ( vector < 0x10 ) |
444 | 0 | return; |
445 | 0 |
|
446 | 0 | /* |
447 | 0 | * If there is already an assist pending then something has gone |
448 | 0 | * wrong and the VM will most likely hang so force a crash now |
449 | 0 | * to make the problem clear. |
450 | 0 | */ |
451 | 0 | if ( v->arch.hvm_vcpu.viridian.vp_assist.vector ) |
452 | 0 | domain_crash(v->domain); |
453 | 0 |
|
454 | 0 | v->arch.hvm_vcpu.viridian.vp_assist.vector = vector; |
455 | 0 | *va |= 1u; |
456 | 0 | } |
457 | | |
458 | | int viridian_complete_apic_assist(struct vcpu *v) |
459 | 0 | { |
460 | 0 | uint32_t *va = v->arch.hvm_vcpu.viridian.vp_assist.va; |
461 | 0 | int vector; |
462 | 0 |
|
463 | 0 | if ( !va ) |
464 | 0 | return 0; |
465 | 0 |
|
466 | 0 | if ( *va & 1u ) |
467 | 0 | return 0; /* Interrupt not yet processed by the guest. */ |
468 | 0 |
|
469 | 0 | vector = v->arch.hvm_vcpu.viridian.vp_assist.vector; |
470 | 0 | v->arch.hvm_vcpu.viridian.vp_assist.vector = 0; |
471 | 0 |
|
472 | 0 | return vector; |
473 | 0 | } |
474 | | |
475 | | void viridian_abort_apic_assist(struct vcpu *v) |
476 | 0 | { |
477 | 0 | uint32_t *va = v->arch.hvm_vcpu.viridian.vp_assist.va; |
478 | 0 |
|
479 | 0 | if ( !va ) |
480 | 0 | return; |
481 | 0 |
|
482 | 0 | *va &= ~1u; |
483 | 0 | v->arch.hvm_vcpu.viridian.vp_assist.vector = 0; |
484 | 0 | } |
485 | | |
/*
 * (Re)populate the reference TSC page nominated by the guest via
 * HV_X64_MSR_REFERENCE_TSC. When 'initialize' is set the page is zeroed
 * first. A bad GFN is logged and otherwise ignored.
 */
static void update_reference_tsc(struct domain *d, bool_t initialize)
{
    unsigned long gmfn = d->arch.hvm_domain.viridian.reference_tsc.fields.pfn;
    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
    HV_REFERENCE_TSC_PAGE *p;

    if ( !page || !get_page_type(page, PGT_writable_page) )
    {
        if ( page )
            put_page(page);
        gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
                 gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
        return;
    }

    p = __map_domain_page(page);

    if ( initialize )
        clear_page(p);

    /*
     * This enlightenment must be disabled if the host TSC is not invariant.
     * However it is also disabled if vtsc is true (which means rdtsc is being
     * emulated). This generally happens when guest TSC freq and host TSC freq
     * don't match. The TscScale value could be adjusted to cope with this,
     * allowing vtsc to be turned off, but support for this is not yet present
     * in the hypervisor. Thus it is possible that migrating a Windows VM
     * between hosts of differing TSC frequencies may result in large
     * differences in guest performance.
     */
    if ( !host_tsc_is_safe() || d->arch.vtsc )
    {
        /*
         * The specification states that valid values of TscSequence range
         * from 0 to 0xFFFFFFFE. The value 0xFFFFFFFF is used to indicate
         * this mechanism is no longer a reliable source of time and that
         * the VM should fall back to a different source.
         *
         * Server 2012 (6.2 kernel) and 2012 R2 (6.3 kernel) actually violate
         * the spec. and rely on a value of 0 to indicate that this
         * enlightenment should no longer be used. These two kernel
         * versions are currently the only ones to make use of this
         * enlightenment, so just use 0 here.
         */
        p->TscSequence = 0;

        printk(XENLOG_G_INFO "d%d: VIRIDIAN REFERENCE_TSC: invalidated\n",
               d->domain_id);
        goto out;
    }

    /*
     * The guest will calculate reference time according to the following
     * formula:
     *
     * ReferenceTime = ((RDTSC() * TscScale) >> 64) + TscOffset
     *
     * Windows uses a 100ns tick, so we need a scale which is cpu
     * ticks per 100ns shifted left by 64.
     */
    p->TscScale = ((10000ul << 32) / d->arch.tsc_khz) << 32;

    /* Bump the sequence so the guest notices the page content changed. */
    p->TscSequence++;
    if ( p->TscSequence == 0xFFFFFFFF ||
         p->TscSequence == 0 ) /* Avoid both 'invalid' values */
        p->TscSequence = 1;

 out:
    unmap_domain_page(p);

    put_page_and_type(page);
}
558 | | |
/*
 * Handle a guest write of 'val' to Viridian MSR 'idx' on the current
 * vcpu.
 *
 * Returns 1 if the write was handled here; 0 if it was not (the domain
 * has no viridian features enabled, the relevant feature is not in the
 * domain's viridian feature mask, or the MSR is unimplemented), leaving
 * the caller to complete the access.
 */
int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;

    if ( !is_viridian_domain(d) )
        return 0;

    switch ( idx )
    {
    case HV_X64_MSR_GUEST_OS_ID:
        perfc_incr(mshv_wrmsr_osid);
        d->arch.hvm_domain.viridian.guest_os_id.raw = val;
        dump_guest_os_id(d);
        break;

    case HV_X64_MSR_HYPERCALL:
        /* Writing an 'enabled' value (re)writes the hypercall stub page. */
        perfc_incr(mshv_wrmsr_hc_page);
        d->arch.hvm_domain.viridian.hypercall_gpa.raw = val;
        dump_hypercall(d);
        if ( d->arch.hvm_domain.viridian.hypercall_gpa.fields.enabled )
            enable_hypercall_page(d);
        break;

    case HV_X64_MSR_VP_INDEX:
        /* Read-only from the guest's point of view: the write is ignored. */
        perfc_incr(mshv_wrmsr_vp_index);
        break;

    case HV_X64_MSR_EOI:
        perfc_incr(mshv_wrmsr_eoi);
        vlapic_EOI_set(vcpu_vlapic(v));
        break;

    case HV_X64_MSR_ICR: {
        u32 eax = (u32)val, edx = (u32)(val >> 32);
        struct vlapic *vlapic = vcpu_vlapic(v);
        perfc_incr(mshv_wrmsr_icr);
        eax &= ~(1 << 12);   /* mask out the (read-only) delivery status bit */
        edx &= 0xff000000;   /* only the destination field of ICR2 is valid */
        vlapic_set_reg(vlapic, APIC_ICR2, edx);
        vlapic_ipi(vlapic, eax, edx);
        vlapic_set_reg(vlapic, APIC_ICR, eax);
        break;
    }

    case HV_X64_MSR_TPR:
        perfc_incr(mshv_wrmsr_tpr);
        vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI, (uint8_t)val);
        break;

    case HV_X64_MSR_VP_ASSIST_PAGE:
        perfc_incr(mshv_wrmsr_apic_msr);
        teardown_vp_assist(v); /* release any previous mapping */
        v->arch.hvm_vcpu.viridian.vp_assist.msr.raw = val;
        dump_vp_assist(v);
        if ( v->arch.hvm_vcpu.viridian.vp_assist.msr.fields.enabled )
            initialize_vp_assist(v);
        break;

    case HV_X64_MSR_REFERENCE_TSC:
        /* Only available when the feature has been enabled for the domain. */
        if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
            return 0;

        perfc_incr(mshv_wrmsr_tsc_msr);
        d->arch.hvm_domain.viridian.reference_tsc.raw = val;
        dump_reference_tsc(d);
        if ( d->arch.hvm_domain.viridian.reference_tsc.fields.enabled )
            update_reference_tsc(d, 1);
        break;

    case HV_X64_MSR_CRASH_P0:
    case HV_X64_MSR_CRASH_P1:
    case HV_X64_MSR_CRASH_P2:
    case HV_X64_MSR_CRASH_P3:
    case HV_X64_MSR_CRASH_P4:
        BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
                     ARRAY_SIZE(v->arch.hvm_vcpu.viridian.crash_param));

        /* Stash the crash parameter for reporting via CRASH_CTL below. */
        idx -= HV_X64_MSR_CRASH_P0;
        v->arch.hvm_vcpu.viridian.crash_param[idx] = val;
        break;

    case HV_X64_MSR_CRASH_CTL:
    {
        HV_CRASH_CTL_REG_CONTENTS ctl;

        ctl.AsUINT64 = val;

        if ( !ctl.u.CrashNotify )
            break;

        /* The guest has crashed: log the previously written parameters. */
        gprintk(XENLOG_WARNING, "VIRIDIAN CRASH: %lx %lx %lx %lx %lx\n",
                v->arch.hvm_vcpu.viridian.crash_param[0],
                v->arch.hvm_vcpu.viridian.crash_param[1],
                v->arch.hvm_vcpu.viridian.crash_param[2],
                v->arch.hvm_vcpu.viridian.crash_param[3],
                v->arch.hvm_vcpu.viridian.crash_param[4]);
        break;
    }

    default:
        if ( idx >= VIRIDIAN_MSR_MIN && idx <= VIRIDIAN_MSR_MAX )
            gprintk(XENLOG_WARNING, "write to unimplemented MSR %#x\n",
                    idx);

        return 0;
    }

    return 1;
}
669 | | |
670 | | static int64_t raw_trc_val(struct domain *d) |
671 | 0 | { |
672 | 0 | uint64_t tsc; |
673 | 0 | struct time_scale tsc_to_ns; |
674 | 0 |
|
675 | 0 | tsc = hvm_get_guest_tsc(pt_global_vcpu_target(d)); |
676 | 0 |
|
677 | 0 | /* convert tsc to count of 100ns periods */ |
678 | 0 | set_time_scale(&tsc_to_ns, d->arch.tsc_khz * 1000ul); |
679 | 0 | return scale_delta(tsc, &tsc_to_ns) / 100ul; |
680 | 0 | } |
681 | | |
682 | | void viridian_time_ref_count_freeze(struct domain *d) |
683 | 0 | { |
684 | 0 | struct viridian_time_ref_count *trc; |
685 | 0 |
|
686 | 0 | trc = &d->arch.hvm_domain.viridian.time_ref_count; |
687 | 0 |
|
688 | 0 | if ( test_and_clear_bit(_TRC_running, &trc->flags) ) |
689 | 0 | trc->val = raw_trc_val(d) + trc->off; |
690 | 0 | } |
691 | | |
692 | | void viridian_time_ref_count_thaw(struct domain *d) |
693 | 0 | { |
694 | 0 | struct viridian_time_ref_count *trc; |
695 | 0 |
|
696 | 0 | trc = &d->arch.hvm_domain.viridian.time_ref_count; |
697 | 0 |
|
698 | 0 | if ( !d->is_shutting_down && |
699 | 0 | !test_and_set_bit(_TRC_running, &trc->flags) ) |
700 | 0 | trc->off = (int64_t)trc->val - raw_trc_val(d); |
701 | 0 | } |
702 | | |
/*
 * Handle a guest read of Viridian MSR 'idx' on the current vcpu,
 * storing the result in *val.
 *
 * Returns 1 if the read was handled here; 0 if it was not (the domain
 * has no viridian features enabled, the relevant feature is not in the
 * domain's viridian feature mask, or the MSR is unimplemented), leaving
 * the caller to complete the access.
 */
int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;

    if ( !is_viridian_domain(d) )
        return 0;

    switch ( idx )
    {
    case HV_X64_MSR_GUEST_OS_ID:
        perfc_incr(mshv_rdmsr_osid);
        *val = d->arch.hvm_domain.viridian.guest_os_id.raw;
        break;

    case HV_X64_MSR_HYPERCALL:
        perfc_incr(mshv_rdmsr_hc_page);
        *val = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
        break;

    case HV_X64_MSR_VP_INDEX:
        /* The VP index is simply the vcpu id. */
        perfc_incr(mshv_rdmsr_vp_index);
        *val = v->vcpu_id;
        break;

    case HV_X64_MSR_TSC_FREQUENCY:
        if ( viridian_feature_mask(d) & HVMPV_no_freq )
            return 0;

        perfc_incr(mshv_rdmsr_tsc_frequency);
        *val = (uint64_t)d->arch.tsc_khz * 1000ull; /* kHz -> Hz */
        break;

    case HV_X64_MSR_APIC_FREQUENCY:
        if ( viridian_feature_mask(d) & HVMPV_no_freq )
            return 0;

        perfc_incr(mshv_rdmsr_apic_frequency);
        *val = 1000000000ull / APIC_BUS_CYCLE_NS;
        break;

    case HV_X64_MSR_ICR:
        /* Combine ICR2 (high) and ICR (low) into a single 64-bit value. */
        perfc_incr(mshv_rdmsr_icr);
        *val = (((uint64_t)vlapic_get_reg(vcpu_vlapic(v), APIC_ICR2) << 32) |
                vlapic_get_reg(vcpu_vlapic(v), APIC_ICR));
        break;

    case HV_X64_MSR_TPR:
        perfc_incr(mshv_rdmsr_tpr);
        *val = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI);
        break;

    case HV_X64_MSR_VP_ASSIST_PAGE:
        perfc_incr(mshv_rdmsr_apic_msr);
        *val = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw;
        break;

    case HV_X64_MSR_REFERENCE_TSC:
        if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
            return 0;

        perfc_incr(mshv_rdmsr_tsc_msr);
        *val = d->arch.hvm_domain.viridian.reference_tsc.raw;
        break;

    case HV_X64_MSR_TIME_REF_COUNT:
    {
        struct viridian_time_ref_count *trc;

        trc = &d->arch.hvm_domain.viridian.time_ref_count;

        if ( !(viridian_feature_mask(d) & HVMPV_time_ref_count) )
            return 0;

        /* Log (once) the first time the guest uses this enlightenment. */
        if ( !test_and_set_bit(_TRC_accessed, &trc->flags) )
            printk(XENLOG_G_INFO "d%d: VIRIDIAN MSR_TIME_REF_COUNT: accessed\n",
                   d->domain_id);

        perfc_incr(mshv_rdmsr_time_ref_count);
        *val = raw_trc_val(d) + trc->off;
        break;
    }

    case HV_X64_MSR_CRASH_P0:
    case HV_X64_MSR_CRASH_P1:
    case HV_X64_MSR_CRASH_P2:
    case HV_X64_MSR_CRASH_P3:
    case HV_X64_MSR_CRASH_P4:
        BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
                     ARRAY_SIZE(v->arch.hvm_vcpu.viridian.crash_param));

        idx -= HV_X64_MSR_CRASH_P0;
        *val = v->arch.hvm_vcpu.viridian.crash_param[idx];
        break;

    case HV_X64_MSR_CRASH_CTL:
    {
        /* Report that crash notification is supported. */
        HV_CRASH_CTL_REG_CONTENTS ctl = {
            .u.CrashNotify = 1,
        };

        *val = ctl.AsUINT64;
        break;
    }

    default:
        if ( idx >= VIRIDIAN_MSR_MIN && idx <= VIRIDIAN_MSR_MAX )
            gprintk(XENLOG_WARNING, "read from unimplemented MSR %#x\n",
                    idx);

        return 0;
    }

    return 1;
}
818 | | |
/* Release per-vcpu viridian state (currently just the VP assist page). */
void viridian_vcpu_deinit(struct vcpu *v)
{
    teardown_vp_assist(v);
}
823 | | |
/* Release per-domain viridian state: tear down every vcpu's assist page. */
void viridian_domain_deinit(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
        teardown_vp_assist(v);
}
831 | | |
832 | | static DEFINE_PER_CPU(cpumask_t, ipi_cpumask); |
833 | | |
/*
 * Top-level handler for Viridian (Hyper-V) hypercalls issued via the
 * hypercall page.
 *
 * Decodes the hypercall input value and parameter GPAs according to the
 * guest's execution mode (64-bit passes them in single registers, 32-bit
 * splits each 64-bit value across a register pair), dispatches on the call
 * code, then writes the 64-bit output value back via the mode-appropriate
 * register(s).  Always reports the hypercall as completed.
 */
int viridian_hypercall(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;
    int mode = hvm_guest_x86_mode(curr);
    unsigned long input_params_gpa, output_params_gpa;
    uint16_t status = HV_STATUS_SUCCESS;

    /* Hypercall input value layout, per the TLFS referenced at file top. */
    union hypercall_input {
        uint64_t raw;
        struct {
            uint16_t call_code;
            uint16_t fast:1;       /* Set => register-based calling convention. */
            uint16_t rsvd1:15;
            uint16_t rep_count:12; /* Total repetitions for rep hypercalls. */
            uint16_t rsvd2:4;
            uint16_t rep_start:12; /* First repetition to process. */
            uint16_t rsvd3:4;
        };
    } input;

    /* Hypercall output value layout, per the TLFS. */
    union hypercall_output {
        uint64_t raw;
        struct {
            uint16_t result;          /* HV_STATUS_* code returned to the guest. */
            uint16_t rsvd1;
            uint32_t rep_complete:12; /* Repetitions actually completed. */
            uint32_t rsvd2:20;
        };
    } output = { 0 };

    ASSERT(is_viridian_domain(currd));

    /* Fetch the input value and parameter GPAs per calling convention. */
    switch ( mode )
    {
    case 8:
        /* 64-bit convention: whole values in single registers. */
        input.raw = regs->rcx;
        input_params_gpa = regs->rdx;
        output_params_gpa = regs->r8;
        break;
    case 4:
        /* 32-bit convention: each 64-bit value split high:low across a pair. */
        input.raw = (regs->rdx << 32) | regs->eax;
        input_params_gpa = (regs->rbx << 32) | regs->ecx;
        output_params_gpa = (regs->rdi << 32) | regs->esi;
        break;
    default:
        /* Unexpected guest mode: skip dispatch, complete with current status. */
        goto out;
    }

    switch ( input.call_code )
    {
    case HvNotifyLongSpinWait:
        /*
         * See section 14.5.1 of the specification.
         */
        perfc_incr(mshv_call_long_wait);
        /* The guest is spinning on a lock: yield the physical CPU. */
        do_sched_op(SCHEDOP_yield, guest_handle_from_ptr(NULL, void));
        status = HV_STATUS_SUCCESS;
        break;

    case HvFlushVirtualAddressSpace:
    case HvFlushVirtualAddressList:
    {
        cpumask_t *pcpu_mask;
        struct vcpu *v;
        struct {
            uint64_t address_space;
            uint64_t flags;
            uint64_t vcpu_mask;
        } input_params;

        /*
         * See sections 9.4.2 and 9.4.4 of the specification.
         */
        perfc_incr(mshv_call_flush);

        /* These hypercalls should never use the fast-call convention. */
        status = HV_STATUS_INVALID_PARAMETER;
        if ( input.fast )
            break;

        /* Get input parameters. */
        if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
                                      sizeof(input_params)) != HVMTRANS_okay )
            break;

        /*
         * It is not clear from the spec. if we are supposed to
         * include current virtual CPU in the set or not in this case,
         * so err on the safe side.
         */
        if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
            input_params.vcpu_mask = ~0ul;

        pcpu_mask = &this_cpu(ipi_cpumask);
        cpumask_clear(pcpu_mask);

        /*
         * For each specified virtual CPU flush all ASIDs to invalidate
         * TLB entries the next time it is scheduled and then, if it
         * is currently running, add its physical CPU to a mask of
         * those which need to be interrupted to force a flush.
         */
        for_each_vcpu ( currd, v )
        {
            /* vCPUs beyond the width of vcpu_mask cannot be specified. */
            if ( v->vcpu_id >= (sizeof(input_params.vcpu_mask) * 8) )
                break;

            if ( !(input_params.vcpu_mask & (1ul << v->vcpu_id)) )
                continue;

            hvm_asid_flush_vcpu(v);
            if ( v != curr && v->is_running )
                __cpumask_set_cpu(v->processor, pcpu_mask);
        }

        /*
         * Since ASIDs have now been flushed it just remains to
         * force any CPUs currently running target vCPUs out of non-
         * root mode. It's possible that re-scheduling has taken place
         * so we may unnecessarily IPI some CPUs.
         */
        if ( !cpumask_empty(pcpu_mask) )
            smp_send_event_check_mask(pcpu_mask);

        /* Report every requested repetition as completed. */
        output.rep_complete = input.rep_count;

        status = HV_STATUS_SUCCESS;
        break;
    }

    default:
        gprintk(XENLOG_WARNING, "unimplemented hypercall %04x\n",
                input.call_code);
        /* Fallthrough. */
    case HvGetPartitionId:
    case HvExtCallQueryCapabilities:
        /*
         * These hypercalls seem to be erroneously issued by Windows
         * despite neither AccessPartitionId nor EnableExtendedHypercalls
         * being set in CPUID leaf 2.
         * Given that return a status of 'invalid code' has not so far
         * caused any problems it's not worth logging.
         */
        status = HV_STATUS_INVALID_HYPERCALL_CODE;
        break;
    }

 out:
    /* Write the status into the output value and hand it back to the guest. */
    output.result = status;
    switch (mode) {
    case 8:
        regs->rax = output.raw;
        break;
    default:
        /* 32-bit caller: return the output value split across edx:eax. */
        regs->rdx = output.raw >> 32;
        regs->rax = (uint32_t)output.raw;
        break;
    }

    return HVM_HCALL_completed;
}
996 | | |
997 | | static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h) |
998 | 0 | { |
999 | 0 | struct hvm_viridian_domain_context ctxt = { |
1000 | 0 | .time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val, |
1001 | 0 | .hypercall_gpa = d->arch.hvm_domain.viridian.hypercall_gpa.raw, |
1002 | 0 | .guest_os_id = d->arch.hvm_domain.viridian.guest_os_id.raw, |
1003 | 0 | .reference_tsc = d->arch.hvm_domain.viridian.reference_tsc.raw, |
1004 | 0 | }; |
1005 | 0 |
|
1006 | 0 | if ( !is_viridian_domain(d) ) |
1007 | 0 | return 0; |
1008 | 0 |
|
1009 | 0 | return (hvm_save_entry(VIRIDIAN_DOMAIN, 0, h, &ctxt) != 0); |
1010 | 0 | } |
1011 | | |
1012 | | static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h) |
1013 | 0 | { |
1014 | 0 | struct hvm_viridian_domain_context ctxt; |
1015 | 0 |
|
1016 | 0 | if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 ) |
1017 | 0 | return -EINVAL; |
1018 | 0 |
|
1019 | 0 | d->arch.hvm_domain.viridian.time_ref_count.val = ctxt.time_ref_count; |
1020 | 0 | d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa; |
1021 | 0 | d->arch.hvm_domain.viridian.guest_os_id.raw = ctxt.guest_os_id; |
1022 | 0 | d->arch.hvm_domain.viridian.reference_tsc.raw = ctxt.reference_tsc; |
1023 | 0 |
|
1024 | 0 | if ( d->arch.hvm_domain.viridian.reference_tsc.fields.enabled ) |
1025 | 0 | update_reference_tsc(d, 0); |
1026 | 0 |
|
1027 | 0 | return 0; |
1028 | 0 | } |
1029 | | |
/* Register the domain-scope Viridian save/restore handlers (one record per domain). */
HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
                          viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
1032 | | |
1033 | | static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h) |
1034 | 0 | { |
1035 | 0 | struct vcpu *v; |
1036 | 0 |
|
1037 | 0 | if ( !is_viridian_domain(d) ) |
1038 | 0 | return 0; |
1039 | 0 |
|
1040 | 0 | for_each_vcpu( d, v ) { |
1041 | 0 | struct hvm_viridian_vcpu_context ctxt = { |
1042 | 0 | .vp_assist_msr = v->arch.hvm_vcpu.viridian.vp_assist.msr.raw, |
1043 | 0 | .vp_assist_vector = v->arch.hvm_vcpu.viridian.vp_assist.vector, |
1044 | 0 | }; |
1045 | 0 |
|
1046 | 0 | if ( hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt) != 0 ) |
1047 | 0 | return 1; |
1048 | 0 | } |
1049 | 0 |
|
1050 | 0 | return 0; |
1051 | 0 | } |
1052 | | |
/*
 * Load handler for per-vCPU Viridian state.
 *
 * Validates the record's target vCPU and padding, restores the VP assist
 * page MSR and vector, and re-establishes the VP assist page mapping if
 * the restored MSR value says it is enabled.
 */
static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid;
    struct vcpu *v;
    struct hvm_viridian_vcpu_context ctxt;

    /* The save record's instance number identifies the target vCPU. */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    {
        dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
                d->domain_id, vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry_zeroextend(VIRIDIAN_VCPU, h, &ctxt) != 0 )
        return -EINVAL;

    /*
     * Reject records whose padding is not all zeroes.
     * NOTE(review): zero_page is presumably a page of zero bytes defined
     * elsewhere in Xen — confirm it is at least sizeof(ctxt._pad) bytes.
     */
    if ( memcmp(&ctxt._pad, zero_page, sizeof(ctxt._pad)) )
        return -EINVAL;

    v->arch.hvm_vcpu.viridian.vp_assist.msr.raw = ctxt.vp_assist_msr;
    /* Re-map the VP assist page if enabled and not already mapped. */
    if ( v->arch.hvm_vcpu.viridian.vp_assist.msr.fields.enabled &&
         !v->arch.hvm_vcpu.viridian.vp_assist.va )
        initialize_vp_assist(v);

    v->arch.hvm_vcpu.viridian.vp_assist.vector = ctxt.vp_assist_vector;

    return 0;
}
1082 | | |
/* Register the per-vCPU Viridian save/restore handlers (one record per vCPU). */
HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt,
                          viridian_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
1085 | | |
1086 | | static int __init parse_viridian_version(const char *arg) |
1087 | 0 | { |
1088 | 0 | const char *t; |
1089 | 0 | unsigned int n[3]; |
1090 | 0 | unsigned int i = 0; |
1091 | 0 |
|
1092 | 0 | n[0] = viridian_major; |
1093 | 0 | n[1] = viridian_minor; |
1094 | 0 | n[2] = viridian_build; |
1095 | 0 |
|
1096 | 0 | do { |
1097 | 0 | const char *e; |
1098 | 0 |
|
1099 | 0 | t = strchr(arg, ','); |
1100 | 0 | if ( !t ) |
1101 | 0 | t = strchr(arg, '\0'); |
1102 | 0 |
|
1103 | 0 | if ( *arg && *arg != ',' && i < 3 ) |
1104 | 0 | { |
1105 | 0 | n[i] = simple_strtoul(arg, &e, 0); |
1106 | 0 | if ( e != t ) |
1107 | 0 | break; |
1108 | 0 | } |
1109 | 0 |
|
1110 | 0 | i++; |
1111 | 0 | arg = t + 1; |
1112 | 0 | } while ( *t ); |
1113 | 0 |
|
1114 | 0 | if ( i != 3 ) |
1115 | 0 | return -EINVAL; |
1116 | 0 |
|
1117 | 0 | if ( ((typeof(viridian_major))n[0] != n[0]) || |
1118 | 0 | ((typeof(viridian_minor))n[1] != n[1]) || |
1119 | 0 | ((typeof(viridian_build))n[2] != n[2]) ) |
1120 | 0 | return -EINVAL; |
1121 | 0 |
|
1122 | 0 | viridian_major = n[0]; |
1123 | 0 | viridian_minor = n[1]; |
1124 | 0 | viridian_build = n[2]; |
1125 | 0 |
|
1126 | 0 | printk("viridian-version = %#x,%#x,%#x\n", |
1127 | 0 | viridian_major, viridian_minor, viridian_build); |
1128 | 0 | return 0; |
1129 | 0 | } |
/* Hook the "viridian-version" command line option to its parser. */
custom_param("viridian-version", parse_viridian_version);
1131 | | |
1132 | | /* |
1133 | | * Local variables: |
1134 | | * mode: C |
1135 | | * c-file-style: "BSD" |
1136 | | * c-basic-offset: 4 |
1137 | | * tab-width: 4 |
1138 | | * indent-tabs-mode: nil |
1139 | | * End: |
1140 | | */ |