/root/src/xen/xen/arch/x86/platform_hypercall.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * platform_hypercall.c |
3 | | * |
4 | | * Hardware platform operations. Intended for use by domain-0 kernel. |
5 | | * |
6 | | * Copyright (c) 2002-2006, K Fraser |
7 | | */ |
8 | | |
9 | | #include <xen/types.h> |
10 | | #include <xen/lib.h> |
11 | | #include <xen/mm.h> |
12 | | #include <xen/sched.h> |
13 | | #include <xen/domain.h> |
14 | | #include <xen/event.h> |
15 | | #include <xen/domain_page.h> |
16 | | #include <xen/trace.h> |
17 | | #include <xen/console.h> |
18 | | #include <xen/iocap.h> |
19 | | #include <xen/guest_access.h> |
20 | | #include <xen/acpi.h> |
21 | | #include <xen/efi.h> |
22 | | #include <xen/cpu.h> |
23 | | #include <xen/pmstat.h> |
24 | | #include <xen/irq.h> |
25 | | #include <xen/symbols.h> |
26 | | #include <asm/current.h> |
27 | | #include <public/platform.h> |
28 | | #include <acpi/cpufreq/processor_perf.h> |
29 | | #include <asm/edd.h> |
30 | | #include <asm/mtrr.h> |
31 | | #include <asm/io_apic.h> |
32 | | #include <asm/setup.h> |
33 | | #include "cpu/mtrr/mtrr.h" |
34 | | #include <xsm/xsm.h> |
35 | | |
36 | | /* Declarations for items shared with the compat mode handler. */ |
37 | | extern spinlock_t xenpf_lock; |
38 | | |
/* Upper bound on entries accepted per XENPF_resource_op hypercall. */
#define RESOURCE_ACCESS_MAX_ENTRIES 3
/*
 * In-flight state for one XENPF_resource_op request.  @entries is a
 * Xen-heap copy of the guest's array; @nr_entries is its length and
 * @nr_done the number of entries successfully validated/processed.
 */
struct resource_access {
    unsigned int nr_done;
    unsigned int nr_entries;
    xenpf_resource_entry_t *entries;
};

/* Shared with the compat-mode handler, hence external linkage. */
long cpu_frequency_change_helper(void *);
void check_resource_access(struct resource_access *);
void resource_access(void *);
49 | | |
#ifndef COMPAT
typedef long ret_t;
DEFINE_SPINLOCK(xenpf_lock);
/*
 * In the native (non-COMPAT) build the "compat" accessors used below are
 * simply the plain guest accessors; the compat translation unit redefines
 * them to perform 32-bit layout conversion.
 */
# undef copy_from_compat
# define copy_from_compat copy_from_guest
# undef copy_to_compat
# define copy_to_compat copy_to_guest
# undef guest_from_compat_handle
# define guest_from_compat_handle(x,y) ((x)=(y))
59 | | |
60 | | long cpu_frequency_change_helper(void *data) |
61 | 0 | { |
62 | 0 | return cpu_frequency_change((uint64_t)data); |
63 | 0 | } |
64 | | |
65 | | static bool allow_access_msr(unsigned int msr) |
66 | 0 | { |
67 | 0 | switch ( msr ) |
68 | 0 | { |
69 | 0 | /* MSR for CMT, refer to chapter 17.14 of Intel SDM. */ |
70 | 0 | case MSR_IA32_CMT_EVTSEL: |
71 | 0 | case MSR_IA32_CMT_CTR: |
72 | 0 | case MSR_IA32_TSC: |
73 | 0 | return true; |
74 | 0 | } |
75 | 0 |
|
76 | 0 | return false; |
77 | 0 | } |
78 | | |
79 | | void check_resource_access(struct resource_access *ra) |
80 | 0 | { |
81 | 0 | unsigned int i; |
82 | 0 |
|
83 | 0 | for ( i = 0; i < ra->nr_entries; i++ ) |
84 | 0 | { |
85 | 0 | int ret = 0; |
86 | 0 | xenpf_resource_entry_t *entry = ra->entries + i; |
87 | 0 |
|
88 | 0 | if ( entry->rsvd ) |
89 | 0 | { |
90 | 0 | entry->u.ret = -EINVAL; |
91 | 0 | break; |
92 | 0 | } |
93 | 0 |
|
94 | 0 | switch ( entry->u.cmd ) |
95 | 0 | { |
96 | 0 | case XEN_RESOURCE_OP_MSR_READ: |
97 | 0 | case XEN_RESOURCE_OP_MSR_WRITE: |
98 | 0 | if ( entry->idx >> 32 ) |
99 | 0 | ret = -EINVAL; |
100 | 0 | else if ( !allow_access_msr(entry->idx) ) |
101 | 0 | ret = -EACCES; |
102 | 0 | break; |
103 | 0 | default: |
104 | 0 | ret = -EOPNOTSUPP; |
105 | 0 | break; |
106 | 0 | } |
107 | 0 |
|
108 | 0 | if ( ret ) |
109 | 0 | { |
110 | 0 | entry->u.ret = ret; |
111 | 0 | break; |
112 | 0 | } |
113 | 0 | } |
114 | 0 |
|
115 | 0 | ra->nr_done = i; |
116 | 0 | } |
117 | | |
/*
 * Execute the (already validated) entries of a resource-op batch on the
 * current CPU.  May run in IPI context via on_selected_cpus().  Stops at
 * the first failing entry, recording its error in entry->u.ret, and
 * updates ra->nr_done to the number of entries actually completed.
 */
void resource_access(void *info)
{
    struct resource_access *ra = info;
    unsigned int i;
    u64 tsc = 0;

    for ( i = 0; i < ra->nr_done; i++ )
    {
        int ret;
        xenpf_resource_entry_t *entry = ra->entries + i;

        switch ( entry->u.cmd )
        {
        case XEN_RESOURCE_OP_MSR_READ:
            if ( unlikely(entry->idx == MSR_IA32_TSC) )
            {
                /* Return obfuscated scaled time instead of raw timestamp */
                /*
                 * NOTE(review): uses the tsc value sampled while handling
                 * the *previous* entry (0 if there was none) plus a
                 * boot_random-based offset; confirm this matches the
                 * intended obfuscation formula upstream.
                 */
                entry->val = get_s_time_fixed(tsc)
                             + SECONDS(boot_random) - boot_random;
                ret = 0;
            }
            else
            {
                unsigned long flags = 0;
                /*
                 * If next entry is MSR_IA32_TSC read, then the actual rdtsc
                 * is performed together with current entry, with IRQ disabled.
                 */
                bool read_tsc = i < ra->nr_done - 1 &&
                                unlikely(entry[1].idx == MSR_IA32_TSC);

                /* IRQs off so the MSR read and rdtsc stay back-to-back. */
                if ( unlikely(read_tsc) )
                    local_irq_save(flags);

                ret = rdmsr_safe(entry->idx, entry->val);

                if ( unlikely(read_tsc) )
                {
                    tsc = rdtsc();
                    local_irq_restore(flags);
                }
            }
            break;
        case XEN_RESOURCE_OP_MSR_WRITE:
            /* Writing the TSC through this interface is never allowed. */
            if ( unlikely(entry->idx == MSR_IA32_TSC) )
                ret = -EPERM;
            else
                ret = wrmsr_safe(entry->idx, entry->val);
            break;
        default:
            /* check_resource_access() rejected everything else already. */
            BUG();
            break;
        }

        if ( ret )
        {
            entry->u.ret = ret;
            break;
        }
    }

    ra->nr_done = i;
}
181 | | #endif |
182 | | |
/*
 * Top-level XENPF_* hypercall dispatcher.  Intended for use by the
 * hardware domain; every sub-op runs under xenpf_lock.  Returns 0 or a
 * sub-op-specific value on success, -errno on failure, or a hypercall
 * continuation when preempted while acquiring a lock.
 */
ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op)
{
    ret_t ret;
    struct xen_platform_op curop, *op = &curop;

    /* Snapshot the guest's argument block before validating it. */
    if ( copy_from_guest(op, u_xenpf_op, 1) )
        return -EFAULT;

    if ( op->interface_version != XENPF_INTERFACE_VERSION )
        return -EACCES;

    /* XSM check: by default only privileged (dom0) callers may proceed. */
    ret = xsm_platform_op(XSM_PRIV, op->cmd);
    if ( ret )
        return ret;

    /*
     * Trylock here avoids deadlock with an existing platform critical section
     * which might (for some current or future reason) want to synchronise
     * with this vcpu.
     */
    while ( !spin_trylock(&xenpf_lock) )
        if ( hypercall_preempt_check() )
            return hypercall_create_continuation(
                __HYPERVISOR_platform_op, "h", u_xenpf_op);

    /* ret is 0 on entry to the switch; cases only set it on error/result. */
    switch ( op->cmd )
    {
    case XENPF_settime32:
        /* Set wallclock time from 32-bit seconds value. */
        do_settime(op->u.settime32.secs,
                   op->u.settime32.nsecs,
                   op->u.settime32.system_time);
        break;

    case XENPF_settime64:
        /* 64-bit variant; the mbz (must-be-zero) field is enforced. */
        if ( likely(!op->u.settime64.mbz) )
            do_settime(op->u.settime64.secs,
                       op->u.settime64.nsecs,
                       op->u.settime64.system_time);
        else
            ret = -EINVAL;
        break;

    case XENPF_add_memtype:
    {
        /* Add a variable-range MTRR; mtrr_add_page() returns the reg no. */
        ret = mtrr_add_page(
            op->u.add_memtype.mfn,
            op->u.add_memtype.nr_mfns,
            op->u.add_memtype.type,
            1);
        if ( ret >= 0 )
        {
            op->u.add_memtype.handle = 0;
            op->u.add_memtype.reg = ret;
            ret = __copy_field_to_guest(u_xenpf_op, op, u.add_memtype) ?
                -EFAULT : 0;
            /*
             * NOTE(review): on copy failure ret is -EFAULT here, so this
             * rollback passes a negative "register" to mtrr_del_page() —
             * presumably op->u.add_memtype.reg was intended; verify
             * against mtrr_del_page()'s lookup semantics.
             */
            if ( ret != 0 )
                mtrr_del_page(ret, 0, 0);
        }
    }
    break;

    case XENPF_del_memtype:
    {
        /* Delete by register number only; handle must be 0 and reg >= 0. */
        if (op->u.del_memtype.handle == 0
            /* mtrr/main.c otherwise does a lookup */
            && (int)op->u.del_memtype.reg >= 0)
        {
            ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
            if ( ret > 0 )
                ret = 0;
        }
        else
            ret = -EINVAL;
    }
    break;

    case XENPF_read_memtype:
    {
        /* Read back one variable MTRR range. */
        unsigned long mfn, nr_mfns;
        mtrr_type type;

        ret = -EINVAL;
        if ( op->u.read_memtype.reg < num_var_ranges )
        {
            mtrr_if->get(op->u.read_memtype.reg, &mfn, &nr_mfns, &type);
            op->u.read_memtype.mfn = mfn;
            op->u.read_memtype.nr_mfns = nr_mfns;
            op->u.read_memtype.type = type;
            ret = __copy_field_to_guest(u_xenpf_op, op, u.read_memtype)
                ? -EFAULT : 0;
        }
    }
    break;

    case XENPF_microcode_update:
    {
        XEN_GUEST_HANDLE(const_void) data;

        guest_from_compat_handle(data, op->u.microcode.data);

        /*
         * alloc_vcpu() will access data which is modified during
         * microcode update
         */
        while ( !spin_trylock(&vcpu_alloc_lock) )
        {
            if ( hypercall_preempt_check() )
            {
                /* xenpf_lock is released via the out: path. */
                ret = hypercall_create_continuation(
                    __HYPERVISOR_platform_op, "h", u_xenpf_op);
                goto out;
            }
        }

        ret = microcode_update(
            guest_handle_to_param(data, const_void),
            op->u.microcode.length);
        spin_unlock(&vcpu_alloc_lock);
    }
    break;

    case XENPF_platform_quirk:
    {
        /* Legacy quirk notifications from the dom0 kernel. */
        int quirk_id = op->u.platform_quirk.quirk_id;

        switch ( quirk_id )
        {
        case QUIRK_NOIRQBALANCING:
            printk("Platform quirk -- Disabling IRQ balancing/affinity.\n");
            opt_noirqbalance = 1;
            setup_ioapic_dest();
            break;
        case QUIRK_IOAPIC_BAD_REGSEL:
            dprintk(XENLOG_WARNING,
                    "Domain 0 thinks that IO-APIC REGSEL is bad\n");
            break;
        case QUIRK_IOAPIC_GOOD_REGSEL:
            break;
        default:
            ret = -EINVAL;
            break;
        }
    }
    break;

    case XENPF_firmware_info:
        /* Expose firmware data (EDD/EDID/EFI/keyboard) captured at boot. */
        switch ( op->u.firmware_info.type )
        {
        case XEN_FW_DISK_INFO: {
            const struct edd_info *info;
            u16 length;

            ret = -ESRCH;
            if ( op->u.firmware_info.index >= bootsym(boot_edd_info_nr) )
                break;

            info = bootsym(boot_edd_info) + op->u.firmware_info.index;

            /* Transfer the EDD info block. */
            ret = -EFAULT;
            /* Guest supplies its buffer size in the leading length field. */
            if ( copy_from_compat(&length, op->u.firmware_info.u.
                                  disk_info.edd_params, 1) )
                break;
            if ( length > info->edd_device_params.length )
                length = info->edd_device_params.length;
            if ( copy_to_compat(op->u.firmware_info.u.disk_info.edd_params,
                                (u8 *)&info->edd_device_params,
                                length) )
                break;
            /* Write back the (possibly clamped) length. */
            if ( copy_to_compat(op->u.firmware_info.u.disk_info.edd_params,
                                &length, 1) )
                break;

            /* Transfer miscellaneous other information values. */
#define C(x) op->u.firmware_info.u.disk_info.x = info->x
            C(device);
            C(version);
            C(interface_support);
            C(legacy_max_cylinder);
            C(legacy_max_head);
            C(legacy_sectors_per_track);
#undef C

            ret = (__copy_field_to_guest(u_xenpf_op, op,
                                         u.firmware_info.u.disk_info)
                   ? -EFAULT : 0);
            break;
        }
        case XEN_FW_DISK_MBR_SIGNATURE: {
            const struct mbr_signature *sig;

            ret = -ESRCH;
            if ( op->u.firmware_info.index >= bootsym(boot_mbr_signature_nr) )
                break;

            sig = bootsym(boot_mbr_signature) + op->u.firmware_info.index;

            op->u.firmware_info.u.disk_mbr_signature.device = sig->device;
            op->u.firmware_info.u.disk_mbr_signature.mbr_signature =
                sig->signature;

            ret = (__copy_field_to_guest(u_xenpf_op, op,
                                         u.firmware_info.u.disk_mbr_signature)
                   ? -EFAULT : 0);
            break;
        }
        case XEN_FW_VBEDDC_INFO:
            ret = -ESRCH;
            if ( op->u.firmware_info.index != 0 )
                break;
            /* 0x13131313 marks "no EDID captured" in the boot trampoline. */
            if ( *(u32 *)bootsym(boot_edid_info) == 0x13131313 )
                break;

            op->u.firmware_info.u.vbeddc_info.capabilities =
                bootsym(boot_edid_caps);
            op->u.firmware_info.u.vbeddc_info.edid_transfer_time =
                bootsym(boot_edid_caps) >> 8;

            ret = 0;
            if ( __copy_field_to_guest(u_xenpf_op, op, u.firmware_info.
                                       u.vbeddc_info.capabilities) ||
                 __copy_field_to_guest(u_xenpf_op, op, u.firmware_info.
                                       u.vbeddc_info.edid_transfer_time) ||
                 copy_to_compat(op->u.firmware_info.u.vbeddc_info.edid,
                                bootsym(boot_edid_info), 128) )
                ret = -EFAULT;
            break;
        case XEN_FW_EFI_INFO:
            ret = efi_get_info(op->u.firmware_info.index,
                               &op->u.firmware_info.u.efi_info);
            if ( ret == 0 &&
                 __copy_field_to_guest(u_xenpf_op, op,
                                       u.firmware_info.u.efi_info) )
                ret = -EFAULT;
            break;
        case XEN_FW_KBD_SHIFT_FLAGS:
            ret = -ESRCH;
            if ( op->u.firmware_info.index != 0 )
                break;

            op->u.firmware_info.u.kbd_shift_flags = bootsym(kbd_shift_flags);

            ret = 0;
            if ( __copy_field_to_guest(u_xenpf_op, op,
                                       u.firmware_info.u.kbd_shift_flags) )
                ret = -EFAULT;
            break;
        default:
            ret = -EINVAL;
            break;
        }
        break;

    case XENPF_efi_runtime_call:
        /* Proxy an EFI runtime-services call; copy results back on success. */
        ret = efi_runtime_call(&op->u.efi_runtime_call);
        if ( ret == 0 &&
             __copy_field_to_guest(u_xenpf_op, op, u.efi_runtime_call) )
            ret = -EFAULT;
        break;

    case XENPF_enter_acpi_sleep:
        ret = acpi_enter_sleep(&op->u.enter_acpi_sleep);
        break;

    case XENPF_change_freq:
        /* Only meaningful when dom0 owns frequency control. */
        ret = -ENOSYS;
        if ( cpufreq_controller != FREQCTL_dom0_kernel )
            break;
        ret = -EINVAL;
        if ( op->u.change_freq.flags || !cpu_online(op->u.change_freq.cpu) )
            break;
        /* The change must run on the target CPU itself. */
        ret = continue_hypercall_on_cpu(op->u.change_freq.cpu,
                                        cpu_frequency_change_helper,
                                        (void *)op->u.change_freq.freq);
        break;

    case XENPF_getidletime:
    {
        /* Report per-CPU idle time for the CPUs in the guest's bitmap. */
        uint32_t cpu;
        uint64_t idletime, now = NOW();
        struct xenctl_bitmap ctlmap;
        cpumask_var_t cpumap;
        XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
        XEN_GUEST_HANDLE(uint64) idletimes;

        ret = -ENOSYS;
        if ( cpufreq_controller != FREQCTL_dom0_kernel )
            break;

        ctlmap.nr_bits = op->u.getidletime.cpumap_nr_cpus;
        guest_from_compat_handle(cpumap_bitmap,
                                 op->u.getidletime.cpumap_bitmap);
        ctlmap.bitmap.p = cpumap_bitmap.p; /* handle -> handle_64 conversion */
        if ( (ret = xenctl_bitmap_to_cpumask(&cpumap, &ctlmap)) != 0 )
            goto out;
        guest_from_compat_handle(idletimes, op->u.getidletime.idletime);

        for_each_cpu ( cpu, cpumap )
        {
            idletime = get_cpu_idle_time(cpu);

            /* Zero idle time means "no data"; drop the CPU from the map. */
            if ( !idletime )
            {
                __cpumask_clear_cpu(cpu, cpumap);
                continue;
            }

            if ( copy_to_guest_offset(idletimes, cpu, &idletime, 1) )
            {
                ret = -EFAULT;
                break;
            }
        }

        op->u.getidletime.now = now;
        if ( ret == 0 )
            ret = cpumask_to_xenctl_bitmap(&ctlmap, cpumap);
        free_cpumask_var(cpumap);

        if ( ret == 0 && __copy_field_to_guest(u_xenpf_op, op, u.getidletime) )
            ret = -EFAULT;
    }
    break;

    case XENPF_set_processor_pminfo:
        /* Dom0 forwards ACPI P/C/T-state and _PDC data to Xen. */
        switch ( op->u.set_pminfo.type )
        {
        case XEN_PM_PX:
            if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_PX) )
            {
                ret = -ENOSYS;
                break;
            }
            ret = set_px_pminfo(op->u.set_pminfo.id, &op->u.set_pminfo.u.perf);
            break;

        case XEN_PM_CX:
            if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_CX) )
            {
                ret = -ENOSYS;
                break;
            }
            ret = set_cx_pminfo(op->u.set_pminfo.id, &op->u.set_pminfo.u.power);
            break;

        case XEN_PM_TX:
            if ( !(xen_processor_pmbits & XEN_PROCESSOR_PM_TX) )
            {
                ret = -ENOSYS;
                break;
            }
            /* T-state upload is accepted as a type but not implemented. */
            ret = -EINVAL;
            break;

        case XEN_PM_PDC:
        {
            XEN_GUEST_HANDLE(uint32) pdc;

            guest_from_compat_handle(pdc, op->u.set_pminfo.u.pdc);
            ret = acpi_set_pdc_bits(
                op->u.set_pminfo.id,
                guest_handle_to_param(pdc, uint32));
        }
        break;

        default:
            ret = -EINVAL;
            break;
        }
        break;

    case XENPF_get_cpuinfo:
    {
        /* Map a Xen CPU id to APIC/ACPI ids and online/present flags. */
        struct xenpf_pcpuinfo *g_info;

        g_info = &op->u.pcpu_info;

        /* Hold off CPU hotplug while inspecting the CPU maps. */
        if ( !get_cpu_maps() )
        {
            ret = -EBUSY;
            break;
        }

        if ( (g_info->xen_cpuid >= nr_cpu_ids) ||
             !cpu_present(g_info->xen_cpuid) )
        {
            g_info->flags = XEN_PCPU_FLAGS_INVALID;
        }
        else
        {
            g_info->apic_id = x86_cpu_to_apicid[g_info->xen_cpuid];
            g_info->acpi_id = acpi_get_processor_id(g_info->xen_cpuid);
            ASSERT(g_info->apic_id != BAD_APICID);
            g_info->flags = 0;
            if (cpu_online(g_info->xen_cpuid))
                g_info->flags |= XEN_PCPU_FLAGS_ONLINE;
        }

        g_info->max_present = cpumask_last(&cpu_present_map);

        put_cpu_maps();

        ret = __copy_field_to_guest(u_xenpf_op, op, u.pcpu_info) ? -EFAULT : 0;
    }
    break;

    case XENPF_get_cpu_version:
    {
        /* Report vendor/family/model/stepping of one online CPU. */
        struct xenpf_pcpu_version *ver = &op->u.pcpu_version;

        if ( !get_cpu_maps() )
        {
            ret = -EBUSY;
            break;
        }

        /* Offline/invalid CPUs get an all-zero answer rather than an error. */
        if ( (ver->xen_cpuid >= nr_cpu_ids) || !cpu_online(ver->xen_cpuid) )
        {
            memset(ver->vendor_id, 0, sizeof(ver->vendor_id));
            ver->family = 0;
            ver->model = 0;
            ver->stepping = 0;
        }
        else
        {
            const struct cpuinfo_x86 *c = &cpu_data[ver->xen_cpuid];

            memcpy(ver->vendor_id, c->x86_vendor_id, sizeof(ver->vendor_id));
            ver->family = c->x86;
            ver->model = c->x86_model;
            ver->stepping = c->x86_mask;
        }

        ver->max_present = cpumask_last(&cpu_present_map);

        put_cpu_maps();

        if ( __copy_field_to_guest(u_xenpf_op, op, u.pcpu_version) )
            ret = -EFAULT;
    }
    break;

    case XENPF_cpu_online:
    {
        int cpu = op->u.cpu_ol.cpuid;

        ret = xsm_resource_plug_core(XSM_HOOK);
        if ( ret )
            break;

        /* Onlining is refused when TSC is the system clocksource. */
        if ( cpu >= nr_cpu_ids || !cpu_present(cpu) ||
             clocksource_is_tsc() )
        {
            ret = -EINVAL;
            break;
        }

        /* Already online: success, nothing to do. */
        if ( cpu_online(cpu) )
        {
            ret = 0;
            break;
        }

        /* cpu_up must run on CPU0. */
        ret = continue_hypercall_on_cpu(
            0, cpu_up_helper, (void *)(unsigned long)cpu);
        break;
    }

    case XENPF_cpu_offline:
    {
        int cpu = op->u.cpu_ol.cpuid;

        ret = xsm_resource_unplug_core(XSM_HOOK);
        if ( ret )
            break;

        /* The boot CPU can never be offlined. */
        if ( cpu == 0 )
        {
            ret = -EOPNOTSUPP;
            break;
        }

        if ( cpu >= nr_cpu_ids || !cpu_present(cpu) )
        {
            ret = -EINVAL;
            break;
        }

        /* Already offline: success, nothing to do. */
        if ( !cpu_online(cpu) )
        {
            ret = 0;
            break;
        }

        ret = continue_hypercall_on_cpu(
            0, cpu_down_helper, (void *)(unsigned long)cpu);
        break;
    }
    /* NOTE(review): this outer break is unreachable (inner breaks exit). */
    break;

    case XENPF_cpu_hotadd:
        ret = xsm_resource_plug_core(XSM_HOOK);
        if ( ret )
            break;

        ret = cpu_add(op->u.cpu_add.apic_id,
                      op->u.cpu_add.acpi_id,
                      op->u.cpu_add.pxm);
        break;

    case XENPF_mem_hotadd:
        ret = xsm_resource_plug_core(XSM_HOOK);
        if ( ret )
            break;

        ret = memory_add(op->u.mem_add.spfn,
                         op->u.mem_add.epfn,
                         op->u.mem_add.pxm);
        break;

    case XENPF_core_parking:
    {
        uint32_t idle_nums;

        switch(op->u.core_parking.type)
        {
        case XEN_CORE_PARKING_SET:
            /* Never park every CPU: clamp to num_present_cpus() - 1. */
            idle_nums = min_t(uint32_t,
                    op->u.core_parking.idle_nums, num_present_cpus() - 1);
            ret = continue_hypercall_on_cpu(
                    0, core_parking_helper, (void *)(unsigned long)idle_nums);
            break;

        case XEN_CORE_PARKING_GET:
            op->u.core_parking.idle_nums = get_cur_idle_nums();
            ret = __copy_field_to_guest(u_xenpf_op, op, u.core_parking) ?
                  -EFAULT : 0;
            break;

        default:
            ret = -EINVAL;
            break;
        }
    }
    break;

    case XENPF_resource_op:
    {
        /* Batched MSR access, executed on a caller-chosen CPU. */
        struct resource_access ra;
        unsigned int cpu;
        XEN_GUEST_HANDLE(xenpf_resource_entry_t) guest_entries;

        ra.nr_entries = op->u.resource_op.nr_entries;
        if ( ra.nr_entries == 0 )
            break;
        if ( ra.nr_entries > RESOURCE_ACCESS_MAX_ENTRIES )
        {
            ret = -EINVAL;
            break;
        }

        ra.entries = xmalloc_array(xenpf_resource_entry_t, ra.nr_entries);
        if ( !ra.entries )
        {
            ret = -ENOMEM;
            break;
        }

        guest_from_compat_handle(guest_entries, op->u.resource_op.entries);

        if ( copy_from_guest(ra.entries, guest_entries, ra.nr_entries) )
        {
            xfree(ra.entries);
            ret = -EFAULT;
            break;
        }

        /* Do sanity check earlier to omit the potential IPI overhead. */
        check_resource_access(&ra);
        if ( ra.nr_done == 0 )
        {
            /* Copy the return value for entry 0 if it failed. */
            if ( __copy_to_guest(guest_entries, ra.entries, 1) )
                ret = -EFAULT;

            xfree(ra.entries);
            break;
        }

        cpu = op->u.resource_op.cpu;
        if ( (cpu >= nr_cpu_ids) || !cpu_online(cpu) )
        {
            xfree(ra.entries);
            ret = -ENODEV;
            break;
        }
        /* Run locally if possible, otherwise synchronously via IPI. */
        if ( cpu == smp_processor_id() )
            resource_access(&ra);
        else
            on_selected_cpus(cpumask_of(cpu), resource_access, &ra, 1);

        /* Copy all if succeeded or up to the failed entry. */
        if ( __copy_to_guest(guest_entries, ra.entries,
                             ra.nr_done < ra.nr_entries ? ra.nr_done + 1
                                                        : ra.nr_entries) )
            ret = -EFAULT;
        else
            ret = ra.nr_done;

        xfree(ra.entries);
    }
    break;

    case XENPF_get_symbol:
    {
        /* Iterate the hypervisor symbol table (for dom0 symbolisation). */
        static char name[KSYM_NAME_LEN + 1]; /* protected by xenpf_lock */
        XEN_GUEST_HANDLE(char) nameh;
        uint32_t namelen, copylen;
        unsigned long addr;

        guest_from_compat_handle(nameh, op->u.symdata.name);

        ret = xensyms_read(&op->u.symdata.symnum, &op->u.symdata.type,
                           &addr, name);
        op->u.symdata.address = addr;
        namelen = strlen(name) + 1;

        /* Truncate to the guest's buffer, but report the full length. */
        if ( namelen > op->u.symdata.namelen )
            copylen = op->u.symdata.namelen;
        else
            copylen = namelen;

        op->u.symdata.namelen = namelen;

        if ( !ret && copy_to_guest(nameh, name, copylen) )
            ret = -EFAULT;
        if ( !ret && __copy_field_to_guest(u_xenpf_op, op, u.symdata) )
            ret = -EFAULT;
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

 out:
    spin_unlock(&xenpf_lock);

    return ret;
}
834 | | |
835 | | /* |
836 | | * Local variables: |
837 | | * mode: C |
838 | | * c-file-style: "BSD" |
839 | | * c-basic-offset: 4 |
840 | | * tab-width: 4 |
841 | | * indent-tabs-mode: nil |
842 | | * End: |
843 | | */ |