/root/src/xen/xen/arch/x86/microcode.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Intel CPU Microcode Update Driver for Linux |
3 | | * |
4 | | * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk> |
5 | | * 2006 Shaohua Li <shaohua.li@intel.com> * |
6 | | * This driver allows to upgrade microcode on Intel processors |
7 | | * belonging to IA-32 family - PentiumPro, Pentium II, |
8 | | * Pentium III, Xeon, Pentium 4, etc. |
9 | | * |
 * Reference: Section 8.11 of Volume 3a, IA-32 Intel® Architecture
11 | | * Software Developer's Manual |
12 | | * Order Number 253668 or free download from: |
13 | | * |
14 | | * http://developer.intel.com/design/pentium4/manuals/253668.htm |
15 | | * |
16 | | * For more information, go to http://www.urbanmyth.org/microcode |
17 | | * |
18 | | * This program is free software; you can redistribute it and/or |
19 | | * modify it under the terms of the GNU General Public License |
20 | | * as published by the Free Software Foundation; either version |
21 | | * 2 of the License, or (at your option) any later version. |
22 | | */ |
23 | | |
24 | | #include <xen/cpu.h> |
25 | | #include <xen/lib.h> |
26 | | #include <xen/kernel.h> |
27 | | #include <xen/init.h> |
28 | | #include <xen/notifier.h> |
29 | | #include <xen/sched.h> |
30 | | #include <xen/smp.h> |
31 | | #include <xen/softirq.h> |
32 | | #include <xen/spinlock.h> |
33 | | #include <xen/tasklet.h> |
34 | | #include <xen/guest_access.h> |
35 | | #include <xen/earlycpio.h> |
36 | | |
37 | | #include <asm/msr.h> |
38 | | #include <asm/processor.h> |
39 | | #include <asm/setup.h> |
40 | | #include <asm/microcode.h> |
41 | | |
/* The multiboot module holding an explicitly requested microcode payload. */
static module_t __initdata ucode_mod;
/* Maps ucode_mod into the address space; called with NULL to unmap again. */
static void *(*__initdata ucode_mod_map)(const module_t *);
/* Requested module index; a negative value counts from the end of the list. */
static signed int __initdata ucode_mod_idx;
/* Set when EFI already chose the payload - the "ucode=" option is ignored. */
static bool_t __initdata ucode_mod_forced;

/*
 * If we scan the initramfs.cpio for the early microcode code
 * and find it, then 'ucode_blob' will contain the pointer
 * and the size of said blob. It is allocated from Xen's heap
 * memory.
 */
struct ucode_mod_blob {
    void *data;   /* Heap-allocated copy of the microcode image. */
    size_t size;  /* Size of that copy in bytes; zero => no blob found. */
};

static struct ucode_mod_blob __initdata ucode_blob;
/*
 * By default we will NOT parse the multiboot modules to see if there is
 * cpio image with the microcode images.
 */
static bool_t __initdata ucode_scan;
64 | | |
65 | | void __init microcode_set_module(unsigned int idx) |
66 | 0 | { |
67 | 0 | ucode_mod_idx = idx; |
68 | 0 | ucode_mod_forced = 1; |
69 | 0 | } |
70 | | |
71 | | /* |
72 | | * The format is '[<integer>|scan]'. Both options are optional. |
73 | | * If the EFI has forced which of the multiboot payloads is to be used, |
74 | | * no parsing will be attempted. |
75 | | */ |
76 | | static int __init parse_ucode(const char *s) |
77 | 0 | { |
78 | 0 | const char *q = NULL; |
79 | 0 |
|
80 | 0 | if ( ucode_mod_forced ) /* Forced by EFI */ |
81 | 0 | return 0; |
82 | 0 |
|
83 | 0 | if ( !strncmp(s, "scan", 4) ) |
84 | 0 | ucode_scan = 1; |
85 | 0 | else |
86 | 0 | ucode_mod_idx = simple_strtol(s, &q, 0); |
87 | 0 |
|
88 | 0 | return (q && *q) ? -EINVAL : 0; |
89 | 0 | } |
90 | | custom_param("ucode", parse_ucode); |
91 | | |
92 | | /* |
93 | | * 8MB ought to be enough. |
94 | | */ |
95 | 0 | #define MAX_EARLY_CPIO_MICROCODE (8 << 20) |
96 | | |
97 | | void __init microcode_scan_module( |
98 | | unsigned long *module_map, |
99 | | const multiboot_info_t *mbi, |
100 | | void *(*bootmap)(const module_t *)) |
101 | 0 | { |
102 | 0 | module_t *mod = (module_t *)__va(mbi->mods_addr); |
103 | 0 | uint64_t *_blob_start; |
104 | 0 | unsigned long _blob_size; |
105 | 0 | struct cpio_data cd; |
106 | 0 | long offset; |
107 | 0 | const char *p = NULL; |
108 | 0 | int i; |
109 | 0 |
|
110 | 0 | ucode_blob.size = 0; |
111 | 0 | if ( !ucode_scan ) |
112 | 0 | return; |
113 | 0 |
|
114 | 0 | if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD ) |
115 | 0 | p = "kernel/x86/microcode/AuthenticAMD.bin"; |
116 | 0 | else if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) |
117 | 0 | p = "kernel/x86/microcode/GenuineIntel.bin"; |
118 | 0 | else |
119 | 0 | return; |
120 | 0 |
|
121 | 0 | /* |
122 | 0 | * Try all modules and see whichever could be the microcode blob. |
123 | 0 | */ |
124 | 0 | for ( i = 1 /* Ignore dom0 kernel */; i < mbi->mods_count; i++ ) |
125 | 0 | { |
126 | 0 | if ( !test_bit(i, module_map) ) |
127 | 0 | continue; |
128 | 0 |
|
129 | 0 | _blob_start = bootmap(&mod[i]); |
130 | 0 | _blob_size = mod[i].mod_end; |
131 | 0 | if ( !_blob_start ) |
132 | 0 | { |
133 | 0 | printk("Could not map multiboot module #%d (size: %ld)\n", |
134 | 0 | i, _blob_size); |
135 | 0 | continue; |
136 | 0 | } |
137 | 0 | cd.data = NULL; |
138 | 0 | cd.size = 0; |
139 | 0 | cd = find_cpio_data(p, _blob_start, _blob_size, &offset /* ignore */); |
140 | 0 | if ( cd.data ) |
141 | 0 | { |
142 | 0 | /* |
143 | 0 | * This is an arbitrary check - it would be sad if the blob |
144 | 0 | * consumed most of the memory and did not allow guests |
145 | 0 | * to launch. |
146 | 0 | */ |
147 | 0 | if ( cd.size > MAX_EARLY_CPIO_MICROCODE ) |
148 | 0 | { |
149 | 0 | printk("Multiboot %d microcode payload too big! (%ld, we can do %d)\n", |
150 | 0 | i, cd.size, MAX_EARLY_CPIO_MICROCODE); |
151 | 0 | goto err; |
152 | 0 | } |
153 | 0 | ucode_blob.size = cd.size; |
154 | 0 | ucode_blob.data = xmalloc_bytes(cd.size); |
155 | 0 | if ( !ucode_blob.data ) |
156 | 0 | cd.data = NULL; |
157 | 0 | else |
158 | 0 | memcpy(ucode_blob.data, cd.data, cd.size); |
159 | 0 | } |
160 | 0 | bootmap(NULL); |
161 | 0 | if ( cd.data ) |
162 | 0 | break; |
163 | 0 | } |
164 | 0 | return; |
165 | 0 | err: |
166 | 0 | bootmap(NULL); |
167 | 0 | } |
168 | | void __init microcode_grab_module( |
169 | | unsigned long *module_map, |
170 | | const multiboot_info_t *mbi, |
171 | | void *(*map)(const module_t *)) |
172 | 1 | { |
173 | 1 | module_t *mod = (module_t *)__va(mbi->mods_addr); |
174 | 1 | |
175 | 1 | if ( ucode_mod_idx < 0 ) |
176 | 0 | ucode_mod_idx += mbi->mods_count; |
177 | 1 | if ( ucode_mod_idx <= 0 || ucode_mod_idx >= mbi->mods_count || |
178 | 0 | !__test_and_clear_bit(ucode_mod_idx, module_map) ) |
179 | 1 | goto scan; |
180 | 0 | ucode_mod = mod[ucode_mod_idx]; |
181 | 0 | ucode_mod_map = map; |
182 | 1 | scan: |
183 | 1 | if ( ucode_scan ) |
184 | 0 | microcode_scan_module(module_map, mbi, map); |
185 | 1 | } |
186 | | |
/*
 * Vendor-specific hooks; checked for NULL before every use.  Expected to
 * be set up by the vendor init code (see microcode_init_intel()/
 * microcode_init_amd() callers below).
 */
const struct microcode_ops *microcode_ops;

/* Serialises all accesses to the per-CPU ucode_cpu_info state. */
static DEFINE_SPINLOCK(microcode_mutex);

DEFINE_PER_CPU(struct ucode_cpu_info, ucode_cpu_info);

/*
 * Per-hypercall context, allocated in microcode_update() and handed along
 * the continue_hypercall_on_cpu() chain in do_microcode_update().
 */
struct microcode_info {
    unsigned int cpu;     /* CPU due to perform the next update step. */
    uint32_t buffer_size; /* Number of valid bytes in buffer[]. */
    int error;            /* Most recent non-zero per-CPU error, if any. */
    char buffer[1];       /* Payload copied from the guest (trails struct). */
};
199 | | |
200 | | static void __microcode_fini_cpu(unsigned int cpu) |
201 | 0 | { |
202 | 0 | struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu); |
203 | 0 |
|
204 | 0 | xfree(uci->mc.mc_valid); |
205 | 0 | memset(uci, 0, sizeof(*uci)); |
206 | 0 | } |
207 | | |
/* Locked wrapper around __microcode_fini_cpu(); takes microcode_mutex. */
static void microcode_fini_cpu(unsigned int cpu)
{
    spin_lock(&microcode_mutex);
    __microcode_fini_cpu(cpu);
    spin_unlock(&microcode_mutex);
}
214 | | |
/*
 * Re-apply microcode on a CPU coming back online (e.g. after S3 resume).
 * Returns 0 on success (or when no vendor ops are registered), negative
 * errno otherwise.  All per-CPU state is protected by microcode_mutex.
 */
int microcode_resume_cpu(unsigned int cpu)
{
    int err;
    struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
    struct cpu_signature nsig;
    unsigned int cpu2;

    if ( !microcode_ops )
        return 0;

    spin_lock(&microcode_mutex);

    /* Refresh the CPU's signature/revision before matching anything. */
    err = microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
    if ( err )
    {
        __microcode_fini_cpu(cpu);
        spin_unlock(&microcode_mutex);
        return err;
    }

    /* First preference: the image this CPU itself had cached. */
    if ( uci->mc.mc_valid )
    {
        err = microcode_ops->microcode_resume_match(cpu, uci->mc.mc_valid);
        if ( err >= 0 )
        {
            /* A positive match result requests an actual apply. */
            if ( err )
                err = microcode_ops->apply_microcode(cpu);
            spin_unlock(&microcode_mutex);
            return err;
        }
        /* Negative: the cached image no longer matches - fall through. */
    }

    /* Drop the stale cache while preserving the freshly read signature. */
    nsig = uci->cpu_sig;
    __microcode_fini_cpu(cpu);
    uci->cpu_sig = nsig;

    /*
     * Second preference: borrow a matching image cached by any other
     * online CPU.  Note resume_match() is passed "cpu" (the target),
     * not cpu2 - each other CPU's image is tested against this CPU.
     */
    err = -EIO;
    for_each_online_cpu ( cpu2 )
    {
        uci = &per_cpu(ucode_cpu_info, cpu2);
        if ( uci->mc.mc_valid &&
             microcode_ops->microcode_resume_match(cpu, uci->mc.mc_valid) > 0 )
        {
            err = microcode_ops->apply_microcode(cpu);
            break;
        }
    }

    /*
     * NOTE(review): presumably resume_match() installed a temporary copy
     * on "cpu" which is released again here - confirm against the vendor
     * ops implementations before relying on this.
     */
    __microcode_fini_cpu(cpu);
    spin_unlock(&microcode_mutex);

    return err;
}
268 | | |
269 | | static int microcode_update_cpu(const void *buf, size_t size) |
270 | 0 | { |
271 | 0 | int err; |
272 | 0 | unsigned int cpu = smp_processor_id(); |
273 | 0 | struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu); |
274 | 0 |
|
275 | 0 | spin_lock(µcode_mutex); |
276 | 0 |
|
277 | 0 | err = microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); |
278 | 0 | if ( likely(!err) ) |
279 | 0 | err = microcode_ops->cpu_request_microcode(cpu, buf, size); |
280 | 0 | else |
281 | 0 | __microcode_fini_cpu(cpu); |
282 | 0 |
|
283 | 0 | spin_unlock(µcode_mutex); |
284 | 0 |
|
285 | 0 | return err; |
286 | 0 | } |
287 | | |
288 | | static long do_microcode_update(void *_info) |
289 | 0 | { |
290 | 0 | struct microcode_info *info = _info; |
291 | 0 | int error; |
292 | 0 |
|
293 | 0 | BUG_ON(info->cpu != smp_processor_id()); |
294 | 0 |
|
295 | 0 | error = microcode_update_cpu(info->buffer, info->buffer_size); |
296 | 0 | if ( error ) |
297 | 0 | info->error = error; |
298 | 0 |
|
299 | 0 | info->cpu = cpumask_next(info->cpu, &cpu_online_map); |
300 | 0 | if ( info->cpu < nr_cpu_ids ) |
301 | 0 | return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info); |
302 | 0 |
|
303 | 0 | error = info->error; |
304 | 0 | xfree(info); |
305 | 0 | return error; |
306 | 0 | } |
307 | | |
308 | | int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void) buf, unsigned long len) |
309 | 0 | { |
310 | 0 | int ret; |
311 | 0 | struct microcode_info *info; |
312 | 0 |
|
313 | 0 | if ( len != (uint32_t)len ) |
314 | 0 | return -E2BIG; |
315 | 0 |
|
316 | 0 | if ( microcode_ops == NULL ) |
317 | 0 | return -EINVAL; |
318 | 0 |
|
319 | 0 | info = xmalloc_bytes(sizeof(*info) + len); |
320 | 0 | if ( info == NULL ) |
321 | 0 | return -ENOMEM; |
322 | 0 |
|
323 | 0 | ret = copy_from_guest(info->buffer, buf, len); |
324 | 0 | if ( ret != 0 ) |
325 | 0 | { |
326 | 0 | xfree(info); |
327 | 0 | return ret; |
328 | 0 | } |
329 | 0 |
|
330 | 0 | info->buffer_size = len; |
331 | 0 | info->error = 0; |
332 | 0 | info->cpu = cpumask_first(&cpu_online_map); |
333 | 0 |
|
334 | 0 | if ( microcode_ops->start_update ) |
335 | 0 | { |
336 | 0 | ret = microcode_ops->start_update(); |
337 | 0 | if ( ret != 0 ) |
338 | 0 | { |
339 | 0 | xfree(info); |
340 | 0 | return ret; |
341 | 0 | } |
342 | 0 | } |
343 | 0 |
|
344 | 0 | return continue_hypercall_on_cpu(info->cpu, do_microcode_update, info); |
345 | 0 | } |
346 | | |
347 | | static int __init microcode_init(void) |
348 | 1 | { |
349 | 1 | /* |
350 | 1 | * At this point, all CPUs should have updated their microcode |
351 | 1 | * via the early_microcode_* paths so free the microcode blob. |
352 | 1 | */ |
353 | 1 | if ( ucode_blob.size ) |
354 | 0 | { |
355 | 0 | xfree(ucode_blob.data); |
356 | 0 | ucode_blob.size = 0; |
357 | 0 | ucode_blob.data = NULL; |
358 | 0 | } |
359 | 1 | else if ( ucode_mod.mod_end ) |
360 | 0 | { |
361 | 0 | ucode_mod_map(NULL); |
362 | 0 | ucode_mod.mod_end = 0; |
363 | 0 | } |
364 | 1 | |
365 | 1 | return 0; |
366 | 1 | } |
367 | | __initcall(microcode_init); |
368 | | |
369 | | static int microcode_percpu_callback( |
370 | | struct notifier_block *nfb, unsigned long action, void *hcpu) |
371 | 33 | { |
372 | 33 | unsigned int cpu = (unsigned long)hcpu; |
373 | 33 | |
374 | 33 | switch ( action ) |
375 | 33 | { |
376 | 0 | case CPU_DEAD: |
377 | 0 | microcode_fini_cpu(cpu); |
378 | 0 | break; |
379 | 33 | } |
380 | 33 | |
381 | 33 | return NOTIFY_DONE; |
382 | 33 | } |
383 | | |
/* Registered in early_microcode_init() to track CPU deaths. */
static struct notifier_block microcode_percpu_nfb = {
    .notifier_call = microcode_percpu_callback,
};
387 | | |
388 | | int __init early_microcode_update_cpu(bool start_update) |
389 | 11 | { |
390 | 11 | int rc = 0; |
391 | 11 | void *data = NULL; |
392 | 11 | size_t len; |
393 | 11 | |
394 | 11 | if ( ucode_blob.size ) |
395 | 0 | { |
396 | 0 | len = ucode_blob.size; |
397 | 0 | data = ucode_blob.data; |
398 | 0 | } |
399 | 11 | else if ( ucode_mod.mod_end ) |
400 | 0 | { |
401 | 0 | len = ucode_mod.mod_end; |
402 | 0 | data = ucode_mod_map(&ucode_mod); |
403 | 0 | } |
404 | 11 | if ( data ) |
405 | 0 | { |
406 | 0 | if ( start_update && microcode_ops->start_update ) |
407 | 0 | rc = microcode_ops->start_update(); |
408 | 0 |
|
409 | 0 | if ( rc ) |
410 | 0 | return rc; |
411 | 0 |
|
412 | 0 | return microcode_update_cpu(data, len); |
413 | 0 | } |
414 | 11 | else |
415 | 11 | return -ENOMEM; |
416 | 11 | } |
417 | | |
418 | | int __init early_microcode_init(void) |
419 | 1 | { |
420 | 1 | int rc; |
421 | 1 | |
422 | 1 | rc = microcode_init_intel(); |
423 | 1 | if ( rc ) |
424 | 0 | return rc; |
425 | 1 | |
426 | 1 | rc = microcode_init_amd(); |
427 | 1 | if ( rc ) |
428 | 0 | return rc; |
429 | 1 | |
430 | 1 | if ( microcode_ops ) |
431 | 1 | { |
432 | 1 | if ( ucode_mod.mod_end || ucode_blob.size ) |
433 | 0 | rc = early_microcode_update_cpu(true); |
434 | 1 | |
435 | 1 | register_cpu_notifier(µcode_percpu_nfb); |
436 | 1 | } |
437 | 1 | |
438 | 1 | return rc; |
439 | 1 | } |