/root/src/xen/xen/arch/x86/cpu/mtrr/generic.c
Line | Count | Source |
1 |  | /* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong |
2 |  | because MTRRs can span up to 40 bits (36 bits on most modern x86) */ |
3 | | #include <xen/lib.h> |
4 | | #include <xen/init.h> |
5 | | #include <xen/mm.h> |
6 | | #include <xen/stdbool.h> |
7 | | #include <asm/flushtlb.h> |
8 | | #include <asm/io.h> |
9 | | #include <asm/mtrr.h> |
10 | | #include <asm/msr.h> |
11 | | #include <asm/system.h> |
12 | | #include <asm/cpufeature.h> |
13 | | #include "mtrr.h" |
14 | | |
15 | | static const struct fixed_range_block { |
16 | | uint32_t base_msr; /* start address of an MTRR block */ |
17 | | unsigned int ranges; /* number of MTRRs in this block */ |
18 | | } fixed_range_blocks[] = { |
19 | | { MSR_MTRRfix64K_00000, (0x80000 - 0x00000) >> (16 + 3) }, |
20 | | { MSR_MTRRfix16K_80000, (0xC0000 - 0x80000) >> (14 + 3) }, |
21 | | { MSR_MTRRfix4K_C0000, (0x100000 - 0xC0000) >> (12 + 3) }, |
22 | | {} |
23 | | }; |
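A quick sanity check on the table arithmetic above, for reference: each 64-bit fixed-range MSR packs eight one-byte type fields, so a block spanning S bytes at granularity 2^G needs S >> (G + 3) MSRs. A minimal, self-contained check (illustrative, not part of the source):

    #include <assert.h>

    int main(void)
    {
        assert(((0x80000 - 0x00000) >> (16 + 3)) == 1);  /* one 64K-granularity MSR  */
        assert(((0xC0000 - 0x80000) >> (14 + 3)) == 2);  /* two 16K-granularity MSRs */
        assert(((0x100000 - 0xC0000) >> (12 + 3)) == 8); /* eight 4K-granularity MSRs */
        return 0;  /* 11 MSRs x 8 entries each = 88 fixed-range type bytes in total */
    }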
24 | | |
25 | | static unsigned long smp_changes_mask; |
26 | | struct mtrr_state mtrr_state = {}; |
27 | | |
28 | | /* Get the MSR pair relating to a var range */ |
29 | | static void |
30 | | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) |
31 | 10 | { |
32 | 10 | rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), vr->base); |
33 | 10 | rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), vr->mask); |
34 | 10 | } |
35 | | |
36 | | static void |
37 | | get_fixed_ranges(mtrr_type * frs) |
38 | 13 | { |
39 | 13 | uint64_t *p = (uint64_t *) frs; |
40 | 13 | const struct fixed_range_block *block; |
41 | 13 | |
42 | 13 | if (!mtrr_state.have_fixed) |
43 | 0 | return; |
44 | 13 | |
45 | 52 | for (block = fixed_range_blocks; block->ranges; ++block) { |
46 | 39 | unsigned int i; |
47 | 39 | |
48 | 182 | for (i = 0; i < block->ranges; ++i, ++p) |
49 | 143 | rdmsrl(block->base_msr + i, *p); |
50 | 39 | } |
51 | 13 | } |
52 | | |
53 | | void mtrr_save_fixed_ranges(void *info) |
54 | 12 | { |
55 | 12 | get_fixed_ranges(mtrr_state.fixed_ranges); |
56 | 12 | } |
57 | | |
58 | | /* Grab all of the MTRR state for this CPU into *state */ |
59 | | void __init get_mtrr_state(void) |
60 | 1 | { |
61 | 1 | unsigned int i; |
62 | 1 | struct mtrr_var_range *vrs; |
63 | 1 | uint64_t msr_content; |
64 | 1 | |
65 | 1 | if (!mtrr_state.var_ranges) { |
66 | 1 | mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range, |
67 | 1 | num_var_ranges); |
68 | 1 | if (!mtrr_state.var_ranges) |
69 | 0 | return; |
70 | 1 | } |
71 | 1 | vrs = mtrr_state.var_ranges; |
72 | 1 | |
73 | 1 | rdmsrl(MSR_MTRRcap, msr_content); |
74 | 1 | mtrr_state.have_fixed = (msr_content >> 8) & 1; |
75 | 1 | |
76 | 11 | for (i = 0; i < num_var_ranges; i++) |
77 | 10 | get_mtrr_var_range(i, &vrs[i]); |
78 | 1 | get_fixed_ranges(mtrr_state.fixed_ranges); |
79 | 1 | |
80 | 1 | rdmsrl(MSR_MTRRdefType, msr_content); |
81 | 1 | mtrr_state.def_type = (msr_content & 0xff); |
82 | 1 | mtrr_state.enabled = (msr_content & 0xc00) >> 10; |
83 | 1 | |
84 | 1 | /* Store mtrr_cap for HVM MTRR virtualisation. */ |
85 | 1 | rdmsrl(MSR_MTRRcap, mtrr_state.mtrr_cap); |
86 | 1 | } |
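For reference, MTRRdefType as read back above keeps the default type in bits 7:0, the fixed-range enable in bit 10, and the global MTRR enable in bit 11 (per the Intel SDM), which is why mtrr_state.enabled ends up holding the two enable bits in its low two bits. A sketch of the decode (decode_deftype is a hypothetical helper, not part of the source):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static void decode_deftype(uint64_t msr)
    {
        int def_type  = msr & 0xff;        /* bits 7:0 - default memory type */
        bool fixed_en = (msr >> 10) & 1;   /* bit 10   - fixed-range enable  */
        bool mtrr_en  = (msr >> 11) & 1;   /* bit 11   - global MTRR enable  */

        /* mtrr_state.enabled above packs these as fixed_en | (mtrr_en << 1). */
        printf("type %d, fixed %d, global %d\n", def_type, fixed_en, mtrr_en);
    }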
87 | | |
88 | | static bool_t __initdata mtrr_show; |
89 | | boolean_param("mtrr.show", mtrr_show); |
90 | | |
91 | | static const char *__init mtrr_attrib_to_str(mtrr_type x) |
92 | 0 | { |
93 | 0 | static const char __initconst strings[MTRR_NUM_TYPES][16] = |
94 | 0 | { |
95 | 0 | [MTRR_TYPE_UNCACHABLE] = "uncachable", |
96 | 0 | [MTRR_TYPE_WRCOMB] = "write-combining", |
97 | 0 | [MTRR_TYPE_WRTHROUGH] = "write-through", |
98 | 0 | [MTRR_TYPE_WRPROT] = "write-protect", |
99 | 0 | [MTRR_TYPE_WRBACK] = "write-back", |
100 | 0 | }; |
101 | 0 |  |
102 | 0 | return (x < ARRAY_SIZE(strings) && strings[x][0]) ? strings[x] : "?"; |
103 | 0 | } |
104 | | |
105 | | static unsigned int __initdata last_fixed_start; |
106 | | static unsigned int __initdata last_fixed_end; |
107 | | static mtrr_type __initdata last_fixed_type; |
108 | | |
109 | | static void __init print_fixed_last(const char *level) |
110 | 0 | { |
111 | 0 | if (!last_fixed_end) |
112 | 0 | return; |
113 | 0 |  |
114 | 0 | printk("%s %05x-%05x %s\n", level, last_fixed_start, |
115 | 0 | last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); |
116 | 0 |  |
117 | 0 | last_fixed_end = 0; |
118 | 0 | } |
119 | | |
120 | | static void __init update_fixed_last(unsigned int base, unsigned int end, |
121 | | mtrr_type type) |
122 | 0 | { |
123 | 0 | last_fixed_start = base; |
124 | 0 | last_fixed_end = end; |
125 | 0 | last_fixed_type = type; |
126 | 0 | } |
127 | | |
128 | | static void __init print_fixed(unsigned int base, unsigned int step, |
129 | | const mtrr_type *types, const char *level) |
130 | 0 | { |
131 | 0 | unsigned i; |
132 | 0 |  |
133 | 0 | for (i = 0; i < 8; ++i, ++types, base += step) { |
134 | 0 | if (last_fixed_end == 0) { |
135 | 0 | update_fixed_last(base, base + step, *types); |
136 | 0 | continue; |
137 | 0 | } |
138 | 0 | if (last_fixed_end == base && last_fixed_type == *types) { |
139 | 0 | last_fixed_end = base + step; |
140 | 0 | continue; |
141 | 0 | } |
142 | 0 | /* new segments: gap or different type */ |
143 | 0 | print_fixed_last(level); |
144 | 0 | update_fixed_last(base, base + step, *types); |
145 | 0 | } |
146 | 0 | } |
147 | | |
148 | | static void __init print_mtrr_state(const char *level) |
149 | 0 | { |
150 | 0 | unsigned int i; |
151 | 0 | int width; |
152 | 0 |  |
153 | 0 | printk("%sMTRR default type: %s\n", level, |
154 | 0 | mtrr_attrib_to_str(mtrr_state.def_type)); |
155 | 0 | if (mtrr_state.have_fixed) { |
156 | 0 | const mtrr_type *fr = mtrr_state.fixed_ranges; |
157 | 0 | const struct fixed_range_block *block = fixed_range_blocks; |
158 | 0 | unsigned int base = 0, step = 0x10000; |
159 | 0 |  |
160 | 0 | printk("%sMTRR fixed ranges %sabled:\n", level, |
161 | 0 | mtrr_state.enabled & 1 ? "en" : "dis"); |
162 | 0 | for (; block->ranges; ++block, step >>= 2) { |
163 | 0 | for (i = 0; i < block->ranges; ++i, fr += 8) { |
164 | 0 | print_fixed(base, step, fr, level); |
165 | 0 | base += 8 * step; |
166 | 0 | } |
167 | 0 | } |
168 | 0 | print_fixed_last(level); |
169 | 0 | } |
170 | 0 | printk("%sMTRR variable ranges %sabled:\n", level, |
171 | 0 | mtrr_state.enabled & 2 ? "en" : "dis"); |
172 | 0 | width = (paddr_bits - PAGE_SHIFT + 3) / 4; |
173 | 0 |  |
174 | 0 | for (i = 0; i < num_var_ranges; ++i) { |
175 | 0 | if (mtrr_state.var_ranges[i].mask & MTRR_PHYSMASK_VALID) |
176 | 0 | printk("%s %u base %0*"PRIx64"000 mask %0*"PRIx64"000 %s\n", |
177 | 0 | level, i, |
178 | 0 | width, mtrr_state.var_ranges[i].base >> 12, |
179 | 0 | width, mtrr_state.var_ranges[i].mask >> 12, |
180 | 0 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base & |
181 | 0 | MTRR_PHYSBASE_TYPE_MASK)); |
182 | 0 | else |
183 | 0 | printk("%s %u disabled\n", level, i); |
184 | 0 | } |
185 | 0 |  |
186 | 0 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD |
187 | 0 | && boot_cpu_data.x86 >= 0xf) { |
188 | 0 | uint64_t syscfg, tom2; |
189 | 0 |  |
190 | 0 | rdmsrl(MSR_K8_SYSCFG, syscfg); |
191 | 0 | if (syscfg & (1 << 21)) { |
192 | 0 | rdmsrl(MSR_K8_TOP_MEM2, tom2); |
193 | 0 | printk("%sTOM2: %012"PRIx64"%s\n", level, tom2, |
194 | 0 | syscfg & (1 << 22) ? " (WB)" : ""); |
195 | 0 | } |
196 | 0 | } |
197 | 0 | } |
198 | | |
199 |  | /* Some BIOSes are broken and don't set all MTRRs the same! */ |
200 | | void __init mtrr_state_warn(void) |
201 | 1 | { |
202 | 1 | unsigned long mask = smp_changes_mask; |
203 | 1 | |
204 | 1 | if (mtrr_show) |
205 | 0 | print_mtrr_state(mask ? KERN_WARNING : ""); |
206 | 1 | if (!mask) |
207 | 1 | return; |
208 | 0 | if (mask & MTRR_CHANGE_MASK_FIXED) |
209 | 0 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n"); |
210 | 0 | if (mask & MTRR_CHANGE_MASK_VARIABLE) |
211 | 0 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n"); |
212 | 0 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) |
213 | 0 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n"); |
214 | 0 | printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); |
215 | 0 | printk(KERN_INFO "mtrr: corrected configuration.\n"); |
216 | 0 | if (!mtrr_show) |
217 | 0 | print_mtrr_state(KERN_INFO); |
218 | 0 | } |
219 | | |
220 |  | /* Doesn't attempt to pass an error out to MTRR users: doing so would be |
221 |  | quite complicated in some cases, and the most sensible handling is |
222 |  | simply to ignore the failure. */ |
223 | | static void mtrr_wrmsr(unsigned int msr, uint64_t msr_content) |
224 | 24 | { |
225 | 24 | if (wrmsr_safe(msr, msr_content) < 0) |
226 | 0 | printk(KERN_ERR |
227 | 0 | "MTRR: CPU %u: Writing MSR %x to %"PRIx64" failed\n", |
228 | 0 | smp_processor_id(), msr, msr_content); |
229 | 24 | /* Cache overlap status for efficient HVM MTRR virtualisation. */ |
230 | 24 | mtrr_state.overlapped = is_var_mtrr_overlapped(&mtrr_state); |
231 | 24 | } |
232 | | |
233 | | /** |
234 |  | * Checks and updates a fixed-range MTRR if it differs from the value it |
235 |  | * should have. If K8 extensions are wanted, update the K8 SYSCFG MSR also. |
236 |  | * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information. |
237 |  | * \param msr MSR address of the MTRR which should be checked and updated |
238 | | * \param changed pointer which indicates whether the MTRR needed to be changed |
239 | | * \param msrwords pointer to the MSR values which the MSR should have |
240 | | */ |
241 | | static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) |
242 | 132 | { |
243 | 132 | uint64_t msr_content, val; |
244 | 132 | |
245 | 132 | rdmsrl(msr, msr_content); |
246 | 132 | val = ((uint64_t)msrwords[1] << 32) | msrwords[0]; |
247 | 132 | |
248 | 132 | if (msr_content != val) { |
249 | 0 | mtrr_wrmsr(msr, val); |
250 | 0 | *changed = true; |
251 | 0 | } |
252 | 132 | } |
253 | | |
254 | | int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) |
255 | | /* [SUMMARY] Get a free MTRR. |
256 | | <base> The starting (base) address of the region. |
257 | | <size> The size (in bytes) of the region. |
258 |  | [RETURNS] The index of the region on success, else -ENOSPC on error. |
259 | | */ |
260 | 0 | { |
261 | 0 | int i, max; |
262 | 0 | mtrr_type ltype; |
263 | 0 | unsigned long lbase, lsize; |
264 | 0 |  |
265 | 0 | max = num_var_ranges; |
266 | 0 | if (replace_reg >= 0 && replace_reg < max) |
267 | 0 | return replace_reg; |
268 | 0 | for (i = 0; i < max; ++i) { |
269 | 0 | mtrr_if->get(i, &lbase, &lsize, <ype); |
270 | 0 | if (lsize == 0) |
271 | 0 | return i; |
272 | 0 | } |
273 | 0 | return -ENOSPC; |
274 | 0 | } |
275 | | |
276 | | static void generic_get_mtrr(unsigned int reg, unsigned long *base, |
277 | | unsigned long *size, mtrr_type *type) |
278 | 0 | { |
279 | 0 | uint64_t _mask, _base; |
280 | 0 |  |
281 | 0 | rdmsrl(MSR_IA32_MTRR_PHYSMASK(reg), _mask); |
282 | 0 | if (!(_mask & MTRR_PHYSMASK_VALID)) { |
283 | 0 | /* Invalid (i.e. free) range */ |
284 | 0 | *base = 0; |
285 | 0 | *size = 0; |
286 | 0 | *type = 0; |
287 | 0 | return; |
288 | 0 | } |
289 | 0 |  |
290 | 0 | rdmsrl(MSR_IA32_MTRR_PHYSBASE(reg), _base); |
291 | 0 |  |
292 | 0 | /* Work out the shifted address mask. */ |
293 | 0 | _mask = size_or_mask | (_mask >> PAGE_SHIFT); |
294 | 0 |  |
295 | 0 | /* This works correctly if size is a power of two, i.e. a |
296 | 0 | contiguous range. */ |
297 | 0 | *size = -(uint32_t)_mask; |
298 | 0 | *base = _base >> PAGE_SHIFT; |
299 | 0 | *type = _base & 0xff; |
300 | 0 | } |
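The negation at the end relies on the range being a power of two: once size_or_mask has filled every bit above log2(size-in-pages), the truncated mask is all-ones down to that bit, and two's-complement negation recovers the page count directly. A worked example (values illustrative, not from the source):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* A 64 MiB range is 0x4000 4K pages; bits 14 and up are all set. */
        uint32_t mask = 0xFFFFC000u;
        assert(-mask == 0x4000);  /* negation yields the size in pages */
        return 0;
    }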
301 | | |
302 | | /** |
303 | | * Checks and updates the fixed-range MTRRs if they differ from the saved set |
304 | | * \param frs pointer to fixed-range MTRR values, saved by get_fixed_ranges() |
305 | | */ |
306 | | static bool set_fixed_ranges(mtrr_type *frs) |
307 | 12 | { |
308 | 12 | unsigned long long *saved = (unsigned long long *) frs; |
309 | 12 | bool changed = false; |
310 | 12 | int block=-1, range; |
311 | 12 | |
312 | 48 | while (fixed_range_blocks[++block].ranges) |
313 | 168 | for (range=0; range < fixed_range_blocks[block].ranges; range++) |
314 | 132 | set_fixed_range(fixed_range_blocks[block].base_msr + range, |
315 | 132 | &changed, (unsigned int *) saved++); |
316 | 12 | |
317 | 12 | return changed; |
318 | 12 | } |
319 | | |
320 | | /* Set the MSR pair relating to a var range. Returns true if |
321 | | changes are made */ |
322 | | static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) |
323 | 120 | { |
324 | 120 | uint32_t lo, hi, base_lo, base_hi, mask_lo, mask_hi; |
325 | 120 | uint64_t msr_content; |
326 | 120 | bool changed = false; |
327 | 120 | |
328 | 120 | rdmsrl(MSR_IA32_MTRR_PHYSBASE(index), msr_content); |
329 | 120 | lo = (uint32_t)msr_content; |
330 | 120 | hi = (uint32_t)(msr_content >> 32); |
331 | 120 | base_lo = (uint32_t)vr->base; |
332 | 120 | base_hi = (uint32_t)(vr->base >> 32); |
333 | 120 | |
334 | 120 | lo &= 0xfffff0ffUL; |
335 | 120 | base_lo &= 0xfffff0ffUL; |
336 | 120 | hi &= size_and_mask >> (32 - PAGE_SHIFT); |
337 | 120 | base_hi &= size_and_mask >> (32 - PAGE_SHIFT); |
338 | 120 | |
339 | 120 | if ((base_lo != lo) || (base_hi != hi)) { |
340 | 0 | mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(index), vr->base); |
341 | 0 | changed = true; |
342 | 0 | } |
343 | 120 | |
344 | 120 | rdmsrl(MSR_IA32_MTRR_PHYSMASK(index), msr_content); |
345 | 120 | lo = (uint32_t)msr_content; |
346 | 120 | hi = (uint32_t)(msr_content >> 32); |
347 | 120 | mask_lo = (uint32_t)vr->mask; |
348 | 120 | mask_hi = (uint32_t)(vr->mask >> 32); |
349 | 120 | |
350 | 120 | lo &= 0xfffff800UL; |
351 | 120 | mask_lo &= 0xfffff800UL; |
352 | 120 | hi &= size_and_mask >> (32 - PAGE_SHIFT); |
353 | 120 | mask_hi &= size_and_mask >> (32 - PAGE_SHIFT); |
354 | 120 | |
355 | 120 | if ((mask_lo != lo) || (mask_hi != hi)) { |
356 | 0 | mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(index), vr->mask); |
357 | 0 | changed = true; |
358 | 0 | } |
359 | 120 | return changed; |
360 | 120 | } |
361 | | |
362 | | static uint64_t deftype; |
363 | | |
364 | | static unsigned long set_mtrr_state(void) |
365 | | /* [SUMMARY] Set the MTRR state for this CPU. |
368 | | [NOTE] The CPU must already be in a safe state for MTRR changes. |
369 |  | [RETURNS] 0 if no changes were made, else a mask indicating what was changed. |
370 | | */ |
371 | 12 | { |
372 | 12 | unsigned int i; |
373 | 12 | unsigned long change_mask = 0; |
374 | 12 | |
375 | 132 | for (i = 0; i < num_var_ranges; i++) |
376 | 120 | if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) |
377 | 0 | change_mask |= MTRR_CHANGE_MASK_VARIABLE; |
378 | 12 | |
379 | 12 | if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) |
380 | 0 | change_mask |= MTRR_CHANGE_MASK_FIXED; |
381 | 12 | |
382 | 12 | /* Set_mtrr_restore restores the old value of MTRRdefType, |
383 | 12 | so to set it we fiddle with the saved value */ |
384 | 12 | if ((deftype & 0xff) != mtrr_state.def_type |
385 | 12 | || ((deftype & 0xc00) >> 10) != mtrr_state.enabled) { |
386 | 0 | deftype = (deftype & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10); |
387 | 0 | change_mask |= MTRR_CHANGE_MASK_DEFTYPE; |
388 | 0 | } |
389 | 12 | |
390 | 12 | return change_mask; |
391 | 12 | } |
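For reference, the 0xcff mask used above covers exactly the fields being compared: 0x0ff is the default-type field (bits 7:0) and 0xc00 holds the two enable bits (bits 11:10), so the rewritten deftype preserves every other bit of the saved MTRRdefType value.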
392 | | |
393 | | |
394 | | static DEFINE_SPINLOCK(set_atomicity_lock); |
395 | | |
396 | | /* |
397 | | * Since we are disabling the cache don't allow any interrupts - they |
398 | | * would run extremely slow and would only increase the pain. The caller must |
399 | | * ensure that local interrupts are disabled and are reenabled after post_set() |
400 | | * has been called. |
401 | | */ |
402 | | |
403 | | static void prepare_set(void) |
404 | 3 | { |
405 | 3 | /* Note that this is not ideal, since the cache is only flushed/disabled |
406 | 3 | for this CPU while the MTRRs are changed, but changing this requires |
407 | 3 | more invasive changes to the way the kernel boots */ |
408 | 3 | |
409 | 3 | spin_lock(&set_atomicity_lock); |
410 | 3 | |
411 | 3 | /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ |
412 | 3 | write_cr0(read_cr0() | X86_CR0_CD); |
413 | 3 | wbinvd(); |
414 | 3 | |
415 | 3 | /* TLB flushing here relies on Xen always using CR4.PGE. */ |
416 | 3 | BUILD_BUG_ON(!(XEN_MINIMAL_CR4 & X86_CR4_PGE)); |
417 | 3 | write_cr4(read_cr4() & ~X86_CR4_PGE); |
418 | 3 | |
419 | 3 | /* Save MTRR state */ |
420 | 3 | rdmsrl(MSR_MTRRdefType, deftype); |
421 | 3 | |
422 | 3 | /* Disable MTRRs, and set the default type to uncached */ |
423 | 3 | mtrr_wrmsr(MSR_MTRRdefType, deftype & ~0xcff); |
424 | 3 | } |
425 | | |
426 | | static void post_set(void) |
427 | 12 | { |
428 | 12 | /* Intel (P6) standard MTRRs */ |
429 | 12 | mtrr_wrmsr(MSR_MTRRdefType, deftype); |
430 | 12 | |
431 | 12 | /* Enable caches */ |
432 | 12 | write_cr0(read_cr0() & ~X86_CR0_CD); |
433 | 12 | |
434 | 12 | /* Reenable CR4.PGE (also flushes the TLB) */ |
435 | 12 | write_cr4(read_cr4() | X86_CR4_PGE); |
436 | 12 | |
437 | 12 | spin_unlock(&set_atomicity_lock); |
438 | 12 | } |
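Taken together, prepare_set() and post_set() implement the MTRR modification sequence recommended by the Intel SDM, and every writer in this file brackets its MSR updates the same way, with interrupts disabled across the whole critical section, as generic_set_all() and generic_set_mtrr() below do. The usage pattern, as a sketch:

    unsigned long flags;

    local_irq_save(flags);
    prepare_set();      /* CD=1, WBINVD, CR4.PGE cleared, MTRRs disabled */
    /* ... write the MTRR MSRs ... */
    post_set();         /* restore MTRRdefType, CD=0, CR4.PGE set again  */
    local_irq_restore(flags);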
439 | | |
440 | | static void generic_set_all(void) |
441 | 5 | { |
442 | 5 | unsigned long mask, count; |
443 | 5 | unsigned long flags; |
444 | 5 | |
445 | 5 | local_irq_save(flags); |
446 | 5 | prepare_set(); |
447 | 5 | |
448 | 5 | /* Actually set the state */ |
449 | 5 | mask = set_mtrr_state(); |
450 | 5 | |
451 | 5 | post_set(); |
452 | 5 | local_irq_restore(flags); |
453 | 5 | |
454 | 5 | /* Use the atomic bitops to update the global mask */ |
455 | 773 | for (count = 0; count < sizeof mask * 8; ++count) { |
456 | 768 | if (mask & 0x01) |
457 | 0 | set_bit(count, &smp_changes_mask); |
458 | 768 | mask >>= 1; |
459 | 768 | } |
460 | 5 | |
461 | 5 | } |
462 | | |
463 | | static void generic_set_mtrr(unsigned int reg, unsigned long base, |
464 | | unsigned long size, mtrr_type type) |
465 | | /* [SUMMARY] Set variable MTRR register on the local CPU. |
466 | | <reg> The register to set. |
467 | | <base> The base address of the region. |
468 | | <size> The size of the region. If this is 0 the region is disabled. |
469 | | <type> The type of the region. |
472 | | [RETURNS] Nothing. |
473 | | */ |
474 | 0 | { |
475 | 0 | unsigned long flags; |
476 | 0 | struct mtrr_var_range *vr; |
477 | 0 |  |
478 | 0 | vr = &mtrr_state.var_ranges[reg]; |
479 | 0 |  |
480 | 0 | local_irq_save(flags); |
481 | 0 | prepare_set(); |
482 | 0 |  |
483 | 0 | if (size == 0) { |
484 | 0 | /* The invalid bit is kept in the mask, so we simply clear the |
485 | 0 | relevant mask register to disable a range. */ |
486 | 0 | memset(vr, 0, sizeof(*vr)); |
487 | 0 | mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), 0); |
488 | 0 | } else { |
489 | 0 | uint32_t base_lo, base_hi, mask_lo, mask_hi; |
490 | 0 |  |
491 | 0 | base_lo = base << PAGE_SHIFT | type; |
492 | 0 | base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT); |
493 | 0 | mask_lo = (-size << PAGE_SHIFT) | MTRR_PHYSMASK_VALID; |
494 | 0 | mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT); |
495 | 0 | vr->base = ((uint64_t)base_hi << 32) | base_lo; |
496 | 0 | vr->mask = ((uint64_t)mask_hi << 32) | mask_lo; |
497 | 0 |  |
498 | 0 | mtrr_wrmsr(MSR_IA32_MTRR_PHYSBASE(reg), vr->base); |
499 | 0 | mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), vr->mask); |
500 | 0 | } |
501 | 0 |  |
502 | 0 | post_set(); |
503 | 0 | local_irq_restore(flags); |
504 | 0 | } |
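A worked encoding example for the non-zero-size path, under the same power-of-two assumption (all values illustrative): mapping 64 MiB of write-back at 1 GiB on a machine with 36 physical address bits gives base page 0x40000 and size 0x4000 pages.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t base = 0x40000, size = 0x4000, type = 6;  /* MTRR_TYPE_WRBACK */
        uint64_t physbase = (base << 12) | type;
        uint64_t physmask = ((-size << 12) & ((1ULL << 36) - 1)) | (1ULL << 11);

        assert(physbase == 0x0000000040000006ULL);  /* base | type            */
        assert(physmask == 0x0000000FFC000800ULL);  /* -size mask | valid bit */
        return 0;
    }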
505 | | |
506 | | int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type) |
507 | 0 | { |
508 | 0 | unsigned long lbase, last; |
509 | 0 |  |
510 | 0 | /* For Intel PPro stepping <= 7, must be 4 MiB aligned |
511 | 0 | and not touch 0x70000000->0x7003FFFF */ |
512 | 0 | if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && |
513 | 0 | boot_cpu_data.x86_model == 1 && |
514 | 0 | boot_cpu_data.x86_mask <= 7) { |
515 | 0 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { |
516 | 0 | printk(KERN_WARNING "mtrr: base(%#lx000) is not 4 MiB aligned\n", base); |
517 | 0 | return -EINVAL; |
518 | 0 | } |
519 | 0 | if (!(base + size < 0x70000 || base > 0x7003F) && |
520 | 0 | (type == MTRR_TYPE_WRCOMB |
521 | 0 | || type == MTRR_TYPE_WRBACK)) { |
522 | 0 | printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); |
523 | 0 | return -EINVAL; |
524 | 0 | } |
525 | 0 | } |
526 | 0 |  |
527 | 0 | /* Check upper bits of base and last are equal and lower bits are 0 |
528 | 0 | for base and 1 for last */ |
529 | 0 | last = base + size - 1; |
530 | 0 | for (lbase = base; !(lbase & 1) && (last & 1); |
531 | 0 | lbase = lbase >> 1, last = last >> 1) ; |
532 | 0 | if (lbase != last) { |
533 | 0 | printk(KERN_WARNING "mtrr: base(%#lx000) is not aligned on a size(%#lx000) boundary\n", |
534 | 0 | base, size); |
535 | 0 | return -EINVAL; |
536 | 0 | } |
537 | 0 | return 0; |
538 | 0 | } |
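The loop at the end is a compact alignment test: it strips trailing 0-bits from base together with matching 1-bits from last, and the two values coincide afterwards exactly when size is a power of two and base is a multiple of size. An equivalent direct form (is_pow2_aligned is a hypothetical helper, not in the source):

    #include <stdbool.h>

    static bool is_pow2_aligned(unsigned long base, unsigned long size)
    {
        return size != 0 && (size & (size - 1)) == 0 && (base & (size - 1)) == 0;
    }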
539 | | |
540 | | |
541 | | static int generic_have_wrcomb(void) |
542 | 0 | { |
543 | 0 | unsigned long config; |
544 | 0 | rdmsrl(MSR_MTRRcap, config); |
545 | 0 | return (config & (1ULL << 10)); |
546 | 0 | } |
547 | | |
548 |  | /* Generic (Intel-compatible) MTRR operations. */ |
550 | | const struct mtrr_ops generic_mtrr_ops = { |
551 | | .use_intel_if = true, |
552 | | .set_all = generic_set_all, |
553 | | .get = generic_get_mtrr, |
554 | | .get_free_region = generic_get_free_region, |
555 | | .set = generic_set_mtrr, |
556 | | .validate_add_page = generic_validate_add_page, |
557 | | .have_wrcomb = generic_have_wrcomb, |
558 | | }; |