
view xen/arch/x86/mtrr/generic.c @ 3650:beb0887c54bc

bitkeeper revision 1.1159.238.1 (4200c8d8KsGlaM3w6o3y4GHhK1jKjg)

A typesafe allocator submitted by Rusty Russell with trivial renames by me.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> (authored)
Signed-off-by: ian.pratt@cl.cam.ac.uk
author iap10@labyrinth.cl.cam.ac.uk
date Wed Feb 02 12:34:32 2005 +0000 (2005-02-02)
parents e17a946c7a91
children 0ef6e8e6e85d
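
The typesafe allocator mentioned in the commit message is the xmalloc interface used by get_mtrr_state() below. A minimal sketch of the pattern, assuming the xmalloc_array()/xfree() macros this change introduces; the helper name alloc_var_ranges() is hypothetical and only illustrates the idiom:

/* Hypothetical helper for illustration only: allocate and check a typed
 * array.  xmalloc_array(type, n) evaluates to a (type *), so assigning
 * the result to a pointer of the wrong type is caught at compile time. */
static int alloc_var_ranges(void)
{
        struct mtrr_var_range *vr;

        vr = xmalloc_array(struct mtrr_var_range, num_var_ranges);
        if (vr == NULL)
                return -ENOMEM;         /* allocation failed */

        mtrr_state.var_ranges = vr;     /* released later with xfree() */
        return 0;
}
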
line source
/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <xen/init.h>
#include <xen/slab.h>
#include <xen/mm.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
//#include <asm/tlbflush.h>
#include "mtrr.h"

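/* Snapshot of the MTRR configuration, filled by get_mtrr_state() and
   replayed by set_mtrr_state(). */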
struct mtrr_state {
        struct mtrr_var_range *var_ranges;
        mtrr_type fixed_ranges[NUM_FIXED_RANGES];
        unsigned char enabled;
        mtrr_type def_type;
};

static unsigned long smp_changes_mask;
struct mtrr_state mtrr_state = {};

/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
        rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
        rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

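/* The fixed-range state is one 64K MSR, two 16K MSRs and eight 4K MSRs;
   each rdmsr() below fills a lo/hi pair of 32-bit words in *frs. */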
static void __init
get_fixed_ranges(mtrr_type * frs)
{
        unsigned int *p = (unsigned int *) frs;
        int i;

        rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

        for (i = 0; i < 2; i++)
                rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
        for (i = 0; i < 8; i++)
                rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

/* Grab all of the MTRR state for this CPU into mtrr_state */
void __init get_mtrr_state(void)
{
        unsigned int i;
        struct mtrr_var_range *vrs;
        unsigned lo, dummy;

        if (!mtrr_state.var_ranges) {
                mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range,
                                                      num_var_ranges);
                if (!mtrr_state.var_ranges)
                        return;
        }
        vrs = mtrr_state.var_ranges;

        for (i = 0; i < num_var_ranges; i++)
                get_mtrr_var_range(i, &vrs[i]);
        get_fixed_ranges(mtrr_state.fixed_ranges);

        rdmsr(MTRRdefType_MSR, lo, dummy);
        mtrr_state.def_type = (lo & 0xff);
        mtrr_state.enabled = (lo & 0xc00) >> 10;
}

/* Free resources associated with a struct mtrr_state */
void __init finalize_mtrr_state(void)
{
        if (mtrr_state.var_ranges)
                xfree(mtrr_state.var_ranges);
        mtrr_state.var_ranges = NULL;
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
        unsigned long mask = smp_changes_mask;

        if (!mask)
                return;
        if (mask & MTRR_CHANGE_MASK_FIXED)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_VARIABLE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_DEFTYPE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
        printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
        printk(KERN_INFO "mtrr: corrected configuration.\n");
}

int generic_get_free_region(unsigned long base, unsigned long size)
/* [SUMMARY] Get a free MTRR.
   <base> The starting (base) address of the region.
   <size> The size (in bytes) of the region.
   [RETURNS] The index of the region on success, else -ENOSPC if no free
   register is available.
*/
{
        int i, max;
        mtrr_type ltype;
        unsigned long lbase;
        unsigned lsize;

        max = num_var_ranges;
        for (i = 0; i < max; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
                if (lsize == 0)
                        return i;
        }
        return -ENOSPC;
}

void generic_get_mtrr(unsigned int reg, unsigned long *base,
                      unsigned int *size, mtrr_type * type)
{
        unsigned int mask_lo, mask_hi, base_lo, base_hi;

        rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
        if ((mask_lo & 0x800) == 0) {
                /* Invalid (i.e. free) range */
                *base = 0;
                *size = 0;
                *type = 0;
                return;
        }

        rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

        /* Work out the shifted address mask. */
        mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
            | mask_lo >> PAGE_SHIFT;

        /* This works correctly if size is a power of two, i.e. a
           contiguous range. */
        *size = -mask_lo;
        *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
        *type = base_lo & 0xff;
}

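/* Write back any fixed-range MSRs that differ from *frs; returns TRUE if
   anything was changed. */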
static int set_fixed_ranges(mtrr_type * frs)
{
        unsigned int *p = (unsigned int *) frs;
        int changed = FALSE;
        int i;
        unsigned int lo, hi;

        rdmsr(MTRRfix64K_00000_MSR, lo, hi);
        if (p[0] != lo || p[1] != hi) {
                wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
                changed = TRUE;
        }

        for (i = 0; i < 2; i++) {
                rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
                if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
                        wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
                              p[3 + i * 2]);
                        changed = TRUE;
                }
        }

        for (i = 0; i < 8; i++) {
                rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
                if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
                        wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
                              p[7 + i * 2]);
                        changed = TRUE;
                }
        }
        return changed;
}

/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
        unsigned int lo, hi;
        int changed = FALSE;

        rdmsr(MTRRphysBase_MSR(index), lo, hi);
        if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
            || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
                wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
                changed = TRUE;
        }

        rdmsr(MTRRphysMask_MSR(index), lo, hi);

        if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
            || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
                wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
                changed = TRUE;
        }
        return changed;
}

static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
/* [SUMMARY] Set the MTRR state for this CPU.
   <deftype_lo> <deftype_hi> The saved contents of the MTRRdefType MSR.
   [NOTE] The CPU must already be in a safe state for MTRR changes.
   [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
        unsigned int i;
        unsigned long change_mask = 0;

        for (i = 0; i < num_var_ranges; i++)
                if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
                        change_mask |= MTRR_CHANGE_MASK_VARIABLE;

        if (set_fixed_ranges(mtrr_state.fixed_ranges))
                change_mask |= MTRR_CHANGE_MASK_FIXED;

        /* Set_mtrr_restore restores the old value of MTRRdefType,
           so to set it we fiddle with the saved value */
        if ((deftype_lo & 0xff) != mtrr_state.def_type
            || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
                deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
                change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
        }

        return change_mask;
}

static unsigned long cr4 = 0;
static u32 deftype_lo, deftype_hi;
static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;

static void prepare_set(void)
{
        unsigned long cr0;

        /* Note that this is not ideal, since the cache is only flushed/disabled
           for this CPU while the MTRRs are changed, but changing this requires
           more invasive changes to the way the kernel boots */
        spin_lock(&set_atomicity_lock);

        /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
        cr0 = read_cr0() | 0x40000000;  /* set CD flag */
        wbinvd();
        write_cr0(cr0);
        wbinvd();

        /* Save value of CR4 and clear Page Global Enable (bit 7) */
        if ( cpu_has_pge ) {
                cr4 = read_cr4();
                write_cr4(cr4 & ~X86_CR4_PGE);
        }

        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
        __flush_tlb();

        /* Save MTRR state */
        rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Disable MTRRs, and set the default type to uncached */
        wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
}

static void post_set(void)
{
        /* Flush caches and TLBs */
        wbinvd();
        __flush_tlb();

        /* Intel (P6) standard MTRRs */
        wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Enable caches */
        write_cr0(read_cr0() & 0xbfffffff);

        /* Restore value of CR4 */
        if ( cpu_has_pge )
                write_cr4(cr4);
        spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
        unsigned long mask, count;

        prepare_set();

        /* Actually set the state */
        mask = set_mtrr_state(deftype_lo, deftype_hi);

        post_set();

        /* Use the atomic bitops to update the global mask */
        for (count = 0; count < sizeof mask * 8; ++count) {
                if (mask & 0x01)
                        set_bit(count, &smp_changes_mask);
                mask >>= 1;
        }
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
                             unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region. If this is 0 the region is disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
        prepare_set();

        if (size == 0) {
                /* The invalid bit is kept in the mask, so we simply clear the
                   relevant mask register to disable a range. */
                wrmsr(MTRRphysMask_MSR(reg), 0, 0);
        } else {
                wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
                      (base & size_and_mask) >> (32 - PAGE_SHIFT));
                wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
                      (-size & size_and_mask) >> (32 - PAGE_SHIFT));
        }

        post_set();
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
        unsigned long lbase, last;

        /* For Intel PPro stepping <= 7, must be 4 MiB aligned
           and not touch 0x70000000->0x7003FFFF */
        if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask <= 7) {
                if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
                        printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                        return -EINVAL;
                }
                if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
                    (type == MTRR_TYPE_WRCOMB
                     || type == MTRR_TYPE_WRBACK)) {
                        printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
                        return -EINVAL;
                }
        }

        if (base + size < 0x100) {
                printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
                       base, size);
                return -EINVAL;
        }
        /* Check upper bits of base and last are equal and lower bits are 0
           for base and 1 for last */
        last = base + size - 1;
        for (lbase = base; !(lbase & 1) && (last & 1);
             lbase = lbase >> 1, last = last >> 1) ;
        if (lbase != last) {
                printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
                       base, size);
                return -EINVAL;
        }
        return 0;
}

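/* Bit 10 of the MTRRcap MSR indicates support for the write-combining
   memory type. */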
int generic_have_wrcomb(void)
{
        unsigned long config, dummy;
        rdmsr(MTRRcap_MSR, config, dummy);
        return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
        return 1;
}

/* Generic (Intel-compatible) MTRR operations. */
struct mtrr_ops generic_mtrr_ops = {
        .use_intel_if = 1,
        .set_all = generic_set_all,
        .get = generic_get_mtrr,
        .get_free_region = generic_get_free_region,
        .set = generic_set_mtrr,
        .validate_add_page = generic_validate_add_page,
        .have_wrcomb = generic_have_wrcomb,
};