debuggers.hg
annotate xen/arch/x86/mtrr/generic.c @ 3658:0ef6e8e6e85d
bitkeeper revision 1.1159.212.71 (4200f0afX_JumfbEHQex6TdFENULMQ)
Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk
Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk
author | iap10@labyrinth.cl.cam.ac.uk |
---|---|
date | Wed Feb 02 15:24:31 2005 +0000 (2005-02-02) |
parents | e17a946c7a91 beb0887c54bc |
children | bbe8541361dd |
rev | line source |
---|---|
kaf24@3232 | 1 /* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong |
kaf24@3232 | 2 because MTRRs can span upto 40 bits (36bits on most modern x86) */ |
kaf24@3232 | 3 #include <xen/init.h> |
kaf24@3232 | 4 #include <xen/slab.h> |
kaf24@3232 | 5 #include <xen/mm.h> |
kaf24@3232 | 6 #include <asm/io.h> |
kaf24@3232 | 7 #include <asm/mtrr.h> |
kaf24@3232 | 8 #include <asm/msr.h> |
kaf24@3232 | 9 #include <asm/system.h> |
kaf24@3232 | 10 #include <asm/cpufeature.h> |
kaf24@3232 | 11 //#include <asm/tlbflush.h> |
kaf24@3232 | 12 #include "mtrr.h" |
kaf24@3232 | 13 |
/* Snapshot of one CPU's complete MTRR configuration. */
struct mtrr_state {
	struct mtrr_var_range *var_ranges;	/* num_var_ranges entries; allocated lazily in get_mtrr_state() */
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];	/* fixed-range types, read as raw MSR words */
	unsigned char enabled;			/* MTRRdefType bits 10-11 (enable flags), shifted down */
	mtrr_type def_type;			/* MTRRdefType low byte: default memory type */
};
kaf24@3232 | 20 |
/* Bitmask of MTRR_CHANGE_MASK_* flags recording which parts of the MTRR
 * state had to be corrected on some CPU; reported by mtrr_state_warn(). */
static unsigned long smp_changes_mask;
/* The reference MTRR state captured by get_mtrr_state() and replayed
 * onto CPUs by set_mtrr_state(). */
struct mtrr_state mtrr_state = {};
kaf24@3232 | 24 |
kaf24@3232 | 25 /* Get the MSR pair relating to a var range */ |
/* Get the MSR pair (PHYSBASE/PHYSMASK) relating to variable range <index>,
 * storing the raw 32-bit halves into *vr. */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
kaf24@3232 | 32 |
/* Read all eleven fixed-range MTRR MSRs into <frs>.  The mtrr_type array
 * is viewed as an array of 32-bit words, one lo/hi pair per MSR:
 * p[0..1] = the single 64K MSR, p[2..5] = the two 16K MSRs,
 * p[6..21] = the eight 4K MSRs. */
static void __init
get_fixed_ranges(mtrr_type * frs)
{
	unsigned int *p = (unsigned int *) frs;	/* raw 32-bit view of the type array */
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
kaf24@3232 | 46 |
kaf24@3232 | 47 /* Grab all of the MTRR state for this CPU into *state */ |
/* Grab all of the MTRR state for this CPU into the global mtrr_state:
 * every variable range, the fixed ranges, and the default type/enable
 * bits from MTRRdefType. */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	/* Lazily allocate the variable-range array on first call.
	 * On allocation failure we silently return with no state saved. */
	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range,
						      num_var_ranges);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	get_fixed_ranges(mtrr_state.fixed_ranges);

	/* MTRRdefType: low byte is the default memory type,
	 * bits 10-11 are the fixed/variable enable flags. */
	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;
}
kaf24@3232 | 70 |
kaf24@3232 | 71 /* Free resources associated with a struct mtrr_state */ |
kaf24@3232 | 72 void __init finalize_mtrr_state(void) |
kaf24@3232 | 73 { |
kaf24@3232 | 74 if (mtrr_state.var_ranges) |
kaf24@3232 | 75 xfree(mtrr_state.var_ranges); |
kaf24@3232 | 76 mtrr_state.var_ranges = NULL; |
kaf24@3232 | 77 } |
kaf24@3232 | 78 |
kaf24@3232 | 79 /* Some BIOS's are fucked and don't set all MTRRs the same! */ |
kaf24@3232 | 80 void __init mtrr_state_warn(void) |
kaf24@3232 | 81 { |
kaf24@3232 | 82 unsigned long mask = smp_changes_mask; |
kaf24@3232 | 83 |
kaf24@3232 | 84 if (!mask) |
kaf24@3232 | 85 return; |
kaf24@3232 | 86 if (mask & MTRR_CHANGE_MASK_FIXED) |
kaf24@3232 | 87 printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n"); |
kaf24@3232 | 88 if (mask & MTRR_CHANGE_MASK_VARIABLE) |
kaf24@3232 | 89 printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n"); |
kaf24@3232 | 90 if (mask & MTRR_CHANGE_MASK_DEFTYPE) |
kaf24@3232 | 91 printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n"); |
kaf24@3232 | 92 printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); |
kaf24@3232 | 93 printk(KERN_INFO "mtrr: corrected configuration.\n"); |
kaf24@3232 | 94 } |
kaf24@3232 | 95 |
kaf24@3232 | 96 |
int generic_get_free_region(unsigned long base, unsigned long size)
/* [SUMMARY] Get a free MTRR.
   <base> The starting (base) address of the region (currently unused).
   <size> The size (in bytes) of the region (currently unused).
   [RETURNS] The index of the first unused variable MTRR on success,
   else -ENOSPC if all variable ranges are in use.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned lsize;

	max = num_var_ranges;
	/* A range reported with zero size is disabled, i.e. free. */
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}
kaf24@3232 | 117 |
/* Read back variable MTRR <reg> as (*base, *size) in page units plus the
 * memory *type.  A disabled register reports base = size = type = 0. */
void generic_get_mtrr(unsigned int reg, unsigned long *base,
		      unsigned int *size, mtrr_type * type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Valid bit (bit 11) clear: invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: combine the hi/lo MSR halves
	 * into a page-granular mask, with size_or_mask filling in the bits
	 * above the CPU's physical address width (set up elsewhere). */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
	    | mask_lo >> PAGE_SHIFT;

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range: negating the mask yields the page count. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;	/* low byte of PHYSBASE is the memory type */
}
kaf24@3232 | 144 |
/* Write the fixed-range MTRRs from <frs> (same 32-bit-word layout as
 * get_fixed_ranges), touching only MSRs whose current contents differ.
 * Returns TRUE if any MSR was actually rewritten. */
static int set_fixed_ranges(mtrr_type * frs)
{
	unsigned int *p = (unsigned int *) frs;	/* raw 32-bit view of the type array */
	int changed = FALSE;
	int i;
	unsigned int lo, hi;

	/* 64K range: p[0..1] */
	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
	if (p[0] != lo || p[1] != hi) {
		wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
		changed = TRUE;
	}

	/* 16K ranges: p[2..5] */
	for (i = 0; i < 2; i++) {
		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
			wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
			      p[3 + i * 2]);
			changed = TRUE;
		}
	}

	/* 4K ranges: p[6..21] */
	for (i = 0; i < 8; i++) {
		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
			wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
			      p[7 + i * 2]);
			changed = TRUE;
		}
	}
	return changed;
}
kaf24@3232 | 177 |
kaf24@3232 | 178 /* Set the MSR pair relating to a var range. Returns TRUE if |
kaf24@3232 | 179 changes are made */ |
/* Set the MSR pair relating to variable range <index>.  Only the
 * architecturally meaningful bits are compared (reserved bits masked
 * out) so spurious writes are avoided.  Returns TRUE if changes
 * are made. */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	int changed = FALSE;

	/* PHYSBASE: compare type byte + base address bits (0xfffff0ff
	 * skips reserved bits 8-11 of the low word; 0xf keeps the
	 * extended address bits of the high word). */
	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
		wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = TRUE;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	/* PHYSMASK: compare valid bit (11) + mask address bits
	 * (0xfffff800 skips reserved bits 0-10). */
	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
		wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = TRUE;
	}
	return changed;
}
kaf24@3232 | 201 |
static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
/* [SUMMARY] Set the MTRR state for this CPU from the saved mtrr_state.
   <deftype_lo> <deftype_hi> The current MTRRdefType MSR value, as saved
   by prepare_set().
   [NOTE] The CPU must already be in a safe state for MTRR changes.
   [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value.
	   NOTE(review): the |= below can only set bits, never clear a
	   stale type/enable bit already present in deftype_lo — confirm
	   against the intended MTRRdefType layout (later upstream code
	   masks the old bits out before OR-ing the new ones in).
	   NOTE(review): deftype_lo/deftype_hi are parameters passed by
	   value that shadow the file-scope statics of the same names, so
	   this modification is discarded by the caller and post_set()
	   rewrites the original saved value — verify whether this update
	   ever reaches the hardware. */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
kaf24@3232 | 230 |
kaf24@3232 | 231 |
/* State shared between prepare_set() and post_set(), protected by
 * set_atomicity_lock: the saved CR4 and the saved MTRRdefType MSR. */
static unsigned long cr4 = 0;
static u32 deftype_lo, deftype_hi;
static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;
kaf24@3232 | 235 |
/* Put this CPU into a state where the MTRRs can be changed safely:
 * caching disabled, TLBs flushed, MTRRs disabled with the previous
 * MTRRdefType saved for post_set().  Leaves set_atomicity_lock held;
 * must be paired with post_set().  The statement order below is
 * architecturally significant — do not reorder. */
static void prepare_set(void)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */
	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | 0x40000000;	/* set CD flag (CR0 bit 30) */
	wbinvd();
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if ( cpu_has_pge ) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached
	 * (0xf300 clears the type byte and the enable bits 10-11). */
	wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
}
kaf24@3232 | 266 |
/* Undo prepare_set(): restore the saved MTRRdefType, re-enable caches,
 * restore CR4, and release set_atomicity_lock.  The statement order is
 * architecturally significant — do not reorder. */
static void post_set(void)
{
	/* Flush caches and TLBs */
	wbinvd();
	__flush_tlb();

	/* Intel (P6) standard MTRRs: restore the value saved in prepare_set() */
	wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches (clear CD flag, CR0 bit 30) */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if ( cpu_has_pge )
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}
kaf24@3232 | 284 |
kaf24@3232 | 285 static void generic_set_all(void) |
kaf24@3232 | 286 { |
kaf24@3232 | 287 unsigned long mask, count; |
kaf24@3232 | 288 |
kaf24@3232 | 289 prepare_set(); |
kaf24@3232 | 290 |
kaf24@3232 | 291 /* Actually set the state */ |
kaf24@3232 | 292 mask = set_mtrr_state(deftype_lo,deftype_hi); |
kaf24@3232 | 293 |
kaf24@3232 | 294 post_set(); |
kaf24@3232 | 295 |
kaf24@3232 | 296 /* Use the atomic bitops to update the global mask */ |
kaf24@3232 | 297 for (count = 0; count < sizeof mask * 8; ++count) { |
kaf24@3232 | 298 if (mask & 0x01) |
kaf24@3232 | 299 set_bit(count, &smp_changes_mask); |
kaf24@3232 | 300 mask >>= 1; |
kaf24@3232 | 301 } |
kaf24@3232 | 302 |
kaf24@3232 | 303 } |
kaf24@3232 | 304 |
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region, in pages.
   <size> The size of the region, in pages. If this is 0 the region is
   disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		wrmsr(MTRRphysMask_MSR(reg), 0, 0);
	} else {
		/* Convert page-granular base/size back to the MSR layout:
		 * 0x800 sets the valid bit; size_and_mask limits the high
		 * word to the supported physical address bits. */
		wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
		      (base & size_and_mask) >> (32 - PAGE_SHIFT));
		wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
		      (-size & size_and_mask) >> (32 - PAGE_SHIFT));
	}

	post_set();
}
kaf24@3232 | 332 |
/* Validate a requested MTRR region.  <base> and <size> are in pages
 * (4 KiB units — the diagnostics print them with "000" appended).
 * Returns 0 if the region is acceptable, -EINVAL otherwise. */
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/* 0x100 pages = 1 MiB: the legacy region is off-limits. */
	if (base + size < 0x100) {
		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
		       base, size);
		return -EINVAL;
	}
	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last — i.e. size is a power of two and base
	   is aligned on a size boundary. */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1) ;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}
kaf24@3232 | 371 |
kaf24@3232 | 372 |
kaf24@3232 | 373 int generic_have_wrcomb(void) |
kaf24@3232 | 374 { |
kaf24@3232 | 375 unsigned long config, dummy; |
kaf24@3232 | 376 rdmsr(MTRRcap_MSR, config, dummy); |
kaf24@3232 | 377 return (config & (1 << 10)); |
kaf24@3232 | 378 } |
kaf24@3232 | 379 |
/* Fallback ->have_wrcomb hook: unconditionally report write-combining
 * support. */
int positive_have_wrcomb(void)
{
	static const int supported = 1;

	return supported;
}
kaf24@3232 | 384 |
/* Generic (Intel-compatible) MTRR operations table; all hooks are
 * defined above in this file. */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if = 1,
	.set_all = generic_set_all,
	.get = generic_get_mtrr,
	.get_free_region = generic_get_free_region,
	.set = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb = generic_have_wrcomb,
};