debuggers.hg
annotate xen/arch/x86/mtrr/main.c @ 3658:0ef6e8e6e85d
bitkeeper revision 1.1159.212.71 (4200f0afX_JumfbEHQex6TdFENULMQ)
Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk
Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into labyrinth.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk
author | iap10@labyrinth.cl.cam.ac.uk |
---|---|
date | Wed Feb 02 15:24:31 2005 +0000 (2005-02-02) |
parents | c23dd7ec1f54 beb0887c54bc |
children | bbe8541361dd |
rev | line source |
---|---|
kaf24@3232 | 1 /* Generic MTRR (Memory Type Range Register) driver. |
kaf24@3232 | 2 |
kaf24@3232 | 3 Copyright (C) 1997-2000 Richard Gooch |
kaf24@3232 | 4 Copyright (c) 2002 Patrick Mochel |
kaf24@3232 | 5 |
kaf24@3232 | 6 This library is free software; you can redistribute it and/or |
kaf24@3232 | 7 modify it under the terms of the GNU Library General Public |
kaf24@3232 | 8 License as published by the Free Software Foundation; either |
kaf24@3232 | 9 version 2 of the License, or (at your option) any later version. |
kaf24@3232 | 10 |
kaf24@3232 | 11 This library is distributed in the hope that it will be useful, |
kaf24@3232 | 12 but WITHOUT ANY WARRANTY; without even the implied warranty of |
kaf24@3232 | 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
kaf24@3232 | 14 Library General Public License for more details. |
kaf24@3232 | 15 |
kaf24@3232 | 16 You should have received a copy of the GNU Library General Public |
kaf24@3232 | 17 License along with this library; if not, write to the Free |
kaf24@3232 | 18 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
kaf24@3232 | 19 |
kaf24@3232 | 20 Richard Gooch may be reached by email at rgooch@atnf.csiro.au |
kaf24@3232 | 21 The postal address is: |
kaf24@3232 | 22 Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. |
kaf24@3232 | 23 |
kaf24@3232 | 24 Source: "Pentium Pro Family Developer's Manual, Volume 3: |
kaf24@3232 | 25 Operating System Writer's Guide" (Intel document number 242692), |
kaf24@3232 | 26 section 11.11.7 |
kaf24@3232 | 27 |
kaf24@3232 | 28 This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> |
kaf24@3232 | 29 on 6-7 March 2002. |
kaf24@3232 | 30 Source: Intel Architecture Software Developers Manual, Volume 3: |
kaf24@3232 | 31 System Programming Guide; Section 9.11. (1997 edition - PPro). |
kaf24@3232 | 32 */ |
kaf24@3232 | 33 |
kaf24@3232 | 34 #include <xen/config.h> |
kaf24@3232 | 35 #include <xen/init.h> |
kaf24@3232 | 36 #include <xen/pci.h> |
kaf24@3232 | 37 #include <xen/smp.h> |
kaf24@3232 | 38 #include <asm/mtrr.h> |
kaf24@3232 | 39 #include <asm/uaccess.h> |
kaf24@3232 | 40 #include <asm/processor.h> |
kaf24@3232 | 41 #include <asm/msr.h> |
kaf24@3232 | 42 #include "mtrr.h" |
kaf24@3232 | 43 |
#define MTRR_VERSION "2.0 (20020519)"

/* No blocking mutexes in Xen. Spin instead. */
#define DECLARE_MUTEX(_m) spinlock_t _m = SPIN_LOCK_UNLOCKED
#define down(_m) spin_lock(_m)
#define up(_m) spin_unlock(_m)

#define num_booting_cpus() smp_num_cpus

/* Number of variable-range MTRRs on this CPU; set by set_num_var_ranges(). */
u32 num_var_ranges = 0;

/* Per-register usage counts (one slot per variable MTRR); allocated in
 * init_table(), manipulated under main_lock by mtrr_add_page/mtrr_del_page. */
unsigned int *usage_table;
static DECLARE_MUTEX(main_lock);

/* Masks derived from the CPU's physical address width; used by
 * mtrr_add_page() to reject bases/sizes that exceed the MTRR width. */
u32 size_or_mask, size_and_mask;

/* Vendor-specific implementations, registered via set_mtrr_ops(). */
static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};

/* The implementation selected for this CPU; NULL until mtrr_init() runs. */
struct mtrr_ops * mtrr_if = NULL;

__initdata char *mtrr_if_name[] = {
    "none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
};
kaf24@3232 | 67 |
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

/* NOTE(review): presumably set by the Cyrix driver when ARR3 is reserved
 * and must not be modified — confirm against the Cyrix mtrr code. */
extern int arr3_protected;

/* Human-readable names indexed by MTRR memory type; types 2 and 3 are
 * reserved/undefined. See mtrr_attrib_to_str(). */
static char *mtrr_strings[MTRR_NUM_TYPES] =
{
    "uncachable",		/* 0 */
    "write-combining",		/* 1 */
    "?",			/* 2 */
    "?",			/* 3 */
    "write-through",		/* 4 */
    "write-protect",		/* 5 */
    "write-back",		/* 6 */
};
kaf24@3232 | 83 |
kaf24@3232 | 84 char *mtrr_attrib_to_str(int x) |
kaf24@3232 | 85 { |
kaf24@3232 | 86 return (x <= 6) ? mtrr_strings[x] : "?"; |
kaf24@3232 | 87 } |
kaf24@3232 | 88 |
kaf24@3232 | 89 void set_mtrr_ops(struct mtrr_ops * ops) |
kaf24@3232 | 90 { |
kaf24@3232 | 91 if (ops->vendor && ops->vendor < X86_VENDOR_NUM) |
kaf24@3232 | 92 mtrr_ops[ops->vendor] = ops; |
kaf24@3232 | 93 } |
kaf24@3232 | 94 |
kaf24@3232 | 95 /* Returns non-zero if we have the write-combining memory type */ |
kaf24@3232 | 96 static int have_wrcomb(void) |
kaf24@3232 | 97 { |
kaf24@3232 | 98 struct pci_dev *dev; |
kaf24@3232 | 99 |
kaf24@3232 | 100 if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) { |
kaf24@3232 | 101 /* ServerWorks LE chipsets have problems with write-combining |
kaf24@3232 | 102 Don't allow it and leave room for other chipsets to be tagged */ |
kaf24@3232 | 103 if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && |
kaf24@3232 | 104 dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { |
kaf24@3232 | 105 printk(KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n"); |
kaf24@3232 | 106 return 0; |
kaf24@3232 | 107 } |
kaf24@3232 | 108 /* Intel 450NX errata # 23. Non ascending cachline evictions to |
kaf24@3232 | 109 write combining memory may resulting in data corruption */ |
kaf24@3232 | 110 if (dev->vendor == PCI_VENDOR_ID_INTEL && |
kaf24@3232 | 111 dev->device == PCI_DEVICE_ID_INTEL_82451NX) |
kaf24@3232 | 112 { |
kaf24@3232 | 113 printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); |
kaf24@3232 | 114 return 0; |
kaf24@3232 | 115 } |
kaf24@3232 | 116 } |
kaf24@3232 | 117 return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0); |
kaf24@3232 | 118 } |
kaf24@3232 | 119 |
kaf24@3232 | 120 /* This function returns the number of variable MTRRs */ |
kaf24@3232 | 121 void __init set_num_var_ranges(void) |
kaf24@3232 | 122 { |
kaf24@3232 | 123 unsigned long config = 0, dummy; |
kaf24@3232 | 124 |
kaf24@3232 | 125 if (use_intel()) { |
kaf24@3232 | 126 rdmsr(MTRRcap_MSR, config, dummy); |
kaf24@3232 | 127 } else if (is_cpu(AMD)) |
kaf24@3232 | 128 config = 2; |
kaf24@3232 | 129 else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) |
kaf24@3232 | 130 config = 8; |
kaf24@3232 | 131 num_var_ranges = config & 0xff; |
kaf24@3232 | 132 } |
kaf24@3232 | 133 |
kaf24@3232 | 134 static void __init init_table(void) |
kaf24@3232 | 135 { |
kaf24@3232 | 136 int i, max; |
kaf24@3232 | 137 |
kaf24@3232 | 138 max = num_var_ranges; |
iap10@3650 | 139 if ((usage_table = xmalloc_array(unsigned int, max)) == NULL) { |
kaf24@3232 | 140 printk(KERN_ERR "mtrr: could not allocate\n"); |
kaf24@3232 | 141 return; |
kaf24@3232 | 142 } |
kaf24@3232 | 143 for (i = 0; i < max; i++) |
kaf24@3232 | 144 usage_table[i] = 1; |
kaf24@3232 | 145 } |
kaf24@3232 | 146 |
/* Rendezvous state shared between set_mtrr() (master) and ipi_handler()
 * (all other CPUs); lives on the master's stack. */
struct set_mtrr_data {
    atomic_t count;		/* CPUs yet to reach the current phase. */
    atomic_t gate;		/* Toggled by the master to release the others. */
    unsigned long smp_base;
    unsigned long smp_size;
    unsigned int smp_reg;	/* Register to program, or ~0U for "replicate all". */
    mtrr_type smp_type;
};
kaf24@3232 | 155 |
#ifdef CONFIG_SMP

static void ipi_handler(void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.

    Three-phase lock-step with the master in set_mtrr():
      1. check in (dec count), spin until gate opens;
      2. program the MTRR, check in, spin until gate closes;
      3. final dec tells the master it may free 'data'.
*/
{
    struct set_mtrr_data *data = info;
    unsigned long flags;

    local_irq_save(flags);

    /* Phase 1: signal arrival, wait for the master to open the gate. */
    atomic_dec(&data->count);
    while(!atomic_read(&data->gate)) {
        cpu_relax();
        barrier();
    }

    /*  The master has cleared me to execute  */
    if (data->smp_reg != ~0U)
        mtrr_if->set(data->smp_reg, data->smp_base,
                     data->smp_size, data->smp_type);
    else
        /* smp_reg == ~0U: replicate the saved boot-CPU MTRR state. */
        mtrr_if->set_all();

    /* Phase 2: signal completion, wait for the gate to close again. */
    atomic_dec(&data->count);
    while(atomic_read(&data->gate)) {
        cpu_relax();
        barrier();
    }
    /* Phase 3: final decrement — after this the master may destroy 'data'. */
    atomic_dec(&data->count);
    local_irq_restore(flags);
}

#endif
kaf24@3232 | 191 |
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
 * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
 * differently, so we call mtrr_if->set() callback and let them take care of it.
 * When they're done, they again decrement data->count and wait for data.gate to
 * be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
 * Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
                     unsigned long size, mtrr_type type)
{
    struct set_mtrr_data data;
    unsigned long flags;

    data.smp_reg = reg;
    data.smp_base = base;
    data.smp_size = size;
    data.smp_type = type;
    /* One count per CPU other than ourselves. */
    atomic_set(&data.count, num_booting_cpus() - 1);
    atomic_set(&data.gate,0);

    /*  Start the ball rolling on other CPUs  */
    if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
        panic("mtrr: timed out waiting for other CPUs\n");

    local_irq_save(flags);

    /* Wait for every other CPU to disable interrupts and check in. */
    while(atomic_read(&data.count)) {
        cpu_relax();
        barrier();
    }
    /* ok, reset count and toggle gate */
    atomic_set(&data.count, num_booting_cpus() - 1);
    atomic_set(&data.gate,1);

    /* do our MTRR business */

    /*  HACK!
     *  We use this same function to initialize the mtrrs on boot.
     *  The state of the boot cpu's mtrrs has been saved, and we want
     *  to replicate across all the APs.
     *  If we're doing that @reg is set to something special...
     */
    if (reg != ~0U)
        mtrr_if->set(reg,base,size,type);

    /*  wait for the others  */
    while(atomic_read(&data.count)) {
        cpu_relax();
        barrier();
    }
    /* Reset count and close the gate to release the other CPUs. */
    atomic_set(&data.count, num_booting_cpus() - 1);
    atomic_set(&data.gate,0);

    /*
     * Wait here for everyone to have seen the gate change
     * So we're the last ones to touch 'data'
     */
    while(atomic_read(&data.count)) {
        cpu_relax();
        barrier();
    }
    local_irq_restore(flags);
}
kaf24@3232 | 287 |
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (4 KB)
 * @size: Physical size of region in pages (4 KB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request an
 * MTRR is added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */

int mtrr_add_page(unsigned long base, unsigned long size,
                  unsigned int type, char increment)
{
    int i;
    mtrr_type ltype;
    unsigned long lbase;
    unsigned int lsize;
    int error;

    /* No implementation selected yet (mtrr_init() not run or failed). */
    if (!mtrr_if)
        return -ENXIO;

    if ((error = mtrr_if->validate_add_page(base,size,type)))
        return error;

    if (type >= MTRR_NUM_TYPES) {
        printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
        return -EINVAL;
    }

    /*  If the type is WC, check that this processor supports it  */
    if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
        printk(KERN_WARNING
               "mtrr: your processor doesn't support write-combining\n");
        return -ENOSYS;
    }

    /* Reject addresses wider than the CPU's MTRR width (see size_or_mask). */
    if (base & size_or_mask || size & size_or_mask) {
        printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
        return -EINVAL;
    }

    error = -EINVAL;

    /*  Search for existing MTRR  */
    down(&main_lock);
    for (i = 0; i < num_var_ranges; ++i) {
        mtrr_if->get(i, &lbase, &lsize, &ltype);
        /* Entirely below or entirely above this register: keep looking. */
        if (base >= lbase + lsize)
            continue;
        if ((base < lbase) && (base + size <= lbase))
            continue;
        /*  At this point we know there is some kind of overlap/enclosure  */
        if ((base < lbase) || (base + size > lbase + lsize)) {
            /* Partial overlap — not representable; refuse. */
            printk(KERN_WARNING
                   "mtrr: 0x%lx000,0x%lx000 overlaps existing"
                   " 0x%lx000,0x%x000\n", base, size, lbase,
                   lsize);
            goto out;
        }
        /*  New region is enclosed by an existing region  */
        if (ltype != type) {
            if (type == MTRR_TYPE_UNCACHABLE)
                continue;
            printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
                    base, size, mtrr_attrib_to_str(ltype),
                    mtrr_attrib_to_str(type));
            goto out;
        }
        /* Exact/enclosed match with same type: just bump the refcount. */
        if (increment)
            ++usage_table[i];
        error = i;
        goto out;
    }
    /*  Search for an empty MTRR  */
    i = mtrr_if->get_free_region(base, size);
    if (i >= 0) {
        set_mtrr(i, base, size, type);
        usage_table[i] = 1;
    } else
        printk(KERN_INFO "mtrr: no more MTRRs available\n");
    error = i;
 out:
    up(&main_lock);
    return error;
}
kaf24@3232 | 400 |
kaf24@3232 | 401 /** |
kaf24@3232 | 402 * mtrr_add - Add a memory type region |
kaf24@3232 | 403 * @base: Physical base address of region |
kaf24@3232 | 404 * @size: Physical size of region |
kaf24@3232 | 405 * @type: Type of MTRR desired |
kaf24@3232 | 406 * @increment: If this is true do usage counting on the region |
kaf24@3232 | 407 * |
kaf24@3232 | 408 * Memory type region registers control the caching on newer Intel and |
kaf24@3232 | 409 * non Intel processors. This function allows drivers to request an |
kaf24@3232 | 410 * MTRR is added. The details and hardware specifics of each processor's |
kaf24@3232 | 411 * implementation are hidden from the caller, but nevertheless the |
kaf24@3232 | 412 * caller should expect to need to provide a power of two size on an |
kaf24@3232 | 413 * equivalent power of two boundary. |
kaf24@3232 | 414 * |
kaf24@3232 | 415 * If the region cannot be added either because all regions are in use |
kaf24@3232 | 416 * or the CPU cannot support it a negative value is returned. On success |
kaf24@3232 | 417 * the register number for this entry is returned, but should be treated |
kaf24@3232 | 418 * as a cookie only. |
kaf24@3232 | 419 * |
kaf24@3232 | 420 * On a multiprocessor machine the changes are made to all processors. |
kaf24@3232 | 421 * This is required on x86 by the Intel processors. |
kaf24@3232 | 422 * |
kaf24@3232 | 423 * The available types are |
kaf24@3232 | 424 * |
kaf24@3232 | 425 * %MTRR_TYPE_UNCACHABLE - No caching |
kaf24@3232 | 426 * |
kaf24@3232 | 427 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever |
kaf24@3232 | 428 * |
kaf24@3232 | 429 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts |
kaf24@3232 | 430 * |
kaf24@3232 | 431 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes |
kaf24@3232 | 432 * |
kaf24@3232 | 433 * BUGS: Needs a quiet flag for the cases where drivers do not mind |
kaf24@3232 | 434 * failures and do not wish system log messages to be sent. |
kaf24@3232 | 435 */ |
kaf24@3232 | 436 |
kaf24@3232 | 437 int |
kaf24@3232 | 438 mtrr_add(unsigned long base, unsigned long size, unsigned int type, |
kaf24@3232 | 439 char increment) |
kaf24@3232 | 440 { |
kaf24@3232 | 441 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { |
kaf24@3232 | 442 printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n"); |
kaf24@3232 | 443 printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base); |
kaf24@3232 | 444 return -EINVAL; |
kaf24@3232 | 445 } |
kaf24@3232 | 446 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, |
kaf24@3232 | 447 increment); |
kaf24@3232 | 448 } |
kaf24@3232 | 449 |
kaf24@3232 | 450 /** |
kaf24@3232 | 451 * mtrr_del_page - delete a memory type region |
kaf24@3232 | 452 * @reg: Register returned by mtrr_add |
kaf24@3232 | 453 * @base: Physical base address |
kaf24@3232 | 454 * @size: Size of region |
kaf24@3232 | 455 * |
kaf24@3232 | 456 * If register is supplied then base and size are ignored. This is |
kaf24@3232 | 457 * how drivers should call it. |
kaf24@3232 | 458 * |
kaf24@3232 | 459 * Releases an MTRR region. If the usage count drops to zero the |
kaf24@3232 | 460 * register is freed and the region returns to default state. |
kaf24@3232 | 461 * On success the register is returned, on failure a negative error |
kaf24@3232 | 462 * code. |
kaf24@3232 | 463 */ |
kaf24@3232 | 464 |
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
    int i, max;
    mtrr_type ltype;
    unsigned long lbase;
    unsigned int lsize;
    int error = -EINVAL;

    /* No implementation selected yet (mtrr_init() not run or failed). */
    if (!mtrr_if)
        return -ENXIO;

    max = num_var_ranges;
    down(&main_lock);
    if (reg < 0) {
        /*  Search for existing MTRR  */
        for (i = 0; i < max; ++i) {
            mtrr_if->get(i, &lbase, &lsize, &ltype);
            if (lbase == base && lsize == size) {
                reg = i;
                break;
            }
        }
        if (reg < 0) {
            printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
                   size);
            goto out;
        }
    }
    if (reg >= max) {
        printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
        goto out;
    }
    /* Cyrix ARR3 may be reserved (arr3_protected) and must not be freed. */
    if (is_cpu(CYRIX) && !use_intel()) {
        if ((reg == 3) && arr3_protected) {
            printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
            goto out;
        }
    }
    mtrr_if->get(reg, &lbase, &lsize, &ltype);
    /* A zero size means the register is not currently in use. */
    if (lsize < 1) {
        printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
        goto out;
    }
    if (usage_table[reg] < 1) {
        printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
        goto out;
    }
    /* Drop a reference; clear the register once the count hits zero. */
    if (--usage_table[reg] < 1)
        set_mtrr(reg, 0, 0, 0);
    error = reg;
 out:
    up(&main_lock);
    return error;
}
kaf24@3232 | 519 /** |
kaf24@3232 | 520 * mtrr_del - delete a memory type region |
kaf24@3232 | 521 * @reg: Register returned by mtrr_add |
kaf24@3232 | 522 * @base: Physical base address |
kaf24@3232 | 523 * @size: Size of region |
kaf24@3232 | 524 * |
kaf24@3232 | 525 * If register is supplied then base and size are ignored. This is |
kaf24@3232 | 526 * how drivers should call it. |
kaf24@3232 | 527 * |
kaf24@3232 | 528 * Releases an MTRR region. If the usage count drops to zero the |
kaf24@3232 | 529 * register is freed and the region returns to default state. |
kaf24@3232 | 530 * On success the register is returned, on failure a negative error |
kaf24@3232 | 531 * code. |
kaf24@3232 | 532 */ |
kaf24@3232 | 533 |
kaf24@3232 | 534 int |
kaf24@3232 | 535 mtrr_del(int reg, unsigned long base, unsigned long size) |
kaf24@3232 | 536 { |
kaf24@3232 | 537 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { |
kaf24@3232 | 538 printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n"); |
kaf24@3232 | 539 printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base); |
kaf24@3232 | 540 return -EINVAL; |
kaf24@3232 | 541 } |
kaf24@3232 | 542 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); |
kaf24@3232 | 543 } |
kaf24@3232 | 544 |
kaf24@3232 | 545 EXPORT_SYMBOL(mtrr_add); |
kaf24@3232 | 546 EXPORT_SYMBOL(mtrr_del); |
kaf24@3232 | 547 |
kaf24@3232 | 548 /* HACK ALERT! |
kaf24@3232 | 549 * These should be called implicitly, but we can't yet until all the initcall |
kaf24@3232 | 550 * stuff is done... |
kaf24@3232 | 551 */ |
kaf24@3232 | 552 extern void amd_init_mtrr(void); |
kaf24@3232 | 553 extern void cyrix_init_mtrr(void); |
kaf24@3232 | 554 extern void centaur_init_mtrr(void); |
kaf24@3232 | 555 |
/* Give each vendor driver a chance to register its mtrr_ops
 * (each calls set_mtrr_ops() internally — TODO confirm). */
static void __init init_ifs(void)
{
    amd_init_mtrr();
    cyrix_init_mtrr();
    centaur_init_mtrr();
}
kaf24@3232 | 562 |
/* Replicate the boot CPU's MTRR configuration to all other processors. */
static void __init init_other_cpus(void)
{
    /* On Intel-style MTRRs, snapshot the boot CPU's state first. */
    if (use_intel())
        get_mtrr_state();

    /*  bring up the other processors: reg == ~0U makes each CPU
        replicate the saved state via mtrr_if->set_all()  */
    set_mtrr(~0U,0,0,0);

    if (use_intel()) {
        finalize_mtrr_state();
        mtrr_state_warn();
    }
}
kaf24@3232 | 576 |
kaf24@3232 | 577 |
/* Saved state of a single MTRR (base/size/type); no user visible in
 * this chunk — possibly used elsewhere in the file. */
struct mtrr_value {
    mtrr_type	ltype;
    unsigned long	lbase;
    unsigned int	lsize;
};
kaf24@3232 | 583 |
/**
 * mtrr_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 *
 * Selects mtrr_if based on CPU capabilities, derives the address-width
 * masks, then sizes the tables and programs the other CPUs.
 */
static int __init mtrr_init(void)
{
    init_ifs();

    if (cpu_has_mtrr) {
        /* Standard (Intel-style) MTRRs. Default masks assume a
           36-bit physical address width. */
        mtrr_if = &generic_mtrr_ops;
        size_or_mask = 0xff000000;	/* 36 bits */
        size_and_mask = 0x00f00000;

        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
            /* The original Athlon docs said that
               total addressable memory is 44 bits wide.
               It was not really clear whether its MTRRs
               follow this or not. (Read: 44 or 36 bits).
               However, "x86-64_overview.pdf" explicitly
               states that "previous implementations support
               36 bit MTRRs" and also provides a way to
               query the width (in bits) of the physical
               addressable memory on the Hammer family.
             */
            if (boot_cpu_data.x86 == 15
                && (cpuid_eax(0x80000000) >= 0x80000008)) {
                u32 phys_addr;
                /* CPUID 0x80000008 EAX[7:0] = physical address bits. */
                phys_addr = cpuid_eax(0x80000008) & 0xff;
                size_or_mask =
                    ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
                size_and_mask = ~size_or_mask & 0xfff00000;
            }
            /* Athlon MTRRs use an Intel-compatible interface for
             * getting and setting */
            break;
        case X86_VENDOR_CENTAUR:
            if (boot_cpu_data.x86 == 6) {
                /* VIA Cyrix family have Intel style MTRRs, but don't support PAE */
                size_or_mask = 0xfff00000;	/* 32 bits */
                size_and_mask = 0;
            }
            break;

        default:
            break;
        }
    } else {
        /* No standard MTRRs: probe for a vendor-specific scheme
           registered by init_ifs(). All are 32-bit only. */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
            if (cpu_has_k6_mtrr) {
                /* Pre-Athlon (K6) AMD CPU MTRRs */
                mtrr_if = mtrr_ops[X86_VENDOR_AMD];
                size_or_mask = 0xfff00000;	/* 32 bits */
                size_and_mask = 0;
            }
            break;
        case X86_VENDOR_CENTAUR:
            if (cpu_has_centaur_mcr) {
                mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
                size_or_mask = 0xfff00000;	/* 32 bits */
                size_and_mask = 0;
            }
            break;
        case X86_VENDOR_CYRIX:
            if (cpu_has_cyrix_arr) {
                mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
                size_or_mask = 0xfff00000;	/* 32 bits */
                size_and_mask = 0;
            }
            break;
        default:
            break;
        }
    }
    printk(KERN_INFO "mtrr: v%s\n",MTRR_VERSION);

    if (mtrr_if) {
        set_num_var_ranges();
        init_table();
        init_other_cpus();
        return 0;
    }
    /* No usable MTRR implementation found on this CPU. */
    return -ENXIO;
}
kaf24@3232 | 672 |
kaf24@3232 | 673 __initcall(mtrr_init); |