/*
 * xen/arch/x86/cpu/mtrr/cyrix.c
 *
 * From the debuggers.hg repository, changeset 0:7d21f7218375 —
 * an exact replica of xen-unstable as of 2008-05-19 (plus README-this).
 *
 * Author: Mukesh Rathor
 * Date:   Mon May 19 15:34:57 2008 -0700
 */
1 #include <xen/init.h>
2 #include <xen/mm.h>
3 #include <asm/mtrr.h>
4 #include <asm/msr.h>
5 #include <asm/io.h>
6 #include "mtrr.h"
/* Non-zero when ARR3 is reserved for SMM (SMI_LOCK set in CCR3) and must not
 * be reassigned; consulted by cyrix_get_free_region() and set by the
 * (currently compiled-out) cyrix_arr_init(). */
int arr3_protected;
/*
 * Read one Cyrix Address Range Register (ARR) and decode it into the
 * generic MTRR representation.
 *
 * reg:  ARR index (0-7).
 * base: out - region base, in pages (raw address >> PAGE_SHIFT).
 * size: out - region size, in pages; 0 means the ARR is disabled.
 * type: out - decoded MTRR memory type from the matching RCR.
 *
 * Runs with interrupts disabled while MAPEN is set, since the Cyrix
 * config-register window must not be left open across an interrupt.
 */
static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
	      unsigned long *size, mtrr_type * type)
{
	unsigned long flags;
	unsigned char arr, ccr3, rcr, shift;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* Save flags and disable interrupts */
	local_irq_save(flags);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
	/* Assemble the 24-bit base from three consecutive config registers
	 * directly into bytes 3..1 of *base (relies on x86 little-endian
	 * layout of the unsigned long). */
	((unsigned char *) base)[3] = getCx86(arr);
	((unsigned char *) base)[2] = getCx86(arr + 1);
	((unsigned char *) base)[1] = getCx86(arr + 2);
	rcr = getCx86(CX86_RCR_BASE + reg);
	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */

	/* Enable interrupts if it was enabled previously */
	local_irq_restore(flags);
	/* The low nibble of the third register is the size field, not part
	 * of the base; it sits below PAGE_SHIFT so the shift-out below also
	 * strips it from *base. */
	shift = ((unsigned char *) base)[1] & 0x0f;
	*base >>= PAGE_SHIFT;

	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
	 * Note: shift==0xf means 4G, this is unsupported.
	 */
	if (shift)
		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
	else
		*size = 0;

	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6,
	 * hence the two different RCR decode tables below. */
	if (reg < 7) {
		switch (rcr) {
		case 1:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 9:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 24:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	} else {
		switch (rcr) {
		case 0:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 9:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 25:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	}
}
static int
cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free ARR.
   <base> The starting (base) address of the region.
   <size> The size of the region, in pages (0x2000 pages == 32M below).
   <replace_reg> A specific ARR to reuse, or an out-of-range value to scan.
   [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
	int i;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	/* Honour an explicit replacement request when it is usable. */
	switch (replace_reg) {
	case 7:
		/* ARR7's minimum granularity is 256K (0x40 pages). */
		if (size < 0x40)
			break;
		/* fall through */
	case 6:
	case 5:
	case 4:
		return replace_reg;
	case 3:
		/* ARR3 may be locked for SMM use; leave it alone then. */
		if (arr3_protected)
			break;
		/* fall through */
	case 2:
	case 1:
	case 0:
		return replace_reg;
	}
	/* If we are to set up a region >32M then look at ARR7 immediately */
	if (size > 0x2000) {
		cyrix_get_arr(7, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return 7;
		/* Else try ARR0-ARR6 first */
	} else {
		for (i = 0; i < 7; i++) {
			cyrix_get_arr(i, &lbase, &lsize, &ltype);
			if ((i == 3) && arr3_protected)
				continue;
			if (lsize == 0)
				return i;
		}
		/* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */
		cyrix_get_arr(i, &lbase, &lsize, &ltype);
		if ((lsize == 0) && (size >= 0x40))
			return i;
	}
	return -ENOSPC;
}
/* State saved by prepare_set() and restored by post_set():
 * cr4  - CR4 value when PGE had to be cleared;
 * ccr3 - CCR3 value before MAPEN was enabled. */
static u32 cr4 = 0;
static u32 ccr3;
/*
 * Put the CPU into a state where the ARRs can be rewritten safely:
 * global pages disabled, caches disabled and flushed, and MAPEN set so
 * the Cyrix configuration registers are accessible.
 * Must be paired with post_set().
 */
static void prepare_set(void)
{
	u32 cr0;

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if ( cpu_has_pge ) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Disable and flush caches. Note that wbinvd flushes the TLBs as
	   a side-effect */
	cr0 = read_cr0() | 0x40000000;	/* set CR0.CD (cache disable, bit 30) */
	wbinvd();
	write_cr0(cr0);
	wbinvd();

	/* Cyrix ARRs - everything else were excluded at the top */
	ccr3 = getCx86(CX86_CCR3);

	/* Enable MAPEN (CCR3 bit 4) to expose the ARR registers. */
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);

}
/*
 * Undo prepare_set(): flush once more, restore CCR3 (which clears MAPEN),
 * re-enable the caches and restore CR4.
 */
static void post_set(void)
{
	/* Flush caches and TLBs */
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86(CX86_CCR3, ccr3);

	/* Enable caches (clear CR0.CD, bit 30) */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if ( cpu_has_pge )
		write_cr4(cr4);
}
/*
 * Program one ARR with the given region and memory type.
 *
 * reg:  ARR index (0-7).
 * base: region base, in pages (converted to a physical address below).
 * size: region size, in pages; ARR7 encodes its size in 256K units.
 * type: MTRR memory type, translated to the Cyrix RCR encoding.
 */
static void cyrix_set_arr(unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
	unsigned char arr, arr_type, arr_size;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
	if (reg >= 7)
		size >>= 6;

	size &= 0x7fff;		/* make sure arr_size <= 14 */
	/* Encode the size as log2 + 1 (position of the highest set bit). */
	for (arr_size = 0; size; arr_size++, size >>= 1) ;

	/* RCR encodings differ: bit 0 is Cache Disable on ARR0-ARR6 but
	 * Cache Enable on ARR7 (mirror of the decode in cyrix_get_arr). */
	if (reg < 7) {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 1;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 9;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 24;
			break;
		default:
			arr_type = 8;
			break;
		}
	} else {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 0;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 8;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 25;
			break;
		default:
			arr_type = 9;
			break;
		}
	}

	prepare_set();

	/* Write the 24-bit base (bytes 3..1, little-endian layout) and the
	 * size nibble into the three ARR config registers. */
	base <<= PAGE_SHIFT;
	setCx86(arr, ((unsigned char *) &base)[3]);
	setCx86(arr + 1, ((unsigned char *) &base)[2]);
	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
	setCx86(CX86_RCR_BASE + reg, arr_type);

	post_set();
}
230 typedef struct {
231 unsigned long base;
232 unsigned long size;
233 mtrr_type type;
234 } arr_state_t;
236 static arr_state_t arr_state[8] = {
237 {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
238 {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
239 };
241 static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
243 static void cyrix_set_all(void)
244 {
245 int i;
247 prepare_set();
249 /* the CCRs are not contiguous */
250 for (i = 0; i < 4; i++)
251 setCx86(CX86_CCR0 + i, ccr_state[i]);
252 for (; i < 7; i++)
253 setCx86(CX86_CCR4 + i, ccr_state[i]);
254 for (i = 0; i < 8; i++)
255 cyrix_set_arr(i, arr_state[i].base,
256 arr_state[i].size, arr_state[i].type);
258 post_set();
259 }
/* NOTE(review): the one-time ARR/SMM initialisation below is compiled out
 * (and .init is commented out of cyrix_mtrr_ops); kept for reference. */
#if 0
/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
 * with the SMM (System Management Mode) mode. So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 * if it is set, write a warning message: ARR3 cannot be changed!
 * (it cannot be changed until the next processor reset)
 * if it is reset, then we can change it, set all the needed bits:
 * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 * - disable access to SMM memory (CCR1 bit 2 reset)
 * - disable SMM mode (CCR1 bit 1 reset)
 * - disable write protection of ARR3 (CCR6 bit 1 reset)
 * - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init
cyrix_arr_init(void)
{
	struct set_mtrr_context ctxt;
	unsigned char ccr[7];
	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
	int i;
#endif

	/* flush cache and enable MAPEN */
	set_mtrr_prepare_save(&ctxt);
	set_mtrr_cache_disable(&ctxt);

	/* Save all CCRs locally */
	ccr[0] = getCx86(CX86_CCR0);
	ccr[1] = getCx86(CX86_CCR1);
	ccr[2] = getCx86(CX86_CCR2);
	ccr[3] = ctxt.ccr3;
	ccr[4] = getCx86(CX86_CCR4);
	ccr[5] = getCx86(CX86_CCR5);
	ccr[6] = getCx86(CX86_CCR6);

	/* SMI_LOCK (CCR3 bit 0): ARR3 is frozen until the next reset. */
	if (ccr[3] & 1) {
		ccrc[3] = 1;
		arr3_protected = 1;
	} else {
		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
		 * access to SMM memory through ARR3 (bit 7).
		 */
		if (ccr[1] & 0x80) {
			ccr[1] &= 0x7f;
			ccrc[1] |= 0x80;
		}
		if (ccr[1] & 0x04) {
			ccr[1] &= 0xfb;
			ccrc[1] |= 0x04;
		}
		if (ccr[1] & 0x02) {
			ccr[1] &= 0xfd;
			ccrc[1] |= 0x02;
		}
		arr3_protected = 0;
		if (ccr[6] & 0x02) {
			ccr[6] &= 0xfd;
			ccrc[6] = 1;	/* Disable write protection of ARR3 */
			setCx86(CX86_CCR6, ccr[6]);
		}
		/* Disable ARR3. This is safe now that we disabled SMM. */
		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
	}
	/* If we changed CCR1 in memory, change it in the processor, too. */
	if (ccrc[1])
		setCx86(CX86_CCR1, ccr[1]);

	/* Enable ARR usage by the processor */
	if (!(ccr[5] & 0x20)) {
		ccr[5] |= 0x20;
		ccrc[5] = 1;
		setCx86(CX86_CCR5, ccr[5]);
	}
#ifdef CONFIG_SMP
	/* Cache the final CCR/ARR values so other CPUs can replay them
	 * via cyrix_set_all(). */
	for (i = 0; i < 7; i++)
		ccr_state[i] = ccr[i];
	for (i = 0; i < 8; i++)
		cyrix_get_arr(i,
			      &arr_state[i].base, &arr_state[i].size,
			      &arr_state[i].type);
#endif

	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */

	if (ccrc[5])
		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
	if (ccrc[3])
		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
/*
    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
	if (ccrc[6])
		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
}
#endif
/* MTRR operations registered for Cyrix CPUs. The one-time ARR init hook
 * (cyrix_arr_init) is currently compiled out, hence .init stays unset. */
static struct mtrr_ops cyrix_mtrr_ops = {
	.vendor            = X86_VENDOR_CYRIX,
//	.init              = cyrix_arr_init,
	.set_all	   = cyrix_set_all,
	.set               = cyrix_set_arr,
	.get               = cyrix_get_arr,
	.get_free_region   = cyrix_get_free_region,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};
/*
 * Register the Cyrix MTRR operations with the generic MTRR driver.
 * Always succeeds; returns 0.
 */
int __init cyrix_init_mtrr(void)
{
	set_mtrr_ops(&cyrix_mtrr_ops);
	return 0;
}
379 //arch_initcall(cyrix_init_mtrr);