xen-vtx-unstable
annotate linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h @ 4112:db5a30a327e6
bitkeeper revision 1.1236.25.8 (4233549cSYOSn-8TjPjFtQTvJB0ECA)
Update to Linux 2.6.11.
Signed-off-by: Christian Limpach <chris@xensource.com>
author      cl349@firebug.cl.cam.ac.uk
date        Sat Mar 12 20:44:12 2005 +0000 (2005-03-12)
parents     cff0d3baf599
children    f234096eb41e
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <asm/synch_bitops.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm-xen/hypervisor.h>

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define switch_to(prev,next,last) do { \
	unsigned long esi,edi; \
	asm volatile("pushfl\n\t" \
		     "pushl %%ebp\n\t" \
		     "movl %%esp,%0\n\t"	/* save ESP */ \
		     "movl %5,%%esp\n\t"	/* restore ESP */ \
		     "movl $1f,%1\n\t"		/* save EIP */ \
		     "pushl %6\n\t"		/* restore EIP */ \
		     "jmp __switch_to\n" \
		     "1:\t" \
		     "popl %%ebp\n\t" \
		     "popfl" \
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
		      "=a" (last),"=S" (esi),"=D" (edi) \
		     :"m" (next->thread.esp),"m" (next->thread.eip), \
		      "2" (prev), "d" (next)); \
} while (0)

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )

static inline unsigned long _get_base(char * addr)
{
	unsigned long __base;
	__asm__("movb %3,%%dh\n\t"
		"movb %2,%%dl\n\t"
		"shll $16,%%edx\n\t"
		"movw %1,%%dx"
		:"=&d" (__base)
		:"m" (*((addr)+2)),
		 "m" (*((addr)+4)),
		 "m" (*((addr)+7)));
	return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value) \
	asm volatile("\n" \
		"1:\t" \
		"movl %0,%%" #seg "\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:\t" \
		"pushl $0\n\t" \
		"popl %%" #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		".align 4\n\t" \
		".long 1b,3b\n" \
		".previous" \
		: :"m" (*(unsigned int *)&(value)))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
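/*
 * Example (a minimal sketch, not part of this header; thread.fs/thread.gs are
 * assumed from the usual i386 thread_struct layout): context-switch code uses
 * these helpers to save the outgoing task's data segment registers and load
 * the incoming task's, with loadsegment() falling back to the null selector
 * through the .fixup/__ex_table path if the saved selector turns out to be
 * invalid:
 *
 *	savesegment(fs, prev->thread.fs);
 *	savesegment(gs, prev->thread.gs);
 *	loadsegment(fs, next->thread.fs);
 *	loadsegment(gs, next->thread.gs);
 */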

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() (HYPERVISOR_fpu_taskswitch(0))
#define read_cr0() \
	BUG();
#define write_cr0(x) \
	BUG();
#define read_cr4() \
	BUG();
#define write_cr4(x) \
	BUG();
#define stts() (HYPERVISOR_fpu_taskswitch(1))
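/*
 * A paravirtualised guest cannot write CR0 itself, so clts()/stts() are routed
 * through the HYPERVISOR_fpu_taskswitch hypercall instead of the native clts
 * instruction or a CR0 read-modify-write.  A minimal sketch of the usual lazy
 * FPU pattern (restore_fpu()/TS_USEDFPU are assumed from the generic i386
 * code, not defined here):
 *
 *	clts();				// ask Xen to clear CR0.TS for this VCPU
 *	restore_fpu(current);		// reload this task's FPU state
 *	current_thread_info()->status |= TS_USEDFPU;
 *
 * and stts() is called when the FPU is lazily given up, so the next FPU
 * instruction traps again.
 */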

#endif	/* __KERNEL__ */

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))


/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		: "D"(ptr),
		  "b"(low),
		  "c"(high)
		: "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
					 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
				    unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
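/*
 * Example (a minimal sketch; the PTE pointer is hypothetical): set_64bit()
 * stores a 64-bit value that other CPUs can never observe half-written, which
 * is what the lock cmpxchg8b loop above provides.  Updating a PAE page-table
 * entry is the classic use:
 *
 *	unsigned long long *pte = ...;	// some 64-bit descriptor slot
 *	set_64bit(pte, new_val);	// readers see old or new, never a mix
 */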

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so that attribute volatile is necessary,
 *	  but generally the primitive is invalid, *ptr is an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
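/*
 * Example (a minimal sketch; simple_lock is hypothetical): xchg() atomically
 * swaps a value with memory and acts as a full barrier, so tas() ("test and
 * set") can serve as a trivial lock acquire:
 *
 *	static int simple_lock;			// 0 = free, 1 = held
 *
 *	while (tas(&simple_lock))		// old value 1 => someone else holds it
 *		cpu_relax();
 *	... critical section ...
 *	xchg(&simple_lock, 0);			// release with an atomic store
 */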

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
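/*
 * Example (a minimal sketch; the counter is hypothetical): the usual lock-free
 * read-modify-write loop built on cmpxchg().  Success is detected by comparing
 * the returned value with the value that was read, as described above:
 *
 *	static unsigned long counter;
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;			// snapshot the current value
 *		new = old + 1;			// compute the update
 *	} while (cmpxchg(&counter, old, new) != old);	// retry if another CPU raced us
 */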

#ifdef __KERNEL__
struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;
	__u8  cpuid;		/* cpuid bit set for replacement */
	__u8  instrlen;		/* length of original instruction */
	__u8  replacementlen;	/* length of new instruction, <= instrlen */
	__u8  pad;
};
#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows optimized instructions to be used even on generic binary
 * kernels.
 *
 * The length of oldinstr must be longer than or equal to the length of
 * newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature) \
	asm volatile ("661:\n\t" oldinstr "\n662:\n" \
		      ".section .altinstructions,\"a\"\n" \
		      "  .align 4\n" \
		      "  .long 661b\n"            /* label */ \
		      "  .long 663f\n"            /* new instruction */ \
		      "  .byte %c0\n"             /* feature bit */ \
		      "  .byte 662b-661b\n"       /* sourcelen */ \
		      "  .byte 664f-663f\n"       /* replacementlen */ \
		      ".previous\n" \
		      ".section .altinstr_replacement,\"ax\"\n" \
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature) : "memory")

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable sized constraints like "m" or "g" in the
 * replacement make sure to pad to the worst case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...) \
	asm volatile ("661:\n\t" oldinstr "\n662:\n" \
		      ".section .altinstructions,\"a\"\n" \
		      "  .align 4\n" \
		      "  .long 661b\n"            /* label */ \
		      "  .long 663f\n"            /* new instruction */ \
		      "  .byte %c0\n"             /* feature bit */ \
		      "  .byte 662b-661b\n"       /* sourcelen */ \
		      "  .byte 664f-663f\n"       /* replacementlen */ \
		      ".previous\n" \
		      ".section .altinstr_replacement,\"ax\"\n" \
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature), ##input)
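/*
 * Example (a minimal sketch of typical use, not taken from this header): emit
 * a prefetch hint only on CPUs that advertise SSE, leaving nops in place
 * otherwise.  The input operand is %1 because %0 is the feature constant, and
 * the nop padding covers the worst-case encoding of the replacement:
 *
 *	static inline void sketch_prefetch(const void *x)
 *	{
 *		alternative_input("nop; nop; nop; nop",
 *				  "prefetchnta (%1)",
 *				  X86_FEATURE_XMM,
 *				  "r" (x));
 *	}
 */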

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */


/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)
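/*
 * Example (a minimal sketch; the names are hypothetical): the pointer-publish
 * pattern from the documentation above, written with the macros from this
 * header.  The writer orders the object's contents before publishing the
 * pointer; the reader only needs the dependency barrier because its second
 * read depends on the value returned by the first:
 *
 *	struct thing *global_ptr;
 *
 *	// writer				// reader
 *	new->field = 1;				p = global_ptr;
 *	smp_wmb();				smp_read_barrier_depends();
 *	global_ptr = new;			if (p) use(p->field);
 */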

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)
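/*
 * Example (a minimal sketch; ready/shared_data are hypothetical): a simple
 * producer/consumer handshake.  The producer orders its data store before the
 * flag store; the consumer must use a real smp_rmb() because there is no data
 * dependency between reading the flag and reading the data:
 *
 *	// producer				// consumer
 *	shared_data = compute();		while (!ready)
 *	smp_wmb();					cpu_relax();
 *	ready = 1;				smp_rmb();
 *						use(shared_data);
 */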

/* interrupt control.. */

/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
 * critical operations are executed. All critical operations must complete
 * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
 * includes these barriers, for example.
 */

#define __cli() \
do { \
	vcpu_info_t *_vcpu; \
	preempt_disable(); \
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
	_vcpu->evtchn_upcall_mask = 1; \
	preempt_enable_no_resched(); \
	barrier(); \
} while (0)

#define __sti() \
do { \
	vcpu_info_t *_vcpu; \
	barrier(); \
	preempt_disable(); \
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
	_vcpu->evtchn_upcall_mask = 0; \
	barrier(); /* unmask then check (avoid races) */ \
	if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
		force_evtchn_callback(); \
	preempt_enable(); \
} while (0)

#define __save_flags(x) \
do { \
	vcpu_info_t *_vcpu; \
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
	(x) = _vcpu->evtchn_upcall_mask; \
} while (0)

#define __restore_flags(x) \
do { \
	vcpu_info_t *_vcpu; \
	barrier(); \
	preempt_disable(); \
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
		barrier(); /* unmask then check (avoid races) */ \
		if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
			force_evtchn_callback(); \
		preempt_enable(); \
	} else \
		preempt_enable_no_resched(); \
} while (0)

#define safe_halt()		((void)0)

#define __save_and_cli(x) \
do { \
	vcpu_info_t *_vcpu; \
	preempt_disable(); \
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
	(x) = _vcpu->evtchn_upcall_mask; \
	_vcpu->evtchn_upcall_mask = 1; \
	preempt_enable_no_resched(); \
	barrier(); \
} while (0)

#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_save_flags(x)	__save_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
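/*
 * Example (a minimal sketch; the list manipulation is hypothetical): the
 * generic local_irq_* API maps onto the event-channel mask above, so a
 * driver-style critical section is written exactly as on native hardware but
 * only masks upcalls for this VCPU instead of executing cli/sti:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);			// remember mask state, mask upcalls
 *	list_add(&req->queue, &pending);	// must not be interrupted by an upcall
 *	local_irq_restore(flags);		// unmask; force_evtchn_callback() runs
 *						// if an event arrived meanwhile
 */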

#define irqs_disabled() \
	HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

#endif