debuggers.hg
changeset 16447:7ccf7d373d0e
x86: Re-factor and clean up system.h.
Signed-off-by: Keir Fraser <keir.fraser@eu.citrix.com>
author   | Keir Fraser <keir.fraser@citrix.com>
date     | Wed Nov 21 14:27:38 2007 +0000 (2007-11-21)
parents  | 6301c3b6e1ba
children | 05cbf512b82b
files    | xen/include/asm-x86/system.h xen/include/asm-x86/x86_32/system.h xen/include/asm-x86/x86_64/system.h
line diff
--- a/xen/include/asm-x86/system.h  Wed Nov 21 12:00:20 2007 +0000
+++ b/xen/include/asm-x86/system.h  Wed Nov 21 14:27:38 2007 +0000
@@ -5,69 +5,78 @@
 #include <xen/types.h>
 #include <asm/bitops.h>
 
-#define read_segment_register(name) \
-({  u16 __sel; \
-    __asm__ __volatile__ ( "movw %%" STR(name) ",%0" : "=r" (__sel) ); \
-    __sel; \
+#define read_segment_register(name) \
+({  u16 __sel; \
+    asm volatile ( "movw %%" STR(name) ",%0" : "=r" (__sel) ); \
+    __sel; \
 })
 
 #define wbinvd() \
-    __asm__ __volatile__ ("wbinvd": : :"memory");
+    asm volatile ( "wbinvd" : : : "memory" )
 
 #define clflush(a) \
-    __asm__ __volatile__ ("clflush (%0)": :"r"(a));
+    asm volatile ( "clflush (%0)" : : "r"(a) )
 
-#define nop() __asm__ __volatile__ ("nop")
+#define nop() \
+    asm volatile ( "nop" )
 
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,v) \
+    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
 
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((volatile struct __xchg_dummy *)(x))
 
+#if defined(__i386__)
+# include <asm/x86_32/system.h>
+#elif defined(__x86_64__)
+# include <asm/x86_64/system.h>
+#endif
 
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
  * but generally the primitive is invalid, *ptr is output argument. --ANK
  */
-static always_inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static always_inline unsigned long __xchg(
+    unsigned long x, volatile void *ptr, int size)
 {
-    switch (size) {
-    case 1:
-        __asm__ __volatile__("xchgb %b0,%1"
-                             :"=q" (x)
-                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
-                             :"memory");
-        break;
-    case 2:
-        __asm__ __volatile__("xchgw %w0,%1"
-                             :"=r" (x)
-                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
-                             :"memory");
-        break;
+    switch ( size )
+    {
+    case 1:
+        asm volatile ( "xchgb %b0,%1"
+                       : "=q" (x)
+                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "memory" );
+        break;
+    case 2:
+        asm volatile ( "xchgw %w0,%1"
+                       : "=r" (x)
+                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "memory" );
+        break;
 #if defined(__i386__)
-    case 4:
-        __asm__ __volatile__("xchgl %0,%1"
-                             :"=r" (x)
-                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
-                             :"memory");
-        break;
+    case 4:
+        asm volatile ( "xchgl %0,%1"
+                       : "=r" (x)
+                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "memory" );
+        break;
 #elif defined(__x86_64__)
-    case 4:
-        __asm__ __volatile__("xchgl %k0,%1"
-                             :"=r" (x)
-                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
-                             :"memory");
-        break;
-    case 8:
-        __asm__ __volatile__("xchgq %0,%1"
-                             :"=r" (x)
-                             :"m" (*__xg((volatile void *)ptr)), "0" (x)
-                             :"memory");
-        break;
+    case 4:
+        asm volatile ( "xchgl %k0,%1"
+                       : "=r" (x)
+                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "memory" );
+        break;
+    case 8:
+        asm volatile ( "xchgq %0,%1"
+                       : "=r" (x)
+                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "memory" );
+        break;
 #endif
-    }
-    return x;
+    }
+    return x;
 }
 
 /*
@@ -79,241 +88,73 @@ static always_inline unsigned long __xch
 static always_inline unsigned long __cmpxchg(
     volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
-    unsigned long prev;
-    switch (size) {
-    case 1:
-        __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-                             : "=a"(prev)
-                             : "q"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
-                             : "memory");
-        return prev;
-    case 2:
-        __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-                             : "=a"(prev)
-                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
-                             : "memory");
-        return prev;
+    unsigned long prev;
+    switch ( size )
+    {
+    case 1:
+        asm volatile ( LOCK_PREFIX "cmpxchgb %b1,%2"
+                       : "=a" (prev)
+                       : "q" (new), "m" (*__xg((volatile void *)ptr)),
+                         "0" (old)
+                       : "memory" );
+        return prev;
+    case 2:
+        asm volatile ( LOCK_PREFIX "cmpxchgw %w1,%2"
+                       : "=a" (prev)
+                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                         "0" (old)
+                       : "memory" );
+        return prev;
 #if defined(__i386__)
-    case 4:
-        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-                             : "=a"(prev)
-                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
-                             : "memory");
-        return prev;
+    case 4:
+        asm volatile ( LOCK_PREFIX "cmpxchgl %1,%2"
+                       : "=a" (prev)
+                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                         "0" (old)
+                       : "memory" );
+        return prev;
#elif defined(__x86_64__)
-    case 4:
-        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
-                             : "=a"(prev)
-                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
-                             : "memory");
-        return prev;
-    case 8:
-        __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
-                             : "=a"(prev)
-                             : "r"(new), "m"(*__xg((volatile void *)ptr)), "0"(old)
-                             : "memory");
-        return prev;
+    case 4:
+        asm volatile ( LOCK_PREFIX "cmpxchgl %k1,%2"
+                       : "=a" (prev)
+                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                         "0" (old)
+                       : "memory" );
+        return prev;
+    case 8:
+        asm volatile ( LOCK_PREFIX "cmpxchgq %1,%2"
+                       : "=a" (prev)
+                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                         "0" (old)
+                       : "memory" );
+        return prev;
 #endif
-    }
-    return old;
+    }
+    return old;
 }
 
 #define __HAVE_ARCH_CMPXCHG
 
-#if BITS_PER_LONG == 64
-
-#define cmpxchg(ptr,o,n) \
-    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
-                                   (unsigned long)(n),sizeof(*(ptr))))
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
 #else
-
-static always_inline unsigned long long __cmpxchg8b(
-    volatile void *ptr, unsigned long long old, unsigned long long new)
-{
-    unsigned long long prev;
-    __asm__ __volatile__ (
-        LOCK_PREFIX "cmpxchg8b %3"
-        : "=A" (prev)
-        : "c" ((u32)(new>>32)), "b" ((u32)new),
-          "m" (*__xg((volatile void *)ptr)), "0" (old)
-        : "memory" );
-    return prev;
-}
-
-#define cmpxchg(ptr,o,n) \
-({ \
-    __typeof__(*(ptr)) __prev; \
-    switch ( sizeof(*(ptr)) ) { \
-    case 8: \
-        __prev = ((__typeof__(*(ptr)))__cmpxchg8b( \
-            (ptr), \
-            (unsigned long long)(o), \
-            (unsigned long long)(n))); \
-        break; \
-    default: \
-        __prev = ((__typeof__(*(ptr)))__cmpxchg( \
-            (ptr), \
-            (unsigned long)(o), \
-            (unsigned long)(n), \
-            sizeof(*(ptr)))); \
-        break; \
-    } \
-    __prev; \
-})
-
-#endif
-
-
-/*
- * This function causes value _o to be changed to _n at location _p.
- * If this access causes a fault then we return 1, otherwise we return 0.
- * If no fault occurs then _o is updated to the value we saw at _p. If this
- * is the same as the initial value of _o then _n is written to location _p.
- */
-#ifdef __i386__
-#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
-    __asm__ __volatile__ ( \
-        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
-        "2:\n" \
-        ".section .fixup,\"ax\"\n" \
-        "3: movl $1,%1\n" \
-        " jmp 2b\n" \
-        ".previous\n" \
-        ".section __ex_table,\"a\"\n" \
-        " .align 4\n" \
-        " .long 1b,3b\n" \
-        ".previous" \
-        : "=a" (_o), "=r" (_rc) \
-        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
-        : "memory");
-#define cmpxchg_user(_p,_o,_n) \
-({ \
-    int _rc; \
-    switch ( sizeof(*(_p)) ) { \
-    case 1: \
-        __cmpxchg_user(_p,_o,_n,"b","b","q"); \
-        break; \
-    case 2: \
-        __cmpxchg_user(_p,_o,_n,"w","w","r"); \
-        break; \
-    case 4: \
-        __cmpxchg_user(_p,_o,_n,"l","","r"); \
-        break; \
-    case 8: \
-        __asm__ __volatile__ ( \
-            "1: " LOCK_PREFIX "cmpxchg8b %4\n" \
-            "2:\n" \
-            ".section .fixup,\"ax\"\n" \
-            "3: movl $1,%1\n" \
-            " jmp 2b\n" \
-            ".previous\n" \
-            ".section __ex_table,\"a\"\n" \
-            " .align 4\n" \
-            " .long 1b,3b\n" \
-            ".previous" \
-            : "=A" (_o), "=r" (_rc) \
-            : "c" ((u32)((u64)(_n)>>32)), "b" ((u32)(_n)), \
-              "m" (*__xg((volatile void *)(_p))), "0" (_o), "1" (0) \
-            : "memory"); \
-        break; \
-    } \
-    _rc; \
-})
-#else
-#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
-    __asm__ __volatile__ ( \
-        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
-        "2:\n" \
-        ".section .fixup,\"ax\"\n" \
-        "3: movl $1,%1\n" \
-        " jmp 2b\n" \
-        ".previous\n" \
-        ".section __ex_table,\"a\"\n" \
-        " .align 8\n" \
-        " .quad 1b,3b\n" \
-        ".previous" \
-        : "=a" (_o), "=r" (_rc) \
-        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
-        : "memory");
-#define cmpxchg_user(_p,_o,_n) \
-({ \
-    int _rc; \
-    switch ( sizeof(*(_p)) ) { \
-    case 1: \
-        __cmpxchg_user(_p,_o,_n,"b","b","q"); \
-        break; \
-    case 2: \
-        __cmpxchg_user(_p,_o,_n,"w","w","r"); \
-        break; \
-    case 4: \
-        __cmpxchg_user(_p,_o,_n,"l","k","r"); \
-        break; \
-    case 8: \
-        __cmpxchg_user(_p,_o,_n,"q","","r"); \
-        break; \
-    } \
-    _rc; \
-})
-#endif
-
-static inline void atomic_write64(uint64_t *p, uint64_t v)
-{
-#ifdef __i386__
-    uint64_t w = *p, x;
-    while ( (x = __cmpxchg8b(p, w, v)) != w )
-        w = x;
-#else
-    *p = v;
-#endif
-}
-
-#if defined(__i386__)
-#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#define rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#elif defined(__x86_64__)
-#define mb() __asm__ __volatile__ ("mfence":::"memory")
-#define rmb() __asm__ __volatile__ ("lfence":::"memory")
-#endif
-#define wmb() __asm__ __volatile__ ("": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
 #endif
 
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
-/* interrupt control.. */
-#if defined(__i386__)
-#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
-#define __restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
-#elif defined(__x86_64__)
-#define __save_flags(x) do { __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
-#define __restore_flags(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
-#endif
-#define __cli() __asm__ __volatile__("cli": : :"memory")
-#define __sti() __asm__ __volatile__("sti": : :"memory")
+#define local_irq_disable() asm volatile ( "cli" : : : "memory" )
+#define local_irq_enable() asm volatile ( "sti" : : : "memory" )
+
 /* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+#define safe_halt() asm volatile ( "sti; hlt" : : : "memory" )
 /* used when interrupts are already enabled or to shutdown the processor */
-#define halt() __asm__ __volatile__("hlt": : :"memory")
-
-/* For spinlocks etc */
-#if defined(__i386__)
-#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
-#define local_irq_restore(x) __restore_flags(x)
-#elif defined(__x86_64__)
-#define local_irq_save(x) do { __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
-#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
-#endif
-#define local_irq_disable() __cli()
-#define local_irq_enable() __sti()
+#define halt() asm volatile ( "hlt" : : : "memory" )
 
 static inline int local_irq_is_enabled(void)
 {
@@ -322,8 +163,8 @@ static inline int local_irq_is_enabled(v
     return !!(flags & (1<<9)); /* EFLAGS_IF */
 }
 
-#define BROKEN_ACPI_Sx 0x0001
-#define BROKEN_INIT_AFTER_S1 0x0002
+#define BROKEN_ACPI_Sx        0x0001
+#define BROKEN_INIT_AFTER_S1  0x0002
 
 void trap_init(void);
 void percpu_traps_init(void);
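Usage sketch (not part of the patch): the cmpxchg() kept in this header is normally wrapped in a read-modify-write retry loop. The fragment below shows that pattern as a self-contained user-space C program; GCC's __sync_val_compare_and_swap() builtin stands in for the hypervisor's cmpxchg() macro, and the function name atomic_add_return_old() is invented for illustration only.

    #include <stdio.h>

    static unsigned long counter;

    /* Atomically add 'n' to *p and return the value seen before the update,
     * retrying whenever another thread/CPU modified *p between our read and
     * our compare-and-swap. */
    static unsigned long atomic_add_return_old(unsigned long *p, unsigned long n)
    {
        unsigned long old = *p, seen;

        for ( ; ; old = seen )
        {
            seen = __sync_val_compare_and_swap(p, old, old + n);
            if ( seen == old )      /* nobody changed *p under our feet */
                return old;
        }
    }

    int main(void)
    {
        printf("old value: %lu\n", atomic_add_return_old(&counter, 5));
        printf("new value: %lu\n", counter);
        return 0;
    }

The loop shape is the point: the compare-and-swap only succeeds when the expected value still matches, so the update is recomputed from the freshest value on every retry.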
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/x86_32/system.h  Wed Nov 21 14:27:38 2007 +0000
@@ -0,0 +1,115 @@
+#ifndef __X86_32_SYSTEM_H__
+#define __X86_32_SYSTEM_H__
+
+static always_inline unsigned long long __cmpxchg8b(
+    volatile void *ptr, unsigned long long old, unsigned long long new)
+{
+    unsigned long long prev;
+    asm volatile (
+        LOCK_PREFIX "cmpxchg8b %3"
+        : "=A" (prev)
+        : "c" ((u32)(new>>32)), "b" ((u32)new),
+          "m" (*__xg((volatile void *)ptr)), "0" (old)
+        : "memory" );
+    return prev;
+}
+
+#define cmpxchg(ptr,o,n) \
+({ \
+    __typeof__(*(ptr)) __prev; \
+    switch ( sizeof(*(ptr)) ) { \
+    case 8: \
+        __prev = ((__typeof__(*(ptr)))__cmpxchg8b( \
+            (ptr), \
+            (unsigned long long)(o), \
+            (unsigned long long)(n))); \
+        break; \
+    default: \
+        __prev = ((__typeof__(*(ptr)))__cmpxchg( \
+            (ptr), \
+            (unsigned long)(o), \
+            (unsigned long)(n), \
+            sizeof(*(ptr)))); \
+        break; \
+    } \
+    __prev; \
+})
+
+/*
+ * This function causes value _o to be changed to _n at location _p.
+ * If this access causes a fault then we return 1, otherwise we return 0.
+ * If no fault occurs then _o is updated to the value we saw at _p. If this
+ * is the same as the initial value of _o then _n is written to location _p.
+ */
+#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
+    asm volatile ( \
+        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
+        "2:\n" \
+        ".section .fixup,\"ax\"\n" \
+        "3: movl $1,%1\n" \
+        " jmp 2b\n" \
+        ".previous\n" \
+        ".section __ex_table,\"a\"\n" \
+        " .align 4\n" \
+        " .long 1b,3b\n" \
+        ".previous" \
+        : "=a" (_o), "=r" (_rc) \
+        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
+        : "memory");
+
+#define cmpxchg_user(_p,_o,_n) \
+({ \
+    int _rc; \
+    switch ( sizeof(*(_p)) ) { \
+    case 1: \
+        __cmpxchg_user(_p,_o,_n,"b","b","q"); \
+        break; \
+    case 2: \
+        __cmpxchg_user(_p,_o,_n,"w","w","r"); \
+        break; \
+    case 4: \
+        __cmpxchg_user(_p,_o,_n,"l","","r"); \
+        break; \
+    case 8: \
+        asm volatile ( \
+            "1: " LOCK_PREFIX "cmpxchg8b %4\n" \
+            "2:\n" \
+            ".section .fixup,\"ax\"\n" \
+            "3: movl $1,%1\n" \
+            " jmp 2b\n" \
+            ".previous\n" \
+            ".section __ex_table,\"a\"\n" \
+            " .align 4\n" \
+            " .long 1b,3b\n" \
+            ".previous" \
+            : "=A" (_o), "=r" (_rc) \
+            : "c" ((u32)((u64)(_n)>>32)), "b" ((u32)(_n)), \
+              "m" (*__xg((volatile void *)(_p))), "0" (_o), "1" (0) \
+            : "memory"); \
+        break; \
+    } \
+    _rc; \
+})
+
+static inline void atomic_write64(uint64_t *p, uint64_t v)
+{
+    uint64_t w = *p, x;
+    while ( (x = __cmpxchg8b(p, w, v)) != w )
+        w = x;
+}
+
+#define mb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
+#define rmb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
+#define wmb() asm volatile ( "" : : : "memory" )
+
+#define __save_flags(x) \
+    asm volatile ( "pushfl ; popl %0" : "=g" (x) : )
+#define __restore_flags(x) \
+    asm volatile ( "pushl %0 ; popfl" : : "g" (x) : "memory", "cc" )
+
+#define local_irq_save(x) \
+    asm volatile ( "pushfl ; popl %0 ; cli" : "=g" (x) : : "memory" )
+#define local_irq_restore(x) \
+    __restore_flags(x)
+
+#endif /* __X86_32_SYSTEM_H__ */
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/x86_64/system.h  Wed Nov 21 14:27:38 2007 +0000
@@ -0,0 +1,69 @@
+#ifndef __X86_64_SYSTEM_H__
+#define __X86_64_SYSTEM_H__
+
+#define cmpxchg(ptr,o,n) \
+    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
+                                   (unsigned long)(n),sizeof(*(ptr))))
+
+/*
+ * This function causes value _o to be changed to _n at location _p.
+ * If this access causes a fault then we return 1, otherwise we return 0.
+ * If no fault occurs then _o is updated to the value we saw at _p. If this
+ * is the same as the initial value of _o then _n is written to location _p.
+ */
+#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
+    asm volatile ( \
+        "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
+        "2:\n" \
+        ".section .fixup,\"ax\"\n" \
+        "3: movl $1,%1\n" \
+        " jmp 2b\n" \
+        ".previous\n" \
+        ".section __ex_table,\"a\"\n" \
+        " .align 8\n" \
+        " .quad 1b,3b\n" \
+        ".previous" \
+        : "=a" (_o), "=r" (_rc) \
+        : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
+        : "memory");
+
+#define cmpxchg_user(_p,_o,_n) \
+({ \
+    int _rc; \
+    switch ( sizeof(*(_p)) ) { \
+    case 1: \
+        __cmpxchg_user(_p,_o,_n,"b","b","q"); \
+        break; \
+    case 2: \
+        __cmpxchg_user(_p,_o,_n,"w","w","r"); \
+        break; \
+    case 4: \
+        __cmpxchg_user(_p,_o,_n,"l","k","r"); \
+        break; \
+    case 8: \
+        __cmpxchg_user(_p,_o,_n,"q","","r"); \
+        break; \
+    } \
+    _rc; \
+})
+
+static inline void atomic_write64(uint64_t *p, uint64_t v)
+{
+    *p = v;
+}
+
+#define mb() asm volatile ( "mfence" : : : "memory" )
+#define rmb() asm volatile ( "lfence" : : : "memory" )
+#define wmb() asm volatile ( "" : : : "memory" )
+
+#define __save_flags(x) \
+    asm volatile ( "pushfq ; popq %q0" : "=g" (x) : : "memory" )
+#define __restore_flags(x) \
+    asm volatile ( "pushq %0 ; popfq" : : "g" (x) : "memory", "cc" )
+
+#define local_irq_save(x) \
+    asm volatile ( "pushfq ; popq %0 ; cli" : "=g" (x) : : "memory" )
+#define local_irq_restore(x) \
+    __restore_flags(x)
+
+#endif /* __X86_64_SYSTEM_H__ */
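Usage sketch (not part of the patch): the pushfq/popq idiom behind __save_flags() and local_irq_save() can be exercised from user space, where only the flag-reading half is unprivileged. The fragment below, assuming x86-64 and GCC-style inline assembly, reads RFLAGS the same way and tests bit 9 (IF) exactly as local_irq_is_enabled() does; cli and sti themselves would fault outside ring 0, so they are left out.

    #include <stdio.h>

    /* Read the flags register via the stack, as __save_flags() does. */
    static inline unsigned long save_flags_sketch(void)
    {
        unsigned long flags;

        asm volatile ( "pushfq ; popq %0" : "=g" (flags) : : "memory" );
        return flags;
    }

    int main(void)
    {
        unsigned long flags = save_flags_sketch();

        /* In ring 3 the interrupt flag is normally set, so IF prints as 1. */
        printf("RFLAGS = %#lx, IF = %d\n", flags, !!(flags & (1 << 9)));
        return 0;
    }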