xen-vtx-unstable
annotate linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h @ 6776:e7c7196fa329
summary:  merge?
author:   cl349@firebug.cl.cam.ac.uk
date:     Tue Sep 13 15:46:49 2005 +0000
parents:  4d899a738d59 dd668f7527cb
children: 72e4e2aab342
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/synch_bitops.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/xen-public/arch-x86_64.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"

#define __EXTRA_CLOBBER \
    ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

#define switch_to(prev,next,last)                                             \
    asm volatile(SAVE_CONTEXT                                                 \
        "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */                \
        "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */             \
        "call __switch_to\n\t"                                                \
        ".globl thread_return\n"                                              \
        "thread_return:\n\t"                                                  \
        "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"                                \
        "movq %P[thread_info](%%rsi),%%r8\n\t"                                \
        LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"                         \
        "movq %%rax,%%rdi\n\t"                                                \
        "jc ret_from_fork\n\t"                                                \
        RESTORE_CONTEXT                                                       \
        : "=a" (last)                                                         \
        : [next] "S" (next), [prev] "D" (prev),                               \
          [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)),         \
          [ti_flags] "i" (offsetof(struct thread_info, flags)),               \
          [tif_fork] "i" (TIF_FORK),                                          \
          [thread_info] "i" (offsetof(struct task_struct, thread_info)),      \
          [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))           \
        : "memory", "cc" __EXTRA_CLOBBER)


extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value)                  \
    asm volatile("\n"                           \
        "1:\t"                                  \
        "movl %k0,%%" #seg "\n"                 \
        "2:\n"                                  \
        ".section .fixup,\"ax\"\n"              \
        "3:\t"                                  \
        "movl %1,%%" #seg "\n\t"                \
        "jmp 2b\n"                              \
        ".previous\n"                           \
        ".section __ex_table,\"a\"\n\t"         \
        ".align 8\n\t"                          \
        ".quad 1b,3b\n"                         \
        ".previous"                             \
        : :"r" (value), "r" (0))

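/*
 * Illustrative usage sketch, not part of the original header: loadsegment()
 * is typically used when reloading user segment registers, for example when
 * restoring selectors that were saved earlier. The variable names below
 * (saved_fs, saved_gsindex) are hypothetical.
 *
 *      loadsegment(fs, saved_fs);
 *      load_gs_index(saved_gsindex);
 *
 * If a saved selector has become invalid, the .fixup entry above loads the
 * null selector instead of letting the faulting "movl" kill the kernel.
 */
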
#define set_debug(value,register)               \
    __asm__("movq %0,%%db" #register            \
        : /* no output */                       \
        :"r" ((unsigned long) value))


#ifdef __KERNEL__
struct alt_instr {
    __u8 *instr;            /* original instruction */
    __u8 *replacement;
    __u8 cpuid;             /* cpuid bit set for replacement */
    __u8 instrlen;          /* length of original instruction */
    __u8 replacementlen;    /* length of new instruction, <= instrlen */
    __u8 pad[5];
};
#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr. It can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)        \
    asm volatile ("661:\n\t" oldinstr "\n662:\n"        \
        ".section .altinstructions,\"a\"\n"             \
        " .align 8\n"                                   \
        " .quad 661b\n"            /* label */          \
        " .quad 663f\n"            /* new instruction */\
        " .byte %c0\n"             /* feature bit */    \
        " .byte 662b-661b\n"       /* sourcelen */      \
        " .byte 664f-663f\n"       /* replacementlen */ \
        ".previous\n"                                   \
        ".section .altinstr_replacement,\"ax\"\n"       \
        "663:\n\t" newinstr "\n664:\n" /* replacement */\
        ".previous" :: "i" (feature) : "memory")

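/*
 * Illustrative usage sketch, not part of the original header: callers pass
 * an original instruction plus a replacement that is patched in at boot when
 * the CPU advertises the named feature bit. A hypothetical barrier built
 * this way (X86_FEATURE_XMM2 is the SSE2 capability bit; the exact operands
 * are an assumption for the example) would look like:
 *
 *      alternative("lock; addl $0,0(%%rsp)", "mfence", X86_FEATURE_XMM2);
 *
 * At boot, apply_alternatives() walks the .altinstructions records emitted
 * above and overwrites the original bytes where the feature is present.
 */
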
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)       \
    asm volatile ("661:\n\t" oldinstr "\n662:\n"        \
        ".section .altinstructions,\"a\"\n"             \
        " .align 8\n"                                   \
        " .quad 661b\n"            /* label */          \
        " .quad 663f\n"            /* new instruction */\
        " .byte %c0\n"             /* feature bit */    \
        " .byte 662b-661b\n"       /* sourcelen */      \
        " .byte 664f-663f\n"       /* replacementlen */ \
        ".previous\n"                                   \
        ".section .altinstr_replacement,\"ax\"\n"       \
        "663:\n\t" newinstr "\n664:\n" /* replacement */\
        ".previous" :: "i" (feature), ##input)

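/*
 * Illustrative usage sketch, not part of the original header:
 * alternative_input() is the variant for patched instructions that take an
 * operand. A prefetch helper might look like the following (the helper name
 * my_prefetch is hypothetical; X86_FEATURE_3DNOW is the 3DNow! capability
 * bit):
 *
 *      static inline void my_prefetch(const void *x)
 *      {
 *              alternative_input("prefetcht0 (%1)",
 *                                "prefetchw (%1)",
 *                                X86_FEATURE_3DNOW,
 *                                "r" (x));
 *      }
 *
 * As noted above, operand numbers start at 1 because %0 is taken by the
 * feature bit.
 */
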
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() (HYPERVISOR_fpu_taskswitch(0))

static inline unsigned long read_cr0(void)
{
    unsigned long cr0;
    asm volatile("movq %%cr0,%0" : "=r" (cr0));
    return cr0;
}

static inline void write_cr0(unsigned long val)
{
    asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
    unsigned long cr3;
    asm("movq %%cr3,%0" : "=r" (cr3));
    return cr3;
}

static inline unsigned long read_cr4(void)
{
    unsigned long cr4;
    asm("movq %%cr4,%0" : "=r" (cr4));
    return cr4;
}

static inline void write_cr4(unsigned long val)
{
    asm volatile("movq %0,%%cr4" :: "r" (val));
}

#define stts() (HYPERVISOR_fpu_taskswitch(1))

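/*
 * Illustrative note, not part of the original header: a Xen guest cannot
 * toggle CR0.TS itself, so clts()/stts() are routed through the
 * fpu_taskswitch hypercall. The lazy-FPU pattern they support looks roughly
 * like this (restore_fpu_state is a hypothetical helper, not defined here):
 *
 *      stts();                       when switching away from an FPU user
 *      ...
 *      clts();                       in the device-not-available handler,
 *      restore_fpu_state(current);   before touching FPU state again
 */
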
#define wbinvd() \
    __asm__ __volatile__ ("wbinvd": : :"memory");

#endif /* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
    *ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * although strictly the constraints are imprecise: *ptr is really an
 * output argument as well. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
            :"=q" (x)
            :"m" (*__xg(ptr)), "0" (x)
            :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
            :"=r" (x)
            :"m" (*__xg(ptr)), "0" (x)
            :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %k0,%1"
            :"=r" (x)
            :"m" (*__xg(ptr)), "0" (x)
            :"memory");
        break;
    case 8:
        __asm__ __volatile__("xchgq %0,%1"
            :"=r" (x)
            :"m" (*__xg(ptr)), "0" (x)
            :"memory");
        break;
    }
    return x;
}

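/*
 * Illustrative usage sketch, not part of the original header: xchg()
 * atomically stores a new value and returns the old one, which is exactly
 * what the tas() wrapper above relies on for a simple test-and-set lock.
 * The variable name my_lock is an assumption for the example.
 *
 *      static volatile unsigned long my_lock;
 *
 *      while (xchg(&my_lock, 1) != 0)
 *              cpu_relax();          spin until we observe 0 (lock taken)
 *      ...critical section...
 *      my_lock = 0;                  release; on x86 a plain store suffices
 */
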
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
            : "=a"(prev)
            : "q"(new), "m"(*__xg(ptr)), "0"(old)
            : "memory");
        return prev;
    case 2:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
            : "=a"(prev)
            : "q"(new), "m"(*__xg(ptr)), "0"(old)
            : "memory");
        return prev;
    case 4:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
            : "=a"(prev)
            : "q"(new), "m"(*__xg(ptr)), "0"(old)
            : "memory");
        return prev;
    case 8:
        __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
            : "=a"(prev)
            : "q"(new), "m"(*__xg(ptr)), "0"(old)
            : "memory");
        return prev;
    }
    return old;
}

#define cmpxchg(ptr,o,n)\
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                   (unsigned long)(n),sizeof(*(ptr))))

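/*
 * Illustrative usage sketch, not part of the original header: the usual
 * pattern is an optimistic read-modify-write loop that retries whenever
 * another CPU changed the value in between. The counter name my_count is an
 * assumption for the example.
 *
 *      static unsigned long my_count;
 *
 *      unsigned long old, seen;
 *      do {
 *              old  = my_count;
 *              seen = cmpxchg(&my_count, old, old + 1);
 *      } while (seen != old);        success <=> returned value equals old
 */
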
#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      do {} while(0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do {} while(0)
#endif


/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()    asm volatile("mfence":::"memory")
#define rmb()   asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()   asm volatile("sfence" ::: "memory")
#else
#define wmb()   asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()  do {} while(0)
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

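/*
 * Illustrative usage sketch, not part of the original header: the smp_*
 * variants pair up across CPUs so that a flag is never observed before the
 * data it publishes, while the plain mb()/rmb()/wmb() forms are needed even
 * on UP when ordering against device (MMIO) accesses. The names my_data and
 * my_ready are assumptions for the example.
 *
 *      Producer:
 *              my_data = value;
 *              smp_wmb();            publish the data before the flag
 *              my_ready = 1;
 *
 *      Consumer:
 *              while (!my_ready)
 *                      cpu_relax();
 *              smp_rmb();            read the flag before the data
 *              use(my_data);
 */
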
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)


/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
 * critical operations are executed. All critical operations must complete
 * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
 * includes these barriers, for example.
 */

#define __cli()                                                         \
do {                                                                    \
    vcpu_info_t *_vcpu;                                                 \
    preempt_disable();                                                  \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];     \
    _vcpu->evtchn_upcall_mask = 1;                                      \
    preempt_enable_no_resched();                                        \
    barrier();                                                          \
} while (0)

#define __sti()                                                         \
do {                                                                    \
    vcpu_info_t *_vcpu;                                                 \
    barrier();                                                          \
    preempt_disable();                                                  \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];     \
    _vcpu->evtchn_upcall_mask = 0;                                      \
    barrier(); /* unmask then check (avoid races) */                    \
    if ( unlikely(_vcpu->evtchn_upcall_pending) )                       \
        force_evtchn_callback();                                        \
    preempt_enable();                                                   \
} while (0)

#define __save_flags(x)                                                 \
do {                                                                    \
    vcpu_info_t *_vcpu;                                                 \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];     \
    (x) = _vcpu->evtchn_upcall_mask;                                    \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
    vcpu_info_t *_vcpu;                                                 \
    barrier();                                                          \
    preempt_disable();                                                  \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];     \
    if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                       \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
            force_evtchn_callback();                                    \
        preempt_enable();                                               \
    } else                                                              \
        preempt_enable_no_resched();                                    \
} while (0)

#define safe_halt() ((void)0)

#define __save_and_cli(x)                                               \
do {                                                                    \
    vcpu_info_t *_vcpu;                                                 \
    preempt_disable();                                                  \
    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];     \
    (x) = _vcpu->evtchn_upcall_mask;                                    \
    _vcpu->evtchn_upcall_mask = 1;                                      \
    preempt_enable_no_resched();                                        \
    barrier();                                                          \
} while (0)

void cpu_idle_wait(void);

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_save_flags(x)     __save_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

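/*
 * Illustrative usage sketch, not part of the original header: under Xen the
 * "interrupt flag" is virtual; these macros mask and unmask event-channel
 * upcalls for the current VCPU in the shared-info page instead of executing
 * cli/sti. Callers keep using the generic kernel idiom:
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);        mask event delivery on this VCPU
 *      ...touch state shared with event handlers...
 *      local_irq_restore(flags);     unmask; a pending event is delivered
 *                                    via force_evtchn_callback()
 */
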
/* Don't use smp_processor_id: this is called in debug versions of that fn. */
#ifdef CONFIG_SMP
#define irqs_disabled()                                                 \
    HYPERVISOR_shared_info->vcpu_data[__smp_processor_id()].evtchn_upcall_mask
#else
#define irqs_disabled()                                                 \
    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
#endif

/*
 * Disable hlt during certain critical I/O operations.
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

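/*
 * Illustrative usage sketch, not part of the original header: a driver whose
 * device cannot tolerate the CPU halting while an operation is in flight
 * brackets that window as follows (my_start_io/my_wait_for_completion are
 * hypothetical helpers):
 *
 *      disable_hlt();                keep the idle loop from executing hlt
 *      my_start_io();
 *      my_wait_for_completion();
 *      enable_hlt();                 allow hlt in the idle loop again
 */
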
#define HAVE_EAT_KEY
void eat_key(void);

extern unsigned long arch_align_stack(unsigned long sp);

#endif /* __ASM_SYSTEM_H */