debuggers.hg
changeset 16472:d46265d21dc5
[Mini-OS] Fix x86 arch_switch_thread
Fix x86 arch_switch_thread by making it pure assembly.
There were missing general-register clobbers for x86_64, and BP should
theoretically be clobbered as well, but gcc does not accept that, so the
only simple, safe solution is to use pure assembly.
Signed-off-by: Samuel Thibault <samuel.thibault@citrix.com>
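For context on the clobber remark above: the old inline-assembly macros (removed below in arch_sched.h) would have had to name every register the switch path can trash, and gcc typically refuses to let %ebp/%rbp appear in a clobber list when it needs that register as the frame pointer. A minimal, purely hypothetical illustration of the problem (not part of the patch; names and comments are ours):

    /* Hypothetical illustration only, not from the changeset. */
    static inline void clobber_list_example(void)
    {
    #ifdef __x86_64__
        __asm__ __volatile__(""   /* the real switch code would go here */
                             : /* no outputs */
                             : /* no inputs */
                             : "rax", "rcx", "rdx",
                               "r8", "r9", "r10", "r11",
                               "memory", "cc");
        /* Adding "rbp" to that list is what gcc tends to reject whenever a
         * frame pointer is in use -- the "gcc does not accept that" part of
         * the commit message -- hence the move to a real assembly routine. */
    #endif
    }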
| author | Keir Fraser <keir.fraser@citrix.com> |
|---|---|
| date | Fri Nov 23 16:23:28 2007 +0000 (2007-11-23) |
| parents | 2215f4f6f0f2 |
| children | e40015e20548 |
| files | extras/mini-os/arch/x86/x86_32.S extras/mini-os/arch/x86/x86_64.S extras/mini-os/include/sched.h extras/mini-os/include/x86/arch_sched.h |
line diff
--- a/extras/mini-os/arch/x86/x86_32.S	Fri Nov 23 16:23:03 2007 +0000
+++ b/extras/mini-os/arch/x86/x86_32.S	Fri Nov 23 16:23:28 2007 +0000
@@ -288,3 +288,21 @@ ENTRY(thread_starter)
 	call *%ebx
 	call exit_thread
 
+ENTRY(__arch_switch_threads)
+	movl 4(%esp), %ecx		/* prev */
+	movl 8(%esp), %edx		/* next */
+	pushl %ebp
+	pushl %ebx
+	pushl %esi
+	pushl %edi
+	movl %esp, (%ecx)		/* save ESP */
+	movl (%edx), %esp		/* restore ESP */
+	movl $1f, 4(%ecx)		/* save EIP */
+	pushl 4(%edx)			/* restore EIP */
+	ret
+1:
+	popl %edi
+	popl %esi
+	popl %ebx
+	popl %ebp
+	ret
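To make the 32-bit routine easier to follow: prev and next arrive on the stack per the i386 cdecl convention (4(%esp) and 8(%esp)), and only the callee-saved registers need to be preserved. A hedged sketch, with illustrative names not taken from the patch, of what the outgoing thread's stack holds at the moment ESP is stored into prev->sp (lowest address first):

    /* Illustrative only: frame left on the suspended thread's stack by the
     * x86_32 routine; the value saved in prev->sp points at 'edi'. */
    struct switch_frame_x86_32 {
        unsigned long edi;      /* pushed last, so at the lowest address */
        unsigned long esi;
        unsigned long ebx;
        unsigned long ebp;
        unsigned long ret_eip;  /* return address from the original call */
        unsigned long arg_prev; /* was 4(%esp) at entry */
        unsigned long arg_next; /* was 8(%esp) at entry */
    };

When the thread is later resumed at label 1, the four pops and the final ret consume exactly this frame.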
--- a/extras/mini-os/arch/x86/x86_64.S	Fri Nov 23 16:23:03 2007 +0000
+++ b/extras/mini-os/arch/x86/x86_64.S	Fri Nov 23 16:23:28 2007 +0000
@@ -386,3 +386,23 @@ ENTRY(thread_starter)
 	call exit_thread
 
 
+ENTRY(__arch_switch_threads)
+	pushq %rbp
+	pushq %rbx
+	pushq %r12
+	pushq %r13
+	pushq %r14
+	pushq %r15
+	movq %rsp, (%rdi)		/* save ESP */
+	movq (%rsi), %rsp		/* restore ESP */
+	movq $1f, 8(%rdi)		/* save EIP */
+	pushq 8(%rsi)			/* restore EIP */
+	ret
+1:
+	popq %r15
+	popq %r14
+	popq %r13
+	popq %r12
+	popq %rbx
+	popq %rbp
+	ret
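The 64-bit version differs only in the calling convention and the callee-saved set: prevctx and nextctx arrive in %rdi and %rsi per the System V AMD64 ABI, so no stack loads are needed, and %rbx, %rbp and %r12-%r15 must be preserved. (The /* save ESP */ comments are carried over from the 32-bit code; the registers involved are RSP and RIP.) A corresponding hedged sketch of the saved frame, lowest address first, with names of our own choosing:

    /* Illustrative only: frame left on the suspended thread's stack by the
     * x86_64 routine; the value saved through *prevctx points at 'r15'. */
    struct switch_frame_x86_64 {
        unsigned long r15;      /* pushed last */
        unsigned long r14;
        unsigned long r13;
        unsigned long r12;
        unsigned long rbx;
        unsigned long rbp;
        unsigned long ret_rip;  /* return address from the original call */
    };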
--- a/extras/mini-os/include/sched.h	Fri Nov 23 16:23:03 2007 +0000
+++ b/extras/mini-os/include/sched.h	Fri Nov 23 16:23:28 2007 +0000
@@ -10,6 +10,7 @@ struct thread
     char *name;
     char *stack;
 #if !defined(__ia64__)
+    /* keep in that order */
     unsigned long sp;  /* Stack pointer */
     unsigned long ip;  /* Instruction pointer */
 #else /* !defined(__ia64__) */
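The new /* keep in that order */ comment matters because the assembly stores sp at offset 0 and ip at offset sizeof(long) from the pointer it receives (&thread->sp). A hypothetical compile-time check of that assumption, not part of the patch, could look like this (it assumes the struct thread definition above is in scope):

    #include <stddef.h>

    /* Fails to compile if 'ip' does not immediately follow 'sp', which is
     * the layout __arch_switch_threads relies on. */
    typedef char arch_switch_layout_check
            [offsetof(struct thread, ip) ==
             offsetof(struct thread, sp) + sizeof(unsigned long) ? 1 : -1];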
--- a/extras/mini-os/include/x86/arch_sched.h	Fri Nov 23 16:23:03 2007 +0000
+++ b/extras/mini-os/include/x86/arch_sched.h	Fri Nov 23 16:23:28 2007 +0000
@@ -15,44 +15,9 @@ static inline struct thread* get_current
     return *current;
 }
 
-#ifdef __i386__
-#define arch_switch_threads(prev, next) do {                            \
-    unsigned long esi,edi;                                              \
-    __asm__ __volatile__("pushfl\n\t"                                   \
-                         "pushl %%ebp\n\t"                              \
-                         "movl %%esp,%0\n\t"         /* save ESP */     \
-                         "movl %4,%%esp\n\t"         /* restore ESP */  \
-                         "movl $1f,%1\n\t"           /* save EIP */     \
-                         "pushl %5\n\t"              /* restore EIP */  \
-                         "ret\n\t"                                      \
-                         "1:\t"                                         \
-                         "popl %%ebp\n\t"                               \
-                         "popfl"                                        \
-                         :"=m" (prev->sp),"=m" (prev->ip),              \
-                          "=S" (esi),"=D" (edi)                         \
-                         :"m" (next->sp),"m" (next->ip),                \
-                          "2" (prev), "d" (next));                      \
-} while (0)
-#elif __x86_64__
-#define arch_switch_threads(prev, next) do {                            \
-    unsigned long rsi,rdi;                                              \
-    __asm__ __volatile__("pushfq\n\t"                                   \
-                         "pushq %%rbp\n\t"                              \
-                         "movq %%rsp,%0\n\t"         /* save RSP */     \
-                         "movq %4,%%rsp\n\t"         /* restore RSP */  \
-                         "movq $1f,%1\n\t"           /* save RIP */     \
-                         "pushq %5\n\t"              /* restore RIP */  \
-                         "ret\n\t"                                      \
-                         "1:\t"                                         \
-                         "popq %%rbp\n\t"                               \
-                         "popfq"                                        \
-                         :"=m" (prev->sp),"=m" (prev->ip),              \
-                          "=S" (rsi),"=D" (rdi)                         \
-                         :"m" (next->sp),"m" (next->ip),                \
-                          "2" (prev), "d" (next));                      \
-} while (0)
-#endif
+extern void __arch_switch_threads(unsigned long *prevctx, unsigned long *nextctx);
 
+#define arch_switch_threads(prev,next) __arch_switch_threads(&(prev)->sp, &(next)->sp)
 
 
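With the inline-asm macros replaced by a real function, the scheduler-facing interface is unchanged: arch_switch_threads(prev, next) simply passes the address of each thread's adjacent sp/ip pair to the assembly routine. A minimal sketch of the call site, using the declarations from the diff above plus a hypothetical context_switch() helper of our own for illustration:

    /* Sketch only: how the new interface is reached from C.  'prev' and
     * 'next' are struct thread pointers; &(prev)->sp points at the sp/ip
     * pair declared in sched.h above. */
    extern void __arch_switch_threads(unsigned long *prevctx, unsigned long *nextctx);

    #define arch_switch_threads(prev, next) \
        __arch_switch_threads(&(prev)->sp, &(next)->sp)

    static void context_switch(struct thread *prev, struct thread *next)
    {
        /* Returns only when some later switch resumes 'prev' at label 1. */
        arch_switch_threads(prev, next);
    }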