debuggers.hg
changeset 4734:e686528abbfc
bitkeeper revision 1.1389.3.1 (42714dabVSywx2XWGjgw2J54ZylwYg)
Ensure block/yield hypercalls always return a sane return code.
Ensure callers of __enter_scheduler take appropriate arch-specific
action if no context switch occurs (callers from arch/x86 do not
expect to return from a call into the scheduler).
This fixes wildly unintuitive behaviour of do_block() for the
VMX team.
Signed-off-by: Keir Fraser <keir@xensource.com>
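The mechanism behind the fix: a hypercall's return value is normally copied into the guest's saved EAX on the exit path, but do_block()/do_yield() may context-switch away inside __enter_scheduler() and the vcpu is later resumed directly through schedule_tail(), skipping that copy, so the guest read back whatever stale value sat in the saved-register slot. The new do_arch_sched_op wrapper (diffs below) pre-stores 0 in the saved frame before dispatching. A runnable toy model of the bug and the fix, in plain user-space C with hypothetical names (a jmp_buf stands in for the guest context; this is not the Xen source):

    /* sketch.c - toy model of the stale-return-code bug.
     * Compile: cc sketch.c && ./a.out */
    #include <setjmp.h>
    #include <stdio.h>

    struct cpu_user_regs { long eax; };           /* toy saved guest frame */
    static struct cpu_user_regs guest_regs = { .eax = 0x5a5a };  /* stale */
    static jmp_buf guest_context;

    /* Stand-in for schedule_tail(): resume the guest directly, skipping
     * the hypercall's normal "copy return value into saved EAX" exit. */
    static void schedule_tail(void) { longjmp(guest_context, 1); }

    static long do_sched_op(void)
    {
        schedule_tail();       /* vcpu blocks; control never comes back */
        return 0;              /* unreachable */
    }

    static long do_arch_sched_op(void)
    {
        guest_regs.eax = 0;    /* the fix: pre-store "success" in the frame */
        return do_sched_op();
    }

    int main(void)
    {
        if ( setjmp(guest_context) == 0 )
            do_arch_sched_op();  /* hypercall entry */
        /* Resumed via schedule_tail(): without the pre-store the guest
         * would read back the stale 0x5a5a as its hypercall result. */
        printf("guest sees return code %ld\n", guest_regs.eax);
        return 0;
    }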
| author | kaf24@firebug.cl.cam.ac.uk |
|---|---|
| date | Thu Apr 28 20:55:07 2005 +0000 (2005-04-28) |
| parents | 57dcb8c9f1d8 |
| children | 123bd8c4b408 a879e5923337 |
| files | xen/arch/ia64/xenmisc.c xen/arch/x86/domain.c xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_64/entry.S xen/common/schedule.c xen/include/xen/sched.h |
line diff
--- a/xen/arch/ia64/xenmisc.c	Thu Apr 28 18:26:25 2005 +0000
+++ b/xen/arch/ia64/xenmisc.c	Thu Apr 28 20:55:07 2005 +0000
@@ -278,6 +278,11 @@ if (!i--) { printk("+",id); cnt[id] = 10
     if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
 }
 
+void continue_running(struct exec_domain *same)
+{
+    /* nothing to do */
+}
+
 void panic_domain(struct pt_regs *regs, const char *fmt, ...)
 {
     va_list args;
--- a/xen/arch/x86/domain.c	Thu Apr 28 18:26:25 2005 +0000
+++ b/xen/arch/x86/domain.c	Thu Apr 28 20:55:07 2005 +0000
@@ -794,7 +794,12 @@ void context_switch(struct exec_domain *
     clear_bit(EDF_RUNNING, &prev->ed_flags);
 
     schedule_tail(next);
+    BUG();
+}
 
+void continue_running(struct exec_domain *same)
+{
+    schedule_tail(same);
     BUG();
 }
 
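On x86 a call into the scheduler is expected never to return: even when no context switch happens, the vcpu must resume through schedule_tail() so that the exit path (and the pre-stored return code) is taken consistently, and BUG() documents that schedule_tail() must not fall through. A runnable toy of that contract, with hypothetical stand-ins (exit() for the jump back to guest context, abort() for BUG(); not the Xen source):

    /* toy_continue.c - compile: cc toy_continue.c && ./a.out */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdnoreturn.h>

    struct exec_domain;    /* opaque: stands in for Xen's vcpu structure */

    /* Toy stand-in for schedule_tail(): the real one switches to the
     * chosen vcpu's stack and jumps back into guest context, so it
     * must never return to its caller. */
    static noreturn void schedule_tail(struct exec_domain *ed)
    {
        (void)ed;
        puts("resuming guest context");
        exit(0);
    }

    void continue_running(struct exec_domain *same)
    {
        schedule_tail(same);   /* resume the same vcpu */
        abort();               /* stand-in for BUG(): must be unreachable */
    }

    int main(void)
    {
        continue_running(NULL);
    }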
--- a/xen/arch/x86/x86_32/entry.S	Thu Apr 28 18:26:25 2005 +0000
+++ b/xen/arch/x86/x86_32/entry.S	Thu Apr 28 20:55:07 2005 +0000
@@ -652,6 +652,12 @@ ENTRY(setup_vm86_frame)
         addl $16,%esp
         ret
 
+do_arch_sched_op:
+        # Ensure we return success even if we return via schedule_tail()
+        xorl %eax,%eax
+        movl %eax,UREGS_eax+4(%esp)
+        jmp SYMBOL_NAME(do_sched_op)
+
 do_switch_vm86:
         # Discard the return address
         addl $4,%esp
@@ -718,7 +724,7 @@ ENTRY(hypercall_table)
         .long SYMBOL_NAME(do_stack_switch)
         .long SYMBOL_NAME(do_set_callbacks)
         .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
-        .long SYMBOL_NAME(do_sched_op)
+        .long SYMBOL_NAME(do_arch_sched_op)
         .long SYMBOL_NAME(do_dom0_op)
         .long SYMBOL_NAME(do_set_debugreg)
         .long SYMBOL_NAME(do_get_debugreg)
--- a/xen/arch/x86/x86_64/entry.S	Thu Apr 28 18:26:25 2005 +0000
+++ b/xen/arch/x86/x86_64/entry.S	Thu Apr 28 20:55:07 2005 +0000
@@ -523,6 +523,12 @@ ENTRY(nmi)
         call SYMBOL_NAME(do_nmi)
         jmp restore_all_xen
 
+do_arch_sched_op:
+        # Ensure we return success even if we return via schedule_tail()
+        xorl %eax,%eax
+        movq %rax,UREGS_rax+8(%rsp)
+        jmp SYMBOL_NAME(do_sched_op)
+
 .data
 
 ENTRY(exception_table)
@@ -554,7 +560,7 @@ ENTRY(hypercall_table)
         .quad SYMBOL_NAME(do_stack_switch)
         .quad SYMBOL_NAME(do_set_callbacks)
         .quad SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
-        .quad SYMBOL_NAME(do_sched_op)
+        .quad SYMBOL_NAME(do_arch_sched_op)
         .quad SYMBOL_NAME(do_dom0_op)
         .quad SYMBOL_NAME(do_set_debugreg)
         .quad SYMBOL_NAME(do_get_debugreg)
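Note the extra word in the store offset (UREGS_eax+4 on x86-32, UREGS_rax+8 on x86-64): entering do_arch_sched_op via `call` from the hypercall dispatch pushes a return address on top of the saved guest frame, so the wrapper must skip one word before indexing the frame with the UREGS_* offset. A toy model of that layout, using a hypothetical struct rather than Xen's actual frame:

    /* frame_offset.c - compile: cc frame_offset.c && ./a.out */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct stack_at_handler_entry {
        uintptr_t ret_addr;         /* pushed by `call do_arch_sched_op` */
        uintptr_t saved_guest_eax;  /* first slot of the saved frame     */
    };

    int main(void)
    {
        /* The assembly computes UREGS_eax plus one word relative to the
         * stack pointer; in this toy that is simply the field offset. */
        printf("store at sp + %zu\n",
               offsetof(struct stack_at_handler_entry, saved_guest_eax));
        return 0;
    }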
--- a/xen/common/schedule.c	Thu Apr 28 18:26:25 2005 +0000
+++ b/xen/common/schedule.c	Thu Apr 28 20:55:07 2005 +0000
@@ -228,7 +228,9 @@ long do_block(void)
 
     /* Check for events /after/ blocking: avoids wakeup waiting race. */
     if ( event_pending(ed) )
+    {
         clear_bit(EDF_BLOCKED, &ed->ed_flags);
+    }
     else
     {
         TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->eid);
@@ -382,7 +384,7 @@ static void __enter_scheduler(void)
     spin_unlock_irq(&schedule_data[cpu].schedule_lock);
 
     if ( unlikely(prev == next) )
-        return;
+        return continue_running(prev);
 
     perfc_incrc(sched_ctx);
 
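Two separate things happen in schedule.c. In do_block(), the added braces around clear_bit() are stylistic; the load-bearing pattern, noted in the context comment, is that EDF_BLOCKED is set before events are checked, so a wakeup racing with the block is either observed by the check or clears the flag itself. In __enter_scheduler(), the bare `return` becomes `return continue_running(prev);` (returning a void expression from a void function, a common GCC-ism) so the no-switch path also reaches the arch hook. A self-contained sketch of the check-after-blocking ordering, with hypothetical names:

    /* block_race.c - toy model of do_block()'s ordering.
     * Compile: cc block_race.c && ./a.out */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool blocked;
    static atomic_bool event_pending;

    static void do_block(void)
    {
        atomic_store(&blocked, true);       /* publish intent to block first */
        if ( atomic_load(&event_pending) )  /* check /after/ blocking        */
            atomic_store(&blocked, false);  /* event raced in: stay runnable */
        /* else: safe to enter the scheduler with the flag still set */
    }

    int main(void)
    {
        atomic_store(&event_pending, true); /* event arrives "during" block */
        do_block();
        printf("blocked = %d\n", atomic_load(&blocked)); /* 0: no lost wakeup */
        return 0;
    }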
--- a/xen/include/xen/sched.h	Thu Apr 28 18:26:25 2005 +0000
+++ b/xen/include/xen/sched.h	Thu Apr 28 20:55:07 2005 +0000
@@ -210,7 +210,7 @@ static inline void get_knownalive_domain
     atomic_inc(&d->refcnt);
     ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
 }
- 
+
 extern struct domain *do_createdomain(
     domid_t dom_id, unsigned int cpu);
 extern int construct_dom0(
@@ -265,10 +265,15 @@ extern void sync_lazy_execstate_cpuset(u
 extern void sync_lazy_execstate_all(void);
 extern int __sync_lazy_execstate(void);
 
+/* Called by the scheduler to switch to another exec_domain. */
 extern void context_switch(
     struct exec_domain *prev,
     struct exec_domain *next);
 
+/* Called by the scheduler to continue running the current exec_domain. */
+extern void continue_running(
+    struct exec_domain *same);
+
 void domain_init(void);
 
 int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */