debuggers.hg

view xen/common/wait.c @ 22906:700ac6445812

Now add KDB to the non-kdb tree
author Mukesh Rathor
date Thu Feb 03 15:42:41 2011 -0800 (2011-02-03)
parents 0e614c0eb4a9
children
line source
1 /******************************************************************************
2 * wait.c
3 *
4 * Sleep in hypervisor context for some event to occur.
5 *
6 * Copyright (c) 2010, Keir Fraser <keir@xen.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
23 #include <xen/config.h>
24 #include <xen/sched.h>
25 #include <xen/wait.h>
/* Per-vcpu state needed to sleep on, and be woken from, a waitqueue. */
struct waitqueue_vcpu {
    /* Link in a waitqueue_head's list while this vcpu is queued waiting. */
    struct list_head list;
    /* Back-pointer to the owning vcpu (the one that sleeps/wakes). */
    struct vcpu *vcpu;
#ifdef CONFIG_X86
    /*
     * Xen/x86 does not have per-vcpu hypervisor stacks. So we must save the
     * hypervisor context before sleeping (descheduling), setjmp/longjmp-style.
     */
    void *esp;        /* Saved stack pointer; NULL when no context is saved. */
    char stack[1500]; /* Snapshot of the live hypervisor stack contents. */
#endif
};
40 int init_waitqueue_vcpu(struct vcpu *v)
41 {
42 struct waitqueue_vcpu *wqv;
44 wqv = xmalloc(struct waitqueue_vcpu);
45 if ( wqv == NULL )
46 return -ENOMEM;
48 memset(wqv, 0, sizeof(*wqv));
49 INIT_LIST_HEAD(&wqv->list);
50 wqv->vcpu = v;
52 v->waitqueue_vcpu = wqv;
54 return 0;
55 }
57 void destroy_waitqueue_vcpu(struct vcpu *v)
58 {
59 struct waitqueue_vcpu *wqv;
61 wqv = v->waitqueue_vcpu;
62 if ( wqv == NULL )
63 return;
65 BUG_ON(!list_empty(&wqv->list));
66 xfree(wqv);
68 v->waitqueue_vcpu = NULL;
69 }
71 void init_waitqueue_head(struct waitqueue_head *wq)
72 {
73 spin_lock_init(&wq->lock);
74 INIT_LIST_HEAD(&wq->list);
75 }
77 void wake_up(struct waitqueue_head *wq)
78 {
79 struct waitqueue_vcpu *wqv;
81 spin_lock(&wq->lock);
83 while ( !list_empty(&wq->list) )
84 {
85 wqv = list_entry(wq->list.next, struct waitqueue_vcpu, list);
86 list_del_init(&wqv->list);
87 vcpu_unpause(wqv->vcpu);
88 }
90 spin_unlock(&wq->lock);
91 }
93 #ifdef CONFIG_X86
/*
 * Save the current hypervisor execution context, setjmp-style: push the
 * callee-visible registers plus a resume address (via 'call 1f') onto the
 * live stack, then 'rep movsb' the whole active stack region — from the
 * current stack pointer up to get_cpu_info(), the top of this cpu's
 * hypervisor stack — into wqv->stack, and record the stack pointer in
 * wqv->esp.  check_wakeup_from_wait() performs the matching longjmp.
 */
static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
{
    char *cpu_info = (char *)get_cpu_info();
    asm volatile (
#ifdef CONFIG_X86_64
        "push %%rax; push %%rbx; push %%rcx; push %%rdx; push %%rdi; "
        "push %%rbp; push %%r8; push %%r9; push %%r10; push %%r11; "
        "push %%r12; push %%r13; push %%r14; push %%r15; call 1f; "
        /* 80(%rsp)/96(%rsp) reload the saved %rdi/%rcx operands below. */
        "1: mov 80(%%rsp),%%rdi; mov 96(%%rsp),%%rcx; mov %%rsp,%%rsi; "
        "sub %%rsi,%%rcx; rep movsb; mov %%rsp,%%rsi; pop %%rax; "
        "pop %%r15; pop %%r14; pop %%r13; pop %%r12; "
        "pop %%r11; pop %%r10; pop %%r9; pop %%r8; "
        "pop %%rbp; pop %%rdi; pop %%rdx; pop %%rcx; pop %%rbx; pop %%rax"
#else
        "push %%eax; push %%ebx; push %%ecx; push %%edx; push %%edi; "
        "push %%ebp; call 1f; "
        "1: mov 8(%%esp),%%edi; mov 16(%%esp),%%ecx; mov %%esp,%%esi; "
        "sub %%esi,%%ecx; rep movsb; mov %%esp,%%esi; pop %%eax; "
        "pop %%ebp; pop %%edi; pop %%edx; pop %%ecx; pop %%ebx; pop %%eax"
#endif
        : "=S" (wqv->esp)            /* out: saved stack pointer */
        : "c" (cpu_info), "D" (wqv->stack) /* in: copy bound, dest buffer */
        : "memory" );
    /*
     * NOTE(review): this bound check runs only AFTER the 'rep movsb' copy;
     * if the live stack exceeded sizeof(wqv->stack) the buffer overrun has
     * already happened by the time we BUG().  Consider bounding the copy
     * before it is performed — TODO confirm against later upstream fixes.
     */
    BUG_ON((cpu_info - (char *)wqv->esp) > sizeof(wqv->stack));
}
/*
 * Discard the saved stack context: a NULL esp tells
 * check_wakeup_from_wait() that no longjmp-style restore is pending.
 */
static void __finish_wait(struct waitqueue_vcpu *wqv)
{
    wqv->esp = NULL;
}
/*
 * Called on the wakeup path: if this vcpu has a stack context saved by
 * __prepare_to_wait(), copy the snapshot back over the live hypervisor
 * stack and longjmp to the saved resume address.  On the fast path (no
 * saved context) this is a cheap NULL check and return.
 */
void check_wakeup_from_wait(void)
{
    struct waitqueue_vcpu *wqv = current->waitqueue_vcpu;

    /* We must have been dequeued from the waitqueue before resuming. */
    ASSERT(list_empty(&wqv->list));

    if ( likely(wqv->esp == NULL) )
        return;

    /*
     * Restore the saved stack pointer, copy wqv->stack back onto the live
     * stack (%esi=snapshot, %edi=saved esp, %ecx=length), then jump to the
     * resume address that 'call 1f' left at the top of the saved frame.
     * This never returns here.
     */
    asm volatile (
        "mov %1,%%"__OP"sp; rep movsb; jmp *(%%"__OP"sp)"
        : : "S" (wqv->stack), "D" (wqv->esp),
        "c" ((char *)get_cpu_info() - (char *)wqv->esp)
        : "memory" );
}
#else /* !CONFIG_X86 */

/*
 * Architectures with per-vcpu hypervisor stacks need no stack save/restore:
 * the context-save hooks collapse to no-ops.
 */
#define __prepare_to_wait(wqv) ((void)0)
#define __finish_wait(wqv) ((void)0)

#endif
/*
 * Queue the current vcpu on @wq and put it to sleep.  Ordering is
 * critical:
 *  - The list insertion and vcpu_pause_nosync() both happen under
 *    wq->lock, so a concurrent wake_up() (which dequeues and unpauses
 *    under the same lock) always pairs its unpause with our pause.
 *  - __prepare_to_wait() runs only after the lock is dropped: it snapshots
 *    the live stack, and the restored context resumes from inside it.
 */
void prepare_to_wait(struct waitqueue_head *wq)
{
    struct vcpu *curr = current;
    struct waitqueue_vcpu *wqv = curr->waitqueue_vcpu;

    /* Must not already be waiting on some queue. */
    ASSERT(list_empty(&wqv->list));

    spin_lock(&wq->lock);
    list_add_tail(&wqv->list, &wq->list);
    vcpu_pause_nosync(curr);
    spin_unlock(&wq->lock);

    __prepare_to_wait(wqv);
}
/*
 * Stop waiting on @wq.  Uses a double-checked pattern: a lock-free
 * list_empty() test skips the lock in the common case where wake_up()
 * already dequeued (and unpaused) us; otherwise we re-check under the
 * lock and, if still queued, dequeue and unpause ourselves — the wakeup
 * never arrived, so we undo the pause taken in prepare_to_wait().
 */
void finish_wait(struct waitqueue_head *wq)
{
    struct vcpu *curr = current;
    struct waitqueue_vcpu *wqv = curr->waitqueue_vcpu;

    /* Drop any saved stack context first; we are running normally again. */
    __finish_wait(wqv);

    if ( list_empty(&wqv->list) )
        return;

    spin_lock(&wq->lock);
    if ( !list_empty(&wqv->list) )
    {
        list_del_init(&wqv->list);
        vcpu_unpause(curr);
    }
    spin_unlock(&wq->lock);
}