/root/src/xen/xen/common/wait.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * wait.c |
3 | | * |
4 | | * Sleep in hypervisor context for some event to occur. |
5 | | * |
6 | | * Copyright (c) 2010, Keir Fraser <keir@xen.org> |
7 | | * |
8 | | * This program is free software; you can redistribute it and/or modify |
9 | | * it under the terms of the GNU General Public License as published by |
10 | | * the Free Software Foundation; either version 2 of the License, or |
11 | | * (at your option) any later version. |
12 | | * |
13 | | * This program is distributed in the hope that it will be useful, |
14 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | | * GNU General Public License for more details. |
17 | | * |
18 | | * You should have received a copy of the GNU General Public License |
19 | | * along with this program; If not, see <http://www.gnu.org/licenses/>. |
20 | | */ |
21 | | |
22 | | #include <xen/sched.h> |
23 | | #include <xen/wait.h> |
24 | | #include <xen/errno.h> |
25 | | |
26 | | struct waitqueue_vcpu { |
27 | | struct list_head list; |
28 | | struct vcpu *vcpu; |
29 | | #ifdef CONFIG_X86 |
30 | | /* |
31 | | * Xen/x86 does not have per-vcpu hypervisor stacks. So we must save the |
32 | | * hypervisor context before sleeping (descheduling), setjmp/longjmp-style. |
33 | | */ |
34 | | void *esp; |
35 | | char *stack; |
36 | | cpumask_t saved_affinity; |
37 | | unsigned int wakeup_cpu; |
38 | | #endif |
39 | | }; |
40 | | |
41 | | int init_waitqueue_vcpu(struct vcpu *v) |
42 | 12 | { |
43 | 12 | struct waitqueue_vcpu *wqv; |
44 | 12 | |
45 | 12 | wqv = xzalloc(struct waitqueue_vcpu); |
46 | 12 | if ( wqv == NULL ) |
47 | 0 | return -ENOMEM; |
48 | 12 | |
49 | 12 | #ifdef CONFIG_X86 |
50 | 12 | wqv->stack = alloc_xenheap_page(); |
51 | 12 | if ( wqv->stack == NULL ) |
52 | 0 | { |
53 | 0 | xfree(wqv); |
54 | 0 | return -ENOMEM; |
55 | 0 | } |
56 | 12 | #endif |
57 | 12 | |
58 | 12 | INIT_LIST_HEAD(&wqv->list); |
59 | 12 | wqv->vcpu = v; |
60 | 12 | |
61 | 12 | v->waitqueue_vcpu = wqv; |
62 | 12 | |
63 | 12 | return 0; |
64 | 12 | } |
65 | | |
66 | | void destroy_waitqueue_vcpu(struct vcpu *v) |
67 | 0 | { |
68 | 0 | struct waitqueue_vcpu *wqv; |
69 | 0 |
|
70 | 0 | wqv = v->waitqueue_vcpu; |
71 | 0 | if ( wqv == NULL ) |
72 | 0 | return; |
73 | 0 |
|
74 | 0 | BUG_ON(!list_empty(&wqv->list)); |
75 | 0 | #ifdef CONFIG_X86 |
76 | 0 | free_xenheap_page(wqv->stack); |
77 | 0 | #endif |
78 | 0 | xfree(wqv); |
79 | 0 |
|
80 | 0 | v->waitqueue_vcpu = NULL; |
81 | 0 | } |
82 | | |
83 | | void init_waitqueue_head(struct waitqueue_head *wq) |
84 | 0 | { |
85 | 0 | spin_lock_init(&wq->lock); |
86 | 0 | INIT_LIST_HEAD(&wq->list); |
87 | 0 | } |
88 | | |
/*
 * Retire a waitqueue head. Any vcpus still parked on it are woken (and
 * unpaused) so the queue is empty before its memory is reused/freed.
 */
void destroy_waitqueue_head(struct waitqueue_head *wq)
{
    wake_up_all(wq);
}
93 | | |
94 | | void wake_up_nr(struct waitqueue_head *wq, unsigned int nr) |
95 | 0 | { |
96 | 0 | struct waitqueue_vcpu *wqv; |
97 | 0 |
|
98 | 0 | spin_lock(&wq->lock); |
99 | 0 |
|
100 | 0 | while ( !list_empty(&wq->list) && nr-- ) |
101 | 0 | { |
102 | 0 | wqv = list_entry(wq->list.next, struct waitqueue_vcpu, list); |
103 | 0 | list_del_init(&wqv->list); |
104 | 0 | vcpu_unpause(wqv->vcpu); |
105 | 0 | put_domain(wqv->vcpu->domain); |
106 | 0 | } |
107 | 0 |
|
108 | 0 | spin_unlock(&wq->lock); |
109 | 0 | } |
110 | | |
/* Wake exactly one sleeper (the head of the FIFO), if any. */
void wake_up_one(struct waitqueue_head *wq)
{
    wake_up_nr(wq, 1);
}
115 | | |
/* Wake every sleeper on @wq; UINT_MAX acts as "no limit". */
void wake_up_all(struct waitqueue_head *wq)
{
    wake_up_nr(wq, UINT_MAX);
}
120 | | |
121 | | #ifdef CONFIG_X86 |
122 | | |
/*
 * setjmp-style save of the current hypervisor stack into wqv->stack, so
 * that this vcpu can be descheduled and later resumed (possibly from a
 * different call chain) by check_wakeup_from_wait().
 */
static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
{
    struct cpu_info *cpu_info = get_cpu_info();
    struct vcpu *curr = current;
    unsigned long dummy;
    u32 entry_vector = cpu_info->guest_cpu_user_regs.entry_vector;

    /*
     * Clear TRAP_regs_partial for the duration of the save; the original
     * entry_vector is restored at the end of this function.
     */
    cpu_info->guest_cpu_user_regs.entry_vector &= ~TRAP_regs_partial;
    /* A non-NULL esp would mean a previous save was never consumed. */
    ASSERT(wqv->esp == 0);

    /* Save current VCPU affinity; force wakeup on *this* CPU only. */
    wqv->wakeup_cpu = smp_processor_id();
    cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
    if ( vcpu_set_hard_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
    {
        gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
        domain_crash_synchronous();
    }

    /*
     * Push the callee-clobbered GPRs, then push the address of label 2
     * (computed via "call 1f" + displacement) as the resume point.  The
     * stack span from %rsp up to cpu_info is then copied into wqv->stack
     * via "rep movsb" -- but only if it fits in PAGE_SIZE (%3); otherwise
     * the copy is skipped and wqv->esp stays 0 (its "0" (0) input), which
     * the check below turns into a domain crash.  On success wqv->esp
     * receives the saved %rsp.  Label 2 is also the re-entry point used
     * by check_wakeup_from_wait(), where the same "rep movsb" (with
     * rsi/rdi/rcx preloaded by the wakeup asm) copies the stack back.
     */
    asm volatile (
        "push %%rax; push %%rbx; push %%rdx; "
        "push %%rbp; push %%r8; push %%r9; push %%r10; push %%r11; "
        "push %%r12; push %%r13; push %%r14; push %%r15; call 1f; "
        "1: addq $2f-1b,(%%rsp); sub %%esp,%%ecx; cmp %3,%%ecx; ja 3f; "
        "mov %%rsp,%%rsi; 2: rep movsb; mov %%rsp,%%rsi; 3: pop %%rax; "
        "pop %%r15; pop %%r14; pop %%r13; pop %%r12; "
        "pop %%r11; pop %%r10; pop %%r9; pop %%r8; "
        "pop %%rbp; pop %%rdx; pop %%rbx; pop %%rax"
        : "=&S" (wqv->esp), "=&c" (dummy), "=&D" (dummy)
        : "i" (PAGE_SIZE), "0" (0), "1" (cpu_info), "2" (wqv->stack)
        : "memory" );

    if ( unlikely(wqv->esp == 0) )
    {
        gdprintk(XENLOG_ERR, "Stack too large in %s\n", __func__);
        domain_crash_synchronous();
    }

    cpu_info->guest_cpu_user_regs.entry_vector = entry_vector;
}
163 | | |
/*
 * Discard the saved stack context and restore the affinity that
 * __prepare_to_wait() stashed.  The affinity restore is best-effort
 * (return value deliberately ignored).
 */
static void __finish_wait(struct waitqueue_vcpu *wqv)
{
    wqv->esp = NULL;
    (void)vcpu_set_hard_affinity(current, &wqv->saved_affinity);
}
169 | | |
/*
 * Called on return to this vcpu.  If a stack context was saved by
 * __prepare_to_wait(), restore it longjmp-style and resume execution
 * inside wait_event(); otherwise return immediately.
 */
void check_wakeup_from_wait(void)
{
    struct waitqueue_vcpu *wqv = current->waitqueue_vcpu;

    /* We must already have been removed from the waitqueue by the waker. */
    ASSERT(list_empty(&wqv->list));

    /* No saved context: this vcpu was not sleeping in a waitqueue. */
    if ( likely(wqv->esp == NULL) )
        return;

    /* Check if we woke up on the wrong CPU. */
    if ( unlikely(smp_processor_id() != wqv->wakeup_cpu) )
    {
        /* Re-set VCPU affinity and re-enter the scheduler. */
        struct vcpu *curr = current;
        cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
        if ( vcpu_set_hard_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
        {
            gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
            domain_crash_synchronous();
        }
        wait(); /* takes us back into the scheduler */
    }

    /*
     * Switch %rsp back to the saved stack pointer and jump to the resume
     * address stored at the base of wqv->stack (label 2 inside
     * __prepare_to_wait()'s asm).  rsi/rdi/rcx are preloaded so that the
     * "rep movsb" there copies the saved stack contents back over
     * [wqv->esp, get_cpu_info()) before the register pops and return.
     */
    asm volatile (
        "mov %1,%%"__OP"sp; jmp *(%0)"
        : : "S" (wqv->stack), "D" (wqv->esp),
        "c" ((char *)get_cpu_info() - (char *)wqv->esp)
        : "memory" );
}
199 | | |
200 | | #else /* !CONFIG_X86 */ |
201 | | |
202 | | #define __prepare_to_wait(wqv) ((void)0) |
203 | | #define __finish_wait(wqv) ((void)0) |
204 | | |
205 | | #endif |
206 | | |
/*
 * Park the current vcpu on @wq.  Saves the stack context (x86), enqueues
 * on the waitqueue, and pauses the vcpu so the next schedule point
 * deschedules it.  Must be paired with finish_wait().
 */
void prepare_to_wait(struct waitqueue_head *wq)
{
    struct vcpu *curr = current;
    struct waitqueue_vcpu *wqv = curr->waitqueue_vcpu;

    /* Sleeping is only legal where we can legitimately be descheduled. */
    ASSERT_NOT_IN_ATOMIC();
    __prepare_to_wait(wqv);

    ASSERT(list_empty(&wqv->list));
    spin_lock(&wq->lock);
    list_add_tail(&wqv->list, &wq->list);
    /* Pause asynchronously; the actual deschedule happens later. */
    vcpu_pause_nosync(curr);
    /* Hold a domain reference while parked; dropped by the waker. */
    get_knownalive_domain(curr->domain);
    spin_unlock(&wq->lock);
}
222 | | |
/*
 * Complete a wait started by prepare_to_wait().  If a waker already
 * removed us from @wq (the normal wakeup path), there is nothing to
 * undo; otherwise (e.g. the wait condition was satisfied before any
 * wake-up) dequeue ourselves and release the pause/refcount taken in
 * prepare_to_wait().
 */
void finish_wait(struct waitqueue_head *wq)
{
    struct vcpu *curr = current;
    struct waitqueue_vcpu *wqv = curr->waitqueue_vcpu;

    __finish_wait(wqv);

    /* Lock-free fast path: a waker has already dequeued us. */
    if ( list_empty(&wqv->list) )
        return;

    spin_lock(&wq->lock);
    /* Re-check under the lock: a waker may have raced with us. */
    if ( !list_empty(&wqv->list) )
    {
        list_del_init(&wqv->list);
        vcpu_unpause(curr);
        put_domain(curr->domain);
    }
    spin_unlock(&wq->lock);
}