xen/common/wait.c (debuggers.hg, changeset 22855:1d1eec7e1fb4)

/******************************************************************************
 * wait.c
 *
 * Sleep in hypervisor context for some event to occur.
 *
 * Copyright (c) 2010, Keir Fraser <keir@xen.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/wait.h>

struct waitqueue_vcpu {
    struct list_head list;
    struct vcpu *vcpu;
#ifdef CONFIG_X86
    /*
     * Xen/x86 does not have per-vcpu hypervisor stacks. So we must save the
     * hypervisor context before sleeping (descheduling), setjmp/longjmp-style.
     */
    void *esp;
    char stack[1500];
#endif
};

int init_waitqueue_vcpu(struct vcpu *v)
{
    struct waitqueue_vcpu *wqv;

    wqv = xmalloc(struct waitqueue_vcpu);
    if ( wqv == NULL )
        return -ENOMEM;

    memset(wqv, 0, sizeof(*wqv));
    INIT_LIST_HEAD(&wqv->list);
    wqv->vcpu = v;

    v->waitqueue_vcpu = wqv;

    return 0;
}

void destroy_waitqueue_vcpu(struct vcpu *v)
{
    struct waitqueue_vcpu *wqv;

    wqv = v->waitqueue_vcpu;
    if ( wqv == NULL )
        return;

    BUG_ON(!list_empty(&wqv->list));
    xfree(wqv);

    v->waitqueue_vcpu = NULL;
}

void init_waitqueue_head(struct waitqueue_head *wq)
{
    spin_lock_init(&wq->lock);
    INIT_LIST_HEAD(&wq->list);
}

void wake_up(struct waitqueue_head *wq)
{
    struct waitqueue_vcpu *wqv;

    spin_lock(&wq->lock);

    while ( !list_empty(&wq->list) )
    {
        wqv = list_entry(wq->list.next, struct waitqueue_vcpu, list);
        list_del_init(&wqv->list);
        vcpu_unpause(wqv->vcpu);
    }

    spin_unlock(&wq->lock);
}

#ifdef CONFIG_X86

static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
{
    char *cpu_info = (char *)get_cpu_info();
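    /*
     * Added note: save the general-purpose registers and a resume address
     * on the live stack, copy everything between the current stack pointer
     * and the top-of-stack cpu_info block into wqv->stack, and record the
     * saved stack pointer in wqv->esp.  check_wakeup_from_wait() performs
     * the matching longjmp-style restore.
     */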
    asm volatile (
#ifdef CONFIG_X86_64
        "push %%rax; push %%rbx; push %%rcx; push %%rdx; push %%rdi; "
        "push %%rbp; push %%r8; push %%r9; push %%r10; push %%r11; "
        "push %%r12; push %%r13; push %%r14; push %%r15; call 1f; "
        "1: mov 80(%%rsp),%%rdi; mov 96(%%rsp),%%rcx; mov %%rsp,%%rsi; "
        "sub %%rsi,%%rcx; rep movsb; mov %%rsp,%%rsi; pop %%rax; "
        "pop %%r15; pop %%r14; pop %%r13; pop %%r12; "
        "pop %%r11; pop %%r10; pop %%r9; pop %%r8; "
        "pop %%rbp; pop %%rdi; pop %%rdx; pop %%rcx; pop %%rbx; pop %%rax"
#else
        "push %%eax; push %%ebx; push %%ecx; push %%edx; push %%edi; "
        "push %%ebp; call 1f; "
        "1: mov 8(%%esp),%%edi; mov 16(%%esp),%%ecx; mov %%esp,%%esi; "
        "sub %%esi,%%ecx; rep movsb; mov %%esp,%%esi; pop %%eax; "
        "pop %%ebp; pop %%edi; pop %%edx; pop %%ecx; pop %%ebx; pop %%eax"
#endif
        : "=S" (wqv->esp)
        : "c" (cpu_info), "D" (wqv->stack)
        : "memory" );
    BUG_ON((cpu_info - (char *)wqv->esp) > sizeof(wqv->stack));
}

static void __finish_wait(struct waitqueue_vcpu *wqv)
{
    wqv->esp = NULL;
}

void check_wakeup_from_wait(void)
{
    struct waitqueue_vcpu *wqv = current->waitqueue_vcpu;

    ASSERT(list_empty(&wqv->list));

    if ( likely(wqv->esp == NULL) )
        return;

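    /*
     * Added note: switch back to the saved stack pointer, copy the stack
     * image saved in wqv->stack over the live stack, and jump to the resume
     * address stored at the top of that image (the '1:' label saved by
     * __prepare_to_wait()).
     */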
    asm volatile (
        "mov %1,%%"__OP"sp; rep movsb; jmp *(%%"__OP"sp)"
        : : "S" (wqv->stack), "D" (wqv->esp),
        "c" ((char *)get_cpu_info() - (char *)wqv->esp)
        : "memory" );
}

#else /* !CONFIG_X86 */

#define __prepare_to_wait(wqv) ((void)0)
#define __finish_wait(wqv) ((void)0)

#endif

void prepare_to_wait(struct waitqueue_head *wq)
{
    struct vcpu *curr = current;
    struct waitqueue_vcpu *wqv = curr->waitqueue_vcpu;

    ASSERT(list_empty(&wqv->list));

    spin_lock(&wq->lock);
    list_add_tail(&wqv->list, &wq->list);
    vcpu_pause_nosync(curr);
    spin_unlock(&wq->lock);

    __prepare_to_wait(wqv);
}

void finish_wait(struct waitqueue_head *wq)
{
    struct vcpu *curr = current;
    struct waitqueue_vcpu *wqv = curr->waitqueue_vcpu;

    __finish_wait(wqv);

    if ( list_empty(&wqv->list) )
        return;

    spin_lock(&wq->lock);
    if ( !list_empty(&wqv->list) )
    {
        list_del_init(&wqv->list);
        vcpu_unpause(curr);
    }
    spin_unlock(&wq->lock);
}
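
For context, here is a caller-side sketch (not part of wait.c) of how these primitives are meant to be paired: a waiting vcpu queues itself with prepare_to_wait(), re-checks its condition, and gives up the CPU until another context sets the condition and calls wake_up(). The names req_wq, req_done, wait_for_request() and signal_request_done() are invented for illustration, and the raise_softirq(SCHEDULE_SOFTIRQ) yield is a simplification; in-tree users would normally go through whatever higher-level wrapper xen/wait.h offers (e.g. a wait_event()-style macro).

/* Hypothetical caller sketch -- not part of wait.c. */
#include <xen/types.h>
#include <xen/wait.h>
#include <xen/softirq.h>

static struct waitqueue_head req_wq;   /* assumed: init_waitqueue_head()ed at boot */
static bool_t req_done;                /* assumed: set once the event has happened */

/* Waiter: runs in hypervisor context on the current vcpu. */
static void wait_for_request(void)
{
    for ( ; ; )
    {
        prepare_to_wait(&req_wq);        /* enqueue and pause the current vcpu */
        if ( req_done )
            break;                       /* condition already true: don't sleep */
        raise_softirq(SCHEDULE_SOFTIRQ); /* simplified yield to the scheduler;
                                          * after wake_up(), x86 resumes from the
                                          * context saved by __prepare_to_wait()
                                          * via check_wakeup_from_wait() */
    }
    finish_wait(&req_wq);                /* dequeue (and unpause) if still queued */
}

/* Waker: called from any context once the event has occurred. */
static void signal_request_done(void)
{
    req_done = 1;
    wake_up(&req_wq);                    /* unpause every vcpu queued on req_wq */
}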