debuggers.hg

view unmodified_drivers/linux-2.6/platform-pci/evtchn.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation on the virtual disk
file passed through the config file. This validation ensures that we
don't go too far with initialization — such as spawning qemu — when
there may be fundamental problems with the disk file itself.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents 1ea9dd2c7331
children
line source
1 /******************************************************************************
2 * evtchn.c
3 *
4 * A simplified event channel for para-drivers in unmodified linux
5 *
6 * Copyright (c) 2002-2005, K A Fraser
7 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
8 *
9 * This file may be distributed separately from the Linux kernel, or
10 * incorporated into other software packages, subject to the following license:
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this source file (the "Software"), to deal in the Software without
14 * restriction, including without limitation the rights to use, copy, modify,
15 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
16 * and to permit persons to whom the Software is furnished to do so, subject to
17 * the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 * IN THE SOFTWARE.
29 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <xen/evtchn.h>
#include <xen/interface/hvm/ioreq.h>
#include <xen/features.h>
#include "platform-pci.h"

#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif
43 void *shared_info_area;
45 #define is_valid_evtchn(x) ((x) != 0)
46 #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn)
48 static struct {
49 spinlock_t lock;
50 irq_handler_t handler;
51 void *dev_id;
52 int evtchn;
53 int close:1; /* close on unbind_from_irqhandler()? */
54 int inuse:1;
55 int in_handler:1;
56 } irq_evtchn[256];
57 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
58 [0 ... NR_EVENT_CHANNELS-1] = -1 };
60 static DEFINE_SPINLOCK(irq_alloc_lock);
62 static int alloc_xen_irq(void)
63 {
64 static int warned;
65 int irq;
67 spin_lock(&irq_alloc_lock);
69 for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) {
70 if (irq_evtchn[irq].inuse)
71 continue;
72 irq_evtchn[irq].inuse = 1;
73 spin_unlock(&irq_alloc_lock);
74 return irq;
75 }
77 if (!warned) {
78 warned = 1;
79 printk(KERN_WARNING "No available IRQ to bind to: "
80 "increase irq_evtchn[] size in evtchn.c.\n");
81 }
83 spin_unlock(&irq_alloc_lock);
85 return -ENOSPC;
86 }
/* Return an irq_evtchn[] slot obtained from alloc_xen_irq() to the pool. */
static void free_xen_irq(int irq)
{
	spin_lock(&irq_alloc_lock);
	irq_evtchn[irq].inuse = 0;
	spin_unlock(&irq_alloc_lock);
}
/*
 * Look up the event-channel port bound to @irq; 0 means no port is bound.
 * Lockless read — callers must know the binding is stable.
 */
int irq_to_evtchn_port(int irq)
{
	return evtchn_from_irq(irq);
}
EXPORT_SYMBOL(irq_to_evtchn_port);
101 void mask_evtchn(int port)
102 {
103 shared_info_t *s = shared_info_area;
104 synch_set_bit(port, &s->evtchn_mask[0]);
105 }
106 EXPORT_SYMBOL(mask_evtchn);
108 void unmask_evtchn(int port)
109 {
110 evtchn_unmask_t op = { .port = port };
111 VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op));
112 }
113 EXPORT_SYMBOL(unmask_evtchn);
115 int bind_listening_port_to_irqhandler(
116 unsigned int remote_domain,
117 irq_handler_t handler,
118 unsigned long irqflags,
119 const char *devname,
120 void *dev_id)
121 {
122 struct evtchn_alloc_unbound alloc_unbound;
123 int err, irq;
125 irq = alloc_xen_irq();
126 if (irq < 0)
127 return irq;
129 spin_lock_irq(&irq_evtchn[irq].lock);
131 alloc_unbound.dom = DOMID_SELF;
132 alloc_unbound.remote_dom = remote_domain;
133 err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
134 &alloc_unbound);
135 if (err) {
136 spin_unlock_irq(&irq_evtchn[irq].lock);
137 free_xen_irq(irq);
138 return err;
139 }
141 irq_evtchn[irq].handler = handler;
142 irq_evtchn[irq].dev_id = dev_id;
143 irq_evtchn[irq].evtchn = alloc_unbound.port;
144 irq_evtchn[irq].close = 1;
146 evtchn_to_irq[alloc_unbound.port] = irq;
148 unmask_evtchn(alloc_unbound.port);
150 spin_unlock_irq(&irq_evtchn[irq].lock);
152 return irq;
153 }
154 EXPORT_SYMBOL(bind_listening_port_to_irqhandler);
156 int bind_caller_port_to_irqhandler(
157 unsigned int caller_port,
158 irq_handler_t handler,
159 unsigned long irqflags,
160 const char *devname,
161 void *dev_id)
162 {
163 int irq;
165 irq = alloc_xen_irq();
166 if (irq < 0)
167 return irq;
169 spin_lock_irq(&irq_evtchn[irq].lock);
171 irq_evtchn[irq].handler = handler;
172 irq_evtchn[irq].dev_id = dev_id;
173 irq_evtchn[irq].evtchn = caller_port;
174 irq_evtchn[irq].close = 0;
176 evtchn_to_irq[caller_port] = irq;
178 unmask_evtchn(caller_port);
180 spin_unlock_irq(&irq_evtchn[irq].lock);
182 return irq;
183 }
184 EXPORT_SYMBOL(bind_caller_port_to_irqhandler);
/*
 * Tear down a binding created by bind_*_to_irqhandler().  Masks the port,
 * removes the reverse mapping, closes the port if this module opened it
 * (close flag set by the listening-port variant), then waits for any
 * in-flight handler invocation to finish before recycling the irq slot.
 * @dev_id is unused here; it is part of the conventional free_irq-style
 * signature.
 */
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	int evtchn;

	spin_lock_irq(&irq_evtchn[irq].lock);

	evtchn = evtchn_from_irq(irq);

	if (is_valid_evtchn(evtchn)) {
		/* Unhook the reverse map first so evtchn_interrupt() can no
		 * longer find this irq, then stop further event delivery. */
		evtchn_to_irq[evtchn] = -1;
		mask_evtchn(evtchn);
		if (irq_evtchn[irq].close) {
			struct evtchn_close close = { .port = evtchn };
			/* Closing a port we allocated should never fail. */
			if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
				BUG();
		}
	}

	irq_evtchn[irq].handler = NULL;
	irq_evtchn[irq].evtchn = 0;

	spin_unlock_irq(&irq_evtchn[irq].lock);

	/* A handler dispatched before we took the lock may still be running
	 * (evtchn_interrupt() drops the lock around the callback); busy-wait
	 * until it clears in_handler before freeing the slot. */
	while (irq_evtchn[irq].in_handler)
		cpu_relax();

	free_xen_irq(irq);
}
EXPORT_SYMBOL(unbind_from_irqhandler);
/*
 * Kick the remote end of the event channel bound to @irq.  Silently does
 * nothing if the irq has no channel bound (port 0).
 */
void notify_remote_via_irq(int irq)
{
	int port = evtchn_from_irq(irq);

	if (is_valid_evtchn(port))
		notify_remote_via_evtchn(port);
}
EXPORT_SYMBOL(notify_remote_via_irq);
/*
 * Scan-resume cursors for evtchn_interrupt()'s two-level pending-bit scan.
 * Each pass resumes one bit past where the previous pass stopped, so low-
 * numbered ports cannot starve high-numbered ones.  Initialised to
 * BITS_PER_LONG-1 so the first scan starts at bit 0.
 */
static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 };
static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 };
229 static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
230 unsigned int idx)
231 {
232 return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
233 }
/*
 * PCI interrupt handler: dispatch all pending Xen event channels.
 *
 * Performs a two-level scan of the shared-info pending bitmaps: the
 * selector word (evtchn_pending_sel) names which l1 words have pending
 * bits, and each l1 word covers BITS_PER_LONG ports.  The scan resumes
 * one bit past the per-CPU cursors saved last time, so ports are serviced
 * round-robin rather than lowest-first.
 *
 * Note the preprocessor trick below: on kernels >= 2.6.19 the macro
 * rewrites the three-argument handler(...) call site to the two-argument
 * form, and the `regs` parameter/argument disappears entirely.
 */
static irqreturn_t evtchn_interrupt(int irq, void *dev_id
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
		    , struct pt_regs *regs
#else
# define handler(irq, dev_id, regs) handler(irq, dev_id)
#endif
		    )
{
	unsigned int l1i, l2i, port;
	unsigned long masked_l1, masked_l2;
	/* XXX: All events are bound to vcpu0 but irq may be redirected. */
	int cpu = 0; /*smp_processor_id();*/
	irq_handler_t handler;
	shared_info_t *s = shared_info_area;
	vcpu_info_t *v = &s->vcpu_info[cpu];
	unsigned long l1, l2;

	v->evtchn_upcall_pending = 0;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
	/* Atomically grab and clear the selector word. */
	l1 = xchg(&v->evtchn_pending_sel, 0);

	/* Resume scanning one bit past where the previous pass stopped. */
	l1i = per_cpu(last_processed_l1i, cpu);
	l2i = per_cpu(last_processed_l2i, cpu);

	while (l1 != 0) {

		l1i = (l1i + 1) % BITS_PER_LONG;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */
			l1i = BITS_PER_LONG - 1;
			l2i = BITS_PER_LONG - 1;
			continue;
		}
		l1i = __ffs(masked_l1);

		do {
			l2 = active_evtchns(cpu, s, l1i);

			l2i = (l2i + 1) % BITS_PER_LONG;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) { /* if we masked out all events, move on */
				l2i = BITS_PER_LONG - 1;
				break;
			}
			l2i = __ffs(masked_l2);

			/* process port */
			port = (l1i * BITS_PER_LONG) + l2i;
			synch_clear_bit(port, &s->evtchn_pending[0]);

			irq = evtchn_to_irq[port];
			if (irq < 0)
				continue;	/* port no longer bound */

			/* Snapshot the binding under the lock; mark the
			 * handler as running so unbind can wait for us. */
			spin_lock(&irq_evtchn[irq].lock);
			handler = irq_evtchn[irq].handler;
			dev_id = irq_evtchn[irq].dev_id;
			if (unlikely(handler == NULL)) {
				printk("Xen IRQ%d (port %d) has no handler!\n",
				       irq, port);
				spin_unlock(&irq_evtchn[irq].lock);
				continue;
			}
			irq_evtchn[irq].in_handler = 1;
			spin_unlock(&irq_evtchn[irq].lock);

			/* Run the callback with interrupts enabled and the
			 * slot lock dropped (the in_handler flag protects
			 * the slot from being recycled meanwhile). */
			local_irq_enable();
			handler(irq, irq_evtchn[irq].dev_id, regs);
			local_irq_disable();

			spin_lock(&irq_evtchn[irq].lock);
			irq_evtchn[irq].in_handler = 0;
			spin_unlock(&irq_evtchn[irq].lock);

			/* if this is the final port processed, we'll pick up here+1 next time */
			per_cpu(last_processed_l1i, cpu) = l1i;
			per_cpu(last_processed_l2i, cpu) = l2i;

		} while (l2i != BITS_PER_LONG - 1);

		l2 = active_evtchns(cpu, s, l1i);
		if (l2 == 0) /* we handled all ports, so we can clear the selector bit */
			l1 &= ~(1UL << l1i);
	}

	return IRQ_HANDLED;
}
329 void irq_resume(void)
330 {
331 int evtchn, irq;
333 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) {
334 mask_evtchn(evtchn);
335 evtchn_to_irq[evtchn] = -1;
336 }
338 for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
339 irq_evtchn[irq].evtchn = 0;
340 }
/*
 * One-time init: set up the per-slot spinlocks and attach our handler to
 * the platform PCI device's interrupt line.  The flag spelling changed in
 * 2.6.22 (SA_* -> IRQF_*), hence the version conditional.
 * Returns request_irq()'s result: 0 on success, negative errno otherwise.
 */
int xen_irq_init(struct pci_dev *pdev)
{
	int irq;

	for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
		spin_lock_init(&irq_evtchn[irq].lock);

	return request_irq(pdev->irq, evtchn_interrupt,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
			   SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT,
#else
			   IRQF_SHARED | IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
#endif
			   "xen-platform-pci", pdev);
}