debuggers.hg
changeset 3017:02c4a1199884
bitkeeper revision 1.1159.170.24 (4198acd7Fs4-ujy8fHFTYRsZuN115Q)
Ported Steven Smith's multi-user extension of /dev/xen/evtchn to the
current evtchn driver. The device can now be opened multiple times, with
users bound to disjoint sets of ports.
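For context, a minimal sketch of how a single user-space process might drive the extended device is shown below. It is not part of the changeset; the include path for the EVTCHN_* ioctl numbers and the example port number are assumptions, but the device path, the ioctl commands, the u16-per-event read format, and the write-to-unmask convention all follow the driver code in the diff.

```c
/*
 * Hypothetical user-space consumer of the multi-user /dev/xen/evtchn
 * interface.  The EVTCHN_* ioctl numbers come from the driver's header;
 * the include path below is an assumption, as is the port number.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "evtchn.h"   /* assumed location of EVTCHN_BIND/UNBIND/RESET */

int main(void)
{
    uint16_t ports[64];
    unsigned long port = 5;   /* example port, for illustration only */
    ssize_t n;
    int i, fd = open("/dev/xen/evtchn", O_RDWR);

    if ( fd < 0 )
        return 1;

    /* Claim the port for this open file; under the new scheme a port
       already owned by another opener would fail with EISCONN. */
    if ( ioctl(fd, EVTCHN_BIND, port) != 0 )
        return 1;

    /* read() returns an even number of bytes: one u16 per pending port. */
    n = read(fd, ports, sizeof(ports));
    if ( n < 0 )
        return 1;

    for ( i = 0; i < n / 2; i++ )
    {
        printf("event on port %u\n", ports[i]);
        /* Writing a port number back re-unmasks it so further events arrive. */
        write(fd, &ports[i], sizeof(ports[i]));
    }

    ioctl(fd, EVTCHN_UNBIND, port);
    close(fd);
    return 0;
}
```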
| author   | kaf24@freefall.cl.cam.ac.uk                        |
|----------|----------------------------------------------------|
| date     | Mon Nov 15 13:19:19 2004 +0000 (2004-11-15)        |
| parents  | fd10b3d1ab9a                                       |
| children | b49a4a760280                                       |
| files    | linux-2.6.9-xen-sparse/drivers/xen/evtchn/evtchn.c |
line diff
--- a/linux-2.6.9-xen-sparse/drivers/xen/evtchn/evtchn.c	Mon Nov 15 10:56:35 2004 +0000
+++ b/linux-2.6.9-xen-sparse/drivers/xen/evtchn/evtchn.c	Mon Nov 15 13:19:19 2004 +0000
@@ -4,6 +4,7 @@
  * Xenolinux driver for receiving and demuxing event-channel signals.
  *
  * Copyright (c) 2004, K A Fraser
+ * Multi-process extensions Copyright (c) 2004, Steven Smith
  *
  * This file may be distributed separately from the Linux kernel, or
  * incorporated into other software packages, subject to the following license:
@@ -57,55 +58,49 @@
 static devfs_handle_t xen_dev_dir;
 #endif
 
-/* Only one process may open /dev/xen/evtchn at any time. */
-static unsigned long evtchn_dev_inuse;
-
-/* Notification ring, accessed via /dev/xen/evtchn. */
-#define RING_SIZE     2048  /* 2048 16-bit entries */
-#define RING_MASK(_i) ((_i)&(RING_SIZE-1))
-static u16 *ring;
-static unsigned int ring_cons, ring_prod, ring_overflow;
+struct per_user_data {
+    /* Notification ring, accessed via /dev/xen/evtchn. */
+#   define RING_SIZE     2048  /* 2048 16-bit entries */
+#   define RING_MASK(_i) ((_i)&(RING_SIZE-1))
+    u16 *ring;
+    unsigned int ring_cons, ring_prod, ring_overflow;
 
-/* Processes wait on this queue via /dev/xen/evtchn when ring is empty. */
-static DECLARE_WAIT_QUEUE_HEAD(evtchn_wait);
-static struct fasync_struct *evtchn_async_queue;
+    /* Processes wait on this queue when ring is empty. */
+    wait_queue_head_t evtchn_wait;
+    struct fasync_struct *evtchn_async_queue;
+};
 
-/* Which ports is user-space bound to? */
-static u32 bound_ports[32];
-
-static spinlock_t lock;
+/* Who's bound to each port? */
+static struct per_user_data *port_user[NR_EVENT_CHANNELS];
+static spinlock_t port_user_lock;
 
 void evtchn_device_upcall(int port)
 {
-    spin_lock(&lock);
+    struct per_user_data *u;
+
+    spin_lock(&port_user_lock);
 
     mask_evtchn(port);
     clear_evtchn(port);
 
-    if ( ring != NULL )
+    if ( (u = port_user[port]) != NULL )
     {
-        if ( (ring_prod - ring_cons) < RING_SIZE )
+        if ( (u->ring_prod - u->ring_cons) < RING_SIZE )
         {
-            ring[RING_MASK(ring_prod)] = (u16)port;
-            if ( ring_cons == ring_prod++ )
+            u->ring[RING_MASK(u->ring_prod)] = (u16)port;
+            if ( u->ring_cons == u->ring_prod++ )
             {
-                wake_up_interruptible(&evtchn_wait);
-                kill_fasync(&evtchn_async_queue, SIGIO, POLL_IN);
+                wake_up_interruptible(&u->evtchn_wait);
+                kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
             }
         }
         else
         {
-            ring_overflow = 1;
+            u->ring_overflow = 1;
         }
     }
 
-    spin_unlock(&lock);
-}
-
-static void __evtchn_reset_buffer_ring(void)
-{
-    /* Initialise the ring to empty. Clear errors. */
-    ring_cons = ring_prod = ring_overflow = 0;
+    spin_unlock(&port_user_lock);
 }
 
 static ssize_t evtchn_read(struct file *file, char *buf,
@@ -114,8 +109,9 @@ static ssize_t evtchn_read(struct file *
     int rc;
     unsigned int c, p, bytes1 = 0, bytes2 = 0;
     DECLARE_WAITQUEUE(wait, current);
+    struct per_user_data *u = file->private_data;
 
-    add_wait_queue(&evtchn_wait, &wait);
+    add_wait_queue(&u->evtchn_wait, &wait);
 
     count &= ~1; /* even number of bytes */
 
@@ -132,10 +128,10 @@ static ssize_t evtchn_read(struct file *
     {
         set_current_state(TASK_INTERRUPTIBLE);
 
-        if ( (c = ring_cons) != (p = ring_prod) )
+        if ( (c = u->ring_cons) != (p = u->ring_prod) )
             break;
 
-        if ( ring_overflow )
+        if ( u->ring_overflow )
         {
             rc = -EFBIG;
             goto out;
@@ -179,20 +175,20 @@ static ssize_t evtchn_read(struct file *
         bytes2 = count - bytes1;
     }
 
-    if ( copy_to_user(buf, &ring[RING_MASK(c)], bytes1) ||
-         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &ring[0], bytes2)) )
+    if ( copy_to_user(buf, &u->ring[RING_MASK(c)], bytes1) ||
+         ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) )
     {
         rc = -EFAULT;
         goto out;
     }
 
-    ring_cons += (bytes1 + bytes2) / sizeof(u16);
+    u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
 
     rc = bytes1 + bytes2;
 
  out:
     __set_current_state(TASK_RUNNING);
-    remove_wait_queue(&evtchn_wait, &wait);
+    remove_wait_queue(&u->evtchn_wait, &wait);
     return rc;
 }
 
@@ -201,6 +197,7 @@ static ssize_t evtchn_write(struct file
 {
     int rc, i;
     u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
+    struct per_user_data *u = file->private_data;
 
     if ( kbuf == NULL )
         return -ENOMEM;
@@ -222,11 +219,11 @@ static ssize_t evtchn_write(struct file
         goto out;
     }
 
-    spin_lock_irq(&lock);
+    spin_lock_irq(&port_user_lock);
     for ( i = 0; i < (count/2); i++ )
-        if ( test_bit(kbuf[i], (unsigned long *)&bound_ports[0]) )
+        if ( (kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u) )
             unmask_evtchn(kbuf[i]);
-    spin_unlock_irq(&lock);
+    spin_unlock_irq(&port_user_lock);
 
     rc = count;
 
@@ -239,32 +236,55 @@ static int evtchn_ioctl(struct inode *in
                         unsigned int cmd, unsigned long arg)
 {
     int rc = 0;
-
-    spin_lock_irq(&lock);
+    struct per_user_data *u = file->private_data;
+
+    spin_lock_irq(&port_user_lock);
 
     switch ( cmd )
     {
     case EVTCHN_RESET:
-        __evtchn_reset_buffer_ring();
+        /* Initialise the ring to empty. Clear errors. */
+        u->ring_cons = u->ring_prod = u->ring_overflow = 0;
         break;
+
     case EVTCHN_BIND:
-        if ( !test_and_set_bit(arg, (unsigned long *)&bound_ports[0]) )
-            unmask_evtchn(arg);
-        else
+        if ( arg >= NR_EVENT_CHANNELS )
+        {
             rc = -EINVAL;
+        }
+        else if ( port_user[arg] != NULL )
+        {
+            rc = -EISCONN;
+        }
+        else
+        {
+            port_user[arg] = u;
+            unmask_evtchn(arg);
+        }
         break;
+
     case EVTCHN_UNBIND:
-        if ( test_and_clear_bit(arg, (unsigned long *)&bound_ports[0]) )
+        if ( arg >= NR_EVENT_CHANNELS )
+        {
+            rc = -EINVAL;
+        }
+        else if ( port_user[arg] != u )
+        {
+            rc = -ENOTCONN;
+        }
+        else
+        {
+            port_user[arg] = NULL;
             mask_evtchn(arg);
-        else
-            rc = -EINVAL;
+        }
        break;
+
     default:
         rc = -ENOSYS;
         break;
     }
 
-    spin_unlock_irq(&lock);
+    spin_unlock_irq(&port_user_lock);
 
     return rc;
 }
@@ -272,34 +292,39 @@ static int evtchn_ioctl(struct inode *in
 static unsigned int evtchn_poll(struct file *file, poll_table *wait)
 {
     unsigned int mask = POLLOUT | POLLWRNORM;
-    poll_wait(file, &evtchn_wait, wait);
-    if ( ring_cons != ring_prod )
+    struct per_user_data *u = file->private_data;
+
+    poll_wait(file, &u->evtchn_wait, wait);
+    if ( u->ring_cons != u->ring_prod )
         mask |= POLLIN | POLLRDNORM;
-    if ( ring_overflow )
+    if ( u->ring_overflow )
         mask = POLLERR;
     return mask;
 }
 
 static int evtchn_fasync(int fd, struct file *filp, int on)
 {
-    return fasync_helper(fd, filp, on, &evtchn_async_queue);
+    struct per_user_data *u = filp->private_data;
+    return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
 }
 
 static int evtchn_open(struct inode *inode, struct file *filp)
 {
-    u16 *_ring;
+    struct per_user_data *u;
 
-    if ( test_and_set_bit(0, &evtchn_dev_inuse) )
-        return -EBUSY;
-
-    /* Allocate outside locked region so that we can use GFP_KERNEL. */
-    if ( (_ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
+    if ( (u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL )
         return -ENOMEM;
 
-    spin_lock_irq(&lock);
-    ring = _ring;
-    __evtchn_reset_buffer_ring();
-    spin_unlock_irq(&lock);
+    memset(u, 0, sizeof(*u));
+    init_waitqueue_head(&u->evtchn_wait);
+
+    if ( (u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
+    {
+        kfree(u);
+        return -ENOMEM;
+    }
+
+    filp->private_data = u;
 
     MOD_INC_USE_COUNT;
 
@@ -309,19 +334,22 @@ static int evtchn_open(struct inode *ino
 static int evtchn_release(struct inode *inode, struct file *filp)
 {
     int i;
+    struct per_user_data *u = filp->private_data;
 
-    spin_lock_irq(&lock);
-    if ( ring != NULL )
+    spin_lock_irq(&port_user_lock);
+
+    free_page((unsigned long)u->ring);
+
+    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
     {
-        free_page((unsigned long)ring);
-        ring = NULL;
+        if ( port_user[i] == u )
+        {
+            port_user[i] = NULL;
+            mask_evtchn(i);
+        }
     }
-    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
-        if ( test_and_clear_bit(i, (unsigned long *)&bound_ports[0]) )
-            mask_evtchn(i);
-    spin_unlock_irq(&lock);
 
-    evtchn_dev_inuse = 0;
+    spin_unlock_irq(&port_user_lock);
 
     MOD_DEC_USE_COUNT;
 
@@ -357,6 +385,9 @@ static int __init evtchn_init(void)
 #endif
     int err;
 
+    spin_lock_init(&port_user_lock);
+    memset(port_user, 0, sizeof(port_user));
+
     /* (DEVFS) create '/dev/misc/evtchn'. */
     err = misc_register(&evtchn_miscdev);
     if ( err != 0 )
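To make the new ownership rules concrete, here is an illustrative check, not part of the changeset, of the semantics introduced by the ioctl handler above: the device may now be opened more than once, but each port belongs to exactly one opener. The include path for the EVTCHN_* numbers is again an assumption; the error codes follow the driver code.

```c
/*
 * Hypothetical demonstration of the disjoint-ownership semantics of the
 * multi-user evtchn device.  Header path and port number are assumptions.
 */
#include <errno.h>
#include <fcntl.h>
#include <assert.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "evtchn.h"   /* assumed header for EVTCHN_BIND/EVTCHN_UNBIND */

static void demo(unsigned long port)
{
    int a = open("/dev/xen/evtchn", O_RDWR);
    int b = open("/dev/xen/evtchn", O_RDWR);   /* second open now succeeds */

    assert(a >= 0 && b >= 0);

    /* The first claim wins; a second binder sees EISCONN, and unbinding a
       port you do not own yields ENOTCONN. */
    assert(ioctl(a, EVTCHN_BIND, port) == 0);
    assert(ioctl(b, EVTCHN_BIND, port) == -1 && errno == EISCONN);
    assert(ioctl(b, EVTCHN_UNBIND, port) == -1 && errno == ENOTCONN);

    close(a);   /* release() unbinds and masks every port 'a' owned */
    close(b);
}
```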