debuggers.hg

changeset 640:81fe7d3a9c46

bitkeeper revision 1.334.1.1 (3f0d64fbAhSrn0pFkVtseG4cWqCOmw)

Add some basic locking to the segment code. I'm not entirely
convinced that this is correct, but it's better than the old
version and is probably very close to being right.
author sos22@labyrinth.cl.cam.ac.uk
date Thu Jul 10 13:07:07 2003 +0000 (2003-07-10)
parents 75e23848b238
children 7f7fc97be5fd
files xen/drivers/block/xen_segment.c
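
The patch introduces a single spinlock (xsegment_lock) guarding the two
transitions that the new header comment describes: UNUSED -> RO/RW when a
segment is created, and RO/RW -> UNUSED at domain teardown.  A minimal
userspace sketch of that discipline is below; it is illustrative only,
not Xen code, and the structure, the pthread mutex standing in for the
hypervisor spinlock, and the function names are all made up for the
example.

    #include <pthread.h>
    #include <stddef.h>

    enum seg_mode { SEG_UNUSED, SEG_RO, SEG_RW };  /* stand-in for XEN_SEGMENT_* */

    struct segment {
        enum seg_mode mode;
        int domain;                 /* owning domain, -1 when unused */
    };

    #define MAX_SEGMENTS 16

    static struct segment segments[MAX_SEGMENTS];
    /* Stands in for xsegment_lock (a spinlock in the patch). */
    static pthread_mutex_t segment_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Transition 1: UNUSED -> RO/RW.  Done under the lock so two creators
     * cannot claim the same free slot. */
    static struct segment *segment_create(int domain, enum seg_mode mode)
    {
        struct segment *s = NULL;
        pthread_mutex_lock(&segment_lock);
        for (int i = 0; i < MAX_SEGMENTS; i++) {
            if (segments[i].mode == SEG_UNUSED) {
                segments[i].mode = mode;
                segments[i].domain = domain;
                s = &segments[i];
                break;
            }
        }
        pthread_mutex_unlock(&segment_lock);
        return s;
    }

    /* Transition 2: RO/RW -> UNUSED.  Also under the lock, and only during
     * domain teardown, so no in-flight request can still be using the entry. */
    static void segment_delete(struct segment *s)
    {
        pthread_mutex_lock(&segment_lock);
        s->domain = -1;
        s->mode = SEG_UNUSED;
        pthread_mutex_unlock(&segment_lock);
    }

The probe and refresh paths in the patch take the same lock only so that
they see a consistent snapshot of the table while walking it.
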
line diff
     1.1 --- a/xen/drivers/block/xen_segment.c	Thu Jul 10 12:32:49 2003 +0000
     1.2 +++ b/xen/drivers/block/xen_segment.c	Thu Jul 10 13:07:07 2003 +0000
     1.3 @@ -15,7 +15,21 @@
     1.4  #include <asm/domain_page.h>
     1.5  #include <hypervisor-ifs/block.h>
     1.6  
     1.7 +/* Global list of all possible segments.  This can be changed in
     1.8 +   the following way:
     1.9 +
    1.10 +   1) UNUSED segment -> RO or RW segment.  This requires the spinlock.
    1.11 +
    1.12 +   2) RO or RW -> UNUSED.  This requires the lock and can only happen
    1.13 +   during process teardown.
    1.14 +
    1.15 +   This means that processes can access entries in the list safely
    1.16 +   without having to hold any lock at all: they already have an entry
    1.17 +   allocated, and we know that entry can't become unused, as segments
     1.18 +   are only torn down when the domain is dying, by which point it
    1.19 +   can't be accessing them anymore. */
    1.20  static segment_t xsegments[XEN_MAX_SEGMENTS];
    1.21 +static spinlock_t xsegment_lock = SPIN_LOCK_UNLOCKED;
    1.22  
    1.23  #if 0
    1.24  #define DPRINTK(_f, _a...) printk( _f , ## _a )
    1.25 @@ -23,9 +37,6 @@ static segment_t xsegments[XEN_MAX_SEGME
    1.26  #define DPRINTK(_f, _a...) ((void)0)
    1.27  #endif
    1.28  
    1.29 -/* XXX XXX XXX Why are there absolutely no calls to any locking
    1.30 -   primitives anywhere in this? */
    1.31 -
    1.32  /*
    1.33   * xen_segment_map_request
    1.34   *
    1.35 @@ -33,6 +44,12 @@ static segment_t xsegments[XEN_MAX_SEGME
    1.36   * 
    1.37   * NB. All offsets and sizes here are in sector units.
    1.38   * eg. 'size == 1' means an actual size of 512 bytes.
    1.39 + *
    1.40 + * Note that no locking is performed here whatsoever --
    1.41 + * we rely on the fact that once segment information is
    1.42 + * established, it is only modified by domain shutdown,
     1.43 + * and so if this is being called, no one is trying
    1.44 + * to modify the segment list.
    1.45   */
    1.46  int xen_segment_map_request(
    1.47      phys_seg_t *pseg, struct task_struct *p, int operation,
    1.48 @@ -162,6 +179,8 @@ void xen_segment_probe(struct task_struc
    1.49      xen_disk_info_t *xdi = map_domain_mem(virt_to_phys(raw_xdi));
    1.50      unsigned long capacity = 0, device;
    1.51  
    1.52 +    spin_lock(&xsegment_lock);
    1.53 +    xdi->count = 0;
    1.54      for ( loop = 0; loop < XEN_MAX_SEGMENTS; loop++ )
    1.55      {
    1.56          if ( (xsegments[loop].mode == XEN_SEGMENT_UNUSED) ||
    1.57 @@ -176,6 +195,7 @@ void xen_segment_probe(struct task_struc
    1.58          xdi->disks[xdi->count].capacity = capacity;
    1.59          xdi->count++;
    1.60      }
    1.61 +    spin_unlock(&xsegment_lock);
    1.62  
    1.63      unmap_domain_mem(xdi);
    1.64  }
    1.65 @@ -190,6 +210,7 @@ void xen_segment_probe_all(xen_segment_i
    1.66      int loop;
    1.67      xen_segment_info_t *xsi = map_domain_mem(virt_to_phys(raw_xsi));
    1.68  
    1.69 +    spin_lock(&xsegment_lock);
    1.70      xsi->count = 0;
    1.71      for ( loop = 0; loop < XEN_MAX_SEGMENTS; loop++ )
    1.72      {
    1.73 @@ -204,6 +225,7 @@ void xen_segment_probe_all(xen_segment_i
    1.74  	xsi->segments[xsi->count].seg_nr = xsegments[loop].segment_number;
    1.75          xsi->count++;	
    1.76      }
    1.77 +    spin_unlock(&xsegment_lock);
    1.78  
    1.79      unmap_domain_mem(xsi);
    1.80  }
    1.81 @@ -213,11 +235,13 @@ void xen_segment_probe_all(xen_segment_i
    1.82   *
    1.83   * find all segments associated with a domain and assign
    1.84   * them to the domain
    1.85 + *
    1.86   */
    1.87  void xen_refresh_segment_list (struct task_struct *p)
    1.88  {
    1.89      int loop;
    1.90  
    1.91 +    spin_lock(&xsegment_lock);
    1.92      for (loop = 0; loop < XEN_MAX_SEGMENTS; loop++)
    1.93      {
    1.94          if ( (xsegments[loop].mode == XEN_SEGMENT_UNUSED) ||
    1.95 @@ -226,6 +250,7 @@ void xen_refresh_segment_list (struct ta
    1.96  
    1.97          p->segment_list[xsegments[loop].segment_number] = &xsegments[loop];
    1.98      }
    1.99 +    spin_unlock(&xsegment_lock);
   1.100  }
   1.101  
   1.102  /*
   1.103 @@ -244,6 +269,7 @@ int xen_segment_create(xv_disk_t *xvd_in
   1.104      xv_disk_t *xvd = map_domain_mem(virt_to_phys(xvd_in));
   1.105      struct task_struct *p;
   1.106  
   1.107 +    spin_lock(&xsegment_lock);
   1.108      for (idx = 0; idx < XEN_MAX_SEGMENTS; idx++)
   1.109      {
   1.110          if (xsegments[idx].mode == XEN_SEGMENT_UNUSED ||
   1.111 @@ -262,8 +288,10 @@ int xen_segment_create(xv_disk_t *xvd_in
   1.112      xsegments[idx].segment_number = xvd->segment;
   1.113      memcpy(xsegments[idx].key, xvd->key, XEN_SEGMENT_KEYSIZE);
   1.114      xsegments[idx].num_extents = xvd->ext_count;
   1.115 +
   1.116 +
   1.117      if (xsegments[idx].extents)
   1.118 -	kfree(xsegments[idx].extents);
   1.119 +	kfree(xsegments[idx].extents);    
   1.120      xsegments[idx].extents = (extent_t *)kmalloc(
   1.121          sizeof(extent_t)*xvd->ext_count,
   1.122          GFP_KERNEL);
   1.123 @@ -290,6 +318,8 @@ int xen_segment_create(xv_disk_t *xvd_in
   1.124          put_task_struct(p);
   1.125      }
   1.126  
   1.127 +    spin_unlock(&xsegment_lock);
   1.128 +
   1.129      unmap_domain_mem(xvd);
   1.130      return 0;
   1.131  }
   1.132 @@ -299,6 +329,8 @@ int xen_segment_create(xv_disk_t *xvd_in
   1.133   *
   1.134   * return 0 on success, 1 on failure
   1.135   *
   1.136 + * This should *only* be called from domain shutdown, or else we
   1.137 + * race with access checking.
   1.138   */
   1.139  int xen_segment_delete(struct task_struct *p, int segnr)
   1.140  {
   1.141 @@ -330,12 +362,16 @@ int xen_segment_delete(struct task_struc
   1.142  	return 1;
   1.143      }
   1.144  
   1.145 +    spin_lock(&xsegment_lock);
   1.146 +
   1.147      p->segment_list[segnr] = NULL;
   1.148      seg->domain = -1;
   1.149      seg->segment_number = -1;
   1.150      kfree(seg->extents);
   1.151      seg->mode = XEN_SEGMENT_UNUSED;
   1.152  
   1.153 +    spin_unlock(&xsegment_lock);
   1.154 +
   1.155      return 0;
   1.156  }
   1.157
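
For completeness, the lock-free reader path that the comments above
xen_segment_map_request and xen_segment_delete rely on could look like
the sketch below (again illustrative, reusing the hypothetical types from
the earlier sketch).  It is safe only because transition 2 happens solely
at domain teardown, so an entry that a running domain already holds in its
segment list can never flip back to UNUSED underneath it; if deletion
could happen at any other time, this unlocked read would race with it,
which is exactly the race the comment above xen_segment_delete warns about.

    /* Reader path: validate a request against a segment the domain already
     * owns.  No lock is taken: the entry was installed in the domain's
     * segment list before the domain started issuing requests, and it can
     * only become UNUSED again at domain teardown, by which point no more
     * requests arrive. */
    static int segment_map_request(struct segment **segment_list, int segnr,
                                   enum seg_mode wanted)
    {
        struct segment *s = segment_list[segnr];    /* unlocked read */
        if (s == NULL || s->mode == SEG_UNUSED)
            return -1;                              /* no such segment */
        if (wanted == SEG_RW && s->mode != SEG_RW)
            return -1;                              /* segment is read-only */
        return 0;                                   /* request may proceed */
    }
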