debuggers.hg

view xen/drivers/block/xen_segment.c @ 620:01725801761a

bitkeeper revision 1.322 (3f0d22cccb17_me9ZBYMkbZaFLovQg)

Get the new segment probing stuff to actually return useful
information...
author sos22@labyrinth.cl.cam.ac.uk
date Thu Jul 10 08:24:44 2003 +0000 (2003-07-10)
parents cc54519f9a1a
children 5ed7375f954a 3e071d151e22
line source
1 /*
2 * xen_segment.c
3 */
5 #include <xeno/config.h>
6 #include <xeno/types.h>
7 #include <xeno/lib.h>
8 #include <asm/io.h>
9 #include <xeno/slab.h>
10 #include <xeno/segment.h>
11 #include <xeno/sched.h>
12 #include <xeno/blkdev.h>
13 #include <xeno/keyhandler.h>
14 #include <asm/current.h>
15 #include <asm/domain_page.h>
16 #include <hypervisor-ifs/block.h>
/* Global table of all virtual-disk segments known to the hypervisor. */
segment_t xsegments[XEN_MAX_SEGMENTS];

/* Debug tracing: change '#if 0' to '#if 1' to enable verbose DPRINTK output. */
#if 0
#define DPRINTK(_f, _a...) printk( _f , ## _a )
#else
#define DPRINTK(_f, _a...) ((void)0)
#endif
26 /*
27 * xen_segment_map_request
28 *
29 * xen_device must be a valid device.
30 *
31 * NB. All offsets and sizes here are in sector units.
32 * eg. 'size == 1' means an actual size of 512 bytes.
33 */
34 int xen_segment_map_request(
35 phys_seg_t *pseg, struct task_struct *p, int operation,
36 unsigned short segment_number,
37 unsigned long sect_nr, unsigned long buffer, unsigned short nr_sects)
38 {
39 segment_t *seg;
40 extent_t *ext;
41 int sum, i;
43 segment_number &= XENDEV_IDX_MASK;
44 if ( segment_number >= XEN_MAX_SEGMENTS )
45 {
46 DPRINTK("invalid segment number. %d %d\n",
47 segment_number, XEN_MAX_SEGMENTS);
48 goto fail;
49 }
51 seg = p->segment_list[segment_number];
52 if ( seg == NULL )
53 {
54 DPRINTK("segment is null. %d\n", segment_number);
55 goto fail;
56 }
58 /* check domain permissions */
59 if ( seg->domain != p->domain )
60 {
61 DPRINTK("seg is for another domain. %d %d\n", seg->domain, p->domain);
62 goto fail;
63 }
65 /* check rw access */
66 if ( ((operation == WRITE) && (seg->mode != XEN_SEGMENT_RW)) ||
67 ((operation == READ) && (seg->mode == XEN_SEGMENT_UNUSED)) )
68 {
69 DPRINTK("illegal operation: %d %d\n", operation, seg->mode);
70 goto fail;
71 }
73 if ( (nr_sects + sect_nr) <= sect_nr )
74 {
75 DPRINTK("sector + size wrap! %08lx %04x\n", sect_nr, nr_sects);
76 goto fail;
77 }
79 /* find extent, check size */
80 sum = 0;
81 i = 0;
82 ext = seg->extents;
83 while ( (i < seg->num_extents) && ((sum + ext->size) <= sect_nr) )
84 {
85 sum += ext->size;
86 ext++; i++;
87 }
89 if ( (sum + ext->size) <= sect_nr )
90 {
91 DPRINTK("extent size mismatch: %d %d : %d %ld %ld\n",
92 i, seg->num_extents, sum, ext->size, sect_nr);
93 goto fail;
94 }
96 pseg->sector_number = (sect_nr - sum) + ext->offset;
97 pseg->buffer = buffer;
98 pseg->nr_sects = nr_sects;
99 pseg->dev = xendev_to_physdev(ext->disk);
100 if ( pseg->dev == 0 )
101 {
102 DPRINTK ("invalid device 0x%x 0x%lx 0x%lx\n",
103 ext->disk, ext->offset, ext->size);
104 goto fail;
105 }
107 /* We're finished if the virtual extent didn't overrun the phys extent. */
108 if ( (sum + ext->size) >= (sect_nr + nr_sects) )
109 return 1; /* entire read fits in this extent */
111 /* Hmmm... make sure there's another extent to overrun onto! */
112 if ( (i+1) == seg->num_extents )
113 {
114 DPRINTK ("not enough extents %d %d\n",
115 i, seg->num_extents);
116 goto fail;
117 }
119 pseg[1].nr_sects = (sect_nr + nr_sects) - (sum + ext->size);
120 pseg[0].nr_sects = sum + ext->size - sect_nr;
121 pseg[1].buffer = buffer + (pseg->nr_sects << 9);
122 pseg[1].sector_number = ext[1].offset;
123 pseg[1].dev = xendev_to_physdev(ext[1].disk);
124 if ( pseg[1].dev == 0 )
125 {
126 DPRINTK ("bogus device for pseg[1] \n");
127 goto fail;
128 }
130 /* We don't allow overrun onto a third physical extent. */
131 if ( pseg[1].nr_sects > ext[1].size )
132 {
133 DPRINTK ("third extent\n");
134 DPRINTK (" sum:%d, e0:%ld, e1:%ld p1.sect:%ld p1.nr:%d\n",
135 sum, ext[0].size, ext[1].size,
136 pseg[1].sector_number, pseg[1].nr_sects);
137 goto fail;
138 }
140 return 2; /* We overran onto a second physical extent. */
142 fail:
143 DPRINTK ("xen_segment_map_request failure\n");
144 DPRINTK ("operation: %d\n", operation);
145 DPRINTK ("segment number: %d\n", segment_number);
146 DPRINTK ("sect_nr: %ld 0x%lx\n", sect_nr, sect_nr);
147 DPRINTK ("nr_sects: %d 0x%x\n", nr_sects, nr_sects);
148 return -1;
149 }
151 /*
152 * xen_segment_probe
153 *
154 * return a list of segments to the guestos
155 */
156 void xen_segment_probe(struct task_struct *p, xen_disk_info_t *raw_xdi)
157 {
158 int loop, i;
159 xen_disk_info_t *xdi = map_domain_mem(virt_to_phys(raw_xdi));
160 unsigned long capacity = 0, device;
162 for ( loop = 0; loop < XEN_MAX_SEGMENTS; loop++ )
163 {
164 if ( (xsegments[loop].mode == XEN_SEGMENT_UNUSED) ||
165 (xsegments[loop].domain != p->domain) )
166 continue;
168 device = MK_VIRTUAL_XENDEV(xsegments[loop].segment_number);
169 for ( i = 0; i < xsegments[loop].num_extents; i++ )
170 capacity += xsegments[loop].extents[i].size;
172 xdi->disks[xdi->count].device = device;
173 xdi->disks[xdi->count].capacity = capacity;
174 xdi->count++;
175 }
177 unmap_domain_mem(xdi);
178 }
180 /*
181 * xen_segment_probe_all
182 *
183 * return a list of all segments to domain 0
184 */
185 void xen_segment_probe_all(xen_segment_info_t *raw_xsi)
186 {
187 int loop;
188 xen_segment_info_t *xsi = map_domain_mem(virt_to_phys(raw_xsi));
189 unsigned long device;
191 xsi->count = 0;
192 for ( loop = 0; loop < XEN_MAX_SEGMENTS; loop++ )
193 {
194 if ( xsegments[loop].mode == XEN_SEGMENT_UNUSED )
195 continue;
197 device = MK_VIRTUAL_XENDEV(xsegments[loop].segment_number);
199 printk("Doing seg %d.\n", xsi->count);
200 xsi->segments[xsi->count].device = device;
201 xsi->segments[xsi->count].domain = xsegments[loop].domain;
202 memcpy(xsi->segments[xsi->count].key,
203 xsegments[loop].key,
204 XEN_SEGMENT_KEYSIZE);
205 xsi->segments[xsi->count].seg_nr = xsegments[loop].segment_number;
206 printk("Done.\n");
207 xsi->count++;
208 }
210 unmap_domain_mem(xsi);
211 }
213 /*
214 * xen_refresh_segment_list
215 *
216 * find all segments associated with a domain and assign
217 * them to the domain
218 */
219 void xen_refresh_segment_list (struct task_struct *p)
220 {
221 int loop;
223 for (loop = 0; loop < XEN_MAX_SEGMENTS; loop++)
224 {
225 if ( (xsegments[loop].mode == XEN_SEGMENT_UNUSED) ||
226 (xsegments[loop].domain != p->domain) )
227 continue;
229 p->segment_list[xsegments[loop].segment_number] = &xsegments[loop];
230 }
231 }
233 /*
234 * create a new segment for a domain
235 *
236 * return 0 on success, 1 on failure
237 *
238 * if we see the same DOM#/SEG# combination, we reuse the slot in
239 * the segment table (overwriting what was there before).
240 * an alternative would be to raise an error if the slot is reused.
241 * bug: we don't free the xtents array when we re-use a slot.
242 */
243 int xen_segment_create(xv_disk_t *xvd_in)
244 {
245 int idx;
246 int loop;
247 xv_disk_t *xvd = map_domain_mem(virt_to_phys(xvd_in));
248 struct task_struct *p;
250 for (idx = 0; idx < XEN_MAX_SEGMENTS; idx++)
251 {
252 if (xsegments[idx].mode == XEN_SEGMENT_UNUSED ||
253 (xsegments[idx].domain == xvd->domain &&
254 xsegments[idx].segment_number == xvd->segment)) break;
255 }
256 if (idx == XEN_MAX_SEGMENTS)
257 {
258 printk (KERN_ALERT "xen_segment_create: unable to find free slot\n");
259 unmap_domain_mem(xvd);
260 return 1;
261 }
263 xsegments[idx].mode = xvd->mode;
264 xsegments[idx].domain = xvd->domain;
265 xsegments[idx].segment_number = xvd->segment;
266 memcpy(xsegments[idx].key, xvd->key, XEN_SEGMENT_KEYSIZE);
267 xsegments[idx].num_extents = xvd->ext_count;
268 xsegments[idx].extents = (extent_t *)kmalloc(
269 sizeof(extent_t)*xvd->ext_count,
270 GFP_KERNEL);
272 /* could memcpy, but this is safer */
273 for (loop = 0; loop < xvd->ext_count; loop++)
274 {
275 xsegments[idx].extents[loop].disk = xvd->extents[loop].disk;
276 xsegments[idx].extents[loop].offset = xvd->extents[loop].offset;
277 xsegments[idx].extents[loop].size = xvd->extents[loop].size;
278 if (xsegments[idx].extents[loop].size == 0)
279 {
280 printk("xen_segment_create: extent %d is zero length\n", loop);
281 unmap_domain_mem(xvd);
282 return 1;
283 }
284 }
286 /* if the domain exists, assign the segment to the domain */
287 p = find_domain_by_id(xvd->domain);
288 if (p != NULL)
289 {
290 p->segment_list[xvd->segment] = &xsegments[idx];
291 put_task_struct(p);
292 }
294 unmap_domain_mem(xvd);
295 return 0;
296 }
/*
 * xen_segment_delete
 *
 * delete a segment from a domain
 *
 * return 0 on success, 1 on failure
 *
 * NOTE(review): currently an unimplemented stub -- it modifies no state
 * and unconditionally reports success; both parameters are ignored.
 *
 * TODO: caller must ensure that only domain 0 calls this function
 */
int xen_segment_delete(struct task_struct *p, xv_disk_t *xvd)
{
    return 0;
}
310 static void dump_segments(u_char key, void *dev_id, struct pt_regs *regs)
311 {
312 int loop, i;
313 struct task_struct *p;
315 printk("segment list\n");
316 for (loop = 0; loop < XEN_MAX_SEGMENTS; loop++)
317 {
318 if (xsegments[loop].mode != XEN_SEGMENT_UNUSED)
319 {
320 printk(" %2d: %s dom%d, seg# %d, num_exts: %d\n",
321 loop,
322 xsegments[loop].mode == XEN_SEGMENT_RO ? "RO" : "RW",
323 xsegments[loop].domain, xsegments[loop].segment_number,
324 xsegments[loop].num_extents);
325 for (i = 0; i < xsegments[loop].num_extents; i++)
326 {
327 printk(" extent %d: disk 0x%x, offset 0x%lx, size 0x%lx\n",
328 i, xsegments[loop].extents[i].disk,
329 xsegments[loop].extents[i].offset,
330 xsegments[loop].extents[i].size);
331 }
332 }
333 }
335 printk("segments by domain (index into segments list)\n");
336 p = current;
337 do
338 {
339 printk(" domain %d: ", p->domain);
340 for (loop = 0; loop < XEN_MAX_SEGMENTS; loop++)
341 {
342 if (p->segment_list[loop])
343 {
344 printk (" %d", p->segment_list[loop] - xsegments);
345 }
346 }
347 printk("\n");
348 p = p->next_task;
349 } while (p != current);
350 }
352 /*
353 * initialize segments
354 */
356 void xen_segment_initialize(void)
357 {
358 memset (xsegments, 0, sizeof(xsegments));
360 add_key_handler('S', dump_segments, "dump segments");
361 }