debuggers.hg

view extras/mini-os/fs-front.c @ 20689:23bc248302df

mini-os: Fix memory leaks in blkfront, netfront, pcifront, etc.

The return value of Xenbus routines xenbus_transaction_start(),
xenbus_printf(), xenbus_transaction_end(), etc. is a pointer to an error
message. This pointer should be passed to free() to release the
allocated memory when it is no longer needed.

Signed-off-by: Yu Zhiguo <yuzg@cn.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Dec 14 09:51:07 2009 +0000 (2009-12-14)
parents e10d641b413f
children
line source
1 /******************************************************************************
2 * fs-front.c
3 *
4 * Frontend driver for FS split device driver.
5 *
6 * Copyright (c) 2007, Grzegorz Milos, <gm281@cam.ac.uk>.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to
10 * deal in the Software without restriction, including without limitation the
11 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
12 * sell copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
27 #undef NDEBUG
28 #include <stdint.h>
29 #include <mini-os/os.h>
30 #include <mini-os/list.h>
31 #include <mini-os/xmalloc.h>
32 #include <mini-os/xenbus.h>
33 #include <mini-os/gnttab.h>
34 #include <mini-os/events.h>
35 #include <xen/io/fsif.h>
36 #include <mini-os/fs.h>
37 #include <mini-os/sched.h>
39 #define preempt_disable()
40 #define preempt_enable()
41 #define cmpxchg(p,o,n) synch_cmpxchg(p,o,n)
44 #ifdef FS_DEBUG
45 #define DEBUG(_f, _a...) \
46 printk("MINI_OS(file=fs-front.c, line=%d) " _f "\n", __LINE__, ## _a)
47 #else
48 #define DEBUG(_f, _a...) ((void)0)
49 #endif
52 struct fs_request;
53 struct fs_import *fs_import;
54 void *alloc_buffer_page(struct fs_request *req, domid_t domid, grant_ref_t *gref);
55 void free_buffer_page(struct fs_request *req);
57 /******************************************************************************/
58 /* RING REQUEST/RESPONSES HANDLING */
59 /******************************************************************************/
61 struct fs_request
62 {
63 void *private1; /* Specific to request type */
64 void *private2;
65 struct thread *thread; /* Thread blocked on this request */
66 struct fsif_response shadow_rsp; /* Response copy writen by the
67 interrupt handler */
68 };
70 struct fs_rw_gnts
71 {
72 /* TODO 16 bit? */
73 int count;
74 grant_ref_t grefs[FSIF_NR_READ_GNTS];
75 void *pages[FSIF_NR_READ_GNTS];
76 };
78 /* Ring operations:
79 * FSIF ring is used differently to Linux-like split devices. This stems from
80 * the fact that no I/O request queue is present. The use of some of the macros
81 * defined in ring.h is not allowed, in particular:
82 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY cannot be used.
83 *
84 * The protocol used for FSIF ring is described below:
85 *
86 * In order to reserve a request the frontend:
87 * a) saves current frontend_ring->req_prod_pvt into a local variable
88 * b) checks that there are free request using the local req_prod_pvt
89 * c) tries to reserve the request using cmpxchg on frontend_ring->req_prod_pvt
90 * if cmpxchg fails, it means that someone reserved the request, start from
91 * a)
92 *
93 * In order to commit a request to the shared ring:
94 * a) cmpxchg shared_ring->req_prod from local req_prod_pvt to req_prod_pvt+1
95 * Loop if unsuccessful.
96 * NOTE: Request should be commited to the shared ring as quickly as possible,
97 * because otherwise other threads might busy loop trying to commit next
98 * requests. It also follows that preemption should be disabled, if
99 * possible, for the duration of the request construction.
100 */
102 /* Number of free requests (for use on front side only). */
103 #define FS_RING_FREE_REQUESTS(_r, _req_prod_pvt) \
104 (RING_SIZE(_r) - (_req_prod_pvt - (_r)->rsp_cons))
108 static RING_IDX reserve_fsif_request(struct fs_import *import)
109 {
110 RING_IDX idx;
112 down(&import->reqs_sem);
113 preempt_disable();
114 again:
115 /* We will attempt to reserve slot idx */
116 idx = import->ring.req_prod_pvt;
117 ASSERT (FS_RING_FREE_REQUESTS(&import->ring, idx));
118 /* Attempt to reserve */
119 if(cmpxchg(&import->ring.req_prod_pvt, idx, idx+1) != idx)
120 goto again;
122 return idx;
123 }
125 static void commit_fsif_request(struct fs_import *import, RING_IDX idx)
126 {
127 while(cmpxchg(&import->ring.sring->req_prod, idx, idx+1) != idx)
128 {
129 printk("Failed to commit a request: req_prod=%d, idx=%d\n",
130 import->ring.sring->req_prod, idx);
131 }
132 preempt_enable();
134 /* NOTE: we cannot do anything clever about rsp_event, to hold off
135 * notifications, because we don't know if we are a single request (in which
136 * case we have to notify always), or a part of a larger request group
137 * (when, in some cases, notification isn't required) */
138 notify_remote_via_evtchn(import->local_port);
139 }
143 static inline void add_id_to_freelist(unsigned int id,unsigned short* freelist)
144 {
145 unsigned int old_id, new_id;
147 again:
148 old_id = freelist[0];
149 /* Note: temporal inconsistency, since freelist[0] can be changed by someone
150 * else, but we are a sole owner of freelist[id + 1], it's OK. */
151 freelist[id + 1] = old_id;
152 new_id = id;
153 if(cmpxchg(&freelist[0], old_id, new_id) != old_id)
154 {
155 printk("Cmpxchg on freelist add failed.\n");
156 goto again;
157 }
158 }
160 /* always call reserve_fsif_request(import) before this, to protect from
161 * depletion. */
162 static inline unsigned short get_id_from_freelist(unsigned short* freelist)
163 {
164 unsigned int old_id, new_id;
166 again:
167 old_id = freelist[0];
168 new_id = freelist[old_id + 1];
169 if(cmpxchg(&freelist[0], old_id, new_id) != old_id)
170 {
171 printk("Cmpxchg on freelist remove failed.\n");
172 goto again;
173 }
175 return old_id;
176 }
178 /******************************************************************************/
179 /* END OF RING REQUEST/RESPONSES HANDLING */
180 /******************************************************************************/
184 /******************************************************************************/
185 /* INDIVIDUAL FILE OPERATIONS */
186 /******************************************************************************/
187 int fs_open(struct fs_import *import, char *file)
188 {
189 struct fs_request *fsr;
190 unsigned short priv_req_id;
191 grant_ref_t gref;
192 void *buffer;
193 RING_IDX back_req_id;
194 struct fsif_request *req;
195 int fd;
197 if (!import)
198 return -1;
200 /* Prepare request for the backend */
201 back_req_id = reserve_fsif_request(import);
202 DEBUG("Backend request id=%d\n", back_req_id);
204 /* Prepare our private request structure */
205 priv_req_id = get_id_from_freelist(import->freelist);
206 DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
207 fsr = &import->requests[priv_req_id];
208 buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
209 DEBUG("gref id=%d\n", gref);
210 fsr->thread = current;
211 sprintf(buffer, "%s", file);
213 req = RING_GET_REQUEST(&import->ring, back_req_id);
214 req->type = REQ_FILE_OPEN;
215 req->id = priv_req_id;
216 req->u.fopen.gref = gref;
218 /* Set blocked flag before commiting the request, thus avoiding missed
219 * response race */
220 block(current);
221 commit_fsif_request(import, back_req_id);
222 schedule();
224 /* Read the response */
225 fd = (int)fsr->shadow_rsp.u.ret_val;
226 DEBUG("The following FD returned: %d\n", fd);
227 free_buffer_page(fsr);
228 add_id_to_freelist(priv_req_id, import->freelist);
230 return fd;
231 }
233 int fs_close(struct fs_import *import, int fd)
234 {
235 struct fs_request *fsr;
236 unsigned short priv_req_id;
237 RING_IDX back_req_id;
238 struct fsif_request *req;
239 int ret;
241 if (!import)
242 return -1;
244 /* Prepare request for the backend */
245 back_req_id = reserve_fsif_request(import);
246 DEBUG("Backend request id=%d\n", back_req_id);
248 /* Prepare our private request structure */
249 priv_req_id = get_id_from_freelist(import->freelist);
250 DEBUG("Request id for fs_close call is: %d\n", priv_req_id);
251 fsr = &import->requests[priv_req_id];
252 fsr->thread = current;
254 req = RING_GET_REQUEST(&import->ring, back_req_id);
255 req->type = REQ_FILE_CLOSE;
256 req->id = priv_req_id;
257 req->u.fclose.fd = fd;
259 /* Set blocked flag before commiting the request, thus avoiding missed
260 * response race */
261 block(current);
262 commit_fsif_request(import, back_req_id);
263 schedule();
265 /* Read the response */
266 ret = (int)fsr->shadow_rsp.u.ret_val;
267 DEBUG("Close returned: %d\n", ret);
268 add_id_to_freelist(priv_req_id, import->freelist);
270 return ret;
271 }
273 ssize_t fs_read(struct fs_import *import, int fd, void *buf,
274 ssize_t len, ssize_t offset)
275 {
276 struct fs_request *fsr;
277 unsigned short priv_req_id;
278 struct fs_rw_gnts gnts;
279 RING_IDX back_req_id;
280 struct fsif_request *req;
281 ssize_t ret;
282 int i;
284 if (!import)
285 return -1;
287 BUG_ON(len > PAGE_SIZE * FSIF_NR_READ_GNTS);
289 /* Prepare request for the backend */
290 back_req_id = reserve_fsif_request(import);
291 DEBUG("Backend request id=%d\n", back_req_id);
293 /* Prepare our private request structure */
294 priv_req_id = get_id_from_freelist(import->freelist);
295 DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
296 fsr = &import->requests[priv_req_id];
298 req = RING_GET_REQUEST(&import->ring, back_req_id);
299 req->type = REQ_FILE_READ;
300 req->id = priv_req_id;
301 req->u.fread.fd = fd;
302 req->u.fread.len = len;
303 req->u.fread.offset = offset;
306 ASSERT(len > 0);
307 gnts.count = ((len - 1) / PAGE_SIZE) + 1;
308 for(i=0; i<gnts.count; i++)
309 {
310 gnts.pages[i] = (void *)alloc_page();
311 gnts.grefs[i] = gnttab_grant_access(import->dom_id,
312 virt_to_mfn(gnts.pages[i]),
313 0);
314 memset(gnts.pages[i], 0, PAGE_SIZE);
315 req->u.fread.grefs[i] = gnts.grefs[i];
316 }
317 fsr->thread = current;
319 /* Set blocked flag before commiting the request, thus avoiding missed
320 * response race */
321 block(current);
322 commit_fsif_request(import, back_req_id);
323 schedule();
325 /* Read the response */
326 ret = (ssize_t)fsr->shadow_rsp.u.ret_val;
327 DEBUG("The following ret value returned %d\n", ret);
328 if(ret > 0)
329 {
330 ssize_t to_copy = ret, current_copy;
331 for(i=0; i<gnts.count; i++)
332 {
333 gnttab_end_access(gnts.grefs[i]);
334 current_copy = to_copy > PAGE_SIZE ? PAGE_SIZE : to_copy;
335 if(current_copy > 0)
336 memcpy(buf, gnts.pages[i], current_copy);
337 to_copy -= current_copy;
338 buf = (char*) buf + current_copy;
339 free_page(gnts.pages[i]);
340 }
341 }
342 add_id_to_freelist(priv_req_id, import->freelist);
344 return ret;
345 }
347 ssize_t fs_write(struct fs_import *import, int fd, void *buf,
348 ssize_t len, ssize_t offset)
349 {
350 struct fs_request *fsr;
351 unsigned short priv_req_id;
352 struct fs_rw_gnts gnts;
353 RING_IDX back_req_id;
354 struct fsif_request *req;
355 ssize_t ret, to_copy;
356 int i;
358 if (!import)
359 return -1;
361 BUG_ON(len > PAGE_SIZE * FSIF_NR_WRITE_GNTS);
363 /* Prepare request for the backend */
364 back_req_id = reserve_fsif_request(import);
365 DEBUG("Backend request id=%d\n", back_req_id);
367 /* Prepare our private request structure */
368 priv_req_id = get_id_from_freelist(import->freelist);
369 DEBUG("Request id for fs_read call is: %d\n", priv_req_id);
370 fsr = &import->requests[priv_req_id];
372 req = RING_GET_REQUEST(&import->ring, back_req_id);
373 req->type = REQ_FILE_WRITE;
374 req->id = priv_req_id;
375 req->u.fwrite.fd = fd;
376 req->u.fwrite.len = len;
377 req->u.fwrite.offset = offset;
379 ASSERT(len > 0);
380 gnts.count = ((len - 1) / PAGE_SIZE) + 1;
381 to_copy = len;
382 for(i=0; i<gnts.count; i++)
383 {
384 int current_copy = (to_copy > PAGE_SIZE ? PAGE_SIZE : to_copy);
385 gnts.pages[i] = (void *)alloc_page();
386 gnts.grefs[i] = gnttab_grant_access(import->dom_id,
387 virt_to_mfn(gnts.pages[i]),
388 0);
389 memcpy(gnts.pages[i], buf, current_copy);
390 if(current_copy < PAGE_SIZE)
391 memset((char *)gnts.pages[i] + current_copy,
392 0,
393 PAGE_SIZE - current_copy);
394 req->u.fwrite.grefs[i] = gnts.grefs[i];
395 to_copy -= current_copy;
396 buf = (char*) buf + current_copy;
397 }
398 fsr->thread = current;
400 /* Set blocked flag before commiting the request, thus avoiding missed
401 * response race */
402 block(current);
403 commit_fsif_request(import, back_req_id);
404 schedule();
406 /* Read the response */
407 ret = (ssize_t)fsr->shadow_rsp.u.ret_val;
408 DEBUG("The following ret value returned %d\n", ret);
409 for(i=0; i<gnts.count; i++)
410 {
411 gnttab_end_access(gnts.grefs[i]);
412 free_page(gnts.pages[i]);
413 }
414 add_id_to_freelist(priv_req_id, import->freelist);
416 return ret;
417 }
419 int fs_stat(struct fs_import *import,
420 int fd,
421 struct fsif_stat_response *stat)
422 {
423 struct fs_request *fsr;
424 unsigned short priv_req_id;
425 RING_IDX back_req_id;
426 struct fsif_request *req;
427 int ret;
429 if (!import)
430 return -1;
432 /* Prepare request for the backend */
433 back_req_id = reserve_fsif_request(import);
434 DEBUG("Backend request id=%d\n", back_req_id);
436 /* Prepare our private request structure */
437 priv_req_id = get_id_from_freelist(import->freelist);
438 DEBUG("Request id for fs_stat call is: %d\n", priv_req_id);
439 fsr = &import->requests[priv_req_id];
440 fsr->thread = current;
442 req = RING_GET_REQUEST(&import->ring, back_req_id);
443 req->type = REQ_STAT;
444 req->id = priv_req_id;
445 req->u.fstat.fd = fd;
447 /* Set blocked flag before commiting the request, thus avoiding missed
448 * response race */
449 block(current);
450 commit_fsif_request(import, back_req_id);
451 schedule();
453 /* Read the response */
454 ret = (int)fsr->shadow_rsp.u.ret_val;
455 DEBUG("Following ret from fstat: %d\n", ret);
456 memcpy(stat,
457 &fsr->shadow_rsp.u.fstat,
458 sizeof(struct fsif_stat_response));
459 add_id_to_freelist(priv_req_id, import->freelist);
461 return ret;
462 }
464 int fs_truncate(struct fs_import *import,
465 int fd,
466 int64_t length)
467 {
468 struct fs_request *fsr;
469 unsigned short priv_req_id;
470 RING_IDX back_req_id;
471 struct fsif_request *req;
472 int ret;
474 if (!import)
475 return -1;
477 /* Prepare request for the backend */
478 back_req_id = reserve_fsif_request(import);
479 DEBUG("Backend request id=%d\n", back_req_id);
481 /* Prepare our private request structure */
482 priv_req_id = get_id_from_freelist(import->freelist);
483 DEBUG("Request id for fs_truncate call is: %d\n", priv_req_id);
484 fsr = &import->requests[priv_req_id];
485 fsr->thread = current;
487 req = RING_GET_REQUEST(&import->ring, back_req_id);
488 req->type = REQ_FILE_TRUNCATE;
489 req->id = priv_req_id;
490 req->u.ftruncate.fd = fd;
491 req->u.ftruncate.length = length;
493 /* Set blocked flag before commiting the request, thus avoiding missed
494 * response race */
495 block(current);
496 commit_fsif_request(import, back_req_id);
497 schedule();
499 /* Read the response */
500 ret = (int)fsr->shadow_rsp.u.ret_val;
501 DEBUG("Following ret from ftruncate: %d\n", ret);
502 add_id_to_freelist(priv_req_id, import->freelist);
504 return ret;
505 }
507 int fs_remove(struct fs_import *import, char *file)
508 {
509 struct fs_request *fsr;
510 unsigned short priv_req_id;
511 grant_ref_t gref;
512 void *buffer;
513 RING_IDX back_req_id;
514 struct fsif_request *req;
515 int ret;
517 if (!import)
518 return -1;
520 /* Prepare request for the backend */
521 back_req_id = reserve_fsif_request(import);
522 DEBUG("Backend request id=%d\n", back_req_id);
524 /* Prepare our private request structure */
525 priv_req_id = get_id_from_freelist(import->freelist);
526 DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
527 fsr = &import->requests[priv_req_id];
528 buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
529 DEBUG("gref=%d\n", gref);
530 fsr->thread = current;
531 sprintf(buffer, "%s", file);
533 req = RING_GET_REQUEST(&import->ring, back_req_id);
534 req->type = REQ_REMOVE;
535 req->id = priv_req_id;
536 req->u.fremove.gref = gref;
538 /* Set blocked flag before commiting the request, thus avoiding missed
539 * response race */
540 block(current);
541 commit_fsif_request(import, back_req_id);
542 schedule();
544 /* Read the response */
545 ret = (int)fsr->shadow_rsp.u.ret_val;
546 DEBUG("The following ret: %d\n", ret);
547 free_buffer_page(fsr);
548 add_id_to_freelist(priv_req_id, import->freelist);
550 return ret;
551 }
554 int fs_rename(struct fs_import *import,
555 char *old_file_name,
556 char *new_file_name)
557 {
558 struct fs_request *fsr;
559 unsigned short priv_req_id;
560 grant_ref_t gref;
561 void *buffer;
562 RING_IDX back_req_id;
563 struct fsif_request *req;
564 int ret;
565 char old_header[] = "old: ";
566 char new_header[] = "new: ";
568 if (!import)
569 return -1;
571 /* Prepare request for the backend */
572 back_req_id = reserve_fsif_request(import);
573 DEBUG("Backend request id=%d\n", back_req_id);
575 /* Prepare our private request structure */
576 priv_req_id = get_id_from_freelist(import->freelist);
577 DEBUG("Request id for fs_open call is: %d\n", priv_req_id);
578 fsr = &import->requests[priv_req_id];
579 buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
580 DEBUG("gref=%d\n", gref);
581 fsr->thread = current;
582 sprintf(buffer, "%s%s%c%s%s",
583 old_header, old_file_name, '\0', new_header, new_file_name);
585 req = RING_GET_REQUEST(&import->ring, back_req_id);
586 req->type = REQ_RENAME;
587 req->id = priv_req_id;
588 req->u.frename.gref = gref;
589 req->u.frename.old_name_offset = strlen(old_header);
590 req->u.frename.new_name_offset = strlen(old_header) +
591 strlen(old_file_name) +
592 strlen(new_header) +
593 1 /* Accouning for the additional
594 end of string character */;
596 /* Set blocked flag before commiting the request, thus avoiding missed
597 * response race */
598 block(current);
599 commit_fsif_request(import, back_req_id);
600 schedule();
602 /* Read the response */
603 ret = (int)fsr->shadow_rsp.u.ret_val;
604 DEBUG("The following ret: %d\n", ret);
605 free_buffer_page(fsr);
606 add_id_to_freelist(priv_req_id, import->freelist);
608 return ret;
609 }
611 int fs_create(struct fs_import *import, char *name,
612 int8_t directory, int32_t mode)
613 {
614 struct fs_request *fsr;
615 unsigned short priv_req_id;
616 grant_ref_t gref;
617 void *buffer;
618 RING_IDX back_req_id;
619 struct fsif_request *req;
620 int ret;
622 if (!import)
623 return -1;
625 /* Prepare request for the backend */
626 back_req_id = reserve_fsif_request(import);
627 DEBUG("Backend request id=%d\n", back_req_id);
629 /* Prepare our private request structure */
630 priv_req_id = get_id_from_freelist(import->freelist);
631 DEBUG("Request id for fs_create call is: %d\n", priv_req_id);
632 fsr = &import->requests[priv_req_id];
633 buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
634 DEBUG("gref=%d\n", gref);
635 fsr->thread = current;
636 sprintf(buffer, "%s", name);
638 req = RING_GET_REQUEST(&import->ring, back_req_id);
639 req->type = REQ_CREATE;
640 req->id = priv_req_id;
641 req->u.fcreate.gref = gref;
642 req->u.fcreate.directory = directory;
643 req->u.fcreate.mode = mode;
645 /* Set blocked flag before commiting the request, thus avoiding missed
646 * response race */
647 block(current);
648 commit_fsif_request(import, back_req_id);
649 schedule();
651 /* Read the response */
652 ret = (int)fsr->shadow_rsp.u.ret_val;
653 DEBUG("The following ret: %d\n", ret);
654 free_buffer_page(fsr);
655 add_id_to_freelist(priv_req_id, import->freelist);
657 return ret;
658 }
660 char** fs_list(struct fs_import *import, char *name,
661 int32_t offset, int32_t *nr_files, int *has_more)
662 {
663 struct fs_request *fsr;
664 unsigned short priv_req_id;
665 grant_ref_t gref;
666 void *buffer;
667 RING_IDX back_req_id;
668 struct fsif_request *req;
669 char **files, *current_file;
670 int i;
672 if (!import)
673 return NULL;
675 DEBUG("Different masks: NR_FILES=(%llx, %d), ERROR=(%llx, %d), HAS_MORE(%llx, %d)\n",
676 NR_FILES_MASK, NR_FILES_SHIFT, ERROR_MASK, ERROR_SHIFT, HAS_MORE_FLAG, HAS_MORE_SHIFT);
678 /* Prepare request for the backend */
679 back_req_id = reserve_fsif_request(import);
680 DEBUG("Backend request id=%d\n", back_req_id);
682 /* Prepare our private request structure */
683 priv_req_id = get_id_from_freelist(import->freelist);
684 DEBUG("Request id for fs_list call is: %d\n", priv_req_id);
685 fsr = &import->requests[priv_req_id];
686 buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
687 DEBUG("gref=%d\n", gref);
688 fsr->thread = current;
689 sprintf(buffer, "%s", name);
691 req = RING_GET_REQUEST(&import->ring, back_req_id);
692 req->type = REQ_DIR_LIST;
693 req->id = priv_req_id;
694 req->u.flist.gref = gref;
695 req->u.flist.offset = offset;
697 /* Set blocked flag before commiting the request, thus avoiding missed
698 * response race */
699 block(current);
700 commit_fsif_request(import, back_req_id);
701 schedule();
703 /* Read the response */
704 *nr_files = (fsr->shadow_rsp.u.ret_val & NR_FILES_MASK) >> NR_FILES_SHIFT;
705 files = NULL;
706 if(*nr_files <= 0) goto exit;
707 files = malloc(sizeof(char*) * (*nr_files));
708 current_file = buffer;
709 for(i=0; i<*nr_files; i++)
710 {
711 files[i] = strdup(current_file);
712 current_file += strlen(current_file) + 1;
713 }
714 if(has_more != NULL)
715 *has_more = fsr->shadow_rsp.u.ret_val & HAS_MORE_FLAG;
716 free_buffer_page(fsr);
717 add_id_to_freelist(priv_req_id, import->freelist);
718 exit:
719 return files;
720 }
722 int fs_chmod(struct fs_import *import, int fd, int32_t mode)
723 {
724 struct fs_request *fsr;
725 unsigned short priv_req_id;
726 RING_IDX back_req_id;
727 struct fsif_request *req;
728 int ret;
730 if (!import)
731 return -1;
733 /* Prepare request for the backend */
734 back_req_id = reserve_fsif_request(import);
735 DEBUG("Backend request id=%d\n", back_req_id);
737 /* Prepare our private request structure */
738 priv_req_id = get_id_from_freelist(import->freelist);
739 DEBUG("Request id for fs_chmod call is: %d\n", priv_req_id);
740 fsr = &import->requests[priv_req_id];
741 fsr->thread = current;
743 req = RING_GET_REQUEST(&import->ring, back_req_id);
744 req->type = REQ_CHMOD;
745 req->id = priv_req_id;
746 req->u.fchmod.fd = fd;
747 req->u.fchmod.mode = mode;
749 /* Set blocked flag before commiting the request, thus avoiding missed
750 * response race */
751 block(current);
752 commit_fsif_request(import, back_req_id);
753 schedule();
755 /* Read the response */
756 ret = (int)fsr->shadow_rsp.u.ret_val;
757 DEBUG("The following returned: %d\n", ret);
758 add_id_to_freelist(priv_req_id, import->freelist);
760 return ret;
761 }
763 int64_t fs_space(struct fs_import *import, char *location)
764 {
765 struct fs_request *fsr;
766 unsigned short priv_req_id;
767 grant_ref_t gref;
768 void *buffer;
769 RING_IDX back_req_id;
770 struct fsif_request *req;
771 int64_t ret;
773 if (!import)
774 return -1;
776 /* Prepare request for the backend */
777 back_req_id = reserve_fsif_request(import);
778 DEBUG("Backend request id=%d\n", back_req_id);
780 /* Prepare our private request structure */
781 priv_req_id = get_id_from_freelist(import->freelist);
782 DEBUG("Request id for fs_space is: %d\n", priv_req_id);
783 fsr = &import->requests[priv_req_id];
784 buffer = alloc_buffer_page(fsr, import->dom_id, &gref);
785 DEBUG("gref=%d\n", gref);
786 fsr->thread = current;
787 sprintf(buffer, "%s", location);
789 req = RING_GET_REQUEST(&import->ring, back_req_id);
790 req->type = REQ_FS_SPACE;
791 req->id = priv_req_id;
792 req->u.fspace.gref = gref;
794 /* Set blocked flag before commiting the request, thus avoiding missed
795 * response race */
796 block(current);
797 commit_fsif_request(import, back_req_id);
798 schedule();
800 /* Read the response */
801 ret = (int64_t)fsr->shadow_rsp.u.ret_val;
802 DEBUG("The following returned: %lld\n", ret);
803 free_buffer_page(fsr);
804 add_id_to_freelist(priv_req_id, import->freelist);
806 return ret;
807 }
809 int fs_sync(struct fs_import *import, int fd)
810 {
811 struct fs_request *fsr;
812 unsigned short priv_req_id;
813 RING_IDX back_req_id;
814 struct fsif_request *req;
815 int ret;
817 if (!import)
818 return -1;
820 /* Prepare request for the backend */
821 back_req_id = reserve_fsif_request(import);
822 DEBUG("Backend request id=%d\n", back_req_id);
824 /* Prepare our private request structure */
825 priv_req_id = get_id_from_freelist(import->freelist);
826 DEBUG("Request id for fs_sync call is: %d\n", priv_req_id);
827 fsr = &import->requests[priv_req_id];
828 fsr->thread = current;
830 req = RING_GET_REQUEST(&import->ring, back_req_id);
831 req->type = REQ_FILE_SYNC;
832 req->id = priv_req_id;
833 req->u.fsync.fd = fd;
835 /* Set blocked flag before commiting the request, thus avoiding missed
836 * response race */
837 block(current);
838 commit_fsif_request(import, back_req_id);
839 schedule();
841 /* Read the response */
842 ret = (int)fsr->shadow_rsp.u.ret_val;
843 DEBUG("Close returned: %d\n", ret);
844 add_id_to_freelist(priv_req_id, import->freelist);
846 return ret;
847 }
850 /******************************************************************************/
851 /* END OF INDIVIDUAL FILE OPERATIONS */
852 /******************************************************************************/
854 void *alloc_buffer_page(struct fs_request *req, domid_t domid, grant_ref_t *gref)
855 {
856 void *page;
858 page = (void *)alloc_page();
859 *gref = gnttab_grant_access(domid, virt_to_mfn(page), 0);
860 req->private1 = page;
861 req->private2 = (void *)(uintptr_t)(*gref);
863 return page;
864 }
866 void free_buffer_page(struct fs_request *req)
867 {
868 gnttab_end_access((grant_ref_t)(uintptr_t)req->private2);
869 free_page(req->private1);
870 }
872 static void fsfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
873 {
874 struct fs_import *import = (struct fs_import*)data;
875 static int in_irq = 0;
876 RING_IDX cons, rp;
877 int more;
879 /* Check for non-reentrance */
880 BUG_ON(in_irq);
881 in_irq = 1;
883 DEBUG("Event from import [%d:%d].\n", import->dom_id, import->export_id);
884 moretodo:
885 rp = import->ring.sring->rsp_prod;
886 rmb(); /* Ensure we see queued responses up to 'rp'. */
887 cons = import->ring.rsp_cons;
888 while (cons != rp)
889 {
890 struct fsif_response *rsp;
891 struct fs_request *req;
893 rsp = RING_GET_RESPONSE(&import->ring, cons);
894 DEBUG("Response at idx=%d to request id=%d, ret_val=%lx\n",
895 cons, rsp->id, rsp->u.ret_val);
896 req = &import->requests[rsp->id];
897 memcpy(&req->shadow_rsp, rsp, sizeof(struct fsif_response));
898 DEBUG("Waking up: %s\n", req->thread->name);
899 wake(req->thread);
901 cons++;
902 up(&import->reqs_sem);
903 }
905 import->ring.rsp_cons = rp;
906 RING_FINAL_CHECK_FOR_RESPONSES(&import->ring, more);
907 if(more) goto moretodo;
909 in_irq = 0;
910 }
912 static void alloc_request_table(struct fs_import *import)
913 {
914 struct fs_request *requests;
915 int i;
917 BUG_ON(import->nr_entries <= 0);
918 printk("Allocating request array for import %d, nr_entries = %d.\n",
919 import->import_id, import->nr_entries);
920 requests = xmalloc_array(struct fs_request, import->nr_entries);
921 import->freelist = xmalloc_array(unsigned short, import->nr_entries + 1);
922 memset(import->freelist, 0, sizeof(unsigned short) * (import->nr_entries + 1));
923 for(i=0; i<import->nr_entries; i++)
924 add_id_to_freelist(i, import->freelist);
925 import->requests = requests;
926 }
929 /******************************************************************************/
930 /* FS TESTS */
931 /******************************************************************************/
934 void test_fs_import(void *data)
935 {
936 struct fs_import *import = (struct fs_import *)data;
937 int ret, fd, i, repeat_count;
938 int32_t nr_files;
939 char buffer[1024];
940 ssize_t offset;
941 char **files;
942 long ret64;
943 struct fsif_stat_response stat;
945 repeat_count = 10;
946 /* Sleep for 1s and then try to open a file */
947 msleep(1000);
948 again:
949 ret = fs_create(import, "mini-os-created-directory", 1, 0777);
950 printk("Directory create: %d\n", ret);
952 sprintf(buffer, "mini-os-created-directory/mini-os-created-file-%d",
953 repeat_count);
954 ret = fs_create(import, buffer, 0, 0666);
955 printk("File create: %d\n", ret);
957 fd = fs_open(import, buffer);
958 printk("File descriptor: %d\n", fd);
959 if(fd < 0) return;
961 offset = 0;
962 for(i=0; i<10; i++)
963 {
964 sprintf(buffer, "Current time is: %lld\n", NOW());
965 ret = fs_write(import, fd, buffer, strlen(buffer), offset);
966 printk("Writen current time (%d)\n", ret);
967 if(ret < 0)
968 return;
969 offset += ret;
970 }
971 ret = fs_stat(import, fd, &stat);
972 printk("Ret after stat: %d\n", ret);
973 printk(" st_mode=%o\n", stat.stat_mode);
974 printk(" st_uid =%d\n", stat.stat_uid);
975 printk(" st_gid =%d\n", stat.stat_gid);
976 printk(" st_size=%ld\n", stat.stat_size);
977 printk(" st_atime=%ld\n", stat.stat_atime);
978 printk(" st_mtime=%ld\n", stat.stat_mtime);
979 printk(" st_ctime=%ld\n", stat.stat_ctime);
981 ret = fs_close(import, fd);
982 printk("Closed fd: %d, ret=%d\n", fd, ret);
984 printk("Listing files in /\n");
985 files = fs_list(import, "/", 0, &nr_files, NULL);
986 for(i=0; i<nr_files; i++)
987 printk(" files[%d] = %s\n", i, files[i]);
989 ret64 = fs_space(import, "/");
990 printk("Free space: %lld (=%lld Mb)\n", ret64, (ret64 >> 20));
991 repeat_count--;
992 if(repeat_count > 0)
993 goto again;
995 }
#if 0
    /* Legacy single-shot FS exerciser (disabled).  Walks the fs_* frontend
     * API once: open/write/stat/truncate/remove/chmod/sync/close, directory
     * and file creation, listing, and a free-space query.  Kept only as a
     * reference when debugging the backend. */
    int fd, ret;
    char write_string[] = "\"test data written from minios\"";
    struct fsif_stat_response stat;
    char **files;
    int32_t nr_files, i;
    int64_t ret64;

    fd = fs_open(import, "test-export-file");
    ret = fs_write(import, fd, write_string, strlen(write_string), 0);
    printk("Ret after write: %d\n", ret);

    ret = fs_stat(import, fd, &stat);
    printk("Ret after stat: %d\n", ret);
    printk(" st_mode=%o\n", stat.stat_mode);
    printk(" st_uid =%d\n", stat.stat_uid);
    printk(" st_gid =%d\n", stat.stat_gid);
    printk(" st_size=%ld\n", stat.stat_size);
    printk(" st_atime=%ld\n", stat.stat_atime);
    printk(" st_mtime=%ld\n", stat.stat_mtime);
    printk(" st_ctime=%ld\n", stat.stat_ctime);

    ret = fs_truncate(import, fd, 30);
    printk("Ret after truncate: %d\n", ret);
    ret = fs_remove(import, "test-to-remove/test-file");
    printk("Ret after remove: %d\n", ret);
    ret = fs_remove(import, "test-to-remove");
    printk("Ret after remove: %d\n", ret);
    ret = fs_chmod(import, fd, 0700);
    printk("Ret after chmod: %d\n", ret);
    ret = fs_sync(import, fd);
    printk("Ret after sync: %d\n", ret);
    ret = fs_close(import, fd);

    ret = fs_create(import, "created-dir", 1, 0777);
    printk("Ret after dir create: %d\n", ret);
    ret = fs_create(import, "created-dir/created-file", 0, 0777);
    printk("Ret after file create: %d\n", ret);

    files = fs_list(import, "/", 15, &nr_files, NULL);
    for (i = 0; i < nr_files; i++)
        printk(" files[%d] = %s\n", i, files[i]);

    ret64 = fs_space(import, "created-dir");
    printk("Ret after space: %lld\n", ret64);
#endif
1050 /******************************************************************************/
1051 /* END OF FS TESTS */
1052 /******************************************************************************/
1054 static int init_fs_import(struct fs_import *import)
1056 char *err;
1057 xenbus_transaction_t xbt;
1058 char nodename[1024], r_nodename[1024], token[128], *message = NULL;
1059 struct fsif_sring *sring;
1060 int i, retry = 0;
1061 domid_t self_id;
1062 xenbus_event_queue events = NULL;
1064 printk("Initialising FS fortend to backend dom %d\n", import->dom_id);
1065 /* Allocate page for the shared ring */
1066 sring = (struct fsif_sring*) alloc_pages(FSIF_RING_SIZE_ORDER);
1067 memset(sring, 0, PAGE_SIZE * FSIF_RING_SIZE_PAGES);
1069 /* Init the shared ring */
1070 SHARED_RING_INIT(sring);
1071 ASSERT(FSIF_NR_READ_GNTS == FSIF_NR_WRITE_GNTS);
1073 /* Init private frontend ring */
1074 FRONT_RING_INIT(&import->ring, sring, PAGE_SIZE * FSIF_RING_SIZE_PAGES);
1075 import->nr_entries = import->ring.nr_ents;
1077 /* Allocate table of requests */
1078 alloc_request_table(import);
1079 init_SEMAPHORE(&import->reqs_sem, import->nr_entries);
1081 /* Grant access to the shared ring */
1082 for(i=0; i<FSIF_RING_SIZE_PAGES; i++)
1083 import->gnt_refs[i] =
1084 gnttab_grant_access(import->dom_id,
1085 virt_to_mfn((char *)sring + i * PAGE_SIZE),
1086 0);
1088 /* Allocate event channel */
1089 BUG_ON(evtchn_alloc_unbound(import->dom_id,
1090 fsfront_handler,
1091 //ANY_CPU,
1092 import,
1093 &import->local_port));
1094 unmask_evtchn(import->local_port);
1097 self_id = xenbus_get_self_id();
1098 /* Write the frontend info to a node in our Xenbus */
1099 sprintf(nodename, "/local/domain/%d/device/vfs/%d",
1100 self_id, import->import_id);
1102 again:
1103 err = xenbus_transaction_start(&xbt);
1104 if (err) {
1105 printk("starting transaction\n");
1106 free(err);
1109 err = xenbus_printf(xbt,
1110 nodename,
1111 "ring-size",
1112 "%u",
1113 FSIF_RING_SIZE_PAGES);
1114 if (err) {
1115 message = "writing ring-size";
1116 goto abort_transaction;
1119 for(i=0; i<FSIF_RING_SIZE_PAGES; i++)
1121 sprintf(r_nodename, "ring-ref-%d", i);
1122 err = xenbus_printf(xbt,
1123 nodename,
1124 r_nodename,
1125 "%u",
1126 import->gnt_refs[i]);
1127 if (err) {
1128 message = "writing ring-refs";
1129 goto abort_transaction;
1133 err = xenbus_printf(xbt,
1134 nodename,
1135 "event-channel",
1136 "%u",
1137 import->local_port);
1138 if (err) {
1139 message = "writing event-channel";
1140 goto abort_transaction;
1143 err = xenbus_printf(xbt, nodename, "state", STATE_READY, 0xdeadbeef);
1144 if (err) free(err);
1146 err = xenbus_transaction_end(xbt, 0, &retry);
1147 if (err) free(err);
1148 if (retry) {
1149 goto again;
1150 printk("completing transaction\n");
1153 /* Now, when our node is prepared we write request in the exporting domain
1154 * */
1155 printk("Our own id is %d\n", self_id);
1156 sprintf(r_nodename,
1157 "/local/domain/%d/backend/vfs/exports/requests/%d/%d/frontend",
1158 import->dom_id, self_id, import->export_id);
1159 BUG_ON(xenbus_write(XBT_NIL, r_nodename, nodename));
1161 goto done;
1163 abort_transaction:
1164 free(err);
1165 err = xenbus_transaction_end(xbt, 1, &retry);
1166 if (err) free(err);
1168 done:
1170 #define WAIT_PERIOD 10 /* Wait period in ms */
1171 #define MAX_WAIT 10 /* Max number of WAIT_PERIODs */
1172 import->backend = NULL;
1173 sprintf(r_nodename, "%s/backend", nodename);
1175 for(retry = MAX_WAIT; retry > 0; retry--)
1177 xenbus_read(XBT_NIL, r_nodename, &import->backend);
1178 if(import->backend)
1180 printk("Backend found at %s\n", import->backend);
1181 break;
1183 msleep(WAIT_PERIOD);
1186 if(!import->backend)
1188 printk("No backend available.\n");
1189 /* TODO - cleanup datastructures/xenbus */
1190 return 0;
1192 sprintf(r_nodename, "%s/state", import->backend);
1193 sprintf(token, "fs-front-%d", import->import_id);
1194 /* The token will not be unique if multiple imports are inited */
1195 xenbus_watch_path_token(XBT_NIL, r_nodename, r_nodename, &events);
1196 err = xenbus_wait_for_value(r_nodename, STATE_READY, &events);
1197 if (err) free(err);
1198 xenbus_unwatch_path_token(XBT_NIL, r_nodename, r_nodename);
1199 printk("Backend ready.\n");
1201 //create_thread("fs-tester", test_fs_import, import);
1203 return 1;
1206 static void add_export(struct minios_list_head *exports, unsigned int domid)
1208 char node[1024], **exports_list = NULL, *ret_msg;
1209 int j = 0;
1210 static int import_id = 0;
1212 sprintf(node, "/local/domain/%d/backend/vfs/exports", domid);
1213 ret_msg = xenbus_ls(XBT_NIL, node, &exports_list);
1214 if (ret_msg && strcmp(ret_msg, "ENOENT"))
1215 printk("couldn't read %s: %s\n", node, ret_msg);
1216 while(exports_list && exports_list[j])
1218 struct fs_import *import;
1219 int export_id = -1;
1221 sscanf(exports_list[j], "%d", &export_id);
1222 if(export_id >= 0)
1224 import = xmalloc(struct fs_import);
1225 import->dom_id = domid;
1226 import->export_id = export_id;
1227 import->import_id = import_id++;
1228 MINIOS_INIT_LIST_HEAD(&import->list);
1229 minios_list_add(&import->list, exports);
1231 free(exports_list[j]);
1232 j++;
1234 if(exports_list)
1235 free(exports_list);
1236 if(ret_msg)
1237 free(ret_msg);
#if 0
/* Disabled: scan every domain under /local/domain and collect all of their
 * VFS exports into a freshly allocated list.  Superseded by
 * init_fs_frontend(), which probes dom0 only. */
static struct minios_list_head* probe_exports(void)
{
    struct minios_list_head *exports;
    char **node_list = NULL, *msg = NULL;
    int i = 0;

    exports = xmalloc(struct minios_list_head);
    MINIOS_INIT_LIST_HEAD(exports);

    msg = xenbus_ls(XBT_NIL, "/local/domain", &node_list);
    if (msg)
    {
        printk("Could not list VFS exports (%s).\n", msg);
        goto exit;
    }

    while (node_list[i])
    {
        add_export(exports, atoi(node_list[i]));
        free(node_list[i]);
        i++;
    }

exit:
    if (msg)
        free(msg);
    if (node_list)
        free(node_list);
    return exports;
}
#endif
/* Global list of discovered FS imports; populated by add_export(). */
MINIOS_LIST_HEAD(exports);
1275 void init_fs_frontend(void)
1277 struct minios_list_head *entry;
1278 struct fs_import *import = NULL;
1279 printk("Initing FS frontend(s).\n");
1281 add_export(&exports, 0);
1282 minios_list_for_each(entry, &exports)
1284 import = minios_list_entry(entry, struct fs_import, list);
1285 printk("FS export [dom=%d, id=%d] found\n",
1286 import->dom_id, import->export_id);
1287 if (init_fs_import(import) != 0) {
1288 fs_import = import;
1289 break;
1293 if (!fs_import)
1294 printk("No FS import\n");
1297 /* TODO: shutdown */