debuggers.hg

view extras/mini-os/netfront.c @ 20689:23bc248302df

mini-os: Fix memory leaks in blkfront, netfront, pcifront, etc.

The return value of Xenbus routines such as xenbus_transaction_start(),
xenbus_printf(), and xenbus_transaction_end() is a pointer to an error
message. This pointer should be passed to free() to release the
allocated memory when it is no longer needed.

Signed-off-by: Yu Zhiguo <yuzg@cn.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Dec 14 09:51:07 2009 +0000 (2009-12-14)
parents e10d641b413f
children b20f897d6010
line source
1 /* Minimal network driver for Mini-OS.
2 * Copyright (c) 2006-2007 Jacob Gorm Hansen, University of Copenhagen.
3 * Based on netfront.c from Xen Linux.
4 *
5 * Does not handle fragments or extras.
6 */
8 #include <mini-os/os.h>
9 #include <mini-os/xenbus.h>
10 #include <mini-os/events.h>
11 #include <errno.h>
12 #include <xen/io/netif.h>
13 #include <mini-os/gnttab.h>
14 #include <mini-os/xmalloc.h>
15 #include <mini-os/time.h>
16 #include <mini-os/netfront.h>
17 #include <mini-os/lib.h>
18 #include <mini-os/semaphore.h>
20 DECLARE_WAIT_QUEUE_HEAD(netfront_queue);
22 #ifdef HAVE_LIBC
23 #define NETIF_SELECT_RX ((void*)-1)
24 #endif
28 #define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
29 #define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
30 #define GRANT_INVALID_REF 0
/* One RX/TX buffer: a page of payload memory plus the grant reference
 * under which that page is shared with the backend domain. */
struct net_buffer {
    void* page;       /* page allocated with alloc_page(); NULL for unused TX slots */
    grant_ref_t gref; /* grant covering 'page'; GRANT_INVALID_REF when revoked */
};
/* Per-device state for one netfront instance. */
struct netfront_dev {
    domid_t dom;  /* backend domain id, read from "<nodename>/backend-id" */

    /* In-place freelist of TX ids: slot 0 holds the head id, and slot
     * id+1 holds the id that follows 'id'.  See add_id_to_freelist()
     * and get_id_from_freelist(). */
    unsigned short tx_freelist[NET_TX_RING_SIZE + 1];
    struct semaphore tx_sem;  /* counts free TX slots; netfront_xmit() blocks on it */

    struct net_buffer rx_buffers[NET_RX_RING_SIZE];
    struct net_buffer tx_buffers[NET_TX_RING_SIZE];

    struct netif_tx_front_ring tx;  /* frontend half of the shared TX ring */
    struct netif_rx_front_ring rx;  /* frontend half of the shared RX ring */
    grant_ref_t tx_ring_ref;        /* grant of the TX shared-ring page */
    grant_ref_t rx_ring_ref;        /* grant of the RX shared-ring page */
    evtchn_port_t evtchn;           /* event channel shared with the backend */

    char *nodename;  /* frontend xenstore node, e.g. "device/vif/0" (strdup'ed) */
    char *backend;   /* backend xenstore path (from xenbus_read, must be freed) */
    char *mac;       /* MAC address string (from xenbus_read, must be freed) */

    xenbus_event_queue events;  /* queue for watch events on the backend state node */

#ifdef HAVE_LIBC
    int fd;              /* TAP-style file descriptor, or -1 when not open */
    unsigned char *data; /* caller's buffer for netfront_receive() */
    size_t len;          /* capacity of 'data' */
    size_t rlen;         /* bytes actually copied into 'data' by network_rx() */
#endif

    void (*netif_rx)(unsigned char* data, int len);  /* per-packet RX callback */
};
69 void init_rx_buffers(struct netfront_dev *dev);
/* Push 'id' onto the front of the TX id freelist.
 *
 * The freelist is encoded in-place: freelist[0] is the current head id,
 * and freelist[n + 1] stores the id that follows n. */
static inline void add_id_to_freelist(unsigned int id,unsigned short* freelist)
{
    unsigned short old_head = freelist[0];

    freelist[id + 1] = old_head; /* new entry links to the old head */
    freelist[0] = id;            /* 'id' becomes the new head */
}
/* Pop and return the id at the head of the TX id freelist.
 * The caller must guarantee the list is non-empty (tx_sem does this). */
static inline unsigned short get_id_from_freelist(unsigned short* freelist)
{
    unsigned short head = freelist[0];

    /* Unlink the head: its successor (stored at head + 1) takes over. */
    freelist[0] = freelist[head + 1];
    return head;
}
/* Default per-packet receive callback, used when the application does
 * not supply one.  Weak symbol: an application-defined netif_rx()
 * overrides this.  Just logs the frame's length and address. */
__attribute__((weak)) void netif_rx(unsigned char* data,int len)
{
    printk("%d bytes incoming at %p\n",len,data);
}
/* Application entry hook; weak no-op stub that applications may override. */
__attribute__((weak)) void net_app_main(void*si,unsigned char*mac) {}
91 static inline int xennet_rxidx(RING_IDX idx)
92 {
93 return idx & (NET_RX_RING_SIZE - 1);
94 }
96 void network_rx(struct netfront_dev *dev)
97 {
98 RING_IDX rp,cons,req_prod;
99 struct netif_rx_response *rx;
100 int nr_consumed, some, more, i, notify;
103 moretodo:
104 rp = dev->rx.sring->rsp_prod;
105 rmb(); /* Ensure we see queued responses up to 'rp'. */
106 cons = dev->rx.rsp_cons;
108 nr_consumed = 0;
109 some = 0;
110 while ((cons != rp) && !some)
111 {
112 struct net_buffer* buf;
113 unsigned char* page;
114 int id;
116 rx = RING_GET_RESPONSE(&dev->rx, cons);
118 if (rx->flags & NETRXF_extra_info)
119 {
120 printk("+++++++++++++++++++++ we have extras!\n");
121 continue;
122 }
125 if (rx->status == NETIF_RSP_NULL) continue;
127 id = rx->id;
128 BUG_ON(id >= NET_TX_RING_SIZE);
130 buf = &dev->rx_buffers[id];
131 page = (unsigned char*)buf->page;
132 gnttab_end_access(buf->gref);
134 if(rx->status>0)
135 {
136 #ifdef HAVE_LIBC
137 if (dev->netif_rx == NETIF_SELECT_RX) {
138 int len = rx->status;
139 ASSERT(current == main_thread);
140 if (len > dev->len)
141 len = dev->len;
142 memcpy(dev->data, page+rx->offset, len);
143 dev->rlen = len;
144 some = 1;
145 } else
146 #endif
147 dev->netif_rx(page+rx->offset,rx->status);
148 }
150 nr_consumed++;
152 ++cons;
153 }
154 dev->rx.rsp_cons=cons;
156 RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx,more);
157 if(more && !some) goto moretodo;
159 req_prod = dev->rx.req_prod_pvt;
161 for(i=0; i<nr_consumed; i++)
162 {
163 int id = xennet_rxidx(req_prod + i);
164 netif_rx_request_t *req = RING_GET_REQUEST(&dev->rx, req_prod + i);
165 struct net_buffer* buf = &dev->rx_buffers[id];
166 void* page = buf->page;
168 /* We are sure to have free gnttab entries since they got released above */
169 buf->gref = req->gref =
170 gnttab_grant_access(dev->dom,virt_to_mfn(page),0);
172 req->id = id;
173 }
175 wmb();
177 dev->rx.req_prod_pvt = req_prod + i;
179 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
180 if (notify)
181 notify_remote_via_evtchn(dev->evtchn);
183 }
/* Reap completed TX responses: for each one, revoke the buffer's grant,
 * return its id to the freelist, and release one tx_sem slot.  Repeats
 * until no new responses raced in after the final event re-arm. */
void network_tx_buf_gc(struct netfront_dev *dev)
{
    RING_IDX cons, prod;
    unsigned short id;

    do {
        prod = dev->tx.sring->rsp_prod;
        rmb(); /* Ensure we see responses up to 'rp'. */

        for (cons = dev->tx.rsp_cons; cons != prod; cons++)
        {
            struct netif_tx_response *txrsp;
            struct net_buffer *buf;

            txrsp = RING_GET_RESPONSE(&dev->tx, cons);
            /* NULL responses carry no buffer to reclaim. */
            if (txrsp->status == NETIF_RSP_NULL)
                continue;

            /* Errors are logged but the buffer is still reclaimed below. */
            if (txrsp->status == NETIF_RSP_ERROR)
                printk("packet error\n");

            id = txrsp->id;
            BUG_ON(id >= NET_TX_RING_SIZE);
            buf = &dev->tx_buffers[id];
            gnttab_end_access(buf->gref);
            buf->gref=GRANT_INVALID_REF;

            add_id_to_freelist(id,dev->tx_freelist);
            up(&dev->tx_sem);  /* one more TX slot is free */
        }

        dev->tx.rsp_cons = prod;

        /*
         * Set a new event, then check for race with update of tx_cons.
         * Note that it is essential to schedule a callback, no matter
         * how few tx_buffers are pending. Even if there is space in the
         * transmit ring, higher layers may be blocked because too much
         * data is outstanding: in such cases notification from Xen is
         * likely to be the only kick that we'll get.
         */
        dev->tx.sring->rsp_event =
            prod + ((dev->tx.sring->req_prod - prod) >> 1) + 1;
        mb();
    } while ((cons == prod) && (prod != dev->tx.sring->rsp_prod));
}
236 void netfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
237 {
238 int flags;
239 struct netfront_dev *dev = data;
241 local_irq_save(flags);
243 network_tx_buf_gc(dev);
244 network_rx(dev);
246 local_irq_restore(flags);
247 }
#ifdef HAVE_LIBC
/* Event-channel handler for select()-driven (libc/TAP) mode: reap TX
 * completions, mark the fd readable, and wake select() waiters.  RX
 * processing itself is deferred to netfront_receive(). */
void netfront_select_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
{
    int flags;
    struct netfront_dev *dev = data;
    int fd = dev->fd;

    local_irq_save(flags);
    network_tx_buf_gc(dev);
    local_irq_restore(flags);

    if (fd != -1)
        files[fd].read = 1;  /* make select() report the fd as readable */
    wake_up(&netfront_queue);
}
#endif
/* Tear down a netfront device and release every resource it owns. */
static void free_netfront(struct netfront_dev *dev)
{
    int i;

    /* Drain tx_sem NET_TX_RING_SIZE times: this only succeeds once
     * every in-flight TX request has completed and released its slot. */
    for(i=0;i<NET_TX_RING_SIZE;i++)
        down(&dev->tx_sem);

    mask_evtchn(dev->evtchn);

    free(dev->mac);
    free(dev->backend);

    /* Revoke the backend's access to the shared rings before freeing them. */
    gnttab_end_access(dev->rx_ring_ref);
    gnttab_end_access(dev->tx_ring_ref);

    free_page(dev->rx.sring);
    free_page(dev->tx.sring);

    unbind_evtchn(dev->evtchn);

    /* RX buffers are always allocated and granted at init time. */
    for(i=0;i<NET_RX_RING_SIZE;i++) {
        gnttab_end_access(dev->rx_buffers[i].gref);
        free_page(dev->rx_buffers[i].page);
    }

    /* TX pages are allocated lazily by netfront_xmit(), so some slots
     * may still be NULL. */
    for(i=0;i<NET_TX_RING_SIZE;i++)
        if (dev->tx_buffers[i].page)
            free_page(dev->tx_buffers[i].page);

    free(dev->nodename);
    free(dev);
}
299 struct netfront_dev *init_netfront(char *_nodename, void (*thenetif_rx)(unsigned char* data, int len), unsigned char rawmac[6], char **ip)
300 {
301 xenbus_transaction_t xbt;
302 char* err;
303 char* message=NULL;
304 struct netif_tx_sring *txs;
305 struct netif_rx_sring *rxs;
306 int retry=0;
307 int i;
308 char* msg = NULL;
309 char nodename[256];
310 char path[256];
311 struct netfront_dev *dev;
312 static int netfrontends = 0;
314 if (!_nodename)
315 snprintf(nodename, sizeof(nodename), "device/vif/%d", netfrontends);
316 else
317 strncpy(nodename, _nodename, strlen(nodename));
318 netfrontends++;
320 if (!thenetif_rx)
321 thenetif_rx = netif_rx;
323 printk("************************ NETFRONT for %s **********\n\n\n", nodename);
325 dev = malloc(sizeof(*dev));
326 memset(dev, 0, sizeof(*dev));
327 dev->nodename = strdup(nodename);
328 #ifdef HAVE_LIBC
329 dev->fd = -1;
330 #endif
332 printk("net TX ring size %d\n", NET_TX_RING_SIZE);
333 printk("net RX ring size %d\n", NET_RX_RING_SIZE);
334 init_SEMAPHORE(&dev->tx_sem, NET_TX_RING_SIZE);
335 for(i=0;i<NET_TX_RING_SIZE;i++)
336 {
337 add_id_to_freelist(i,dev->tx_freelist);
338 dev->tx_buffers[i].page = NULL;
339 }
341 for(i=0;i<NET_RX_RING_SIZE;i++)
342 {
343 /* TODO: that's a lot of memory */
344 dev->rx_buffers[i].page = (char*)alloc_page();
345 }
347 snprintf(path, sizeof(path), "%s/backend-id", nodename);
348 dev->dom = xenbus_read_integer(path);
349 #ifdef HAVE_LIBC
350 if (thenetif_rx == NETIF_SELECT_RX)
351 evtchn_alloc_unbound(dev->dom, netfront_select_handler, dev, &dev->evtchn);
352 else
353 #endif
354 evtchn_alloc_unbound(dev->dom, netfront_handler, dev, &dev->evtchn);
356 txs = (struct netif_tx_sring *) alloc_page();
357 rxs = (struct netif_rx_sring *) alloc_page();
358 memset(txs,0,PAGE_SIZE);
359 memset(rxs,0,PAGE_SIZE);
362 SHARED_RING_INIT(txs);
363 SHARED_RING_INIT(rxs);
364 FRONT_RING_INIT(&dev->tx, txs, PAGE_SIZE);
365 FRONT_RING_INIT(&dev->rx, rxs, PAGE_SIZE);
367 dev->tx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(txs),0);
368 dev->rx_ring_ref = gnttab_grant_access(dev->dom,virt_to_mfn(rxs),0);
370 init_rx_buffers(dev);
372 dev->netif_rx = thenetif_rx;
374 dev->events = NULL;
376 again:
377 err = xenbus_transaction_start(&xbt);
378 if (err) {
379 printk("starting transaction\n");
380 free(err);
381 }
383 err = xenbus_printf(xbt, nodename, "tx-ring-ref","%u",
384 dev->tx_ring_ref);
385 if (err) {
386 message = "writing tx ring-ref";
387 goto abort_transaction;
388 }
389 err = xenbus_printf(xbt, nodename, "rx-ring-ref","%u",
390 dev->rx_ring_ref);
391 if (err) {
392 message = "writing rx ring-ref";
393 goto abort_transaction;
394 }
395 err = xenbus_printf(xbt, nodename,
396 "event-channel", "%u", dev->evtchn);
397 if (err) {
398 message = "writing event-channel";
399 goto abort_transaction;
400 }
402 err = xenbus_printf(xbt, nodename, "request-rx-copy", "%u", 1);
404 if (err) {
405 message = "writing request-rx-copy";
406 goto abort_transaction;
407 }
409 snprintf(path, sizeof(path), "%s/state", nodename);
410 err = xenbus_switch_state(xbt, path, XenbusStateConnected);
411 if (err) {
412 message = "switching state";
413 goto abort_transaction;
414 }
416 err = xenbus_transaction_end(xbt, 0, &retry);
417 if (err) free(err);
418 if (retry) {
419 goto again;
420 printk("completing transaction\n");
421 }
423 goto done;
425 abort_transaction:
426 free(err);
427 err = xenbus_transaction_end(xbt, 1, &retry);
428 goto error;
430 done:
432 snprintf(path, sizeof(path), "%s/backend", nodename);
433 msg = xenbus_read(XBT_NIL, path, &dev->backend);
434 snprintf(path, sizeof(path), "%s/mac", nodename);
435 msg = xenbus_read(XBT_NIL, path, &dev->mac);
437 if ((dev->backend == NULL) || (dev->mac == NULL)) {
438 printk("%s: backend/mac failed\n", __func__);
439 goto error;
440 }
442 printk("backend at %s\n",dev->backend);
443 printk("mac is %s\n",dev->mac);
445 {
446 XenbusState state;
447 char path[strlen(dev->backend) + 1 + 5 + 1];
448 snprintf(path, sizeof(path), "%s/state", dev->backend);
450 xenbus_watch_path_token(XBT_NIL, path, path, &dev->events);
452 err = NULL;
453 state = xenbus_read_integer(path);
454 while (err == NULL && state < XenbusStateConnected)
455 err = xenbus_wait_for_state_change(path, &state, &dev->events);
456 if (state != XenbusStateConnected) {
457 printk("backend not avalable, state=%d\n", state);
458 xenbus_unwatch_path_token(XBT_NIL, path, path);
459 goto error;
460 }
462 if (ip) {
463 snprintf(path, sizeof(path), "%s/ip", dev->backend);
464 xenbus_read(XBT_NIL, path, ip);
465 }
466 }
468 printk("**************************\n");
470 unmask_evtchn(dev->evtchn);
472 /* Special conversion specifier 'hh' needed for __ia64__. Without
473 this mini-os panics with 'Unaligned reference'. */
474 if (rawmac)
475 sscanf(dev->mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
476 &rawmac[0],
477 &rawmac[1],
478 &rawmac[2],
479 &rawmac[3],
480 &rawmac[4],
481 &rawmac[5]);
483 return dev;
484 error:
485 free(msg);
486 free(err);
487 free_netfront(dev);
488 return NULL;
489 }
#ifdef HAVE_LIBC
/* Open a netfront device as a TAP-style file descriptor.
 * Returns the new fd, or -1 with errno set to EIO if the device could
 * not be initialised.
 * NOTE(review): the alloc_fd() result is used unchecked as an index
 * into files[] — confirm alloc_fd() cannot fail. */
int netfront_tap_open(char *nodename) {
    struct netfront_dev *dev;

    /* NETIF_SELECT_RX puts the device into select()-driven mode. */
    dev = init_netfront(nodename, NETIF_SELECT_RX, NULL, NULL);
    if (!dev) {
        printk("TAP open failed\n");
        errno = EIO;
        return -1;
    }
    dev->fd = alloc_fd(FTYPE_TAP);
    printk("tap_open(%s) -> %d\n", nodename, dev->fd);
    files[dev->fd].tap.dev = dev;
    return dev->fd;
}
#endif
508 void shutdown_netfront(struct netfront_dev *dev)
509 {
510 char* err = NULL;
511 XenbusState state;
513 char path[strlen(dev->backend) + 1 + 5 + 1];
514 char nodename[strlen(dev->nodename) + 1 + 5 + 1];
516 printk("close network: backend at %s\n",dev->backend);
518 snprintf(path, sizeof(path), "%s/state", dev->backend);
519 snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);
521 if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing)) != NULL) {
522 printk("shutdown_netfront: error changing state to %d: %s\n",
523 XenbusStateClosing, err);
524 goto close;
525 }
526 state = xenbus_read_integer(path);
527 while (err == NULL && state < XenbusStateClosing)
528 err = xenbus_wait_for_state_change(path, &state, &dev->events);
529 if (err) free(err);
531 if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed)) != NULL) {
532 printk("shutdown_netfront: error changing state to %d: %s\n",
533 XenbusStateClosed, err);
534 goto close;
535 }
536 state = xenbus_read_integer(path);
537 if (state < XenbusStateClosed) {
538 err = xenbus_wait_for_state_change(path, &state, &dev->events);
539 if (err) free(err);
540 }
542 if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateInitialising)) != NULL) {
543 printk("shutdown_netfront: error changing state to %d: %s\n",
544 XenbusStateInitialising, err);
545 goto close;
546 }
547 err = NULL;
548 state = xenbus_read_integer(path);
549 while (err == NULL && (state < XenbusStateInitWait || state >= XenbusStateClosed))
550 err = xenbus_wait_for_state_change(path, &state, &dev->events);
552 close:
553 if (err) free(err);
554 xenbus_unwatch_path_token(XBT_NIL, path, path);
556 snprintf(path, sizeof(path), "%s/tx-ring-ref", nodename);
557 xenbus_rm(XBT_NIL, path);
558 snprintf(path, sizeof(path), "%s/rx-ring-ref", nodename);
559 xenbus_rm(XBT_NIL, path);
560 snprintf(path, sizeof(path), "%s/event-channel", nodename);
561 xenbus_rm(XBT_NIL, path);
562 snprintf(path, sizeof(path), "%s/request-rx-copy", nodename);
563 xenbus_rm(XBT_NIL, path);
565 free_netfront(dev);
566 }
569 void init_rx_buffers(struct netfront_dev *dev)
570 {
571 int i, requeue_idx;
572 netif_rx_request_t *req;
573 int notify;
575 /* Rebuild the RX buffer freelist and the RX ring itself. */
576 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++)
577 {
578 struct net_buffer* buf = &dev->rx_buffers[requeue_idx];
579 req = RING_GET_REQUEST(&dev->rx, requeue_idx);
581 buf->gref = req->gref =
582 gnttab_grant_access(dev->dom,virt_to_mfn(buf->page),0);
584 req->id = requeue_idx;
586 requeue_idx++;
587 }
589 dev->rx.req_prod_pvt = requeue_idx;
591 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
593 if (notify)
594 notify_remote_via_evtchn(dev->evtchn);
596 dev->rx.sring->rsp_event = dev->rx.rsp_cons + 1;
597 }
/* Transmit one frame of at most PAGE_SIZE bytes.
 * Blocks on tx_sem until a TX slot is free, copies the payload into the
 * slot's page, grants the page to the backend, and pushes a request on
 * the TX ring. */
void netfront_xmit(struct netfront_dev *dev, unsigned char* data,int len)
{
    int flags;
    struct netif_tx_request *tx;
    RING_IDX i;
    int notify;
    unsigned short id;
    struct net_buffer* buf;
    void* page;

    /* One request carries at most one page of payload. */
    BUG_ON(len > PAGE_SIZE);

    down(&dev->tx_sem);  /* wait until a TX slot is free */

    /* The freelist is also manipulated from the event handler, so take
     * an id with interrupts disabled. */
    local_irq_save(flags);
    id = get_id_from_freelist(dev->tx_freelist);
    local_irq_restore(flags);

    buf = &dev->tx_buffers[id];
    page = buf->page;
    if (!page)
        page = buf->page = (char*) alloc_page();  /* lazily allocated; freed in free_netfront() */

    i = dev->tx.req_prod_pvt;
    tx = RING_GET_REQUEST(&dev->tx, i);

    memcpy(page,data,len);

    /* NOTE(review): the final argument 1 to gnttab_grant_access()
     * presumably marks the grant read-only — confirm against its
     * declaration. */
    buf->gref =
        tx->gref = gnttab_grant_access(dev->dom,virt_to_mfn(page),1);

    tx->offset=0;
    tx->size = len;
    tx->flags=0;
    tx->id = id;
    dev->tx.req_prod_pvt = i + 1;

    wmb();  /* request contents visible before pushing the producer index */

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->tx, notify);

    if(notify) notify_remote_via_evtchn(dev->evtchn);

    /* Opportunistically reap completed transmissions. */
    local_irq_save(flags);
    network_tx_buf_gc(dev);
    local_irq_restore(flags);
}
#ifdef HAVE_LIBC
/* Synchronous receive for select()-driven (libc/TAP) mode: point the
 * device at the caller's buffer, run the RX path once, and return the
 * number of bytes delivered (0 if no packet was pending).
 * Must run on the main thread (see the ASSERT, matching network_rx()). */
ssize_t netfront_receive(struct netfront_dev *dev, unsigned char *data, size_t len)
{
    unsigned long flags;
    int fd = dev->fd;
    ASSERT(current == main_thread);

    dev->rlen = 0;
    dev->data = data;  /* network_rx() copies at most 'len' bytes here */
    dev->len = len;

    local_irq_save(flags);
    network_rx(dev);
    if (!dev->rlen && fd != -1)
        /* No data for us, make select stop returning */
        files[fd].read = 0;
    /* Before re-enabling the interrupts, in case a packet just arrived in the
     * meanwhile. */
    local_irq_restore(flags);

    dev->data = NULL;
    dev->len = 0;

    return dev->rlen;
}
#endif