debuggers.hg

view netbsd-2.0-xen-sparse/sys/arch/xen/xen/if_xennet.c @ 2620:86f3590030af

bitkeeper revision 1.1159.1.196 (415d2c407zgMBjgq11fXyF4ysEuEkA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-nbsd
author cl349@freefall.cl.cam.ac.uk
date Fri Oct 01 10:06:56 2004 +0000 (2004-10-01)
parents 464c5ab1400e e7ec6d1ba0b4
children aa75f00efa54 a4fbb98f00cb
line source

/*	$NetBSD: if_xennet.c,v 1.1.2.1 2004/05/22 15:58:29 he Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xennet.c,v 1.1.2.1 2004/05/22 15:58:29 he Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#ifdef mediacode
#include <net/if_media.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <nfs/rpcv2.h>

#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsdiskless.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#include <machine/xen.h>
#include <machine/hypervisor.h>
#include <machine/evtchn.h>
#include <machine/ctrl_if.h>

#include <machine/if_xennetvar.h>

#ifdef DEBUG
#define XENNET_DEBUG
#endif
#if defined(XENNET_DEBUG) && !defined(DEBUG)
#define DEBUG
#endif
/* #define XENNET_DEBUG_DUMP */

#ifdef XENNET_DEBUG
#define XEDB_FOLLOW 0x01
#define XEDB_INIT   0x02
#define XEDB_EVENT  0x04
#define XEDB_MBUF   0x08
#define XEDB_MEM    0x10
int xennet_debug = 0x0;
#define DPRINTF(x) if (xennet_debug) printf x;
#define DPRINTFN(n,x) if (xennet_debug & (n)) printf x;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
#define PRINTF(x) printf x;
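
/*
 * To enable tracing, set xennet_debug (e.g. from the kernel debugger)
 * to a bitwise OR of the XEDB_* flags above; DPRINTFN() only fires
 * when its flag is present in the mask.  For example,
 *
 *	xennet_debug = XEDB_EVENT | XEDB_MBUF;
 *
 * traces ring events and mbuf handling only.
 */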

#ifdef XENNET_DEBUG_DUMP
static void xennet_hex_dump(unsigned char *, size_t, char *, int);
#endif

int xennet_match(struct device *, struct cfdata *, void *);
void xennet_attach(struct device *, struct device *, void *);
static void xennet_ctrlif_rx(ctrl_msg_t *, unsigned long);
static int xennet_driver_count_connected(void);
static void xennet_driver_status_change(netif_fe_driver_status_t *);
static void xennet_interface_status_change(netif_fe_interface_status_t *);
static void xennet_tx_mbuf_free(struct mbuf *, caddr_t, size_t, void *);
static void xennet_rx_mbuf_free(struct mbuf *, caddr_t, size_t, void *);
static int xen_network_handler(void *);
static void network_tx_buf_gc(struct xennet_softc *);
static void network_alloc_rx_buffers(struct xennet_softc *);
static void network_alloc_tx_buffers(struct xennet_softc *);
void xennet_init(struct xennet_softc *);
void xennet_reset(struct xennet_softc *);
#ifdef mediacode
static int xennet_mediachange(struct ifnet *);
static void xennet_mediastatus(struct ifnet *, struct ifmediareq *);
#endif

CFATTACH_DECL(xennet, sizeof(struct xennet_softc),
    xennet_match, xennet_attach, NULL, NULL);

#define TX_MAX_ENTRIES (NETIF_TX_RING_SIZE - 2)
#define RX_MAX_ENTRIES (NETIF_RX_RING_SIZE - 2)
#define TX_ENTRIES 128
#define RX_ENTRIES 128
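
/*
 * NB. the *_MAX_ENTRIES limits stop two requests short of the ring
 * size, presumably so that a completely full ring never lets the
 * producer index catch up with the consumer index.
 */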

static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];

/** Network interface info. */
struct xennet_ctrl {
        /** Number of interfaces. */
        int xc_interfaces;
        /** Number of connected interfaces. */
        int xc_connected;
        /** Error code. */
        int xc_err;
        /** Driver status. */
        int xc_up;

        cfprint_t xc_cfprint;
        struct device *xc_parent;
};

static struct xennet_ctrl netctrl = { -1, 0, 0 };

#ifdef mediacode
static int xennet_media[] = {
        IFM_ETHER|IFM_AUTO,
};
static int nxennet_media = (sizeof(xennet_media)/sizeof(xennet_media[0]));
#endif

int
xennet_scan(struct device *self, struct xennet_attach_args *xneta,
    cfprint_t print)
{
        ctrl_msg_t cmsg;
        netif_fe_driver_status_t st;
        int err = 0;

        if ((xen_start_info.flags & SIF_INITDOMAIN) ||
            (xen_start_info.flags & SIF_NET_BE_DOMAIN))
                return 0;

        netctrl.xc_parent = self;
        netctrl.xc_cfprint = print;

        printf("Initialising Xen virtual ethernet frontend driver.\n");

        (void)ctrl_if_register_receiver(CMSG_NETIF_FE, xennet_ctrlif_rx,
            CALLBACK_IN_BLOCKING_CONTEXT);

        /* Send a driver-UP notification to the domain controller. */
        cmsg.type = CMSG_NETIF_FE;
        cmsg.subtype = CMSG_NETIF_FE_DRIVER_STATUS;
        cmsg.length = sizeof(netif_fe_driver_status_t);
        st.status = NETIF_DRIVER_STATUS_UP;
        st.max_handle = 0;
        memcpy(cmsg.msg, &st, sizeof(st));
        ctrl_if_send_message_block(&cmsg, NULL, 0, 0);

#if 0
        err = xennet_wait_for_interfaces();
        if (err)
                ctrl_if_unregister_receiver(CMSG_NETIF_FE, xennet_ctrlif_rx);
#endif

        return err;
}

int
xennet_match(struct device *parent, struct cfdata *match, void *aux)
{
        struct xennet_attach_args *xa = (struct xennet_attach_args *)aux;

        if (strcmp(xa->xa_device, "xennet") == 0)
                return 1;
        return 0;
}

void
xennet_attach(struct device *parent, struct device *self, void *aux)
{
        struct xennet_attach_args *xneta = (struct xennet_attach_args *)aux;
        struct xennet_softc *sc = (struct xennet_softc *)self;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        int idx;

        aprint_normal(": Xen Virtual Network Interface\n");

        sc->sc_ifno = xneta->xa_handle;

        /* Initialize ifnet structure. */
        memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
        ifp->if_softc = sc;
        ifp->if_start = xennet_start;
        ifp->if_ioctl = xennet_ioctl;
        ifp->if_watchdog = xennet_watchdog;
        ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS;

#ifdef mediacode
        ifmedia_init(&sc->sc_media, 0, xennet_mediachange,
            xennet_mediastatus);
        for (idx = 0; idx < nxennet_media; idx++)
                ifmedia_add(&sc->sc_media, xennet_media[idx], 0, NULL);
        ifmedia_set(&sc->sc_media, xennet_media[0]);
#endif
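
        /*
         * Chain the tx/rx buffer arrays into free lists; entry 0
         * serves as the list head (see get_bufarray_entry() and
         * put_bufarray_entry() below).
         */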
        for (idx = 0; idx < NETIF_TX_RING_SIZE; idx++)
                sc->sc_tx_bufa[idx].xb_next = idx + 1;
        for (idx = 0; idx < NETIF_RX_RING_SIZE; idx++)
                sc->sc_rx_bufa[idx].xb_next = idx + 1;
}

static struct xennet_softc *
find_device(int handle)
{
        struct device *dv;
        struct xennet_softc *xs = NULL;

        for (dv = alldevs.tqh_first; dv != NULL; dv = dv->dv_list.tqe_next) {
                if (dv->dv_cfattach == NULL ||
                    dv->dv_cfattach->ca_attach != xennet_attach)
                        continue;
                xs = (struct xennet_softc *)dv;
                if (xs->sc_ifno == handle)
                        break;
        }
        return xs;
}

static void
xennet_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
{
        int respond = 1;

        switch (msg->subtype) {
        case CMSG_NETIF_FE_INTERFACE_STATUS:
                if (msg->length != sizeof(netif_fe_interface_status_t))
                        goto error;
                xennet_interface_status_change(
                    (netif_fe_interface_status_t *)&msg->msg[0]);
                break;

        case CMSG_NETIF_FE_DRIVER_STATUS:
                if (msg->length != sizeof(netif_fe_driver_status_t))
                        goto error;
                xennet_driver_status_change(
                    (netif_fe_driver_status_t *)&msg->msg[0]);
                break;
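
        /*
         * An undersized message jumps here and falls through to the
         * default case, which zeroes the length so the response sent
         * below carries no payload.
         */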
        error:
        default:
                msg->length = 0;
                break;
        }

        if (respond)
                ctrl_if_send_response(msg);
}

static void
xennet_driver_status_change(netif_fe_driver_status_t *status)
{

        DPRINTFN(XEDB_EVENT, ("> status=%d\n", status->status));

        netctrl.xc_up = status->status;
        xennet_driver_count_connected();
}

static int
xennet_driver_count_connected(void)
{
        struct device *dv;
        struct xennet_softc *xs = NULL;

        netctrl.xc_interfaces = netctrl.xc_connected = 0;
        for (dv = alldevs.tqh_first; dv != NULL; dv = dv->dv_list.tqe_next) {
                if (dv->dv_cfattach == NULL ||
                    dv->dv_cfattach->ca_attach != xennet_attach)
                        continue;
                xs = (struct xennet_softc *)dv;
                netctrl.xc_interfaces++;
                if (xs->sc_backend_state == BEST_CONNECTED)
                        netctrl.xc_connected++;
        }

        return netctrl.xc_connected;
}

static void
xennet_interface_status_change(netif_fe_interface_status_t *status)
{
        ctrl_msg_t cmsg;
        netif_fe_interface_connect_t up;
        struct xennet_softc *sc;
        struct ifnet *ifp;
        struct vm_page *pg_tx, *pg_rx;
        struct xennet_attach_args xneta;

        DPRINTFN(XEDB_EVENT, ("> status=%d handle=%d mac=%02x:%02x:%02x:%02x:%02x:%02x\n",
            status->status,
            status->handle,
            status->mac[0], status->mac[1], status->mac[2],
            status->mac[3], status->mac[4], status->mac[5]));

        sc = find_device(status->handle);
        if (sc == NULL) {
                xneta.xa_device = "xennet";
                xneta.xa_handle = status->handle;
                config_found(netctrl.xc_parent, &xneta, netctrl.xc_cfprint);
                sc = find_device(status->handle);
                if (sc == NULL) {
                        printf("Status change: invalid netif handle %u\n",
                            status->handle);
                        return;
                }
        }
        ifp = &sc->sc_ethercom.ec_if;

        switch (status->status) {
        case NETIF_INTERFACE_STATUS_CLOSED:
                printf("Unexpected netif-CLOSED message in state %d\n",
                    sc->sc_backend_state);
                break;

        case NETIF_INTERFACE_STATUS_DISCONNECTED:
#if 0
                if (sc->sc_backend_state != BEST_CLOSED) {
                        printk("Unexpected netif-DISCONNECTED message"
                            " in state %d\n", sc->sc_backend_state);
                        printk("Attempting to reconnect network interface\n");

                        /* Begin interface recovery.
                         *
                         * NB. Whilst we're recovering, we turn the
                         * carrier state off.  We take measures to
                         * ensure that this device isn't used for
                         * anything.  We also stop the queue for this
                         * device.  Various different approaches
                         * (e.g. continuing to buffer packets) have
                         * been tested but don't appear to improve the
                         * overall impact on TCP connections.
                         *
                         * TODO: (MAW) Change the Xend<->Guest
                         * protocol so that a recovery is initiated by
                         * a special "RESET" message - disconnect
                         * could just mean we're not allowed to use
                         * this interface any more.
                         */

                        /* Stop old i/f to prevent errors whilst we
                         * rebuild the state. */
                        spin_lock_irq(&np->tx_lock);
                        spin_lock(&np->rx_lock);
                        netif_stop_queue(dev);
                        np->backend_state = BEST_DISCONNECTED;
                        spin_unlock(&np->rx_lock);
                        spin_unlock_irq(&np->tx_lock);

                        /* Free resources. */
                        free_irq(np->irq, dev);
                        unbind_evtchn_from_irq(np->evtchn);
                        free_page((unsigned long)np->tx);
                        free_page((unsigned long)np->rx);
                }
#endif

                /* Move from CLOSED to DISCONNECTED state. */
                sc->sc_tx = (netif_tx_interface_t *)
                    uvm_km_valloc_align(kernel_map, PAGE_SIZE, PAGE_SIZE);
                if (sc->sc_tx == NULL)
                        panic("netif: no tx va");
                sc->sc_rx = (netif_rx_interface_t *)
                    uvm_km_valloc_align(kernel_map, PAGE_SIZE, PAGE_SIZE);
                if (sc->sc_rx == NULL)
                        panic("netif: no rx va");
                pg_tx = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
                if (pg_tx == NULL) {
                        panic("netif: no tx pages");
                }
                pmap_kenter_pa((vaddr_t)sc->sc_tx, VM_PAGE_TO_PHYS(pg_tx),
                    VM_PROT_READ | VM_PROT_WRITE);
                pg_rx = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
                if (pg_rx == NULL) {
                        panic("netif: no rx pages");
                }
                pmap_kenter_pa((vaddr_t)sc->sc_rx, VM_PAGE_TO_PHYS(pg_rx),
                    VM_PROT_READ | VM_PROT_WRITE);
                sc->sc_backend_state = BEST_DISCONNECTED;

                /* Construct an interface-CONNECT message for the
                 * domain controller. */
                cmsg.type = CMSG_NETIF_FE;
                cmsg.subtype = CMSG_NETIF_FE_INTERFACE_CONNECT;
                cmsg.length = sizeof(netif_fe_interface_connect_t);
                up.handle = status->handle;
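                /* The shared ring pages are named by machine frame
                 * number: xpmap_ptom() translates each page's
                 * pseudo-physical address to a machine address before
                 * the frame number is shifted out. */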
                up.tx_shmem_frame = xpmap_ptom(VM_PAGE_TO_PHYS(pg_tx)) >> PAGE_SHIFT;
                up.rx_shmem_frame = xpmap_ptom(VM_PAGE_TO_PHYS(pg_rx)) >> PAGE_SHIFT;
                memcpy(cmsg.msg, &up, sizeof(up));

                /* Tell the controller to bring up the interface. */
                ctrl_if_send_message_block(&cmsg, NULL, 0, 0);
                break;

        case NETIF_INTERFACE_STATUS_CONNECTED:
                if (sc->sc_backend_state == BEST_CLOSED) {
                        printf("Unexpected netif-CONNECTED message"
                            " in state %d\n", sc->sc_backend_state);
                        break;
                }

                memcpy(sc->sc_enaddr, status->mac, ETHER_ADDR_LEN);
#if 0
                if (xen_start_info.flags & SIF_PRIVILEGED) {
                        /* XXX for domain-0 change out ethernet address to be
                         * different than the physical address since arp
                         * replies from other domains will report the physical
                         * address.
                         */
                        if (sc->sc_enaddr[0] != 0xaa)
                                sc->sc_enaddr[0] = 0xaa;
                        else
                                sc->sc_enaddr[0] = 0xab;
                }
#endif

                /* Recovery procedure: */

                /* Step 1: Reinitialise variables. */
                sc->sc_rx_resp_cons = sc->sc_tx_resp_cons = /* sc->sc_tx_full = */ 0;
                sc->sc_rx->event = sc->sc_tx->event = 1;

                /* Step 2: Rebuild the RX and TX ring contents. */
                network_alloc_rx_buffers(sc);
                SLIST_INIT(&sc->sc_tx_bufs);
                network_alloc_tx_buffers(sc);

                /* Step 3: All public and private state should now be
                 * sane.  Get ready to start sending and receiving
                 * packets and give the driver domain a kick because
                 * we've probably just requeued some packets.
                 */
                sc->sc_backend_state = BEST_CONNECTED;
                __insn_barrier();
                hypervisor_notify_via_evtchn(status->evtchn);
                network_tx_buf_gc(sc);

                if_attach(ifp);
                ether_ifattach(ifp, sc->sc_enaddr);

                sc->sc_evtchn = status->evtchn;
                sc->sc_irq = bind_evtchn_to_irq(sc->sc_evtchn);
                event_set_handler(sc->sc_irq, &xen_network_handler, sc, IPL_NET);
                hypervisor_enable_irq(sc->sc_irq);
                xennet_driver_count_connected();

                aprint_normal("%s: MAC address %s\n", sc->sc_dev.dv_xname,
                    ether_sprintf(sc->sc_enaddr));

#if NRND > 0
                rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
                    RND_TYPE_NET, 0);
#endif
                break;

        default:
                printf("Status change to unknown value %d\n",
                    status->status);
                break;
        }
}

static void
xennet_tx_mbuf_free(struct mbuf *m, caddr_t buf, size_t size, void *arg)
{
        struct xennet_txbuf *txbuf = (struct xennet_txbuf *)arg;

        DPRINTFN(XEDB_MBUF, ("xennet_tx_mbuf_free %p pa %p\n", txbuf,
            (void *)txbuf->xt_pa));
        SLIST_INSERT_HEAD(&txbuf->xt_sc->sc_tx_bufs, txbuf, xt_next);
        pool_cache_put(&mbpool_cache, m);
}

static void
xennet_rx_push_buffer(struct xennet_softc *sc, int id)
{
        NETIF_RING_IDX ringidx;
        int nr_pfns;

        ringidx = sc->sc_rx->req_prod;
        nr_pfns = 0;

        DPRINTFN(XEDB_MEM, ("readding page va %p pa %p ma %p/%p to rx_ring "
            "at %d with id %d\n",
            (void *)sc->sc_rx_bufa[id].xb_rx.xbrx_va,
            (void *)sc->sc_rx_bufa[id].xb_rx.xbrx_pa,
            (void *)(PTE_BASE[x86_btop
                (sc->sc_rx_bufa[id].xb_rx.xbrx_va)] & PG_FRAME),
            (void *)xpmap_ptom(sc->sc_rx_bufa[id].xb_rx.xbrx_pa),
            ringidx, id));

        sc->sc_rx->ring[MASK_NETIF_RX_IDX(ringidx)].req.id = id;

        rx_pfn_array[nr_pfns] = xpmap_ptom(sc->sc_rx_bufa[id].xb_rx.xbrx_pa)
            >> PAGE_SHIFT;

        /* Remove this page from pseudo phys map before
         * passing back to Xen. */
        xpmap_phys_to_machine_mapping[(sc->sc_rx_bufa[id].xb_rx.xbrx_pa - XPMAP_OFFSET) >> PAGE_SHIFT] =
            INVALID_P2M_ENTRY;

        rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping;
        rx_mcl[nr_pfns].args[0] = sc->sc_rx_bufa[id].xb_rx.xbrx_va >> PAGE_SHIFT;
        rx_mcl[nr_pfns].args[1] = 0;
        rx_mcl[nr_pfns].args[2] = 0;

        nr_pfns++;

        sc->sc_rx_bufs_to_notify++;

        ringidx++;

        /*
         * We may have allocated buffers which have entries
         * outstanding in the page update queue -- make sure we flush
         * those first!
         */
        xpq_flush_queue();

        /* After all PTEs have been zapped we blow away stale TLB entries. */
        rx_mcl[nr_pfns-1].args[2] = UVMF_FLUSH_TLB;

        /* Give away a batch of pages. */
        rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
        rx_mcl[nr_pfns].args[0] = MEMOP_decrease_reservation;
        rx_mcl[nr_pfns].args[1] = (unsigned long)rx_pfn_array;
        rx_mcl[nr_pfns].args[2] = (unsigned long)nr_pfns;
        rx_mcl[nr_pfns].args[3] = 0;
        rx_mcl[nr_pfns].args[4] = DOMID_SELF;

        /* Zap PTEs and give away pages in one big multicall. */
        (void)HYPERVISOR_multicall(rx_mcl, nr_pfns+1);

        /* Check return status of HYPERVISOR_dom_mem_op(). */
        if (rx_mcl[nr_pfns].args[5] != nr_pfns)
                panic("Unable to reduce memory reservation\n");

        /* Above is a suitable barrier to ensure backend will see requests. */
        sc->sc_rx->req_prod = ringidx;
}

static void
xennet_rx_mbuf_free(struct mbuf *m, caddr_t buf, size_t size, void *arg)
{
        union xennet_bufarray *xb = (union xennet_bufarray *)arg;
        struct xennet_softc *sc = xb->xb_rx.xbrx_sc;
        int id = (xb - sc->sc_rx_bufa);

        DPRINTFN(XEDB_MBUF, ("xennet_rx_mbuf_free id %d, mbuf %p, buf %p, "
            "size %d\n", id, m, buf, size));

        xennet_rx_push_buffer(sc, id);

        pool_cache_put(&mbpool_cache, m);
}

static int
xen_network_handler(void *arg)
{
        struct xennet_softc *sc = arg;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        netif_rx_response_t *rx;
        paddr_t pa;
        NETIF_RING_IDX ringidx;
        mmu_update_t *mmu = rx_mmu;
        multicall_entry_t *mcl = rx_mcl;
        struct mbuf *m;

        network_tx_buf_gc(sc);

 again:
        for (ringidx = sc->sc_rx_resp_cons;
             ringidx != sc->sc_rx->resp_prod;
             ringidx++) {
                rx = &sc->sc_rx->ring[MASK_NETIF_RX_IDX(ringidx)].resp;

                if (rx->status < 0)
                        panic("rx->status < 0");
                /* XXXcl check rx->status for error */

                MGETHDR(m, M_DONTWAIT, MT_DATA);
                if (m == NULL) {
                        printf("xennet: rx no mbuf\n");
                        break;
                }

                pa = sc->sc_rx_bufa[rx->id].xb_rx.xbrx_pa;

                DPRINTFN(XEDB_EVENT, ("rx event %d for id %d, size %d, "
                    "status %d, ma %08lx, pa %08lx\n", ringidx,
                    rx->id, rx->status, rx->status, rx->addr, pa));

                /* Remap the page. */
                mmu->ptr = (rx->addr & PG_FRAME) | MMU_MACHPHYS_UPDATE;
                mmu->val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
                mmu++;
                mcl->op = __HYPERVISOR_update_va_mapping;
                mcl->args[0] = sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va >> PAGE_SHIFT;
                mcl->args[1] = (rx->addr & PG_FRAME) | PG_V|PG_KW;
                mcl->args[2] = UVMF_FLUSH_TLB; /* 0; */
                mcl++;

                xpmap_phys_to_machine_mapping
                    [(pa - XPMAP_OFFSET) >> PAGE_SHIFT] =
                    rx->addr >> PAGE_SHIFT;

                /* Do all the remapping work, and M->P updates, in one
                 * big hypercall. */
                if ((mcl - rx_mcl) != 0) {
                        mcl->op = __HYPERVISOR_mmu_update;
                        mcl->args[0] = (unsigned long)rx_mmu;
                        mcl->args[1] = mmu - rx_mmu;
                        mcl->args[2] = 0;
                        mcl++;
                        (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
                }
                if (0)
                        printf("page mapped at va %08lx -> %08x/%08lx\n",
                            sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va,
                            PTE_BASE[x86_btop(sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va)],
                            rx->addr);
                mmu = rx_mmu;
                mcl = rx_mcl;

                DPRINTFN(XEDB_MBUF, ("rx packet mbuf %p va %p pa %p/%p "
                    "ma %p\n", m,
                    (void *)sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va,
                    (void *)(xpmap_mtop(PTE_BASE[x86_btop
                        (sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va)] & PG_FRAME)), (void *)pa,
                    (void *)(PTE_BASE[x86_btop
                        (sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va)] & PG_FRAME)));

                m->m_len = m->m_pkthdr.len = rx->status;
                m->m_pkthdr.rcvif = ifp;
                if (sc->sc_rx->req_prod != sc->sc_rx->resp_prod) {
                        MEXTADD(m, (void *)(sc->sc_rx_bufa[rx->id].xb_rx.
                            xbrx_va + (rx->addr & PAGE_MASK)), rx->status, M_DEVBUF,
                            xennet_rx_mbuf_free,
                            &sc->sc_rx_bufa[rx->id]);
                } else {
                        /*
                         * This was our last receive buffer, allocate
                         * memory, copy data and push the receive
                         * buffer back to the hypervisor.
                         */
                        MEXTMALLOC(m, rx->status, M_DONTWAIT);
                        if ((m->m_flags & M_EXT) == 0) {
                                printf("xennet: rx no mbuf 2\n");
                                m_free(m);
                                break;
                        }
                        memcpy(m->m_data, (void *)(sc->sc_rx_bufa[rx->id].
                            xb_rx.xbrx_va + (rx->addr & PAGE_MASK)), rx->status);
                        xennet_rx_push_buffer(sc, rx->id);
                }

#ifdef XENNET_DEBUG_DUMP
                xennet_hex_dump(mtod(m, u_char *), m->m_pkthdr.len, "r", rx->id);
#endif

#if NBPFILTER > 0
                /*
                 * Pass packet to bpf if there is a listener.
                 */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m);
#endif

                ifp->if_ipackets++;

                /* Pass the packet up. */
                (*ifp->if_input)(ifp, m);
        }

        sc->sc_rx_resp_cons = ringidx;
        sc->sc_rx->event = sc->sc_rx_resp_cons + 1;

        if (sc->sc_rx->resp_prod != ringidx)
                goto again;

        return 0;
}

static inline int
get_bufarray_entry(union xennet_bufarray *a)
{
        int idx;

        idx = a[0].xb_next;
        a[0].xb_next = a[idx].xb_next;
        return idx;
}

static inline void
put_bufarray_entry(union xennet_bufarray *a, int idx)
{

        a[idx].xb_next = a[0].xb_next;
        a[0].xb_next = idx;
}
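
/*
 * The free list above is threaded through the buffer array itself:
 * a[0].xb_next holds the index of the first free entry and each free
 * entry's xb_next names its successor, making both operations O(1).
 * Typical use:
 *
 *	id = get_bufarray_entry(sc->sc_tx_bufa);	(claim a slot)
 *	...
 *	put_bufarray_entry(sc->sc_tx_bufa, id);		(release it)
 */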

static void
network_tx_buf_gc(struct xennet_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        NETIF_RING_IDX idx, prod;

        do {
                prod = sc->sc_tx->resp_prod;

                for (idx = sc->sc_tx_resp_cons; idx != prod; idx++) {
                        DPRINTFN(XEDB_EVENT, ("tx event at pos %d, status: "
                            "%d, id: %d, mbuf %p, buf %p\n", idx,
                            sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.status,
                            sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.id,
                            sc->sc_tx_bufa[sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.id].xb_tx.xbtx_m,
                            mtod(sc->sc_tx_bufa[sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.id].xb_tx.xbtx_m, void *)));
                        m_freem(sc->sc_tx_bufa[sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.id].xb_tx.xbtx_m);
                        put_bufarray_entry(sc->sc_tx_bufa,
                            sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.id);
                        sc->sc_tx_entries--; /* atomic */
                }

                sc->sc_tx_resp_cons = prod;

                /*
                 * Set a new event, then check for race with update of
                 * tx_cons.
                 */
                sc->sc_tx->event = /* atomic */
                    prod + (sc->sc_tx_entries >> 1) + 1;
                __insn_barrier();
        } while (prod != sc->sc_tx->resp_prod);

        if (sc->sc_tx->resp_prod == sc->sc_tx->req_prod)
                ifp->if_timer = 0;
        /* KDASSERT(sc->sc_net_idx->tx_req_prod == */
        /* TX_RING_ADD(sc->sc_net_idx->tx_resp_prod, sc->sc_tx_entries)); */
}

static void
network_alloc_rx_buffers(struct xennet_softc *sc)
{
        vaddr_t rxpages, va;
        paddr_t pa;
        struct vm_page *pg;
        int id, nr_pfns;
        NETIF_RING_IDX ringidx;
        int s;

        ringidx = sc->sc_rx->req_prod;
        if (0) printf("network_alloc_rx_buffers prod %d cons %d\n", ringidx,
            sc->sc_rx_resp_cons);
        if ((ringidx - sc->sc_rx_resp_cons) > (RX_MAX_ENTRIES / 2))
                return;

        nr_pfns = 0;

        rxpages = uvm_km_valloc_align(kernel_map, RX_ENTRIES * PAGE_SIZE,
            PAGE_SIZE);

        s = splnet();
        for (va = rxpages; va < rxpages + RX_ENTRIES * PAGE_SIZE;
             va += PAGE_SIZE) {
                pg = uvm_pagealloc(NULL, 0, NULL, 0);
                if (pg == NULL)
                        panic("network_alloc_rx_buffers: no pages");
                pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                    VM_PROT_READ | VM_PROT_WRITE);

                id = get_bufarray_entry(sc->sc_rx_bufa);
                sc->sc_rx_bufa[id].xb_rx.xbrx_va = va;
                sc->sc_rx_bufa[id].xb_rx.xbrx_sc = sc;

                pa = VM_PAGE_TO_PHYS(pg);
                DPRINTFN(XEDB_MEM, ("adding page va %p pa %p/%p "
                    "ma %p/%p to rx_ring at %d with id %d\n", (void *)va,
                    (void *)(VM_PAGE_TO_PHYS(pg) & PG_FRAME), (void *)xpmap_mtop(PTE_BASE[x86_btop(va)]),
                    (void *)(PTE_BASE[x86_btop(va)] & PG_FRAME),
                    (void *)xpmap_ptom(VM_PAGE_TO_PHYS(pg)),
                    ringidx, id));
                sc->sc_rx_bufa[id].xb_rx.xbrx_pa = pa;
                sc->sc_rx->ring[MASK_NETIF_RX_IDX(ringidx)].req.id = id;

                rx_pfn_array[nr_pfns] = xpmap_ptom(pa) >> PAGE_SHIFT;

                /* Remove this page from pseudo phys map before
                 * passing back to Xen. */
                xpmap_phys_to_machine_mapping[(pa - XPMAP_OFFSET) >> PAGE_SHIFT] =
                    INVALID_P2M_ENTRY;

                rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping;
                rx_mcl[nr_pfns].args[0] = va >> PAGE_SHIFT;
                rx_mcl[nr_pfns].args[1] = 0;
                rx_mcl[nr_pfns].args[2] = 0;

                nr_pfns++;

                sc->sc_rx_bufs_to_notify++;

                ringidx++;
                if ((ringidx - sc->sc_rx_resp_cons) == RX_MAX_ENTRIES)
                        break;
        }

        if (nr_pfns == 0) {
                splx(s);
                return;
        }

        /*
         * We may have allocated buffers which have entries
         * outstanding in the page update queue -- make sure we flush
         * those first!
         */
        xpq_flush_queue();

        /* After all PTEs have been zapped we blow away stale TLB entries. */
        rx_mcl[nr_pfns-1].args[2] = UVMF_FLUSH_TLB;

        /* Give away a batch of pages. */
        rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
        rx_mcl[nr_pfns].args[0] = MEMOP_decrease_reservation;
        rx_mcl[nr_pfns].args[1] = (unsigned long)rx_pfn_array;
        rx_mcl[nr_pfns].args[2] = (unsigned long)nr_pfns;
        rx_mcl[nr_pfns].args[3] = 0;
        rx_mcl[nr_pfns].args[4] = DOMID_SELF;

        /* Zap PTEs and give away pages in one big multicall. */
        (void)HYPERVISOR_multicall(rx_mcl, nr_pfns+1);

        /* Check return status of HYPERVISOR_dom_mem_op(). */
        if (rx_mcl[nr_pfns].args[5] != nr_pfns)
                panic("Unable to reduce memory reservation\n");

        /* Above is a suitable barrier to ensure backend will see requests. */
        sc->sc_rx->req_prod = ringidx;

        splx(s);
}

static void
network_alloc_tx_buffers(struct xennet_softc *sc)
{
        vaddr_t txpages, va;
        struct vm_page *pg;
        struct xennet_txbuf *txbuf;
        int i;

        txpages = uvm_km_valloc_align(kernel_map,
            (TX_ENTRIES / TXBUF_PER_PAGE) * PAGE_SIZE, PAGE_SIZE);
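        /*
         * Each page is carved into TXBUF_PER_PAGE equal slots: a
         * struct xennet_txbuf header sits at the front of each slot
         * and the packet data area (xt_buf, at physical address
         * xt_pa) follows immediately after the header.
         */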
        for (va = txpages;
             va < txpages + (TX_ENTRIES / TXBUF_PER_PAGE) * PAGE_SIZE;
             va += PAGE_SIZE) {
                pg = uvm_pagealloc(NULL, 0, NULL, 0);
                if (pg == NULL)
                        panic("network_alloc_tx_buffers: no pages");
                pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                    VM_PROT_READ | VM_PROT_WRITE);

                for (i = 0; i < TXBUF_PER_PAGE; i++) {
                        txbuf = (struct xennet_txbuf *)
                            (va + i * (PAGE_SIZE / TXBUF_PER_PAGE));
                        txbuf->xt_sc = sc;
                        txbuf->xt_pa = VM_PAGE_TO_PHYS(pg) +
                            i * (PAGE_SIZE / TXBUF_PER_PAGE) +
                            sizeof(struct xennet_txbuf);
                        SLIST_INSERT_HEAD(&sc->sc_tx_bufs, txbuf, xt_next);
                }
        }
}

/*
 * Called at splnet.
 */
void
xennet_start(struct ifnet *ifp)
{
        struct xennet_softc *sc = ifp->if_softc;
        struct mbuf *m, *new_m;
        struct xennet_txbuf *txbuf;
        netif_tx_request_t *txreq;
        NETIF_RING_IDX idx;
        paddr_t pa;
        int bufid;

        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
        IFQ_POLL(&ifp->if_snd, m);
        if (m == 0)
                panic("%s: No packet to start", sc->sc_dev.dv_xname);
#endif

        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
                return;

        idx = sc->sc_tx->req_prod;
        while (/*CONSTCOND*/1) {

                IFQ_POLL(&ifp->if_snd, m);
                if (m == NULL)
                        break;

                switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
                case M_EXT|M_EXT_CLUSTER:
                        pa = m->m_ext.ext_paddr +
                            (m->m_data - m->m_ext.ext_buf);
                        break;
                default:
                case 0:
                        pa = m->m_paddr + M_BUFOFFSET(m) +
                            (m->m_data - M_BUFADDR(m));
                        break;
                }
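
                /*
                 * The backend receives a single address/length pair
                 * per request, so a packet can only be sent in place
                 * if it occupies one mbuf and does not cross a page
                 * boundary; anything else is first copied into a
                 * private tx buffer.
                 */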
                if (m->m_pkthdr.len != m->m_len ||
                    (pa ^ (pa + m->m_pkthdr.len)) & PG_FRAME) {
                        txbuf = SLIST_FIRST(&sc->sc_tx_bufs);
                        if (txbuf == NULL) {
                                /* printf("xennet: no tx bufs\n"); */
                                break;
                        }

                        MGETHDR(new_m, M_DONTWAIT, MT_DATA);
                        if (new_m == NULL) {
                                printf("xennet: no mbuf\n");
                                break;
                        }

                        SLIST_REMOVE_HEAD(&sc->sc_tx_bufs, xt_next);
                        IFQ_DEQUEUE(&ifp->if_snd, m);

                        KASSERT(m->m_flags & M_PKTHDR);
                        M_COPY_PKTHDR(new_m, m);
                        m_copydata(m, 0, m->m_pkthdr.len, txbuf->xt_buf);
                        MEXTADD(new_m, txbuf->xt_buf, m->m_pkthdr.len,
                            M_DEVBUF, xennet_tx_mbuf_free, txbuf);
                        new_m->m_ext.ext_paddr = txbuf->xt_pa;
                        new_m->m_len = new_m->m_pkthdr.len = m->m_pkthdr.len;

                        m_freem(m);
                        m = new_m;

                        pa = m->m_ext.ext_paddr +
                            (m->m_data - m->m_ext.ext_buf);
                } else
                        IFQ_DEQUEUE(&ifp->if_snd, m);

                bufid = get_bufarray_entry(sc->sc_tx_bufa);
                sc->sc_tx_bufa[bufid].xb_tx.xbtx_m = m;

                DPRINTFN(XEDB_MBUF, ("xennet_start id %d, mbuf %p, buf %p/%p, "
                    "size %d\n", bufid, m, mtod(m, void *),
                    (void *)pa, m->m_pkthdr.len));
#ifdef XENNET_DEBUG_DUMP
                xennet_hex_dump(mtod(m, u_char *), m->m_pkthdr.len, "s", bufid);
#endif

                txreq = &sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].req;
                txreq->id = bufid;
                txreq->addr = xpmap_ptom(pa);
                txreq->size = m->m_pkthdr.len;

                __insn_barrier();
                idx++;
                sc->sc_tx->req_prod = idx;

                sc->sc_tx_entries++; /* XXX atomic */

#ifdef XENNET_DEBUG
                DPRINTFN(XEDB_MEM, ("packet addr %p/%p, physical %p/%p, "
                    "m_paddr %p, len %d/%d\n", M_BUFADDR(m), mtod(m, void *),
                    (void *)*kvtopte(mtod(m, vaddr_t)),
                    (void *)xpmap_mtop(*kvtopte(mtod(m, vaddr_t))),
                    (void *)m->m_paddr, m->m_pkthdr.len, m->m_len));
#endif

#if NBPFILTER > 0
                /*
                 * Pass packet to bpf if there is a listener.
                 */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m);
#endif
        }

        ifp->if_flags &= ~IFF_OACTIVE;

        network_tx_buf_gc(sc);

        __insn_barrier();
        if (sc->sc_tx->resp_prod != idx)
                hypervisor_notify_via_evtchn(sc->sc_evtchn);

        ifp->if_timer = 5;

        ifp->if_opackets++;

        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start() done\n",
            sc->sc_dev.dv_xname));
}

int
xennet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct xennet_softc *sc = ifp->if_softc;
        struct ifaddr *ifa = (struct ifaddr *)data;
#ifdef mediacode
        struct ifreq *ifr = (struct ifreq *)data;
#endif
        int s, error = 0;

        s = splnet();

        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl()\n", sc->sc_dev.dv_xname));

        switch (cmd) {
        case SIOCSIFADDR:
                DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() SIOCSIFADDR\n",
                    sc->sc_dev.dv_xname));
                ifp->if_flags |= IFF_UP;
                switch (ifa->ifa_addr->sa_family) {
#ifdef INET
                case AF_INET:
                        xennet_init(sc);
                        arp_ifinit(ifp, ifa);
                        break;
#endif
                default:
                        xennet_init(sc);
                        break;
                }
                break;

        case SIOCSIFFLAGS:
                DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() SIOCSIFFLAGS\n",
                    sc->sc_dev.dv_xname));
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() SIOC*MULTI\n",
                    sc->sc_dev.dv_xname));
                break;

#ifdef mediacode
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() SIOC*IFMEDIA\n",
                    sc->sc_dev.dv_xname));
                error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
                break;
#endif

        default:
                DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl(0x%lx) unknown cmd\n",
                    sc->sc_dev.dv_xname, cmd));
                error = EINVAL;
                break;
        }

        splx(s);

        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() returning %d\n",
            sc->sc_dev.dv_xname, error));

        return error;
}

void
xennet_watchdog(struct ifnet *ifp)
{

        panic("xennet_watchdog\n");
}

void
xennet_init(struct xennet_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;

        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_init()\n", sc->sc_dev.dv_xname));

        if (ifp->if_flags & IFF_UP) {
                if ((ifp->if_flags & IFF_RUNNING) == 0)
                        xennet_reset(sc);

                ifp->if_flags |= IFF_RUNNING;
                ifp->if_flags &= ~IFF_OACTIVE;
                ifp->if_timer = 0;
        } else {
                ifp->if_flags &= ~IFF_RUNNING;
                xennet_reset(sc);
        }
}

void
xennet_reset(struct xennet_softc *sc)
{

        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_reset()\n", sc->sc_dev.dv_xname));
}

#ifdef mediacode
/*
 * Media change callback.
 */
static int
xennet_mediachange(struct ifnet *ifp)
{
        struct xennet_softc *sc = ifp->if_softc;

        switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
        case IFM_AUTO:
                break;
        default:
                return (1);
                break;
        }

        return (0);
}

/*
 * Media status callback.
 */
static void
xennet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct xennet_softc *sc = ifp->if_softc;

        if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_AUTO)
                ifmr->ifm_active = sc->sc_media.ifm_cur->ifm_data;

        ifmr->ifm_status &= ~IFM_AVALID;
}
#endif

int
xennet_bootstatic_callback(struct nfs_diskless *nd)
{
        struct ifnet *ifp = nd->nd_ifp;
        struct xennet_softc *sc = (struct xennet_softc *)ifp->if_softc;
        union xen_cmdline_parseinfo xcp;
        struct sockaddr_in *sin;

        memset(&xcp, 0, sizeof(xcp.xcp_netinfo));
        xcp.xcp_netinfo.xi_ifno = sc->sc_ifno;
        xcp.xcp_netinfo.xi_root = nd->nd_root.ndm_host;
        xen_parse_cmdline(XEN_PARSE_NETINFO, &xcp);

        nd->nd_myip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[0]);
        nd->nd_gwip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[2]);
        nd->nd_mask.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[3]);

        sin = (struct sockaddr_in *)&nd->nd_root.ndm_saddr;
        memset((caddr_t)sin, 0, sizeof(*sin));
        sin->sin_len = sizeof(*sin);
        sin->sin_family = AF_INET;
        sin->sin_addr.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[1]);

        return (NFS_BOOTSTATIC_HAS_MYIP|NFS_BOOTSTATIC_HAS_GWIP|
            NFS_BOOTSTATIC_HAS_MASK|NFS_BOOTSTATIC_HAS_SERVADDR|
            NFS_BOOTSTATIC_HAS_SERVER);
}

#ifdef XENNET_DEBUG_DUMP
#define XCHR(x) "0123456789abcdef"[(x) & 0xf]
static void
xennet_hex_dump(unsigned char *pkt, size_t len, char *type, int id)
{
        size_t i, j;

        printf("pkt %p len %d/%x type %s id %d\n", pkt, len, len, type, id);
        printf("00000000  ");
        for(i=0; i<len; i++) {
                printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i]));
                if ((i+1) % 16 == 8)
                        printf(" ");
                if ((i+1) % 16 == 0) {
                        printf(" %c", '|');
                        for(j=0; j<16; j++)
                                printf("%c", pkt[i-15+j]>=32 &&
                                    pkt[i-15+j]<127?pkt[i-15+j]:'.');
                        printf("%c\n%c%c%c%c%c%c%c%c  ", '|',
                            XCHR((i+1)>>28), XCHR((i+1)>>24),
                            XCHR((i+1)>>20), XCHR((i+1)>>16),
                            XCHR((i+1)>>12), XCHR((i+1)>>8),
                            XCHR((i+1)>>4), XCHR(i+1));
                }
        }
        printf("\n");
}
#undef XCHR
#endif