debuggers.hg

view netbsd-2.0-xen-sparse/sys/arch/xen/xen/if_xennet.c @ 2632:a4fbb98f00cb

bitkeeper revision 1.1159.1.202 (41616cc2-ciBh_VkJKwmQaCL6BEU6Q)

Merge labyrinth.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into labyrinth.cl.cam.ac.uk:/auto/anfs/scratch/labyrinth/iap10/xeno-clone/xeno.bk
author iap10@labyrinth.cl.cam.ac.uk
date Mon Oct 04 15:31:14 2004 +0000 (2004-10-04)
parents 86f3590030af aa75f00efa54
children 322541dd9041 fb095fda8aa2
line source
/*	$NetBSD: if_xennet.c,v 1.1.2.1 2004/05/22 15:58:29 he Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xennet.c,v 1.1.2.1 2004/05/22 15:58:29 he Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#ifdef mediacode
#include <net/if_media.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <nfs/rpcv2.h>

#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsdiskless.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#include <machine/xen.h>
#include <machine/hypervisor.h>
#include <machine/evtchn.h>
#include <machine/ctrl_if.h>

#include <machine/if_xennetvar.h>

#ifdef DEBUG
#define XENNET_DEBUG
#endif
#if defined(XENNET_DEBUG) && !defined(DEBUG)
#define DEBUG
#endif
/* #define XENNET_DEBUG_DUMP */

#ifdef XENNET_DEBUG
#define XEDB_FOLLOW	0x01
#define XEDB_INIT	0x02
#define XEDB_EVENT	0x04
#define XEDB_MBUF	0x08
#define XEDB_MEM	0x10
int xennet_debug = 0x0;
void printk(char *, ...);
#define DPRINTF(x) if (xennet_debug) printk x;
#define DPRINTFN(n,x) if (xennet_debug & (n)) printk x;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
#define PRINTF(x) printf x;
#ifdef XENNET_DEBUG_DUMP
static void xennet_hex_dump(unsigned char *, size_t, char *, int);
#endif

int  xennet_match(struct device *, struct cfdata *, void *);
void xennet_attach(struct device *, struct device *, void *);
static void xennet_ctrlif_rx(ctrl_msg_t *, unsigned long);
static int  xennet_driver_count_connected(void);
static void xennet_driver_status_change(netif_fe_driver_status_t *);
static void xennet_interface_status_change(netif_fe_interface_status_t *);
static void xennet_tx_mbuf_free(struct mbuf *, caddr_t, size_t, void *);
static void xennet_rx_mbuf_free(struct mbuf *, caddr_t, size_t, void *);
static int  xen_network_handler(void *);
static void network_tx_buf_gc(struct xennet_softc *);
static void network_alloc_rx_buffers(struct xennet_softc *);
static void network_alloc_tx_buffers(struct xennet_softc *);
void xennet_init(struct xennet_softc *);
void xennet_reset(struct xennet_softc *);
#ifdef mediacode
static int  xennet_mediachange(struct ifnet *);
static void xennet_mediastatus(struct ifnet *, struct ifmediareq *);
#endif

CFATTACH_DECL(xennet, sizeof(struct xennet_softc),
    xennet_match, xennet_attach, NULL, NULL);

#define TX_MAX_ENTRIES (NETIF_TX_RING_SIZE - 2)
#define RX_MAX_ENTRIES (NETIF_RX_RING_SIZE - 2)
#define TX_ENTRIES 128
#define RX_ENTRIES 128
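
/*
 * Scratch space for batching page-table updates and memory-reservation
 * changes into single multicalls on the receive path.  NB: these are
 * file-scope statics shared by every xennet instance, apparently
 * relying on the callers running at splnet or in interrupt context.
 */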
static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];

/** Network interface info. */
struct xennet_ctrl {
    /** Number of interfaces. */
    int xc_interfaces;
    /** Number of connected interfaces. */
    int xc_connected;
    /** Error code. */
    int xc_err;
    /** Driver status. */
    int xc_up;

    cfprint_t xc_cfprint;
    struct device *xc_parent;
};

static struct xennet_ctrl netctrl = { -1, 0, 0 };

#ifdef mediacode
static int xennet_media[] = {
    IFM_ETHER|IFM_AUTO,
};
static int nxennet_media = (sizeof(xennet_media)/sizeof(xennet_media[0]));
#endif

static int
xennet_wait_for_interfaces(void)
{

    while (netctrl.xc_interfaces != netctrl.xc_connected)
        HYPERVISOR_yield();
    return 0;
}

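/*
 * Probe-time handshake with the domain controller: xennet_scan()
 * registers a receiver for CMSG_NETIF_FE control messages and sends a
 * driver-UP notification; the controller answers with one
 * INTERFACE_STATUS message per interface, which arrives via
 * xennet_ctrlif_rx() and drives the attach/connect logic below.
 * xennet_scan_finish() then waits (above) until every interface has
 * reached BEST_CONNECTED.
 */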
int
xennet_scan(struct device *self, struct xennet_attach_args *xneta,
    cfprint_t print)
{
    ctrl_msg_t cmsg;
    netif_fe_driver_status_t st;

    if ((xen_start_info.flags & SIF_INITDOMAIN) ||
        (xen_start_info.flags & SIF_NET_BE_DOMAIN))
        return 0;

    netctrl.xc_parent = self;
    netctrl.xc_cfprint = print;

    printf("Initialising Xen virtual ethernet frontend driver.\n");

    (void)ctrl_if_register_receiver(CMSG_NETIF_FE, xennet_ctrlif_rx,
        CALLBACK_IN_BLOCKING_CONTEXT);

    /* Send a driver-UP notification to the domain controller. */
    cmsg.type = CMSG_NETIF_FE;
    cmsg.subtype = CMSG_NETIF_FE_DRIVER_STATUS;
    cmsg.length = sizeof(netif_fe_driver_status_t);
    st.status = NETIF_DRIVER_STATUS_UP;
    st.max_handle = 0;
    memcpy(cmsg.msg, &st, sizeof(st));
    ctrl_if_send_message_block(&cmsg, NULL, 0, 0);

    return 0;
}

void
xennet_scan_finish(struct device *parent)
{
    int err;

    err = xennet_wait_for_interfaces();
    if (err)
        ctrl_if_unregister_receiver(CMSG_NETIF_FE, xennet_ctrlif_rx);
}

int
xennet_match(struct device *parent, struct cfdata *match, void *aux)
{
    struct xennet_attach_args *xa = (struct xennet_attach_args *)aux;

    if (strcmp(xa->xa_device, "xennet") == 0)
        return 1;
    return 0;
}

void
xennet_attach(struct device *parent, struct device *self, void *aux)
{
    struct xennet_attach_args *xneta = (struct xennet_attach_args *)aux;
    struct xennet_softc *sc = (struct xennet_softc *)self;
    struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    int idx;

    aprint_normal(": Xen Virtual Network Interface\n");

    sc->sc_ifno = xneta->xa_handle;

    /* Initialize ifnet structure. */
    memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
    ifp->if_softc = sc;
    ifp->if_start = xennet_start;
    ifp->if_ioctl = xennet_ioctl;
    ifp->if_watchdog = xennet_watchdog;
    ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS;

#ifdef mediacode
    ifmedia_init(&sc->sc_media, 0, xennet_mediachange,
        xennet_mediastatus);
    for (idx = 0; idx < nxennet_media; idx++)
        ifmedia_add(&sc->sc_media, xennet_media[idx], 0, NULL);
    ifmedia_set(&sc->sc_media, xennet_media[0]);
#endif

    for (idx = 0; idx < NETIF_TX_RING_SIZE; idx++)
        sc->sc_tx_bufa[idx].xb_next = idx + 1;
    for (idx = 0; idx < NETIF_RX_RING_SIZE; idx++)
        sc->sc_rx_bufa[idx].xb_next = idx + 1;
}

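/*
 * Find the softc for the interface with the given handle.  xennet
 * devices are identified by walking the global device list and
 * matching on the attach function pointer.
 */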
static struct xennet_softc *
find_device(int handle)
{
    struct device *dv;
    struct xennet_softc *xs = NULL;

    for (dv = alldevs.tqh_first; dv != NULL; dv = dv->dv_list.tqe_next) {
        if (dv->dv_cfattach == NULL ||
            dv->dv_cfattach->ca_attach != xennet_attach)
            continue;
        xs = (struct xennet_softc *)dv;
        if (xs->sc_ifno == handle)
            break;
    }
    return dv ? xs : NULL;
}

static void
xennet_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
{
    int respond = 1;

    DPRINTFN(XEDB_EVENT, ("> ctrlif_rx=%d\n", msg->subtype));
    switch (msg->subtype) {
    case CMSG_NETIF_FE_INTERFACE_STATUS:
        if (msg->length != sizeof(netif_fe_interface_status_t))
            goto error;
        xennet_interface_status_change(
            (netif_fe_interface_status_t *)&msg->msg[0]);
        break;

    case CMSG_NETIF_FE_DRIVER_STATUS:
        if (msg->length != sizeof(netif_fe_driver_status_t))
            goto error;
        xennet_driver_status_change(
            (netif_fe_driver_status_t *)&msg->msg[0]);
        break;

    error:
    default:
        msg->length = 0;
        break;
    }

    if (respond)
        ctrl_if_send_response(msg);
}

static void
xennet_driver_status_change(netif_fe_driver_status_t *status)
{

    DPRINTFN(XEDB_EVENT, ("xennet_driver_status_change(%d)\n",
        status->status));

    netctrl.xc_up = status->status;
    xennet_driver_count_connected();
}

static int
xennet_driver_count_connected(void)
{
    struct device *dv;
    struct xennet_softc *xs = NULL;

    netctrl.xc_interfaces = netctrl.xc_connected = 0;
    for (dv = alldevs.tqh_first; dv != NULL; dv = dv->dv_list.tqe_next) {
        if (dv->dv_cfattach == NULL ||
            dv->dv_cfattach->ca_attach != xennet_attach)
            continue;
        xs = (struct xennet_softc *)dv;
        netctrl.xc_interfaces++;
        if (xs->sc_backend_state == BEST_CONNECTED)
            netctrl.xc_connected++;
    }

    return netctrl.xc_connected;
}

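/*
 * Per-interface state machine, driven by INTERFACE_STATUS control
 * messages.  On DISCONNECTED, a CLOSED interface allocates the two
 * shared ring pages and asks the controller to connect it, passing the
 * rings' machine frame numbers; on CONNECTED, the driver records the
 * MAC address, rebuilds the rings, binds the event channel and
 * attaches the ifnet.
 */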
static void
xennet_interface_status_change(netif_fe_interface_status_t *status)
{
    ctrl_msg_t cmsg;
    netif_fe_interface_connect_t up;
    struct xennet_softc *sc;
    struct ifnet *ifp;
    struct xennet_attach_args xneta;

    DPRINTFN(XEDB_EVENT, ("xennet_interface_status_change(%d,%d,%02x:%02x:%02x:%02x:%02x:%02x)\n",
        status->status,
        status->handle,
        status->mac[0], status->mac[1], status->mac[2],
        status->mac[3], status->mac[4], status->mac[5]));

    sc = find_device(status->handle);
    if (sc == NULL) {
        xneta.xa_device = "xennet";
        xneta.xa_handle = status->handle;
        config_found(netctrl.xc_parent, &xneta, netctrl.xc_cfprint);
        sc = find_device(status->handle);
        if (sc == NULL) {
            printf("Status change: invalid netif handle %u\n",
                status->handle);
            return;
        }
    }
    ifp = &sc->sc_ethercom.ec_if;

    DPRINTFN(XEDB_EVENT, ("xennet_interface_status_change(%d,%p,%02x:%02x:%02x:%02x:%02x:%02x)\n",
        status->handle, sc,
        status->mac[0], status->mac[1], status->mac[2],
        status->mac[3], status->mac[4], status->mac[5]));

    switch (status->status) {
    case NETIF_INTERFACE_STATUS_CLOSED:
        printf("Unexpected netif-CLOSED message in state %d\n",
            sc->sc_backend_state);
        break;

    case NETIF_INTERFACE_STATUS_DISCONNECTED:
#if 0
        if (sc->sc_backend_state != BEST_CLOSED) {
            printk("Unexpected netif-DISCONNECTED message"
                " in state %d\n", sc->sc_backend_state);
            printk("Attempting to reconnect network interface\n");

            /* Begin interface recovery.
             *
             * NB. Whilst we're recovering, we turn the
             * carrier state off.  We take measures to
             * ensure that this device isn't used for
             * anything.  We also stop the queue for this
             * device.  Various different approaches
             * (e.g. continuing to buffer packets) have
             * been tested but don't appear to improve the
             * overall impact on TCP connections.
             *
             * TODO: (MAW) Change the Xend<->Guest
             * protocol so that a recovery is initiated by
             * a special "RESET" message - disconnect
             * could just mean we're not allowed to use
             * this interface any more.
             */

            /* Stop old i/f to prevent errors whilst we
             * rebuild the state. */
            spin_lock_irq(&np->tx_lock);
            spin_lock(&np->rx_lock);
            netif_stop_queue(dev);
            np->backend_state = BEST_DISCONNECTED;
            spin_unlock(&np->rx_lock);
            spin_unlock_irq(&np->tx_lock);

            /* Free resources. */
            free_irq(np->irq, dev);
            unbind_evtchn_from_irq(np->evtchn);
            free_page((unsigned long)np->tx);
            free_page((unsigned long)np->rx);
        }
#endif

        if (sc->sc_backend_state == BEST_CLOSED) {
            /* Move from CLOSED to DISCONNECTED state. */
            sc->sc_tx = (netif_tx_interface_t *)
                uvm_km_valloc_align(kernel_map, PAGE_SIZE, PAGE_SIZE);
            if (sc->sc_tx == NULL)
                panic("netif: no tx va");
            sc->sc_rx = (netif_rx_interface_t *)
                uvm_km_valloc_align(kernel_map, PAGE_SIZE, PAGE_SIZE);
            if (sc->sc_rx == NULL)
                panic("netif: no rx va");
            sc->sc_pg_tx = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
            if (sc->sc_pg_tx == NULL) {
                panic("netif: no tx pages");
            }
            pmap_kenter_pa((vaddr_t)sc->sc_tx, VM_PAGE_TO_PHYS(sc->sc_pg_tx),
                VM_PROT_READ | VM_PROT_WRITE);
            sc->sc_pg_rx = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
            if (sc->sc_pg_rx == NULL) {
                panic("netif: no rx pages");
            }
            pmap_kenter_pa((vaddr_t)sc->sc_rx, VM_PAGE_TO_PHYS(sc->sc_pg_rx),
                VM_PROT_READ | VM_PROT_WRITE);
            sc->sc_backend_state = BEST_DISCONNECTED;
        }

        /* Construct an interface-CONNECT message for the
         * domain controller. */
        cmsg.type = CMSG_NETIF_FE;
        cmsg.subtype = CMSG_NETIF_FE_INTERFACE_CONNECT;
        cmsg.length = sizeof(netif_fe_interface_connect_t);
        up.handle = status->handle;
        up.tx_shmem_frame = xpmap_ptom(VM_PAGE_TO_PHYS(sc->sc_pg_tx)) >> PAGE_SHIFT;
        up.rx_shmem_frame = xpmap_ptom(VM_PAGE_TO_PHYS(sc->sc_pg_rx)) >> PAGE_SHIFT;
        memcpy(cmsg.msg, &up, sizeof(up));

        /* Tell the controller to bring up the interface. */
        ctrl_if_send_message_block(&cmsg, NULL, 0, 0);
        break;

    case NETIF_INTERFACE_STATUS_CONNECTED:
        if (sc->sc_backend_state == BEST_CLOSED) {
            printf("Unexpected netif-CONNECTED message"
                " in state %d\n", sc->sc_backend_state);
            break;
        }

        memcpy(sc->sc_enaddr, status->mac, ETHER_ADDR_LEN);
#if 0
        if (xen_start_info.flags & SIF_PRIVILEGED) {
            /* XXX for domain-0 change out ethernet address to be
             * different than the physical address since arp
             * replies from other domains will report the physical
             * address.
             */
            if (sc->sc_enaddr[0] != 0xaa)
                sc->sc_enaddr[0] = 0xaa;
            else
                sc->sc_enaddr[0] = 0xab;
        }
#endif

        /* Recovery procedure: */

        /* Step 1: Reinitialise variables. */
        sc->sc_rx_resp_cons = sc->sc_tx_resp_cons = /* sc->sc_tx_full = */ 0;
        sc->sc_rx->event = sc->sc_tx->event = 1;

        /* Step 2: Rebuild the RX and TX ring contents. */
        network_alloc_rx_buffers(sc);
        SLIST_INIT(&sc->sc_tx_bufs);
        network_alloc_tx_buffers(sc);

        /* Step 3: All public and private state should now be
         * sane.  Get ready to start sending and receiving
         * packets and give the driver domain a kick because
         * we've probably just requeued some packets.
         */
        sc->sc_backend_state = BEST_CONNECTED;
        __insn_barrier();
        hypervisor_notify_via_evtchn(status->evtchn);
        network_tx_buf_gc(sc);

        if_attach(ifp);
        ether_ifattach(ifp, sc->sc_enaddr);

        sc->sc_evtchn = status->evtchn;
        sc->sc_irq = bind_evtchn_to_irq(sc->sc_evtchn);
        event_set_handler(sc->sc_irq, &xen_network_handler, sc, IPL_NET);
        hypervisor_enable_irq(sc->sc_irq);
        xennet_driver_count_connected();

        aprint_normal("%s: MAC address %s\n", sc->sc_dev.dv_xname,
            ether_sprintf(sc->sc_enaddr));

#if NRND > 0
        rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
            RND_TYPE_NET, 0);
#endif
        break;

    default:
        printf("Status change to unknown value %d\n",
            status->status);
        break;
    }
    DPRINTFN(XEDB_EVENT, ("xennet_interface_status_change()\n"));
}

static void
xennet_tx_mbuf_free(struct mbuf *m, caddr_t buf, size_t size, void *arg)
{
    struct xennet_txbuf *txbuf = (struct xennet_txbuf *)arg;

    DPRINTFN(XEDB_MBUF, ("xennet_tx_mbuf_free %p pa %p\n", txbuf,
        (void *)txbuf->xt_pa));
    SLIST_INSERT_HEAD(&txbuf->xt_sc->sc_tx_bufs, txbuf, xt_next);
    pool_cache_put(&mbpool_cache, m);
}

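/*
 * Return a receive buffer to the backend.  Receive works by page
 * flipping: the buffer's page is unmapped, dropped from the
 * phys-to-machine table and handed back to Xen with
 * MEMOP_decrease_reservation; when a packet arrives, the backend flips
 * a machine page holding the data into this domain and reports its
 * machine address in the rx response (see xen_network_handler()).
 */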
static void
xennet_rx_push_buffer(struct xennet_softc *sc, int id)
{
    NETIF_RING_IDX ringidx;
    int nr_pfns;

    ringidx = sc->sc_rx->req_prod;
    nr_pfns = 0;

    DPRINTFN(XEDB_MEM, ("readding page va %p pa %p ma %p/%p to rx_ring "
        "at %d with id %d\n",
        (void *)sc->sc_rx_bufa[id].xb_rx.xbrx_va,
        (void *)sc->sc_rx_bufa[id].xb_rx.xbrx_pa,
        (void *)(PTE_BASE[x86_btop
            (sc->sc_rx_bufa[id].xb_rx.xbrx_va)] &
            PG_FRAME),
        (void *)xpmap_ptom(sc->sc_rx_bufa[id].xb_rx.xbrx_pa),
        ringidx, id));

    sc->sc_rx->ring[MASK_NETIF_RX_IDX(ringidx)].req.id = id;

    rx_pfn_array[nr_pfns] = xpmap_ptom(sc->sc_rx_bufa[id].xb_rx.xbrx_pa)
        >> PAGE_SHIFT;

    /* Remove this page from pseudo phys map before
     * passing back to Xen. */
    xpmap_phys_to_machine_mapping[(sc->sc_rx_bufa[id].xb_rx.xbrx_pa - XPMAP_OFFSET) >> PAGE_SHIFT] =
        INVALID_P2M_ENTRY;

    rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping;
    rx_mcl[nr_pfns].args[0] = sc->sc_rx_bufa[id].xb_rx.xbrx_va >> PAGE_SHIFT;
    rx_mcl[nr_pfns].args[1] = 0;
    rx_mcl[nr_pfns].args[2] = 0;

    nr_pfns++;

    sc->sc_rx_bufs_to_notify++;

    ringidx++;

    /*
     * We may have allocated buffers which have entries
     * outstanding in the page update queue -- make sure we flush
     * those first!
     */
    xpq_flush_queue();

    /* After all PTEs have been zapped we blow away stale TLB entries. */
    rx_mcl[nr_pfns-1].args[2] = UVMF_FLUSH_TLB;

    /* Give away a batch of pages. */
    rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
    rx_mcl[nr_pfns].args[0] = MEMOP_decrease_reservation;
    rx_mcl[nr_pfns].args[1] = (unsigned long)rx_pfn_array;
    rx_mcl[nr_pfns].args[2] = (unsigned long)nr_pfns;
    rx_mcl[nr_pfns].args[3] = 0;
    rx_mcl[nr_pfns].args[4] = DOMID_SELF;

    /* Zap PTEs and give away pages in one big multicall. */
    (void)HYPERVISOR_multicall(rx_mcl, nr_pfns+1);

    /* Check return status of HYPERVISOR_dom_mem_op(). */
    if (rx_mcl[nr_pfns].args[5] != nr_pfns)
        panic("Unable to reduce memory reservation\n");

    /* Above is a suitable barrier to ensure backend will see requests. */
    sc->sc_rx->req_prod = ringidx;
}

static void
xennet_rx_mbuf_free(struct mbuf *m, caddr_t buf, size_t size, void *arg)
{
    union xennet_bufarray *xb = (union xennet_bufarray *)arg;
    struct xennet_softc *sc = xb->xb_rx.xbrx_sc;
    int id = (xb - sc->sc_rx_bufa);

    DPRINTFN(XEDB_MBUF, ("xennet_rx_mbuf_free id %d, mbuf %p, buf %p, "
        "size %d\n", id, m, buf, size));

    xennet_rx_push_buffer(sc, id);

    pool_cache_put(&mbpool_cache, m);
}

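/*
 * Network interrupt handler.  For every new rx response: map the
 * machine page supplied by the backend at the buffer's va, fix up the
 * machine-to-phys and phys-to-machine tables in one multicall, then
 * hand the packet up either zero-copy via MEXTADD, or, if this was the
 * last posted rx buffer, by copying into fresh storage and re-posting
 * the buffer at once.
 */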
static int
xen_network_handler(void *arg)
{
    struct xennet_softc *sc = arg;
    struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    netif_rx_response_t *rx;
    paddr_t pa;
    NETIF_RING_IDX ringidx;
    mmu_update_t *mmu = rx_mmu;
    multicall_entry_t *mcl = rx_mcl;
    struct mbuf *m;

    network_tx_buf_gc(sc);

 again:
    for (ringidx = sc->sc_rx_resp_cons;
         ringidx != sc->sc_rx->resp_prod;
         ringidx++) {
        rx = &sc->sc_rx->ring[MASK_NETIF_RX_IDX(ringidx)].resp;

        if (rx->status < 0)
            panic("rx->status < 0");
        /* XXXcl check rx->status for error */

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL) {
            printf("xennet: rx no mbuf\n");
            break;
        }

        pa = sc->sc_rx_bufa[rx->id].xb_rx.xbrx_pa;

        DPRINTFN(XEDB_EVENT, ("rx event %d for id %d, size %d, "
            "status %d, ma %08lx, pa %08lx\n", ringidx,
            rx->id, rx->status, rx->status, rx->addr, pa));

        /* Remap the page. */
        mmu->ptr = (rx->addr & PG_FRAME) | MMU_MACHPHYS_UPDATE;
        mmu->val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
        mmu++;
        mcl->op = __HYPERVISOR_update_va_mapping;
        mcl->args[0] = sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va >> PAGE_SHIFT;
        mcl->args[1] = (rx->addr & PG_FRAME) | PG_V|PG_KW;
        mcl->args[2] = UVMF_FLUSH_TLB; // 0;
        mcl++;

        xpmap_phys_to_machine_mapping
            [(pa - XPMAP_OFFSET) >> PAGE_SHIFT] =
            rx->addr >> PAGE_SHIFT;

        /* Do all the remapping work, and M->P updates, in one
         * big hypercall. */
        if ((mcl - rx_mcl) != 0) {
            mcl->op = __HYPERVISOR_mmu_update;
            mcl->args[0] = (unsigned long)rx_mmu;
            mcl->args[1] = mmu - rx_mmu;
            mcl->args[2] = 0;
            mcl++;
            (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
        }
        if (0)
            printf("page mapped at va %08lx -> %08x/%08lx\n",
                sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va,
                PTE_BASE[x86_btop(sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va)],
                rx->addr);
        mmu = rx_mmu;
        mcl = rx_mcl;

        DPRINTFN(XEDB_MBUF, ("rx packet mbuf %p va %p pa %p/%p "
            "ma %p\n", m,
            (void *)sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va,
            (void *)(xpmap_mtop(PTE_BASE[x86_btop
                (sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va)] & PG_FRAME)), (void *)pa,
            (void *)(PTE_BASE[x86_btop
                (sc->sc_rx_bufa[rx->id].xb_rx.xbrx_va)] & PG_FRAME)));

        m->m_len = m->m_pkthdr.len = rx->status;
        m->m_pkthdr.rcvif = ifp;
        if (sc->sc_rx->req_prod != sc->sc_rx->resp_prod) {
            MEXTADD(m, (void *)(sc->sc_rx_bufa[rx->id].xb_rx.
                xbrx_va + (rx->addr & PAGE_MASK)), rx->status, M_DEVBUF,
                xennet_rx_mbuf_free,
                &sc->sc_rx_bufa[rx->id]);
        } else {
            /*
             * This was our last receive buffer, allocate
             * memory, copy data and push the receive
             * buffer back to the hypervisor.
             */
            MEXTMALLOC(m, rx->status, M_DONTWAIT);
            if ((m->m_flags & M_EXT) == 0) {
                printf("xennet: rx no mbuf 2\n");
                m_free(m);
                break;
            }
            memcpy(m->m_data, (void *)(sc->sc_rx_bufa[rx->id].
                xb_rx.xbrx_va + (rx->addr & PAGE_MASK)), rx->status);
            xennet_rx_push_buffer(sc, rx->id);
        }

#ifdef XENNET_DEBUG_DUMP
        xennet_hex_dump(mtod(m, u_char *), m->m_pkthdr.len, "r", rx->id);
#endif

#if NBPFILTER > 0
        /*
         * Pass packet to bpf if there is a listener.
         */
        if (ifp->if_bpf)
            bpf_mtap(ifp->if_bpf, m);
#endif

        ifp->if_ipackets++;

        /* Pass the packet up. */
        (*ifp->if_input)(ifp, m);
    }

    sc->sc_rx_resp_cons = ringidx;
    sc->sc_rx->event = sc->sc_rx_resp_cons + 1;

    if (sc->sc_rx->resp_prod != ringidx)
        goto again;

    return 0;
}

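/*
 * The tx/rx buffer arrays double as free lists: entry 0's xb_next
 * holds the index of the first free slot and each free slot's xb_next
 * names the following one (the chains are set up in xennet_attach()).
 */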
static inline int
get_bufarray_entry(union xennet_bufarray *a)
{
    int idx;

    idx = a[0].xb_next;
    a[0].xb_next = a[idx].xb_next;
    return idx;
}

static inline void
put_bufarray_entry(union xennet_bufarray *a, int idx)
{

    a[idx].xb_next = a[0].xb_next;
    a[0].xb_next = idx;
}

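/*
 * Reap completed transmit requests.  The next tx event is requested at
 * prod + (sc_tx_entries >> 1) + 1, i.e. only after about half of the
 * outstanding requests have completed, which throttles tx interrupts;
 * the do/while re-check closes the race with responses that arrive
 * while the event threshold is being moved.
 */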
static void
network_tx_buf_gc(struct xennet_softc *sc)
{
    struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    NETIF_RING_IDX idx, prod;

    do {
        prod = sc->sc_tx->resp_prod;

        for (idx = sc->sc_tx_resp_cons; idx != prod; idx++) {
            DPRINTFN(XEDB_EVENT, ("tx event at pos %d, status: "
                "%d, id: %d, mbuf %p, buf %p\n", idx,
                sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.status,
                sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.id,
                sc->sc_tx_bufa[sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.id].xb_tx.xbtx_m,
                mtod(sc->sc_tx_bufa[sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.id].xb_tx.xbtx_m, void *)));
            m_freem(sc->sc_tx_bufa[sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.id].xb_tx.xbtx_m);
            put_bufarray_entry(sc->sc_tx_bufa,
                sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].resp.id);
            sc->sc_tx_entries--; /* atomic */
        }

        sc->sc_tx_resp_cons = prod;

        /*
         * Set a new event, then check for race with update of
         * tx_cons.
         */
        sc->sc_tx->event = /* atomic */
            prod + (sc->sc_tx_entries >> 1) + 1;
        __insn_barrier();
    } while (prod != sc->sc_tx->resp_prod);

    if (sc->sc_tx->resp_prod == sc->sc_tx->req_prod)
        ifp->if_timer = 0;
    /* KDASSERT(sc->sc_net_idx->tx_req_prod == */
    /* TX_RING_ADD(sc->sc_net_idx->tx_resp_prod, sc->sc_tx_entries)); */
}

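/*
 * Refill the receive ring: allocate fresh pages, post them as rx
 * requests and give the underlying memory to Xen in one big multicall
 * (PTE zap, TLB flush, decrease_reservation).  Refill is skipped while
 * more than RX_MAX_ENTRIES/2 buffers are still outstanding.
 */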
static void
network_alloc_rx_buffers(struct xennet_softc *sc)
{
    vaddr_t rxpages, va;
    paddr_t pa;
    struct vm_page *pg;
    int id, nr_pfns;
    NETIF_RING_IDX ringidx;
    int s;

    ringidx = sc->sc_rx->req_prod;
    if ((ringidx - sc->sc_rx_resp_cons) > (RX_MAX_ENTRIES / 2))
        return;

    nr_pfns = 0;

    rxpages = uvm_km_valloc_align(kernel_map, RX_ENTRIES * PAGE_SIZE,
        PAGE_SIZE);

    s = splnet();
    for (va = rxpages; va < rxpages + RX_ENTRIES * PAGE_SIZE;
         va += PAGE_SIZE) {
        pg = uvm_pagealloc(NULL, 0, NULL, 0);
        if (pg == NULL)
            panic("network_alloc_rx_buffers: no pages");
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
            VM_PROT_READ | VM_PROT_WRITE);

        id = get_bufarray_entry(sc->sc_rx_bufa);
        sc->sc_rx_bufa[id].xb_rx.xbrx_va = va;
        sc->sc_rx_bufa[id].xb_rx.xbrx_sc = sc;

        pa = VM_PAGE_TO_PHYS(pg);
        DPRINTFN(XEDB_MEM, ("adding page va %p pa %p/%p "
            "ma %p/%p to rx_ring at %d with id %d\n", (void *)va,
            (void *)(VM_PAGE_TO_PHYS(pg) & PG_FRAME), (void *)xpmap_mtop(PTE_BASE[x86_btop(va)]),
            (void *)(PTE_BASE[x86_btop(va)] & PG_FRAME),
            (void *)xpmap_ptom(VM_PAGE_TO_PHYS(pg)),
            ringidx, id));
        sc->sc_rx_bufa[id].xb_rx.xbrx_pa = pa;
        sc->sc_rx->ring[MASK_NETIF_RX_IDX(ringidx)].req.id = id;

        rx_pfn_array[nr_pfns] = xpmap_ptom(pa) >> PAGE_SHIFT;

        /* Remove this page from pseudo phys map before
         * passing back to Xen. */
        xpmap_phys_to_machine_mapping[(pa - XPMAP_OFFSET) >> PAGE_SHIFT] =
            INVALID_P2M_ENTRY;

        rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping;
        rx_mcl[nr_pfns].args[0] = va >> PAGE_SHIFT;
        rx_mcl[nr_pfns].args[1] = 0;
        rx_mcl[nr_pfns].args[2] = 0;

        nr_pfns++;

        sc->sc_rx_bufs_to_notify++;

        ringidx++;
        if ((ringidx - sc->sc_rx_resp_cons) == RX_MAX_ENTRIES)
            break;
    }

    if (nr_pfns == 0) {
        splx(s);
        return;
    }

    /*
     * We may have allocated buffers which have entries
     * outstanding in the page update queue -- make sure we flush
     * those first!
     */
    xpq_flush_queue();

    /* After all PTEs have been zapped we blow away stale TLB entries. */
    rx_mcl[nr_pfns-1].args[2] = UVMF_FLUSH_TLB;

    /* Give away a batch of pages. */
    rx_mcl[nr_pfns].op = __HYPERVISOR_dom_mem_op;
    rx_mcl[nr_pfns].args[0] = MEMOP_decrease_reservation;
    rx_mcl[nr_pfns].args[1] = (unsigned long)rx_pfn_array;
    rx_mcl[nr_pfns].args[2] = (unsigned long)nr_pfns;
    rx_mcl[nr_pfns].args[3] = 0;
    rx_mcl[nr_pfns].args[4] = DOMID_SELF;

    /* Zap PTEs and give away pages in one big multicall. */
    (void)HYPERVISOR_multicall(rx_mcl, nr_pfns+1);

    /* Check return status of HYPERVISOR_dom_mem_op(). */
    if (rx_mcl[nr_pfns].args[5] != nr_pfns)
        panic("Unable to reduce memory reservation\n");

    /* Above is a suitable barrier to ensure backend will see requests. */
    sc->sc_rx->req_prod = ringidx;

    splx(s);
}

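/*
 * Carve the transmit bounce buffers out of whole pages, TXBUF_PER_PAGE
 * slots per page.  Each slot apparently starts with its struct
 * xennet_txbuf header followed by the data area: xt_pa is set to the
 * slot's physical address plus sizeof(struct xennet_txbuf), matching
 * the xt_buf pointer that xennet_start() copies packet data into.
 */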
static void
network_alloc_tx_buffers(struct xennet_softc *sc)
{
    vaddr_t txpages, va;
    struct vm_page *pg;
    struct xennet_txbuf *txbuf;
    int i;

    txpages = uvm_km_valloc_align(kernel_map,
        (TX_ENTRIES / TXBUF_PER_PAGE) * PAGE_SIZE, PAGE_SIZE);
    for (va = txpages;
         va < txpages + (TX_ENTRIES / TXBUF_PER_PAGE) * PAGE_SIZE;
         va += PAGE_SIZE) {
        pg = uvm_pagealloc(NULL, 0, NULL, 0);
        if (pg == NULL)
            panic("network_alloc_tx_buffers: no pages");
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
            VM_PROT_READ | VM_PROT_WRITE);

        for (i = 0; i < TXBUF_PER_PAGE; i++) {
            txbuf = (struct xennet_txbuf *)
                (va + i * (PAGE_SIZE / TXBUF_PER_PAGE));
            txbuf->xt_sc = sc;
            txbuf->xt_pa = VM_PAGE_TO_PHYS(pg) +
                i * (PAGE_SIZE / TXBUF_PER_PAGE) +
                sizeof(struct xennet_txbuf);
            SLIST_INSERT_HEAD(&sc->sc_tx_bufs, txbuf, xt_next);
        }
    }
}

/*
 * Called at splnet.
 */
void
xennet_start(struct ifnet *ifp)
{
    struct xennet_softc *sc = ifp->if_softc;
    struct mbuf *m, *new_m;
    struct xennet_txbuf *txbuf;
    netif_tx_request_t *txreq;
    NETIF_RING_IDX idx;
    paddr_t pa;
    int bufid;

    DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start()\n", sc->sc_dev.dv_xname));

#ifdef DIAGNOSTIC
    IFQ_POLL(&ifp->if_snd, m);
    if (m == 0)
        panic("%s: No packet to start", sc->sc_dev.dv_xname);
#endif

    if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
        return;

    idx = sc->sc_tx->req_prod;
    while (/*CONSTCOND*/1) {

        IFQ_POLL(&ifp->if_snd, m);
        if (m == NULL)
            break;

        switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
        case M_EXT|M_EXT_CLUSTER:
            pa = m->m_ext.ext_paddr +
                (m->m_data - m->m_ext.ext_buf);
            break;
        default:
        case 0:
            pa = m->m_paddr + M_BUFOFFSET(m) +
                (m->m_data - M_BUFADDR(m));
            break;
        }
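
        /*
         * A tx request must reference one contiguous range within a
         * single page: if the mbuf chain is fragmented, or the data
         * straddles a page boundary (the pa ^ (pa + len) test), copy
         * the packet into a private tx bounce buffer first.
         */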
        if (m->m_pkthdr.len != m->m_len ||
            (pa ^ (pa + m->m_pkthdr.len)) & PG_FRAME) {
            txbuf = SLIST_FIRST(&sc->sc_tx_bufs);
            if (txbuf == NULL) {
                // printf("xennet: no tx bufs\n");
                break;
            }

            MGETHDR(new_m, M_DONTWAIT, MT_DATA);
            if (new_m == NULL) {
                printf("xennet: no mbuf\n");
                break;
            }

            SLIST_REMOVE_HEAD(&sc->sc_tx_bufs, xt_next);
            IFQ_DEQUEUE(&ifp->if_snd, m);

            KASSERT(m->m_flags & M_PKTHDR);
            M_COPY_PKTHDR(new_m, m);
            m_copydata(m, 0, m->m_pkthdr.len, txbuf->xt_buf);
            MEXTADD(new_m, txbuf->xt_buf, m->m_pkthdr.len,
                M_DEVBUF, xennet_tx_mbuf_free, txbuf);
            new_m->m_ext.ext_paddr = txbuf->xt_pa;
            new_m->m_len = new_m->m_pkthdr.len = m->m_pkthdr.len;

            m_freem(m);
            m = new_m;

            pa = m->m_ext.ext_paddr +
                (m->m_data - m->m_ext.ext_buf);
        } else
            IFQ_DEQUEUE(&ifp->if_snd, m);

        bufid = get_bufarray_entry(sc->sc_tx_bufa);
        sc->sc_tx_bufa[bufid].xb_tx.xbtx_m = m;

        DPRINTFN(XEDB_MBUF, ("xennet_start id %d, mbuf %p, buf %p/%p, "
            "size %d\n", bufid, m, mtod(m, void *),
            (void *)pa, m->m_pkthdr.len));
#ifdef XENNET_DEBUG_DUMP
        xennet_hex_dump(mtod(m, u_char *), m->m_pkthdr.len, "s", bufid);
#endif

        txreq = &sc->sc_tx->ring[MASK_NETIF_TX_IDX(idx)].req;
        txreq->id = bufid;
        txreq->addr = xpmap_ptom(pa);
        txreq->size = m->m_pkthdr.len;

        __insn_barrier();
        idx++;
        sc->sc_tx->req_prod = idx;

        sc->sc_tx_entries++; /* XXX atomic */

#ifdef XENNET_DEBUG
        DPRINTFN(XEDB_MEM, ("packet addr %p/%p, physical %p/%p, "
            "m_paddr %p, len %d/%d\n", M_BUFADDR(m), mtod(m, void *),
            (void *)*kvtopte(mtod(m, vaddr_t)),
            (void *)xpmap_mtop(*kvtopte(mtod(m, vaddr_t))),
            (void *)m->m_paddr, m->m_pkthdr.len, m->m_len));
#endif

#if NBPFILTER > 0
        /*
         * Pass packet to bpf if there is a listener.
         */
        if (ifp->if_bpf)
            bpf_mtap(ifp->if_bpf, m);
#endif
    }

    ifp->if_flags &= ~IFF_OACTIVE;

    network_tx_buf_gc(sc);

    __insn_barrier();
    if (sc->sc_tx->resp_prod != idx)
        hypervisor_notify_via_evtchn(sc->sc_evtchn);

    ifp->if_timer = 5;

    ifp->if_opackets++;

    DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start() done\n",
        sc->sc_dev.dv_xname));
}

int
xennet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    struct xennet_softc *sc = ifp->if_softc;
    struct ifaddr *ifa = (struct ifaddr *)data;
#ifdef mediacode
    struct ifreq *ifr = (struct ifreq *)data;
#endif
    int s, error = 0;

    s = splnet();

    DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl()\n", sc->sc_dev.dv_xname));

    switch (cmd) {
    case SIOCSIFADDR:
        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() SIOCSIFADDR\n",
            sc->sc_dev.dv_xname));
        ifp->if_flags |= IFF_UP;
        switch (ifa->ifa_addr->sa_family) {
#ifdef INET
        case AF_INET:
            xennet_init(sc);
            arp_ifinit(ifp, ifa);
            break;
#endif
        default:
            xennet_init(sc);
            break;
        }
        break;

    case SIOCSIFFLAGS:
        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() SIOCSIFFLAGS\n",
            sc->sc_dev.dv_xname));
        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() SIOC*MULTI\n",
            sc->sc_dev.dv_xname));
        break;

#ifdef mediacode
    case SIOCGIFMEDIA:
    case SIOCSIFMEDIA:
        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() SIOC*IFMEDIA\n",
            sc->sc_dev.dv_xname));
        error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
        break;
#endif

    default:
        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl(0x%lx) unknown cmd\n",
            sc->sc_dev.dv_xname, cmd));
        error = EINVAL;
        break;
    }

    splx(s);

    DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() returning %d\n",
        sc->sc_dev.dv_xname, error));

    return error;
}

void
xennet_watchdog(struct ifnet *ifp)
{

    panic("xennet_watchdog\n");
}

void
xennet_init(struct xennet_softc *sc)
{
    struct ifnet *ifp = &sc->sc_ethercom.ec_if;

    DPRINTFN(XEDB_FOLLOW, ("%s: xennet_init()\n", sc->sc_dev.dv_xname));

    if (ifp->if_flags & IFF_UP) {
        if ((ifp->if_flags & IFF_RUNNING) == 0)
            xennet_reset(sc);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
        ifp->if_timer = 0;
    } else {
        ifp->if_flags &= ~IFF_RUNNING;
        xennet_reset(sc);
    }
}

void
xennet_reset(struct xennet_softc *sc)
{

    DPRINTFN(XEDB_FOLLOW, ("%s: xennet_reset()\n", sc->sc_dev.dv_xname));
}

#ifdef mediacode
/*
 * Media change callback.
 */
static int
xennet_mediachange(struct ifnet *ifp)
{
    struct xennet_softc *sc = ifp->if_softc;

    switch IFM_SUBTYPE(sc->sc_media.ifm_media) {
    case IFM_AUTO:
        break;
    default:
        return (1);
        break;
    }

    return (0);
}

/*
 * Media status callback.
 */
static void
xennet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct xennet_softc *sc = ifp->if_softc;

    if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_AUTO)
        ifmr->ifm_active = sc->sc_media.ifm_cur->ifm_data;

    ifmr->ifm_status &= ~IFM_AVALID;
}
#endif

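/*
 * NFS diskless boot support: instead of BOOTP/DHCP, the client,
 * server, gateway and netmask addresses are parsed out of the Xen
 * kernel command line via xen_parse_cmdline().
 */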
int
xennet_bootstatic_callback(struct nfs_diskless *nd)
{
    struct ifnet *ifp = nd->nd_ifp;
    struct xennet_softc *sc = (struct xennet_softc *)ifp->if_softc;
    union xen_cmdline_parseinfo xcp;
    struct sockaddr_in *sin;

    memset(&xcp, 0, sizeof(xcp.xcp_netinfo));
    xcp.xcp_netinfo.xi_ifno = sc->sc_ifno;
    xcp.xcp_netinfo.xi_root = nd->nd_root.ndm_host;
    xen_parse_cmdline(XEN_PARSE_NETINFO, &xcp);

    nd->nd_myip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[0]);
    nd->nd_gwip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[2]);
    nd->nd_mask.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[3]);

    sin = (struct sockaddr_in *)&nd->nd_root.ndm_saddr;
    memset((caddr_t)sin, 0, sizeof(*sin));
    sin->sin_len = sizeof(*sin);
    sin->sin_family = AF_INET;
    sin->sin_addr.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[1]);

    return (NFS_BOOTSTATIC_HAS_MYIP|NFS_BOOTSTATIC_HAS_GWIP|
        NFS_BOOTSTATIC_HAS_MASK|NFS_BOOTSTATIC_HAS_SERVADDR|
        NFS_BOOTSTATIC_HAS_SERVER);
}

#ifdef XENNET_DEBUG_DUMP
#define XCHR(x) "0123456789abcdef"[(x) & 0xf]
static void
xennet_hex_dump(unsigned char *pkt, size_t len, char *type, int id)
{
    size_t i, j;

    printf("pkt %p len %d/%x type %s id %d\n", pkt, len, len, type, id);
    printf("00000000 ");
    for (i = 0; i < len; i++) {
        printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i]));
        if ((i+1) % 16 == 8)
            printf(" ");
        if ((i+1) % 16 == 0) {
            printf(" %c", '|');
            for (j = 0; j < 16; j++)
                printf("%c", pkt[i-15+j] >= 32 &&
                    pkt[i-15+j] < 127 ? pkt[i-15+j] : '.');
            printf("%c\n%c%c%c%c%c%c%c%c ", '|',
                XCHR((i+1)>>28), XCHR((i+1)>>24),
                XCHR((i+1)>>20), XCHR((i+1)>>16),
                XCHR((i+1)>>12), XCHR((i+1)>>8),
                XCHR((i+1)>>4), XCHR(i+1));
        }
    }
    printf("\n");
}
#undef XCHR
#endif