debuggers.hg

view xen/drivers/net/via-rhine.c @ 618:4480b471191c

bitkeeper revision 1.259.2.7 (3f0c428fGYxQAV_56B2hOOjYs1PF0A)

Port a bunch of network drivers for low-quality NICs (which will incur extra copying overheads within Xen). This will at least allow us to work on a wider range of systems.
author kaf24@scramble.cl.cam.ac.uk
date Wed Jul 09 16:27:59 2003 +0000 (2003-07-09)
parents
children 125f43340354
line source
1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 Written 1998-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is designed for the VIA VT86C100A Rhine-I.
13 It also works with the 6102 Rhine-II, and 6105/6105M Rhine-III.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
18 Annapolis MD 21403
21 This driver contains some changes from the original Donald Becker
22 version. He may or may not be interested in bug reports on this
23 code. You can find his versions at:
24 http://www.scyld.com/network/via-rhine.html
27 Linux kernel version history:
29 LK1.1.0:
30 - Jeff Garzik: softnet 'n stuff
32 LK1.1.1:
33 - Justin Guyett: softnet and locking fixes
34 - Jeff Garzik: use PCI interface
36 LK1.1.2:
37 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
39 LK1.1.3:
40 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
41 code) update "Theory of Operation" with
42 softnet/locking changes
43 - Dave Miller: PCI DMA and endian fixups
44 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
46 LK1.1.4:
47 - Urban Widmark: fix gcc 2.95.2 problem and
48 remove writel's to fixed address 0x7c
50 LK1.1.5:
51 - Urban Widmark: mdio locking, bounce buffer changes
52 merges from Becker's 1.05 version
53 added netif_running_on/off support
55 LK1.1.6:
56 - Urban Widmark: merges from Becker's 1.08b version (VT6102 + mdio)
57 set netif_running_on/off on startup, del_timer_sync
59 LK1.1.7:
60 - Manfred Spraul: added reset into tx_timeout
62 LK1.1.9:
63 - Urban Widmark: merges from Becker's 1.10 version
64 (media selection + eeprom reload)
65 - David Vrabel: merges from D-Link "1.11" version
66 (disable WOL and PME on startup)
68 LK1.1.10:
69 - Manfred Spraul: use "singlecopy" for unaligned buffers
70 don't allocate bounce buffers for !ReqTxAlign cards
72 LK1.1.11:
73 - David Woodhouse: Set dev->base_addr before the first time we call
74 wait_for_reset(). It's a lot happier that way.
75 Free np->tx_bufs only if we actually allocated it.
77 LK1.1.12:
78 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
80 LK1.1.13 (jgarzik):
81 - Add ethtool support
82 - Replace some MII-related magic numbers with constants
84 LK1.1.14 (Ivan G.):
85 - fixes comments for Rhine-III
86 - removes W_MAX_TIMEOUT (unused)
87 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
88 is R-I and has Davicom chip, flag is referenced in kernel driver)
89 - sends chip_id as a parameter to wait_for_reset since np is not
90 initialized on first call
91 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
92 for Rhine-III's (documentation says same bit is correct)
93 - transmit frame queue message is off by one - fixed
94 - adds IntrNormalSummary to "Something Wicked" exclusion list
95 so normal interrupts will not trigger the message (src: Donald Becker)
96 (Roger Luethi)
97 - show confused chip where to continue after Tx error
98 - location of collision counter is chip specific
99 - allow selecting backoff algorithm (module parameter)
101 LK1.1.15 (jgarzik):
102 - Use new MII lib helper generic_mii_ioctl
104 LK1.1.16 (Roger Luethi)
105 - Etherleak fix
106 - Handle Tx buffer underrun
107 - Fix bugs in full duplex handling
108 - New reset code uses "force reset" cmd on Rhine-II
109 - Various clean ups
111 LK1.1.17 (Roger Luethi)
112 - Fix race in via_rhine_start_tx()
113 - On errors, wait for Tx engine to turn off before scavenging
114 - Handle Tx descriptor write-back race on Rhine-II
115 - Force flushing for PCI posted writes
116 - More reset code changes
118 */
120 #define DRV_NAME "via-rhine"
121 #define DRV_VERSION "1.1.17"
122 #define DRV_RELDATE "March-1-2003"
124 #include <linux/module.h>
125 #include <linux/kernel.h>
126 #include <linux/string.h>
127 #include <linux/timer.h>
128 #include <linux/errno.h>
129 #include <linux/ioport.h>
130 #include <linux/slab.h>
131 #include <linux/interrupt.h>
132 #include <linux/pci.h>
133 #include <linux/netdevice.h>
134 #include <linux/etherdevice.h>
135 #include <linux/skbuff.h>
136 #include <linux/init.h>
137 #include <linux/delay.h>
138 #include <linux/mii.h>
139 #include <linux/ethtool.h>
140 #include <linux/crc32.h>
141 #include <asm/processor.h> /* Processor type for cache alignment. */
142 #include <asm/bitops.h>
143 #include <asm/io.h>
144 #include <asm/irq.h>
145 #include <asm/uaccess.h>
147 #undef RX_RING_SIZE
148 #undef TX_RING_SIZE
150 /* A few user-configurable values.
151 These may be modified when a driver module is loaded. */
153 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
154 static int max_interrupt_work = 20;
156 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
157 Setting to > 1518 effectively disables this feature. */
158 static int rx_copybreak;
160 /* Select a backoff algorithm (Ethernet capture effect) */
161 static int backoff;
163 /* Used to pass the media type, etc.
164 Both 'options[]' and 'full_duplex[]' should exist for driver
165 interoperability.
166 The media type is usually passed in 'options[]'.
167 The default is autonegotiation for speed and duplex.
168 This should rarely be overridden.
169 Use option values 0x10/0x20 for 10Mbps, 0x100/0x200 for 100Mbps.
170 Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
171 Use option values 0x20 and 0x200 for forcing full duplex operation.
172 */
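/* Editor's note: a hypothetical load example for the parameters declared
   below, using the option bit values documented above (not part of the
   original source):

       insmod via-rhine.o options=0x220,0 full_duplex=1 debug=2

   This would force the first card to 100Mbps full duplex, leave the second
   card on autonegotiation, and raise the message level to 2. */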
173 #define MAX_UNITS 8 /* More are supported, limit only on options */
174 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
175 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
177 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
178 The Rhine has a 64 element 8390-like hash table. */
179 static const int multicast_filter_limit = 32;
182 /* Operational parameters that are set at compile time. */
184 /* Keep the ring sizes a power of two for compile efficiency.
185 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
186 Making the Tx ring too large decreases the effectiveness of channel
187 bonding and packet priority.
188 There are no ill effects from too-large receive rings. */
189 #define TX_RING_SIZE 16
190 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
191 #define RX_RING_SIZE 16
194 /* Operational parameters that usually are not changed. */
196 /* Time in jiffies before concluding the transmitter is hung. */
197 #define TX_TIMEOUT (2*HZ)
199 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
201 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
202 #warning You must compile this file with the correct options!
203 #warning See the last lines of the source file.
204 #error You must compile this driver with "-O".
205 #endif
207 /* These identify the driver base version and may not be removed. */
208 static char version[] __devinitdata =
209 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
210 KERN_INFO " http://www.scyld.com/network/via-rhine.html\n";
212 static char shortname[] = DRV_NAME;
215 /* This driver was written to use PCI memory space, however most versions
216 of the Rhine only work correctly with I/O space accesses. */
217 #ifdef CONFIG_VIA_RHINE_MMIO
218 #define USE_MEM
219 #else
220 #define USE_IO
221 #undef readb
222 #undef readw
223 #undef readl
224 #undef writeb
225 #undef writew
226 #undef writel
227 #define readb inb
228 #define readw inw
229 #define readl inl
230 #define writeb outb
231 #define writew outw
232 #define writel outl
233 #endif
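/* Editor's note: an illustrative consequence of the USE_IO remapping above
   (assuming dev->base_addr holds the I/O port base in that mode): a generic
   accessor such as

       readb(dev->base_addr + ChipCmd)

   compiles to inb() on the port-I/O BAR, so the same register-access code
   below serves both the memory-mapped and the port-mapped configuration. */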
235 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
236 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
237 MODULE_LICENSE("GPL");
239 MODULE_PARM(max_interrupt_work, "i");
240 MODULE_PARM(debug, "i");
241 MODULE_PARM(rx_copybreak, "i");
242 MODULE_PARM(backoff, "i");
243 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
244 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
245 MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
246 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
247 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
248 MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm");
249 MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
250 MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");
252 /*
253 Theory of Operation
255 I. Board Compatibility
257 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
258 controller; it also supports the later Rhine-II and Rhine-III parts.
260 II. Board-specific settings
262 Boards with this chip are functional only in a bus-master PCI slot.
264 Many operational settings are loaded from the EEPROM to the Config word at
265 offset 0x78. For most of these settings, this driver assumes that they are
266 correct.
267 If this driver is compiled to use PCI memory space operations the EEPROM
268 must be configured to enable memory ops.
270 III. Driver operation
272 IIIa. Ring buffers
274 This driver uses two statically allocated fixed-size descriptor lists
275 formed into rings by a branch from the final descriptor to the beginning of
276 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
278 IIIb/c. Transmit/Receive Structure
280 This driver attempts to use a zero-copy receive and transmit scheme.
282 Alas, all data buffers are required to start on a 32 bit boundary, so
283 the driver must often copy transmit packets into bounce buffers.
285 The driver allocates full frame size skbuffs for the Rx ring buffers at
286 open() time and passes the skb->data field to the chip as receive data
287 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
288 a fresh skbuff is allocated and the frame is copied to the new skbuff.
289 When the incoming frame is larger, the skbuff is passed directly up the
290 protocol stack. Buffers consumed this way are replaced by newly allocated
291 skbuffs in the last phase of via_rhine_rx().
293 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
294 using a full-sized skbuff for small frames vs. the copying costs of larger
295 frames. New boards are typically used in generously configured machines
296 and the underfilled buffers have negligible impact compared to the benefit of
297 a single allocation size, so the default value of zero results in never
298 copying packets. When copying is done, the cost is usually mitigated by using
299 a combined copy/checksum routine. Copying also preloads the cache, which is
300 most useful with small frames.
302 Since the VIA chips are only able to transfer data to buffers on 32 bit
303 boundaries, the IP header at offset 14 in an ethernet frame isn't
304 longword aligned for further processing. Copying these unaligned buffers
305 has the beneficial effect of 16-byte aligning the IP header.
307 IIId. Synchronization
309 The driver runs as two independent, single-threaded flows of control. One
310 is the send-packet routine, which enforces single-threaded use by the
311 dev->priv->lock spinlock. The other thread is the interrupt handler, which
312 is single threaded by the hardware and interrupt handling software.
314 The send packet thread has partial control over the Tx ring. It locks the
315 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
316 is not available it stops the transmit queue by calling netif_stop_queue.
318 The interrupt handler has exclusive control over the Rx ring and records stats
319 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
320 empty by incrementing the dirty_tx mark. If at least half of the entries in
321 the Rx ring are available the transmit queue is woken up if it was stopped.
323 IV. Notes
325 IVb. References
327 Preliminary VT86C100A manual from http://www.via.com.tw/
328 http://www.scyld.com/expert/100mbps.html
329 http://www.scyld.com/expert/NWay.html
330 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
331 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
334 IVc. Errata
336 The VT86C100A manual is not reliable information.
337 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
338 in significant performance degradation for bounce buffer copies on transmit
339 and unaligned IP headers on receive.
340 The chip does not pad to minimum transmit length.
342 */
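/* Editor's note: a minimal sketch of the copy-only-tiny-frames receive path
   described in IIIb/c above, kept inside #if 0 so it is never compiled. The
   helper and its arguments are hypothetical; the real code in via_rhine_rx()
   additionally unmaps the DMA buffer and refills the ring. */
#if 0
static void example_rx_copybreak(struct net_device *dev,
				 struct sk_buff *ring_skb, int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		/* Small frame: copy it into a fresh skbuff so the full-sized
		   ring buffer can be reused immediately. */
		skb->dev = dev;
		skb_reserve(skb, 2);	/* 16-byte align the IP header */
		memcpy(skb_put(skb, pkt_len), ring_skb->tail, pkt_len);
	} else {
		/* Large frame: pass the ring skbuff itself up the stack. */
		skb = ring_skb;
		skb_put(skb, pkt_len);
	}
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}
#endif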
345 /* This table drives the PCI probe routines. It's mostly boilerplate in all
346 of the drivers, and will likely be provided by some future kernel.
347 Note the matching code -- the first table entry matches all 56** cards but
348 the second only the 1234 card.
349 */
351 enum pci_flags_bit {
352 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
353 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
354 };
356 enum via_rhine_chips {
357 VT86C100A = 0,
358 VT6102,
359 VT6105,
360 VT6105M
361 };
363 struct via_rhine_chip_info {
364 const char *name;
365 u16 pci_flags;
366 int io_size;
367 int drv_flags;
368 };
371 enum chip_capability_flags {
372 CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4,
373 ReqTxAlign=0x10, HasWOL=0x20, };
375 #ifdef USE_MEM
376 #define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
377 #else
378 #define RHINE_IOTYPE (PCI_USES_IO | PCI_USES_MASTER | PCI_ADDR0)
379 #endif
380 /* Beware of PCI posted writes: reading a chip register back forces any queued writes out to the device before we continue. */
381 #define IOSYNC do { readb(dev->base_addr + StationAddr); } while (0)
383 /* directly indexed by enum via_rhine_chips, above */
384 static struct via_rhine_chip_info via_rhine_chip_info[] __devinitdata =
385 {
386 { "VIA VT86C100A Rhine", RHINE_IOTYPE, 128,
387 CanHaveMII | ReqTxAlign | HasDavicomPhy },
388 { "VIA VT6102 Rhine-II", RHINE_IOTYPE, 256,
389 CanHaveMII | HasWOL },
390 { "VIA VT6105 Rhine-III", RHINE_IOTYPE, 256,
391 CanHaveMII | HasWOL },
392 { "VIA VT6105M Rhine-III", RHINE_IOTYPE, 256,
393 CanHaveMII | HasWOL },
394 };
396 static struct pci_device_id via_rhine_pci_tbl[] __devinitdata =
397 {
398 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT86C100A},
399 {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6102},
400 {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105},
401 {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105M},
402 {0,} /* terminate list */
403 };
404 MODULE_DEVICE_TABLE(pci, via_rhine_pci_tbl);
407 /* Offsets to the device registers. */
408 enum register_offsets {
409 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
410 IntrStatus=0x0C, IntrEnable=0x0E,
411 MulticastFilter0=0x10, MulticastFilter1=0x14,
412 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
413 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
414 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
415 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
416 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
417 StickyHW=0x83, IntrStatus2=0x84, WOLcrClr=0xA4, WOLcgClr=0xA7,
418 PwrcsrClr=0xAC,
419 };
421 /* Bits in ConfigD */
422 enum backoff_bits {
423 BackOptional=0x01, BackModify=0x02,
424 BackCaptureEffect=0x04, BackRandom=0x08
425 };
427 #ifdef USE_MEM
428 /* Registers we verify read back identically via MMIO and PIO. */
429 int mmio_verify_registers[] = {
430 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
431 0
432 };
433 #endif
435 /* Bits in the interrupt status/mask registers. */
436 enum intr_status_bits {
437 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
438 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
439 IntrPCIErr=0x0040,
440 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
441 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
442 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
443 IntrRxWakeUp=0x8000,
444 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
445 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
446 IntrTxErrSummary=0x082210,
447 };
449 /* The Rx and Tx buffer descriptors. */
450 struct rx_desc {
451 s32 rx_status;
452 u32 desc_length; /* Chain flag, Buffer/frame length */
453 u32 addr;
454 u32 next_desc;
455 };
456 struct tx_desc {
457 s32 tx_status;
458 u32 desc_length; /* Chain flag, Tx Config, Frame length */
459 u32 addr;
460 u32 next_desc;
461 };
463 /* Initial value for tx_desc.desc_length; the buffer size goes in bits 0-10 (e.g. TXDESC | 60 for a minimum-length frame) */
464 #define TXDESC 0x00e08000
466 enum rx_status_bits {
467 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
468 };
470 /* Bits in *_desc.*_status */
471 enum desc_status_bits {
472 DescOwn=0x80000000
473 };
475 /* Bits in ChipCmd. */
476 enum chip_cmd_bits {
477 CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
478 CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
479 CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
480 CmdNoTxPoll=0x0800, CmdReset=0x8000,
481 };
483 #define MAX_MII_CNT 4
484 struct netdev_private {
485 /* Descriptor rings */
486 struct rx_desc *rx_ring;
487 struct tx_desc *tx_ring;
488 dma_addr_t rx_ring_dma;
489 dma_addr_t tx_ring_dma;
491 /* The addresses of receive-in-place skbuffs. */
492 struct sk_buff *rx_skbuff[RX_RING_SIZE];
493 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
495 /* The saved address of a sent-in-place packet/buffer, for later free(). */
496 struct sk_buff *tx_skbuff[TX_RING_SIZE];
497 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
499 /* Tx bounce buffers */
500 unsigned char *tx_buf[TX_RING_SIZE];
501 unsigned char *tx_bufs;
502 dma_addr_t tx_bufs_dma;
504 struct pci_dev *pdev;
505 struct net_device_stats stats;
506 struct timer_list timer; /* Media monitoring timer. */
507 spinlock_t lock;
509 /* Frequently used values: keep some adjacent for cache effect. */
510 int chip_id, drv_flags;
511 struct rx_desc *rx_head_desc;
512 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
513 unsigned int cur_tx, dirty_tx;
514 unsigned int rx_buf_sz; /* Based on MTU+slack. */
515 u16 chip_cmd; /* Current setting for ChipCmd */
517 /* These values keep track of the transceiver/media in use. */
518 unsigned int default_port:4; /* Last dev->if_port value. */
519 u8 tx_thresh, rx_thresh;
521 /* MII transceiver section. */
522 unsigned char phys[MAX_MII_CNT]; /* MII device addresses. */
523 unsigned int mii_cnt; /* number of MIIs found, but only the first one is used */
524 u16 mii_status; /* last read MII status */
525 struct mii_if_info mii_if;
526 };
528 static int mdio_read(struct net_device *dev, int phy_id, int location);
529 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
530 static int via_rhine_open(struct net_device *dev);
531 static void via_rhine_check_duplex(struct net_device *dev);
532 static void via_rhine_timer(unsigned long data);
533 static void via_rhine_tx_timeout(struct net_device *dev);
534 static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
535 static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
536 static void via_rhine_tx(struct net_device *dev);
537 static void via_rhine_rx(struct net_device *dev);
538 static void via_rhine_error(struct net_device *dev, int intr_status);
539 static void via_rhine_set_rx_mode(struct net_device *dev);
540 static struct net_device_stats *via_rhine_get_stats(struct net_device *dev);
541 #if 0
542 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
543 #endif
544 static int via_rhine_close(struct net_device *dev);
546 static inline u32 get_intr_status(struct net_device *dev)
547 {
548 long ioaddr = dev->base_addr;
549 struct netdev_private *np = dev->priv;
550 u32 intr_status;
552 intr_status = readw(ioaddr + IntrStatus);
553 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
554 if (np->chip_id == VT6102)
555 intr_status |= readb(ioaddr + IntrStatus2) << 16;
556 return intr_status;
557 }
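/* Editor's note: a worked example of the mapping above -- the Tx descriptor
   write-back race is bit 3 (0x08) of IntrStatus2, and 0x08 << 16 == 0x080000,
   which is exactly the IntrTxDescRace value defined in the enum. */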
559 static void wait_for_reset(struct net_device *dev, int chip_id, char *name)
560 {
561 long ioaddr = dev->base_addr;
562 int boguscnt = 20;
564 IOSYNC;
566 if (readw(ioaddr + ChipCmd) & CmdReset) {
567 printk(KERN_INFO "%s: Reset not complete yet. "
568 "Trying harder.\n", name);
570 /* Rhine-II needs to be forced sometimes */
571 if (chip_id == VT6102)
572 writeb(0x40, ioaddr + MiscCmd);
574 /* VT86C100A may need long delay after reset (dlink) */
575 /* Seen on Rhine-II as well (rl) */
576 while ((readw(ioaddr + ChipCmd) & CmdReset) && --boguscnt)
577 udelay(5);
579 }
581 if (debug > 1)
582 printk(KERN_INFO "%s: Reset %s.\n", name,
583 boguscnt ? "succeeded" : "failed");
584 }
586 #ifdef USE_MEM
587 static void __devinit enable_mmio(long ioaddr, int chip_id)
588 {
589 int n;
590 if (chip_id == VT86C100A) {
591 /* More recent docs say that this bit is reserved ... */
592 n = inb(ioaddr + ConfigA) | 0x20;
593 outb(n, ioaddr + ConfigA);
594 } else {
595 n = inb(ioaddr + ConfigD) | 0x80;
596 outb(n, ioaddr + ConfigD);
597 }
598 }
599 #endif
601 static void __devinit reload_eeprom(long ioaddr)
602 {
603 int i;
604 outb(0x20, ioaddr + MACRegEEcsr);
605 /* Typically 2 cycles to reload. */
606 for (i = 0; i < 150; i++)
607 if (! (inb(ioaddr + MACRegEEcsr) & 0x20))
608 break;
609 }
611 static int __devinit via_rhine_init_one (struct pci_dev *pdev,
612 const struct pci_device_id *ent)
613 {
614 struct net_device *dev;
615 struct netdev_private *np;
616 int i, option;
617 int chip_id = (int) ent->driver_data;
618 static int card_idx = -1;
619 long ioaddr;
620 long memaddr;
621 int io_size;
622 int pci_flags;
623 #ifdef USE_MEM
624 long ioaddr0;
625 #endif
627 /* when built into the kernel, we only print version if device is found */
628 #ifndef MODULE
629 static int printed_version;
630 if (!printed_version++)
631 printk(version);
632 #endif
634 card_idx++;
635 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
636 io_size = via_rhine_chip_info[chip_id].io_size;
637 pci_flags = via_rhine_chip_info[chip_id].pci_flags;
639 if (pci_enable_device (pdev))
640 goto err_out;
642 /* this should always be supported */
643 if (pci_set_dma_mask(pdev, 0xffffffff)) {
644 printk(KERN_ERR "32-bit PCI DMA addresses not supported by the card!?\n");
645 goto err_out;
646 }
648 /* sanity check */
649 if ((pci_resource_len (pdev, 0) < io_size) ||
650 (pci_resource_len (pdev, 1) < io_size)) {
651 printk (KERN_ERR "Insufficient PCI resources, aborting\n");
652 goto err_out;
653 }
655 ioaddr = pci_resource_start (pdev, 0);
656 memaddr = pci_resource_start (pdev, 1);
658 if (pci_flags & PCI_USES_MASTER)
659 pci_set_master (pdev);
661 dev = alloc_etherdev(sizeof(*np));
662 if (dev == NULL) {
663 printk (KERN_ERR "init_ethernet failed for card #%d\n", card_idx);
664 goto err_out;
665 }
666 SET_MODULE_OWNER(dev);
668 if (pci_request_regions(pdev, shortname))
669 goto err_out_free_netdev;
671 #ifdef USE_MEM
672 ioaddr0 = ioaddr;
673 enable_mmio(ioaddr0, chip_id);
675 ioaddr = (long) ioremap (memaddr, io_size);
676 if (!ioaddr) {
677 printk (KERN_ERR "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
678 pdev->slot_name, io_size, memaddr);
679 goto err_out_free_res;
680 }
682 /* Check that selected MMIO registers match the PIO ones */
683 i = 0;
684 while (mmio_verify_registers[i]) {
685 int reg = mmio_verify_registers[i++];
686 unsigned char a = inb(ioaddr0+reg);
687 unsigned char b = readb(ioaddr+reg);
688 if (a != b) {
689 printk (KERN_ERR "MMIO do not match PIO [%02x] (%02x != %02x)\n",
690 reg, a, b);
691 goto err_out_unmap;
692 }
693 }
694 #endif
696 /* D-Link provided reset code (with comment additions) */
697 if (via_rhine_chip_info[chip_id].drv_flags & HasWOL) {
698 unsigned char byOrgValue;
700 /* clear sticky bit before reset & read ethernet address */
701 byOrgValue = readb(ioaddr + StickyHW);
702 byOrgValue = byOrgValue & 0xFC;
703 writeb(byOrgValue, ioaddr + StickyHW);
705 /* (bits written are cleared?) */
706 /* disable force PME-enable */
707 writeb(0x80, ioaddr + WOLcgClr);
708 /* disable power-event config bit */
709 writeb(0xFF, ioaddr + WOLcrClr);
710 /* clear power status (undocumented in vt6102 docs?) */
711 writeb(0xFF, ioaddr + PwrcsrClr);
712 }
714 /* Reset the chip to erase previous misconfiguration. */
715 writew(CmdReset, ioaddr + ChipCmd);
717 dev->base_addr = ioaddr;
718 wait_for_reset(dev, chip_id, shortname);
720 /* Reload the station address from the EEPROM. */
721 #ifdef USE_IO
722 reload_eeprom(ioaddr);
723 #else
724 reload_eeprom(ioaddr0);
725 /* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO.
726 If reload_eeprom() was done first this could be avoided, but it is
727 not known if that still works with the "win98-reboot" problem. */
728 enable_mmio(ioaddr0, chip_id);
729 #endif
731 for (i = 0; i < 6; i++)
732 dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
734 if (!is_valid_ether_addr(dev->dev_addr)) {
735 printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);
736 goto err_out_unmap;
737 }
739 if (chip_id == VT6102) {
740 /*
741 * for 3065D, EEPROM reloaded will cause bit 0 in MAC_REG_CFGA
742 * turned on. it makes MAC receive magic packet
743 * automatically. So, we turn it off. (D-Link)
744 */
745 writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
746 }
748 /* Select backoff algorithm */
749 if (backoff)
750 writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff),
751 ioaddr + ConfigD);
753 dev->irq = pdev->irq;
755 np = dev->priv;
756 spin_lock_init (&np->lock);
757 np->chip_id = chip_id;
758 np->drv_flags = via_rhine_chip_info[chip_id].drv_flags;
759 np->pdev = pdev;
760 np->mii_if.dev = dev;
761 np->mii_if.mdio_read = mdio_read;
762 np->mii_if.mdio_write = mdio_write;
763 np->mii_if.phy_id_mask = 0x1f;
764 np->mii_if.reg_num_mask = 0x1f;
766 if (dev->mem_start)
767 option = dev->mem_start;
769 /* The chip-specific entries in the device structure. */
770 dev->open = via_rhine_open;
771 dev->hard_start_xmit = via_rhine_start_tx;
772 dev->stop = via_rhine_close;
773 dev->get_stats = via_rhine_get_stats;
774 dev->set_multicast_list = via_rhine_set_rx_mode;
775 #if 0
776 dev->do_ioctl = netdev_ioctl;
777 #endif
778 dev->tx_timeout = via_rhine_tx_timeout;
779 dev->watchdog_timeo = TX_TIMEOUT;
780 #if 0
781 if (np->drv_flags & ReqTxAlign)
782 #endif
783 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
785 /* dev->name not defined before register_netdev()! */
786 i = register_netdev(dev);
787 if (i)
788 goto err_out_unmap;
790 /* The lower four bits are the media type. */
791 if (option > 0) {
792 if (option & 0x220)
793 np->mii_if.full_duplex = 1;
794 np->default_port = option & 15;
795 }
796 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
797 np->mii_if.full_duplex = 1;
799 if (np->mii_if.full_duplex) {
800 printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
801 " disabled.\n", dev->name);
802 np->mii_if.force_media = 1;
803 }
805 printk(KERN_INFO "%s: %s at 0x%lx, ",
806 dev->name, via_rhine_chip_info[chip_id].name,
807 (pci_flags & PCI_USES_IO) ? ioaddr : memaddr);
809 for (i = 0; i < 5; i++)
810 printk("%2.2x:", dev->dev_addr[i]);
811 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
813 pci_set_drvdata(pdev, dev);
815 if (np->drv_flags & CanHaveMII) {
816 int phy, phy_idx = 0;
817 np->phys[0] = 1; /* Standard for this chip. */
818 for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) {
819 int mii_status = mdio_read(dev, phy, 1);
820 if (mii_status != 0xffff && mii_status != 0x0000) {
821 np->phys[phy_idx++] = phy;
822 np->mii_if.advertising = mdio_read(dev, phy, 4);
823 printk(KERN_INFO "%s: MII PHY found at address %d, status "
824 "0x%4.4x advertising %4.4x Link %4.4x.\n",
825 dev->name, phy, mii_status, np->mii_if.advertising,
826 mdio_read(dev, phy, 5));
828 /* set IFF_RUNNING */
829 if (mii_status & BMSR_LSTATUS)
830 netif_carrier_on(dev);
831 else
832 netif_carrier_off(dev);
833 }
834 }
835 np->mii_cnt = phy_idx;
836 np->mii_if.phy_id = np->phys[0];
837 }
839 /* Allow forcing the media type. */
840 if (option > 0) {
841 if (option & 0x220)
842 np->mii_if.full_duplex = 1;
843 np->default_port = option & 0x3ff;
844 if (np->default_port & 0x330) {
845 /* FIXME: shouldn't someone check this variable? */
846 /* np->medialock = 1; */
847 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
848 (option & 0x300 ? 100 : 10),
849 (option & 0x220 ? "full" : "half"));
850 if (np->mii_cnt)
851 mdio_write(dev, np->phys[0], MII_BMCR,
852 ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
853 ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
854 }
855 }
857 alert_slow_netdevice(dev, (char *)via_rhine_chip_info[chip_id].name);
859 return 0;
861 err_out_unmap:
862 #ifdef USE_MEM
863 iounmap((void *)ioaddr);
864 err_out_free_res:
865 #endif
866 pci_release_regions(pdev);
867 err_out_free_netdev:
868 kfree (dev);
869 err_out:
870 return -ENODEV;
871 }
873 static int alloc_ring(struct net_device* dev)
874 {
875 struct netdev_private *np = dev->priv;
876 void *ring;
877 dma_addr_t ring_dma;
879 ring = pci_alloc_consistent(np->pdev,
880 RX_RING_SIZE * sizeof(struct rx_desc) +
881 TX_RING_SIZE * sizeof(struct tx_desc),
882 &ring_dma);
883 if (!ring) {
884 printk(KERN_ERR "Could not allocate DMA memory.\n");
885 return -ENOMEM;
886 }
887 if (np->drv_flags & ReqTxAlign) {
888 np->tx_bufs = pci_alloc_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
889 &np->tx_bufs_dma);
890 if (np->tx_bufs == NULL) {
891 pci_free_consistent(np->pdev,
892 RX_RING_SIZE * sizeof(struct rx_desc) +
893 TX_RING_SIZE * sizeof(struct tx_desc),
894 ring, ring_dma);
895 return -ENOMEM;
896 }
897 }
899 np->rx_ring = ring;
900 np->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
901 np->rx_ring_dma = ring_dma;
902 np->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
904 return 0;
905 }
907 void free_ring(struct net_device* dev)
908 {
908 {
909 struct netdev_private *np = dev->priv;
911 pci_free_consistent(np->pdev,
912 RX_RING_SIZE * sizeof(struct rx_desc) +
913 TX_RING_SIZE * sizeof(struct tx_desc),
914 np->rx_ring, np->rx_ring_dma);
915 np->tx_ring = NULL;
917 if (np->tx_bufs)
918 pci_free_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
919 np->tx_bufs, np->tx_bufs_dma);
921 np->tx_bufs = NULL;
923 }
925 static void alloc_rbufs(struct net_device *dev)
926 {
926 {
927 struct netdev_private *np = dev->priv;
928 dma_addr_t next;
929 int i;
931 np->dirty_rx = np->cur_rx = 0;
933 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
934 np->rx_head_desc = &np->rx_ring[0];
935 next = np->rx_ring_dma;
937 /* Init the ring entries */
938 for (i = 0; i < RX_RING_SIZE; i++) {
939 np->rx_ring[i].rx_status = 0;
940 np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
941 next += sizeof(struct rx_desc);
942 np->rx_ring[i].next_desc = cpu_to_le32(next);
943 np->rx_skbuff[i] = 0;
944 }
945 /* Mark the last entry as wrapping the ring. */
946 np->rx_ring[i-1].next_desc = cpu_to_le32(np->rx_ring_dma);
948 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
949 for (i = 0; i < RX_RING_SIZE; i++) {
950 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
951 np->rx_skbuff[i] = skb;
952 if (skb == NULL)
953 break;
954 skb->dev = dev; /* Mark as being used by this device. */
956 np->rx_skbuff_dma[i] =
957 pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
958 PCI_DMA_FROMDEVICE);
960 np->rx_ring[i].addr = cpu_to_le32(np->rx_skbuff_dma[i]);
961 np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
962 }
963 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
964 }
966 static void free_rbufs(struct net_device* dev)
967 {
967 {
968 struct netdev_private *np = dev->priv;
969 int i;
971 /* Free all the skbuffs in the Rx queue. */
972 for (i = 0; i < RX_RING_SIZE; i++) {
973 np->rx_ring[i].rx_status = 0;
974 np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
975 if (np->rx_skbuff[i]) {
976 pci_unmap_single(np->pdev,
977 np->rx_skbuff_dma[i],
978 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
979 dev_kfree_skb(np->rx_skbuff[i]);
980 }
981 np->rx_skbuff[i] = 0;
982 }
983 }
985 static void alloc_tbufs(struct net_device* dev)
986 {
986 {
987 struct netdev_private *np = dev->priv;
988 dma_addr_t next;
989 int i;
991 np->dirty_tx = np->cur_tx = 0;
992 next = np->tx_ring_dma;
993 for (i = 0; i < TX_RING_SIZE; i++) {
994 np->tx_skbuff[i] = 0;
995 np->tx_ring[i].tx_status = 0;
996 np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
997 next += sizeof(struct tx_desc);
998 np->tx_ring[i].next_desc = cpu_to_le32(next);
999 np->tx_buf[i] = &np->tx_bufs[i * PKT_BUF_SZ];
1000 }
1001 np->tx_ring[i-1].next_desc = cpu_to_le32(np->tx_ring_dma);
1002 }
1005 static void free_tbufs(struct net_device* dev)
1006 {
1007 struct netdev_private *np = dev->priv;
1008 int i;
1010 for (i = 0; i < TX_RING_SIZE; i++) {
1011 np->tx_ring[i].tx_status = 0;
1012 np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1013 np->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1014 if (np->tx_skbuff[i]) {
1015 if (np->tx_skbuff_dma[i]) {
1016 pci_unmap_single(np->pdev,
1017 np->tx_skbuff_dma[i],
1018 np->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
1019 }
1020 dev_kfree_skb(np->tx_skbuff[i]);
1021 }
1022 np->tx_skbuff[i] = 0;
1023 np->tx_buf[i] = 0;
1024 }
1025 }
1027 static void init_registers(struct net_device *dev)
1028 {
1029 struct netdev_private *np = dev->priv;
1030 long ioaddr = dev->base_addr;
1031 int i;
1033 for (i = 0; i < 6; i++)
1034 writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
1036 /* Initialize other registers. */
1037 writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1038 /* Configure initial FIFO thresholds. */
1039 writeb(0x20, ioaddr + TxConfig);
1040 np->tx_thresh = 0x20;
1041 np->rx_thresh = 0x60; /* Written in via_rhine_set_rx_mode(). */
1042 np->mii_if.full_duplex = 0;
1044 if (dev->if_port == 0)
1045 dev->if_port = np->default_port;
1047 writel(np->rx_ring_dma, ioaddr + RxRingPtr);
1048 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
1050 via_rhine_set_rx_mode(dev);
1052 /* Enable interrupts by setting the interrupt mask. */
1053 writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1054 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1055 IntrTxDone | IntrTxError | IntrTxUnderrun |
1056 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1057 ioaddr + IntrEnable);
1059 np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
1060 if (np->mii_if.force_media)
1061 np->chip_cmd |= CmdFDuplex;
1062 writew(np->chip_cmd, ioaddr + ChipCmd);
1064 via_rhine_check_duplex(dev);
1066 /* The LED outputs of various MII xcvrs should be configured. */
1067 /* For NS or Mison phys, turn on bit 1 in register 0x17 */
1068 /* For ESI phys, turn on bit 7 in register 0x17. */
1069 mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
1070 ((np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001)); /* parenthesized: | binds tighter than ?: */
1071 }
1072 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1074 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1075 {
1076 long ioaddr = dev->base_addr;
1077 int boguscnt = 1024;
1079 /* Wait for a previous command to complete. */
1080 while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
1081 ;
1082 writeb(0x00, ioaddr + MIICmd);
1083 writeb(phy_id, ioaddr + MIIPhyAddr);
1084 writeb(regnum, ioaddr + MIIRegAddr);
1085 writeb(0x40, ioaddr + MIICmd); /* Trigger read */
1086 boguscnt = 1024;
1087 while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
1088 ;
1089 return readw(ioaddr + MIIData);
1090 }
1092 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1093 {
1094 struct netdev_private *np = dev->priv;
1095 long ioaddr = dev->base_addr;
1096 int boguscnt = 1024;
1098 if (phy_id == np->phys[0]) {
1099 switch (regnum) {
1100 case MII_BMCR: /* Is user forcing speed/duplex? */
1101 if (value & 0x9000) /* Autonegotiation. */
1102 np->mii_if.force_media = 0;
1103 else
1104 np->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
1105 break;
1106 case MII_ADVERTISE:
1107 np->mii_if.advertising = value;
1108 break;
1109 }
1110 }
1112 /* Wait for a previous command to complete. */
1113 while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
1114 ;
1115 writeb(0x00, ioaddr + MIICmd);
1116 writeb(phy_id, ioaddr + MIIPhyAddr);
1117 writeb(regnum, ioaddr + MIIRegAddr);
1118 writew(value, ioaddr + MIIData);
1119 writeb(0x20, ioaddr + MIICmd); /* Trigger write. */
1120 }
1123 static int via_rhine_open(struct net_device *dev)
1124 {
1125 struct netdev_private *np = dev->priv;
1126 long ioaddr = dev->base_addr;
1127 int i;
1129 /* Reset the chip. */
1130 writew(CmdReset, ioaddr + ChipCmd);
1132 i = request_irq(np->pdev->irq, &via_rhine_interrupt, SA_SHIRQ, dev->name, dev);
1133 if (i)
1134 return i;
1136 if (debug > 1)
1137 printk(KERN_DEBUG "%s: via_rhine_open() irq %d.\n",
1138 dev->name, np->pdev->irq);
1140 i = alloc_ring(dev);
1141 if (i)
1142 return i;
1143 alloc_rbufs(dev);
1144 alloc_tbufs(dev);
1145 wait_for_reset(dev, np->chip_id, dev->name);
1146 init_registers(dev);
1147 if (debug > 2)
1148 printk(KERN_DEBUG "%s: Done via_rhine_open(), status %4.4x "
1149 "MII status: %4.4x.\n",
1150 dev->name, readw(ioaddr + ChipCmd),
1151 mdio_read(dev, np->phys[0], MII_BMSR));
1153 netif_start_queue(dev);
1155 /* Set the timer to check for link beat. */
1156 init_timer(&np->timer);
1157 np->timer.expires = jiffies + 2;
1158 np->timer.data = (unsigned long)dev;
1159 np->timer.function = &via_rhine_timer; /* timer handler */
1160 add_timer(&np->timer);
1162 return 0;
1163 }
1165 static void via_rhine_check_duplex(struct net_device *dev)
1166 {
1167 struct netdev_private *np = dev->priv;
1168 long ioaddr = dev->base_addr;
1169 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1170 int negotiated = mii_lpa & np->mii_if.advertising;
1171 int duplex;
1173 if (np->mii_if.force_media || mii_lpa == 0xffff)
1174 return;
1175 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
1176 if (np->mii_if.full_duplex != duplex) {
1177 np->mii_if.full_duplex = duplex;
1178 if (debug)
1179 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
1180 " partner capability of %4.4x.\n", dev->name,
1181 duplex ? "full" : "half", np->phys[0], mii_lpa);
1182 if (duplex)
1183 np->chip_cmd |= CmdFDuplex;
1184 else
1185 np->chip_cmd &= ~CmdFDuplex;
1186 writew(np->chip_cmd, ioaddr + ChipCmd);
1187 }
1188 }
1191 static void via_rhine_timer(unsigned long data)
1192 {
1193 struct net_device *dev = (struct net_device *)data;
1194 struct netdev_private *np = dev->priv;
1195 long ioaddr = dev->base_addr;
1196 int next_tick = 10*HZ;
1197 int mii_status;
1199 if (debug > 3) {
1200 printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
1201 dev->name, readw(ioaddr + IntrStatus));
1202 }
1204 spin_lock_irq (&np->lock);
1206 via_rhine_check_duplex(dev);
1208 /* make IFF_RUNNING follow the MII status bit "Link established" */
1209 mii_status = mdio_read(dev, np->phys[0], MII_BMSR);
1210 if ( (mii_status & BMSR_LSTATUS) != (np->mii_status & BMSR_LSTATUS) ) {
1211 if (mii_status & BMSR_LSTATUS)
1212 netif_carrier_on(dev);
1213 else
1214 netif_carrier_off(dev);
1215 }
1216 np->mii_status = mii_status;
1218 spin_unlock_irq (&np->lock);
1220 np->timer.expires = jiffies + next_tick;
1221 add_timer(&np->timer);
1222 }
1225 static void via_rhine_tx_timeout (struct net_device *dev)
1226 {
1227 struct netdev_private *np = dev->priv;
1228 long ioaddr = dev->base_addr;
1230 printk (KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1231 "%4.4x, resetting...\n",
1232 dev->name, readw (ioaddr + IntrStatus),
1233 mdio_read (dev, np->phys[0], MII_BMSR));
1235 dev->if_port = 0;
1237 /* protect against concurrent rx interrupts */
1238 disable_irq(np->pdev->irq);
1240 spin_lock(&np->lock);
1242 /* Reset the chip. */
1243 writew(CmdReset, ioaddr + ChipCmd);
1245 /* clear all descriptors */
1246 free_tbufs(dev);
1247 free_rbufs(dev);
1248 alloc_tbufs(dev);
1249 alloc_rbufs(dev);
1251 /* Reinitialize the hardware. */
1252 wait_for_reset(dev, np->chip_id, dev->name);
1253 init_registers(dev);
1255 spin_unlock(&np->lock);
1256 enable_irq(np->pdev->irq);
1258 dev->trans_start = jiffies;
1259 np->stats.tx_errors++;
1260 netif_wake_queue(dev);
1261 }
1263 static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1264 {
1265 struct netdev_private *np = dev->priv;
1266 unsigned entry;
1267 u32 intr_status;
1269 /* Caution: the write order is important here, set the field
1270 with the "ownership" bits last. */
1272 /* Calculate the next Tx descriptor entry. */
1273 entry = np->cur_tx % TX_RING_SIZE;
1275 if (skb->len < ETH_ZLEN) {
1276 #if 0
1277 skb = skb_padto(skb, ETH_ZLEN);
1278 if(skb == NULL)
1279 return 0;
1280 #else
1281 memset(np->tx_buf[entry], 0, ETH_ZLEN);
1282 #endif
1283 }
1285 np->tx_skbuff[entry] = skb;
1287 #if 0
1288 if ((np->drv_flags & ReqTxAlign) &&
1289 (((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)
1290 ) {
1291 #endif
1292 /* Must use alignment buffer. */
1293 if (skb->len > PKT_BUF_SZ) {
1294 /* packet too long, drop it */
1295 dev_kfree_skb(skb);
1296 np->tx_skbuff[entry] = NULL;
1297 np->stats.tx_dropped++;
1298 return 0;
1299 }
1300 #if 0
1301 skb_copy_and_csum_dev(skb, np->tx_buf[entry]);
1302 #else
1303 skb_copy_bits(skb, 0, np->tx_buf[entry], skb->len);
1304 #endif
1305 np->tx_skbuff_dma[entry] = 0;
1306 np->tx_ring[entry].addr = cpu_to_le32(np->tx_bufs_dma +
1307 (np->tx_buf[entry] - np->tx_bufs));
1308 #if 0
1309 } else {
1310 np->tx_skbuff_dma[entry] =
1311 pci_map_single(np->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
1312 np->tx_ring[entry].addr = cpu_to_le32(np->tx_skbuff_dma[entry]);
1313 }
1314 #endif
1316 np->tx_ring[entry].desc_length =
1317 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1319 /* lock eth irq */
1320 spin_lock_irq (&np->lock);
1321 wmb();
1322 np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1323 wmb();
1325 np->cur_tx++;
1327 /* Non-x86 Todo: explicitly flush cache lines here. */
1329 /*
1330 * Wake the potentially-idle transmit channel unless errors are
1331 * pending (the ISR must sort them out first).
1332 */
1333 intr_status = get_intr_status(dev);
1334 if ((intr_status & IntrTxErrSummary) == 0) {
1335 writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
1336 }
1337 IOSYNC;
1339 if (np->cur_tx == np->dirty_tx + TX_QUEUE_LEN)
1340 netif_stop_queue(dev);
1342 dev->trans_start = jiffies;
1344 spin_unlock_irq (&np->lock);
1346 if (debug > 4) {
1347 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1348 dev->name, np->cur_tx-1, entry);
1349 }
1350 return 0;
1351 }
1353 /* The interrupt handler does all of the Rx thread work and cleans up
1354 after the Tx thread. */
1355 static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1356 {
1357 struct net_device *dev = dev_instance;
1358 long ioaddr;
1359 u32 intr_status;
1360 int boguscnt = max_interrupt_work;
1362 ioaddr = dev->base_addr;
1364 while ((intr_status = get_intr_status(dev))) {
1365 /* Acknowledge all of the current interrupt sources ASAP. */
1366 if (intr_status & IntrTxDescRace)
1367 writeb(0x08, ioaddr + IntrStatus2);
1368 writew(intr_status & 0xffff, ioaddr + IntrStatus);
1369 IOSYNC;
1371 if (debug > 4)
1372 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1373 dev->name, intr_status);
1375 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1376 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
1377 via_rhine_rx(dev);
1379 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1380 if (intr_status & IntrTxErrSummary) {
1381 int cnt = 20;
1382 /* Avoid scavenging before Tx engine turned off */
1383 while ((readw(ioaddr+ChipCmd) & CmdTxOn) && --cnt)
1384 udelay(5);
1385 if (debug > 2 && !cnt)
1386 printk(KERN_WARNING "%s: via_rhine_interrupt() "
1387 "Tx engine still on.\n",
1388 dev->name);
1389 }
1390 via_rhine_tx(dev);
1391 }
1393 /* Abnormal error summary/uncommon events handlers. */
1394 if (intr_status & (IntrPCIErr | IntrLinkChange |
1395 IntrStatsMax | IntrTxError | IntrTxAborted |
1396 IntrTxUnderrun | IntrTxDescRace))
1397 via_rhine_error(dev, intr_status);
1399 if (--boguscnt < 0) {
1400 printk(KERN_WARNING "%s: Too much work at interrupt, "
1401 "status=%#8.8x.\n",
1402 dev->name, intr_status);
1403 break;
1404 }
1405 }
1407 if (debug > 3)
1408 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1409 dev->name, readw(ioaddr + IntrStatus));
1410 }
1412 /* This routine is logically part of the interrupt handler, but isolated
1413 for clarity. */
1414 static void via_rhine_tx(struct net_device *dev)
1415 {
1416 struct netdev_private *np = dev->priv;
1417 int txstatus = 0, entry = np->dirty_tx % TX_RING_SIZE;
1419 spin_lock (&np->lock);
1421 /* find and cleanup dirty tx descriptors */
1422 while (np->dirty_tx != np->cur_tx) {
1423 txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
1424 if (debug > 6)
1425 printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
1426 entry, txstatus);
1427 if (txstatus & DescOwn)
1428 break;
1429 if (txstatus & 0x8000) {
1430 if (debug > 1)
1431 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1432 dev->name, txstatus);
1433 np->stats.tx_errors++;
1434 if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
1435 if (txstatus & 0x0200) np->stats.tx_window_errors++;
1436 if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
1437 if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
1438 if (((np->chip_id == VT86C100A) && txstatus & 0x0002) ||
1439 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1440 np->stats.tx_fifo_errors++;
1441 np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1442 break; /* Keep the skb - we try again */
1443 }
1444 /* Transmitter restarted in 'abnormal' handler. */
1445 } else {
1446 if (np->chip_id == VT86C100A)
1447 np->stats.collisions += (txstatus >> 3) & 0x0F;
1448 else
1449 np->stats.collisions += txstatus & 0x0F;
1450 if (debug > 6)
1451 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1452 (txstatus >> 3) & 0xF,
1453 txstatus & 0xF);
1454 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1455 np->stats.tx_packets++;
1456 }
1457 /* Free the original skb. */
1458 if (np->tx_skbuff_dma[entry]) {
1459 pci_unmap_single(np->pdev,
1460 np->tx_skbuff_dma[entry],
1461 np->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1462 }
1463 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1464 np->tx_skbuff[entry] = NULL;
1465 entry = (++np->dirty_tx) % TX_RING_SIZE;
1466 }
1467 if ((np->cur_tx - np->dirty_tx) < TX_QUEUE_LEN - 4)
1468 netif_wake_queue (dev);
1470 spin_unlock (&np->lock);
1471 }
1473 /* This routine is logically part of the interrupt handler, but isolated
1474 for clarity and better register allocation. */
1475 static void via_rhine_rx(struct net_device *dev)
1476 {
1477 struct netdev_private *np = dev->priv;
1478 int entry = np->cur_rx % RX_RING_SIZE;
1479 int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1481 if (debug > 4) {
1482 printk(KERN_DEBUG "%s: via_rhine_rx(), entry %d status %8.8x.\n",
1483 dev->name, entry, le32_to_cpu(np->rx_head_desc->rx_status));
1484 }
1486 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1487 while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
1488 struct rx_desc *desc = np->rx_head_desc;
1489 u32 desc_status = le32_to_cpu(desc->rx_status);
1490 int data_size = desc_status >> 16;
1492 if (debug > 4)
1493 printk(KERN_DEBUG " via_rhine_rx() status is %8.8x.\n",
1494 desc_status);
1495 if (--boguscnt < 0)
1496 break;
1497 if ( (desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1498 if ((desc_status & RxWholePkt) != RxWholePkt) {
1499 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1500 "multiple buffers, entry %#x length %d status %8.8x!\n",
1501 dev->name, entry, data_size, desc_status);
1502 printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
1503 dev->name, np->rx_head_desc, &np->rx_ring[entry]);
1504 np->stats.rx_length_errors++;
1505 } else if (desc_status & RxErr) {
1506 /* There was an error. */
1507 if (debug > 2)
1508 printk(KERN_DEBUG " via_rhine_rx() Rx error was %8.8x.\n",
1509 desc_status);
1510 np->stats.rx_errors++;
1511 if (desc_status & 0x0030) np->stats.rx_length_errors++;
1512 if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
1513 if (desc_status & 0x0004) np->stats.rx_frame_errors++;
1514 if (desc_status & 0x0002) {
1515 /* this can also be updated outside the interrupt handler */
1516 spin_lock (&np->lock);
1517 np->stats.rx_crc_errors++;
1518 spin_unlock (&np->lock);
1519 }
1520 }
1521 } else {
1522 struct sk_buff *skb;
1523 /* Length should omit the CRC */
1524 int pkt_len = data_size - 4;
1526 /* Check if the packet is long enough to accept without copying
1527 to a minimally-sized skbuff. */
1528 if (pkt_len < rx_copybreak &&
1529 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1530 skb->dev = dev;
1531 skb_reserve(skb, 2); /* 16 byte align the IP header */
1532 pci_dma_sync_single(np->pdev, np->rx_skbuff_dma[entry],
1533 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1535 /* *_IP_COPYSUM isn't defined anywhere and eth_copy_and_sum
1536 is memcpy for all archs so this is kind of pointless right
1537 now ... or? */
1538 #if HAS_IP_COPYSUM /* Call copy + cksum if available. */
1539 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1540 skb_put(skb, pkt_len);
1541 #else
1542 memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
1543 pkt_len);
1544 #endif
1545 } else {
1546 skb = np->rx_skbuff[entry];
1547 if (skb == NULL) {
1548 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1549 dev->name);
1550 break;
1551 }
1552 np->rx_skbuff[entry] = NULL;
1553 skb_put(skb, pkt_len);
1554 pci_unmap_single(np->pdev, np->rx_skbuff_dma[entry],
1555 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1556 }
1557 skb->protocol = eth_type_trans(skb, dev);
1558 netif_rx(skb);
1559 dev->last_rx = jiffies;
1560 np->stats.rx_bytes += pkt_len;
1561 np->stats.rx_packets++;
1562 }
1563 entry = (++np->cur_rx) % RX_RING_SIZE;
1564 np->rx_head_desc = &np->rx_ring[entry];
1565 }
1567 /* Refill the Rx ring buffers. */
1568 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1569 struct sk_buff *skb;
1570 entry = np->dirty_rx % RX_RING_SIZE;
1571 if (np->rx_skbuff[entry] == NULL) {
1572 skb = dev_alloc_skb(np->rx_buf_sz);
1573 np->rx_skbuff[entry] = skb;
1574 if (skb == NULL)
1575 break; /* Better luck next round. */
1576 skb->dev = dev; /* Mark as being used by this device. */
1577 np->rx_skbuff_dma[entry] =
1578 pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
1579 PCI_DMA_FROMDEVICE);
1580 np->rx_ring[entry].addr = cpu_to_le32(np->rx_skbuff_dma[entry]);
1581 }
1582 np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1583 }
1585 /* Pre-emptively restart Rx engine. */
1586 writew(readw(dev->base_addr + ChipCmd) | CmdRxOn | CmdRxDemand,
1587 dev->base_addr + ChipCmd);
1588 }
1590 /* Clears the "tally counters" for CRC errors and missed frames(?).
1591 It has been reported that some chips need a write of 0 to clear
1592 these, for others the counters are set to 1 when written to and
1593 instead cleared when read. So we clear them both ways ... */
1594 static inline void clear_tally_counters(const long ioaddr)
1595 {
1596 writel(0, ioaddr + RxMissed);
1597 readw(ioaddr + RxCRCErrs);
1598 readw(ioaddr + RxMissed);
1599 }
1601 static void via_rhine_restart_tx(struct net_device *dev) {
1602 struct netdev_private *np = dev->priv;
1603 long ioaddr = dev->base_addr;
1604 int entry = np->dirty_tx % TX_RING_SIZE;
1605 u32 intr_status;
1607 /*
1608 * If new errors occurred, we need to sort them out before doing Tx.
1609 * In that case the ISR will be back here RSN anyway.
1610 */
1611 intr_status = get_intr_status(dev);
1613 if ((intr_status & IntrTxErrSummary) == 0) {
1615 /* We know better than the chip where it should continue. */
1616 writel(np->tx_ring_dma + entry * sizeof(struct tx_desc),
1617 ioaddr + TxRingPtr);
1619 writew(CmdTxDemand | np->chip_cmd, ioaddr + ChipCmd);
1620 IOSYNC;
1621 }
1622 else {
1623 /* This should never happen */
1624 if (debug > 1)
1625 printk(KERN_WARNING "%s: via_rhine_restart_tx() "
1626 "Another error occured %8.8x.\n",
1627 dev->name, intr_status);
1632 static void via_rhine_error(struct net_device *dev, int intr_status)
1633 {
1634 struct netdev_private *np = dev->priv;
1635 long ioaddr = dev->base_addr;
1637 spin_lock (&np->lock);
1639 if (intr_status & (IntrLinkChange)) {
1640 if (readb(ioaddr + MIIStatus) & 0x02) {
1641 /* Link failed, restart autonegotiation. */
1642 if (np->drv_flags & HasDavicomPhy)
1643 mdio_write(dev, np->phys[0], MII_BMCR, 0x3300);
1644 } else
1645 via_rhine_check_duplex(dev);
1646 if (debug)
1647 printk(KERN_ERR "%s: MII status changed: Autonegotiation "
1648 "advertising %4.4x partner %4.4x.\n", dev->name,
1649 mdio_read(dev, np->phys[0], MII_ADVERTISE),
1650 mdio_read(dev, np->phys[0], MII_LPA));
1651 }
1652 if (intr_status & IntrStatsMax) {
1653 np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1654 np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1655 clear_tally_counters(ioaddr);
1656 }
1657 if (intr_status & IntrTxAborted) {
1658 if (debug > 1)
1659 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1660 dev->name, intr_status);
1661 }
1662 if (intr_status & IntrTxUnderrun) {
1663 if (np->tx_thresh < 0xE0)
1664 writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
1665 if (debug > 1)
1666 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1667 "threshold now %2.2x.\n",
1668 dev->name, np->tx_thresh);
1669 }
1670 if (intr_status & IntrTxDescRace) {
1671 if (debug > 2)
1672 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
1673 dev->name);
1674 }
1675 if (intr_status & ( IntrTxAborted | IntrTxUnderrun | IntrTxDescRace ))
1676 via_rhine_restart_tx(dev);
1678 if (intr_status & ~( IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1679 IntrTxError | IntrTxAborted | IntrNormalSummary |
1680 IntrTxDescRace )) {
1681 if (debug > 1)
1682 printk(KERN_ERR "%s: Something Wicked happened! %8.8x.\n",
1683 dev->name, intr_status);
1684 }
1686 spin_unlock (&np->lock);
1687 }
1689 static struct net_device_stats *via_rhine_get_stats(struct net_device *dev)
1690 {
1691 struct netdev_private *np = dev->priv;
1692 long ioaddr = dev->base_addr;
1693 unsigned long flags;
1695 spin_lock_irqsave(&np->lock, flags);
1696 np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1697 np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1698 clear_tally_counters(ioaddr);
1699 spin_unlock_irqrestore(&np->lock, flags);
1701 return &np->stats;
1702 }
1704 static void via_rhine_set_rx_mode(struct net_device *dev)
1705 {
1706 struct netdev_private *np = dev->priv;
1707 long ioaddr = dev->base_addr;
1708 u32 mc_filter[2]; /* Multicast hash filter */
1709 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1711 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1712 /* Unconditionally log net taps. */
1713 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1714 rx_mode = 0x1C;
1715 } else if ((dev->mc_count > multicast_filter_limit)
1716 || (dev->flags & IFF_ALLMULTI)) {
1717 /* Too many to match, or accept all multicasts. */
1718 writel(0xffffffff, ioaddr + MulticastFilter0);
1719 writel(0xffffffff, ioaddr + MulticastFilter1);
1720 rx_mode = 0x0C;
1721 } else {
1722 struct dev_mc_list *mclist;
1723 int i;
1724 memset(mc_filter, 0, sizeof(mc_filter));
1725 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1726 i++, mclist = mclist->next) {
1727 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; /* top 6 CRC bits pick one of 64 hash bins */
1729 mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
1730 }
1731 writel(mc_filter[0], ioaddr + MulticastFilter0);
1732 writel(mc_filter[1], ioaddr + MulticastFilter1);
1733 rx_mode = 0x0C;
1734 }
1735 writeb(np->rx_thresh | rx_mode, ioaddr + RxConfig);
1736 }
1738 #if 0
1739 static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
1740 {
1741 struct netdev_private *np = dev->priv;
1742 u32 ethcmd;
1744 if (get_user(ethcmd, (u32 *)useraddr))
1745 return -EFAULT;
1747 switch (ethcmd) {
1748 case ETHTOOL_GDRVINFO: {
1749 struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1750 strcpy (info.driver, DRV_NAME);
1751 strcpy (info.version, DRV_VERSION);
1752 strcpy (info.bus_info, np->pdev->slot_name);
1753 if (copy_to_user (useraddr, &info, sizeof (info)))
1754 return -EFAULT;
1755 return 0;
1756 }
1758 /* get settings */
1759 case ETHTOOL_GSET: {
1760 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1761 if (!(np->drv_flags & CanHaveMII))
1762 break;
1763 spin_lock_irq(&np->lock);
1764 mii_ethtool_gset(&np->mii_if, &ecmd);
1765 spin_unlock_irq(&np->lock);
1766 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
1767 return -EFAULT;
1768 return 0;
1769 }
1770 /* set settings */
1771 case ETHTOOL_SSET: {
1772 int r;
1773 struct ethtool_cmd ecmd;
1774 if (!(np->drv_flags & CanHaveMII))
1775 break;
1776 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1777 return -EFAULT;
1778 spin_lock_irq(&np->lock);
1779 r = mii_ethtool_sset(&np->mii_if, &ecmd);
1780 spin_unlock_irq(&np->lock);
1781 return r;
1782 }
1783 /* restart autonegotiation */
1784 case ETHTOOL_NWAY_RST: {
1785 if (!(np->drv_flags & CanHaveMII))
1786 break;
1787 return mii_nway_restart(&np->mii_if);
1788 }
1789 /* get link status */
1790 case ETHTOOL_GLINK: {
1791 struct ethtool_value edata = {ETHTOOL_GLINK};
1792 if (!(np->drv_flags & CanHaveMII))
1793 break;
1794 edata.data = mii_link_ok(&np->mii_if);
1795 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1796 return -EFAULT;
1797 return 0;
1798 }
1800 /* get message-level */
1801 case ETHTOOL_GMSGLVL: {
1802 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
1803 edata.data = debug;
1804 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1805 return -EFAULT;
1806 return 0;
1807 }
1808 /* set message-level */
1809 case ETHTOOL_SMSGLVL: {
1810 struct ethtool_value edata;
1811 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1812 return -EFAULT;
1813 debug = edata.data;
1814 return 0;
1815 }
1816 default:
1817 break;
1818 }
1820 return -EOPNOTSUPP;
1821 }
1823 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1824 {
1825 struct netdev_private *np = dev->priv;
1826 struct mii_ioctl_data *data = (struct mii_ioctl_data *) & rq->ifr_data;
1827 int rc;
1829 if (!netif_running(dev))
1830 return -EINVAL;
1832 if (cmd == SIOCETHTOOL)
1833 rc = netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
1835 else {
1836 spin_lock_irq(&np->lock);
1837 rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1838 spin_unlock_irq(&np->lock);
1839 }
1841 return rc;
1842 }
1843 #endif
1845 static int via_rhine_close(struct net_device *dev)
1846 {
1847 long ioaddr = dev->base_addr;
1848 struct netdev_private *np = dev->priv;
1850 del_timer_sync(&np->timer);
1852 spin_lock_irq(&np->lock);
1854 netif_stop_queue(dev);
1856 if (debug > 1)
1857 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1858 dev->name, readw(ioaddr + ChipCmd));
1860 /* Switch to loopback mode to avoid hardware races. */
1861 writeb(np->tx_thresh | 0x02, ioaddr + TxConfig);
1863 /* Disable interrupts by clearing the interrupt mask. */
1864 writew(0x0000, ioaddr + IntrEnable);
1866 /* Stop the chip's Tx and Rx processes. */
1867 writew(CmdStop, ioaddr + ChipCmd);
1869 spin_unlock_irq(&np->lock);
1871 free_irq(np->pdev->irq, dev);
1872 free_rbufs(dev);
1873 free_tbufs(dev);
1874 free_ring(dev);
1876 return 0;
1877 }
1880 static void __devexit via_rhine_remove_one (struct pci_dev *pdev)
1881 {
1882 struct net_device *dev = pci_get_drvdata(pdev);
1884 unregister_netdev(dev);
1886 pci_release_regions(pdev);
1888 #ifdef USE_MEM
1889 iounmap((char *)(dev->base_addr));
1890 #endif
1892 kfree(dev);
1893 pci_disable_device(pdev);
1894 pci_set_drvdata(pdev, NULL);
1895 }
1898 static struct pci_driver via_rhine_driver = {
1899 .name = "via-rhine",
1900 .id_table = via_rhine_pci_tbl,
1901 .probe = via_rhine_init_one,
1902 .remove = __devexit_p(via_rhine_remove_one),
1903 };
1906 static int __init via_rhine_init (void)
1907 {
1908 /* when a module, this is printed whether or not devices are found in probe */
1909 #ifdef MODULE
1910 printk(version);
1911 #endif
1912 return pci_module_init (&via_rhine_driver);
1913 }
1916 static void __exit via_rhine_cleanup (void)
1917 {
1918 pci_unregister_driver (&via_rhine_driver);
1919 }
1922 module_init(via_rhine_init);
1923 module_exit(via_rhine_cleanup);
1926 /*
1927 * Local variables:
1928 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1929 * c-indent-level: 4
1930 * c-basic-offset: 4
1931 * tab-width: 4
1932 * End:
1933 */