debuggers.hg

view xen/drivers/net/tulip/interrupt.c @ 618:4480b471191c

bitkeeper revision 1.259.2.7 (3f0c428fGYxQAV_56B2hOOjYs1PF0A)

Port a bunch of network drivers for low-quality NICs (which will incur extra copying overhead within Xen), but this will at least allow us to work on a wider range of systems.
author kaf24@scramble.cl.cam.ac.uk
date Wed Jul 09 16:27:59 2003 +0000 (2003-07-09)
parents
children 125f43340354
line source
/*
	drivers/net/tulip/interrupt.c

	Maintained by Jeff Garzik <jgarzik@pobox.com>
	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
	for more information on this driver, or visit the project
	Web page at http://sourceforge.net/projects/tulip/

*/
#include "tulip.h"
#include <linux/config.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_NET_HW_FLOWCONTROL

#define MIT_SIZE 15
unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt
	   We use only RX mitigation; other techniques are used for
	   TX intr. mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,		/* IM disabled */
	0x80150000,	/* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000	/* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000	/* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
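
/* Editor's sketch, not part of the original driver: a decoder for the
   CSR11 field layout documented in the mit_table comment above.  For
   example, 0x80150000 decodes to cycle size 1, RX timer 1, RX pkts 2,
   CM 1, matching the table annotations. */
#if 0	/* illustration only */
static void mit_decode(u32 v)
{
	unsigned int cycle_size = (v >> 31) & 0x1;	/* bit 31 */
	unsigned int tx_timer   = (v >> 27) & 0xf;	/* bits 30:27 */
	unsigned int tx_pkts    = (v >> 24) & 0x7;	/* bits 26:24 */
	unsigned int rx_timer   = (v >> 20) & 0xf;	/* bits 23:20 */
	unsigned int rx_pkts    = (v >> 17) & 0x7;	/* bits 19:17 */
	unsigned int cm         = (v >> 16) & 0x1;	/* bit 16 */

	printk(KERN_DEBUG "IM: cs=%u txt=%u txp=%u rxt=%u rxp=%u cm=%u\n",
	       cycle_size, tx_timer, tx_pkts, rx_timer, rx_pkts, cm);
}
#endif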
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;	/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
			/* Rx stopped because it ran out of buffers;
			 * restart it.
			 */
			outl(0x01, dev->base_addr + CSR2);
		}
	}
	return refilled;
}
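
/* Editor's sketch, not part of the original driver: cur_rx and
   dirty_rx above are free-running counters; only the "% RX_RING_SIZE"
   maps them onto ring slots, so "cur_rx - dirty_rx" counts the slots
   awaiting refill even after the counters wrap, provided the
   difference stays below RX_RING_SIZE.  The same idiom in miniature
   (using unsigned arithmetic, which is well defined on wraparound): */
#if 0	/* illustration only */
static unsigned int ring_outstanding(unsigned int producer,
				     unsigned int consumer)
{
	/* Unsigned subtraction is modulo 2^32, so the count survives
	   counter wraparound. */
	return producer - consumer;
}
#endif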
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

#ifdef CONFIG_NET_HW_FLOWCONTROL
	int drop = 0, mit_sel = 0;
	/* One buffer is needed for mitigation activation; or it might be a
	   bug in the ring buffer code; check later -- JHS */

	if (rx_work_limit >= RX_RING_SIZE) rx_work_limit--;
#endif
	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

#ifdef CONFIG_NET_HW_FLOWCONTROL
			drop = atomic_read(&netdev_dropping);
			if (drop)
				goto throttle;
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08x %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_NET_HW_FLOWCONTROL
			mit_sel =
#endif
			netif_rx(skb);

#ifdef CONFIG_NET_HW_FLOWCONTROL
			switch (mit_sel) {
			case NET_RX_SUCCESS:
			case NET_RX_CN_LOW:
			case NET_RX_CN_MOD:
				break;

			case NET_RX_CN_HIGH:
				rx_work_limit -= NET_RX_CN_HIGH; /* additional */
				break;
			case NET_RX_DROP:
				rx_work_limit = -1;
				break;
			default:
				printk("unknown feedback return code %d\n", mit_sel);
				break;
			}

			drop = atomic_read(&netdev_dropping);
			if (drop) {
throttle:
				rx_work_limit = -1;
				mit_sel = NET_RX_DROP;

				if (tp->fc_bit) {
					long ioaddr = dev->base_addr;

					/* disable Rx & RxNoBuf ints. */
					outl(tulip_tbl[tp->chip_id].valid_intrs&RX_A_NBF_STOP, ioaddr + CSR7);
					set_bit(tp->fc_bit, &netdev_fc_xoff);
				}
			}
#endif
			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
#ifdef CONFIG_NET_HW_FLOWCONTROL

	/* We use this simplistic scheme for IM. It's proven by
	   real-life installations. We could have IM enabled
	   continuously, but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: we only use the min and max (0, 15) settings from mit_table. */
	if (tp->flags & HAS_INTR_MITIGATION) {
		if ((received > 1 || mit_sel == NET_RX_DROP)
		    && tp->mit_sel != 15) {
			tp->mit_sel = 15;
			tp->mit_change = 1; /* Force IM change */
		}
		if ((received <= 1 && mit_sel != NET_RX_DROP) && tp->mit_sel != 0) {
			tp->mit_sel = 0;
			tp->mit_change = 1; /* Force IM change */
		}
	}

	return RX_RING_SIZE+1; /* maxrx+1 */
#else
	return received;
#endif
}
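
/* Editor's sketch, not part of the original driver: the copybreak
   policy in tulip_rx() above in miniature.  Frames shorter than
   tulip_rx_copybreak are copied into a fresh, right-sized skb so the
   full PKT_BUF_SZ ring buffer stays in place; longer frames hand the
   ring skb up the stack and pay for a new full-sized allocation on
   refill instead. */
#if 0	/* illustration only */
static int copybreak_copy(int pkt_len, int copybreak)
{
	/* Nonzero when copying the payload is expected to be cheaper
	   than surrendering the ring buffer. */
	return pkt_len < copybreak;
}
#endif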
static inline void phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	int csr12 = inl(dev->base_addr + CSR12) & 0xff;
	struct tulip_private *tp = (struct tulip_private *)dev->priv;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		outl(csr12 | 0x02, dev->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		outl(csr12 & ~0x02, dev->base_addr + CSR12);
	}
#endif
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int csr5;
	int entry;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	unsigned int work_count = tulip_max_interrupt_work;

	/* Let's see whether the interrupt really is for us */
	csr5 = inl(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return;

	tp->nir++;

	do {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, inl(dev->base_addr + CSR5));

		if (csr5 & (RxIntr | RxNoBuf)) {
#ifdef CONFIG_NET_HW_FLOWCONTROL
			if ((!tp->fc_bit) ||
			    (!test_bit(tp->fc_bit, &netdev_fc_xoff)))
#endif
				rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}
				if (status & 0x8000) {
					/* There was a major error; log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				outl(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					outl(tp->mc_filter[0], ioaddr + 0xAC);
					outl(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
#ifdef CONFIG_NET_HW_FLOWCONTROL
				if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
					tp->stats.rx_errors++;
					tulip_start_rxtx(tp);
				}
#else
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
#endif
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
				       dev->name, tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
#ifdef CONFIG_NET_HW_FLOWCONTROL
			if (tp->fc_bit && (test_bit(tp->fc_bit, &netdev_fc_xoff)))
				if (net_ratelimit()) printk("BUG!! enabling interrupt when FC off (timerintr.)\n");
#endif
			outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			outl(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
#ifdef CONFIG_NET_HW_FLOWCONTROL
				if (tp->mit_change) {
					outl(mit_table[tp->mit_sel], ioaddr + CSR11);
					tp->mit_change = 0;
				}
#else
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				outl(0x8b240000, ioaddr + CSR11);
#endif
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				outl(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
#ifndef CONFIG_NET_HW_FLOWCONTROL
				outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				outl(0x0012, ioaddr + CSR11);
#endif
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = inl(ioaddr + CSR5);
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			outl(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
				outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
				     ioaddr + CSR7);
				outl(TimerInt, ioaddr + CSR5);
				outl(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}

	if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, inl(ioaddr + CSR5));
}
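
/* Editor's sketch, not part of the original driver: tulip_interrupt()
   above follows the classic bounded-work ISR shape -- acknowledge,
   service, re-read status, and give up after tulip_max_interrupt_work
   iterations so a storming device cannot monopolise the CPU.  Reduced
   to a skeleton, with hypothetical read_status()/ack()/service()
   helpers standing in for the CSR5 accesses: */
#if 0	/* illustration only */
static void bounded_work_isr(void)
{
	unsigned int budget = tulip_max_interrupt_work;
	u32 status = read_status();

	do {
		ack(status);		/* clear the sources we saw */
		service(status);	/* RX, TX completion, errors */
		if (--budget == 0)
			break;		/* defer the rest; a timer
					   interrupt brings us back */
		status = read_status();
	} while (status != 0);
}
#endif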