patch-2.4.18 linux/drivers/net/dl2k.c
- Lines: 493
- Date: Mon Jan 21 17:13:47 2002
- Orig file: linux.orig/drivers/net/dl2k.c
- Orig date: Mon Feb 18 20:18:39 2002
diff -Naur -X /home/marcelo/lib/dontdiff linux.orig/drivers/net/dl2k.c linux/drivers/net/dl2k.c
@@ -15,18 +15,24 @@
0.01 2001/05/03 Created DL2000-based linux driver
0.02 2001/05/21 Added VLAN and hardware checksum support.
1.00 2001/06/26 Added jumbo frame support.
- 1.01 2001/08/21 Added two parameters, int_count and int_timeout.
+ 1.01 2001/08/21 Added two parameters, rx_coalesce and rx_timeout.
1.02 2001/10/08 Supported fiber media.
Added flow control parameters.
- 1.03 2001/10/12 Changed the default media to 1000mbps_fd for the
- fiber devices.
- 1.04 2001/11/08 Fixed a bug which Tx stop when a very busy case.
-*/
+ 1.03 2001/10/12 Changed the default media to 1000mbps_fd for
+ the fiber devices.
+ 1.04 2001/11/08 Fixed Tx stopped when tx very busy.
+ 1.05 2001/11/22 Fixed Tx stopped when unidirectional tx busy.
+ 1.06 2001/12/13 Fixed disconnect bug at 10Mbps mode.
+ Fixed tx_full flag incorrect.
+ Added tx_coalesce paramter.
+ 1.07 2002/01/03 Fixed miscount of RX frame error.
+ 1.08 2002/01/17 Fixed the multicast bug.
+ */
#include "dl2k.h"
static char version[] __devinitdata =
- KERN_INFO "D-Link DL2000-based linux driver v1.04 2001/11/08\n";
+ KERN_INFO "D-Link DL2000-based linux driver v1.08 2002/01/17\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
@@ -36,11 +42,13 @@
static int tx_flow[MAX_UNITS];
static int rx_flow[MAX_UNITS];
static int copy_thresh;
-static int int_count; /* Rx frame count each interrupt */
-static int int_timeout; /* Rx DMA wait time in 64ns increments */
+static int rx_coalesce = DEFAULT_RXC;
+static int rx_timeout = DEFAULT_RXT;
+static int tx_coalesce = DEFAULT_TXC;
MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
+MODULE_LICENSE("GPL");
MODULE_PARM (mtu, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (media, "1-" __MODULE_STRING (MAX_UNITS) "s");
MODULE_PARM (vlan, "1-" __MODULE_STRING (MAX_UNITS) "i");
@@ -48,13 +56,16 @@
MODULE_PARM (tx_flow, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (rx_flow, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (copy_thresh, "i");
-MODULE_PARM (int_count, "i");
-MODULE_PARM (int_timeout, "i");
+MODULE_PARM (rx_coalesce, "i"); /* Rx frame count each interrupt */
+MODULE_PARM (rx_timeout, "i"); /* Rx DMA wait time in 64ns increments */
+MODULE_PARM (tx_coalesce, "i"); /* HW xmit count each TxComplete [1-8] */
+
/* Enable the default interrupts */
+#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxComplete| \
+ UpdateStats | LinkEvent)
#define EnableInt() \
-writew(RxDMAComplete | HostError | IntRequested | TxComplete| \
- UpdateStats | LinkEvent, ioaddr + IntEnable)
+writew(DEFAULT_INTR, ioaddr + IntEnable)
static int max_intrloop = 50;
static int multicast_filter_limit = 0x40;
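
The hunk above collects the interrupt sources the driver actually services into a single DEFAULT_INTR mask, so the same constant drives both the IntEnable write and the status filtering in rio_interrupt() further down. A minimal user-space sketch of that pattern, with placeholder bit values standing in for the real definitions in dl2k.h:

    #include <stdio.h>

    /* Placeholder bit positions; the real values live in dl2k.h. */
    enum {
        TxComplete    = 1 << 1,
        UpdateStats   = 1 << 4,
        LinkEvent     = 1 << 5,
        RxDMAComplete = 1 << 6,
        HostError     = 1 << 7,
        IntRequested  = 1 << 8,
    };

    #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxComplete | \
                          UpdateStats | LinkEvent)

    int main(void)
    {
        unsigned int int_status = 0x01c3;   /* pretend latched IntStatus word */

        /* Acknowledge everything, then act only on the enabled sources. */
        int_status &= DEFAULT_INTR;
        printf("events to service: 0x%04x\n", int_status);
        return 0;
    }
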
@@ -163,11 +174,11 @@
np->speed = 10;
np->full_duplex = 0;
} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
- strcmp (media[card_idx], "5") == 0) {
+ strcmp (media[card_idx], "6") == 0) {
np->speed=1000;
np->full_duplex=1;
} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
- strcmp (media[card_idx], "6") == 0) {
+ strcmp (media[card_idx], "5") == 0) {
np->speed = 1000;
np->full_duplex = 0;
} else {
@@ -176,7 +187,7 @@
}
if (jumbo[card_idx] != 0) {
np->jumbo = 1;
- dev->mtu = 9000;
+ dev->mtu = MAX_JUMBO;
} else {
np->jumbo = 0;
if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
@@ -184,14 +195,17 @@
}
np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
vlan[card_idx] : 0;
- if (int_count != 0 && int_timeout != 0) {
- np->int_count = int_count;
- np->int_timeout = int_timeout;
+ if (rx_coalesce != 0 && rx_timeout != 0) {
+ np->rx_coalesce = rx_coalesce;
+ np->rx_timeout = rx_timeout;
np->coalesce = 1;
}
np->tx_flow = (tx_flow[card_idx]) ? 1 : 0;
np->rx_flow = (rx_flow[card_idx]) ? 1 : 0;
-
+ if (tx_coalesce < 1)
+ tx_coalesce = 1;
+ if (tx_coalesce > 8)
+ tx_coalesce = 8;
}
dev->open = &rio_open;
dev->hard_start_xmit = &start_xmit;
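
In the probe path above, receive coalescing is enabled only when both rx_coalesce and rx_timeout are non-zero, and tx_coalesce is forced into the 1..8 range the hardware can batch per TxComplete. A trivial sketch of that clamp, assuming nothing beyond standard C:

    #include <stdio.h>

    /* Clamp the tx_coalesce module parameter into the supported range. */
    static int clamp_tx_coalesce(int value)
    {
        if (value < 1)
            return 1;
        if (value > 8)
            return 8;
        return value;
    }

    int main(void)
    {
        printf("%d %d %d\n", clamp_tx_coalesce(0),
               clamp_tx_coalesce(5), clamp_tx_coalesce(100));   /* 1 5 8 */
        return 0;
    }
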
@@ -202,8 +216,8 @@
dev->tx_timeout = &tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
dev->change_mtu = &change_mtu;
-#ifdef TX_HW_CHECKSUM
- dev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
+#if 0
+ dev->features = NETIF_F_IP_CSUM;
#endif
pci_set_drvdata (pdev, dev);
@@ -389,13 +403,12 @@
i = request_irq (dev->irq, &rio_interrupt, SA_SHIRQ, dev->name, dev);
if (i)
return i;
-
/* DebugCtrl bit 4, 5, 9 must set */
writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);
/* Jumbo frame */
if (np->jumbo != 0)
- writew (9014, ioaddr + MaxFrameSize);
+ writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);
alloc_list (dev);
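
Replacing the literal 9014 with MAX_JUMBO+14 ties the MaxFrameSize register to the MTU constant: the on-wire frame is the payload MTU plus the 14-byte Ethernet header. Assuming MAX_JUMBO is 9000 in dl2k.h (which matches the old literals), the value written is unchanged:

    /* MaxFrameSize = MTU + dest(6) + src(6) + type/len(2) */
    #define MAX_JUMBO        9000                       /* assumed; defined in dl2k.h */
    #define ETH_HDR_LEN      14
    #define JUMBO_FRAME_SIZE (MAX_JUMBO + ETH_HDR_LEN)  /* 9014, same as before */
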
@@ -405,7 +418,7 @@
set_multicast (dev);
if (np->coalesce) {
- writel (np->int_count | np->int_timeout << 16,
+ writel (np->rx_coalesce | np->rx_timeout << 16,
ioaddr + RxDMAIntCtrl);
}
/* Set RIO to poll every N*320nsec. */
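
The coalescing parameters are handed to the chip in one 32-bit write: the frame count sits in the low half of RxDMAIntCtrl and the wait time (in 64 ns units) is shifted into the high half, exactly as the hunk above writes it. A user-space sketch of the packing, with the layout taken from this hunk rather than from the datasheet:

    #include <stdio.h>

    /* Pack rx_coalesce (frames per interrupt) and rx_timeout (64 ns units)
     * the way rio_open() writes them into RxDMAIntCtrl. */
    static unsigned int pack_rx_dma_int_ctrl(unsigned int rx_coalesce,
                                             unsigned int rx_timeout)
    {
        return rx_coalesce | (rx_timeout << 16);
    }

    int main(void)
    {
        /* Example values only: 10 frames or 640 * 64 ns worth of waiting. */
        printf("RxDMAIntCtrl = 0x%08x\n", pack_rx_dma_int_ctrl(10, 640));
        return 0;
    }
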
@@ -442,13 +455,31 @@
struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
- printk (KERN_WARNING "%s: Transmit timed out, TxStatus %4.4x.\n",
+ printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
dev->name, readl (ioaddr + TxStatus));
+ /* Free used tx skbuffs */
+ for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
+ int entry = np->old_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+
+ if (!(np->tx_ring[entry].status & TFDDone))
+ break;
+ skb = np->tx_skbuff[entry];
+ pci_unmap_single (np->pdev,
+ np->tx_ring[entry].fraginfo,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq (skb);
+ np->tx_skbuff[entry] = 0;
+ }
dev->if_port = 0;
dev->trans_start = jiffies;
np->stats.tx_errors++;
- if (!np->tx_full)
+ /* If the ring is no longer full, clear tx_full and
+ call netif_wake_queue() */
+ if (np->tx_full && np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1) {
+ np->tx_full = 0;
netif_wake_queue (dev);
+ }
}
/* allocate and initialize Tx and Rx descriptors */
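
The new tx_timeout() body reuses the completion-reclaim loop from the interrupt handler: walk from old_tx toward cur_tx, stop at the first descriptor the hardware has not marked TFDDone, unmap and free the finished ones, and only wake the queue once the ring has drained below TX_QUEUE_LEN - 1. A condensed user-space model of that reclaim step (the descriptor struct is a stand-in, and clearing status stands in for pci_unmap_single() plus dev_kfree_skb_irq()):

    #include <stdio.h>

    #define TX_RING_SIZE 256
    #define TFD_DONE     1u

    struct tx_desc { unsigned int status; };

    /* Advance old_tx past every completed entry, stopping at the first
     * descriptor the hardware still owns; return the new old_tx. */
    static unsigned long reclaim(struct tx_desc *ring,
                                 unsigned long old_tx, unsigned long cur_tx)
    {
        while (cur_tx - old_tx > 0) {
            unsigned int entry = old_tx % TX_RING_SIZE;

            if (!(ring[entry].status & TFD_DONE))
                break;
            ring[entry].status = 0;     /* unmap + free skb in the driver */
            old_tx++;
        }
        return old_tx;
    }

    int main(void)
    {
        struct tx_desc ring[TX_RING_SIZE] = { { TFD_DONE }, { TFD_DONE }, { 0 } };

        printf("old_tx advanced to %lu\n", reclaim(ring, 0, 3));    /* 2 */
        return 0;
    }
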
@@ -466,16 +497,19 @@
/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_skbuff[i] = 0;
- np->tx_ring[i].status = 0;
+ np->tx_ring[i].status = cpu_to_le64 (TFDDone);
+ np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
+ ((i+1)%TX_RING_SIZE) *
+ sizeof (struct
+ netdev_desc));
}
/* Initialize Rx descriptors */
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
- ((i +
- 1) % RX_RING_SIZE) *
- sizeof (struct
- netdev_desc));
+ ((i + 1) % RX_RING_SIZE) *
+ sizeof (struct
+ netdev_desc));
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
np->rx_skbuff[i] = 0;
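
alloc_list() now links the Tx descriptors into a ring once at setup time instead of chaining them on the fly in start_xmit(): each next_desc holds the bus address of entry (i + 1) % TX_RING_SIZE, so the last entry wraps back to the first, and the pre-set TFDDone bit keeps the TxDMA engine from gathering entries that have not been filled yet. A small sketch of the address arithmetic (the descriptor layout is a stand-in for struct netdev_desc, and the base address is a pretend pci_alloc_consistent() result):

    #include <stdio.h>
    #include <stdint.h>

    #define TX_RING_SIZE 8          /* small ring, just for the printout */

    struct fake_desc { uint64_t next_desc, status, fraginfo; };

    int main(void)
    {
        uint64_t ring_dma = 0x10000000;     /* pretend bus address of the ring */
        int i;

        for (i = 0; i < TX_RING_SIZE; i++) {
            uint64_t next = ring_dma +
                ((i + 1) % TX_RING_SIZE) * sizeof(struct fake_desc);
            printf("desc %d -> next_desc 0x%llx\n",
                   i, (unsigned long long) next);
        }
        return 0;
    }
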
@@ -523,13 +557,12 @@
entry = np->cur_tx % TX_RING_SIZE;
np->tx_skbuff[entry] = skb;
txdesc = &np->tx_ring[entry];
- txdesc->next_desc = 0;
/* Set TFDDone to avoid TxDMA gather this descriptor */
txdesc->status = cpu_to_le64 (TFDDone);
txdesc->status |=
cpu_to_le64 (entry | WordAlignDisable | (1 << FragCountShift));
-#ifdef TX_HW_CHECKSUM
+#if 0
if (skb->ip_summed == CHECKSUM_HW) {
txdesc->status |=
cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
@@ -545,21 +578,13 @@
/* Send one packet each time at 10Mbps mode */
/* Tx coalescing loop do not exceed 8 */
- if (entry % 0x08 == 0 || np->speed == 10)
+ if (entry % tx_coalesce == 0 || np->speed == 10)
txdesc->status |= cpu_to_le64 (TxIndicate);
txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
skb->len,
PCI_DMA_TODEVICE));
txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;
- /* Chain the last descriptor's pointer to this one */
- if (np->last_tx)
- np->last_tx->next_desc = cpu_to_le64 (np->tx_ring_dma +
- entry *
- sizeof (struct
- netdev_desc));
- np->last_tx = txdesc;
-
/* Clear TFDDone, then TxDMA start to send this descriptor */
txdesc->status &= ~cpu_to_le64 (TFDDone);
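
With the descriptor chain pre-built, start_xmit() only has to decide which entries raise a TxComplete interrupt: one in every tx_coalesce frames, plus every frame at 10 Mbps, where the driver sends one packet at a time. This sketch just enumerates which ring entries would set TxIndicate for an example tx_coalesce value:

    #include <stdio.h>

    int main(void)
    {
        int tx_coalesce = 4;    /* example; the driver clamps this to 1..8 */
        int speed = 100;        /* anything but 10 Mbps */
        int entry;

        for (entry = 0; entry < 12; entry++) {
            int indicate = (entry % tx_coalesce == 0) || (speed == 10);
            printf("entry %2d: TxIndicate=%d\n", entry, indicate);
        }
        return 0;
    }
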
@@ -571,8 +596,10 @@
if (np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1 && np->speed != 10) {
/* do nothing */
} else {
+ spin_lock_irqsave(&np->lock, flags);
np->tx_full = 1;
netif_stop_queue (dev);
+ spin_unlock_irqrestore (&np->lock, flags);
}
/* The first TFDListPtr */
@@ -581,15 +608,15 @@
dev->base_addr + TFDListPtr0);
writel (0, dev->base_addr + TFDListPtr1);
}
-
- spin_lock_irqsave (&np->lock, flags);
+
if (np->old_tx > TX_RING_SIZE) {
+ spin_lock_irqsave (&np->lock, flags);
tx_shift = TX_RING_SIZE;
np->old_tx -= tx_shift;
np->cur_tx -= tx_shift;
+ spin_unlock_irqrestore (&np->lock, flags);
}
- spin_unlock_irqrestore (&np->lock, flags);
-
+
/* NETDEV WATCHDOG timer */
dev->trans_start = jiffies;
return 0;
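
cur_tx and old_tx are free-running counters and ring slots are always addressed as counter % TX_RING_SIZE, so pulling both counters back by TX_RING_SIZE under the lock changes neither the slot each one maps to nor their difference (the number of frames in flight); it only keeps the counters from growing without bound. A quick check of that invariant:

    #include <stdio.h>

    #define TX_RING_SIZE 256

    int main(void)
    {
        unsigned long cur_tx = 300, old_tx = 260;   /* example counter values */
        unsigned long in_flight = cur_tx - old_tx;
        unsigned long entry = cur_tx % TX_RING_SIZE;

        if (old_tx > TX_RING_SIZE) {
            cur_tx -= TX_RING_SIZE;
            old_tx -= TX_RING_SIZE;
        }
        printf("in flight %lu -> %lu, cur_tx slot %lu -> %lu\n",
               in_flight, cur_tx - old_tx, entry, cur_tx % TX_RING_SIZE);
        return 0;
    }
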
@@ -606,33 +633,24 @@
ioaddr = dev->base_addr;
np = dev->priv;
- spin_lock (&np->lock);
+ spin_lock(&np->lock);
while (1) {
- int_status = readw (ioaddr + IntStatus) &
- (HostError | TxComplete | IntRequested |
- UpdateStats | LinkEvent | RxDMAComplete);
- writew (int_status & (HostError | TxComplete | RxComplete |
- IntRequested | UpdateStats | LinkEvent |
- TxDMAComplete | RxDMAComplete | RFDListEnd
- | RxDMAPriority), ioaddr + IntStatus);
+ int_status = readw (ioaddr + IntStatus);
+ writew (int_status, ioaddr + IntStatus);
+ int_status &= DEFAULT_INTR;
if (int_status == 0)
break;
/* Processing received packets */
if (int_status & RxDMAComplete)
receive_packet (dev);
/* TxComplete interrupt */
- if (int_status & TxComplete || np->tx_full) {
- int tx_status = readl (ioaddr + TxStatus);
+ if ((int_status & TxComplete) || np->tx_full) {
+ int tx_status;
+ tx_status = readl (ioaddr + TxStatus);
if (tx_status & 0x01)
tx_error (dev, tx_status);
- /* Send one packet each time at 10Mbps mode */
- if (np->speed == 10) {
- np->tx_full = 0;
- netif_wake_queue (dev);
- }
-
/* Free used tx skbuffs */
- for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
+ for (;np->cur_tx - np->old_tx > 0; np->old_tx++) {
int entry = np->old_tx % TX_RING_SIZE;
struct sk_buff *skb;
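
The interrupt loop is now acknowledge-first: read IntStatus, write the same value back to clear every latched source, then mask with DEFAULT_INTR and service only the events the driver enabled, repeating until the masked status is zero. A user-space sketch of that loop shape, with readw()/writew() replaced by a fake latch register and a placeholder mask:

    #include <stdio.h>

    #define DEFAULT_INTR 0x01f2     /* placeholder; the real mask is built in dl2k.c */

    static unsigned int fake_latch = 0x01c3;    /* pretend latched IntStatus */

    static unsigned int read_status(void)   { return fake_latch; }
    static void ack_status(unsigned int v)  { fake_latch &= ~v; }   /* ack clears */

    int main(void)
    {
        for (;;) {
            unsigned int int_status = read_status();

            ack_status(int_status);         /* acknowledge everything we saw */
            int_status &= DEFAULT_INTR;     /* ...but handle only enabled sources */
            if (int_status == 0)
                break;
            printf("servicing 0x%04x\n", int_status);
        }
        return 0;
    }
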
@@ -649,9 +667,12 @@
/* If the ring is no longer full, clear tx_full and
call netif_wake_queue() */
if (np->tx_full && np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1) {
- np->tx_full = 0;
- netif_wake_queue (dev);
+ if (np->speed != 10 || int_status & TxComplete) {
+ np->tx_full = 0;
+ netif_wake_queue (dev);
+ }
}
+
/* Handle uncommon events */
if (int_status &
(IntRequested | HostError | LinkEvent | UpdateStats))
@@ -666,7 +687,7 @@
break;
}
}
- spin_unlock (&np->lock);
+ spin_unlock(&np->lock);
}
static void
@@ -742,7 +763,7 @@
np->stats.collisions++;
#endif
- /* Restart the Tx. */
+ /* Restart the Tx */
writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
}
@@ -783,7 +804,7 @@
if (frame_status & 0x00300000)
np->stats.rx_length_errors++;
if (frame_status & 0x00010000)
- np->stats.rx_fifo_errors++;
+ np->stats.rx_fifo_errors++;
if (frame_status & 0x00060000)
np->stats.rx_frame_errors++;
if (frame_status & 0x00080000)
@@ -808,7 +829,7 @@
skb_put (skb, pkt_len);
}
skb->protocol = eth_type_trans (skb, dev);
-#ifdef RX_HW_CHECKSUM
+#if 0
/* Checksum done by hw, but csum value unavailable. */
if (!(frame_status & (TCPError | UDPError | IPError))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -899,7 +920,7 @@
/* PCI Error, a catastronphic error related to the bus interface
occurs, set GlobalReset and HostReset to reset. */
if (int_status & HostError) {
- printk (KERN_ERR "%s: PCI Error! IntStatus %4.4x.\n",
+ printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
dev->name, int_status);
writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
mdelay (500);
@@ -914,8 +935,8 @@
u16 temp1;
u16 temp2;
int i;
- /* All statistics registers need to acknowledge,
- else overflow could cause some problem */
+ /* All statistics registers need to be acknowledged,
+ else statistic overflow could cause problems */
np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
@@ -932,11 +953,11 @@
readl (ioaddr + FramesWDeferredXmt) + temp2;
/* detailed rx_error */
- np->stats.rx_length_errors += readw (ioaddr + InRangeLengthErrors) +
- readw (ioaddr + FrameTooLongErrors);
+ np->stats.rx_length_errors += readw (ioaddr + FrameTooLongErrors);
np->stats.rx_crc_errors += readw (ioaddr + FrameCheckSeqError);
/* Clear all other statistic register. */
+ readw (ioaddr + InRangeLengthErrors);
readw (ioaddr + MacControlFramesXmtd);
readw (ioaddr + BcstFramesXmtdOk);
readl (ioaddr + McstFramesXmtdOk);
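
Both statistics hunks rely on the hardware counters being cleared when read: every register has to be read on each pass, even those the driver discards, or a later overflow corrupts the totals. The change above also stops folding InRangeLengthErrors into rx_length_errors while still reading the register to clear it. A small model of the accumulate-then-drain pattern:

    #include <stdio.h>

    /* A clear-on-read hardware counter, modelled in software. */
    static unsigned int hw_frames_rcv_ok = 17;

    static unsigned int read_and_clear(unsigned int *reg)
    {
        unsigned int v = *reg;

        *reg = 0;
        return v;
    }

    int main(void)
    {
        unsigned long rx_packets = 0;

        rx_packets += read_and_clear(&hw_frames_rcv_ok);   /* drain into soft total */
        rx_packets += read_and_clear(&hw_frames_rcv_ok);   /* second pass adds 0 */
        printf("rx_packets = %lu\n", rx_packets);          /* 17 */
        return 0;
    }
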
@@ -961,7 +982,7 @@
change_mtu (struct net_device *dev, int new_mtu)
{
struct netdev_private *np = dev->priv;
- int max = (np->jumbo) ? 9000 : 1536;
+ int max = (np->jumbo) ? MAX_JUMBO : 1536;
if ((new_mtu < 68) || (new_mtu > max)) {
return -EINVAL;
@@ -996,36 +1017,42 @@
u32 hash_table[2];
u16 rx_mode = 0;
int i;
+ int bit;
+ int index, crc;
struct dev_mc_list *mclist;
struct netdev_private *np = dev->priv;
-
- /* Default: receive broadcast and unicast */
- rx_mode = ReceiveBroadcast | ReceiveUnicast;
+
+ hash_table[0] = hash_table[1] = 0;
+ /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
+ hash_table[1] |= 0x02000000;
if (dev->flags & IFF_PROMISC) {
/* Receive all frames promiscuously. */
- rx_mode |= ReceiveAllFrames;
- } else if (((dev->flags & IFF_MULTICAST)
- && (dev->mc_count > multicast_filter_limit))
- || (dev->flags & IFF_ALLMULTI)) {
+ rx_mode = ReceiveAllFrames;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
- rx_mode |= ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
- } else if ((dev->flags & IFF_MULTICAST) & (dev->mc_count > 0)) {
- /* Receive broadcast frames and multicast frames filtering by Hashtable */
- rx_mode |=
+ rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
+ } else if (dev->mc_count > 0) {
+ /* Receive broadcast frames and multicast frames filtering
+ by Hashtable */
+ rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
+ for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist=mclist->next) {
+ crc = get_crc (mclist->dmi_addr, ETH_ALEN);
+ for (index=0, bit=0; bit<6; bit++, crc<<=1) {
+ if (crc & 0x80000000) index |= 1 << bit;
+ }
+ hash_table[index / 32] |= (1 << (index % 32));
+ }
+ } else {
+ rx_mode = ReceiveBroadcast | ReceiveUnicast;
}
if (np->vlan) {
/* ReceiveVLANMatch field in ReceiveMode */
rx_mode |= ReceiveVLANMatch;
}
- hash_table[0] = 0x00000000;
- hash_table[1] = 0x00000000;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
- set_bit (get_crc (mclist->dmi_addr, ETH_ALEN) & 0x3f,
- hash_table);
- }
writel (hash_table[0], ioaddr + HashTable0);
writel (hash_table[1], ioaddr + HashTable1);
writew (rx_mode, ioaddr + ReceiveMode);
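
The rewritten set_multicast() builds the 64-bit hash filter by hand: take the CRC of each multicast address (get_crc() in the driver), form a 6-bit index from the top six CRC bits in bit-reversed order, and set bit (index % 32) of hash_table[index / 32]. The fixed flow-control address 01-80-C2-00-00-01 is pre-set at index 0x39, i.e. 57 = 32 + 25, which is exactly the 0x02000000 bit in hash_table[1]. A user-space sketch of the index calculation, with a made-up CRC value since get_crc() itself lives in the driver:

    #include <stdio.h>

    /* Turn a 32-bit CRC of a multicast address into a 6-bit hash index,
     * the same way the new set_multicast() does: top 6 bits, bit-reversed. */
    static int hash_index(unsigned int crc)
    {
        int index = 0, bit;

        for (bit = 0; bit < 6; bit++, crc <<= 1) {
            if (crc & 0x80000000)
                index |= 1 << bit;
        }
        return index;
    }

    int main(void)
    {
        unsigned int hash_table[2] = { 0, 0 };
        unsigned int crc = 0xe4d4f0a1;      /* made-up CRC, just to exercise it */
        int index = hash_index(crc);

        hash_table[index / 32] |= 1u << (index % 32);
        printf("index=%d, hash_table = %08x %08x\n",
               index, hash_table[1], hash_table[0]);
        return 0;
    }
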
@@ -1058,6 +1085,8 @@
miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
break;
case SIOCDEVPRIVATE + 2:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
break;
case SIOCDEVPRIVATE + 3:
@@ -1693,8 +1722,9 @@
Compile command:
-gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2x.c
+gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
Read Documentation/networking/dl2k.txt for details.
*/
+