
[adm8668] remove the "old" tulip forked driver

We can now use the mainline tulip driver.

Signed-off-by: Florian Fainelli <florian@openwrt.org>

git-svn-id: svn://svn.openwrt.org/openwrt/trunk@34559 3c298f89-4303-0410-b956-a3cf2f4a3e73
Author: florian
Date:   2012-12-06 22:40:40 +00:00
Parent: ecb428abff
Commit: 8e79fcf508

3 changed files with 0 additions and 1341 deletions
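For reference, the "mainline tulip driver" the message refers to is selected
through the standard kernel options; a sketch of the config involved (assumed,
not taken from this commit):

    CONFIG_NET_TULIP=y
    CONFIG_TULIP=y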


@@ -1,277 +0,0 @@
/*
* originally drivers/net/tulip/tulip.h
* Copyright 2000,2001 The Linux Kernel Team
* Written/copyright 1994-2001 by Donald Becker.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __NET_TULIP_H__
#define __NET_TULIP_H__
#include <linux/module.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>
/* undefine, or define to various debugging levels (>4 == obscene levels) */
#define TULIP_DEBUG 1
#define VALID_INTR 0x0001a451
#define ADM8668_WAN_IRQ 8
#define ADM8668_LAN_IRQ 7
#define ADM8668_WAN_MACADDR 0xb00205ac
#define ADM8668_LAN_MACADDR 0xb0020404
/* Offsets to the Command and Status Registers, "CSRs". All accesses
must be longword instructions and quadword aligned. */
enum tulip_offsets {
CSR0 = 0,
CSR1 = 0x08,
CSR2 = 0x10,
CSR3 = 0x18,
CSR4 = 0x20,
CSR5 = 0x28,
CSR6 = 0x30,
CSR7 = 0x38,
CSR8 = 0x40,
CSR9 = 0x48,
CSR10 = 0x50,
CSR11 = 0x58,
CSR12 = 0x60,
CSR13 = 0x68,
CSR14 = 0x70,
CSR15 = 0x78,
CSR18 = 0x88,
CSR19 = 0x8c,
CSR20 = 0x90,
CSR27 = 0xAC,
CSR28 = 0xB0,
};
#define RxPollInt (RxIntr|RxNoBuf|RxDied|RxJabber)
/* The bits in the CSR5 status registers, mostly interrupt sources. */
enum status_bits {
TimerInt = 0x800,
SystemError = 0x2000,
TPLnkFail = 0x1000,
TPLnkPass = 0x10,
NormalIntr = 0x10000,
AbnormalIntr = 0x8000,
RxJabber = 0x200,
RxDied = 0x100,
RxNoBuf = 0x80,
RxIntr = 0x40,
TxFIFOUnderflow = 0x20,
RxErrIntr = 0x10,
TxJabber = 0x08,
TxNoBuf = 0x04,
TxDied = 0x02,
TxIntr = 0x01,
};
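/*
 * Illustrative decomposition (not in the original source): the VALID_INTR
 * mask 0x0001a451 defined above works out to
 *
 *     NormalIntr | AbnormalIntr | SystemError | 0x400 | RxIntr
 *         | 0x10 (shared by RxErrIntr and TPLnkPass) | TxIntr
 *
 * i.e. the interrupt sources tulip_up() unmasks in CSR7.
 */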
/* bit mask for CSR5 TX/RX process state */
#define CSR5_TS 0x00700000
#define CSR5_RS 0x000e0000
enum tulip_mode_bits {
TxThreshold = (1 << 22),
FullDuplex = (1 << 9),
TxOn = 0x2000,
AcceptBroadcast = 0x0100,
AcceptAllMulticast = 0x0080,
AcceptAllPhys = 0x0040,
AcceptRunt = 0x0008,
RxOn = 0x0002,
RxTx = (TxOn | RxOn),
};
/* The Tulip Rx and Tx buffer descriptors. */
struct tulip_rx_desc {
__le32 status;
__le32 length;
__le32 buffer1;
__le32 buffer2;
};
struct tulip_tx_desc {
__le32 status;
__le32 length;
__le32 buffer1;
__le32 buffer2; /* We use only buffer 1. */
};
enum desc_status_bits {
DescOwned = 0x80000000,
DescWholePkt = 0x60000000,
DescEndPkt = 0x40000000,
DescStartPkt = 0x20000000,
DescEndRing = 0x02000000,
DescUseLink = 0x01000000,
/*
* Error summary flag is logical or of 'CRC Error', 'Collision Seen',
* 'Frame Too Long', 'Runt' and 'Descriptor Error' flags generated
* within tulip chip.
*/
RxDescErrorSummary = 0x8000,
RxDescCRCError = 0x0002,
RxDescCollisionSeen = 0x0040,
/*
* 'Frame Too Long' flag is set if packet length including CRC exceeds
* 1518. However, a full sized VLAN tagged frame is 1522 bytes
* including CRC.
*
* The tulip chip does not block oversized frames, and if this flag is
* set on a receive descriptor it does not indicate the frame has been
* truncated. The receive descriptor also includes the actual length.
* Therefore we can safely ignore this flag and check the length
* ourselves.
*/
RxDescFrameTooLong = 0x0080,
RxDescRunt = 0x0800,
RxDescDescErr = 0x4000,
RxWholePkt = 0x00000300,
/*
* Top three bits of 14 bit frame length (status bits 27-29) should
* never be set as that would make frame over 2047 bytes. The Receive
* Watchdog flag (bit 4) may indicate the length is over 2048 and the
* length field is invalid.
*/
RxLengthOver2047 = 0x38000010
};
/* Keep the ring sizes a power of two for efficiency.
Making the Tx ring too large decreases the effectiveness of channel
bonding and packet priority.
There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE 32
#define RX_RING_SIZE 128
/* The receiver on the DC21143 rev 65 can fail to close the last
* receive descriptor in certain circumstances (see errata) when
* using MWI. This can only occur if the receive buffer ends on
* a cache line boundary, so the "+ 4" below ensures it doesn't.
*/
#define PKT_BUF_SZ (1536 + 4) /* Size of each temporary Rx buffer. */
/* Ring-wrap flag in length field, use for last ring entry.
0x01000000 means chain on buffer2 address,
0x02000000 means use the ring start address in CSR2/3.
Note: Some work-alike chips do not function correctly in chained mode.
The ASIX chip works only in chained mode.
Thus we indicate ring mode, but always write the 'next' field for
chained mode as well.
*/
#define DESC_RING_WRAP 0x02000000
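/*
 * Illustrative note (not in the original source): tulip_init_ring() in
 * tulip_core.c applies this flag only to the final descriptor, e.g.
 *
 *     tp->rx_ring[RX_RING_SIZE - 1].length =
 *             cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
 *
 * while still chaining every buffer2 pointer, so work-alike chips that
 * only support chained mode keep working.
 */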
struct ring_info {
struct sk_buff *skb;
dma_addr_t mapping;
};
struct tulip_private {
struct tulip_rx_desc *rx_ring;
struct tulip_tx_desc *tx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct ring_info tx_buffers[TX_RING_SIZE];
/* The addresses of receive-in-place skbuffs. */
struct ring_info rx_buffers[RX_RING_SIZE];
struct napi_struct napi;
struct net_device_stats stats;
struct timer_list oom_timer; /* Out of memory timer. */
u32 mc_filter[2];
spinlock_t lock;
unsigned int cur_rx, cur_tx; /* The next free ring entry */
unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
unsigned int csr0; /* CSR0 setting. */
unsigned int csr6; /* Current CSR6 control settings. */
void (*link_change) (struct net_device * dev, int csr5);
struct platform_device *pdev;
unsigned long nir;
void __iomem *base_addr;
int pad0; /* Used for 8-byte alignment */
struct net_device *dev;
};
/* interrupt.c */
irqreturn_t tulip_interrupt(int irq, void *dev_instance);
int tulip_refill_rx(struct net_device *dev);
int tulip_poll(struct napi_struct *napi, int budget);
/* tulip_core.c */
extern int tulip_debug;
void oom_timer(unsigned long data);
static inline void tulip_start_rxtx(struct tulip_private *tp)
{
void __iomem *ioaddr = tp->base_addr;
iowrite32(tp->csr6 | RxTx, ioaddr + CSR6);
barrier();
(void) ioread32(ioaddr + CSR6); /* mmio sync */
}
static inline void tulip_stop_rxtx(struct tulip_private *tp)
{
void __iomem *ioaddr = tp->base_addr;
u32 csr6 = ioread32(ioaddr + CSR6);
if (csr6 & RxTx) {
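/* budget: 1300 us total at 10 us per poll = 130 iterations, sized for the
* worst case worked out in the comment below */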
unsigned i = 1300 / 10;
iowrite32(csr6 & ~RxTx, ioaddr + CSR6);
barrier();
/* wait until in-flight frame completes.
* Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
* Typically expect this loop to end in < 50 us on 100BT.
*/
while (--i && (ioread32(ioaddr + CSR5) & (CSR5_TS|CSR5_RS)))
udelay(10);
if (!i)
printk(KERN_DEBUG "fixme: tulip_stop_rxtx() failed"
" (CSR5 0x%x CSR6 0x%x)\n",
ioread32(ioaddr + CSR5),
ioread32(ioaddr + CSR6));
}
}
static inline void tulip_restart_rxtx(struct tulip_private *tp)
{
tulip_stop_rxtx(tp);
udelay(5);
tulip_start_rxtx(tp);
}
static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __iomem *ioaddr)
{
/* Stop and restart the chip's Tx processes. */
tulip_restart_rxtx(tp);
/* Trigger an immediate transmit demand. */
iowrite32(0, ioaddr + CSR1);
tp->stats.tx_errors++;
}
#endif /* __NET_TULIP_H__ */


@@ -1,618 +0,0 @@
/*
* originally drivers/net/tulip_core.c
* Copyright 2000,2001 The Linux Kernel Team
* Written/copyright 1994-2001 by Donald Becker.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define DRV_NAME "tulip"
#define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
#define DRV_RELDATE "Feb 27, 2007"
#include "net.h"
static char version[] __devinitdata =
"ADM8668net driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
#define MAX_UNITS 2
/*
Set the bus performance register.
Typical: Set 16 longword cache alignment, no burst limit.
Cache alignment bits 15:14 Burst length 13:8
0000 No alignment 0x00000000 unlimited 0800 8 longwords
4000 8 longwords 0100 1 longword 1000 16 longwords
8000 16 longwords 0200 2 longwords 2000 32 longwords
C000 32 longwords 0400 4 longwords
Warning: many older 486 systems are broken and require setting 0x00A04800
8 longword cache alignment, 8 longword burst.
ToDo: Non-Intel setting could be better.
*/
//static int csr0 = 0x00200000 | 0x4000;
static int csr0 = 0;
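/*
 * Illustrative example (not in the original source): composing a CSR0 value
 * from the table above -- 16-longword cache alignment (0x8000) plus a
 * 32-longword burst limit (0x2000):
 *
 *     static int csr0 = 0x8000 | 0x2000;    (== 0xA000)
 *
 * This driver leaves csr0 at 0: no cache alignment and an unlimited burst.
 */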
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (4*HZ)
MODULE_AUTHOR("Scott Nicholas <neutronscott@scottn.us>");
MODULE_DESCRIPTION("ADM8668 new ethernet driver.");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#ifdef TULIP_DEBUG
int tulip_debug = TULIP_DEBUG;
#else
int tulip_debug = 1;
#endif
static void tulip_tx_timeout(struct net_device *dev);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int tulip_open(struct net_device *dev);
static int tulip_close(struct net_device *dev);
static void tulip_up(struct net_device *dev);
static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
//static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif
static void tulip_up(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
napi_enable(&tp->napi);
/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
iowrite32(0x00000001, ioaddr + CSR0);
/* Deassert reset.
Wait the specified 50 PCI cycles after a reset by initializing
Tx and Rx queues and the address filter list. */
iowrite32(tp->csr0, ioaddr + CSR0);
if (tulip_debug > 1)
printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
dev->name, dev->irq);
iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
tp->cur_rx = tp->cur_tx = 0;
tp->dirty_rx = tp->dirty_tx = 0;
/* set mac address */
iowrite32(get_unaligned_le32(dev->dev_addr), ioaddr + 0xA4);
iowrite32(get_unaligned_le16(dev->dev_addr + 4), ioaddr + 0xA8);
iowrite32(0, ioaddr + CSR27);
iowrite32(0, ioaddr + CSR28);
tp->csr6 = 0;
/* Enable automatic Tx underrun recovery. */
iowrite32(ioread32(ioaddr + CSR18) | 1, ioaddr + CSR18);
tp->csr6 = 0x00040000;
/* Start the chip's Tx to process setup frame. */
tulip_stop_rxtx(tp);
barrier();
udelay(5);
iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
/* Enable interrupts by setting the interrupt mask. */
iowrite32(VALID_INTR, ioaddr + CSR5);
iowrite32(VALID_INTR, ioaddr + CSR7);
tulip_start_rxtx(tp);
iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
if (tulip_debug > 2) {
printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
dev->name, ioread32(ioaddr + CSR0),
ioread32(ioaddr + CSR5),
ioread32(ioaddr + CSR6));
}
init_timer(&tp->oom_timer);
tp->oom_timer.data = (unsigned long)dev;
tp->oom_timer.function = oom_timer;
}
static int
tulip_open(struct net_device *dev)
{
int retval;
tulip_init_ring (dev);
retval = request_irq(dev->irq, tulip_interrupt, 0, dev->name, dev);
if (retval)
goto free_ring;
tulip_up (dev);
netif_start_queue (dev);
return 0;
free_ring:
tulip_free_ring (dev);
return retval;
}
static void tulip_tx_timeout(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
unsigned long flags;
spin_lock_irqsave (&tp->lock, flags);
dev_warn(&dev->dev,
"Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
tulip_tx_timeout_complete(tp, ioaddr);
spin_unlock_irqrestore (&tp->lock, flags);
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void tulip_init_ring(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
int i;
tp->nir = 0;
for (i = 0; i < RX_RING_SIZE; i++) {
tp->rx_ring[i].status = 0x00000000;
tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
tp->rx_buffers[i].skb = NULL;
tp->rx_buffers[i].mapping = 0;
}
/* Mark the last entry as wrapping the ring. */
tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
for (i = 0; i < RX_RING_SIZE; i++) {
dma_addr_t mapping;
/* Note the receive buffer must be longword aligned.
dev_alloc_skb() provides 16 byte alignment. But do *not*
use skb_reserve() to align the IP header! */
struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
tp->rx_buffers[i].skb = skb;
if (skb == NULL)
break;
mapping = dma_map_single(&dev->dev, skb->data,
PKT_BUF_SZ, DMA_FROM_DEVICE);
tp->rx_buffers[i].mapping = mapping;
skb->dev = dev; /* Mark as being used by this device. */
tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
}
tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/* The Tx buffer descriptor is filled in as needed, but we
do need to clear the ownership bit. */
for (i = 0; i < TX_RING_SIZE; i++) {
tp->tx_buffers[i].skb = NULL;
tp->tx_buffers[i].mapping = 0;
tp->tx_ring[i].status = 0x00000000;
tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
}
tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
}
static netdev_tx_t
tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
int entry;
u32 flag;
dma_addr_t mapping;
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
/* Calculate the next Tx descriptor entry. */
entry = tp->cur_tx % TX_RING_SIZE;
tp->tx_buffers[entry].skb = skb;
mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
tp->tx_buffers[entry].mapping = mapping;
tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
flag = 0x60000000; /* No interrupt */
} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
flag = 0xe0000000; /* Tx-done intr. */
} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
flag = 0x60000000; /* No Tx-done intr. */
} else { /* Leave room for set_rx_mode() to fill entries. */
flag = 0xe0000000; /* Tx-done intr. */
netif_stop_queue(dev);
}
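/* Illustrative note (not in the original source): in the Tx length word,
* 0x60000000 marks a single-buffer packet (first + last segment, cf.
* DescWholePkt), and the extra top bit in 0xe0000000 additionally requests
* a Tx-done interrupt when the descriptor completes. */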
if (entry == TX_RING_SIZE-1)
flag = 0xe0000000 | DESC_RING_WRAP;
tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
/* if we were using Transmit Automatic Polling, we would need a
* wmb() here. */
tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
wmb();
tp->cur_tx++;
/* Trigger an immediate transmit demand. */
iowrite32(0, tp->base_addr + CSR1);
spin_unlock_irqrestore(&tp->lock, flags);
return NETDEV_TX_OK;
}
static void tulip_clean_tx_ring(struct tulip_private *tp)
{
unsigned int dirty_tx;
for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
dirty_tx++) {
int entry = dirty_tx % TX_RING_SIZE;
int status = le32_to_cpu(tp->tx_ring[entry].status);
if (status < 0) {
tp->stats.tx_errors++; /* It wasn't Txed */
tp->tx_ring[entry].status = 0;
}
dma_unmap_single(&tp->pdev->dev, tp->tx_buffers[entry].mapping,
tp->tx_buffers[entry].skb->len,
DMA_TO_DEVICE);
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
tp->tx_buffers[entry].skb = NULL;
tp->tx_buffers[entry].mapping = 0;
}
}
static void tulip_down (struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
unsigned long flags;
napi_disable(&tp->napi);
del_timer_sync (&tp->oom_timer);
spin_lock_irqsave (&tp->lock, flags);
/* Disable interrupts by clearing the interrupt mask. */
iowrite32 (0x00000000, ioaddr + CSR7);
/* Stop the Tx and Rx processes. */
tulip_stop_rxtx(tp);
/* prepare receive buffers */
tulip_refill_rx(dev);
/* release any unconsumed transmit buffers */
tulip_clean_tx_ring(tp);
if (ioread32 (ioaddr + CSR6) != 0xffffffff)
tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;
spin_unlock_irqrestore (&tp->lock, flags);
}
static void tulip_free_ring (struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
int i;
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = tp->rx_buffers[i].skb;
dma_addr_t mapping = tp->rx_buffers[i].mapping;
tp->rx_buffers[i].skb = NULL;
tp->rx_buffers[i].mapping = 0;
tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
tp->rx_ring[i].length = 0;
/* An invalid address. */
tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
if (skb) {
dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
DMA_FROM_DEVICE);
dev_kfree_skb (skb);
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
struct sk_buff *skb = tp->tx_buffers[i].skb;
if (skb != NULL) {
dma_unmap_single(&tp->pdev->dev,
tp->tx_buffers[i].mapping, skb->len, DMA_TO_DEVICE);
dev_kfree_skb (skb);
}
tp->tx_buffers[i].skb = NULL;
tp->tx_buffers[i].mapping = 0;
}
}
static int tulip_close (struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
netif_stop_queue (dev);
tulip_down (dev);
if (tulip_debug > 1)
dev_printk(KERN_DEBUG, &dev->dev,
"Shutting down ethercard, status was %02x\n",
ioread32 (ioaddr + CSR5));
free_irq (dev->irq, dev);
tulip_free_ring (dev);
return 0;
}
static struct net_device_stats *tulip_get_stats(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
if (netif_running(dev)) {
unsigned long flags;
spin_lock_irqsave (&tp->lock, flags);
tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
spin_unlock_irqrestore(&tp->lock, flags);
}
return &tp->stats;
}
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
strcpy(info->bus_info, "mmio");
}
static const struct ethtool_ops ops = {
.get_drvinfo = tulip_get_drvinfo
};
static void set_rx_mode(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
int csr6;
csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
tp->csr6 &= ~0x00D5;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
csr6 |= AcceptAllMulticast | AcceptAllPhys;
} else if ((netdev_mc_count(dev) > 1000) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
} else {
/* Some work-alikes have only a 64-entry hash filter table. */
/* Should verify correctness on big-endian/__powerpc__ */
struct netdev_hw_addr *ha;
if (netdev_mc_count(dev) > 64) {
/* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
netdev_for_each_mc_addr(ha, dev) {
filterbit = ether_crc_le(ETH_ALEN, ha->addr);
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
if (tulip_debug > 2)
dev_info(&dev->dev,
"Added filter for %pM %08x bit %d\n",
ha->addr,
ether_crc(ETH_ALEN, ha->addr),
filterbit);
}
if (mc_filter[0] != tp->mc_filter[0] ||
mc_filter[1] != tp->mc_filter[1]) {
/* Filter changed; update the chip's hash registers. */
iowrite32(mc_filter[0], ioaddr + CSR27);
iowrite32(mc_filter[1], ioaddr + CSR28);
tp->mc_filter[0] = mc_filter[0];
tp->mc_filter[1] = mc_filter[1];
}
}
}
if (dev->irq == ADM8668_LAN_IRQ)
csr6 |= (1 << 9); /* force 100Mbps full duplex */
// csr6 |= 1; /* pad 2 bytes. vlan? */
iowrite32(csr6, ioaddr + CSR6);
}
static const struct net_device_ops tulip_netdev_ops = {
.ndo_open = tulip_open,
.ndo_start_xmit = tulip_start_xmit,
.ndo_tx_timeout = tulip_tx_timeout,
.ndo_stop = tulip_close,
.ndo_get_stats = tulip_get_stats,
.ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_tulip,
#endif
};
static int __devinit adm8668net_probe(struct platform_device *pdev)
{
struct tulip_private *tp;
struct net_device *dev;
struct resource *res;
void __iomem *ioaddr;
int irq;
if (pdev->id < 0 || pdev->id >= MAX_UNITS)
return -EINVAL;
if (!(res = platform_get_resource(pdev, IORESOURCE_IRQ, 0)))
return -ENODEV;
irq = res->start;
if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
return -ENODEV;
if (!(ioaddr = ioremap(res->start, resource_size(res))))
return -ENODEV;
if (!(dev = alloc_etherdev(sizeof (*tp))))
return -ENOMEM;
/* setup net dev */
dev->base_addr = (unsigned long)res->start;
dev->irq = irq;
SET_NETDEV_DEV(dev, &pdev->dev);
/* tulip private struct */
tp = netdev_priv(dev);
tp->dev = dev;
tp->base_addr = ioaddr;
tp->csr0 = csr0;
tp->pdev = pdev;
tp->rx_ring = dma_alloc_coherent(&pdev->dev,
sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
&tp->rx_ring_dma, GFP_KERNEL);
if (!tp->rx_ring) {
free_netdev(dev);
return -ENOMEM;
}
tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
spin_lock_init(&tp->lock);
/* Stop the chip's Tx and Rx processes. */
tulip_stop_rxtx(tp);
/* Clear the missed-packet counter. */
ioread32(ioaddr + CSR8);
/* Addresses are stored in BSP area of NOR flash */
if (irq == ADM8668_WAN_IRQ)
memcpy(dev->dev_addr, (char *)ADM8668_WAN_MACADDR, 6);
else
memcpy(dev->dev_addr, (char *)ADM8668_LAN_MACADDR, 6);
/* The Tulip-specific entries in the device structure. */
dev->netdev_ops = &tulip_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
netif_napi_add(dev, &tp->napi, tulip_poll, 16);
SET_ETHTOOL_OPS(dev, &ops);
if (register_netdev(dev))
goto err_out_free_ring;
dev_info(&dev->dev,
"ADM8668net at MMIO %#lx %pM, IRQ %d\n",
(unsigned long)dev->base_addr, dev->dev_addr, irq);
platform_set_drvdata(pdev, dev);
return 0;
err_out_free_ring:
dma_free_coherent(&pdev->dev,
sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
tp->rx_ring, tp->rx_ring_dma);
free_netdev(dev);
return -ENODEV;
}
static int __devexit adm8668net_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata (pdev);
struct tulip_private *tp;
if (!dev)
return -ENODEV;
tp = netdev_priv(dev);
unregister_netdev(dev);
dma_free_coherent(&pdev->dev,
sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
tp->rx_ring, tp->rx_ring_dma);
iounmap(tp->base_addr);
free_netdev(dev);
platform_set_drvdata(pdev, NULL);
return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void poll_tulip (struct net_device *dev)
{
/* disable_irq here is not very nice, but with the lockless
interrupt handler we have no other choice. */
disable_irq(dev->irq);
tulip_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
static struct platform_driver adm8668net_platform_driver = {
.probe = adm8668net_probe,
.remove = __devexit_p(adm8668net_remove),
.driver = {
.owner = THIS_MODULE,
.name = "adm8668_eth"
},
};
static int __init adm8668net_init(void)
{
pr_info("%s", version);
return platform_driver_register(&adm8668net_platform_driver);
}
static void __exit adm8668net_exit(void)
{
platform_driver_unregister(&adm8668net_platform_driver);
}
module_init(adm8668net_init);
module_exit(adm8668net_exit);
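/*
 * Hedged sketch (not part of this commit): adm8668net_probe() above expects
 * board-support code elsewhere to register a matching "adm8668_eth" platform
 * device carrying one IORESOURCE_MEM and one IORESOURCE_IRQ resource, with
 * an id below MAX_UNITS.  The MMIO base and size here are hypothetical
 * placeholders for illustration; only the IRQ value comes from net.h.
 */
#define ADM8668_ETH_LAN_BASE	0x10000000	/* hypothetical */
#define ADM8668_ETH_LAN_SIZE	0x100		/* hypothetical */

static struct resource adm8668_lan_resources[] = {
	{
		.start	= ADM8668_ETH_LAN_BASE,
		.end	= ADM8668_ETH_LAN_BASE + ADM8668_ETH_LAN_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= ADM8668_LAN_IRQ,	/* 7, from net.h */
		.end	= ADM8668_LAN_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device adm8668_lan_device = {
	.name		= "adm8668_eth",	/* must match the driver name */
	.id		= 0,			/* probe rejects id >= MAX_UNITS */
	.resource	= adm8668_lan_resources,
	.num_resources	= ARRAY_SIZE(adm8668_lan_resources),
};
/* Board code would then call platform_device_register(&adm8668_lan_device). */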


@@ -1,446 +0,0 @@
/*
* originally drivers/net/tulip/interrupt.c
* Copyright 2000,2001 The Linux Kernel Team
* Written/copyright 1994-2001 by Donald Becker.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include "net.h"
int tulip_refill_rx(struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
int entry;
int refilled = 0;
/* Refill the Rx ring buffers. */
for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
entry = tp->dirty_rx % RX_RING_SIZE;
if (tp->rx_buffers[entry].skb == NULL) {
struct sk_buff *skb;
dma_addr_t mapping;
skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
if (skb == NULL)
break;
mapping = dma_map_single(&dev->dev, skb->data,
PKT_BUF_SZ, DMA_FROM_DEVICE);
tp->rx_buffers[entry].mapping = mapping;
skb->dev = dev; /* Mark as being used by this device. */
tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
refilled++;
}
tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
}
return refilled;
}
void oom_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct tulip_private *tp = netdev_priv(dev);
napi_schedule(&tp->napi);
}
int tulip_poll(struct napi_struct *napi, int budget)
{
struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
struct net_device *dev = tp->dev;
int entry = tp->cur_rx % RX_RING_SIZE;
int work_done = 0;
if (tulip_debug > 4)
printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
entry, tp->rx_ring[entry].status);
do {
if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
break;
}
/* Acknowledge current RX interrupt sources. */
iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
short pkt_len;
if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
break;
if (tulip_debug > 5)
printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
dev->name, entry, status);
if (++work_done >= budget)
goto not_done;
/*
* Omit the four octet CRC from the length.
* (May not be considered valid until we have
* checked status for RxLengthOver2047 bits)
*/
pkt_len = ((status >> 16) & 0x7ff) - 4;
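/* Worked example (illustrative): a minimum-size 64-byte frame arrives with
* a length field of 64 (CRC included), so pkt_len = 64 - 4 = 60. */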
#if 0
csr6 = ioread32(tp->base_addr + CSR6);
if (csr6 & 0x1)
pkt_len += 2;
#endif
/*
* Maximum pkt_len is 1518 (1514 + vlan header)
* Anything higher than this is always invalid
* regardless of RxLengthOver2047 bits
*/
if ((status & (RxLengthOver2047 |
RxDescCRCError |
RxDescCollisionSeen |
RxDescRunt |
RxDescDescErr |
RxWholePkt)) != RxWholePkt ||
pkt_len > 1518) {
if ((status & (RxLengthOver2047 |
RxWholePkt)) != RxWholePkt) {
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
dev_warn(&dev->dev,
"Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
status);
tp->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
dev->name, status);
tp->stats.rx_errors++; /* end of a packet. */
if (pkt_len > 1518 ||
(status & RxDescRunt))
tp->stats.rx_length_errors++;
if (status & 0x0004) tp->stats.rx_frame_errors++;
if (status & 0x0002) tp->stats.rx_crc_errors++;
if (status & 0x0001) tp->stats.rx_fifo_errors++;
}
} else {
struct sk_buff *skb = tp->rx_buffers[entry].skb;
char *temp = skb_put(skb, pkt_len);
#if 0
if (csr6 & 1)
skb_pull(skb, 2);
#endif
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
dev_err(&dev->dev,
"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
le32_to_cpu(tp->rx_ring[entry].buffer1),
(unsigned long long)tp->rx_buffers[entry].mapping,
skb->head, temp);
}
#endif
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
tp->stats.rx_packets++;
tp->stats.rx_bytes += pkt_len;
}
entry = (++tp->cur_rx) % RX_RING_SIZE;
if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
tulip_refill_rx(dev);
}
/* New ack strategy... the irq handler no longer acks Rx;
hopefully this helps */
/* Really bad things can happen here... If a new packet arrives
* and an irq arrives (tx, or just due to an occasionally unset
* mask), it will be acked by the irq handler, but the new poll
* is not scheduled. It is a major hole in the design.
* No idea how to fix this if "playing with fire" fails
* tomorrow (night 011029). If it does not fail, we have finally
* won: the amount of IO did not increase at all. */
} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
tulip_refill_rx(dev);
/* If RX ring is not full we are out of memory. */
if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
goto oom;
/* Remove us from polling list and enable RX intr. */
napi_complete(napi);
iowrite32(VALID_INTR, tp->base_addr+CSR7);
/* The last op happens after poll completion, which means the following:
* 1. it can race with disabling irqs in the irq handler
* 2. it can race with disabling/enabling irqs in other poll threads
* 3. if an irq is raised after the loop begins, it will be
* triggered here immediately.
*
* Summarizing: the logic results in some redundant irqs both
* due to races in masking and due to too late acking of already
* processed irqs. But it must not result in losing events.
*/
return work_done;
not_done:
if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
tulip_refill_rx(dev);
if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
goto oom;
return work_done;
oom: /* Executed with RX ints disabled */
/* Start timer, stop polling, but do not enable rx interrupts. */
mod_timer(&tp->oom_timer, jiffies+1);
/* Note: testing timer_pending() here would be an explicit sign of a bug:
* the timer could be pending now but fire and complete
* before we do napi_complete(), and we would lose the event. */
/* remove ourselves from the polling list */
napi_complete(napi);
return work_done;
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = (struct net_device *)dev_instance;
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
int csr5;
int missed;
int rx = 0;
int tx = 0;
int oi = 0;
int maxrx = RX_RING_SIZE;
int maxtx = TX_RING_SIZE;
int maxoi = TX_RING_SIZE;
int rxd = 0;
unsigned int work_count = 25;
unsigned int handled = 0;
/* Let's see whether the interrupt really is for us */
csr5 = ioread32(ioaddr + CSR5);
if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
return IRQ_RETVAL(handled);
tp->nir++;
do {
if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
rxd++;
/* Mask RX intrs and add the device to poll list. */
iowrite32(VALID_INTR&~RxPollInt, ioaddr + CSR7);
napi_schedule(&tp->napi);
if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
break;
}
/* Acknowledge the interrupt sources we handle here ASAP
the poll function does Rx and RxNoBuf acking */
iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
if (tulip_debug > 4)
printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
dev->name, csr5, ioread32(ioaddr + CSR5));
if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
unsigned int dirty_tx;
spin_lock(&tp->lock);
for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
dirty_tx++) {
int entry = dirty_tx % TX_RING_SIZE;
int status = le32_to_cpu(tp->tx_ring[entry].status);
if (status < 0)
break; /* It still has not been Txed */
if (status & 0x8000) {
/* There was a major error; log it. */
#ifndef final_version
if (tulip_debug > 1)
printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
dev->name, status);
#endif
tp->stats.tx_errors++;
if (status & 0x4104) tp->stats.tx_aborted_errors++;
if (status & 0x0C00) tp->stats.tx_carrier_errors++;
if (status & 0x0200) tp->stats.tx_window_errors++;
if (status & 0x0002) tp->stats.tx_fifo_errors++;
if (status & 0x0080) tp->stats.tx_heartbeat_errors++;
} else {
tp->stats.tx_bytes +=
tp->tx_buffers[entry].skb->len;
tp->stats.collisions += (status >> 3) & 15;
tp->stats.tx_packets++;
}
dma_unmap_single(&tp->pdev->dev, tp->tx_buffers[entry].mapping,
tp->tx_buffers[entry].skb->len, DMA_TO_DEVICE);
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
tp->tx_buffers[entry].skb = NULL;
tp->tx_buffers[entry].mapping = 0;
tx++;
}
#ifndef final_version
if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
dev_err(&dev->dev,
"Out-of-sync dirty pointer, %d vs. %d\n",
dirty_tx, tp->cur_tx);
dirty_tx += TX_RING_SIZE;
}
#endif
if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
netif_wake_queue(dev);
tp->dirty_tx = dirty_tx;
if (csr5 & TxDied) {
if (tulip_debug > 2)
dev_warn(&dev->dev,
"The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
csr5, ioread32(ioaddr + CSR6),
tp->csr6);
tulip_restart_rxtx(tp);
}
spin_unlock(&tp->lock);
}
/* Log errors. */
if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
if (csr5 == 0xffffffff)
break;
if (csr5 & TxJabber) tp->stats.tx_errors++;
if (csr5 & TxFIFOUnderflow) {
if ((tp->csr6 & 0xC000) != 0xC000)
tp->csr6 += 0x4000; /* Bump up the Tx threshold */
else
tp->csr6 |= 0x00200000; /* Store-n-forward. */
/* Restart the transmit process. */
tulip_restart_rxtx(tp);
iowrite32(0, ioaddr + CSR1);
}
if (csr5 & (RxDied | RxNoBuf)) {
iowrite32(tp->mc_filter[0], ioaddr + CSR27);
iowrite32(tp->mc_filter[1], ioaddr + CSR28);
}
if (csr5 & RxDied) { /* Missed a Rx frame. */
tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
tp->stats.rx_errors++;
tulip_start_rxtx(tp);
}
/*
* NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
* call is ever done under the spinlock
*/
if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
if (tp->link_change)
(tp->link_change)(dev, csr5);
}
if (csr5 & SystemError) {
int error = (csr5 >> 23) & 7;
/* oops, we hit a PCI error. The code produced corresponds
* to the reason:
* 0 - parity error
* 1 - master abort
* 2 - target abort
* Note that on parity error, we should do a software reset
* of the chip to get it back into a sane state (according
* to the 21142/3 docs that is).
* -- rmk
*/
dev_err(&dev->dev,
"(%lu) System Error occurred (%d)\n",
tp->nir, error);
}
/* Clear all error sources, included undocumented ones! */
iowrite32(0x0800f7ba, ioaddr + CSR5);
oi++;
}
if (csr5 & TimerInt) {
if (tulip_debug > 2)
dev_err(&dev->dev,
"Re-enabling interrupts, %08x\n",
csr5);
iowrite32(VALID_INTR, ioaddr + CSR7);
oi++;
}
if (tx > maxtx || rx > maxrx || oi > maxoi) {
if (tulip_debug > 1)
dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
csr5, tp->nir, tx, rx, oi);
/* Acknowledge all interrupt sources. */
iowrite32(0x8001ffff, ioaddr + CSR5);
/* Mask all interrupting sources, set timer to
re-enable. */
iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
iowrite32(0x0012, ioaddr + CSR11);
break;
}
work_count--;
if (work_count == 0)
break;
csr5 = ioread32(ioaddr + CSR5);
if (rxd)
csr5 &= ~RxPollInt;
} while ((csr5 & (TxNoBuf |
TxDied |
TxIntr |
TimerInt |
/* Abnormal intr. */
RxDied |
TxFIFOUnderflow |
TxJabber |
TPLnkFail |
SystemError )) != 0);
if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
}
if (tulip_debug > 4)
printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
dev->name, ioread32(ioaddr + CSR5));
return IRQ_HANDLED;
}