
ramips_eth: coding style cleanup

git-svn-id: svn://svn.openwrt.org/openwrt/trunk@19414 3c298f89-4303-0410-b956-a3cf2f4a3e73
Author: juhosg
Date:   2010-01-30 15:25:55 +00:00
parent  ad3d89cc5a
commit  06901dcfd6
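
This is a pure formatting change: it brings the ramips_eth driver in line with the usual Linux kernel coding style (a space after if/for/while, the opening brace on the same line as the statement, wrapped argument lists aligned under the opening parenthesis, and blank lines separating declarations and logical blocks). As a minimal illustrative sketch of the conventions being applied (the names example_ring_init, struct example_desc and NUM_EXAMPLE_DESC are invented for this example and are not taken from the driver):

/*
 * Illustrative only: demonstrates the formatting rules the patch applies
 * (keyword spacing, brace placement, argument alignment), not any
 * ramips_eth behaviour.
 */
#include <string.h>

#define NUM_EXAMPLE_DESC 4

struct example_desc {
        unsigned int flags;
};

static void example_ring_init(struct example_desc *ring, unsigned int lso,
                              unsigned int done)
{
        int i;

        /* Space after 'for', opening brace on the same line. */
        for (i = 0; i < NUM_EXAMPLE_DESC; i++) {
                memset(&ring[i], 0, sizeof(struct example_desc));
                ring[i].flags |= lso | done;
        }
}

The hunks below make exactly this kind of change throughout the driver; no functional behaviour is intended to change.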


@@ -83,12 +83,12 @@ ramips_alloc_dma(struct net_device *dev)
         /* setup tx ring */
         priv->tx = dma_alloc_coherent(NULL,
-                NUM_TX_DESC * sizeof(struct ramips_tx_dma), &priv->phy_tx, GFP_ATOMIC);
+                                      NUM_TX_DESC * sizeof(struct ramips_tx_dma),
+                                      &priv->phy_tx, GFP_ATOMIC);
         if (!priv->tx)
                 goto err_cleanup;
-        for(i = 0; i < NUM_TX_DESC; i++)
-        {
+        for (i = 0; i < NUM_TX_DESC; i++) {
                 memset(&priv->tx[i], 0, sizeof(struct ramips_tx_dma));
                 priv->tx[i].txd2 |= TX_DMA_LSO | TX_DMA_DONE;
                 priv->tx[i].txd4 &= (TX_DMA_QN_MASK | TX_DMA_PN_MASK);
@@ -97,22 +97,23 @@ ramips_alloc_dma(struct net_device *dev)
         /* setup rx ring */
         priv->rx = dma_alloc_coherent(NULL,
-                NUM_RX_DESC * sizeof(struct ramips_rx_dma), &priv->phy_rx, GFP_ATOMIC);
+                                      NUM_RX_DESC * sizeof(struct ramips_rx_dma),
+                                      &priv->phy_rx, GFP_ATOMIC);
         if (!priv->rx)
                 goto err_cleanup;
         memset(priv->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
-        for(i = 0; i < NUM_RX_DESC; i++)
-        {
+        for (i = 0; i < NUM_RX_DESC; i++) {
                 struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH + 2);
                 if (!new_skb)
                         goto err_cleanup;
                 skb_reserve(new_skb, 2);
-                priv->rx[i].rxd1 =
-                        dma_map_single(NULL, skb_put(new_skb, 2), MAX_RX_LENGTH + 2,
-                        DMA_FROM_DEVICE);
+                priv->rx[i].rxd1 = dma_map_single(NULL,
+                                                  skb_put(new_skb, 2),
+                                                  MAX_RX_LENGTH + 2,
+                                                  DMA_FROM_DEVICE);
                 priv->rx[i].rxd2 |= RX_DMA_LSO;
                 priv->rx_skb[i] = new_skb;
         }
@@ -141,7 +142,7 @@ ramips_setup_dma(struct net_device *dev)
 }
 
 static int
-ramips_eth_hard_start_xmit(struct sk_buff* skb, struct net_device *dev)
+ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct raeth_priv *priv = netdev_priv(dev);
         unsigned long tx;
@@ -149,32 +150,34 @@ ramips_eth_hard_start_xmit(struct sk_buff* skb, struct net_device *dev)
         unsigned int mapped_addr;
         unsigned long flags;
 
-        if(priv->plat->min_pkt_len)
-        {
-                if(skb->len < priv->plat->min_pkt_len)
-                {
-                        if(skb_padto(skb, priv->plat->min_pkt_len))
-                        {
-                                printk(KERN_ERR "ramips_eth: skb_padto failed\n");
-                                kfree_skb(skb);
-                                return 0;
-                        }
-                        skb_put(skb, priv->plat->min_pkt_len - skb->len);
-                }
+        if (priv->plat->min_pkt_len) {
+                if (skb->len < priv->plat->min_pkt_len) {
+                        if (skb_padto(skb, priv->plat->min_pkt_len)) {
+                                printk(KERN_ERR
+                                       "ramips_eth: skb_padto failed\n");
+                                kfree_skb(skb);
+                                return 0;
+                        }
+                        skb_put(skb, priv->plat->min_pkt_len - skb->len);
+                }
         }
+
         dev->trans_start = jiffies;
-        mapped_addr = (unsigned int)dma_map_single(NULL, skb->data, skb->len,
-                DMA_TO_DEVICE);
+        mapped_addr = (unsigned int) dma_map_single(NULL, skb->data, skb->len,
+                                                    DMA_TO_DEVICE);
         dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
         spin_lock_irqsave(&priv->page_lock, flags);
         tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
-        if(tx == NUM_TX_DESC - 1)
+        if (tx == NUM_TX_DESC - 1)
                 tx_next = 0;
         else
                 tx_next = tx + 1;
-        if((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
-           !(priv->tx[tx].txd2 & TX_DMA_DONE) || !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
+
+        if ((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
+            !(priv->tx[tx].txd2 & TX_DMA_DONE) ||
+            !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
                 goto out;
+
         priv->tx[tx].txd1 = mapped_addr;
         priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
         priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
@@ -185,7 +188,8 @@ ramips_eth_hard_start_xmit(struct sk_buff* skb, struct net_device *dev)
         ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
         spin_unlock_irqrestore(&priv->page_lock, flags);
         return NETDEV_TX_OK;
+
 out:
         spin_unlock_irqrestore(&priv->page_lock, flags);
         dev->stats.tx_dropped++;
         kfree_skb(skb);
@@ -195,17 +199,16 @@ out:
 
 static void
 ramips_eth_rx_hw(unsigned long ptr)
 {
-        struct net_device *dev = (struct net_device*)ptr;
+        struct net_device *dev = (struct net_device *) ptr;
         struct raeth_priv *priv = netdev_priv(dev);
         int rx;
         int max_rx = 16;
 
-        while(max_rx)
-        {
+        while (max_rx) {
                 struct sk_buff *rx_skb, *new_skb;
 
                 rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
-                if(!(priv->rx[rx].rxd2 & RX_DMA_DONE))
+                if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
                         break;
                 max_rx--;
@@ -222,18 +225,20 @@ ramips_eth_rx_hw(unsigned long ptr)
                 priv->rx_skb[rx] = new_skb;
                 BUG_ON(!new_skb);
                 skb_reserve(new_skb, 2);
-                priv->rx[rx].rxd1 =
-                        dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH + 2,
-                        DMA_FROM_DEVICE);
+                priv->rx[rx].rxd1 = dma_map_single(NULL,
+                                                   new_skb->data,
+                                                   MAX_RX_LENGTH + 2,
+                                                   DMA_FROM_DEVICE);
                 priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
                 wmb();
                 ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
         }
-        if(max_rx == 0)
+
+        if (max_rx == 0)
                 tasklet_schedule(&priv->rx_tasklet);
         else
                 ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_RX_DLY_INT,
                              RAMIPS_FE_INT_ENABLE);
 }
 
 static void
@@ -242,30 +247,31 @@ ramips_eth_tx_housekeeping(unsigned long ptr)
         struct net_device *dev = (struct net_device*)ptr;
         struct raeth_priv *priv = netdev_priv(dev);
 
-        while((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
-              (priv->tx_skb[priv->skb_free_idx]))
-        {
-                dev_kfree_skb_irq((struct sk_buff*)priv->tx_skb[priv->skb_free_idx]);
+        while ((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
+               (priv->tx_skb[priv->skb_free_idx])) {
+                dev_kfree_skb_irq((struct sk_buff *) priv->tx_skb[priv->skb_free_idx]);
                 priv->tx_skb[priv->skb_free_idx] = 0;
                 priv->skb_free_idx++;
-                if(priv->skb_free_idx >= NUM_TX_DESC)
+                if (priv->skb_free_idx >= NUM_TX_DESC)
                         priv->skb_free_idx = 0;
         }
         ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_TX_DLY_INT,
                      RAMIPS_FE_INT_ENABLE);
 }
 
 static int
 ramips_eth_set_mac_addr(struct net_device *dev, void *priv)
 {
-        unsigned char *mac = (unsigned char*)priv;
+        unsigned char *mac = (unsigned char *) priv;
 
-        if(netif_running(dev))
+        if (netif_running(dev))
                 return -EBUSY;
+
         memcpy(dev->dev_addr, ((struct sockaddr*)priv)->sa_data, dev->addr_len);
         ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
         ramips_fe_wr(RAMIPS_GDMA1_MAC_ADRL,
                      (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5]);
         return 0;
 }
@@ -285,14 +291,15 @@ ramips_eth_irq(int irq, void *dev)
         ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS);
 
-        if(fe_int & RAMIPS_RX_DLY_INT)
-        {
+        if (fe_int & RAMIPS_RX_DLY_INT) {
                 ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~(RAMIPS_RX_DLY_INT),
                              RAMIPS_FE_INT_ENABLE);
                 tasklet_schedule(&priv->rx_tasklet);
         }
-        if(fe_int & RAMIPS_TX_DLY_INT)
+
+        if (fe_int & RAMIPS_TX_DLY_INT)
                 ramips_eth_tx_housekeeping((unsigned long)dev);
+
         return IRQ_HANDLED;
 }
@@ -320,9 +327,11 @@ ramips_eth_open(struct net_device *dev)
                      ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
                      ((rt305x_sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
                      RAMIPS_FE_GLO_CFG);
+
         tasklet_init(&priv->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
                      (unsigned long)dev);
         tasklet_init(&priv->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);
+
         ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
         ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE);
         ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
@@ -334,6 +343,7 @@ ramips_eth_open(struct net_device *dev)
         ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);
         ramips_fe_wr(1, RAMIPS_FE_RST_GL);
         ramips_fe_wr(0, RAMIPS_FE_RST_GL);
+
         netif_start_queue(dev);
         return 0;
@@ -379,6 +389,7 @@ ramips_eth_probe(struct net_device *dev)
         dev->tx_timeout = ramips_eth_timeout;
         dev->watchdog_timeo = TX_TIMEOUT;
         spin_lock_init(&priv->page_lock);
+
         return 0;
 }
@@ -402,11 +413,11 @@ ramips_eth_plat_probe(struct platform_device *plat)
         }
 
         ramips_fe_base = ioremap_nocache(res->start, res->end - res->start + 1);
-        if(!ramips_fe_base)
+        if (!ramips_fe_base)
                 return -ENOMEM;
 
         ramips_dev = alloc_etherdev(sizeof(struct raeth_priv));
-        if(!ramips_dev) {
+        if (!ramips_dev) {
                 dev_err(&plat->dev, "alloc_etherdev failed\n");
                 err = -ENOMEM;
                 goto err_unmap;
@@ -422,7 +433,8 @@ ramips_eth_plat_probe(struct platform_device *plat)
         ramips_dev->addr_len = ETH_ALEN;
         ramips_dev->base_addr = (unsigned long)ramips_fe_base;
         ramips_dev->init = ramips_eth_probe;
-        priv = (struct raeth_priv*)netdev_priv(ramips_dev);
+
+        priv = netdev_priv(ramips_dev);
         priv->plat = data;
 
         err = register_netdev(ramips_dev);