openwrt-xburst/package/madwifi/patches/300-napi_polling.patch

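This patch switches madwifi's receive processing from the ath_rx_tasklet tasklet to the old-style (pre-2.6.24) NAPI polling interface: the driver registers dev->poll = ath_rx_poll with dev->weight = 64, the interrupt handler masks HAL_INT_RX and schedules the poll, and a shadow copy of the hardware interrupt status (sc->sc_isr) lets the deferred RX/TX paths notice work that arrived while they were running before interrupts are unmasked again. Several IRQ-disabling spinlocks are relaxed to BH variants (skipped entirely when IRQs are already disabled) because the affected paths now run in softirq context. For orientation, here is a minimal sketch of the kernel interface the patch targets; my_poll(), my_priv, my_rx_one() and the *_rx_irq() helpers are placeholders, not madwifi symbols.

/* Old (pre-2.6.24) NAPI: the driver supplies dev->poll and dev->weight.
 * The poll callback must decrement both *budget and dev->quota by the
 * number of frames it delivered, return 0 once the RX ring is drained
 * (after netif_rx_complete() and re-enabling the RX interrupt), and
 * return 1 when it stopped early because the quota was exhausted. */
static int my_poll(struct net_device *dev, int *budget)
{
        struct my_priv *priv = dev->priv;
        int limit = dev->quota < *budget ? dev->quota : *budget;
        int work = 0;

        while (work < limit && my_rx_one(priv))  /* hands skbs to netif_receive_skb() */
                work++;

        *budget -= work;
        dev->quota -= work;

        if (work < limit) {
                netif_rx_complete(dev);          /* leave polling mode ...          */
                my_enable_rx_irq(priv);          /* ... then re-arm the RX interrupt */
                return 0;
        }
        return 1;                                /* more work pending, stay scheduled */
}

/* Matching interrupt-handler fragment: switch from interrupts to polling. */
static void my_rx_interrupt(struct my_priv *priv, struct net_device *dev)
{
        if (netif_rx_schedule_prep(dev)) {       /* not already scheduled? */
                my_disable_rx_irq(priv);
                __netif_rx_schedule(dev);        /* queue dev->poll on this CPU */
        }
}
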

diff -ur madwifi.old/ath/if_ath.c madwifi.dev/ath/if_ath.c
--- madwifi.old/ath/if_ath.c 2007-05-21 07:49:54.571131744 +0200
+++ madwifi.dev/ath/if_ath.c 2007-05-21 07:51:40.208072488 +0200
@@ -167,7 +167,7 @@
int, u_int32_t);
static void ath_setdefantenna(struct ath_softc *, u_int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
-static void ath_rx_tasklet(TQUEUE_ARG);
+static int ath_rx_poll(struct net_device *dev, int *budget);
static int ath_hardstart(struct sk_buff *, struct net_device *);
static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
#ifdef ATH_SUPERG_COMP
@@ -417,7 +417,6 @@
ATH_TXBUF_LOCK_INIT(sc);
ATH_RXBUF_LOCK_INIT(sc);
- ATH_INIT_TQUEUE(&sc->sc_rxtq, ath_rx_tasklet, dev);
ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet, dev);
ATH_INIT_TQUEUE(&sc->sc_bmisstq, ath_bmiss_tasklet, dev);
ATH_INIT_TQUEUE(&sc->sc_bstucktq, ath_bstuck_tasklet, dev);
@@ -665,6 +664,8 @@
dev->set_mac_address = ath_set_mac_address;
dev->change_mtu = ath_change_mtu;
dev->tx_queue_len = ATH_TXBUF - 1; /* 1 for mgmt frame */
+ dev->poll = ath_rx_poll;
+ dev->weight = 64;
#ifdef USE_HEADERLEN_RESV
dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
sizeof(struct llc) +
@@ -1635,6 +1636,7 @@
*/
ath_hal_getisr(ah, &status); /* NB: clears ISR too */
DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
+ sc->sc_isr = status;
status &= sc->sc_imask; /* discard unasked for bits */
if (status & HAL_INT_FATAL) {
sc->sc_stats.ast_hardware++;
@@ -1674,7 +1676,12 @@
* might take too long to fire */
ath_hal_process_noisefloor(ah);
sc->sc_channoise = ath_hal_get_channel_noise(ah, &(sc->sc_curchan));
- ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
+ sc->sc_isr &= ~HAL_INT_RX;
+ if (netif_rx_schedule_prep(dev)) {
+ sc->sc_imask &= ~HAL_INT_RX;
+ ath_hal_intrset(ah, sc->sc_imask);
+ __netif_rx_schedule(dev);
+ }
}
if (status & HAL_INT_TX) {
#ifdef ATH_SUPERG_DYNTURBO
@@ -1700,6 +1707,11 @@
}
}
#endif
+ /* disable transmit interrupt */
+ sc->sc_isr &= ~HAL_INT_TX;
+ ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_TX);
+ sc->sc_imask &= ~HAL_INT_TX;
+
ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
}
if (status & HAL_INT_BMISS) {
@@ -2162,12 +2174,13 @@
* Insert the frame on the outbound list and
* pass it on to the hardware.
*/
- ATH_TXQ_LOCK(txq);
+ ATH_TXQ_LOCK_IRQ(txq);
if (ni && ni->ni_vap && txq == &ATH_VAP(ni->ni_vap)->av_mcastq) {
/*
* The CAB queue is started from the SWBA handler since
* frames only go out on DTIM and to avoid possible races.
*/
+ sc->sc_imask &= ~HAL_INT_SWBA;
ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_SWBA);
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: txq depth = %d\n", __func__, txq->axq_depth);
@@ -2183,6 +2196,7 @@
ito64(bf->bf_daddr), bf->bf_desc);
}
txq->axq_link = &lastds->ds_link;
+ sc->sc_imask |= HAL_INT_SWBA;
ath_hal_intrset(ah, sc->sc_imask);
} else {
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
@@ -2218,7 +2232,7 @@
}
}
}
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_IRQ(txq);
sc->sc_devstats.tx_packets++;
sc->sc_devstats.tx_bytes += framelen;
@@ -2369,8 +2383,14 @@
unsigned int pktlen;
int framecnt;
+ /*
+ * NB: using _BH style locking even though this function may be called
+ * at interrupt time (within tasklet or bh). This should be harmless
+ * and this function calls others (i.e., ath_tx_start()) which do
+ * the same.
+ */
for (;;) {
- ATH_TXQ_LOCK(txq);
+ ATH_TXQ_LOCK_BH(txq);
bf_ff = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
if ((!bf_ff) || ath_ff_flushdonetest(txq, bf_ff)) {
@@ -2384,7 +2404,7 @@
ATH_NODE(ni)->an_tx_ffbuf[bf_ff->bf_skb->priority] = NULL;
TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);
/* encap and xmit */
bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
@@ -2405,15 +2425,16 @@
}
bf_ff->bf_node = NULL;
- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
}
+ ATH_TXQ_UNLOCK_BH(txq);
}
#endif
#define ATH_HARDSTART_GET_TX_BUF_WITH_LOCK \
- ATH_TXBUF_LOCK_IRQ(sc); \
+ ATH_TXBUF_LOCK_BH(sc); \
bf = STAILQ_FIRST(&sc->sc_txbuf); \
if (bf != NULL) { \
STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); \
@@ -2428,11 +2449,23 @@
sc->sc_devstopped = 1; \
ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL); \
} \
- ATH_TXBUF_UNLOCK_IRQ(sc); \
+
+#define ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF \
+ ATH_TXBUF_UNLOCK_BH(sc); \
+ if (bf == NULL) { /* NB: should not happen */ \
+ DPRINTF(sc,ATH_DEBUG_XMIT,"%s: discard, no xmit buf\n", __func__); \
+ sc->sc_stats.ast_tx_nobuf++; \
+ goto hardstart_fail; \
+ }
+
+#define ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_ON \
+ ATH_TXBUF_UNLOCK_BH(sc); \
if (bf == NULL) { /* NB: should not happen */ \
DPRINTF(sc,ATH_DEBUG_XMIT, \
"%s: discard, no xmit buf\n", __func__); \
+ ATH_TXQ_UNLOCK_BH(txq); \
sc->sc_stats.ast_tx_nobuf++; \
+ goto hardstart_fail; \
}
/*
@@ -2494,6 +2527,7 @@
if (M_FLAG_GET(skb, M_UAPSD)) {
/* bypass FF handling */
ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
if (bf == NULL)
goto hardstart_fail;
goto ff_bypass;
@@ -2515,7 +2549,7 @@
/* NB: use this lock to protect an->an_ff_txbuf in athff_can_aggregate()
* call too.
*/
- ATH_TXQ_LOCK(txq);
+ ATH_TXQ_LOCK_BH(txq);
if (athff_can_aggregate(sc, eh, an, skb, vap->iv_fragthreshold, &ff_flush)) {
if (an->an_tx_ffbuf[skb->priority]) { /* i.e., frame on the staging queue */
@@ -2525,7 +2559,7 @@
TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
an->an_tx_ffbuf[skb->priority] = NULL;
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);
/*
* chain skbs and add FF magic
@@ -2552,6 +2586,7 @@
* to give the buffer back.
*/
ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_ON;
if (bf == NULL) {
ATH_TXQ_UNLOCK(txq);
goto hardstart_fail;
@@ -2566,7 +2601,7 @@
TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);
return 0;
}
@@ -2577,7 +2612,7 @@
TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
an->an_tx_ffbuf[skb->priority] = NULL;
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);
/* encap and xmit */
bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
@@ -2607,9 +2642,9 @@
}
bf_ff->bf_node = NULL;
- ATH_TXBUF_LOCK(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
- ATH_TXBUF_UNLOCK(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
goto ff_flushdone;
}
/*
@@ -2619,14 +2654,13 @@
else if (an->an_tx_ffbuf[skb->priority]) {
DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
"%s: Out-Of-Order fast-frame\n", __func__);
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);
} else
- ATH_TXQ_UNLOCK(txq);
+ ATH_TXQ_UNLOCK_BH(txq);
ff_flushdone:
ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
- if (bf == NULL)
- goto hardstart_fail;
+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
}
ff_bypass:
@@ -2634,6 +2668,7 @@
#else /* ATH_SUPERG_FF */
ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+ ATH_HARDSTART_REL_TX_BUF_WITH_TXQLOCK_OFF;
#endif /* ATH_SUPERG_FF */
@@ -2655,7 +2690,7 @@
* Allocate 1 ath_buf for each frame given 1 was
* already alloc'd
*/
- ATH_TXBUF_LOCK(sc);
+ ATH_TXBUF_LOCK_BH(sc);
for (bfcnt = 1; bfcnt < framecnt; ++bfcnt) {
if ((tbf = STAILQ_FIRST(&sc->sc_txbuf)) != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
@@ -2676,11 +2711,11 @@
STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
}
}
- ATH_TXBUF_UNLOCK(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
STAILQ_INIT(&bf_head);
goto hardstart_fail;
}
- ATH_TXBUF_UNLOCK(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
while ((bf = STAILQ_FIRST(&bf_head)) != NULL && skb != NULL) {
unsigned int nextfraglen = 0;
@@ -2716,7 +2751,7 @@
hardstart_fail:
if (!STAILQ_EMPTY(&bf_head)) {
- ATH_TXBUF_LOCK(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_FOREACH_SAFE(tbf, &bf_head, bf_list, tempbf) {
tbf->bf_skb = NULL;
tbf->bf_node = NULL;
@@ -2726,7 +2761,7 @@
STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
}
- ATH_TXBUF_UNLOCK(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
}
/* free sk_buffs */
@@ -2769,7 +2804,7 @@
/*
* Grab a TX buffer and associated resources.
*/
- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
bf = STAILQ_FIRST(&sc->sc_txbuf);
if (bf != NULL)
STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
@@ -2780,7 +2815,7 @@
sc->sc_devstopped=1;
ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);
}
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
if (bf == NULL) {
printk("ath_mgtstart: discard, no xmit buf\n");
sc->sc_stats.ast_tx_nobufmgt++;
@@ -2809,9 +2844,9 @@
bf->bf_skb = NULL;
bf->bf_node = NULL;
- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
}
dev_kfree_skb_any(skb);
skb = NULL;
@@ -3279,10 +3314,10 @@
*
* XXX Using in_softirq is not right since we might
* be called from other soft irq contexts than
- * ath_rx_tasklet.
+ * ath_rx_poll
*/
if (!in_softirq())
- tasklet_disable(&sc->sc_rxtq);
+ netif_poll_disable(dev);
netif_stop_queue(dev);
}
@@ -3295,7 +3330,7 @@
DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
netif_start_queue(dev);
if (!in_softirq()) /* NB: see above */
- tasklet_enable(&sc->sc_rxtq);
+ netif_poll_enable(dev);
}
/*
@@ -4861,9 +4896,9 @@
bf->bf_node = NULL;
bf->bf_desc->ds_link = 0;
- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
an->an_uapsd_overflowqdepth--;
}
@@ -5542,13 +5577,12 @@
sc->sc_rxotherant = 0;
}
-static void
-ath_rx_tasklet(TQUEUE_ARG data)
+static int
+ath_rx_poll(struct net_device *dev, int *budget)
{
#define PA2DESC(_sc, _pa) \
((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
- struct net_device *dev = (struct net_device *)data;
struct ath_buf *bf;
struct ath_softc *sc = dev->priv;
struct ieee80211com *ic = &sc->sc_ic;
@@ -5560,11 +5594,15 @@
unsigned int len;
int type;
u_int phyerr;
+ int processed = 0, early_stop = 0;
+ int rx_limit = dev->quota;
/* Let the 802.11 layer know about the new noise floor */
ic->ic_channoise = sc->sc_channoise;
DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s\n", __func__);
+
+process_rx_again:
do {
bf = STAILQ_FIRST(&sc->sc_rxbuf);
if (bf == NULL) { /* XXX ??? can this happen */
@@ -5588,6 +5626,13 @@
/* NB: never process the self-linked entry at the end */
break;
}
+
+ processed++;
+ if (rx_limit-- < 0) {
+ early_stop = 1;
+ break;
+ }
+
skb = bf->bf_skb;
if (skb == NULL) { /* XXX ??? can this happen */
printk("%s: no skbuff (%s)\n", dev->name, __func__);
@@ -5626,6 +5671,7 @@
sc->sc_stats.ast_rx_phyerr++;
phyerr = rs->rs_phyerr & 0x1f;
sc->sc_stats.ast_rx_phy[phyerr]++;
+ goto rx_next;
}
if (rs->rs_status & HAL_RXERR_DECRYPT) {
/*
@@ -5829,9 +5875,29 @@
STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
ATH_RXBUF_UNLOCK_IRQ(sc);
} while (ath_rxbuf_init(sc, bf) == 0);
+ if (!early_stop) {
+ /* Check if more data is received while we were
+ * processing the descriptor chain.
+ */
+ ATH_DISABLE_INTR();
+ if (sc->sc_isr & HAL_INT_RX) {
+ sc->sc_isr &= ~HAL_INT_RX;
+ ATH_ENABLE_INTR();
+ ath_uapsd_processtriggers(sc);
+ goto process_rx_again;
+ }
+ netif_rx_complete(dev);
+
+ sc->sc_imask |= HAL_INT_RX;
+ ath_hal_intrset(ah, sc->sc_imask);
+ ATH_ENABLE_INTR();
+ }
+
+ *budget -= processed;
/* rx signal state monitoring */
ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
+ return early_stop;
#undef PA2DESC
}
@@ -6107,22 +6173,22 @@
}
}
- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
bf = STAILQ_FIRST(&sc->sc_grppollbuf);
if (bf != NULL)
STAILQ_REMOVE_HEAD(&sc->sc_grppollbuf, bf_list);
else {
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs\n", __func__);
- ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
return;
}
/* XXX use a counter and leave at least one for mgmt frames */
if (STAILQ_EMPTY(&sc->sc_grppollbuf)) {
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs left\n", __func__);
- ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
return;
}
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
skb->data, skb->len, BUS_DMA_TODEVICE);
@@ -6588,9 +6654,9 @@
dev_kfree_skb(lastbuf->bf_skb);
lastbuf->bf_skb = NULL;
ieee80211_unref_node(&lastbuf->bf_node);
- ATH_TXBUF_LOCK_IRQ(sc);
+ ATH_TXBUF_LOCK_BH(sc);
STAILQ_INSERT_TAIL(&sc->sc_txbuf, lastbuf, bf_list);
- ATH_TXBUF_UNLOCK_IRQ(sc);
+ ATH_TXBUF_UNLOCK_BH(sc);
/*
* move oldest from overflow to delivery
@@ -7411,9 +7477,6 @@
if (sc->sc_reapcount > ATH_TXBUF_FREE_THRESHOLD) {
if (!sc->sc_dfswait)
netif_start_queue(sc->sc_dev);
- DPRINTF(sc, ATH_DEBUG_TX_PROC,
- "%s: tx tasklet restart the queue\n",
- __func__);
sc->sc_reapcount = 0;
sc->sc_devstopped = 0;
} else
@@ -7448,11 +7511,22 @@
struct net_device *dev = (struct net_device *)data;
struct ath_softc *sc = dev->priv;
+process_tx_again:
if (txqactive(sc->sc_ah, 0))
ath_tx_processq(sc, &sc->sc_txq[0]);
if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
ath_tx_processq(sc, sc->sc_cabq);
+ ATH_DISABLE_INTR();
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
+ ATH_ENABLE_INTR();
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+ ATH_ENABLE_INTR();
+
netif_wake_queue(dev);
if (sc->sc_softled)
@@ -7469,6 +7543,7 @@
struct net_device *dev = (struct net_device *)data;
struct ath_softc *sc = dev->priv;
+process_tx_again:
/*
* Process each active queue.
*/
@@ -7489,6 +7564,16 @@
if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
ath_tx_processq(sc, sc->sc_uapsdq);
+ ATH_DISABLE_INTR();
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
+ ATH_ENABLE_INTR();
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+ ATH_ENABLE_INTR();
+
netif_wake_queue(dev);
if (sc->sc_softled)
@@ -7506,6 +7591,7 @@
unsigned int i;
/* Process each active queue. */
+process_tx_again:
for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
ath_tx_processq(sc, &sc->sc_txq[i]);
@@ -7514,6 +7600,16 @@
ath_tx_processq(sc, sc->sc_xrtxq);
#endif
+ ATH_DISABLE_INTR();
+ if (sc->sc_isr & HAL_INT_TX) {
+ sc->sc_isr &= ~HAL_INT_TX;
+ ATH_ENABLE_INTR();
+ goto process_tx_again;
+ }
+ sc->sc_imask |= HAL_INT_TX;
+ ath_hal_intrset(sc->sc_ah, sc->sc_imask);
+ ATH_ENABLE_INTR();
+
netif_wake_queue(dev);
if (sc->sc_softled)
@@ -7612,6 +7708,7 @@
ath_draintxq(struct ath_softc *sc)
{
struct ath_hal *ah = sc->sc_ah;
+ int npend = 0;
unsigned int i;
/* XXX return value */
@@ -9144,9 +9241,9 @@
dev->mtu = mtu;
if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
/* NB: the rx buffers may need to be reallocated */
- tasklet_disable(&sc->sc_rxtq);
+ netif_poll_disable(dev);
error = ath_reset(dev);
- tasklet_enable(&sc->sc_rxtq);
+ netif_poll_enable(dev);
}
ATH_UNLOCK(sc);
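
The handshake appended above to ath_rx_poll() and the TX tasklets is the heart of the change: because ath_intr() masks HAL_INT_RX/HAL_INT_TX before scheduling deferred work, the deferred side must re-check the latched sc->sc_isr bits with interrupts disabled before unmasking, otherwise frames that arrived in the window between "ring drained" and "interrupt re-enabled" would sit until the next unrelated interrupt. Condensed restatement of the RX side (not standalone code; identifiers as in the patch, with ATH_DISABLE_INTR()/ATH_ENABLE_INTR() written out as the local_irq_*() calls they map to):

local_irq_disable();
if (sc->sc_isr & HAL_INT_RX) {          /* more RX latched while we were polling */
        sc->sc_isr &= ~HAL_INT_RX;
        local_irq_enable();
        ath_uapsd_processtriggers(sc);
        goto process_rx_again;          /* drain the new descriptors first */
}
netif_rx_complete(dev);                 /* leave polling mode ...            */
sc->sc_imask |= HAL_INT_RX;             /* ... only then unmask RX in hardware */
ath_hal_intrset(ah, sc->sc_imask);
local_irq_enable();
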
diff -ur madwifi.old/ath/if_athvar.h madwifi.dev/ath/if_athvar.h
--- madwifi.old/ath/if_athvar.h 2007-05-21 07:49:54.563132960 +0200
+++ madwifi.dev/ath/if_athvar.h 2007-05-21 07:50:22.814838048 +0200
@@ -48,6 +48,10 @@
#include "if_athioctl.h"
#include "net80211/ieee80211.h" /* XXX for WME_NUM_AC */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled() 0
+#endif
+
/*
* Deduce if tasklets are available. If not then
* fall back to using the immediate work queue.
@@ -478,8 +482,12 @@
#define ATH_TXQ_LOCK_DESTROY(_tq)
#define ATH_TXQ_LOCK(_tq) spin_lock(&(_tq)->axq_lock)
#define ATH_TXQ_UNLOCK(_tq) spin_unlock(&(_tq)->axq_lock)
-#define ATH_TXQ_LOCK_BH(_tq) spin_lock_bh(&(_tq)->axq_lock)
-#define ATH_TXQ_UNLOCK_BH(_tq) spin_unlock_bh(&(_tq)->axq_lock)
+#define ATH_TXQ_LOCK_BH(_tq) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_tq)->axq_lock)
+#define ATH_TXQ_UNLOCK_BH(_tq) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_tq)->axq_lock)
#define ATH_TXQ_LOCK_IRQ(_tq) do { \
unsigned long __axq_lockflags; \
spin_lock_irqsave(&(_tq)->axq_lock, __axq_lockflags);
@@ -623,7 +631,6 @@
struct ath_buf *sc_rxbufcur; /* current rx buffer */
u_int32_t *sc_rxlink; /* link ptr in last RX desc */
spinlock_t sc_rxbuflock;
- struct ATH_TQ_STRUCT sc_rxtq; /* rx intr tasklet */
struct ATH_TQ_STRUCT sc_rxorntq; /* rxorn intr tasklet */
u_int8_t sc_defant; /* current default antenna */
u_int8_t sc_rxotherant; /* rx's on non-default antenna*/
@@ -636,6 +643,7 @@
u_int sc_txintrperiod; /* tx interrupt batching */
struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
struct ath_txq *sc_ac2q[WME_NUM_AC]; /* WME AC -> h/w qnum */
+ HAL_INT sc_isr; /* unmasked ISR state */
struct ATH_TQ_STRUCT sc_txtq; /* tx intr tasklet */
u_int8_t sc_grppoll_str[GRPPOLL_RATE_STR_LEN];
struct ath_descdma sc_bdma; /* beacon descriptors */
@@ -701,8 +709,12 @@
#define ATH_TXBUF_LOCK_DESTROY(_sc)
#define ATH_TXBUF_LOCK(_sc) spin_lock(&(_sc)->sc_txbuflock)
#define ATH_TXBUF_UNLOCK(_sc) spin_unlock(&(_sc)->sc_txbuflock)
-#define ATH_TXBUF_LOCK_BH(_sc) spin_lock_bh(&(_sc)->sc_txbuflock)
-#define ATH_TXBUF_UNLOCK_BH(_sc) spin_unlock_bh(&(_sc)->sc_txbuflock)
+#define ATH_TXBUF_LOCK_BH(_sc) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_sc)->sc_txbuflock)
+#define ATH_TXBUF_UNLOCK_BH(_sc) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_sc)->sc_txbuflock)
#define ATH_TXBUF_LOCK_IRQ(_sc) do { \
unsigned long __txbuflockflags; \
spin_lock_irqsave(&(_sc)->sc_txbuflock, __txbuflockflags);
@@ -720,8 +732,12 @@
#define ATH_RXBUF_LOCK_DESTROY(_sc)
#define ATH_RXBUF_LOCK(_sc) spin_lock(&(_sc)->sc_rxbuflock)
#define ATH_RXBUF_UNLOCK(_sc) spin_unlock(&(_sc)->sc_rxbuflock)
-#define ATH_RXBUF_LOCK_BH(_sc) spin_lock_bh(&(_sc)->sc_rxbuflock)
-#define ATH_RXBUF_UNLOCK_BH(_sc) spin_unlock_bh(&(_sc)->sc_rxbuflock)
+#define ATH_RXBUF_LOCK_BH(_sc) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_sc)->sc_rxbuflock)
+#define ATH_RXBUF_UNLOCK_BH(_sc) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_sc)->sc_rxbuflock)
#define ATH_RXBUF_LOCK_IRQ(_sc) do { \
unsigned long __rxbuflockflags; \
spin_lock_irqsave(&(_sc)->sc_rxbuflock, __rxbuflockflags);
@@ -731,6 +747,8 @@
#define ATH_RXBUF_UNLOCK_IRQ_EARLY(_sc) \
spin_unlock_irqrestore(&(_sc)->sc_rxbuflock, __rxbuflockflags);
+#define ATH_DISABLE_INTR local_irq_disable
+#define ATH_ENABLE_INTR local_irq_enable
/* Protects the device from concurrent accesses */
#define ATH_LOCK_INIT(_sc) init_MUTEX(&(_sc)->sc_lock)
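
One caveat with the conditional BH macros introduced above: written as a bare if-statement, ATH_TXQ_LOCK_BH() and friends will silently capture a following else if they are ever used in an unbraced if/else. A do { } while (0) wrapper keeps the same behaviour while staying well-formed in every statement position; a hedged alternative spelling, not what the patch applies:

#define ATH_TXQ_LOCK_BH(_tq) do {                       \
        if (!irqs_disabled())                           \
                spin_lock_bh(&(_tq)->axq_lock);         \
} while (0)

#define ATH_TXQ_UNLOCK_BH(_tq) do {                     \
        if (!irqs_disabled())                           \
                spin_unlock_bh(&(_tq)->axq_lock);       \
} while (0)
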
diff -ur madwifi.old/net80211/ieee80211_beacon.c madwifi.dev/net80211/ieee80211_beacon.c
--- madwifi.old/net80211/ieee80211_beacon.c 2007-01-31 11:41:05.000000000 +0100
+++ madwifi.dev/net80211/ieee80211_beacon.c 2007-05-21 07:50:22.815837896 +0200
@@ -286,7 +286,7 @@
int len_changed = 0;
u_int16_t capinfo;
- IEEE80211_LOCK(ic);
+ IEEE80211_BEACON_LOCK(ic);
if ((ic->ic_flags & IEEE80211_F_DOTH) &&
(vap->iv_flags & IEEE80211_F_CHANSWITCH) &&
@@ -547,7 +547,7 @@
vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
}
- IEEE80211_UNLOCK(ic);
+ IEEE80211_BEACON_UNLOCK(ic);
return len_changed;
}
diff -ur madwifi.old/net80211/ieee80211_input.c madwifi.dev/net80211/ieee80211_input.c
--- madwifi.old/net80211/ieee80211_input.c 2007-05-21 07:49:54.527138432 +0200
+++ madwifi.dev/net80211/ieee80211_input.c 2007-05-21 07:50:22.816837744 +0200
@@ -1155,8 +1155,9 @@
if (ni->ni_vlan != 0 && vap->iv_vlgrp != NULL) {
/* attach vlan tag */
vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan);
- } else
- netif_rx(skb);
+ } else {
+ netif_receive_skb(skb);
+ }
dev->last_rx = jiffies;
}
}
@@ -3657,9 +3658,9 @@
}
/* Okay, take the first queued packet and put it out... */
- IEEE80211_NODE_SAVEQ_LOCK(ni);
+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
if (skb == NULL) {
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_POWER, wh->i_addr2,
"%s", "recv ps-poll, but queue empty");
diff -ur madwifi.old/net80211/ieee80211_linux.h madwifi.dev/net80211/ieee80211_linux.h
--- madwifi.old/net80211/ieee80211_linux.h 2007-05-21 07:49:54.528138280 +0200
+++ madwifi.dev/net80211/ieee80211_linux.h 2007-05-21 07:50:22.817837592 +0200
@@ -31,6 +31,10 @@
#include <linux/wireless.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define irqs_disabled() 0
+#endif
+
/*
* Task deferral
*
@@ -86,8 +90,12 @@
} while (0)
#define IEEE80211_UNLOCK_IRQ_EARLY(_ic) \
spin_unlock_irqrestore(&(_ic)->ic_comlock, __ilockflags);
-#define IEEE80211_LOCK_BH(_ic) spin_lock_bh(&(_ic)->ic_comlock)
-#define IEEE80211_UNLOCK_BH(_ic) spin_unlock_bh(&(_ic)->ic_comlock)
+#define IEEE80211_LOCK_BH(_ic) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_ic)->ic_comlock)
+#define IEEE80211_UNLOCK_BH(_ic) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_ic)->ic_comlock)
#define IEEE80211_LOCK(_ic) spin_lock(&(_ic)->ic_comlock)
#define IEEE80211_UNLOCK(_ic) spin_unlock(&(_ic)->ic_comlock)
@@ -104,15 +112,22 @@
#define IEEE80211_VAPS_LOCK_DESTROY(_ic)
#define IEEE80211_VAPS_LOCK(_ic) spin_lock(&(_ic)->ic_vapslock);
#define IEEE80211_VAPS_UNLOCK(_ic) spin_unlock(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_LOCK_BH(_ic) spin_lock_bh(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_UNLOCK_BH(_ic) spin_unlock_bh(&(_ic)->ic_vapslock);
-#define IEEE80211_VAPS_LOCK_IRQ(_ic) do { \
- int _vaps_lockflags; \
- spin_lock_irqsave(&(_ic)->ic_vapslock, _vaps_lockflags);
-#define IEEE80211_VAPS_UNLOCK_IRQ(_ic) \
- spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags); \
-} while (0)
-#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic) spin_unlock_irqrestore(&(_ic)->ic_vapslock, _vaps_lockflags)
+#define IEEE80211_VAPS_LOCK_BH(_ic) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_UNLOCK_BH(_ic) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_ic)->ic_vapslock);
+#define IEEE80211_VAPS_LOCK_IRQ(_ic) do { \
+ unsigned long __vlockflags=0; \
+ unsigned int __vlocked=0; \
+ __vlocked=spin_is_locked(&(_ic)->ic_vapslock); \
+ if(!__vlocked) spin_lock_irqsave(&(_ic)->ic_vapslock, __vlockflags);
+#define IEEE80211_VAPS_UNLOCK_IRQ(_ic) \
+ if(!__vlocked) spin_unlock_irqrestore(&(_ic)->ic_vapslock, __vlockflags); \
+} while (0)
+#define IEEE80211_VAPS_UNLOCK_IRQ_EARLY(_ic) \
+ if (!__vlocked) spin_unlock_irqrestore(&(_ic)->ic_vapslock, __vlockflags)
#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define IEEE80211_VAPS_LOCK_ASSERT(_ic) \
@@ -122,6 +137,11 @@
#define IEEE80211_VAPS_LOCK_ASSERT(_ic)
#endif
+/*
+ * Beacon locking definitions; piggyback on com lock.
+ */
+#define IEEE80211_BEACON_LOCK(_ic) IEEE80211_LOCK_IRQ(_ic)
+#define IEEE80211_BEACON_UNLOCK(_ic) IEEE80211_UNLOCK_IRQ(_ic)
/*
* Node locking definitions.
@@ -191,8 +211,12 @@
typedef spinlock_t ieee80211_scan_lock_t;
#define IEEE80211_SCAN_LOCK_INIT(_nt, _name) spin_lock_init(&(_nt)->nt_scanlock)
#define IEEE80211_SCAN_LOCK_DESTROY(_nt)
-#define IEEE80211_SCAN_LOCK_BH(_nt) spin_lock_bh(&(_nt)->nt_scanlock)
-#define IEEE80211_SCAN_UNLOCK_BH(_nt) spin_unlock_bh(&(_nt)->nt_scanlock)
+#define IEEE80211_SCAN_LOCK_BH(_nt) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_nt)->nt_scanlock)
+#define IEEE80211_SCAN_UNLOCK_BH(_nt) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_nt)->nt_scanlock)
#define IEEE80211_SCAN_LOCK_IRQ(_nt) do { \
unsigned long __scan_lockflags; \
spin_lock_irqsave(&(_nt)->nt_scanlock, __scan_lockflags);
@@ -217,8 +241,12 @@
#define ACL_LOCK_DESTROY(_as)
#define ACL_LOCK(_as) spin_lock(&(_as)->as_lock)
#define ACL_UNLOCK(_as) spin_unlock(&(_as)->as_lock)
-#define ACL_LOCK_BH(_as) spin_lock_bh(&(_as)->as_lock)
-#define ACL_UNLOCK_BH(_as) spin_unlock_bh(&(_as)->as_lock)
+#define ACL_LOCK_BH(_as) \
+ if (!irqs_disabled()) \
+ spin_lock_bh(&(_as)->as_lock)
+#define ACL_UNLOCK_BH(_as) \
+ if (!irqs_disabled()) \
+ spin_unlock_bh(&(_as)->as_lock)
#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define ACL_LOCK_ASSERT(_as) \
diff -ur madwifi.old/net80211/ieee80211_node.c madwifi.dev/net80211/ieee80211_node.c
--- madwifi.old/net80211/ieee80211_node.c 2007-05-21 07:49:54.555134176 +0200
+++ madwifi.dev/net80211/ieee80211_node.c 2007-05-21 07:50:22.818837440 +0200
@@ -1570,7 +1570,7 @@
struct ieee80211_node *ni;
u_int gen;
- IEEE80211_SCAN_LOCK_IRQ(nt);
+ IEEE80211_SCAN_LOCK_BH(nt);
gen = ++nt->nt_scangen;
restart:
@@ -1590,7 +1590,7 @@
}
IEEE80211_NODE_TABLE_UNLOCK_IRQ(nt);
- IEEE80211_SCAN_UNLOCK_IRQ(nt);
+ IEEE80211_SCAN_UNLOCK_BH(nt);
}
EXPORT_SYMBOL(ieee80211_iterate_dev_nodes);
diff -ur madwifi.old/net80211/ieee80211_power.c madwifi.dev/net80211/ieee80211_power.c
--- madwifi.old/net80211/ieee80211_power.c 2007-05-21 07:49:54.532137672 +0200
+++ madwifi.dev/net80211/ieee80211_power.c 2007-05-21 07:50:22.818837440 +0200
@@ -147,7 +147,7 @@
#endif
struct sk_buff *skb;
- IEEE80211_NODE_SAVEQ_LOCK(ni);
+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
while ((skb = skb_peek(&ni->ni_savedq)) != NULL &&
M_AGE_GET(skb) < IEEE80211_INACT_WAIT) {
IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
@@ -159,7 +159,7 @@
}
if (skb != NULL)
M_AGE_SUB(skb, IEEE80211_INACT_WAIT);
- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
"discard %u frames for age", discard);
@@ -185,7 +185,7 @@
KASSERT(aid < vap->iv_max_aid,
("bogus aid %u, max %u", aid, vap->iv_max_aid));
- IEEE80211_LOCK(ni->ni_ic);
+ IEEE80211_BEACON_LOCK(ni->ni_ic);
if (set != (isset(vap->iv_tim_bitmap, aid) != 0)) {
if (set) {
setbit(vap->iv_tim_bitmap, aid);
@@ -196,7 +196,7 @@
}
vap->iv_flags |= IEEE80211_F_TIMUPDATE;
}
- IEEE80211_UNLOCK(ni->ni_ic);
+ IEEE80211_BEACON_UNLOCK(ni->ni_ic);
}
/*
@@ -297,9 +297,9 @@
struct sk_buff *skb;
int qlen;
- IEEE80211_NODE_SAVEQ_LOCK(ni);
+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
IEEE80211_NODE_SAVEQ_DEQUEUE(ni, skb, qlen);
- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
if (skb == NULL)
break;
/*
@@ -361,9 +361,9 @@
for (;;) {
struct sk_buff *skb;
- IEEE80211_NODE_SAVEQ_LOCK(ni);
+ IEEE80211_NODE_SAVEQ_LOCK_IRQ(ni);
skb = __skb_dequeue(&ni->ni_savedq);
- IEEE80211_NODE_SAVEQ_UNLOCK(ni);
+ IEEE80211_NODE_SAVEQ_UNLOCK_IRQ(ni);
if (skb == NULL)
break;
ieee80211_parent_queue_xmit(skb);
diff -ur madwifi.old/net80211/ieee80211_proto.c madwifi.dev/net80211/ieee80211_proto.c
--- madwifi.old/net80211/ieee80211_proto.c 2007-05-21 07:49:54.574131288 +0200
+++ madwifi.dev/net80211/ieee80211_proto.c 2007-05-21 07:50:22.819837288 +0200
@@ -635,9 +635,9 @@
{
struct ieee80211com *ic = vap->iv_ic;
- IEEE80211_LOCK(ic);
+ IEEE80211_BEACON_LOCK(ic);
ieee80211_wme_initparams_locked(vap);
- IEEE80211_UNLOCK(ic);
+ IEEE80211_BEACON_UNLOCK(ic);
}
void
@@ -920,9 +920,9 @@
struct ieee80211com *ic = vap->iv_ic;
if (ic->ic_caps & IEEE80211_C_WME) {
- IEEE80211_LOCK(ic);
+ IEEE80211_BEACON_LOCK(ic);
ieee80211_wme_updateparams_locked(vap);
- IEEE80211_UNLOCK(ic);
+ IEEE80211_BEACON_UNLOCK(ic);
}
}
diff -ur madwifi.old/net80211/ieee80211_scan_sta.c madwifi.dev/net80211/ieee80211_scan_sta.c
--- madwifi.old/net80211/ieee80211_scan_sta.c 2006-09-20 10:45:13.000000000 +0200
+++ madwifi.dev/net80211/ieee80211_scan_sta.c 2007-05-21 07:50:22.819837288 +0200
@@ -163,9 +163,11 @@
{
struct sta_table *st = ss->ss_priv;
- spin_lock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_lock_bh(&st->st_lock);
sta_flush_table(st);
- spin_unlock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_unlock_bh(&st->st_lock);
ss->ss_last = 0;
return 0;
}
@@ -215,7 +217,8 @@
int hash;
hash = STA_HASH(macaddr);
- spin_lock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_lock_bh(&st->st_lock);
LIST_FOREACH(se, &st->st_hash[hash], se_hash)
if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr) &&
sp->ssid[1] == se->base.se_ssid[1] &&
@@ -225,7 +228,7 @@
MALLOC(se, struct sta_entry *, sizeof(struct sta_entry),
M_80211_SCAN, M_NOWAIT | M_ZERO);
if (se == NULL) {
- spin_unlock(&st->st_lock);
+ if (!irqs_disabled()) spin_unlock_bh(&st->st_lock);
return 0;
}
se->se_scangen = st->st_scangen-1;
@@ -287,7 +290,8 @@
se->se_seen = 1;
se->se_notseen = 0;
- spin_unlock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_unlock_bh(&st->st_lock);
/*
* If looking for a quick choice and nothing's
@@ -1063,7 +1067,8 @@
u_int gen;
int res = 0;
- spin_lock(&st->st_scanlock);
+ if (!irqs_disabled())
+ spin_lock_bh(&st->st_scanlock);
gen = st->st_scangen++;
restart:
spin_lock(&st->st_lock);
@@ -1086,7 +1091,8 @@
spin_unlock(&st->st_lock);
done:
- spin_unlock(&st->st_scanlock);
+ if (!irqs_disabled())
+ spin_unlock_bh(&st->st_scanlock);
return res;
}
@@ -1235,7 +1241,8 @@
bestchan = NULL;
bestrssi = -1;
- spin_lock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_lock_bh(&st->st_lock);
for (i = 0; i < ss->ss_last; i++) {
c = ss->ss_chans[i];
maxrssi = 0;
@@ -1248,7 +1255,8 @@
if (bestchan == NULL || maxrssi < bestrssi)
bestchan = c;
}
- spin_unlock(&st->st_lock);
+ if (!irqs_disabled())
+ spin_unlock_bh(&st->st_lock);
return bestchan;
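
The ieee80211_scan_sta.c hunks follow the same theme: plain spin_lock() on the scan-table locks becomes spin_lock_bh(), guarded by irqs_disabled(), since these paths may now be reached from the RX poll (softirq) path as well as from process context. If the repeated guard were factored out, small inline helpers would keep the call sites readable; a sketch only, not part of the patch:

static inline void sta_table_lock(struct sta_table *st)
{
        if (!irqs_disabled())
                spin_lock_bh(&st->st_lock);
}

static inline void sta_table_unlock(struct sta_table *st)
{
        if (!irqs_disabled())
                spin_unlock_bh(&st->st_lock);
}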
}