ath9k: rework tx queue selection and fix queue stopping/waking

The current ath9k tx queue handling code has a few flaws that can lead to
locking problems, tx stalls due to stopped queues, and possibly even DMA
issues.

The main source of these problems is that in some places the tx queue is
selected via the skb queue mapping at a point where that mapping may no
longer be valid. One such place is when data frames are transmitted via
the CAB queue (for powersave buffered frames). This is made even worse by
looking up WMM AC values from the assigned tx queue (which is undefined
for the CAB queue).

This messed up the pending frame counting, which in turn caused queues to
get stopped but never woken again.
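
For reference, the two old patterns responsible (removed by the ath_tx_start() and ath_wake_mac80211_queue() hunks further down; condensed here, not the full functions) looked roughly like this:

	/* old enqueue accounting: count by the skb's queue mapping, no matter
	 * which hardware queue the frame was actually placed on (e.g. the
	 * CAB queue for powersave buffered frames) */
	q = skb_get_queue_mapping(skb);
	if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
		txq->stopped = 1;
	}

	/* old wake path: derive the WMM AC from the tx queue itself, which
	 * (as noted above) is undefined for the CAB queue */
	qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
	if (qnum == -1)
		return;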

To fix these issues, this patch removes an unnecessary abstraction that
separated a driver-internal queue number from the skb queue number (not to
be confused with the hardware queue number).

This abstraction seems to have existed because of tx queue
preinitialization from the initvals. The patch avoids breakage here by
pushing the software <-> hardware queue mapping into the function that
assigns the tx queues and by redefining the WMM AC constants to match the
numbers used by mac80211 (this also affects ath9k_htc).
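
Pulled together from the hunks further down (the WME_AC_* defines, subtype_txq_to_hwq[], ath9k_init_queues() and ath9k_tx()), the new mapping looks roughly like this; a condensed sketch, not the complete driver code:

	/* WMM AC numbers now follow mac80211's skb queue order (VO first) */
	#define WME_AC_VO 0
	#define WME_AC_VI 1
	#define WME_AC_BE 2
	#define WME_AC_BK 3
	#define WME_NUM_AC 4

	/* the initvals still expect the old BE/BK/VI/VO subtype order, so the
	 * AC is remapped to the hardware queue subtype at queue setup time */
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};

	/* queue setup (ath9k_init_queues): one data queue per AC, indexed by AC */
	for (i = 0; i < WME_NUM_AC; i++)
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);

	/* tx path (ath9k_tx): the skb queue number indexes txq_map directly,
	 * replacing ath_get_hal_qnum() and sc->tx.hwq_map[] */
	txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];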

To keep the pending frame counts consistent, these counters are moved into
the ath_txq struct and updated with the txq lock held, but only when the
tx queue selected by the skb queue map actually matches the tx queue the
driver uses for the frame.
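
In code, the accounting pattern becomes roughly the following (condensed from the ath_tx_start(), ath_tx_complete() and ath_wake_mac80211_queue() hunks further down; locking and surrounding structures as in the diff):

	int q = skb_get_queue_mapping(skb);

	/* enqueue (ath_tx_start), under txq->axq_lock: only charge the counter
	 * when the frame really goes out on the queue its skb mapping names */
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, q);
		txq->stopped = 1;
	}

	/* completion (ath_tx_complete), under txq->axq_lock */
	if (txq == sc->tx.txq_map[q]) {
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;
	}

	/* wake (ath_wake_mac80211_queue), under txq->axq_lock */
	if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
		if (ath_mac80211_start_queue(sc, q))
			txq->stopped = 0;
	}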

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Reported-by: Björn Smedman <bjorn.smedman@venatech.se>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Felix Fietkau, 2010-11-07 14:59:39 +01:00 (committed by John W. Linville)
parent 21e731a1b1
commit 066dae93bd
11 changed files with 109 additions and 197 deletions

drivers/net/wireless/ath/ath9k/ath9k.h

@@ -195,7 +195,6 @@ enum ATH_AGGR_STATUS {
#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
int axq_class;
u32 axq_qnum;
u32 *axq_link;
struct list_head axq_q;
@@ -208,11 +207,12 @@ struct ath_txq {
struct list_head txq_fifo_pending;
u8 txq_headidx;
u8 txq_tailidx;
int pending_frames;
};
struct ath_atx_ac {
struct ath_txq *txq;
int sched;
int qnum;
struct list_head list;
struct list_head tid_q;
};
@@ -290,12 +290,11 @@ struct ath_tx_control {
struct ath_tx {
u16 seq_no;
u32 txqsetup;
int hwq_map[WME_NUM_AC];
spinlock_t txbuflock;
struct list_head txbuf;
struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
struct ath_descdma txdma;
int pending_frames[WME_NUM_AC];
struct ath_txq *txq_map[WME_NUM_AC];
};
struct ath_rx_edma {
@@ -325,7 +324,6 @@ void ath_rx_cleanup(struct ath_softc *sc);
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_setup(struct ath_softc *sc, int haltype);
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
void ath_draintxq(struct ath_softc *sc,
struct ath_txq *txq, bool retry_tx);
@@ -665,7 +663,6 @@ struct ath_wiphy {
void ath9k_tasklet(unsigned long data);
int ath_reset(struct ath_softc *sc, bool retry_tx);
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
int ath_cabq_update(struct ath_softc *);
static inline void ath_read_cachesize(struct ath_common *common, int *csz)

drivers/net/wireless/ath/ath9k/beacon.c

@@ -28,7 +28,7 @@ int ath_beaconq_config(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_tx_queue_info qi, qi_be;
int qnum;
struct ath_txq *txq;
ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
@@ -38,8 +38,8 @@ int ath_beaconq_config(struct ath_softc *sc)
qi.tqi_cwmax = 0;
} else {
/* Adhoc mode; important thing is to use 2x cwmin. */
qnum = sc->tx.hwq_map[WME_AC_BE];
ath9k_hw_get_txq_props(ah, qnum, &qi_be);
txq = sc->tx.txq_map[WME_AC_BE];
ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
qi.tqi_aifs = qi_be.tqi_aifs;
qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
qi.tqi_cwmax = qi_be.tqi_cwmax;

drivers/net/wireless/ath/ath9k/common.h

@@ -31,10 +31,11 @@
#define WME_MAX_BA WME_BA_BMP_SIZE
#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
#define WME_AC_BE 0
#define WME_AC_BK 1
#define WME_AC_VI 2
#define WME_AC_VO 3
/* These must match mac80211 skb queue mapping numbers */
#define WME_AC_VO 0
#define WME_AC_VI 1
#define WME_AC_BE 2
#define WME_AC_BK 3
#define WME_NUM_AC 4
#define ATH_RSSI_DUMMY_MARKER 0x127

drivers/net/wireless/ath/ath9k/debug.c

@@ -585,10 +585,10 @@ static const struct file_operations fops_wiphy = {
do { \
len += snprintf(buf + len, size - len, \
"%s%13u%11u%10u%10u\n", str, \
sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \
sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \
sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \
sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \
sc->debug.stats.txstats[WME_AC_BE].elem, \
sc->debug.stats.txstats[WME_AC_BK].elem, \
sc->debug.stats.txstats[WME_AC_VI].elem, \
sc->debug.stats.txstats[WME_AC_VO].elem); \
} while(0)
static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
@@ -630,33 +630,35 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
return retval;
}
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct ath_tx_status *ts)
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts)
{
TX_STAT_INC(txq->axq_qnum, tx_pkts_all);
sc->debug.stats.txstats[txq->axq_qnum].tx_bytes_all += bf->bf_mpdu->len;
int qnum = skb_get_queue_mapping(bf->bf_mpdu);
TX_STAT_INC(qnum, tx_pkts_all);
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
if (bf_isxretried(bf))
TX_STAT_INC(txq->axq_qnum, a_xretries);
TX_STAT_INC(qnum, a_xretries);
else
TX_STAT_INC(txq->axq_qnum, a_completed);
TX_STAT_INC(qnum, a_completed);
} else {
TX_STAT_INC(txq->axq_qnum, completed);
TX_STAT_INC(qnum, completed);
}
if (ts->ts_status & ATH9K_TXERR_FIFO)
TX_STAT_INC(txq->axq_qnum, fifo_underrun);
TX_STAT_INC(qnum, fifo_underrun);
if (ts->ts_status & ATH9K_TXERR_XTXOP)
TX_STAT_INC(txq->axq_qnum, xtxop);
TX_STAT_INC(qnum, xtxop);
if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
TX_STAT_INC(txq->axq_qnum, timer_exp);
TX_STAT_INC(qnum, timer_exp);
if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
TX_STAT_INC(txq->axq_qnum, desc_cfg_err);
TX_STAT_INC(qnum, desc_cfg_err);
if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, data_underrun);
TX_STAT_INC(qnum, data_underrun);
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, delim_underrun);
TX_STAT_INC(qnum, delim_underrun);
}
static const struct file_operations fops_xmit = {

drivers/net/wireless/ath/ath9k/debug.h

@@ -169,8 +169,8 @@ void ath9k_exit_debug(struct ath_hw *ah);
int ath9k_debug_create_root(void);
void ath9k_debug_remove_root(void);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct ath_tx_status *ts);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts);
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
#else
@@ -199,7 +199,6 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
}
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_buf *bf,
struct ath_tx_status *ts)
{

drivers/net/wireless/ath/ath9k/htc_drv_txrx.c

@@ -20,8 +20,15 @@
/* TX */
/******/
static const int subtype_txq_to_hwq[] = {
[WME_AC_BE] = ATH_TXQ_AC_BE,
[WME_AC_BK] = ATH_TXQ_AC_BK,
[WME_AC_VI] = ATH_TXQ_AC_VI,
[WME_AC_VO] = ATH_TXQ_AC_VO,
};
#define ATH9K_HTC_INIT_TXQ(subtype) do { \
qi.tqi_subtype = subtype; \
qi.tqi_subtype = subtype_txq_to_hwq[subtype]; \
qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; \
qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; \
qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; \

drivers/net/wireless/ath/ath9k/hw.h

@@ -157,6 +157,13 @@
#define PAPRD_GAIN_TABLE_ENTRIES 32
#define PAPRD_TABLE_SZ 24
enum ath_hw_txq_subtype {
ATH_TXQ_AC_BE = 0,
ATH_TXQ_AC_BK = 1,
ATH_TXQ_AC_VI = 2,
ATH_TXQ_AC_VO = 3,
};
enum ath_ini_subsys {
ATH_INI_PRE = 0,
ATH_INI_CORE,

drivers/net/wireless/ath/ath9k/init.c

@@ -395,7 +395,8 @@ static void ath9k_init_crypto(struct ath_softc *sc)
static int ath9k_init_btcoex(struct ath_softc *sc)
{
int r, qnum;
struct ath_txq *txq;
int r;
switch (sc->sc_ah->btcoex_hw.scheme) {
case ATH_BTCOEX_CFG_NONE:
@@ -408,8 +409,8 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
r = ath_init_btcoex_timer(sc);
if (r)
return -1;
qnum = sc->tx.hwq_map[WME_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
txq = sc->tx.txq_map[WME_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
break;
default:
@@ -422,59 +423,18 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
static int ath9k_init_queues(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int i = 0;
for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
sc->tx.hwq_map[i] = -1;
sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
if (sc->beacon.beaconq == -1) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup a beacon xmit queue\n");
goto err;
}
sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
if (sc->beacon.cabq == NULL) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup CAB xmit queue\n");
goto err;
}
sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
if (!ath_tx_setup(sc, WME_AC_BK)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BK traffic\n");
goto err;
}
if (!ath_tx_setup(sc, WME_AC_BE)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for BE traffic\n");
goto err;
}
if (!ath_tx_setup(sc, WME_AC_VI)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VI traffic\n");
goto err;
}
if (!ath_tx_setup(sc, WME_AC_VO)) {
ath_print(common, ATH_DBG_FATAL,
"Unable to setup xmit queue for VO traffic\n");
goto err;
}
for (i = 0; i < WME_NUM_AC; i++)
sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
return 0;
err:
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
return -EIO;
}
static int ath9k_init_channels_rates(struct ath_softc *sc)

drivers/net/wireless/ath/ath9k/main.c

@@ -330,7 +330,7 @@ void ath_paprd_calibrate(struct work_struct *work)
struct ath_tx_control txctl;
struct ath9k_hw_cal_data *caldata = ah->caldata;
struct ath_common *common = ath9k_hw_common(ah);
int qnum, ftype;
int ftype;
int chain_ok = 0;
int chain;
int len = 1800;
@@ -357,8 +357,7 @@ void ath_paprd_calibrate(struct work_struct *work)
memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
memset(&txctl, 0, sizeof(txctl));
qnum = sc->tx.hwq_map[WME_AC_BE];
txctl.txq = &sc->tx.txq[qnum];
txctl.txq = sc->tx.txq_map[WME_AC_BE];
ath9k_ps_wakeup(sc);
ar9003_paprd_init_table(ah);
@@ -1024,56 +1023,6 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
return r;
}
static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
int qnum;
switch (queue) {
case 0:
qnum = sc->tx.hwq_map[WME_AC_VO];
break;
case 1:
qnum = sc->tx.hwq_map[WME_AC_VI];
break;
case 2:
qnum = sc->tx.hwq_map[WME_AC_BE];
break;
case 3:
qnum = sc->tx.hwq_map[WME_AC_BK];
break;
default:
qnum = sc->tx.hwq_map[WME_AC_BE];
break;
}
return qnum;
}
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
int qnum;
switch (queue) {
case WME_AC_VO:
qnum = 0;
break;
case WME_AC_VI:
qnum = 1;
break;
case WME_AC_BE:
qnum = 2;
break;
case WME_AC_BK:
qnum = 3;
break;
default:
qnum = -1;
break;
}
return qnum;
}
/* XXX: Remove me once we don't depend on ath9k_channel for all
* this redundant data */
void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
@@ -1243,7 +1192,6 @@ static int ath9k_tx(struct ieee80211_hw *hw,
struct ath_tx_control txctl;
int padpos, padsize;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
int qnum;
if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
ath_print(common, ATH_DBG_XMIT,
@@ -1316,8 +1264,7 @@ static int ath9k_tx(struct ieee80211_hw *hw,
memmove(skb->data, skb->data + padsize, padpos);
}
qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
txctl.txq = &sc->tx.txq[qnum];
txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
@@ -1801,12 +1748,15 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_txq *txq;
struct ath9k_tx_queue_info qi;
int ret = 0, qnum;
int ret = 0;
if (queue >= WME_NUM_AC)
return 0;
txq = sc->tx.txq_map[queue];
mutex_lock(&sc->mutex);
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1815,20 +1765,19 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
qi.tqi_cwmin = params->cw_min;
qi.tqi_cwmax = params->cw_max;
qi.tqi_burstTime = params->txop;
qnum = ath_get_hal_qnum(queue, sc);
ath_print(common, ATH_DBG_CONFIG,
"Configure tx [queue/halq] [%d/%d], "
"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, qnum, params->aifs, params->cw_min,
queue, txq->axq_qnum, params->aifs, params->cw_min,
params->cw_max, params->txop);
ret = ath_txq_update(sc, qnum, &qi);
ret = ath_txq_update(sc, txq->axq_qnum, &qi);
if (ret)
ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret)
if (queue == WME_AC_BE && !ret)
ath_beaconq_config(sc);
mutex_unlock(&sc->mutex);

drivers/net/wireless/ath/ath9k/virtual.c

@@ -187,7 +187,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
info->control.rates[1].idx = -1;
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]];
txctl.txq = sc->tx.txq_map[WME_AC_VO];
txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
if (ath_tx_start(aphy->hw, skb, &txctl) != 0)

drivers/net/wireless/ath/ath9k/xmit.c

@@ -124,7 +124,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
struct ath_txq *txq = tid->ac->txq;
WARN_ON(!tid->paused);
@@ -142,7 +142,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
struct ath_txq *txq = tid->ac->txq;
struct ath_buf *bf;
struct list_head bf_head;
struct ath_tx_status ts;
@@ -817,7 +817,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
struct ath_txq *txq = txtid->ac->txq;
if (txtid->state & AGGR_CLEANUP)
return;
@@ -888,10 +888,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_tx_queue_info qi;
static const int subtype_txq_to_hwq[] = {
[WME_AC_BE] = ATH_TXQ_AC_BE,
[WME_AC_BK] = ATH_TXQ_AC_BK,
[WME_AC_VI] = ATH_TXQ_AC_VI,
[WME_AC_VO] = ATH_TXQ_AC_VO,
};
int qnum, i;
memset(&qi, 0, sizeof(qi));
qi.tqi_subtype = subtype;
qi.tqi_subtype = subtype_txq_to_hwq[subtype];
qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
@@ -940,7 +946,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
if (!ATH_TXQ_SETUP(sc, qnum)) {
struct ath_txq *txq = &sc->tx.txq[qnum];
txq->axq_class = subtype;
txq->axq_qnum = qnum;
txq->axq_link = NULL;
INIT_LIST_HEAD(&txq->axq_q);
@@ -1210,24 +1215,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
}
}
int ath_tx_setup(struct ath_softc *sc, int haltype)
{
struct ath_txq *txq;
if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
"HAL AC %u out of range, max %zu!\n",
haltype, ARRAY_SIZE(sc->tx.hwq_map));
return 0;
}
txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
if (txq != NULL) {
sc->tx.hwq_map[haltype] = txq->axq_qnum;
return 1;
} else
return 0;
}
/***********/
/* TX, DMA */
/***********/
@@ -1708,6 +1695,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
goto tx_done;
}
WARN_ON(tid->ac->txq != txctl->txq);
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
/*
* Try aggregation if it's a unicast data frame
@@ -1747,6 +1735,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
return -1;
}
q = skb_get_queue_mapping(skb);
r = ath_tx_setup_buffer(hw, bf, skb, txctl);
if (unlikely(r)) {
ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
@@ -1756,8 +1745,9 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
* we will at least have to run TX completionon one buffer
* on the queue */
spin_lock_bh(&txq->axq_lock);
if (!txq->stopped && txq->axq_depth > 1) {
ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
if (txq == sc->tx.txq_map[q] && !txq->stopped &&
txq->axq_depth > 1) {
ath_mac80211_stop_queue(sc, q);
txq->stopped = 1;
}
spin_unlock_bh(&txq->axq_lock);
@@ -1767,13 +1757,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
return r;
}
q = skb_get_queue_mapping(skb);
if (q >= 4)
q = 0;
spin_lock_bh(&txq->axq_lock);
if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
if (txq == sc->tx.txq_map[q] &&
++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
ath_mac80211_stop_queue(sc, q);
txq->stopped = 1;
}
spin_unlock_bh(&txq->axq_lock);
@@ -1841,7 +1828,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
/*****************/
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ath_wiphy *aphy, int tx_flags)
struct ath_wiphy *aphy, int tx_flags,
struct ath_txq *txq)
{
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1888,11 +1876,12 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ath9k_tx_status(hw, skb);
else {
q = skb_get_queue_mapping(skb);
if (q >= 4)
q = 0;
if (--sc->tx.pending_frames[q] < 0)
sc->tx.pending_frames[q] = 0;
if (txq == sc->tx.txq_map[q]) {
spin_lock_bh(&txq->axq_lock);
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
spin_unlock_bh(&txq->axq_lock);
}
ieee80211_tx_status(hw, skb);
}
@@ -1927,8 +1916,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
else
complete(&sc->paprd_complete);
} else {
ath_debug_stat_tx(sc, txq, bf, ts);
ath_tx_complete(sc, skb, bf->aphy, tx_flags);
ath_debug_stat_tx(sc, bf, ts);
ath_tx_complete(sc, skb, bf->aphy, tx_flags, txq);
}
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
* accidentally reference it later.
@@ -2018,16 +2007,13 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
{
int qnum;
qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
if (qnum == -1)
return;
struct ath_txq *txq;
txq = sc->tx.txq_map[qnum];
spin_lock_bh(&txq->axq_lock);
if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
if (ath_mac80211_start_queue(sc, qnum))
txq->stopped = 0;
}
@@ -2044,6 +2030,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_tx_status ts;
int txok;
int status;
int qnum;
ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
@@ -2119,12 +2106,15 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
}
qnum = skb_get_queue_mapping(bf->bf_mpdu);
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
ath_wake_mac80211_queue(sc, txq);
if (txq == sc->tx.txq_map[qnum])
ath_wake_mac80211_queue(sc, qnum);
spin_lock_bh(&txq->axq_lock);
if (sc->sc_flags & SC_OP_TXAGGR)
@@ -2194,6 +2184,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
struct list_head bf_head;
int status;
int txok;
int qnum;
for (;;) {
status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
@@ -2237,13 +2228,16 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
}
qnum = skb_get_queue_mapping(bf->bf_mpdu);
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head,
&txs, txok, 0);
ath_wake_mac80211_queue(sc, txq);
if (txq == sc->tx.txq_map[qnum])
ath_wake_mac80211_queue(sc, qnum);
spin_lock_bh(&txq->axq_lock);
if (!list_empty(&txq->txq_fifo_pending)) {
@@ -2375,7 +2369,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
for (acno = 0, ac = &an->ac[acno];
acno < WME_NUM_AC; acno++, ac++) {
ac->sched = false;
ac->qnum = sc->tx.hwq_map[acno];
ac->txq = sc->tx.txq_map[acno];
INIT_LIST_HEAD(&ac->tid_q);
}
}
@@ -2385,17 +2379,13 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
struct ath_atx_ac *ac;
struct ath_atx_tid *tid;
struct ath_txq *txq;
int i, tidno;
int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < WME_NUM_TID; tidno++, tid++) {
i = tid->ac->qnum;
if (!ATH_TXQ_SETUP(sc, i))
continue;
txq = &sc->tx.txq[i];
ac = tid->ac;
txq = ac->txq;
spin_lock_bh(&txq->axq_lock);