mac80211: A-MPDU MLME use dynamic allocation

This patch alters the A-MPDU MLME in sta_info to use dynamic allocation,
thus drastically improving memory usage — from a constant ~2 Kbyte in
the previous (static) allocation to a lower limit of ~200 Byte and an upper
limit of ~2 Kbyte.

Signed-off-by: Ron Rindjunsky <ron.rindjunsky@intel.com>
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
This commit is contained in:
Ron Rindjunsky 2008-03-26 20:36:03 +02:00 committed by John W. Linville
parent 6c507cd040
commit cee24a3e58
7 changed files with 151 additions and 100 deletions

View file

@ -397,7 +397,7 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
DECLARE_MAC_BUF(mac); DECLARE_MAC_BUF(mac);
spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
state = sta->ampdu_mlme.tid_tx[tid].state; state = sta->ampdu_mlme.tid_state_tx[tid];
spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
if (state == HT_AGG_STATE_IDLE && if (state == HT_AGG_STATE_IDLE &&

View file

@ -169,27 +169,30 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf)+buf-p, "\n RX :"); p += scnprintf(p, sizeof(buf)+buf-p, "\n RX :");
for (i = 0; i < STA_TID_NUM; i++) for (i = 0; i < STA_TID_NUM; i++)
p += scnprintf(p, sizeof(buf)+buf-p, "%5d", p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
sta->ampdu_mlme.tid_rx[i].state); sta->ampdu_mlme.tid_state_rx[i]);
p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:"); p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:");
for (i = 0; i < STA_TID_NUM; i++) for (i = 0; i < STA_TID_NUM; i++)
p += scnprintf(p, sizeof(buf)+buf-p, "%5d", p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
sta->ampdu_mlme.tid_rx[i].dialog_token); sta->ampdu_mlme.tid_state_rx[i]?
sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
p += scnprintf(p, sizeof(buf)+buf-p, "\n TX :"); p += scnprintf(p, sizeof(buf)+buf-p, "\n TX :");
for (i = 0; i < STA_TID_NUM; i++) for (i = 0; i < STA_TID_NUM; i++)
p += scnprintf(p, sizeof(buf)+buf-p, "%5d", p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
sta->ampdu_mlme.tid_tx[i].state); sta->ampdu_mlme.tid_state_tx[i]);
p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:"); p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:");
for (i = 0; i < STA_TID_NUM; i++) for (i = 0; i < STA_TID_NUM; i++)
p += scnprintf(p, sizeof(buf)+buf-p, "%5d", p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
sta->ampdu_mlme.tid_tx[i].dialog_token); sta->ampdu_mlme.tid_state_tx[i]?
sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
p += scnprintf(p, sizeof(buf)+buf-p, "\n SSN :"); p += scnprintf(p, sizeof(buf)+buf-p, "\n SSN :");
for (i = 0; i < STA_TID_NUM; i++) for (i = 0; i < STA_TID_NUM; i++)
p += scnprintf(p, sizeof(buf)+buf-p, "%5d", p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
sta->ampdu_mlme.tid_tx[i].ssn); sta->ampdu_mlme.tid_state_tx[i]?
sta->ampdu_mlme.tid_tx[i]->ssn : 0);
p += scnprintf(p, sizeof(buf)+buf-p, "\n"); p += scnprintf(p, sizeof(buf)+buf-p, "\n");
@ -230,12 +233,12 @@ static ssize_t sta_agg_status_write(struct file *file,
strcpy(state, "off "); strcpy(state, "off ");
ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0, ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0,
WLAN_REASON_QSTA_REQUIRE_SETUP); WLAN_REASON_QSTA_REQUIRE_SETUP);
sta->ampdu_mlme.tid_rx[tid_num].state |= sta->ampdu_mlme.tid_state_rx[tid_num] |=
HT_AGG_STATE_DEBUGFS_CTL; HT_AGG_STATE_DEBUGFS_CTL;
tid_static_rx[tid_num] = 0; tid_static_rx[tid_num] = 0;
} else { } else {
strcpy(state, "on "); strcpy(state, "on ");
sta->ampdu_mlme.tid_rx[tid_num].state &= sta->ampdu_mlme.tid_state_rx[tid_num] &=
~HT_AGG_STATE_DEBUGFS_CTL; ~HT_AGG_STATE_DEBUGFS_CTL;
tid_static_rx[tid_num] = 1; tid_static_rx[tid_num] = 1;
} }

View file

@ -569,12 +569,12 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
/* we have tried too many times, receiver does not want A-MPDU */ /* we have tried too many times, receiver does not want A-MPDU */
if (sta->ampdu_mlme.tid_tx[tid].addba_req_num > HT_AGG_MAX_RETRIES) { if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
ret = -EBUSY; ret = -EBUSY;
goto start_ba_exit; goto start_ba_exit;
} }
state = &sta->ampdu_mlme.tid_tx[tid].state; state = &sta->ampdu_mlme.tid_state_tx[tid];
/* check if the TID is not in aggregation flow already */ /* check if the TID is not in aggregation flow already */
if (*state != HT_AGG_STATE_IDLE) { if (*state != HT_AGG_STATE_IDLE) {
#ifdef CONFIG_MAC80211_HT_DEBUG #ifdef CONFIG_MAC80211_HT_DEBUG
@ -585,6 +585,23 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
goto start_ba_exit; goto start_ba_exit;
} }
/* prepare A-MPDU MLME for Tx aggregation */
sta->ampdu_mlme.tid_tx[tid] =
kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
if (!sta->ampdu_mlme.tid_tx[tid]) {
if (net_ratelimit())
printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
tid);
ret = -ENOMEM;
goto start_ba_exit;
}
/* Tx timer */
sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
sta_addba_resp_timer_expired;
sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
(unsigned long)&sta->timer_to_tid[tid];
init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
/* ensure that TX flow won't interrupt us /* ensure that TX flow won't interrupt us
* until the end of the call to requeue function */ * until the end of the call to requeue function */
spin_lock_bh(&local->mdev->queue_lock); spin_lock_bh(&local->mdev->queue_lock);
@ -596,11 +613,10 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
* don't switch to aggregation */ * don't switch to aggregation */
if (ret) { if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG #ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "BA request denied - no queue available for" printk(KERN_DEBUG "BA request denied - queue unavailable for"
" tid %d\n", tid); " tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */ #endif /* CONFIG_MAC80211_HT_DEBUG */
spin_unlock_bh(&local->mdev->queue_lock); goto start_ba_err;
goto start_ba_exit;
} }
sdata = sta->sdata; sdata = sta->sdata;
@ -618,38 +634,40 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
* allocated queue */ * allocated queue */
ieee80211_ht_agg_queue_remove(local, sta, tid, 0); ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
#ifdef CONFIG_MAC80211_HT_DEBUG #ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "BA request denied - HW or queue unavailable" printk(KERN_DEBUG "BA request denied - HW unavailable for"
" for tid %d\n", tid); " tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */ #endif /* CONFIG_MAC80211_HT_DEBUG */
spin_unlock_bh(&local->mdev->queue_lock);
*state = HT_AGG_STATE_IDLE; *state = HT_AGG_STATE_IDLE;
goto start_ba_exit; goto start_ba_err;
} }
/* Will put all the packets in the new SW queue */ /* Will put all the packets in the new SW queue */
ieee80211_requeue(local, ieee802_1d_to_ac[tid]); ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
spin_unlock_bh(&local->mdev->queue_lock); spin_unlock_bh(&local->mdev->queue_lock);
/* We have most probably almost emptied the legacy queue */
/* ieee80211_wake_queue(local_to_hw(local), ieee802_1d_to_ac[tid]); */
/* send an addBA request */ /* send an addBA request */
sta->ampdu_mlme.dialog_token_allocator++; sta->ampdu_mlme.dialog_token_allocator++;
sta->ampdu_mlme.tid_tx[tid].dialog_token = sta->ampdu_mlme.tid_tx[tid]->dialog_token =
sta->ampdu_mlme.dialog_token_allocator; sta->ampdu_mlme.dialog_token_allocator;
sta->ampdu_mlme.tid_tx[tid].ssn = start_seq_num; sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
ieee80211_send_addba_request(sta->sdata->dev, ra, tid, ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
sta->ampdu_mlme.tid_tx[tid].dialog_token, sta->ampdu_mlme.tid_tx[tid]->dialog_token,
sta->ampdu_mlme.tid_tx[tid].ssn, sta->ampdu_mlme.tid_tx[tid]->ssn,
0x40, 5000); 0x40, 5000);
/* activate the timer for the recipient's addBA response */ /* activate the timer for the recipient's addBA response */
sta->ampdu_mlme.tid_tx[tid].addba_resp_timer.expires = sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
jiffies + ADDBA_RESP_INTERVAL; jiffies + ADDBA_RESP_INTERVAL;
add_timer(&sta->ampdu_mlme.tid_tx[tid].addba_resp_timer); add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
goto start_ba_exit;
start_ba_err:
kfree(sta->ampdu_mlme.tid_tx[tid]);
sta->ampdu_mlme.tid_tx[tid] = NULL;
spin_unlock_bh(&local->mdev->queue_lock);
ret = -EBUSY;
start_ba_exit: start_ba_exit:
spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
rcu_read_unlock(); rcu_read_unlock();
@ -683,7 +701,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
} }
/* check if the TID is in aggregation */ /* check if the TID is in aggregation */
state = &sta->ampdu_mlme.tid_tx[tid].state; state = &sta->ampdu_mlme.tid_state_tx[tid];
spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
if (*state != HT_AGG_STATE_OPERATIONAL) { if (*state != HT_AGG_STATE_OPERATIONAL) {
@ -741,7 +759,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
return; return;
} }
state = &sta->ampdu_mlme.tid_tx[tid].state; state = &sta->ampdu_mlme.tid_state_tx[tid];
spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
if (!(*state & HT_ADDBA_REQUESTED_MSK)) { if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
@ -790,7 +808,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
rcu_read_unlock(); rcu_read_unlock();
return; return;
} }
state = &sta->ampdu_mlme.tid_tx[tid].state; state = &sta->ampdu_mlme.tid_state_tx[tid];
spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
@ -819,7 +837,9 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
* necessarily stopped */ * necessarily stopped */
netif_schedule(local->mdev); netif_schedule(local->mdev);
*state = HT_AGG_STATE_IDLE; *state = HT_AGG_STATE_IDLE;
sta->ampdu_mlme.tid_tx[tid].addba_req_num = 0; sta->ampdu_mlme.addba_req_num[tid] = 0;
kfree(sta->ampdu_mlme.tid_tx[tid]);
sta->ampdu_mlme.tid_tx[tid] = NULL;
spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
rcu_read_unlock(); rcu_read_unlock();

View file

@ -1216,12 +1216,11 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
buf_size = buf_size << sband->ht_info.ampdu_factor; buf_size = buf_size << sband->ht_info.ampdu_factor;
} }
tid_agg_rx = &sta->ampdu_mlme.tid_rx[tid];
/* examine state machine */ /* examine state machine */
spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
if (tid_agg_rx->state != HT_AGG_STATE_IDLE) { if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
#ifdef CONFIG_MAC80211_HT_DEBUG #ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_DEBUG "unexpected AddBA Req from " printk(KERN_DEBUG "unexpected AddBA Req from "
@ -1231,6 +1230,24 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
goto end; goto end;
} }
/* prepare A-MPDU MLME for Rx aggregation */
sta->ampdu_mlme.tid_rx[tid] =
kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
if (!sta->ampdu_mlme.tid_rx[tid]) {
if (net_ratelimit())
printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
tid);
goto end;
}
/* rx timer */
sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
sta_rx_agg_session_timer_expired;
sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
(unsigned long)&sta->timer_to_tid[tid];
init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
/* prepare reordering buffer */ /* prepare reordering buffer */
tid_agg_rx->reorder_buf = tid_agg_rx->reorder_buf =
kmalloc(buf_size * sizeof(struct sk_buf *), GFP_ATOMIC); kmalloc(buf_size * sizeof(struct sk_buf *), GFP_ATOMIC);
@ -1238,6 +1255,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_ERR "can not allocate reordering buffer " printk(KERN_ERR "can not allocate reordering buffer "
"to tid %d\n", tid); "to tid %d\n", tid);
kfree(sta->ampdu_mlme.tid_rx[tid]);
goto end; goto end;
} }
memset(tid_agg_rx->reorder_buf, 0, memset(tid_agg_rx->reorder_buf, 0,
@ -1252,11 +1270,13 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
if (ret) { if (ret) {
kfree(tid_agg_rx->reorder_buf); kfree(tid_agg_rx->reorder_buf);
kfree(tid_agg_rx);
sta->ampdu_mlme.tid_rx[tid] = NULL;
goto end; goto end;
} }
/* change state and send addba resp */ /* change state and send addba resp */
tid_agg_rx->state = HT_AGG_STATE_OPERATIONAL; sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
tid_agg_rx->dialog_token = dialog_token; tid_agg_rx->dialog_token = dialog_token;
tid_agg_rx->ssn = start_seq_num; tid_agg_rx->ssn = start_seq_num;
tid_agg_rx->head_seq_num = start_seq_num; tid_agg_rx->head_seq_num = start_seq_num;
@ -1295,39 +1315,37 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
state = &sta->ampdu_mlme.tid_tx[tid].state; state = &sta->ampdu_mlme.tid_state_tx[tid];
spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:"
"%d\n", *state);
goto addba_resp_exit;
}
if (mgmt->u.action.u.addba_resp.dialog_token != if (mgmt->u.action.u.addba_resp.dialog_token !=
sta->ampdu_mlme.tid_tx[tid].dialog_token) { sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
#ifdef CONFIG_MAC80211_HT_DEBUG #ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */ #endif /* CONFIG_MAC80211_HT_DEBUG */
rcu_read_unlock(); goto addba_resp_exit;
return;
} }
del_timer_sync(&sta->ampdu_mlme.tid_tx[tid].addba_resp_timer); del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG #ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */ #endif /* CONFIG_MAC80211_HT_DEBUG */
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
== WLAN_STATUS_SUCCESS) { == WLAN_STATUS_SUCCESS) {
if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:"
"%d\n", *state);
rcu_read_unlock();
return;
}
if (*state & HT_ADDBA_RECEIVED_MSK) if (*state & HT_ADDBA_RECEIVED_MSK)
printk(KERN_DEBUG "double addBA response\n"); printk(KERN_DEBUG "double addBA response\n");
*state |= HT_ADDBA_RECEIVED_MSK; *state |= HT_ADDBA_RECEIVED_MSK;
sta->ampdu_mlme.tid_tx[tid].addba_req_num = 0; sta->ampdu_mlme.addba_req_num[tid] = 0;
if (*state == HT_AGG_STATE_OPERATIONAL) { if (*state == HT_AGG_STATE_OPERATIONAL) {
printk(KERN_DEBUG "Aggregation on for tid %d \n", tid); printk(KERN_DEBUG "Aggregation on for tid %d \n", tid);
@ -1339,13 +1357,15 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
} else { } else {
printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid); printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid);
sta->ampdu_mlme.tid_tx[tid].addba_req_num++; sta->ampdu_mlme.addba_req_num[tid]++;
/* this will allow the state check in stop_BA_session */ /* this will allow the state check in stop_BA_session */
*state = HT_AGG_STATE_OPERATIONAL; *state = HT_AGG_STATE_OPERATIONAL;
spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
ieee80211_stop_tx_ba_session(hw, sta->addr, tid, ieee80211_stop_tx_ba_session(hw, sta->addr, tid,
WLAN_BACK_INITIATOR); WLAN_BACK_INITIATOR);
} }
addba_resp_exit:
rcu_read_unlock(); rcu_read_unlock();
} }
@ -1411,13 +1431,13 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
/* check if TID is in operational state */ /* check if TID is in operational state */
spin_lock_bh(&sta->ampdu_mlme.ampdu_rx); spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
if (sta->ampdu_mlme.tid_rx[tid].state if (sta->ampdu_mlme.tid_state_rx[tid]
!= HT_AGG_STATE_OPERATIONAL) { != HT_AGG_STATE_OPERATIONAL) {
spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
rcu_read_unlock(); rcu_read_unlock();
return; return;
} }
sta->ampdu_mlme.tid_rx[tid].state = sta->ampdu_mlme.tid_state_rx[tid] =
HT_AGG_STATE_REQ_STOP_BA_MSK | HT_AGG_STATE_REQ_STOP_BA_MSK |
(initiator << HT_AGG_STATE_INITIATOR_SHIFT); (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx); spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
@ -1434,25 +1454,27 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
/* shutdown timer has not expired */ /* shutdown timer has not expired */
if (initiator != WLAN_BACK_TIMER) if (initiator != WLAN_BACK_TIMER)
del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]. del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
session_timer);
/* check if this is a self generated aggregation halt */ /* check if this is a self generated aggregation halt */
if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
ieee80211_send_delba(dev, ra, tid, 0, reason); ieee80211_send_delba(dev, ra, tid, 0, reason);
/* free the reordering buffer */ /* free the reordering buffer */
for (i = 0; i < sta->ampdu_mlme.tid_rx[tid].buf_size; i++) { for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
if (sta->ampdu_mlme.tid_rx[tid].reorder_buf[i]) { if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
/* release the reordered frames */ /* release the reordered frames */
dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid].reorder_buf[i]); dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
sta->ampdu_mlme.tid_rx[tid].stored_mpdu_num--; sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
sta->ampdu_mlme.tid_rx[tid].reorder_buf[i] = NULL; sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
} }
} }
kfree(sta->ampdu_mlme.tid_rx[tid].reorder_buf); /* free resources */
kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
kfree(sta->ampdu_mlme.tid_rx[tid]);
sta->ampdu_mlme.tid_rx[tid] = NULL;
sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
sta->ampdu_mlme.tid_rx[tid].state = HT_AGG_STATE_IDLE;
rcu_read_unlock(); rcu_read_unlock();
} }
@ -1491,7 +1513,7 @@ static void ieee80211_sta_process_delba(struct net_device *dev,
WLAN_BACK_INITIATOR, 0); WLAN_BACK_INITIATOR, 0);
else { /* WLAN_BACK_RECIPIENT */ else { /* WLAN_BACK_RECIPIENT */
spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
sta->ampdu_mlme.tid_tx[tid].state = sta->ampdu_mlme.tid_state_tx[tid] =
HT_AGG_STATE_OPERATIONAL; HT_AGG_STATE_OPERATIONAL;
spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx); spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid, ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
@ -1528,7 +1550,7 @@ void sta_addba_resp_timer_expired(unsigned long data)
return; return;
} }
state = &sta->ampdu_mlme.tid_tx[tid].state; state = &sta->ampdu_mlme.tid_state_tx[tid];
/* check if the TID waits for addBA response */ /* check if the TID waits for addBA response */
spin_lock_bh(&sta->ampdu_mlme.ampdu_tx); spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
if (!(*state & HT_ADDBA_REQUESTED_MSK)) { if (!(*state & HT_ADDBA_REQUESTED_MSK)) {

View file

@ -1514,9 +1514,10 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
if (!rx->sta) if (!rx->sta)
return RX_CONTINUE; return RX_CONTINUE;
tid = le16_to_cpu(bar->control) >> 12; tid = le16_to_cpu(bar->control) >> 12;
tid_agg_rx = &(rx->sta->ampdu_mlme.tid_rx[tid]); if (rx->sta->ampdu_mlme.tid_state_rx[tid]
if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL) != HT_AGG_STATE_OPERATIONAL)
return RX_CONTINUE; return RX_CONTINUE;
tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
@ -2123,11 +2124,12 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN; qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN;
tid = qc[0] & QOS_CONTROL_TID_MASK; tid = qc[0] & QOS_CONTROL_TID_MASK;
tid_agg_rx = &(sta->ampdu_mlme.tid_rx[tid]);
if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL) if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
goto end_reorder; goto end_reorder;
tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
/* null data frames are excluded */ /* null data frames are excluded */
if (unlikely(fc & IEEE80211_STYPE_NULLFUNC)) if (unlikely(fc & IEEE80211_STYPE_NULLFUNC))
goto end_reorder; goto end_reorder;

View file

@ -170,9 +170,16 @@ void sta_info_destroy(struct sta_info *sta)
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
for (i = 0; i < STA_TID_NUM; i++) { for (i = 0; i < STA_TID_NUM; i++) {
del_timer_sync(&sta->ampdu_mlme.tid_rx[i].session_timer); spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
del_timer_sync(&sta->ampdu_mlme.tid_tx[i].addba_resp_timer); if (sta->ampdu_mlme.tid_rx[i])
del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer);
spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
if (sta->ampdu_mlme.tid_tx[i])
del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer);
spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
} }
rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv); rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv);
rate_control_put(sta->rate_ctrl); rate_control_put(sta->rate_ctrl);
@ -227,18 +234,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->timer_to_tid[i] = i; sta->timer_to_tid[i] = i;
/* tid to tx queue: initialize according to HW (0 is valid) */ /* tid to tx queue: initialize according to HW (0 is valid) */
sta->tid_to_tx_q[i] = local->hw.queues; sta->tid_to_tx_q[i] = local->hw.queues;
/* rx timers */ /* rx */
sta->ampdu_mlme.tid_rx[i].session_timer.function = sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
sta_rx_agg_session_timer_expired; sta->ampdu_mlme.tid_rx[i] = NULL;
sta->ampdu_mlme.tid_rx[i].session_timer.data = /* tx */
(unsigned long)&sta->timer_to_tid[i]; sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
init_timer(&sta->ampdu_mlme.tid_rx[i].session_timer); sta->ampdu_mlme.tid_tx[i] = NULL;
/* tx timers */ sta->ampdu_mlme.addba_req_num[i] = 0;
sta->ampdu_mlme.tid_tx[i].addba_resp_timer.function =
sta_addba_resp_timer_expired;
sta->ampdu_mlme.tid_tx[i].addba_resp_timer.data =
(unsigned long)&sta->timer_to_tid[i];
init_timer(&sta->ampdu_mlme.tid_tx[i].addba_resp_timer);
} }
skb_queue_head_init(&sta->ps_tx_buf); skb_queue_head_init(&sta->ps_tx_buf);
skb_queue_head_init(&sta->tx_filtered); skb_queue_head_init(&sta->tx_filtered);

View file

@ -68,43 +68,37 @@ enum ieee80211_sta_info_flags {
/** /**
* struct tid_ampdu_tx - TID aggregation information (Tx). * struct tid_ampdu_tx - TID aggregation information (Tx).
* *
* @state: TID's state in session state machine.
* @dialog_token: dialog token for aggregation session
* @ssn: Starting Sequence Number expected to be aggregated.
* @addba_resp_timer: timer for peer's response to addba request * @addba_resp_timer: timer for peer's response to addba request
* @addba_req_num: number of times addBA request has been sent. * @ssn: Starting Sequence Number expected to be aggregated.
* @dialog_token: dialog token for aggregation session
*/ */
struct tid_ampdu_tx { struct tid_ampdu_tx {
u8 state;
u8 dialog_token;
u16 ssn;
struct timer_list addba_resp_timer; struct timer_list addba_resp_timer;
u8 addba_req_num; u16 ssn;
u8 dialog_token;
}; };
/** /**
* struct tid_ampdu_rx - TID aggregation information (Rx). * struct tid_ampdu_rx - TID aggregation information (Rx).
* *
* @state: TID's state in session state machine.
* @dialog_token: dialog token for aggregation session
* @ssn: Starting Sequence Number expected to be aggregated.
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value.
* @head_seq_num: head sequence number in reordering buffer. * @head_seq_num: head sequence number in reordering buffer.
* @stored_mpdu_num: number of MPDUs in reordering buffer * @stored_mpdu_num: number of MPDUs in reordering buffer
* @reorder_buf: buffer to reorder incoming aggregated MPDUs * @reorder_buf: buffer to reorder incoming aggregated MPDUs
* @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
* @ssn: Starting Sequence Number expected to be aggregated.
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value.
* @dialog_token: dialog token for aggregation session
*/ */
struct tid_ampdu_rx { struct tid_ampdu_rx {
u8 state;
u8 dialog_token;
u16 ssn;
u16 buf_size;
u16 timeout;
u16 head_seq_num; u16 head_seq_num;
u16 stored_mpdu_num; u16 stored_mpdu_num;
struct sk_buff **reorder_buf; struct sk_buff **reorder_buf;
struct timer_list session_timer; struct timer_list session_timer;
u16 ssn;
u16 buf_size;
u16 timeout;
u8 dialog_token;
}; };
/** /**
@ -133,16 +127,24 @@ enum plink_state {
/** /**
* struct sta_ampdu_mlme - STA aggregation information. * struct sta_ampdu_mlme - STA aggregation information.
* *
* @tid_state_rx: TID's state in Rx session state machine.
* @tid_rx: aggregation info for Rx per TID * @tid_rx: aggregation info for Rx per TID
* @tid_tx: aggregation info for Tx per TID
* @ampdu_rx: for locking sections in aggregation Rx flow * @ampdu_rx: for locking sections in aggregation Rx flow
* @tid_state_tx: TID's state in Tx session state machine.
* @tid_tx: aggregation info for Tx per TID
* @addba_req_num: number of times addBA request has been sent.
* @ampdu_tx: for locking sections in aggregation Tx flow * @ampdu_tx: for locking sections in aggregation Tx flow
* @dialog_token_allocator: dialog token enumerator for each new session; * @dialog_token_allocator: dialog token enumerator for each new session;
*/ */
struct sta_ampdu_mlme { struct sta_ampdu_mlme {
struct tid_ampdu_rx tid_rx[STA_TID_NUM]; /* rx */
struct tid_ampdu_tx tid_tx[STA_TID_NUM]; u8 tid_state_rx[STA_TID_NUM];
struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
spinlock_t ampdu_rx; spinlock_t ampdu_rx;
/* tx */
u8 tid_state_tx[STA_TID_NUM];
struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
u8 addba_req_num[STA_TID_NUM];
spinlock_t ampdu_tx; spinlock_t ampdu_tx;
u8 dialog_token_allocator; u8 dialog_token_allocator;
}; };