@@ -107,9 +107,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 {
 	struct ath_atx_ac *ac = tid->ac;
 
-	if (tid->paused)
-		return;
-
 	if (tid->sched)
 		return;
 
@@ -1407,7 +1404,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
 	ath_tx_tid_change_state(sc, txtid);
 
 	txtid->active = true;
-	txtid->paused = true;
 	*ssn = txtid->seq_start = txtid->seq_next;
 	txtid->bar_index = -1;
 
@@ -1427,7 +1423,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 
 	ath_txq_lock(sc, txq);
 	txtid->active = false;
-	txtid->paused = false;
 	ath_tx_flush_tid(sc, txtid);
 	ath_tx_tid_change_state(sc, txtid);
 	ath_txq_unlock_complete(sc, txq);
@@ -1487,7 +1482,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
 		ath_txq_lock(sc, txq);
 		ac->clear_ps_filter = true;
 
-		if (!tid->paused && ath_tid_has_buffered(tid)) {
+		if (ath_tid_has_buffered(tid)) {
 			ath_tx_queue_tid(txq, tid);
 			ath_txq_schedule(sc, txq);
 		}
@@ -1510,7 +1505,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
 	ath_txq_lock(sc, txq);
 
 	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
-	tid->paused = false;
 
 	if (ath_tid_has_buffered(tid)) {
 		ath_tx_queue_tid(txq, tid);
@@ -1544,8 +1538,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 			continue;
 
 		tid = ATH_AN_2_TID(an, i);
-		if (tid->paused)
-			continue;
 
 		ath_txq_lock(sc, tid->ac->txq);
 		while (nframes > 0) {
@@ -1844,9 +1836,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 		list_del(&tid->list);
 		tid->sched = false;
 
-		if (tid->paused)
-			continue;
-
 		if (ath_tx_sched_aggr(sc, txq, tid, &stop))
 			sent = true;
 
@@ -2698,7 +2687,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 		tid->baw_size = WME_MAX_BA;
 		tid->baw_head = tid->baw_tail = 0;
 		tid->sched = false;
-		tid->paused = false;
 		tid->active = false;
 		__skb_queue_head_init(&tid->buf_q);
 		__skb_queue_head_init(&tid->retry_q);