@@ -103,9 +103,16 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
 		ieee80211_tx_status(sc->hw, skb);
 }
 
-static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
+static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
+			     struct ath_atx_tid *tid)
 {
 	struct ath_atx_ac *ac = tid->ac;
+	struct list_head *list;
+	struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
+	struct ath_chanctx *ctx = avp->chanctx;
+
+	if (!ctx)
+		return;
 
 	if (tid->sched)
 		return;
@@ -117,7 +124,9 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 		return;
 
 	ac->sched = true;
-	list_add_tail(&ac->list, &txq->axq_acq);
+
+	list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
+	list_add_tail(&ac->list, list);
 }
 
 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -626,7 +635,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
 	skb_queue_splice_tail(&bf_pending, &tid->retry_q);
 	if (!an->sleeping) {
-		ath_tx_queue_tid(txq, tid);
+		ath_tx_queue_tid(sc, txq, tid);
 
 		if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
 			tid->ac->clear_ps_filter = true;
@@ -1483,7 +1492,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
 		ac->clear_ps_filter = true;
 
 		if (ath_tid_has_buffered(tid)) {
-			ath_tx_queue_tid(txq, tid);
+			ath_tx_queue_tid(sc, txq, tid);
 			ath_txq_schedule(sc, txq);
 		}
 
@@ -1507,7 +1516,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
 	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
 
 	if (ath_tid_has_buffered(tid)) {
-		ath_tx_queue_tid(txq, tid);
+		ath_tx_queue_tid(sc, txq, tid);
 		ath_txq_schedule(sc, txq);
 	}
 
@@ -1642,7 +1651,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		txq->axq_link = NULL;
 		__skb_queue_head_init(&txq->complete_q);
 		INIT_LIST_HEAD(&txq->axq_q);
-		INIT_LIST_HEAD(&txq->axq_acq);
 		spin_lock_init(&txq->axq_lock);
 		txq->axq_depth = 0;
 		txq->axq_ampdu_depth = 0;
@@ -1804,7 +1812,7 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
 	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
 }
 
-/* For each axq_acq entry, for each tid, try to schedule packets
+/* For each acq entry, for each tid, try to schedule packets
  * for transmit until ampdu_depth has reached min Q depth.
  */
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
@@ -1812,19 +1820,25 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_atx_ac *ac, *last_ac;
 	struct ath_atx_tid *tid, *last_tid;
+	struct list_head *ac_list;
 	bool sent = false;
 
+	if (txq->mac80211_qnum < 0)
+		return;
+
+	ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+
 	if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
-	    list_empty(&txq->axq_acq))
+	    list_empty(ac_list))
 		return;
 
 	rcu_read_lock();
 
-	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
-	while (!list_empty(&txq->axq_acq)) {
+	last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
+	while (!list_empty(ac_list)) {
 		bool stop = false;
 
-		ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
+		ac = list_first_entry(ac_list, struct ath_atx_ac, list);
 		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
 		list_del(&ac->list);
 		ac->sched = false;
@@ -1844,7 +1858,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 			 * are pending for the tid
 			 */
 			if (ath_tid_has_buffered(tid))
-				ath_tx_queue_tid(txq, tid);
+				ath_tx_queue_tid(sc, txq, tid);
 
 			if (stop || tid == last_tid)
 				break;
@@ -1852,7 +1866,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 
 		if (!list_empty(&ac->tid_q) && !ac->sched) {
 			ac->sched = true;
-			list_add_tail(&ac->list, &txq->axq_acq);
+			list_add_tail(&ac->list, ac_list);
 		}
 
 		if (stop)
@@ -1863,7 +1877,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 				break;
 
 			sent = false;
-			last_ac = list_entry(txq->axq_acq.prev,
+			last_ac = list_entry(ac_list->prev,
 					     struct ath_atx_ac, list);
 		}
 	}
@@ -1871,6 +1885,20 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 	rcu_read_unlock();
 }
 
+void ath_txq_schedule_all(struct ath_softc *sc)
+{
+	struct ath_txq *txq;
+	int i;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		txq = sc->tx.txq_map[i];
+
+		spin_lock_bh(&txq->axq_lock);
+		ath_txq_schedule(sc, txq);
+		spin_unlock_bh(&txq->axq_lock);
+	}
+}
+
 /***********/
 /* TX, DMA */
 /***********/
@@ -2198,7 +2226,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
 		__skb_queue_tail(&tid->buf_q, skb);
 		if (!txctl->an->sleeping)
-			ath_tx_queue_tid(sc, txq, tid);
+			ath_tx_queue_tid(sc, txq, tid);
 
 		ath_txq_schedule(sc, txq);
 		goto out;
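
Note: this excerpt only compiles if struct ath_chanctx gains the per-AC list heads that replace txq->axq_acq, initialized when the context is set up. A minimal sketch of that assumed counterpart change follows; the acq[] field name and its IEEE80211_NUM_ACS sizing are implied by the indexing in the hunks above, but the init helper name here is hypothetical and not part of this patch.

#include <linux/list.h>
#include <net/mac80211.h>	/* IEEE80211_NUM_ACS */

struct ath_chanctx {
	/* ... existing chanctx fields ... */
	struct list_head acq[IEEE80211_NUM_ACS];	/* per-AC scheduling lists */
};

/* Hypothetical helper: every acq[] head must be initialized before
 * ath_tx_queue_tid() can list_add_tail() an AC onto it. */
static void ath_chanctx_init_acq(struct ath_chanctx *ctx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->acq); i++)
		INIT_LIST_HEAD(&ctx->acq[i]);
}

With that in place, the flow reads straight out of the diff: ath_tx_queue_tid() files an AC on its owning context's acq[] list (indexed via TID_TO_WME_AC()), ath_txq_schedule() drains only the lists of sc->cur_chan, and ath_txq_schedule_all() re-kicks all IEEE80211_NUM_ACS hardware queues, for instance after a channel-context switch makes a different context current.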