tx.c 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323
  1. /*
  2. * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
  3. * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2
  7. * as published by the Free Software Foundation
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include "mt7601u.h"
  15. #include "trace.h"
/* Software TX queue identifiers.  The four data queues reuse the mac80211
 * access-category numbering directly; PSD and MCU are extra driver-internal
 * queues appended after them.
 */
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,	/* voice */
	MT_TXQ_VI = IEEE80211_AC_VI,	/* video */
	MT_TXQ_BE = IEEE80211_AC_BE,	/* best effort */
	MT_TXQ_BK = IEEE80211_AC_BK,	/* background */
	MT_TXQ_PSD,			/* power-save delivery */
	MT_TXQ_MCU,			/* MCU commands */
	__MT_TXQ_MAX
};
  25. /* Hardware uses mirrored order of queues with Q0 having the highest priority */
  26. static u8 q2hwq(u8 q)
  27. {
  28. return q ^ 0x3;
  29. }
  30. /* Take mac80211 Q id from the skb and translate it to hardware Q id */
static u8 skb2q(struct sk_buff *skb)
{
	int qid = skb_get_queue_mapping(skb);

	/* Only the four data ACs are valid here; anything higher means
	 * mac80211 and the driver disagree - warn and recover by forcing
	 * the frame onto the best-effort queue.
	 */
	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	return q2hwq(qid);
}
  40. /* Note: TX retry reporting is a bit broken.
  41. * Retries are reported only once per AMPDU and often come a frame early
  42. * i.e. they are reported in the last status preceding the AMPDU. Apart
  43. * from the fact that it's hard to know the length of the AMPDU (which is
  44. * required to know to how many consecutive frames retries should be
  45. * applied), if status comes early on full FIFO it gets lost and retries
  46. * of the whole AMPDU become invisible.
  47. * As a work-around encode the desired rate in PKT_ID of TX descriptor
  48. * and based on that guess the retries (every rate is tried once).
  49. * Only downside here is that for MCS0 we have to rely solely on
  50. * transmission failures as no retries can ever be reported.
  51. * Not having to read EXT_FIFO has a nice effect of doubling the number
  52. * of reports which can be fetched.
  53. * Also the vendor driver never uses the EXT_FIFO register so it may be
  54. * undertested.
  55. */
  56. static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe)
  57. {
  58. u8 encoded = (rate + 1) + is_probe * 8;
  59. /* Because PKT_ID 0 disables status reporting only 15 values are
  60. * available but 16 are needed (8 MCS * 2 for encoding is_probe)
  61. * - we need to cram together two rates. MCS0 and MCS7 with is_probe
  62. * share PKT_ID 9.
  63. */
  64. if (is_probe && rate == 7)
  65. return encoded - 7;
  66. return encoded;
  67. }
  68. static void
  69. mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
  70. {
  71. u8 req_rate = stat->pktid;
  72. u8 eff_rate = stat->rate & 0x7;
  73. req_rate -= 1;
  74. if (req_rate > 7) {
  75. stat->is_probe = true;
  76. req_rate -= 8;
  77. /* Decide between MCS0 and MCS7 which share pktid 9 */
  78. if (!req_rate && eff_rate)
  79. req_rate = 7;
  80. }
  81. stat->retry = req_rate - eff_rate;
  82. }
/* Strip the DMA-time additions (TXWI, DMA header, alignment pad) from the
 * skb so it can be handed back to mac80211 for status reporting.
 */
static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
					       struct ieee80211_tx_info *info)
{
	/* Original frame length was stashed here by mt7601u_tx(). */
	int pkt_len = (unsigned long)info->status.status_driver_data[0];

	/* Drop the TXWI plus the 4-byte DMA header prepended to the frame. */
	skb_pull(skb, sizeof(struct mt76_txwi) + 4);
	/* Undo the 2-byte pad inserted for headers not a multiple of 4. */
	if (ieee80211_get_hdrlen_from_skb(skb) % 4)
		mt76_remove_hdr_pad(skb);

	skb_trim(skb, pkt_len);
}
/* Report a completed frame back to mac80211.  No real rate information is
 * available here (rates/retries are reconstructed separately via the PKT_ID
 * scheme above), so the rate index is cleared and the frame is reported
 * as ACKed.
 */
void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	mt7601u_tx_skb_remove_dma_overhead(skb, info);

	ieee80211_tx_info_clear_status(info);
	info->status.rates[0].idx = -1;
	info->flags |= IEEE80211_TX_STAT_ACK;

	/* mac_lock is held around the call into mac80211. */
	spin_lock(&dev->mac_lock);
	ieee80211_tx_status(dev->hw, skb);
	spin_unlock(&dev->mac_lock);
}
  103. static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
  104. {
  105. int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
  106. u32 need_head;
  107. need_head = sizeof(struct mt76_txwi) + 4;
  108. if (hdr_len % 4)
  109. need_head += 2;
  110. return skb_cow(skb, need_head);
  111. }
/* Build the hardware TX descriptor (TXWI) and prepend it to the frame.
 * Fills in rate control, ACK policy, AMPDU parameters and the PKT_ID used
 * later to reconstruct TX status (see the PKT_ID comment above).
 * Returns a pointer to the TXWI inside the skb headroom.
 */
static struct mt76_txwi *
mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
		  struct ieee80211_sta *sta, struct mt76_wcid *wcid,
		  int pkt_len)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct mt76_txwi *txwi;
	unsigned long flags;
	bool is_probe;
	u32 pkt_id;
	u16 rate_ctl;
	u8 nss;

	txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
	memset(txwi, 0, sizeof(*txwi));

	/* No fixed rate configured for this wcid - ask mac80211 to fill in
	 * the control rates for this frame.
	 */
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	/* dev->lock guards the wcid->tx_rate read against concurrent
	 * rate updates.
	 */
	spin_lock_irqsave(&dev->lock, flags);
	if (rate->idx < 0 || !rate->count)
		rate_ctl = wcid->tx_rate;
	else
		rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss);
	spin_unlock_irqrestore(&dev->lock, flags);
	txwi->rate_ctl = cpu_to_le16(rate_ctl);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	/* NOTE(review): ASSIGN_SEQ mapping to a flag named "NSEQ" looks
	 * inverted at first glance - presumably NSEQ asks the hardware to
	 * assign a new sequence number; confirm against the datasheet.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;

	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size);	/* hw field limit */
		/* Rate probes are sent with BA window 0 and no AMPDU flags. */
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		txwi->flags =
			cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
				    FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
					       sta->ht_cap.ampdu_density));
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			txwi->flags = 0;
	}

	txwi->wcid = wcid->idx;

	/* Encode the requested rate in PKT_ID so that TX status reports can
	 * be decoded back into rate/retry information (see above).
	 */
	is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
	pkt_len |= FIELD_PREP(MT_TXWI_LEN_PKTID, pkt_id);
	txwi->len_ctl = cpu_to_le16(pkt_len);

	return txwi;
}
  162. void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
  163. struct sk_buff *skb)
  164. {
  165. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  166. struct mt7601u_dev *dev = hw->priv;
  167. struct ieee80211_vif *vif = info->control.vif;
  168. struct ieee80211_sta *sta = control->sta;
  169. struct mt76_sta *msta = NULL;
  170. struct mt76_wcid *wcid = dev->mon_wcid;
  171. struct mt76_txwi *txwi;
  172. int pkt_len = skb->len;
  173. int hw_q = skb2q(skb);
  174. BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
  175. info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
  176. if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
  177. ieee80211_free_txskb(dev->hw, skb);
  178. return;
  179. }
  180. if (sta) {
  181. msta = (struct mt76_sta *) sta->drv_priv;
  182. wcid = &msta->wcid;
  183. } else if (vif) {
  184. struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
  185. wcid = &mvif->group_wcid;
  186. }
  187. txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);
  188. if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
  189. return;
  190. trace_mt_tx(dev, skb, msta, txwi);
  191. }
/* Delayed work that drains the hardware TX status FIFO, decodes each report
 * via the PKT_ID scheme and pushes it to mac80211, then decides whether to
 * reschedule itself.
 */
void mt7601u_tx_stat(struct work_struct *work)
{
	struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
					       stat_work.work);
	struct mt76_tx_status stat;
	unsigned long flags;
	int cleaned = 0;

	/* Fetch reports until the FIFO runs dry or the device is gone. */
	while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
		stat = mt7601u_mac_fetch_tx_status(dev);
		if (!stat.valid)
			break;

		mt7601u_tx_pktid_dec(dev, &stat);
		mt76_send_tx_status(dev, &stat);

		cleaned++;
	}
	trace_mt_tx_status_cleaned(dev, cleaned);

	/* tx_lock serializes the rescheduling decision with the code that
	 * sets MORE_STATS/READING_STATS.
	 */
	spin_lock_irqsave(&dev->tx_lock, flags);
	if (cleaned)
		/* We found reports - poll again soon for stragglers. */
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));
	else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state))
		/* Nothing read, but more stats were flagged meanwhile. */
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(20));
	else
		clear_bit(MT7601U_STATE_READING_STATS, &dev->state);
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
/* mac80211 .conf_tx: program the EDCA parameters (AIFS, CWmin/max, TXOP)
 * for one access category into the per-AC config register and the shared
 * WMM registers.  Always returns 0.
 */
int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    u16 queue, const struct ieee80211_tx_queue_params *params)
{
	struct mt7601u_dev *dev = hw->priv;
	u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
	u32 val;

	/* TODO: should we do funny things with the parameters?
	 *	 See what mt7601u_set_default_edca() used to do in init.c.
	 */

	/* Hardware takes CW values as exponents of 2; fls() converts the
	 * mac80211 (2^n - 1) window values.  Defaults above apply when the
	 * parameter is zero.
	 */
	if (params->cw_min)
		cw_min = fls(params->cw_min);
	if (params->cw_max)
		cw_max = fls(params->cw_max);

	/* Sanity-check the values against the register field widths. */
	WARN_ON(params->txop > 0xff);
	WARN_ON(params->aifs > 0xf);
	WARN_ON(cw_min > 0xf);
	WARN_ON(cw_max > 0xf);

	val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
	/* TODO: based on user-controlled EnableTxBurst var vendor drv sets
	 *	 a really long txop on AC0 (see connect.c:2009) but only on
	 *	 connect? When not connected should be 0.
	 */
	if (!hw_q)
		val |= 0x60;	/* hw queue 0 (voice) gets a fixed TXOP */
	else
		val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
	mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);

	/* Mirror TXOP/AIFSN/CWmin/CWmax into the shared WMM registers,
	 * each holding one field per queue (read-modify-write).
	 */
	val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
	val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_TXOP(hw_q), val);

	val = mt76_rr(dev, MT_WMM_AIFSN);
	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
	val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_AIFSN, val);

	val = mt76_rr(dev, MT_WMM_CWMIN);
	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
	val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_CWMIN, val);

	val = mt76_rr(dev, MT_WMM_CWMAX);
	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
	val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_CWMAX, val);

	return 0;
}