mt76x2_mac.c

/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include "mt76x2.h"
#include "mt76x2_mcu.h"
#include "mt76x2_eeprom.h"
#include "mt76x2_trace.h"

void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
{
	idx &= 7;
	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
		       get_unaligned_le16(addr + 4));
}
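
/* Translate the RXWI rate field (PHY type, rate index, bandwidth and
 * flag bits) into the mac80211 rx status rate/encoding fields.
 * Returns -EINVAL for an unknown PHY type.
 */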
static int
mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (idx >= 8)
			idx = 0;

		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		/* fall through */
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}
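
/* Pack an ieee80211_tx_rate into the 16-bit hardware rate value used by
 * the TXWI and the per-WCID rate registers. The stream count derived
 * from the MCS index is returned through nss_val.
 */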
static __le16
mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
		       const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
	u16 rateval;
	u8 phy, rate_idx;
	u8 nss = 1;
	u8 bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 4);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
		bw = 0;
	}

	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rateval |= MT_RXWI_RATE_SGI;

	*nss_val = nss;
	return cpu_to_le16(rateval);
}

void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
{
	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
	u32 bit = MT_WCID_DROP_MASK(idx);

	/* prevent unnecessary writes */
	if ((val & bit) != (bit * drop))
		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}

void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
			      const struct ieee80211_tx_rate *rate)
{
	spin_lock_bh(&dev->mt76.lock);
	wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
	wcid->tx_rate_set = true;
	spin_unlock_bh(&dev->mt76.lock);
}
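
/* Fill the TX descriptor (TXWI) for a frame: WCID, rate, tx power
 * adjustment, CCMP PN for software IV generation, ack/sequence policy
 * and A-MPDU parameters.
 */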
void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
	u16 txwi_flags = 0;
	u8 nss;
	s8 txpwr_adj, max_txpwr_adj;
	u8 ccmp_pn[8];

	memset(txwi, 0, sizeof(*txwi));
	if (wcid)
		txwi->wcid = wcid->idx;
	else
		txwi->wcid = 0xff;

	txwi->pktid = 1;

	if (wcid && wcid->sw_iv && key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		ccmp_pn[0] = pn;
		ccmp_pn[1] = pn >> 8;
		ccmp_pn[2] = 0;
		ccmp_pn[3] = 0x20 | (key->keyidx << 6);
		ccmp_pn[4] = pn >> 16;
		ccmp_pn[5] = pn >> 24;
		ccmp_pn[6] = pn >> 32;
		ccmp_pn[7] = pn >> 40;
		txwi->iv = *((__le32 *)&ccmp_pn[0]);
		/* the EIV carries PN2..PN5, i.e. bytes 4..7 of the CCMP PN */
		txwi->eiv = *((__le32 *)&ccmp_pn[4]);
	}

	spin_lock_bh(&dev->mt76.lock);
	if (wcid && (rate->idx < 0 || !rate->count)) {
		txwi->rate = wcid->tx_rate;
		max_txpwr_adj = wcid->max_txpwr_adj;
		nss = wcid->tx_rate_nss;
	} else {
		txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
		max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
	}
	spin_unlock_bh(&dev->mt76.lock);

	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
					    max_txpwr_adj);
	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);

	if (mt76xx_rev(dev) >= MT76XX_REV_E4)
		txwi->txstream = 0x13;
	else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
		txwi->txstream = 0x93;

	if (info->flags & IEEE80211_TX_CTL_LDPC)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		txwi->pktid |= MT_TXWI_PKTID_PROBE;
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 63, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
			ba_size = 0;
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);

		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
			      FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
					 sta->ht_cap.ampdu_density);
	}

	if (ieee80211_is_probe_resp(hdr->frame_control) ||
	    ieee80211_is_beacon(hdr->frame_control))
		txwi_flags |= MT_TXWI_FLAGS_TS;

	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(skb->len);
}
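
/* Remove the L2 pad the hardware inserts between the 802.11 header and
 * the payload by shifting the header towards the payload and pulling
 * the pad bytes from the head of the skb.
 */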
static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
{
	int hdrlen;

	if (!len)
		return;

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	memmove(skb->data + len, skb->data, hdrlen);
	skb_pull(skb, len);
}

static struct mt76_wcid *
mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, u8 idx, bool unicast)
{
	struct mt76x2_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->wcid))
		return NULL;

	wcid = rcu_dereference(dev->wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	sta = container_of(wcid, struct mt76x2_sta, wcid);
	return &sta->vif->group_wcid;
}
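
/* Parse the RXWI for a received frame: strip header padding and the
 * security IV, record RSSI, channel, TID/sequence number and A-MPDU
 * state in the rx status, then decode the rate.
 */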
int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
			  void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76x2_rxwi *rxwi = rxi;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0;
	u8 pn_len;
	u8 wcid;
	int len;

	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	status->wcid = mt76x2_rx_get_sta_wcid(dev, wcid, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;

		status->iv[0] = data[7];
		status->iv[1] = data[6];
		status->iv[2] = data[5];
		status->iv[3] = data[4];
		status->iv[4] = data[1];
		status->iv[5] = data[0];

		/*
		 * Driver CCMP validation can't deal with fragments.
		 * Let mac80211 take care of it.
		 */
		if (rxinfo & MT_RXINFO_FRAG) {
			status->flag &= ~RX_FLAG_IV_STRIPPED;
		} else {
			pad_len += pn_len << 2;
			len -= pn_len << 2;
		}
	}

	mt76x2_remove_hdr_pad(skb, pad_len);

	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
		status->aggr = true;

	if (WARN_ON_ONCE(len > skb->len))
		return -EINVAL;

	pskb_trim(skb, len);
	status->chains = BIT(0) | BIT(1);
	status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
	status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
	status->signal = max(status->chain_signal[0], status->chain_signal[1]);
	status->freq = dev->mt76.chandef.chan->center_freq;
	status->band = dev->mt76.chandef.chan->band;

	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);

	return mt76x2_mac_process_rate(status, rate);
}

static int
mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
			   enum nl80211_band band)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	txrate->idx = 0;
	txrate->flags = 0;
	txrate->count = 1;

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		if (band == NL80211_BAND_2GHZ)
			idx += 4;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		if (idx >= 8)
			idx -= 8;

		txrate->idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
		/* fall through */
	case MT_PHY_TYPE_HT:
		txrate->flags |= IEEE80211_TX_RC_MCS;
		txrate->idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
		txrate->idx = idx;
		break;
	default:
		return -EINVAL;
	}

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case MT_PHY_BW_80:
		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_SGI)
		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;

	return 0;
}
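
/* Reconstruct the rate table for a tx status report: the hardware only
 * reports the final rate and the retry count, so earlier entries are
 * back-filled assuming the rate index dropped by one on each retry.
 */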
static void
mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
			  struct ieee80211_tx_info *info,
			  struct mt76x2_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	int cur_idx, last_rate;
	int i;

	if (!n_frames)
		return;

	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
	mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
				   dev->mt76.chandef.chan->band);
	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
		rate[last_rate + 1].idx = -1;

	cur_idx = rate[last_rate].idx + st->retry;
	for (i = 0; i <= last_rate; i++) {
		rate[i].flags = rate[last_rate].flags;
		rate[i].idx = max_t(int, 0, cur_idx - i);
		rate[i].count = 1;
	}

	if (last_rate > 0)
		rate[last_rate - 1].count = st->retry + 1 - last_rate;

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->pktid & MT_TXWI_PKTID_PROBE)
		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}
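
/* Report tx status to mac80211. Consecutive A-MPDU status entries with
 * an identical rate/retry signature are batched into a single report of
 * up to 32 frames per station.
 */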
static void
mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
		      u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	struct mt76x2_sta *msta = NULL;

	rcu_read_lock();
	if (stat->wcid < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[stat->wcid]);

	if (wcid) {
		void *priv;

		priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta,
				   drv_priv);
	}

	if (msta && stat->aggr) {
		u32 stat_val, stat_cache;

		stat_val = stat->rate;
		stat_val |= ((u32) stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32) msta->status.retry) << 16;

		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			goto out;
		}

		mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
					  msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
		*update = 1;
	}

	ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}
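
/* Drain the hardware tx status FIFO. When called from IRQ context the
 * entries are queued into the software kfifo for deferred processing;
 * otherwise each entry is reported to mac80211 directly.
 */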
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
{
	struct mt76x2_tx_status stat = {};
	unsigned long flags;
	u8 update = 1;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	trace_mac_txstat_poll(dev);

	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		u32 stat1, stat2;

		spin_lock_irqsave(&dev->irq_lock, flags);
		stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
		stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
		if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
			spin_unlock_irqrestore(&dev->irq_lock, flags);
			break;
		}

		spin_unlock_irqrestore(&dev->irq_lock, flags);

		stat.valid = 1;
		stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
		stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
		stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
		stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
		stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
		stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
		stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
		trace_mac_txstat_fetch(dev, &stat);

		if (!irq) {
			mt76x2_send_tx_status(dev, &stat, &update);
			continue;
		}

		kfifo_put(&dev->txstatus_fifo, stat);
	}
}

static void
mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
			void *txwi_ptr)
{
	struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
	struct mt76x2_txwi *txwi = txwi_ptr;

	mt76x2_mac_poll_tx_status(dev, false);

	txi->tries = 0;
	txi->jiffies = jiffies;
	txi->wcid = txwi->wcid;
	txi->pktid = txwi->pktid;
	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
	mt76x2_tx_complete(dev, skb);
}

void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
{
	struct mt76x2_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x2_send_tx_status(dev, &stat, &update);
}

void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
			    struct mt76_queue_entry *e, bool flush)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);

	if (e->txwi)
		mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
	else
		dev_kfree_skb_any(e->skb);
}
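
/* Copy the key material into a fixed 32-byte buffer and map the
 * mac80211 cipher suite to the matching hardware cipher type.
 */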
static enum mt76x2_cipher_type
mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}

void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
{
	struct mt76_wcid_addr addr = {};
	u32 attr;

	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));

	mt76_wr(dev, MT_WCID_ATTR(idx), attr);

	mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
	mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);

	if (idx >= 128)
		return;

	if (mac)
		memcpy(addr.macaddr, mac, ETH_ALEN);

	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}

int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
			    struct ieee80211_key_conf *key)
{
	enum mt76x2_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];

	cipher = mt76x2_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
		iv_data[3] = key->keyidx << 6;
		if (cipher >= MT_CIPHER_TKIP)
			iv_data[3] |= 0x20;
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}

int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
				struct ieee80211_key_conf *key)
{
	enum mt76x2_cipher_type cipher;
	u8 key_data[32];
	u32 val;

	cipher = mt76x2_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);

	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
		     sizeof(key_data));

	return 0;
}

static int
mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
{
	int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
	struct mt76x2_txwi txwi;

	if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
		return -ENOSPC;

	mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);

	mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
	offset += sizeof(txwi);

	mt76_wr_copy(dev, offset, skb->data, skb->len);
	return 0;
}
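
/* Write or clear a single slot in beacon memory. The slot is masked via
 * MT_BCN_BYPASS_MASK while it is updated to avoid transmitting a
 * partially written beacon.
 */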
static int
__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
{
	int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
	int beacon_addr = dev->beacon_offsets[bcn_idx];
	int ret = 0;
	int i;

	/* Prevent corrupt transmissions during update */
	mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));

	if (skb) {
		ret = mt76_write_beacon(dev, beacon_addr, skb);
		if (!ret)
			dev->beacon_data_mask |= BIT(bcn_idx) &
						 dev->beacon_mask;
	} else {
		dev->beacon_data_mask &= ~BIT(bcn_idx);
		for (i = 0; i < beacon_len; i += 4)
			mt76_wr(dev, beacon_addr + i, 0);
	}

	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);

	return ret;
}
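
/* Update the beacon for one interface and repack the remaining beacons
 * into consecutive hardware slots, clearing any stale slots left over
 * at the tail.
 */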
int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
			  struct sk_buff *skb)
{
	bool force_update = false;
	int bcn_idx = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
		if (vif_idx == i) {
			force_update = !!dev->beacons[i] ^ !!skb;

			if (dev->beacons[i])
				dev_kfree_skb(dev->beacons[i]);

			dev->beacons[i] = skb;
			__mt76x2_mac_set_beacon(dev, bcn_idx, skb);
		} else if (force_update && dev->beacons[i]) {
			__mt76x2_mac_set_beacon(dev, bcn_idx,
						dev->beacons[i]);
		}

		bcn_idx += !!dev->beacons[i];
	}

	for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
		if (!(dev->beacon_data_mask & BIT(i)))
			break;

		__mt76x2_mac_set_beacon(dev, i, NULL);
	}

	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
		       bcn_idx - 1);
	return 0;
}
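
/* Enable or disable beaconing for an interface; the TBTT timer and
 * pre-TBTT interrupts are only toggled when the beacon mask transitions
 * between empty and non-empty.
 */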
void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
{
	u8 old_mask = dev->beacon_mask;
	bool en;
	u32 reg;

	if (val) {
		dev->beacon_mask |= BIT(vif_idx);
	} else {
		dev->beacon_mask &= ~BIT(vif_idx);
		mt76x2_mac_set_beacon(dev, vif_idx, NULL);
	}

	if (!!old_mask == !!dev->beacon_mask)
		return;

	en = dev->beacon_mask;

	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
	reg = MT_BEACON_TIME_CFG_BEACON_TX |
	      MT_BEACON_TIME_CFG_TBTT_EN |
	      MT_BEACON_TIME_CFG_TIMER_EN;
	mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);

	if (en)
		mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
	else
		mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}

void mt76x2_update_channel(struct mt76_dev *mdev)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
	struct mt76_channel_state *state;
	u32 active, busy;

	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);

	busy = mt76_rr(dev, MT_CH_BUSY);
	active = busy + mt76_rr(dev, MT_CH_IDLE);

	spin_lock_bh(&dev->mt76.cc_lock);
	state->cc_busy += busy;
	state->cc_active += active;
	spin_unlock_bh(&dev->mt76.cc_lock);
}

void mt76x2_mac_work(struct work_struct *work)
{
	struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
					      mac_work.work);
	int i, idx;

	mt76x2_update_channel(&dev->mt76);
	for (i = 0, idx = 0; i < 16; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->aggr_stats[idx++] += val & 0xffff;
		dev->aggr_stats[idx++] += val >> 16;
	}

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
				     MT_CALIBRATE_INTERVAL);
}