rx_reorder.c

/*
 * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "wil6210.h"
#include "txrx.h"

#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff

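/* IEEE 802.11 sequence numbers are 12 bits wide, so the helpers below do
 * all sequence arithmetic modulo 4096 (SEQ_MODULO) on the masked value.
 * seq_less(a, b) is true when 'a' precedes 'b' within half the sequence
 * space; e.g. seq_less(0xffe, 0x003) is true because
 * (0xffe - 0x003) & 0xfff = 0xffb > 0x800, i.e. 0xffe sits behind 0x003
 * across the wrap-around.
 */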
static inline int seq_less(u16 sq1, u16 sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static inline u16 seq_inc(u16 sq)
{
	return (sq + 1) & SEQ_MASK;
}

static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}

static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
{
	return seq_sub(seq, r->ssn) % r->buf_size;
}

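/* Hand the frame stored at @index (if any) to the network stack and
 * advance the head of the reorder window by one sequence number.
 */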
static void wil_release_reorder_frame(struct net_device *ndev,
				      struct wil_tid_ampdu_rx *r,
				      int index)
{
	struct sk_buff *skb = r->reorder_buf[index];

	if (!skb)
		goto no_frame;

	/* release the frame from the reorder ring buffer */
	r->stored_mpdu_num--;
	r->reorder_buf[index] = NULL;
	wil_netif_rx_any(skb, ndev);

no_frame:
	r->head_seq_num = seq_inc(r->head_seq_num);
}

static void wil_release_reorder_frames(struct net_device *ndev,
				       struct wil_tid_ampdu_rx *r,
				       u16 hseq)
{
	int index;

	/* note: this function is never called with
	 * hseq preceding r->head_seq_num, i.e. it always holds that
	 * !seq_less(hseq, r->head_seq_num)
	 * and thus on loop exit it should be
	 * r->head_seq_num == hseq
	 */
	while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
		index = reorder_index(r, r->head_seq_num);
		wil_release_reorder_frame(ndev, r, index);
	}
	r->head_seq_num = hseq;
}

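/* Flush an uninterrupted run of buffered frames, starting at the current
 * head of the reorder window, up to the first missing sequence number.
 */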
static void wil_reorder_release(struct net_device *ndev,
				struct wil_tid_ampdu_rx *r)
{
	int index = reorder_index(r, r->head_seq_num);

	while (r->reorder_buf[index]) {
		wil_release_reorder_frame(ndev, r, index);
		index = reorder_index(r, r->head_seq_num);
	}
}

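/* Per-TID Rx reordering for a Block Ack session: frames that arrive in
 * order (with nothing buffered) are passed straight to the stack;
 * out-of-order frames are parked in the circular reorder buffer and
 * released once the gaps before them are filled or the window moves
 * past them.
 */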
/* called in NAPI context */
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
	struct wil6210_vif *vif;
	struct net_device *ndev;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int tid = wil_rxdesc_tid(d);
	int cid = wil_rxdesc_cid(d);
	int mid = wil_rxdesc_mid(d);
	u16 seq = wil_rxdesc_seq(d);
	int mcast = wil_rxdesc_mcast(d);
	struct wil_sta_info *sta = &wil->sta[cid];
	struct wil_tid_ampdu_rx *r;
	u16 hseq;
	int index;

	wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
		     mid, cid, tid, seq, mcast);

	vif = wil->vifs[mid];
	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "invalid VIF, mid %d\n", mid);
		dev_kfree_skb(skb);
		return;
	}
	ndev = vif_to_ndev(vif);

	if (unlikely(mcast)) {
		wil_netif_rx_any(skb, ndev);
		return;
	}

	spin_lock(&sta->tid_rx_lock);

	r = sta->tid_rx[tid];
	if (!r) {
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	r->total++;
	hseq = r->head_seq_num;

	/* Due to the race between the WMI event reporting BACK establishment
	 * and data Rx, a few packets may be passed up before the reorder
	 * buffer gets allocated. Catch up by pretending the SSN is what we
	 * see in the first Rx packet.
	 *
	 * Another scenario: Rx got delayed and we received a packet from
	 * before the BACK. Pass it to the stack and wait.
	 */
	if (r->first_time) {
		r->first_time = false;
		if (seq != r->head_seq_num) {
			if (seq_less(seq, r->head_seq_num)) {
				wil_err(wil,
					"Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
					seq, r->head_seq_num);
				r->first_time = true;
				wil_netif_rx_any(skb, ndev);
				goto out;
			}
			wil_err(wil,
				"Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
				seq, r->head_seq_num);
			r->head_seq_num = seq;
			r->ssn = seq;
		}
	}

	/* frame with out of date sequence number */
	if (seq_less(seq, r->head_seq_num)) {
		r->ssn_last_drop = seq;
		r->drop_old++;
		wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
			     seq, r->head_seq_num);
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
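	/* Illustrative example: with buf_size 64, head 0x100 and an
	 * incoming seq 0x140, the window is shifted so that
	 * hseq = seq - buf_size + 1 = 0x101; everything buffered below
	 * 0x101 is released and seq 0x140 lands in the last slot of the
	 * shifted window.
	 */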
	if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
		hseq = seq_inc(seq_sub(seq, r->buf_size));
		/* release stored frames up to new head to stack */
		wil_release_reorder_frames(ndev, r, hseq);
	}

	/* Now the new frame is always in the range of the reordering buffer */
	index = reorder_index(r, seq);

	/* check if we already stored this frame */
	if (r->reorder_buf[index]) {
		r->drop_dup++;
		wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
		r->head_seq_num = seq_inc(r->head_seq_num);
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	/* put the frame in the reordering buffer */
	r->reorder_buf[index] = skb;
	r->reorder_time[index] = jiffies;
	r->stored_mpdu_num++;
	wil_reorder_release(ndev, r);

out:
	spin_unlock(&sta->tid_rx_lock);
}

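/* A Block Ack Request (BAR) from the originator tells us to advance the
 * reorder window to the given sequence number, flushing any frames
 * buffered below it to the stack.
 */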
/* process BAR frame, called in NAPI context */
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
		u8 cid, u8 tid, u16 seq)
{
	struct wil_sta_info *sta = &wil->sta[cid];
	struct net_device *ndev = vif_to_ndev(vif);
	struct wil_tid_ampdu_rx *r;

	spin_lock(&sta->tid_rx_lock);

	r = sta->tid_rx[tid];
	if (!r) {
		wil_err(wil, "BAR for non-existing CID %d TID %d\n", cid, tid);
		goto out;
	}
	if (seq_less(seq, r->head_seq_num)) {
		wil_err(wil, "BAR Seq 0x%03x preceding head 0x%03x\n",
			seq, r->head_seq_num);
		goto out;
	}
	wil_dbg_txrx(wil, "BAR: CID %d MID %d TID %d Seq 0x%03x head 0x%03x\n",
		     cid, vif->mid, tid, seq, r->head_seq_num);
	wil_release_reorder_frames(ndev, r, seq);

out:
	spin_unlock(&sta->tid_rx_lock);
}

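/* Allocate the per-TID reorder context together with its ring of @size
 * skb pointers and matching timestamps; the window starts at @ssn.
 */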
struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
						int size, u16 ssn)
{
	struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return NULL;

	r->reorder_buf =
		kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
	r->reorder_time =
		kcalloc(size, sizeof(unsigned long), GFP_KERNEL);
	if (!r->reorder_buf || !r->reorder_time) {
		kfree(r->reorder_buf);
		kfree(r->reorder_time);
		kfree(r);
		return NULL;
	}

	r->ssn = ssn;
	r->head_seq_num = ssn;
	r->buf_size = size;
	r->stored_mpdu_num = 0;
	r->first_time = true;
	return r;
}

void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
			   struct wil_tid_ampdu_rx *r)
{
	int i;

	if (!r)
		return;

	/* Do not pass remaining frames to the network stack - it may not
	 * be expecting to get any more Rx. Rx from here may lead to a
	 * kernel OOPS since some per-socket accounting info was already
	 * released.
	 */
	for (i = 0; i < r->buf_size; i++)
		kfree_skb(r->reorder_buf[i]);

	kfree(r->reorder_buf);
	kfree(r->reorder_time);
	kfree(r);
}

/* ADDBA processing */
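/* Clamp the requested Block Ack window to what the device can hold: no
 * more than WIL_MAX_AGG_WSIZE frames and no more than fit into one
 * A-MPDU of WIL_MAX_AMPDU_SIZE bytes at the current mtu_max. A request
 * of 0 means "no preference", so the maximum is returned.
 */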
static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
{
	u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
				 (mtu_max + WIL_MAX_MPDU_OVERHEAD));

	if (!req_agg_wsize)
		return max_agg_size;

	return min(max_agg_size, req_agg_wsize);
}

/* Block Ack - Rx side (recipient) */
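/* Handle an ADDBA request from the peer: validate the CID and the BA
 * parameters, answer via wmi_addba_rx_resp(), and on success install a
 * freshly allocated reorder context for this TID, replacing any
 * previous one.
 */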
int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid,
			 u8 cidxtid, u8 dialog_token, __le16 ba_param_set,
			 __le16 ba_timeout, __le16 ba_seq_ctrl)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
	u16 param_set = le16_to_cpu(ba_param_set);
	u16 agg_timeout = le16_to_cpu(ba_timeout);
	u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
	struct wil_sta_info *sta;
	u8 cid, tid;
	u16 agg_wsize = 0;
	/* bit 0: A-MSDU supported
	 * bit 1: policy (should be 0 for us)
	 * bits 2..5: TID
	 * bits 6..15: buffer size
	 */
	u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
	bool agg_amsdu = !!(param_set & BIT(0));
	int ba_policy = param_set & BIT(1);
	u16 status = WLAN_STATUS_SUCCESS;
	u16 ssn = seq_ctrl >> 4;
	struct wil_tid_ampdu_rx *r;
	int rc = 0;

	might_sleep();
	parse_cidxtid(cidxtid, &cid, &tid);

	/* sanity checks */
	if (cid >= WIL6210_MAX_CID) {
		wil_err(wil, "BACK: invalid CID %d\n", cid);
		rc = -EINVAL;
		goto out;
	}

	sta = &wil->sta[cid];
	if (sta->status != wil_sta_connected) {
		wil_err(wil, "BACK: CID %d not connected\n", cid);
		rc = -EINVAL;
		goto out;
	}

	wil_dbg_wmi(wil,
		    "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
		    cid, sta->addr, tid, req_agg_wsize, agg_timeout,
		    agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);

	/* apply policies */
	if (ba_policy) {
		wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
		status = WLAN_STATUS_INVALID_QOS_PARAM;
	}
	if (status == WLAN_STATUS_SUCCESS) {
		if (req_agg_wsize == 0) {
			wil_dbg_misc(wil, "Suggest BACK wsize %d\n",
				     WIL_MAX_AGG_WSIZE);
			agg_wsize = WIL_MAX_AGG_WSIZE;
		} else {
			agg_wsize = min_t(u16,
					  WIL_MAX_AGG_WSIZE, req_agg_wsize);
		}
	}

	rc = wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, status,
			       agg_amsdu, agg_wsize, agg_timeout);
	if (rc || (status != WLAN_STATUS_SUCCESS)) {
		wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
			status);
		goto out;
	}

	/* apply */
	r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
	spin_lock_bh(&sta->tid_rx_lock);
	wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
	sta->tid_rx[tid] = r;
	spin_unlock_bh(&sta->tid_rx_lock);

out:
	return rc;
}

/* BACK - Tx side (originator) */
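/* Ask the firmware to open a Block Ack session for a Tx vring, unless
 * one is already established or a previous request is still pending.
 */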
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
	u8 agg_wsize = wil_agg_size(wil, wsize);
	u16 agg_timeout = 0;
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int rc = 0;

	if (txdata->addba_in_progress) {
		wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
			     ringid);
		goto out;
	}
	if (txdata->agg_wsize) {
		wil_dbg_misc(wil,
			     "ADDBA for vring[%d] already done for wsize %d\n",
			     ringid, txdata->agg_wsize);
		goto out;
	}
	txdata->addba_in_progress = true;
	rc = wmi_addba(wil, txdata->mid, ringid, agg_wsize, agg_timeout);
	if (rc) {
		wil_err(wil, "wmi_addba failed, rc (%d)", rc);
		txdata->addba_in_progress = false;
	}

out:
	return rc;
}