/* agg-rx.c - Rx A-MPDU reorder buffer handling for mt76 */
  1. /*
  2. * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include "mt76.h"
  17. #define REORDER_TIMEOUT (HZ / 10)
  18. static void
  19. mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
  20. {
  21. struct sk_buff *skb;
  22. tid->head = ieee80211_sn_inc(tid->head);
  23. skb = tid->reorder_buf[idx];
  24. if (!skb)
  25. return;
  26. tid->reorder_buf[idx] = NULL;
  27. tid->nframes--;
  28. __skb_queue_tail(frames, skb);
  29. }
  30. static void
  31. mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, struct sk_buff_head *frames,
  32. u16 head)
  33. {
  34. int idx;
  35. while (ieee80211_sn_less(tid->head, head)) {
  36. idx = tid->head % tid->size;
  37. mt76_aggr_release(tid, frames, idx);
  38. }
  39. }
  40. static void
  41. mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
  42. {
  43. int idx = tid->head % tid->size;
  44. while (tid->reorder_buf[idx]) {
  45. mt76_aggr_release(tid, frames, idx);
  46. idx = tid->head % tid->size;
  47. }
  48. }
/*
 * Expire frames that have waited in the reorder buffer for longer than
 * REORDER_TIMEOUT: for each such frame, give up on the missing sequence
 * numbers before it and release everything up to (and past) it.
 * Caller must hold tid->lock.
 */
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	/* Nothing buffered, nothing can have timed out */
	if (!tid->nframes)
		return;

	/* First flush whatever is already in order at the window head */
	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	/* Walk the ring once, starting just past the head slot; stop early
	 * once all buffered frames have been visited (nframes == 0) */
	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *) skb->cb;
		/* Not stale yet, keep waiting for the gap to fill */
		if (!time_after(jiffies, status->reorder_time +
					 REORDER_TIMEOUT))
			continue;

		/* Timed out: declare the preceding holes lost and advance
		 * the head up to this frame's sequence number */
		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	/* Releasing may have made more frames contiguous at the head */
	mt76_rx_aggr_release_head(tid, frames);
}
  75. static void
  76. mt76_rx_aggr_reorder_work(struct work_struct *work)
  77. {
  78. struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
  79. reorder_work.work);
  80. struct mt76_dev *dev = tid->dev;
  81. struct sk_buff_head frames;
  82. int nframes;
  83. __skb_queue_head_init(&frames);
  84. local_bh_disable();
  85. spin_lock(&tid->lock);
  86. mt76_rx_aggr_check_release(tid, &frames);
  87. nframes = tid->nframes;
  88. spin_unlock(&tid->lock);
  89. if (nframes)
  90. ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
  91. REORDER_TIMEOUT);
  92. mt76_rx_complete(dev, &frames, -1);
  93. local_bh_enable();
  94. }
  95. static void
  96. mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
  97. {
  98. struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
  99. struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data;
  100. struct mt76_wcid *wcid = status->wcid;
  101. struct mt76_rx_tid *tid;
  102. u16 seqno;
  103. if (!ieee80211_is_ctl(bar->frame_control))
  104. return;
  105. if (!ieee80211_is_back_req(bar->frame_control))
  106. return;
  107. status->tid = le16_to_cpu(bar->control) >> 12;
  108. seqno = le16_to_cpu(bar->start_seq_num) >> 4;
  109. tid = rcu_dereference(wcid->aggr[status->tid]);
  110. if (!tid)
  111. return;
  112. spin_lock_bh(&tid->lock);
  113. mt76_rx_aggr_release_frames(tid, frames, seqno);
  114. mt76_rx_aggr_release_head(tid, frames);
  115. spin_unlock_bh(&tid->lock);
  116. }
/*
 * Insert a received frame into the A-MPDU reorder window for its TID.
 * @frames starts out holding @skb; in-order frames stay queued there for
 * immediate completion, while out-of-order frames are unlinked and parked
 * in the reorder buffer until their gap fills or times out.
 * Called under RCU from the rx path.
 */
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size;
	u8 idx;

	/* Default disposition: deliver the frame as-is */
	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	/* Non-aggregated frame: may still be a BAR that moves the window */
	if (!status->aggr) {
		mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* No rx aggregation session active for this TID */
	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);

	/* Session is being torn down; deliver without reordering */
	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	/* Ignore pre-window frames until the first in-window frame arrives */
	if (!tid->started) {
		if (sn_less)
			goto out;

		tid->started = true;
	}

	/* Behind the window head: duplicate or late retry, drop it */
	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	/* Exactly at the head: deliver in-line and advance the window */
	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	/* Out of order: take it off the delivery queue and buffer it */
	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	/* Park the frame; reorder_time drives the timeout-based release */
	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT);

out:
	spin_unlock_bh(&tid->lock);
}
  183. int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
  184. u16 ssn, u8 size)
  185. {
  186. struct mt76_rx_tid *tid;
  187. mt76_rx_aggr_stop(dev, wcid, tidno);
  188. tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]),
  189. GFP_KERNEL);
  190. if (!tid)
  191. return -ENOMEM;
  192. tid->dev = dev;
  193. tid->head = ssn;
  194. tid->size = size;
  195. INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
  196. spin_lock_init(&tid->lock);
  197. rcu_assign_pointer(wcid->aggr[tidno], tid);
  198. return 0;
  199. }
  200. EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
  201. static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
  202. {
  203. u8 size = tid->size;
  204. int i;
  205. spin_lock_bh(&tid->lock);
  206. tid->stopped = true;
  207. for (i = 0; tid->nframes && i < size; i++) {
  208. struct sk_buff *skb = tid->reorder_buf[i];
  209. if (!skb)
  210. continue;
  211. tid->nframes--;
  212. dev_kfree_skb(skb);
  213. }
  214. spin_unlock_bh(&tid->lock);
  215. cancel_delayed_work_sync(&tid->reorder_work);
  216. }
  217. void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
  218. {
  219. struct mt76_rx_tid *tid;
  220. rcu_read_lock();
  221. tid = rcu_dereference(wcid->aggr[tidno]);
  222. if (tid) {
  223. rcu_assign_pointer(wcid->aggr[tidno], NULL);
  224. mt76_rx_aggr_shutdown(dev, tid);
  225. kfree_rcu(tid, rcu_head);
  226. }
  227. rcu_read_unlock();
  228. }
  229. EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);