/* agg-rx.c - mt76 rx A-MPDU reorder buffer handling */
  1. /*
  2. * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include "mt76.h"
  17. #define REORDER_TIMEOUT (HZ / 10)
  18. static void
  19. mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
  20. {
  21. struct sk_buff *skb;
  22. tid->head = ieee80211_sn_inc(tid->head);
  23. skb = tid->reorder_buf[idx];
  24. if (!skb)
  25. return;
  26. tid->reorder_buf[idx] = NULL;
  27. tid->nframes--;
  28. __skb_queue_tail(frames, skb);
  29. }
  30. static void
  31. mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, struct sk_buff_head *frames,
  32. u16 head)
  33. {
  34. int idx;
  35. while (ieee80211_sn_less(tid->head, head)) {
  36. idx = tid->head % tid->size;
  37. mt76_aggr_release(tid, frames, idx);
  38. }
  39. }
  40. static void
  41. mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
  42. {
  43. int idx = tid->head % tid->size;
  44. while (tid->reorder_buf[idx]) {
  45. mt76_aggr_release(tid, frames, idx);
  46. idx = tid->head % tid->size;
  47. }
  48. }
/* Scan the reorder buffer for frames whose reorder_time has exceeded
 * REORDER_TIMEOUT and force-release everything up to them, so a lost
 * frame cannot stall the window forever.  Caller must hold tid->lock.
 */
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	/* First deliver whatever is already in order at the head */
	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	/* Walk the circular buffer starting one past the head; stop once
	 * we wrap back to start or have seen every buffered frame.
	 */
	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *) skb->cb;
		if (!time_after(jiffies, status->reorder_time +
					 REORDER_TIMEOUT))
			continue;

		/* Frame timed out: flush the window up to its seqno,
		 * turning the holes before it into skipped slots.
		 */
		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	/* The flush above may have made more head frames contiguous */
	mt76_rx_aggr_release_head(tid, frames);
}
  75. static void
  76. mt76_rx_aggr_reorder_work(struct work_struct *work)
  77. {
  78. struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
  79. reorder_work.work);
  80. struct mt76_dev *dev = tid->dev;
  81. struct sk_buff_head frames;
  82. __skb_queue_head_init(&frames);
  83. local_bh_disable();
  84. spin_lock(&tid->lock);
  85. mt76_rx_aggr_check_release(tid, &frames);
  86. spin_unlock(&tid->lock);
  87. ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT);
  88. mt76_rx_complete(dev, &frames, -1);
  89. local_bh_enable();
  90. }
/* Insert an rx frame into the per-TID reorder window, delivering any
 * frames that become in-order via @frames.  Non-aggregated frames (or
 * frames for stations/TIDs without an active session) pass straight
 * through.  rcu_dereference() implies the caller holds the RCU read
 * lock — verify against callers.
 */
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size;
	u8 idx;

	/* Optimistically queue for immediate delivery; unlinked again
	 * below if the frame must be buffered or dropped.
	 */
	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta || !status->aggr)
		return;

	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		/* Ignore pre-window frames until the first in-window
		 * frame establishes the session as started.
		 */
		if (sn_less)
			goto out;

		tid->started = true;
	}

	if (sn_less) {
		/* Frame is older than the window head: duplicate or
		 * late retransmission — drop it.
		 */
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	if (seqno == head) {
		/* In-order frame: deliver directly and release any
		 * buffered run it unblocks.
		 */
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	/* Out-of-order: take it off the delivery list for buffering */
	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	/* Buffer the frame and arm the timeout flush */
	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     REORDER_TIMEOUT);

out:
	spin_unlock_bh(&tid->lock);
}
  153. int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
  154. u16 ssn, u8 size)
  155. {
  156. struct mt76_rx_tid *tid;
  157. mt76_rx_aggr_stop(dev, wcid, tidno);
  158. tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]),
  159. GFP_KERNEL);
  160. if (!tid)
  161. return -ENOMEM;
  162. tid->dev = dev;
  163. tid->head = ssn;
  164. tid->size = size;
  165. INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
  166. spin_lock_init(&tid->lock);
  167. rcu_assign_pointer(wcid->aggr[tidno], tid);
  168. return 0;
  169. }
  170. EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
  171. static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
  172. {
  173. u8 size = tid->size;
  174. int i;
  175. spin_lock_bh(&tid->lock);
  176. tid->stopped = true;
  177. for (i = 0; tid->nframes && i < size; i++) {
  178. struct sk_buff *skb = tid->reorder_buf[i];
  179. if (!skb)
  180. continue;
  181. tid->nframes--;
  182. dev_kfree_skb(skb);
  183. }
  184. spin_unlock_bh(&tid->lock);
  185. cancel_delayed_work_sync(&tid->reorder_work);
  186. }
  187. void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
  188. {
  189. struct mt76_rx_tid *tid;
  190. rcu_read_lock();
  191. tid = rcu_dereference(wcid->aggr[tidno]);
  192. if (tid) {
  193. rcu_assign_pointer(wcid->aggr[tidno], NULL);
  194. mt76_rx_aggr_shutdown(dev, tid);
  195. kfree_rcu(tid, rcu_head);
  196. }
  197. rcu_read_unlock();
  198. }
  199. EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);