fq_impl.h
  1. /*
  2. * Copyright (c) 2016 Qualcomm Atheros, Inc
  3. *
  4. * GPL v2
  5. *
  6. * Based on net/sched/sch_fq_codel.c
  7. */
  8. #ifndef __NET_SCHED_FQ_IMPL_H
  9. #define __NET_SCHED_FQ_IMPL_H
  10. #include <net/fq.h>
  11. /* functions that are embedded into includer */
  12. static struct sk_buff *fq_flow_dequeue(struct fq *fq,
  13. struct fq_flow *flow)
  14. {
  15. struct fq_tin *tin = flow->tin;
  16. struct fq_flow *i;
  17. struct sk_buff *skb;
  18. lockdep_assert_held(&fq->lock);
  19. skb = __skb_dequeue(&flow->queue);
  20. if (!skb)
  21. return NULL;
  22. tin->backlog_bytes -= skb->len;
  23. tin->backlog_packets--;
  24. flow->backlog -= skb->len;
  25. fq->backlog--;
  26. if (flow->backlog == 0) {
  27. list_del_init(&flow->backlogchain);
  28. } else {
  29. i = flow;
  30. list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
  31. if (i->backlog < flow->backlog)
  32. break;
  33. list_move_tail(&flow->backlogchain,
  34. &i->backlogchain);
  35. }
  36. return skb;
  37. }
  38. static struct sk_buff *fq_tin_dequeue(struct fq *fq,
  39. struct fq_tin *tin,
  40. fq_tin_dequeue_t dequeue_func)
  41. {
  42. struct fq_flow *flow;
  43. struct list_head *head;
  44. struct sk_buff *skb;
  45. lockdep_assert_held(&fq->lock);
  46. begin:
  47. head = &tin->new_flows;
  48. if (list_empty(head)) {
  49. head = &tin->old_flows;
  50. if (list_empty(head))
  51. return NULL;
  52. }
  53. flow = list_first_entry(head, struct fq_flow, flowchain);
  54. if (flow->deficit <= 0) {
  55. flow->deficit += fq->quantum;
  56. list_move_tail(&flow->flowchain,
  57. &tin->old_flows);
  58. goto begin;
  59. }
  60. skb = dequeue_func(fq, tin, flow);
  61. if (!skb) {
  62. /* force a pass through old_flows to prevent starvation */
  63. if ((head == &tin->new_flows) &&
  64. !list_empty(&tin->old_flows)) {
  65. list_move_tail(&flow->flowchain, &tin->old_flows);
  66. } else {
  67. list_del_init(&flow->flowchain);
  68. flow->tin = NULL;
  69. }
  70. goto begin;
  71. }
  72. flow->deficit -= skb->len;
  73. tin->tx_bytes += skb->len;
  74. tin->tx_packets++;
  75. return skb;
  76. }
  77. static struct fq_flow *fq_flow_classify(struct fq *fq,
  78. struct fq_tin *tin,
  79. struct sk_buff *skb,
  80. fq_flow_get_default_t get_default_func)
  81. {
  82. struct fq_flow *flow;
  83. u32 hash;
  84. u32 idx;
  85. lockdep_assert_held(&fq->lock);
  86. hash = skb_get_hash_perturb(skb, fq->perturbation);
  87. idx = reciprocal_scale(hash, fq->flows_cnt);
  88. flow = &fq->flows[idx];
  89. if (flow->tin && flow->tin != tin) {
  90. flow = get_default_func(fq, tin, idx, skb);
  91. tin->collisions++;
  92. fq->collisions++;
  93. }
  94. if (!flow->tin)
  95. tin->flows++;
  96. return flow;
  97. }
  98. static void fq_recalc_backlog(struct fq *fq,
  99. struct fq_tin *tin,
  100. struct fq_flow *flow)
  101. {
  102. struct fq_flow *i;
  103. if (list_empty(&flow->backlogchain))
  104. list_add_tail(&flow->backlogchain, &fq->backlogs);
  105. i = flow;
  106. list_for_each_entry_continue_reverse(i, &fq->backlogs,
  107. backlogchain)
  108. if (i->backlog > flow->backlog)
  109. break;
  110. list_move(&flow->backlogchain, &i->backlogchain);
  111. }
  112. static void fq_tin_enqueue(struct fq *fq,
  113. struct fq_tin *tin,
  114. struct sk_buff *skb,
  115. fq_skb_free_t free_func,
  116. fq_flow_get_default_t get_default_func)
  117. {
  118. struct fq_flow *flow;
  119. lockdep_assert_held(&fq->lock);
  120. flow = fq_flow_classify(fq, tin, skb, get_default_func);
  121. flow->tin = tin;
  122. flow->backlog += skb->len;
  123. tin->backlog_bytes += skb->len;
  124. tin->backlog_packets++;
  125. fq->backlog++;
  126. fq_recalc_backlog(fq, tin, flow);
  127. if (list_empty(&flow->flowchain)) {
  128. flow->deficit = fq->quantum;
  129. list_add_tail(&flow->flowchain,
  130. &tin->new_flows);
  131. }
  132. __skb_queue_tail(&flow->queue, skb);
  133. if (fq->backlog > fq->limit) {
  134. flow = list_first_entry_or_null(&fq->backlogs,
  135. struct fq_flow,
  136. backlogchain);
  137. if (!flow)
  138. return;
  139. skb = fq_flow_dequeue(fq, flow);
  140. if (!skb)
  141. return;
  142. free_func(fq, flow->tin, flow, skb);
  143. flow->tin->overlimit++;
  144. fq->overlimit++;
  145. }
  146. }
  147. static void fq_flow_reset(struct fq *fq,
  148. struct fq_flow *flow,
  149. fq_skb_free_t free_func)
  150. {
  151. struct sk_buff *skb;
  152. while ((skb = fq_flow_dequeue(fq, flow)))
  153. free_func(fq, flow->tin, flow, skb);
  154. if (!list_empty(&flow->flowchain))
  155. list_del_init(&flow->flowchain);
  156. if (!list_empty(&flow->backlogchain))
  157. list_del_init(&flow->backlogchain);
  158. flow->tin = NULL;
  159. WARN_ON_ONCE(flow->backlog);
  160. }
  161. static void fq_tin_reset(struct fq *fq,
  162. struct fq_tin *tin,
  163. fq_skb_free_t free_func)
  164. {
  165. struct list_head *head;
  166. struct fq_flow *flow;
  167. for (;;) {
  168. head = &tin->new_flows;
  169. if (list_empty(head)) {
  170. head = &tin->old_flows;
  171. if (list_empty(head))
  172. break;
  173. }
  174. flow = list_first_entry(head, struct fq_flow, flowchain);
  175. fq_flow_reset(fq, flow, free_func);
  176. }
  177. WARN_ON_ONCE(tin->backlog_bytes);
  178. WARN_ON_ONCE(tin->backlog_packets);
  179. }
  180. static void fq_flow_init(struct fq_flow *flow)
  181. {
  182. INIT_LIST_HEAD(&flow->flowchain);
  183. INIT_LIST_HEAD(&flow->backlogchain);
  184. __skb_queue_head_init(&flow->queue);
  185. }
  186. static void fq_tin_init(struct fq_tin *tin)
  187. {
  188. INIT_LIST_HEAD(&tin->new_flows);
  189. INIT_LIST_HEAD(&tin->old_flows);
  190. }
  191. static int fq_init(struct fq *fq, int flows_cnt)
  192. {
  193. int i;
  194. memset(fq, 0, sizeof(fq[0]));
  195. INIT_LIST_HEAD(&fq->backlogs);
  196. spin_lock_init(&fq->lock);
  197. fq->flows_cnt = max_t(u32, flows_cnt, 1);
  198. fq->perturbation = prandom_u32();
  199. fq->quantum = 300;
  200. fq->limit = 8192;
  201. fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
  202. if (!fq->flows)
  203. return -ENOMEM;
  204. for (i = 0; i < fq->flows_cnt; i++)
  205. fq_flow_init(&fq->flows[i]);
  206. return 0;
  207. }
  208. static void fq_reset(struct fq *fq,
  209. fq_skb_free_t free_func)
  210. {
  211. int i;
  212. for (i = 0; i < fq->flows_cnt; i++)
  213. fq_flow_reset(fq, &fq->flows[i], free_func);
  214. kfree(fq->flows);
  215. fq->flows = NULL;
  216. }
  217. #endif