/*
 * Copyright (c) 2014 David Jander, Protonic Holland
 * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the version 2 of the GNU General Public License
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct can_rx_offload_cb {
	u32 timestamp;
};
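
/* Overlay the private can_rx_offload_cb on the skb's control buffer
 * (skb->cb); the BUILD_BUG_ON below guarantees it fits.
 */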
static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}
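
/* Compare and increment helpers that honour the mailbox scan direction:
 * with offload->inc set, mailboxes are walked upwards from mb_first to
 * mb_last, otherwise downwards.
 */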
static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}
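
/* NAPI poll handler: hand up to @quota skbs from the offload queue to
 * the network stack, updating the rx statistics, and reschedule NAPI if
 * more frames arrived while completing.
 */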
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}
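
/* Insert @new into @head so that the queue stays sorted by @compare,
 * walking the queue from the tail (newest entries first).
 */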
static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
					int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = (struct sk_buff *)head;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}

	__skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}
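
/* Read a single frame from mailbox @n. If the software queue is already
 * full or no skb can be allocated, the mailbox is still read (into a
 * scratch frame that is discarded) so the hardware can reuse it, and
 * rx_dropped is bumped if a frame was lost that way.
 */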
static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb = NULL;
	struct can_rx_offload_cb *cb;
	struct can_frame *cf;
	int ret;

	/* If queue is full or skb not available, read to discard mailbox */
	if (likely(skb_queue_len(&offload->skb_queue) <=
		   offload->skb_queue_len_max))
		skb = alloc_can_skb(offload->dev, &cf);

	if (!skb) {
		struct can_frame cf_overflow;
		u32 timestamp;

		ret = offload->mailbox_read(offload, &cf_overflow,
					    &timestamp, n);
		if (ret)
			offload->dev->stats.rx_dropped++;

		return NULL;
	}

	cb = can_rx_offload_get_cb(skb);
	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
	if (!ret) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
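
/* IRQ helper for timestamp based offload: drain all mailboxes flagged in
 * @pending, sort the frames by timestamp into a local queue, then splice
 * that queue onto the main skb_queue and kick NAPI. Returns the number
 * of frames queued.
 */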
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (!skb)
			break;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		if ((queue_len = skb_queue_len(&offload->skb_queue)) >
		    (offload->skb_queue_len_max / 8))
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
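
/* IRQ helper for FIFO based offload: drain mailbox 0 until it is empty,
 * queue the frames in arrival order and kick NAPI. Returns the number of
 * frames received.
 */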
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while ((skb = can_rx_offload_offload_one(offload, 0))) {
		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
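
/* Queue an error skb from IRQ context; reject it with -ENOMEM if the
 * queue has already exceeded its limit.
 */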
int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max)
		return -ENOMEM;

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);
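
/* Common initialisation: size the skb queue relative to the NAPI weight
 * and register the NAPI poll handler.
 */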
static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	can_rx_offload_reset(offload);
	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}
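
/* Register timestamp based offload: the scan direction (and the NAPI
 * weight) is derived from the order of mb_first and mb_last.
 */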
int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
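
/* Register FIFO based offload with an explicit NAPI weight. */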
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	can_rx_offload_reset(offload);
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);
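
/* Currently a no-op; called from the init and enable paths so drivers
 * get a single reset hook.
 */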
void can_rx_offload_reset(struct can_rx_offload *offload)
{
}
EXPORT_SYMBOL_GPL(can_rx_offload_reset);