rmnet_vnd.c

/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>

#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */
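
/* The fixup handlers account every packet entering or leaving a VND in
 * per-CPU counters. Writers bump the counters inside a u64_stats_update
 * section so 64-bit values can be read consistently on 32-bit hosts.
 */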
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
        struct rmnet_priv *priv = netdev_priv(dev);
        struct rmnet_pcpu_stats *pcpu_ptr;

        pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

        u64_stats_update_begin(&pcpu_ptr->syncp);
        pcpu_ptr->stats.rx_pkts++;
        pcpu_ptr->stats.rx_bytes += skb->len;
        u64_stats_update_end(&pcpu_ptr->syncp);
}

void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
        struct rmnet_priv *priv = netdev_priv(dev);
        struct rmnet_pcpu_stats *pcpu_ptr;

        pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

        u64_stats_update_begin(&pcpu_ptr->syncp);
        pcpu_ptr->stats.tx_pkts++;
        pcpu_ptr->stats.tx_bytes += skb->len;
        u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */
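
/* Transmit path: hand the skb to the egress handler, which prepends the
 * MAP header and queues it on the underlying physical device. If the VND
 * has no real device attached, the packet is counted as dropped and freed.
 */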
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct rmnet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->real_dev) {
                rmnet_egress_handler(skb);
        } else {
                this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
                kfree_skb(skb);
        }
        return NETDEV_TX_OK;
}
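
/* MTU is bounded above by RMNET_MAX_PACKET_SIZE, the driver's upper
 * limit on packet size (defined in rmnet_private.h).
 */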
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
        if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
                return -EINVAL;

        rmnet_dev->mtu = new_mtu;
        return 0;
}

static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
        struct rmnet_priv *priv = netdev_priv(dev);

        return priv->real_dev->ifindex;
}
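
/* ndo_init/ndo_uninit: allocate and free the per-CPU stats block and the
 * GRO cells used to feed received packets into the stack. A gro_cells_init()
 * failure unwinds the per-CPU allocation.
 */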
static int rmnet_vnd_init(struct net_device *dev)
{
        struct rmnet_priv *priv = netdev_priv(dev);
        int err;

        priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
        if (!priv->pcpu_stats)
                return -ENOMEM;

        err = gro_cells_init(&priv->gro_cells, dev);
        if (err) {
                free_percpu(priv->pcpu_stats);
                return err;
        }

        return 0;
}

static void rmnet_vnd_uninit(struct net_device *dev)
{
        struct rmnet_priv *priv = netdev_priv(dev);

        gro_cells_destroy(&priv->gro_cells);
        free_percpu(priv->pcpu_stats);
}
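
/* Aggregate the per-CPU counters into rtnl_link_stats64. The rx/tx counts
 * are sampled under the u64_stats fetch/retry loop; tx_drops is bumped
 * with this_cpu_inc() outside the syncp, so it is read without retrying.
 */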
static void rmnet_get_stats64(struct net_device *dev,
                              struct rtnl_link_stats64 *s)
{
        struct rmnet_priv *priv = netdev_priv(dev);
        struct rmnet_vnd_stats total_stats;
        struct rmnet_pcpu_stats *pcpu_ptr;
        unsigned int cpu, start;

        memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

        for_each_possible_cpu(cpu) {
                pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

                do {
                        start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
                        total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
                        total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
                        total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
                        total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
                } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

                total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
        }

        s->rx_packets = total_stats.rx_pkts;
        s->rx_bytes = total_stats.rx_bytes;
        s->tx_packets = total_stats.tx_pkts;
        s->tx_bytes = total_stats.tx_bytes;
        s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
        .ndo_start_xmit = rmnet_vnd_start_xmit,
        .ndo_change_mtu = rmnet_vnd_change_mtu,
        .ndo_get_iflink = rmnet_vnd_get_iflink,
        .ndo_add_slave = rmnet_add_bridge,
        .ndo_del_slave = rmnet_del_bridge,
        .ndo_init = rmnet_vnd_init,
        .ndo_uninit = rmnet_vnd_uninit,
        .ndo_get_stats64 = rmnet_get_stats64,
};

/* Called by the kernel whenever a new rmnet<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc.
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
        rmnet_dev->netdev_ops = &rmnet_vnd_ops;
        rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
        rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
        random_ether_addr(rmnet_dev->dev_addr);
        rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

        /* Raw IP mode */
        rmnet_dev->header_ops = NULL;  /* No header */
        rmnet_dev->type = ARPHRD_RAWIP;
        rmnet_dev->hard_header_len = 0;
        rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

        rmnet_dev->needs_free_netdev = true;
}

/* Exposed API */
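
/* rmnet_vnd_newlink() is reached from rmnet_newlink() in rmnet_config.c
 * when userspace creates a link, e.g. with an iproute2 command along the
 * lines of (exact syntax depends on the iproute2 version):
 *
 *   ip link add link wwan0 name rmnet0 type rmnet mux_id 1
 */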
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
                      struct rmnet_port *port,
                      struct net_device *real_dev,
                      struct rmnet_endpoint *ep)
{
        struct rmnet_priv *priv;
        int rc;

        if (ep->egress_dev)
                return -EINVAL;

        if (rmnet_get_endpoint(port, id))
                return -EBUSY;

        rmnet_dev->hw_features = NETIF_F_RXCSUM;
        rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        rmnet_dev->hw_features |= NETIF_F_SG;

        rc = register_netdevice(rmnet_dev);
        if (!rc) {
                ep->egress_dev = rmnet_dev;
                ep->mux_id = id;
                port->nr_rmnet_devs++;

                rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

                priv = netdev_priv(rmnet_dev);
                priv->mux_id = id;
                priv->real_dev = real_dev;

                netdev_dbg(rmnet_dev, "rmnet dev created\n");
        }

        return rc;
}
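
/* Detach the endpoint from its VND. The caller is responsible for
 * unregistering the net_device itself.
 */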
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
                      struct rmnet_endpoint *ep)
{
        if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
                return -EINVAL;

        ep->egress_dev = NULL;
        port->nr_rmnet_devs--;
        return 0;
}

u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
        struct rmnet_priv *priv;

        priv = netdev_priv(rmnet_dev);
        return priv->mux_id;
}
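
/* Start or stop the VND TX queue, typically in response to MAP flow
 * control commands received from the modem.
 */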
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
        netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);

        /* Although we expect a similar number of enable and disable
         * commands, optimize for the disable: it is more latency
         * sensitive than the enable.
         */
        if (unlikely(enable))
                netif_wake_queue(rmnet_dev);
        else
                netif_stop_queue(rmnet_dev);

        return 0;
}