rmnet_vnd.c

/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */

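/* Note: the fixup helpers below only bump the virtual device's software
 * stats counters; they are expected to be called from the rmnet
 * ingress/egress handlers (see rmnet_handlers.h) once a packet has been
 * received on, or queued for transmission on behalf of, this device.
 */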
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
}

void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
}

/* Network Device Operations */

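/* Transmit path: the skb is handed to the rmnet egress handler, which is
 * expected to perform any MAP framing and forward the packet out on the
 * underlying real device. If no egress endpoint is configured for this
 * VND, the packet is counted as dropped and freed.
 */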
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct rmnet_priv *priv;

        priv = netdev_priv(dev);
        if (priv->local_ep.egress_dev) {
                rmnet_egress_handler(skb, &priv->local_ep);
        } else {
                dev->stats.tx_dropped++;
                kfree_skb(skb);
        }
        return NETDEV_TX_OK;
}

static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
        if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
                return -EINVAL;

        rmnet_dev->mtu = new_mtu;
        return 0;
}

static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
        struct rmnet_priv *priv = netdev_priv(dev);

        return priv->real_dev->ifindex;
}

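/* Only a minimal set of ndo callbacks is needed here. ndo_get_iflink
 * reports the ifindex of the underlying real device, so userspace tools
 * such as `ip link` can see which transport device backs this VND.
 */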
static const struct net_device_ops rmnet_vnd_ops = {
        .ndo_start_xmit = rmnet_vnd_start_xmit,
        .ndo_change_mtu = rmnet_vnd_change_mtu,
        .ndo_get_iflink = rmnet_vnd_get_iflink,
};

/* Called by the kernel whenever a new rmnet<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc.
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
        rmnet_dev->netdev_ops = &rmnet_vnd_ops;
        rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
        rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
        random_ether_addr(rmnet_dev->dev_addr);
        rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

        /* Raw IP mode */
        rmnet_dev->header_ops = NULL;  /* No header */
        rmnet_dev->type = ARPHRD_RAWIP;
        rmnet_dev->hard_header_len = 0;
        rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

        rmnet_dev->needs_free_netdev = true;
}

/* Exposed API */

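/* rmnet_vnd_newlink() registers the new virtual device and records it in
 * the owning port's device table, indexed by MUX ID, presumably so that
 * ingress demultiplexing can map each MUX ID back to its VND.
 */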
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
                      struct rmnet_port *port,
                      struct net_device *real_dev)
{
        struct rmnet_priv *priv;
        int rc;

        if (port->rmnet_devices[id])
                return -EINVAL;

        rc = register_netdevice(rmnet_dev);
        if (!rc) {
                port->rmnet_devices[id] = rmnet_dev;
                port->nr_rmnet_devs++;

                rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

                priv = netdev_priv(rmnet_dev);
                priv->mux_id = id;
                priv->real_dev = real_dev;

                netdev_dbg(rmnet_dev, "rmnet dev created\n");
        }

        return rc;
}

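/* Note: dellink only clears the port's bookkeeping for this MUX ID; the
 * actual unregister_netdevice() of the VND is assumed to be handled by
 * the caller (the rmnet link management code) under RTNL.
 */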
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port)
{
        if (id >= RMNET_MAX_LOGICAL_EP || !port->rmnet_devices[id])
                return -EINVAL;

        port->rmnet_devices[id] = NULL;
        port->nr_rmnet_devs--;
        return 0;
}

u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
        struct rmnet_priv *priv;

        priv = netdev_priv(rmnet_dev);
        return priv->mux_id;
}

/* Gets the logical endpoint configuration for a RmNet virtual network
 * device node. The caller should confirm that the device is a RmNet VND
 * before calling.
 */
struct rmnet_endpoint *rmnet_vnd_get_endpoint(struct net_device *rmnet_dev)
{
        struct rmnet_priv *priv;

        if (!rmnet_dev)
                return NULL;

        priv = netdev_priv(rmnet_dev);
        return &priv->local_ep;
}

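/* Flow control: presumably driven by MAP flow-control commands from the
 * modem; this simply stops or wakes the VND's TX queue accordingly.
 */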
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
        netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);

        /* Although we expect a similar number of enable and disable
         * commands, optimize for disable, which is more latency-sensitive
         * than enable.
         */
        if (unlikely(enable))
                netif_wake_queue(rmnet_dev);
        else
                netif_stop_queue(rmnet_dev);

        return 0;
}