/* rmnet_handlers.c */
  1. /* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. * RMNET Data ingress/egress handler
  13. *
  14. */
  15. #include <linux/netdevice.h>
  16. #include <linux/netdev_features.h>
  17. #include "rmnet_private.h"
  18. #include "rmnet_config.h"
  19. #include "rmnet_vnd.h"
  20. #include "rmnet_map.h"
  21. #include "rmnet_handlers.h"
  22. #define RMNET_IP_VERSION_4 0x40
  23. #define RMNET_IP_VERSION_6 0x60
  24. /* Helper Functions */
  25. static void rmnet_set_skb_proto(struct sk_buff *skb)
  26. {
  27. switch (skb->data[0] & 0xF0) {
  28. case RMNET_IP_VERSION_4:
  29. skb->protocol = htons(ETH_P_IP);
  30. break;
  31. case RMNET_IP_VERSION_6:
  32. skb->protocol = htons(ETH_P_IPV6);
  33. break;
  34. default:
  35. skb->protocol = htons(ETH_P_MAP);
  36. break;
  37. }
  38. }
  39. /* Generic handler */
  40. static rx_handler_result_t
  41. rmnet_bridge_handler(struct sk_buff *skb, struct rmnet_endpoint *ep)
  42. {
  43. if (!ep->egress_dev)
  44. kfree_skb(skb);
  45. else
  46. rmnet_egress_handler(skb, ep);
  47. return RX_HANDLER_CONSUMED;
  48. }
  49. static rx_handler_result_t
  50. rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_endpoint *ep)
  51. {
  52. switch (ep->rmnet_mode) {
  53. case RMNET_EPMODE_NONE:
  54. return RX_HANDLER_PASS;
  55. case RMNET_EPMODE_BRIDGE:
  56. return rmnet_bridge_handler(skb, ep);
  57. case RMNET_EPMODE_VND:
  58. skb_reset_transport_header(skb);
  59. skb_reset_network_header(skb);
  60. rmnet_vnd_rx_fixup(skb, skb->dev);
  61. skb->pkt_type = PACKET_HOST;
  62. skb_set_mac_header(skb, 0);
  63. netif_receive_skb(skb);
  64. return RX_HANDLER_CONSUMED;
  65. default:
  66. kfree_skb(skb);
  67. return RX_HANDLER_CONSUMED;
  68. }
  69. }
  70. static rx_handler_result_t
  71. rmnet_ingress_deliver_packet(struct sk_buff *skb,
  72. struct rmnet_port *port)
  73. {
  74. if (!port) {
  75. kfree_skb(skb);
  76. return RX_HANDLER_CONSUMED;
  77. }
  78. skb->dev = port->local_ep.egress_dev;
  79. return rmnet_deliver_skb(skb, &port->local_ep);
  80. }
/* MAP handler */

/* Parse one MAP-framed packet: control messages go to the command
 * handler, data packets are demuxed onto the endpoint selected by the
 * header's mux ID, then stripped of the MAP header and delivered.
 */
static rx_handler_result_t
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u8 mux_id;
	u16 len;

	/* CD bit set means this is a MAP command, not data; only handle
	 * it if the port is configured for commands, otherwise drop.
	 */
	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->ingress_data_format
		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
			return rmnet_map_command(skb, port);
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	/* Payload length excludes trailing pad bytes.
	 * NOTE(review): assumes pad <= length in the MAP header; a
	 * malformed header would wrap this u16 — confirm the header is
	 * validated before this point.
	 */
	len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);

	/* Out-of-range mux ID: no endpoint slot exists, drop. */
	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	ep = &port->muxed_ep[mux_id];

	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
		skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	skb_trim(skb, len);
	rmnet_set_skb_proto(skb);
	return rmnet_deliver_skb(skb, ep);
}
  111. static rx_handler_result_t
  112. rmnet_map_ingress_handler(struct sk_buff *skb,
  113. struct rmnet_port *port)
  114. {
  115. struct sk_buff *skbn;
  116. int rc;
  117. if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
  118. while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
  119. __rmnet_map_ingress_handler(skbn, port);
  120. consume_skb(skb);
  121. rc = RX_HANDLER_CONSUMED;
  122. } else {
  123. rc = __rmnet_map_ingress_handler(skb, port);
  124. }
  125. return rc;
  126. }
  127. static int rmnet_map_egress_handler(struct sk_buff *skb,
  128. struct rmnet_port *port,
  129. struct rmnet_endpoint *ep,
  130. struct net_device *orig_dev)
  131. {
  132. int required_headroom, additional_header_len;
  133. struct rmnet_map_header *map_header;
  134. additional_header_len = 0;
  135. required_headroom = sizeof(struct rmnet_map_header);
  136. if (skb_headroom(skb) < required_headroom) {
  137. if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
  138. return RMNET_MAP_CONSUMED;
  139. }
  140. map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
  141. if (!map_header)
  142. return RMNET_MAP_CONSUMED;
  143. if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
  144. if (ep->mux_id == 0xff)
  145. map_header->mux_id = 0;
  146. else
  147. map_header->mux_id = ep->mux_id;
  148. }
  149. skb->protocol = htons(ETH_P_MAP);
  150. return RMNET_MAP_SUCCESS;
  151. }
  152. /* Ingress / Egress Entry Points */
  153. /* Processes packet as per ingress data format for receiving device. Logical
  154. * endpoint is determined from packet inspection. Packet is then sent to the
  155. * egress device listed in the logical endpoint configuration.
  156. */
  157. rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
  158. {
  159. struct rmnet_port *port;
  160. struct sk_buff *skb = *pskb;
  161. struct net_device *dev;
  162. int rc;
  163. if (!skb)
  164. return RX_HANDLER_CONSUMED;
  165. dev = skb->dev;
  166. port = rmnet_get_port(dev);
  167. if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
  168. rc = rmnet_map_ingress_handler(skb, port);
  169. } else {
  170. switch (ntohs(skb->protocol)) {
  171. case ETH_P_MAP:
  172. if (port->local_ep.rmnet_mode ==
  173. RMNET_EPMODE_BRIDGE) {
  174. rc = rmnet_ingress_deliver_packet(skb, port);
  175. } else {
  176. kfree_skb(skb);
  177. rc = RX_HANDLER_CONSUMED;
  178. }
  179. break;
  180. case ETH_P_IP:
  181. case ETH_P_IPV6:
  182. rc = rmnet_ingress_deliver_packet(skb, port);
  183. break;
  184. default:
  185. rc = RX_HANDLER_PASS;
  186. }
  187. }
  188. return rc;
  189. }
  190. /* Modifies packet as per logical endpoint configuration and egress data format
  191. * for egress device configured in logical endpoint. Packet is then transmitted
  192. * on the egress device.
  193. */
  194. void rmnet_egress_handler(struct sk_buff *skb,
  195. struct rmnet_endpoint *ep)
  196. {
  197. struct net_device *orig_dev;
  198. struct rmnet_port *port;
  199. orig_dev = skb->dev;
  200. skb->dev = ep->egress_dev;
  201. port = rmnet_get_port(skb->dev);
  202. if (!port) {
  203. kfree_skb(skb);
  204. return;
  205. }
  206. if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
  207. switch (rmnet_map_egress_handler(skb, port, ep, orig_dev)) {
  208. case RMNET_MAP_CONSUMED:
  209. return;
  210. case RMNET_MAP_SUCCESS:
  211. break;
  212. default:
  213. kfree_skb(skb);
  214. return;
  215. }
  216. }
  217. if (ep->rmnet_mode == RMNET_EPMODE_VND)
  218. rmnet_vnd_tx_fixup(skb, orig_dev);
  219. dev_queue_xmit(skb);
  220. }