/* rmnet_map_data.c */
  1. /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. * RMNET Data MAP protocol
  13. *
  14. */
  15. #include <linux/netdevice.h>
  16. #include <linux/ip.h>
  17. #include <linux/ipv6.h>
  18. #include <net/ip6_checksum.h>
  19. #include "rmnet_config.h"
  20. #include "rmnet_map.h"
  21. #include "rmnet_private.h"
  22. #define RMNET_MAP_DEAGGR_SPACING 64
  23. #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
  24. static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
  25. const void *txporthdr)
  26. {
  27. __sum16 *check = NULL;
  28. switch (protocol) {
  29. case IPPROTO_TCP:
  30. check = &(((struct tcphdr *)txporthdr)->check);
  31. break;
  32. case IPPROTO_UDP:
  33. check = &(((struct udphdr *)txporthdr)->check);
  34. break;
  35. default:
  36. check = NULL;
  37. break;
  38. }
  39. return check;
  40. }
/* Validate the MAP downlink checksum trailer against an IPv4 packet.
 *
 * The hardware reports a 1's complement sum over the received IP packet
 * in @csum_trailer. This routine removes the IP header's contribution,
 * folds in the TCP/UDP pseudo-header, and backs out the sender's
 * checksum; the remainder must match the checksum carried in the
 * transport header for the packet to be accepted.
 *
 * Returns 0 when the checksum verifies (or is legitimately skipped for
 * zero-checksum UDP), -EOPNOTSUPP for IP fragments, -EPROTONOSUPPORT
 * for non-TCP/UDP payloads, and -EINVAL on mismatch.
 */
static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer)
{
	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
	u16 csum_value, csum_value_final;
	struct iphdr *ip4h;
	void *txporthdr;
	__be16 addend;

	ip4h = (struct iphdr *)(skb->data);

	/* Checksum validation on fragments is not supported */
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
		return -EOPNOTSUPP;

	txporthdr = skb->data + ip4h->ihl * 4;

	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
	if (!csum_field)
		return -EPROTONOSUPPORT;

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
		return 0;

	/* Trailer holds the 1's complement sum of the full IP packet;
	 * subtract the IP header's own checksum contribution to obtain
	 * the sum over the payload alone.
	 */
	csum_value = ~ntohs(csum_trailer->csum_value);
	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
				     (__force __be16)hdr_csum);

	/* Fold in the pseudo-header (src/dst addr, proto, length) */
	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					 ip4h->protocol, 0);
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip_payload_csum, addend);

	/* Back out the checksum the sender placed in the header */
	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
			csum_value_final = ~csum_value_final;
			break;
		case IPPROTO_TCP:
			/* DL4 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field))
		return 0;
	else
		return -EINVAL;
}
  91. #if IS_ENABLED(CONFIG_IPV6)
/* Validate the MAP downlink checksum trailer against an IPv6 packet.
 *
 * Mirrors the IPv4 variant: the trailer's 1's complement sum over the
 * whole packet has the IPv6 header contribution removed and the IPv6
 * pseudo-header folded in, then the sender's transport checksum is
 * backed out and the remainder compared against it.
 *
 * NOTE(review): only a plain transport header directly after the fixed
 * IPv6 header is handled here — nexthdr is passed straight to
 * rmnet_map_get_csum_field(), so packets with extension headers get
 * -EPROTONOSUPPORT.
 *
 * Returns 0 on success, -EPROTONOSUPPORT for non-TCP/UDP nexthdr,
 * -EINVAL on checksum mismatch.
 */
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer)
{
	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
	u16 csum_value, csum_value_final;
	__be16 ip6_hdr_csum, addend;
	struct ipv6hdr *ip6h;
	void *txporthdr;
	u32 length;

	ip6h = (struct ipv6hdr *)(skb->data);

	txporthdr = skb->data + sizeof(struct ipv6hdr);
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
	if (!csum_field)
		return -EPROTONOSUPPORT;

	/* Remove the IPv6 header's contribution from the trailer sum */
	csum_value = ~ntohs(csum_trailer->csum_value);
	ip6_hdr_csum = (__force __be16)
			~ntohs((__force __be16)ip_compute_csum(ip6h,
			       (int)(txporthdr - (void *)(skb->data))));
	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
				      ip6_hdr_csum);

	/* For UDP use the datagram length from the UDP header itself;
	 * otherwise the IPv6 payload length.
	 */
	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					length, ip6h->nexthdr, 0));
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip6_payload_csum, addend);

	/* Back out the checksum the sender placed in the header */
	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1
			 * DL6 One's complement rule for UDP checksum 0
			 */
			csum_value_final = ~csum_value_final;
			break;
		case IPPROTO_TCP:
			/* DL6 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field))
		return 0;
	else
		return -EINVAL;
}
  143. #endif
  144. static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
  145. {
  146. struct iphdr *ip4h = (struct iphdr *)iphdr;
  147. void *txphdr;
  148. u16 *csum;
  149. txphdr = iphdr + ip4h->ihl * 4;
  150. if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
  151. csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
  152. *csum = ~(*csum);
  153. }
  154. }
/* Fill in the MAP uplink checksum header for an IPv4 packet.
 *
 * Records the offset from the IP header to the transport header as the
 * checksum start, the skb's csum_offset as the insert position, marks
 * checksum offload enabled, and flags UDP-over-IPv4. Finally the
 * existing transport checksum is complemented in place (the hardware
 * presumably folds it into its own computation — the device contract
 * is not visible here).
 */
static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)iphdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ip4_ind = 1;
	else
		ul_header->udp_ip4_ind = 0;

	/* Changing remaining fields to network order: the second 16-bit
	 * word of the header (insert offset plus flag bits) is swapped
	 * as one unit.
	 */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	/* Checksumming is now the hardware's job */
	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}
  177. #if IS_ENABLED(CONFIG_IPV6)
  178. static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
  179. {
  180. struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
  181. void *txphdr;
  182. u16 *csum;
  183. txphdr = ip6hdr + sizeof(struct ipv6hdr);
  184. if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
  185. csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
  186. *csum = ~(*csum);
  187. }
  188. }
/* Fill in the MAP uplink checksum header for an IPv6 packet.
 *
 * Same layout as the IPv4 variant: checksum start offset (transport
 * header relative to the IP header), insert offset from the skb,
 * offload enabled; udp_ip4_ind is always 0 for IPv6. The transport
 * checksum is then complemented in place for the hardware.
 */
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)ip6hdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	ul_header->udp_ip4_ind = 0;

	/* Changing remaining fields to network order: the second 16-bit
	 * word of the header (insert offset plus flag bits) is swapped
	 * as one unit.
	 */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	/* Checksumming is now the hardware's job */
	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
  207. #endif
  208. /* Adds MAP header to front of skb->data
  209. * Padding is calculated and set appropriately in MAP header. Mux ID is
  210. * initialized to 0.
  211. */
  212. struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
  213. int hdrlen, int pad)
  214. {
  215. struct rmnet_map_header *map_header;
  216. u32 padding, map_datalen;
  217. u8 *padbytes;
  218. map_datalen = skb->len - hdrlen;
  219. map_header = (struct rmnet_map_header *)
  220. skb_push(skb, sizeof(struct rmnet_map_header));
  221. memset(map_header, 0, sizeof(struct rmnet_map_header));
  222. if (pad == RMNET_MAP_NO_PAD_BYTES) {
  223. map_header->pkt_len = htons(map_datalen);
  224. return map_header;
  225. }
  226. padding = ALIGN(map_datalen, 4) - map_datalen;
  227. if (padding == 0)
  228. goto done;
  229. if (skb_tailroom(skb) < padding)
  230. return NULL;
  231. padbytes = (u8 *)skb_put(skb, padding);
  232. memset(padbytes, 0, padding);
  233. done:
  234. map_header->pkt_len = htons(map_datalen + padding);
  235. map_header->pad_len = padding & 0x3F;
  236. return map_header;
  237. }
  238. /* Deaggregates a single packet
  239. * A whole new buffer is allocated for each portion of an aggregated frame.
  240. * Caller should keep calling deaggregate() on the source skb until 0 is
  241. * returned, indicating that there are no more packets to deaggregate. Caller
  242. * is responsible for freeing the original skb.
  243. */
  244. struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
  245. struct rmnet_port *port)
  246. {
  247. struct rmnet_map_header *maph;
  248. struct sk_buff *skbn;
  249. u32 packet_len;
  250. if (skb->len == 0)
  251. return NULL;
  252. maph = (struct rmnet_map_header *)skb->data;
  253. packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
  254. if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
  255. packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
  256. if (((int)skb->len - (int)packet_len) < 0)
  257. return NULL;
  258. /* Some hardware can send us empty frames. Catch them */
  259. if (ntohs(maph->pkt_len) == 0)
  260. return NULL;
  261. skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
  262. if (!skbn)
  263. return NULL;
  264. skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
  265. skb_put(skbn, packet_len);
  266. memcpy(skbn->data, skb->data, packet_len);
  267. skb_pull(skb, packet_len);
  268. return skbn;
  269. }
  270. /* Validates packet checksums. Function takes a pointer to
  271. * the beginning of a buffer which contains the IP payload +
  272. * padding + checksum trailer.
  273. * Only IPv4 and IPv6 are supported along with TCP & UDP.
  274. * Fragmented or tunneled packets are not supported.
  275. */
  276. int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
  277. {
  278. struct rmnet_map_dl_csum_trailer *csum_trailer;
  279. if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
  280. return -EOPNOTSUPP;
  281. csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
  282. if (!csum_trailer->valid)
  283. return -EINVAL;
  284. if (skb->protocol == htons(ETH_P_IP))
  285. return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
  286. else if (skb->protocol == htons(ETH_P_IPV6))
  287. #if IS_ENABLED(CONFIG_IPV6)
  288. return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
  289. #else
  290. return -EPROTONOSUPPORT;
  291. #endif
  292. return 0;
  293. }
/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 *
 * The header is always pushed in front of the packet; when the device
 * cannot offload (missing features, not CHECKSUM_PARTIAL, or an
 * unsupported protocol) it is zeroed so the packet's existing software
 * checksum is used unchanged.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct net_device *orig_dev)
{
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	/* Device must advertise hardware checksum support */
	if (unlikely(!(orig_dev->features &
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* IP header sits immediately after the pushed UL header */
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_csum_header);

		if (skb->protocol == htons(ETH_P_IP)) {
			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
			return;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
			return;
#else
			goto sw_csum;
#endif
		}
	}

sw_csum:
	/* Offload not possible: zero the header so hardware leaves the
	 * packet's checksum alone.
	 */
	ul_header->csum_start_offset = 0;
	ul_header->csum_insert_offset = 0;
	ul_header->csum_enabled = 0;
	ul_header->udp_ip4_ind = 0;
}