vector_transports.c

/*
 * Copyright (C) 2017 - Cambridge Greys Limited
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Licensed under the GPL.
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/virtio_net.h>
#include <linux/virtio_net.h>
#include <linux/virtio_byteorder.h>
#include <linux/netdev_features.h>
#include "vector_user.h"
#include "vector_kern.h"

#define GOOD_LINEAR 512
#define GSO_ERROR "Incoming GSO frames and GRO disabled on the interface"

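/* Transport-specific header handling for the UML vector network driver.
 * Each transport (raw, tap, GRE, L2TPv3) supplies a pair of callbacks:
 * vp->form_header() writes the encapsulation header in front of an
 * outgoing frame, and vp->verify_header() checks it on receive,
 * returning 0 on success and -1 to drop the frame. raw_verify_header()
 * additionally returns 1 when the sender has already validated the
 * checksum (VIRTIO_NET_HDR_F_DATA_VALID), presumably so the caller can
 * skip re-checking it.
 */
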
struct gre_minimal_header {
	uint16_t header;
	uint16_t arptype;
};

struct uml_gre_data {
	uint32_t rx_key;
	uint32_t tx_key;
	uint32_t sequence;

	bool ipv6;
	bool has_sequence;
	bool pin_sequence;
	bool checksum;
	bool key;
	struct gre_minimal_header expected_header;

	uint32_t checksum_offset;
	uint32_t key_offset;
	uint32_t sequence_offset;
};

struct uml_l2tpv3_data {
	uint64_t rx_cookie;
	uint64_t tx_cookie;
	uint64_t rx_session;
	uint64_t tx_session;
	uint32_t counter;

	bool udp;
	bool ipv6;
	bool has_counter;
	bool pin_counter;
	bool cookie;
	bool cookie_is_64;

	uint32_t cookie_offset;
	uint32_t session_offset;
	uint32_t counter_offset;
};

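/* Write the L2TPv3 transmit header: an optional UDP-mode session marker
 * (L2TPV3_DATA_PACKET), the session ID, an optional 32- or 64-bit cookie,
 * and an optional counter which is either pinned to zero or incremented
 * per frame.
 */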
static int l2tpv3_form_header(uint8_t *header,
	struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_l2tpv3_data *td = vp->transport_data;
	uint32_t *counter;

	if (td->udp)
		*(uint32_t *) header = cpu_to_be32(L2TPV3_DATA_PACKET);
	(*(uint32_t *) (header + td->session_offset)) = td->tx_session;

	if (td->cookie) {
		if (td->cookie_is_64)
			(*(uint64_t *)(header + td->cookie_offset)) =
				td->tx_cookie;
		else
			(*(uint32_t *)(header + td->cookie_offset)) =
				td->tx_cookie;
	}
	if (td->has_counter) {
		counter = (uint32_t *)(header + td->counter_offset);
		if (td->pin_counter) {
			*counter = 0;
		} else {
			td->counter++;
			*counter = cpu_to_be32(td->counter);
		}
	}
	return 0;
}

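/* Write the GRE transmit header: the precomputed flags + protocol word,
 * an optional key, and an optional sequence number which is either
 * pinned to zero or incremented per frame.
 */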
static int gre_form_header(uint8_t *header,
	struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_gre_data *td = vp->transport_data;
	uint32_t *sequence;

	*((uint32_t *) header) = *((uint32_t *) &td->expected_header);
	if (td->key)
		(*(uint32_t *) (header + td->key_offset)) = td->tx_key;
	if (td->has_sequence) {
		sequence = (uint32_t *)(header + td->sequence_offset);
		if (td->pin_sequence)
			*sequence = 0;
		else
			*sequence = cpu_to_be32(++td->sequence);
	}
	return 0;
}

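/* For raw and tap the "header" is a virtio_net_hdr filled in from the
 * skb, carrying checksum and GSO metadata to the other end of the fd.
 */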
static int raw_form_header(uint8_t *header,
	struct sk_buff *skb, struct vector_private *vp)
{
	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

	virtio_net_hdr_from_skb(
		skb,
		vheader,
		virtio_legacy_is_little_endian(),
		false
	);
	return 0;
}

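/* Validate a received L2TPv3 header: skip the IP header that an IPv4
 * raw socket delivers, then compare the cookie (if configured) and the
 * session ID against the expected receive values.
 */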
static int l2tpv3_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_l2tpv3_data *td = vp->transport_data;
	uint32_t *session;
	uint64_t cookie;

	if ((!td->udp) && (!td->ipv6))
		header += sizeof(struct iphdr) /* fix for ipv4 raw */;

	/* we do not do a strict check for "data" packets as per
	 * the RFC spec because the pure IP spec does not have
	 * that anyway.
	 */

	if (td->cookie) {
		if (td->cookie_is_64)
			cookie = *(uint64_t *)(header + td->cookie_offset);
		else
			cookie = *(uint32_t *)(header + td->cookie_offset);
		if (cookie != td->rx_cookie) {
			if (net_ratelimit())
				netdev_err(vp->dev, "uml_l2tpv3: unknown cookie id");
			return -1;
		}
	}
	session = (uint32_t *) (header + td->session_offset);
	if (*session != td->rx_session) {
		if (net_ratelimit())
			netdev_err(vp->dev, "uml_l2tpv3: session mismatch");
		return -1;
	}
	return 0;
}

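/* Validate a received GRE header: skip the IP header on IPv4 raw
 * sockets, then check the flags + protocol word and, if keying is
 * configured, the key against the expected receive values.
 */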
static int gre_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	uint32_t key;
	struct uml_gre_data *td = vp->transport_data;

	if (!td->ipv6)
		header += sizeof(struct iphdr) /* fix for ipv4 raw */;

	if (*((uint32_t *) header) != *((uint32_t *) &td->expected_header)) {
		if (net_ratelimit())
			netdev_err(vp->dev, "header type disagreement, expecting %0x, got %0x",
				*((uint32_t *) &td->expected_header),
				*((uint32_t *) header)
			);
		return -1;
	}

	if (td->key) {
		key = (*(uint32_t *)(header + td->key_offset));
		if (key != td->rx_key) {
			if (net_ratelimit())
				netdev_err(vp->dev, "unknown key id %0x, expecting %0x",
						key, td->rx_key);
			return -1;
		}
	}
	return 0;
}

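/* Consume the virtio_net_hdr on a received frame: warn (ratelimited)
 * if a GSO frame arrives while the receive buffers are not sized for
 * it, honour VIRTIO_NET_HDR_F_DATA_VALID by returning 1, and otherwise
 * apply the checksum/GSO metadata to the skb.
 */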
static int raw_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

	if ((vheader->gso_type != VIRTIO_NET_HDR_GSO_NONE) &&
		(vp->req_size != 65536)) {
		if (net_ratelimit())
			netdev_err(
				vp->dev,
				GSO_ERROR
			);
	}
	if ((vheader->flags & VIRTIO_NET_HDR_F_DATA_VALID) > 0)
		return 1;

	virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());
	return 0;
}

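/* Helpers to pull numeric options out of the parsed command-line
 * argument list, e.g. "rx_key=0x1234". They return false if the
 * parameter is absent or does not parse as a number.
 */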
static bool get_uint_param(
	struct arglist *def, char *param, unsigned int *result)
{
	char *arg = uml_vector_fetch_arg(def, param);

	if (arg != NULL) {
		/* kstrtouint to match the unsigned int result pointer */
		if (kstrtouint(arg, 0, result) == 0)
			return true;
	}
	return false;
}

static bool get_ulong_param(
	struct arglist *def, char *param, unsigned long *result)
{
	char *arg = uml_vector_fetch_arg(def, param);

	if (arg != NULL) {
		if (kstrtoul(arg, 0, result) == 0)
			return true;
	}
	return false;
}

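/*
 * Parse the GRE options and compute the header layout. Per the offset
 * arithmetic below, the transmit header is:
 *
 *	bytes 0-3:	flags + protocol word (GRE_IRB)
 *	bytes 4-7:	key (only with rx_key/tx_key; GRE_MODE_KEY)
 *	next 4 bytes:	sequence number (only with sequence=1)
 *
 * On receive over an IPv4 raw socket the frame also carries the IP
 * header, hence the larger rx_header_size.
 */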
static int build_gre_transport_data(struct vector_private *vp)
{
	struct uml_gre_data *td;
	unsigned int temp_int;
	unsigned int temp_rx;
	unsigned int temp_tx;

	/* zeroed allocation so that optional flags default to off */
	vp->transport_data = kzalloc(sizeof(struct uml_gre_data), GFP_KERNEL);
	if (vp->transport_data == NULL)
		return -ENOMEM;
	td = vp->transport_data;
	td->sequence = 0;
	td->expected_header.arptype = GRE_IRB;
	td->expected_header.header = 0;

	vp->form_header = &gre_form_header;
	vp->verify_header = &gre_verify_header;
	vp->header_size = 4;
	td->key_offset = 4;
	td->sequence_offset = 4;
	td->checksum_offset = 4;

	td->ipv6 = false;
	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
		if (temp_int > 0)
			td->ipv6 = true;
	}
	td->key = false;
	if (get_uint_param(vp->parsed, "rx_key", &temp_rx)) {
		if (get_uint_param(vp->parsed, "tx_key", &temp_tx)) {
			td->key = true;
			td->expected_header.header |= GRE_MODE_KEY;
			td->rx_key = cpu_to_be32(temp_rx);
			td->tx_key = cpu_to_be32(temp_tx);
			vp->header_size += 4;
			td->sequence_offset += 4;
		} else {
			return -EINVAL;
		}
	}

	td->has_sequence = false;
	if (get_uint_param(vp->parsed, "sequence", &temp_int)) {
		if (temp_int > 0) {
			vp->header_size += 4;
			td->has_sequence = true;
			td->expected_header.header |= GRE_MODE_SEQUENCE;
			if (get_uint_param(
				vp->parsed, "pin_sequence", &temp_int)) {
				if (temp_int > 0)
					td->pin_sequence = true;
			}
		}
	}
	vp->rx_header_size = vp->header_size;
	if (!td->ipv6)
		vp->rx_header_size += sizeof(struct iphdr);
	return 0;
}

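/*
 * Parse the L2TPv3 options and compute the header layout. Per the
 * offset arithmetic below, the transmit header is:
 *
 *	4 bytes:	session marker (UDP encapsulation only)
 *	4 bytes:	session ID (rx_session/tx_session, mandatory)
 *	4 or 8 bytes:	cookie (only with rx_cookie/tx_cookie;
 *			64-bit if cookie64=1)
 *	4 bytes:	counter (only with counter=1)
 *
 * As with GRE, IPv4 raw sockets deliver the IP header as well on
 * receive, hence the larger rx_header_size.
 */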
static int build_l2tpv3_transport_data(struct vector_private *vp)
{
	struct uml_l2tpv3_data *td;
	unsigned int temp_int, temp_rxs, temp_txs;
	unsigned long temp_rx;
	unsigned long temp_tx;

	/* zeroed allocation so that optional flags default to off */
	vp->transport_data = kzalloc(
		sizeof(struct uml_l2tpv3_data), GFP_KERNEL);
	if (vp->transport_data == NULL)
		return -ENOMEM;

	td = vp->transport_data;

	vp->form_header = &l2tpv3_form_header;
	vp->verify_header = &l2tpv3_verify_header;
	td->counter = 0;

	vp->header_size = 4;
	td->session_offset = 0;
	td->cookie_offset = 4;
	td->counter_offset = 4;

	td->ipv6 = false;
	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
		if (temp_int > 0)
			td->ipv6 = true;
	}

	if (get_uint_param(vp->parsed, "rx_session", &temp_rxs)) {
		if (get_uint_param(vp->parsed, "tx_session", &temp_txs)) {
			td->tx_session = cpu_to_be32(temp_txs);
			td->rx_session = cpu_to_be32(temp_rxs);
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	td->cookie_is_64 = false;
	if (get_uint_param(vp->parsed, "cookie64", &temp_int)) {
		if (temp_int > 0)
			td->cookie_is_64 = true;
	}
	td->cookie = false;
	if (get_ulong_param(vp->parsed, "rx_cookie", &temp_rx)) {
		if (get_ulong_param(vp->parsed, "tx_cookie", &temp_tx)) {
			td->cookie = true;
			if (td->cookie_is_64) {
				td->rx_cookie = cpu_to_be64(temp_rx);
				td->tx_cookie = cpu_to_be64(temp_tx);
				vp->header_size += 8;
				td->counter_offset += 8;
			} else {
				td->rx_cookie = cpu_to_be32(temp_rx);
				td->tx_cookie = cpu_to_be32(temp_tx);
				vp->header_size += 4;
				td->counter_offset += 4;
			}
		} else {
			return -EINVAL;
		}
	}

	td->has_counter = false;
	if (get_uint_param(vp->parsed, "counter", &temp_int)) {
		if (temp_int > 0) {
			td->has_counter = true;
			vp->header_size += 4;
			if (get_uint_param(
				vp->parsed, "pin_counter", &temp_int)) {
				if (temp_int > 0)
					td->pin_counter = true;
			}
		}
	}

	if (get_uint_param(vp->parsed, "udp", &temp_int)) {
		if (temp_int > 0) {
			td->udp = true;
			vp->header_size += 4;
			td->counter_offset += 4;
			td->session_offset += 4;
			td->cookie_offset += 4;
		}
	}

	vp->rx_header_size = vp->header_size;
	if ((!td->ipv6) && (!td->udp))
		vp->rx_header_size += sizeof(struct iphdr);

	return 0;
}

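/* Enable vnet headers on both raw socket fds; only then is it safe to
 * install the virtio_net_hdr callbacks and advertise TSO/GRO and
 * checksum offloads to the stack.
 */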
static int build_raw_transport_data(struct vector_private *vp)
{
	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
		if (!uml_raw_enable_vnet_headers(vp->fds->tx_fd))
			return -1;
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |= (NETIF_F_TSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"raw: using vnet headers for tso and tx/rx checksum"
		);
	}
	return 0;
}

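/* Same as the raw case, except that the rx side is a raw socket while
 * the tx side is a tap fd, each enabled through its own helper.
 */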
static int build_tap_transport_data(struct vector_private *vp)
{
	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |=
			(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"tap/raw: using vnet headers for tso and tx/rx checksum"
		);
	} else {
		return 0; /* do not try to enable tap too if raw failed */
	}
	if (uml_tap_enable_vnet_headers(vp->fds->tx_fd))
		return 0;
	return -1;
}

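/*
 * Pick a transport based on the "transport=" option of the vector
 * device string. As an illustrative example built from the option
 * names parsed above (the full syntax is documented with the vector
 * driver), a GRE transport could be requested as:
 *
 *	vec0:transport=gre,rx_key=1,tx_key=2,sequence=1
 */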
int build_transport_data(struct vector_private *vp)
{
	char *transport = uml_vector_fetch_arg(vp->parsed, "transport");

	if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
		return build_gre_transport_data(vp);
	if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
		return build_l2tpv3_transport_data(vp);
	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
		return build_raw_transport_data(vp);
	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
		return build_tap_transport_data(vp);
	return 0;
}