
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
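/* validate_xmit_xfrm - called from validate_xmit_skb() late in the TX
 * path.  Packets for inbound or GRO states pass through untouched; if
 * the per-cpu xfrm backlog still holds deferred packets, the caller is
 * asked to retry (*again) so ordering is preserved.  GSO packets that
 * were rerouted away from the offload device are segmented in software,
 * and the outer mode and type_offload xmit handlers are run on each
 * resulting skb before it is handed to the driver.
 */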
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
        int err;
        unsigned long flags;
        struct xfrm_state *x;
        struct sk_buff *skb2;
        struct softnet_data *sd;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!xo)
                return skb;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        x = skb->sp->xvec[skb->sp->len - 1];
        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
                return skb;

        local_irq_save(flags);
        sd = this_cpu_ptr(&softnet_data);
        err = !skb_queue_empty(&sd->xfrm_backlog);
        local_irq_restore(flags);

        if (err) {
                *again = true;
                return skb;
        }

        if (skb_is_gso(skb)) {
                struct net_device *dev = skb->dev;

                if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
                        struct sk_buff *segs;

                        /* Packet got rerouted, fixup features and segment it. */
                        esp_features = esp_features & ~(NETIF_F_HW_ESP
                                                        | NETIF_F_GSO_ESP);

                        segs = skb_gso_segment(skb, esp_features);
                        if (IS_ERR(segs)) {
                                kfree_skb(skb);
                                atomic_long_inc(&dev->tx_dropped);
                                return NULL;
                        } else {
                                consume_skb(skb);
                                skb = segs;
                        }
                }
        }

        if (!skb->next) {
                x->outer_mode->xmit(x, skb);

                xo->flags |= XFRM_DEV_RESUME;

                err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
                        if (err == -EINPROGRESS)
                                return NULL;

                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return NULL;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));

                return skb;
        }

        skb2 = skb;

        do {
                struct sk_buff *nskb = skb2->next;
                skb2->next = NULL;

                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_DEV_RESUME;

                x->outer_mode->xmit(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (!err) {
                        skb2->next = nskb;
                } else if (err != -EINPROGRESS) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        skb2->next = nskb;
                        kfree_skb_list(skb2);
                        return NULL;
                } else {
                        if (skb == skb2)
                                skb = nskb;

                        if (!skb)
                                return NULL;

                        goto skip_push;
                }

                skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
                skb2 = nskb;
        } while (skb2);

        return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
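
/* xfrm_dev_state_add - bind a new state to an offload-capable device and
 * ask the driver to install it in hardware.  Returning 0 while leaving
 * xso->dev NULL is not an error: the state simply falls back to software
 * processing.
 */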
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo)
{
        int err;
        struct dst_entry *dst;
        struct net_device *dev;
        struct xfrm_state_offload *xso = &x->xso;
        xfrm_address_t *saddr;
        xfrm_address_t *daddr;

        if (!x->type_offload)
                return -EINVAL;

        /* We don't yet support UDP encapsulation and TFC padding. */
        if (x->encap || x->tfcpad)
                return -EINVAL;

        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
                        saddr = &x->props.saddr;
                        daddr = &x->id.daddr;
                } else {
                        saddr = &x->id.daddr;
                        daddr = &x->props.saddr;
                }

                dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
                                        x->props.family, x->props.output_mark);
                if (IS_ERR(dst))
                        return 0;

                dev = dst->dev;

                dev_hold(dev);
                dst_release(dst);
        }

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                dev_put(dev);
                return 0;
        }

        if (x->props.flags & XFRM_STATE_ESN &&
            !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
                xso->dev = NULL;
                dev_put(dev);
                return -EINVAL;
        }

        xso->dev = dev;
        xso->num_exthdrs = 1;
        xso->flags = xuo->flags;

        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
                xso->dev = NULL;
                dev_put(dev);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
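
/* xfrm_dev_offload_ok - check whether this skb can be passed to the
 * device as-is for hardware ESP processing, i.e. it fits the path MTU
 * (or validates as GSO) and the driver does not veto it.
 */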
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        int mtu;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct net_device *dev = x->xso.dev;

        if (!x->type_offload || x->encap)
                return false;

        if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
            (!xdst->child->xfrm && x->type->get_mtu)) {
                mtu = x->type->get_mtu(x, xdst->child_mtu_cached);

                if (skb->len <= mtu)
                        goto ok;

                if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                        goto ok;
        }

        return false;

ok:
        if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
                return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

        return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
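
/* xfrm_dev_resume - resume transmission of a packet whose ESP processing
 * completed asynchronously.  If the queue cannot take the packet right
 * now, park it on the per-cpu xfrm backlog and kick the TX softirq to
 * retry later.
 */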
void xfrm_dev_resume(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret = NETDEV_TX_BUSY;
        struct netdev_queue *txq;
        struct softnet_data *sd;
        unsigned long flags;

        rcu_read_lock();
        txq = netdev_pick_tx(dev, skb, NULL);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
        HARD_TX_UNLOCK(dev, txq);

        if (!dev_xmit_complete(ret)) {
                local_irq_save(flags);
                sd = this_cpu_ptr(&softnet_data);
                skb_queue_tail(&sd->xfrm_backlog, skb);
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);
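
/* xfrm_dev_backlog - drain the per-cpu backlog of deferred packets; runs
 * from the TX softirq.
 */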
void xfrm_dev_backlog(struct softnet_data *sd)
{
        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
        struct sk_buff_head list;
        struct sk_buff *skb;

        if (skb_queue_empty(xfrm_backlog))
                return;

        __skb_queue_head_init(&list);

        spin_lock(&xfrm_backlog->lock);
        skb_queue_splice_init(xfrm_backlog, &list);
        spin_unlock(&xfrm_backlog->lock);

        while (!skb_queue_empty(&list)) {
                skb = __skb_dequeue(&list);
                xfrm_dev_resume(skb);
        }
}
#endif
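
/* Sanity-check offload-related features against the driver's xfrmdev_ops
 * from the netdevice notifier: ESP TX checksum offload requires ESP
 * offload, ESP offload requires the state add/delete callbacks, and
 * without CONFIG_XFRM_OFFLOAD no ESP feature flag may be set at all.
 */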
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;

        if ((dev->features & NETIF_F_HW_ESP) &&
            (!(dev->xfrmdev_ops &&
               dev->xfrmdev_ops->xdo_dev_state_add &&
               dev->xfrmdev_ops->xdo_dev_state_delete)))
                return NOTIFY_BAD;
#else
        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
                return NOTIFY_BAD;
#endif

        return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_unregister(struct net_device *dev)
{
        xfrm_policy_cache_flush();
        return NOTIFY_DONE;
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
        if (dev->features & NETIF_F_HW_ESP)
                xfrm_dev_state_flush(dev_net(dev), dev, true);

        xfrm_policy_cache_flush();
        return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                return xfrm_dev_register(dev);

        case NETDEV_UNREGISTER:
                return xfrm_dev_unregister(dev);

        case NETDEV_FEAT_CHANGE:
                return xfrm_dev_feat_change(dev);

        case NETDEV_DOWN:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
        register_netdevice_notifier(&xfrm_dev_notifier);
}
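
/* Illustrative sketch (not part of this file): roughly how a NIC driver
 * would plug into the hooks used above.  The foo_* names are hypothetical;
 * the xfrmdev_ops callbacks and the NETIF_F_HW_ESP feature flag are the
 * real interface checked by xfrm_api_check() and xfrm_dev_state_add().
 *
 *	static int foo_xdo_dev_state_add(struct xfrm_state *x)
 *	{
 *		// Program the SA (keys, SPI, addresses) into the hardware
 *		// and remember a non-zero handle for later TX matching.
 *		x->xso.offload_handle = foo_install_sa(x);
 *		return x->xso.offload_handle ? 0 : -ENOSPC;
 *	}
 *
 *	static void foo_xdo_dev_state_delete(struct xfrm_state *x)
 *	{
 *		foo_remove_sa(x->xso.offload_handle);
 *	}
 *
 *	static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *		.xdo_dev_state_add	= foo_xdo_dev_state_add,
 *		.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
 *	};
 *
 * and in the driver's probe routine:
 *
 *	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *	netdev->features |= NETIF_F_HW_ESP;
 */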