#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/highmem.h>

struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

struct esp_output_extra {
        __be32 seqhi;
        u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the extra data and the IV are placed at
 * the front, followed by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
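/*
 * Resulting buffer layout (each region aligned as required; the
 * esp_tmp_*() helpers below recover the individual pointers):
 *
 *   [extra (ESN seqhi/esphoff)][IV][struct aead_request + req ctx][SG entries]
 */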
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
        unsigned int len;

        len = extralen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}
static inline void *esp_tmp_extra(void *tmp)
{
        return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + extralen,
                         crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
        struct esp_output_extra *extra = esp_tmp_extra(tmp);
        struct crypto_aead *aead = x->data;
        int extralen = 0;
        u8 *iv;
        struct aead_request *req;
        struct scatterlist *sg;

        if (x->props.flags & XFRM_STATE_ESN)
                extralen += sizeof(*extra);

        iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);

        /* Unref skb_frag_pages in the src scatterlist if necessary.
         * Skip the first sg which comes from skb->data.
         */
        if (req->src != req->dst)
                for (sg = sg_next(req->src); sg; sg = sg_next(sg))
                        put_page(sg_page(sg));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;
        struct xfrm_offload *xo = xfrm_offload(skb);
        void *tmp;
        struct xfrm_state *x;

        if (xo && (xo->flags & XFRM_DEV_RESUME))
                x = skb->sp->xvec[skb->sp->len - 1];
        else
                x = skb_dst(skb)->xfrm;

        tmp = ESP_SKB_CB(skb)->tmp;
        esp_ssg_unref(x, tmp);
        kfree(tmp);

        if (xo && (xo->flags & XFRM_DEV_RESUME)) {
                if (err) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));
                secpath_reset(skb);
                xfrm_dev_resume(skb);
        } else {
                xfrm_output_resume(skb, err);
        }
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
        struct ip_esp_hdr *esph = (void *)(skb->data + offset);
        void *tmp = ESP_SKB_CB(skb)->tmp;
        __be32 *seqhi = esp_tmp_extra(tmp);

        esph->seq_no = esph->spi;
        esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
        void *tmp = ESP_SKB_CB(skb)->tmp;
        struct esp_output_extra *extra = esp_tmp_extra(tmp);

        esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
                                sizeof(__be32));
}
static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
                                               struct xfrm_state *x,
                                               struct ip_esp_hdr *esph,
                                               struct esp_output_extra *extra)
{
        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * encryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                __u32 seqhi;
                struct xfrm_offload *xo = xfrm_offload(skb);

                if (xo)
                        seqhi = xo->seq.hi;
                else
                        seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

                extra->esphoff = (unsigned char *)esph -
                                 skb_transport_header(skb);
                esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
                extra->seqhi = esph->spi;
                esph->seq_no = htonl(seqhi);
        }

        esph->spi = x->id.spi;

        return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_output_restore_header(skb);
        esp_output_done(base, err);
}
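/*
 * Build the ESP trailer (RFC 4303, section 2.4): optional TFC padding
 * (zeroes), then the self-describing pad bytes 1, 2, ..., followed by
 * the pad length and next header octets:
 *
 *   [TFC pad][1][2]...[plen - 2][pad len][next hdr]
 */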
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
        /* Fill padding... */
        if (tfclen) {
                memset(tail, 0, tfclen);
                tail += tfclen;
        }
        do {
                int i;
                for (i = 0; i < plen - 2; i++)
                        tail[i] = i + 1;
        } while (0);
        tail[plen - 2] = plen - 2;
        tail[plen - 1] = proto;
}
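/*
 * UDP encapsulation for NAT traversal (RFC 3948): with UDP_ENCAP_ESPINUDP
 * the ESP header directly follows the UDP header; UDP_ENCAP_ESPINUDP_NON_IKE
 * (an older draft format) additionally inserts two zero words between them.
 */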
static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        int encap_type;
        struct udphdr *uh;
        __be32 *udpdata32;
        __be16 sport, dport;
        struct xfrm_encap_tmpl *encap = x->encap;
        struct ip_esp_hdr *esph = esp->esph;

        spin_lock_bh(&x->lock);
        sport = encap->encap_sport;
        dport = encap->encap_dport;
        encap_type = encap->encap_type;
        spin_unlock_bh(&x->lock);

        uh = (struct udphdr *)esph;
        uh->source = sport;
        uh->dest = dport;
        uh->len = htons(skb->len + esp->tailen
                        - skb_transport_offset(skb));
        uh->check = 0;

        switch (encap_type) {
        default:
        case UDP_ENCAP_ESPINUDP:
                esph = (struct ip_esp_hdr *)(uh + 1);
                break;
        case UDP_ENCAP_ESPINUDP_NON_IKE:
                udpdata32 = (__be32 *)(uh + 1);
                udpdata32[0] = udpdata32[1] = 0;
                esph = (struct ip_esp_hdr *)(udpdata32 + 2);
                break;
        }

        *skb_mac_header(skb) = IPPROTO_UDP;
        esp->esph = esph;
}
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *tail;
        u8 *vaddr;
        int nfrags;
        int esph_offset;
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;

        /* this is non-NULL only with UDP Encapsulation */
        if (x->encap)
                esp_output_udp_encap(x, skb, esp);

        if (!skb_cloned(skb)) {
                if (tailen <= skb_tailroom(skb)) {
                        nfrags = 1;
                        trailer = skb;
                        tail = skb_tail_pointer(trailer);

                        goto skip_cow;
                } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
                           && !skb_has_frag_list(skb)) {
                        int allocsize;
                        struct sock *sk = skb->sk;
                        struct page_frag *pfrag = &x->xfrag;

                        esp->inplace = false;

                        allocsize = ALIGN(tailen, L1_CACHE_BYTES);

                        spin_lock_bh(&x->lock);

                        if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                                spin_unlock_bh(&x->lock);
                                goto cow;
                        }

                        page = pfrag->page;
                        get_page(page);

                        vaddr = kmap_atomic(page);

                        tail = vaddr + pfrag->offset;

                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

                        kunmap_atomic(vaddr);

                        nfrags = skb_shinfo(skb)->nr_frags;

                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
                                             tailen);
                        skb_shinfo(skb)->nr_frags = ++nfrags;

                        pfrag->offset = pfrag->offset + allocsize;

                        spin_unlock_bh(&x->lock);

                        /* count one extra scatterlist entry for skb->data */
                        nfrags++;

                        skb->len += tailen;
                        skb->data_len += tailen;
                        skb->truesize += tailen;
                        if (sk)
                                refcount_add(tailen, &sk->sk_wmem_alloc);

                        goto out;
                }
        }

cow:
        esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

        nfrags = skb_cow_data(skb, tailen, &trailer);
        if (nfrags < 0)
                goto out;
        tail = skb_tail_pointer(trailer);
        esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
        pskb_put(skb, trailer, tailen);

out:
        return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);
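/*
 * Map the packet into source (and, when the trailer was written into a
 * separate page fragment, destination) scatterlists, attach the IV built
 * from the sequence number and hand the whole thing to the AEAD transform.
 */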
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
        u8 *iv;
        int alen;
        void *tmp;
        int ivlen;
        int assoclen;
        int extralen;
        struct page *page;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_request *req;
        struct scatterlist *sg, *dsg;
        struct esp_output_extra *extra;
        int err = -ENOMEM;

        assoclen = sizeof(struct ip_esp_hdr);
        extralen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                extralen += sizeof(*extra);
                assoclen += sizeof(__be32);
        }

        aead = x->data;
        alen = crypto_aead_authsize(aead);
        ivlen = crypto_aead_ivsize(aead);

        tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
        if (!tmp)
                goto error;

        extra = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        if (esp->inplace)
                dsg = sg;
        else
                dsg = &sg[esp->nfrags];

        esph = esp_output_set_extra(skb, x, esp->esph, extra);
        esp->esph = esph;

        sg_init_table(sg, esp->nfrags);
        err = skb_to_sgvec(skb, sg,
                           (unsigned char *)esph - skb->data,
                           assoclen + ivlen + esp->clen + alen);
        if (unlikely(err < 0))
                goto error_free;

        if (!esp->inplace) {
                int allocsize;
                struct page_frag *pfrag = &x->xfrag;

                allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

                spin_lock_bh(&x->lock);
                if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                        spin_unlock_bh(&x->lock);
                        goto error_free;
                }

                skb_shinfo(skb)->nr_frags = 1;

                page = pfrag->page;
                get_page(page);
                /* replace page frags in skb with new page */
                __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
                pfrag->offset = pfrag->offset + allocsize;
                spin_unlock_bh(&x->lock);

                sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
                err = skb_to_sgvec(skb, dsg,
                                   (unsigned char *)esph - skb->data,
                                   assoclen + ivlen + esp->clen + alen);
                if (unlikely(err < 0))
                        goto error_free;
        }

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_output_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_output_done, skb);

        aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
        aead_request_set_ad(req, assoclen);

        /* Use the low-order bytes of the 64-bit sequence number as IV;
         * seqno is big endian, so take them from its tail.
         */
        memset(iv, 0, ivlen);
        memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
               min(ivlen, 8));

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_encrypt(req);

        switch (err) {
        case -EINPROGRESS:
                goto error;

        case -ENOSPC:
                err = NET_XMIT_DROP;
                break;

        case 0:
                if ((x->props.flags & XFRM_STATE_ESN))
                        esp_output_restore_header(skb);
        }

        if (sg != dsg)
                esp_ssg_unref(x, tmp);

error_free:
        kfree(tmp);
error:
        return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);
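/*
 * Plain (non-GSO) transmit path.  clen is the payload length rounded up
 * to the cipher block size, including the two pad-length/next-header
 * bytes; plen is the resulting pad length and tailen the complete
 * trailer including the ICV.
 */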
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int alen;
        int blksize;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;

        esp.inplace = true;

        esp.proto = *skb_mac_header(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        esp.tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        esp.nfrags = esp_output_head(x, skb, &esp);
        if (esp.nfrags < 0)
                return esp.nfrags;

        esph = esp.esph;
        esph->spi = x->id.spi;

        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
        esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
                                ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

        skb_push(skb, -skb_network_offset(skb));

        return esp_output_tail(x, skb, &esp);
}
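/*
 * Strip the ESP trailer after decryption: validate the pad length, trim
 * padding and ICV (adjusting a CHECKSUM_COMPLETE value accordingly) and
 * return the next header octet, or xo->proto when the device already
 * removed the trailer.
 */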
static inline int esp_remove_trailer(struct sk_buff *skb)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int alen, hlen, elen;
        int padlen, trimlen;
        __wsum csumdiff;
        u8 nexthdr[2];
        int ret;

        alen = crypto_aead_authsize(aead);
        hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        elen = skb->len - hlen;

        if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
                ret = xo->proto;
                goto out;
        }

        if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
                BUG();

        ret = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen) {
                net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
                                    padlen + 2, elen - alen);
                goto out;
        }

        trimlen = alen + padlen + 2;
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
                skb->csum = csum_block_sub(skb->csum, csumdiff,
                                           skb->len - trimlen);
        }
        pskb_trim(skb, skb->len - trimlen);

        ret = nexthdr[1];

out:
        return ret;
}
int esp_input_done2(struct sk_buff *skb, int err)
{
        const struct iphdr *iph;
        struct xfrm_state *x = xfrm_input_state(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct crypto_aead *aead = x->data;
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int ihl;

        if (!xo || !(xo->flags & CRYPTO_DONE))
                kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        err = esp_remove_trailer(skb);
        if (unlikely(err < 0))
                goto out;

        iph = ip_hdr(skb);
        ihl = iph->ihl * 4;

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;
                struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

                /*
                 * 1) if the NAT-T peer's IP or port changed then
                 *    advertise the change to the keying daemon.
                 *    This is an inbound SA, so just compare
                 *    SRC ports.
                 */
                if (iph->saddr != x->props.saddr.a4 ||
                    uh->source != encap->encap_sport) {
                        xfrm_address_t ipaddr;

                        ipaddr.a4 = iph->saddr;
                        km_new_mapping(x, &ipaddr, uh->source);

                        /* XXX: perhaps add an extra
                         * policy check here, to see
                         * if we should allow or
                         * reject a packet from a
                         * different source
                         * address/port.
                         */
                }

                /*
                 * 2) ignore UDP/TCP checksums in case
                 *    of NAT-T in Transport Mode, or
                 *    perform other post-processing fixes
                 *    as per draft-ietf-ipsec-udp-encaps-06,
                 *    section 3.1.2
                 */
                if (x->props.mode == XFRM_MODE_TRANSPORT)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        skb_pull_rcsum(skb, hlen);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -ihl);

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);
static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
        esp_restore_header(skb, 0);
        __skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct ip_esp_hdr *esph;

        /* For ESN we move the header forward by 4 bytes to
         * accommodate the high bits.  We will move it back after
         * decryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                esph = skb_push(skb, 4);
                *seqhi = esph->spi;
                esph->spi = esph->seq_no;
                esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
        }
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        esp_input_restore_header(skb);
        esp_input_done(base, err);
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct aead_request *req;
        struct sk_buff *trailer;
        int ivlen = crypto_aead_ivsize(aead);
        int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
        int nfrags;
        int assoclen;
        int seqhilen;
        __be32 *seqhi;
        void *tmp;
        u8 *iv;
        struct scatterlist *sg;
        int err = -EINVAL;

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
                goto out;

        if (elen <= 0)
                goto out;

        assoclen = sizeof(struct ip_esp_hdr);
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        if (!skb_cloned(skb)) {
                if (!skb_is_nonlinear(skb)) {
                        nfrags = 1;

                        goto skip_cow;
                } else if (!skb_has_frag_list(skb)) {
                        nfrags = skb_shinfo(skb)->nr_frags;
                        nfrags++;

                        goto skip_cow;
                }
        }

        err = skb_cow_data(skb, 0, &trailer);
        if (err < 0)
                goto out;

        nfrags = err;

skip_cow:
        err = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);

        esp_input_set_header(skb, seqhi);

        sg_init_table(sg, nfrags);
        err = skb_to_sgvec(skb, sg, 0, skb->len);
        if (unlikely(err < 0)) {
                kfree(tmp);
                goto out;
        }

        skb->ip_summed = CHECKSUM_NONE;

        if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_input_done_esn, skb);
        else
                aead_request_set_callback(req, 0, esp_input_done, skb);

        aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
        aead_request_set_ad(req, assoclen);

        err = crypto_aead_decrypt(req);
        if (err == -EINPROGRESS)
                goto out;

        if ((x->props.flags & XFRM_STATE_ESN))
                esp_input_restore_header(skb);

        err = esp_input_done2(skb, err);

out:
        return err;
}
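/*
 * Largest payload that still fits the given MTU: subtract the ESP header
 * and ICV overhead, round down to the cipher block size (padding always
 * fills up to a block boundary) and reserve the two trailer octets.
 */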
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
        struct crypto_aead *aead = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        unsigned int net_adj;

        switch (x->props.mode) {
        case XFRM_MODE_TRANSPORT:
        case XFRM_MODE_BEET:
                net_adj = sizeof(struct iphdr);
                break;
        case XFRM_MODE_TUNNEL:
                net_adj = 0;
                break;
        default:
                BUG();
        }

        return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
                 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
static int esp4_err(struct sk_buff *skb, u32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
        struct xfrm_state *x;

        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                        return 0;
                /* fall through */
        case ICMP_REDIRECT:
                break;
        default:
                return 0;
        }

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET);
        if (!x)
                return 0;

        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
        else
                ipv4_redirect(skb, net, 0, IPPROTO_ESP);
        xfrm_state_put(x);

        return 0;
}
static void esp_destroy(struct xfrm_state *x)
{
        struct crypto_aead *aead = x->data;

        if (!aead)
                return;

        crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;

        err = -ENAMETOOLONG;
        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;

        aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}
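/*
 * authenc(...) transforms take a single key blob: an rtattr carrying the
 * crypto_authenc_key_param (encryption key length) followed by the raw
 * authentication key and then the encryption key.  Assemble it here.
 */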
static int esp_init_authenc(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (!x->ealg)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthencesn(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthenc(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        pr_info("ESP: %s digestsize %u != %hu\n",
                                x->aalg->alg_name,
                                crypto_aead_authsize(aead),
                                aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}
static int esp_init_state(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        u32 align;
        int err;

        x->data = NULL;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = x->data;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
                x->props.header_len += IPV4_BEET_PHMAXLEN;
        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        err = -EINVAL;
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
                        break;
                }
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        /* worst case: blksize - 1 pad bytes plus the two trailer bytes */
        x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
        return err;
}
static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}

static const struct xfrm_type esp_type =
{
        .description    = "ESP4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp_init_state,
        .destructor     = esp_destroy,
        .get_mtu        = esp4_get_mtu,
        .input          = esp_input,
        .output         = esp_output,
};

static struct xfrm4_protocol esp4_protocol = {
        .handler        = xfrm4_rcv,
        .input_handler  = xfrm_input,
        .cb_handler     = esp4_rcv_cb,
        .err_handler    = esp4_err,
        .priority       = 0,
};

static int __init esp4_init(void)
{
        if (xfrm_register_type(&esp_type, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp_type, AF_INET);
                return -EAGAIN;
        }
        return 0;
}

static void __exit esp4_fini(void)
{
        if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);