/*
 * crypto.c - Ceph crypto key handling and AES-CBC encrypt/decrypt helpers.
 */
  1. #include <linux/ceph/ceph_debug.h>
  2. #include <linux/err.h>
  3. #include <linux/scatterlist.h>
  4. #include <linux/slab.h>
  5. #include <crypto/aes.h>
  6. #include <crypto/skcipher.h>
  7. #include <linux/key-type.h>
  8. #include <keys/ceph-type.h>
  9. #include <keys/user-type.h>
  10. #include <linux/ceph/decode.h>
  11. #include "crypto.h"
  12. int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
  13. const struct ceph_crypto_key *src)
  14. {
  15. memcpy(dst, src, sizeof(struct ceph_crypto_key));
  16. dst->key = kmemdup(src->key, src->len, GFP_NOFS);
  17. if (!dst->key)
  18. return -ENOMEM;
  19. return 0;
  20. }
/*
 * Serialize @key into the buffer at *p, bounded by @end:
 * type (u16), creation time, key length (u16), then the raw key bytes.
 * *p is advanced past the encoded data.
 * Returns 0 on success, -ERANGE if the buffer is too small.
 */
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
	if (*p + sizeof(u16) + sizeof(key->created) +
	    sizeof(u16) + key->len > end)
		return -ERANGE;
	ceph_encode_16(p, key->type);
	ceph_encode_copy(p, &key->created, sizeof(key->created));
	ceph_encode_16(p, key->len);
	ceph_encode_copy(p, key->key, key->len);
	return 0;
}
/*
 * Parse a wire-format crypto key from *p (bounded by @end) into @key,
 * allocating a buffer for the key material (caller owns key->key).
 * *p is advanced past the consumed bytes.
 * Returns 0 on success, -EINVAL on a truncated buffer, -ENOMEM on
 * allocation failure.
 */
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	/* fixed-size header: type (u16), created stamp, key length (u16) */
	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	/* make sure the advertised payload actually fits in the buffer */
	ceph_decode_need(p, end, key->len, bad);
	key->key = kmalloc(key->len, GFP_NOFS);
	if (!key->key)
		return -ENOMEM;
	ceph_decode_copy(p, key->key, key->len);
	return 0;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}
/*
 * Decode a base64-armored key string @inkey into @key.
 * The armored text is first unarmored into a temporary buffer, then
 * parsed with ceph_crypto_key_decode().
 * Returns 0 on success or a negative errno.
 */
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
	int inlen = strlen(inkey);
	int blen = inlen * 3 / 4;	/* upper bound on decoded size */
	void *buf, *p;
	int ret;

	dout("crypto_key_unarmor %s\n", inkey);
	buf = kmalloc(blen, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	/* ceph_unarmor() returns the actual decoded length, or < 0 on error */
	blen = ceph_unarmor(buf, inkey, inkey+inlen);
	if (blen < 0) {
		kfree(buf);
		return blen;
	}
	p = buf;
	ret = ceph_crypto_key_decode(key, &p, p + blen);
	kfree(buf);
	if (ret)
		return ret;
	dout("crypto_key_unarmor key %p type %d len %d\n", key,
	     key->type, key->len);
	return 0;
}
  72. static struct crypto_skcipher *ceph_crypto_alloc_cipher(void)
  73. {
  74. return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
  75. }
  76. static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
/*
 * Build a scatterlist table over @buf (@buf_len bytes).
 *
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 *
 * Returns 0 on success, -EINVAL for an empty buffer, or the error from
 * sg_alloc_table().
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	if (buf_len == 0) {
		/* zero sgt so teardown_sgtable() on it is a no-op */
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	if (is_vmalloc) {
		/* vmalloc memory is only virtually contiguous: one sg per page */
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		/* single chunk: use the caller-supplied sg, no allocation */
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		/* only the first chunk may start at a non-zero page offset */
		off = 0;
		buf += len;
		buf_len -= len;
	}
	WARN_ON(buf_len != 0);
	return 0;
}
  132. static void teardown_sgtable(struct sg_table *sgt)
  133. {
  134. if (sgt->orig_nents > 1)
  135. sg_free_table(sgt);
  136. }
  137. static int ceph_aes_encrypt(const void *key, int key_len,
  138. void *dst, size_t *dst_len,
  139. const void *src, size_t src_len)
  140. {
  141. struct scatterlist sg_in[2], prealloc_sg;
  142. struct sg_table sg_out;
  143. struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
  144. SKCIPHER_REQUEST_ON_STACK(req, tfm);
  145. int ret;
  146. char iv[AES_BLOCK_SIZE];
  147. size_t zero_padding = (0x10 - (src_len & 0x0f));
  148. char pad[16];
  149. if (IS_ERR(tfm))
  150. return PTR_ERR(tfm);
  151. memset(pad, zero_padding, zero_padding);
  152. *dst_len = src_len + zero_padding;
  153. sg_init_table(sg_in, 2);
  154. sg_set_buf(&sg_in[0], src, src_len);
  155. sg_set_buf(&sg_in[1], pad, zero_padding);
  156. ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
  157. if (ret)
  158. goto out_tfm;
  159. crypto_skcipher_setkey((void *)tfm, key, key_len);
  160. memcpy(iv, aes_iv, AES_BLOCK_SIZE);
  161. skcipher_request_set_tfm(req, tfm);
  162. skcipher_request_set_callback(req, 0, NULL, NULL);
  163. skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
  164. src_len + zero_padding, iv);
  165. /*
  166. print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
  167. key, key_len, 1);
  168. print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
  169. src, src_len, 1);
  170. print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
  171. pad, zero_padding, 1);
  172. */
  173. ret = crypto_skcipher_encrypt(req);
  174. skcipher_request_zero(req);
  175. if (ret < 0) {
  176. pr_err("ceph_aes_crypt failed %d\n", ret);
  177. goto out_sg;
  178. }
  179. /*
  180. print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
  181. dst, *dst_len, 1);
  182. */
  183. out_sg:
  184. teardown_sgtable(&sg_out);
  185. out_tfm:
  186. crypto_free_skcipher(tfm);
  187. return ret;
  188. }
  189. static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
  190. size_t *dst_len,
  191. const void *src1, size_t src1_len,
  192. const void *src2, size_t src2_len)
  193. {
  194. struct scatterlist sg_in[3], prealloc_sg;
  195. struct sg_table sg_out;
  196. struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
  197. SKCIPHER_REQUEST_ON_STACK(req, tfm);
  198. int ret;
  199. char iv[AES_BLOCK_SIZE];
  200. size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
  201. char pad[16];
  202. if (IS_ERR(tfm))
  203. return PTR_ERR(tfm);
  204. memset(pad, zero_padding, zero_padding);
  205. *dst_len = src1_len + src2_len + zero_padding;
  206. sg_init_table(sg_in, 3);
  207. sg_set_buf(&sg_in[0], src1, src1_len);
  208. sg_set_buf(&sg_in[1], src2, src2_len);
  209. sg_set_buf(&sg_in[2], pad, zero_padding);
  210. ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
  211. if (ret)
  212. goto out_tfm;
  213. crypto_skcipher_setkey((void *)tfm, key, key_len);
  214. memcpy(iv, aes_iv, AES_BLOCK_SIZE);
  215. skcipher_request_set_tfm(req, tfm);
  216. skcipher_request_set_callback(req, 0, NULL, NULL);
  217. skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
  218. src1_len + src2_len + zero_padding, iv);
  219. /*
  220. print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
  221. key, key_len, 1);
  222. print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
  223. src1, src1_len, 1);
  224. print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
  225. src2, src2_len, 1);
  226. print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
  227. pad, zero_padding, 1);
  228. */
  229. ret = crypto_skcipher_encrypt(req);
  230. skcipher_request_zero(req);
  231. if (ret < 0) {
  232. pr_err("ceph_aes_crypt2 failed %d\n", ret);
  233. goto out_sg;
  234. }
  235. /*
  236. print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
  237. dst, *dst_len, 1);
  238. */
  239. out_sg:
  240. teardown_sgtable(&sg_out);
  241. out_tfm:
  242. crypto_free_skcipher(tfm);
  243. return ret;
  244. }
  245. static int ceph_aes_decrypt(const void *key, int key_len,
  246. void *dst, size_t *dst_len,
  247. const void *src, size_t src_len)
  248. {
  249. struct sg_table sg_in;
  250. struct scatterlist sg_out[2], prealloc_sg;
  251. struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
  252. SKCIPHER_REQUEST_ON_STACK(req, tfm);
  253. char pad[16];
  254. char iv[AES_BLOCK_SIZE];
  255. int ret;
  256. int last_byte;
  257. if (IS_ERR(tfm))
  258. return PTR_ERR(tfm);
  259. sg_init_table(sg_out, 2);
  260. sg_set_buf(&sg_out[0], dst, *dst_len);
  261. sg_set_buf(&sg_out[1], pad, sizeof(pad));
  262. ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
  263. if (ret)
  264. goto out_tfm;
  265. crypto_skcipher_setkey((void *)tfm, key, key_len);
  266. memcpy(iv, aes_iv, AES_BLOCK_SIZE);
  267. skcipher_request_set_tfm(req, tfm);
  268. skcipher_request_set_callback(req, 0, NULL, NULL);
  269. skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
  270. src_len, iv);
  271. /*
  272. print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
  273. key, key_len, 1);
  274. print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
  275. src, src_len, 1);
  276. */
  277. ret = crypto_skcipher_decrypt(req);
  278. skcipher_request_zero(req);
  279. if (ret < 0) {
  280. pr_err("ceph_aes_decrypt failed %d\n", ret);
  281. goto out_sg;
  282. }
  283. if (src_len <= *dst_len)
  284. last_byte = ((char *)dst)[src_len - 1];
  285. else
  286. last_byte = pad[src_len - *dst_len - 1];
  287. if (last_byte <= 16 && src_len >= last_byte) {
  288. *dst_len = src_len - last_byte;
  289. } else {
  290. pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
  291. last_byte, (int)src_len);
  292. return -EPERM; /* bad padding */
  293. }
  294. /*
  295. print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
  296. dst, *dst_len, 1);
  297. */
  298. out_sg:
  299. teardown_sgtable(&sg_in);
  300. out_tfm:
  301. crypto_free_skcipher(tfm);
  302. return ret;
  303. }
  304. static int ceph_aes_decrypt2(const void *key, int key_len,
  305. void *dst1, size_t *dst1_len,
  306. void *dst2, size_t *dst2_len,
  307. const void *src, size_t src_len)
  308. {
  309. struct sg_table sg_in;
  310. struct scatterlist sg_out[3], prealloc_sg;
  311. struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
  312. SKCIPHER_REQUEST_ON_STACK(req, tfm);
  313. char pad[16];
  314. char iv[AES_BLOCK_SIZE];
  315. int ret;
  316. int last_byte;
  317. if (IS_ERR(tfm))
  318. return PTR_ERR(tfm);
  319. sg_init_table(sg_out, 3);
  320. sg_set_buf(&sg_out[0], dst1, *dst1_len);
  321. sg_set_buf(&sg_out[1], dst2, *dst2_len);
  322. sg_set_buf(&sg_out[2], pad, sizeof(pad));
  323. ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
  324. if (ret)
  325. goto out_tfm;
  326. crypto_skcipher_setkey((void *)tfm, key, key_len);
  327. memcpy(iv, aes_iv, AES_BLOCK_SIZE);
  328. skcipher_request_set_tfm(req, tfm);
  329. skcipher_request_set_callback(req, 0, NULL, NULL);
  330. skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
  331. src_len, iv);
  332. /*
  333. print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
  334. key, key_len, 1);
  335. print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
  336. src, src_len, 1);
  337. */
  338. ret = crypto_skcipher_decrypt(req);
  339. skcipher_request_zero(req);
  340. if (ret < 0) {
  341. pr_err("ceph_aes_decrypt failed %d\n", ret);
  342. goto out_sg;
  343. }
  344. if (src_len <= *dst1_len)
  345. last_byte = ((char *)dst1)[src_len - 1];
  346. else if (src_len <= *dst1_len + *dst2_len)
  347. last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
  348. else
  349. last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
  350. if (last_byte <= 16 && src_len >= last_byte) {
  351. src_len -= last_byte;
  352. } else {
  353. pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
  354. last_byte, (int)src_len);
  355. return -EPERM; /* bad padding */
  356. }
  357. if (src_len < *dst1_len) {
  358. *dst1_len = src_len;
  359. *dst2_len = 0;
  360. } else {
  361. *dst2_len = src_len - *dst1_len;
  362. }
  363. /*
  364. print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
  365. dst1, *dst1_len, 1);
  366. print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
  367. dst2, *dst2_len, 1);
  368. */
  369. out_sg:
  370. teardown_sgtable(&sg_in);
  371. out_tfm:
  372. crypto_free_skcipher(tfm);
  373. return ret;
  374. }
  375. int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
  376. const void *src, size_t src_len)
  377. {
  378. switch (secret->type) {
  379. case CEPH_CRYPTO_NONE:
  380. if (*dst_len < src_len)
  381. return -ERANGE;
  382. memcpy(dst, src, src_len);
  383. *dst_len = src_len;
  384. return 0;
  385. case CEPH_CRYPTO_AES:
  386. return ceph_aes_decrypt(secret->key, secret->len, dst,
  387. dst_len, src, src_len);
  388. default:
  389. return -EINVAL;
  390. }
  391. }
  392. int ceph_decrypt2(struct ceph_crypto_key *secret,
  393. void *dst1, size_t *dst1_len,
  394. void *dst2, size_t *dst2_len,
  395. const void *src, size_t src_len)
  396. {
  397. size_t t;
  398. switch (secret->type) {
  399. case CEPH_CRYPTO_NONE:
  400. if (*dst1_len + *dst2_len < src_len)
  401. return -ERANGE;
  402. t = min(*dst1_len, src_len);
  403. memcpy(dst1, src, t);
  404. *dst1_len = t;
  405. src += t;
  406. src_len -= t;
  407. if (src_len) {
  408. t = min(*dst2_len, src_len);
  409. memcpy(dst2, src, t);
  410. *dst2_len = t;
  411. }
  412. return 0;
  413. case CEPH_CRYPTO_AES:
  414. return ceph_aes_decrypt2(secret->key, secret->len,
  415. dst1, dst1_len, dst2, dst2_len,
  416. src, src_len);
  417. default:
  418. return -EINVAL;
  419. }
  420. }
  421. int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
  422. const void *src, size_t src_len)
  423. {
  424. switch (secret->type) {
  425. case CEPH_CRYPTO_NONE:
  426. if (*dst_len < src_len)
  427. return -ERANGE;
  428. memcpy(dst, src, src_len);
  429. *dst_len = src_len;
  430. return 0;
  431. case CEPH_CRYPTO_AES:
  432. return ceph_aes_encrypt(secret->key, secret->len, dst,
  433. dst_len, src, src_len);
  434. default:
  435. return -EINVAL;
  436. }
  437. }
  438. int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
  439. const void *src1, size_t src1_len,
  440. const void *src2, size_t src2_len)
  441. {
  442. switch (secret->type) {
  443. case CEPH_CRYPTO_NONE:
  444. if (*dst_len < src1_len + src2_len)
  445. return -ERANGE;
  446. memcpy(dst, src1, src1_len);
  447. memcpy(dst + src1_len, src2, src2_len);
  448. *dst_len = src1_len + src2_len;
  449. return 0;
  450. case CEPH_CRYPTO_AES:
  451. return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
  452. src1, src1_len, src2, src2_len);
  453. default:
  454. return -EINVAL;
  455. }
  456. }
  457. static int ceph_key_preparse(struct key_preparsed_payload *prep)
  458. {
  459. struct ceph_crypto_key *ckey;
  460. size_t datalen = prep->datalen;
  461. int ret;
  462. void *p;
  463. ret = -EINVAL;
  464. if (datalen <= 0 || datalen > 32767 || !prep->data)
  465. goto err;
  466. ret = -ENOMEM;
  467. ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
  468. if (!ckey)
  469. goto err;
  470. /* TODO ceph_crypto_key_decode should really take const input */
  471. p = (void *)prep->data;
  472. ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
  473. if (ret < 0)
  474. goto err_ckey;
  475. prep->payload.data[0] = ckey;
  476. prep->quotalen = datalen;
  477. return 0;
  478. err_ckey:
  479. kfree(ckey);
  480. err:
  481. return ret;
  482. }
  483. static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
  484. {
  485. struct ceph_crypto_key *ckey = prep->payload.data[0];
  486. ceph_crypto_key_destroy(ckey);
  487. kfree(ckey);
  488. }
  489. static void ceph_key_destroy(struct key *key)
  490. {
  491. struct ceph_crypto_key *ckey = key->payload.data[0];
  492. ceph_crypto_key_destroy(ckey);
  493. kfree(ckey);
  494. }
/* "ceph" key type registered with the kernel keys subsystem. */
struct key_type key_type_ceph = {
	.name		= "ceph",
	.preparse	= ceph_key_preparse,
	.free_preparse	= ceph_key_free_preparse,
	.instantiate	= generic_key_instantiate,
	.destroy	= ceph_key_destroy,
};
  502. int ceph_crypto_init(void) {
  503. return register_key_type(&key_type_ceph);
  504. }
  505. void ceph_crypto_shutdown(void) {
  506. unregister_key_type(&key_type_ceph);
  507. }