crypto.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583
  1. #include <linux/ceph/ceph_debug.h>
  2. #include <linux/err.h>
  3. #include <linux/scatterlist.h>
  4. #include <linux/slab.h>
  5. #include <crypto/hash.h>
  6. #include <linux/key-type.h>
  7. #include <keys/ceph-type.h>
  8. #include <keys/user-type.h>
  9. #include <linux/ceph/decode.h>
  10. #include "crypto.h"
  11. int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
  12. const struct ceph_crypto_key *src)
  13. {
  14. memcpy(dst, src, sizeof(struct ceph_crypto_key));
  15. dst->key = kmemdup(src->key, src->len, GFP_NOFS);
  16. if (!dst->key)
  17. return -ENOMEM;
  18. return 0;
  19. }
  20. int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
  21. {
  22. if (*p + sizeof(u16) + sizeof(key->created) +
  23. sizeof(u16) + key->len > end)
  24. return -ERANGE;
  25. ceph_encode_16(p, key->type);
  26. ceph_encode_copy(p, &key->created, sizeof(key->created));
  27. ceph_encode_16(p, key->len);
  28. ceph_encode_copy(p, key->key, key->len);
  29. return 0;
  30. }
/*
 * Decode a ceph_crypto_key from the buffer at *p (bounded by @end),
 * advancing *p: u16 type, created timestamp, u16 length, then that many
 * bytes of key material (allocated here; caller owns key->key).
 *
 * Returns 0 on success, -EINVAL on truncated input (via the
 * ceph_decode_need bound checks jumping to "bad"), -ENOMEM on
 * allocation failure.
 */
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	/* fixed-size header: type, created timestamp, key length */
	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	/* variable-length payload must also fit in the buffer */
	ceph_decode_need(p, end, key->len, bad);
	key->key = kmalloc(key->len, GFP_NOFS);
	if (!key->key)
		return -ENOMEM;
	ceph_decode_copy(p, key->key, key->len);
	return 0;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}
  47. int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
  48. {
  49. int inlen = strlen(inkey);
  50. int blen = inlen * 3 / 4;
  51. void *buf, *p;
  52. int ret;
  53. dout("crypto_key_unarmor %s\n", inkey);
  54. buf = kmalloc(blen, GFP_NOFS);
  55. if (!buf)
  56. return -ENOMEM;
  57. blen = ceph_unarmor(buf, inkey, inkey+inlen);
  58. if (blen < 0) {
  59. kfree(buf);
  60. return blen;
  61. }
  62. p = buf;
  63. ret = ceph_crypto_key_decode(key, &p, p + blen);
  64. kfree(buf);
  65. if (ret)
  66. return ret;
  67. dout("crypto_key_unarmor key %p type %d len %d\n", key,
  68. key->type, key->len);
  69. return 0;
  70. }
#define AES_KEY_SIZE 16

/*
 * Allocate a synchronous AES-CBC block cipher transform.  Caller must
 * check the result with IS_ERR() and release it with
 * crypto_free_blkcipher().
 */
static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
{
	return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}
/* fixed, well-known IV shared by all ceph AES-CBC operations */
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient. No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 *
 * Returns 0 on success, -EINVAL for an empty buffer, or the error from
 * sg_alloc_table().
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	if (buf_len == 0) {
		/* zeroed so a subsequent teardown_sgtable() is a no-op */
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	/*
	 * vmalloc memory is not physically contiguous: use one sg entry
	 * per page.  kmalloc memory gets a single entry covering it all.
	 */
	if (is_vmalloc) {
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		/* single chunk: use the caller-supplied sg, no allocation */
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		off = 0;	/* only the first chunk carries an offset */
		buf += len;
		buf_len -= len;
	}
	/* all of buf must have been consumed by the loop */
	WARN_ON(buf_len != 0);

	return 0;
}
  132. static void teardown_sgtable(struct sg_table *sgt)
  133. {
  134. if (sgt->orig_nents > 1)
  135. sg_free_table(sgt);
  136. }
  137. static int ceph_aes_encrypt(const void *key, int key_len,
  138. void *dst, size_t *dst_len,
  139. const void *src, size_t src_len)
  140. {
  141. struct scatterlist sg_in[2], prealloc_sg;
  142. struct sg_table sg_out;
  143. struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
  144. struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
  145. int ret;
  146. void *iv;
  147. int ivsize;
  148. size_t zero_padding = (0x10 - (src_len & 0x0f));
  149. char pad[16];
  150. if (IS_ERR(tfm))
  151. return PTR_ERR(tfm);
  152. memset(pad, zero_padding, zero_padding);
  153. *dst_len = src_len + zero_padding;
  154. sg_init_table(sg_in, 2);
  155. sg_set_buf(&sg_in[0], src, src_len);
  156. sg_set_buf(&sg_in[1], pad, zero_padding);
  157. ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
  158. if (ret)
  159. goto out_tfm;
  160. crypto_blkcipher_setkey((void *)tfm, key, key_len);
  161. iv = crypto_blkcipher_crt(tfm)->iv;
  162. ivsize = crypto_blkcipher_ivsize(tfm);
  163. memcpy(iv, aes_iv, ivsize);
  164. /*
  165. print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
  166. key, key_len, 1);
  167. print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
  168. src, src_len, 1);
  169. print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
  170. pad, zero_padding, 1);
  171. */
  172. ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
  173. src_len + zero_padding);
  174. if (ret < 0) {
  175. pr_err("ceph_aes_crypt failed %d\n", ret);
  176. goto out_sg;
  177. }
  178. /*
  179. print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
  180. dst, *dst_len, 1);
  181. */
  182. out_sg:
  183. teardown_sgtable(&sg_out);
  184. out_tfm:
  185. crypto_free_blkcipher(tfm);
  186. return ret;
  187. }
  188. static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
  189. size_t *dst_len,
  190. const void *src1, size_t src1_len,
  191. const void *src2, size_t src2_len)
  192. {
  193. struct scatterlist sg_in[3], prealloc_sg;
  194. struct sg_table sg_out;
  195. struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
  196. struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
  197. int ret;
  198. void *iv;
  199. int ivsize;
  200. size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
  201. char pad[16];
  202. if (IS_ERR(tfm))
  203. return PTR_ERR(tfm);
  204. memset(pad, zero_padding, zero_padding);
  205. *dst_len = src1_len + src2_len + zero_padding;
  206. sg_init_table(sg_in, 3);
  207. sg_set_buf(&sg_in[0], src1, src1_len);
  208. sg_set_buf(&sg_in[1], src2, src2_len);
  209. sg_set_buf(&sg_in[2], pad, zero_padding);
  210. ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
  211. if (ret)
  212. goto out_tfm;
  213. crypto_blkcipher_setkey((void *)tfm, key, key_len);
  214. iv = crypto_blkcipher_crt(tfm)->iv;
  215. ivsize = crypto_blkcipher_ivsize(tfm);
  216. memcpy(iv, aes_iv, ivsize);
  217. /*
  218. print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
  219. key, key_len, 1);
  220. print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
  221. src1, src1_len, 1);
  222. print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
  223. src2, src2_len, 1);
  224. print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
  225. pad, zero_padding, 1);
  226. */
  227. ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
  228. src1_len + src2_len + zero_padding);
  229. if (ret < 0) {
  230. pr_err("ceph_aes_crypt2 failed %d\n", ret);
  231. goto out_sg;
  232. }
  233. /*
  234. print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
  235. dst, *dst_len, 1);
  236. */
  237. out_sg:
  238. teardown_sgtable(&sg_out);
  239. out_tfm:
  240. crypto_free_blkcipher(tfm);
  241. return ret;
  242. }
  243. static int ceph_aes_decrypt(const void *key, int key_len,
  244. void *dst, size_t *dst_len,
  245. const void *src, size_t src_len)
  246. {
  247. struct sg_table sg_in;
  248. struct scatterlist sg_out[2], prealloc_sg;
  249. struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
  250. struct blkcipher_desc desc = { .tfm = tfm };
  251. char pad[16];
  252. void *iv;
  253. int ivsize;
  254. int ret;
  255. int last_byte;
  256. if (IS_ERR(tfm))
  257. return PTR_ERR(tfm);
  258. sg_init_table(sg_out, 2);
  259. sg_set_buf(&sg_out[0], dst, *dst_len);
  260. sg_set_buf(&sg_out[1], pad, sizeof(pad));
  261. ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
  262. if (ret)
  263. goto out_tfm;
  264. crypto_blkcipher_setkey((void *)tfm, key, key_len);
  265. iv = crypto_blkcipher_crt(tfm)->iv;
  266. ivsize = crypto_blkcipher_ivsize(tfm);
  267. memcpy(iv, aes_iv, ivsize);
  268. /*
  269. print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
  270. key, key_len, 1);
  271. print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
  272. src, src_len, 1);
  273. */
  274. ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
  275. if (ret < 0) {
  276. pr_err("ceph_aes_decrypt failed %d\n", ret);
  277. goto out_sg;
  278. }
  279. if (src_len <= *dst_len)
  280. last_byte = ((char *)dst)[src_len - 1];
  281. else
  282. last_byte = pad[src_len - *dst_len - 1];
  283. if (last_byte <= 16 && src_len >= last_byte) {
  284. *dst_len = src_len - last_byte;
  285. } else {
  286. pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
  287. last_byte, (int)src_len);
  288. return -EPERM; /* bad padding */
  289. }
  290. /*
  291. print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
  292. dst, *dst_len, 1);
  293. */
  294. out_sg:
  295. teardown_sgtable(&sg_in);
  296. out_tfm:
  297. crypto_free_blkcipher(tfm);
  298. return ret;
  299. }
  300. static int ceph_aes_decrypt2(const void *key, int key_len,
  301. void *dst1, size_t *dst1_len,
  302. void *dst2, size_t *dst2_len,
  303. const void *src, size_t src_len)
  304. {
  305. struct sg_table sg_in;
  306. struct scatterlist sg_out[3], prealloc_sg;
  307. struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
  308. struct blkcipher_desc desc = { .tfm = tfm };
  309. char pad[16];
  310. void *iv;
  311. int ivsize;
  312. int ret;
  313. int last_byte;
  314. if (IS_ERR(tfm))
  315. return PTR_ERR(tfm);
  316. sg_init_table(sg_out, 3);
  317. sg_set_buf(&sg_out[0], dst1, *dst1_len);
  318. sg_set_buf(&sg_out[1], dst2, *dst2_len);
  319. sg_set_buf(&sg_out[2], pad, sizeof(pad));
  320. ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
  321. if (ret)
  322. goto out_tfm;
  323. crypto_blkcipher_setkey((void *)tfm, key, key_len);
  324. iv = crypto_blkcipher_crt(tfm)->iv;
  325. ivsize = crypto_blkcipher_ivsize(tfm);
  326. memcpy(iv, aes_iv, ivsize);
  327. /*
  328. print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
  329. key, key_len, 1);
  330. print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
  331. src, src_len, 1);
  332. */
  333. ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
  334. if (ret < 0) {
  335. pr_err("ceph_aes_decrypt failed %d\n", ret);
  336. goto out_sg;
  337. }
  338. if (src_len <= *dst1_len)
  339. last_byte = ((char *)dst1)[src_len - 1];
  340. else if (src_len <= *dst1_len + *dst2_len)
  341. last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
  342. else
  343. last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
  344. if (last_byte <= 16 && src_len >= last_byte) {
  345. src_len -= last_byte;
  346. } else {
  347. pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
  348. last_byte, (int)src_len);
  349. return -EPERM; /* bad padding */
  350. }
  351. if (src_len < *dst1_len) {
  352. *dst1_len = src_len;
  353. *dst2_len = 0;
  354. } else {
  355. *dst2_len = src_len - *dst1_len;
  356. }
  357. /*
  358. print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
  359. dst1, *dst1_len, 1);
  360. print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
  361. dst2, *dst2_len, 1);
  362. */
  363. out_sg:
  364. teardown_sgtable(&sg_in);
  365. out_tfm:
  366. crypto_free_blkcipher(tfm);
  367. return ret;
  368. }
  369. int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
  370. const void *src, size_t src_len)
  371. {
  372. switch (secret->type) {
  373. case CEPH_CRYPTO_NONE:
  374. if (*dst_len < src_len)
  375. return -ERANGE;
  376. memcpy(dst, src, src_len);
  377. *dst_len = src_len;
  378. return 0;
  379. case CEPH_CRYPTO_AES:
  380. return ceph_aes_decrypt(secret->key, secret->len, dst,
  381. dst_len, src, src_len);
  382. default:
  383. return -EINVAL;
  384. }
  385. }
  386. int ceph_decrypt2(struct ceph_crypto_key *secret,
  387. void *dst1, size_t *dst1_len,
  388. void *dst2, size_t *dst2_len,
  389. const void *src, size_t src_len)
  390. {
  391. size_t t;
  392. switch (secret->type) {
  393. case CEPH_CRYPTO_NONE:
  394. if (*dst1_len + *dst2_len < src_len)
  395. return -ERANGE;
  396. t = min(*dst1_len, src_len);
  397. memcpy(dst1, src, t);
  398. *dst1_len = t;
  399. src += t;
  400. src_len -= t;
  401. if (src_len) {
  402. t = min(*dst2_len, src_len);
  403. memcpy(dst2, src, t);
  404. *dst2_len = t;
  405. }
  406. return 0;
  407. case CEPH_CRYPTO_AES:
  408. return ceph_aes_decrypt2(secret->key, secret->len,
  409. dst1, dst1_len, dst2, dst2_len,
  410. src, src_len);
  411. default:
  412. return -EINVAL;
  413. }
  414. }
  415. int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
  416. const void *src, size_t src_len)
  417. {
  418. switch (secret->type) {
  419. case CEPH_CRYPTO_NONE:
  420. if (*dst_len < src_len)
  421. return -ERANGE;
  422. memcpy(dst, src, src_len);
  423. *dst_len = src_len;
  424. return 0;
  425. case CEPH_CRYPTO_AES:
  426. return ceph_aes_encrypt(secret->key, secret->len, dst,
  427. dst_len, src, src_len);
  428. default:
  429. return -EINVAL;
  430. }
  431. }
  432. int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
  433. const void *src1, size_t src1_len,
  434. const void *src2, size_t src2_len)
  435. {
  436. switch (secret->type) {
  437. case CEPH_CRYPTO_NONE:
  438. if (*dst_len < src1_len + src2_len)
  439. return -ERANGE;
  440. memcpy(dst, src1, src1_len);
  441. memcpy(dst + src1_len, src2, src2_len);
  442. *dst_len = src1_len + src2_len;
  443. return 0;
  444. case CEPH_CRYPTO_AES:
  445. return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
  446. src1, src1_len, src2, src2_len);
  447. default:
  448. return -EINVAL;
  449. }
  450. }
  451. static int ceph_key_preparse(struct key_preparsed_payload *prep)
  452. {
  453. struct ceph_crypto_key *ckey;
  454. size_t datalen = prep->datalen;
  455. int ret;
  456. void *p;
  457. ret = -EINVAL;
  458. if (datalen <= 0 || datalen > 32767 || !prep->data)
  459. goto err;
  460. ret = -ENOMEM;
  461. ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
  462. if (!ckey)
  463. goto err;
  464. /* TODO ceph_crypto_key_decode should really take const input */
  465. p = (void *)prep->data;
  466. ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
  467. if (ret < 0)
  468. goto err_ckey;
  469. prep->payload[0] = ckey;
  470. prep->quotalen = datalen;
  471. return 0;
  472. err_ckey:
  473. kfree(ckey);
  474. err:
  475. return ret;
  476. }
  477. static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
  478. {
  479. struct ceph_crypto_key *ckey = prep->payload[0];
  480. ceph_crypto_key_destroy(ckey);
  481. kfree(ckey);
  482. }
  483. static void ceph_key_destroy(struct key *key)
  484. {
  485. struct ceph_crypto_key *ckey = key->payload.data;
  486. ceph_crypto_key_destroy(ckey);
  487. kfree(ckey);
  488. }
/*
 * Kernel keyring key type for ceph secrets.  The payload is a
 * struct ceph_crypto_key produced by ceph_key_preparse().
 */
struct key_type key_type_ceph = {
	.name = "ceph",
	.preparse = ceph_key_preparse,
	.free_preparse = ceph_key_free_preparse,
	.instantiate = generic_key_instantiate,
	.destroy = ceph_key_destroy,
};
/* Register the "ceph" key type; returns 0 or a registration error. */
int ceph_crypto_init(void) {
	return register_key_type(&key_type_ceph);
}
/* Unregister the "ceph" key type (pairs with ceph_crypto_init()). */
void ceph_crypto_shutdown(void) {
	unregister_key_type(&key_type_ceph);
}