/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@umich.edu>
 *  Bruce Fields <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif
u32
krb5_encrypt(
        struct crypto_skcipher *tfm,
        void *iv,
        void *in,
        void *out,
        int length)
{
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        if (length % crypto_skcipher_blocksize(tfm) != 0)
                goto out;

        if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
                        crypto_skcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

        memcpy(out, in, length);
        sg_init_one(sg, out, length);

        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, length, local_iv);

        ret = crypto_skcipher_encrypt(req);
        skcipher_request_zero(req);
out:
        dprintk("RPC: krb5_encrypt returns %d\n", ret);
        return ret;
}
u32
krb5_decrypt(
        struct crypto_skcipher *tfm,
        void *iv,
        void *in,
        void *out,
        int length)
{
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        if (length % crypto_skcipher_blocksize(tfm) != 0)
                goto out;

        if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
                        crypto_skcipher_ivsize(tfm));
                goto out;
        }
        if (iv)
                memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

        memcpy(out, in, length);
        sg_init_one(sg, out, length);

        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, length, local_iv);

        ret = crypto_skcipher_decrypt(req);
        skcipher_request_zero(req);
out:
        dprintk("RPC: gss_k5decrypt returns %d\n", ret);
        return ret;
}
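
/*
 * Illustrative only: a minimal sketch of how krb5_encrypt()/krb5_decrypt()
 * might be driven for a whole-block buffer.  The "cbc(des3_ede)" algorithm
 * name, the key/buffer variables, and the 24-byte key length are assumptions
 * made for this example, not values taken from this file.
 *
 *	struct crypto_skcipher *tfm;
 *	u8 key[24], clear[16], cipher[16], back[16];
 *	u32 err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(des3_ede)", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	if (!err)
 *		err = krb5_encrypt(tfm, NULL, clear, cipher, sizeof(clear));
 *	if (!err)
 *		err = krb5_decrypt(tfm, NULL, cipher, back, sizeof(cipher));
 *	crypto_free_skcipher(tfm);
 *
 * Note that the length passed in must be a multiple of the cipher block
 * size, since both helpers bail out with -EINVAL otherwise.
 */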

static int
checksummer(struct scatterlist *sg, void *data)
{
        struct ahash_request *req = data;

        ahash_request_set_crypt(req, sg, NULL, sg->length);

        return crypto_ahash_update(req);
}

static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
        unsigned int ms_usage;

        switch (usage) {
        case KG_USAGE_SIGN:
                ms_usage = 15;
                break;
        case KG_USAGE_SEAL:
                ms_usage = 13;
                break;
        default:
                return -EINVAL;
        }
        salt[0] = (ms_usage >> 0) & 0xff;
        salt[1] = (ms_usage >> 8) & 0xff;
        salt[2] = (ms_usage >> 16) & 0xff;
        salt[3] = (ms_usage >> 24) & 0xff;

        return 0;
}
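
/*
 * Worked example of the mapping above: usage == KG_USAGE_SIGN selects
 * ms_usage 15, which is stored little-endian, so salt[] becomes
 * { 0x0f, 0x00, 0x00, 0x00 }; KG_USAGE_SEAL (ms_usage 13) likewise
 * yields { 0x0d, 0x00, 0x00, 0x00 }.
 */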

static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
                       struct xdr_buf *body, int body_offset, u8 *cksumkey,
                       unsigned int usage, struct xdr_netobj *cksumout)
{
        struct scatterlist sg[1];
        int err;
        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        u8 rc4salt[4];
        struct crypto_ahash *md5;
        struct crypto_ahash *hmac_md5;
        struct ahash_request *req;

        if (cksumkey == NULL)
                return GSS_S_FAILURE;

        if (cksumout->len < kctx->gk5e->cksumlength) {
                dprintk("%s: checksum buffer length, %u, too small for %s\n",
                        __func__, cksumout->len, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }

        if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
                dprintk("%s: invalid usage value %u\n", __func__, usage);
                return GSS_S_FAILURE;
        }

        md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(md5))
                return GSS_S_FAILURE;

        hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(hmac_md5)) {
                crypto_free_ahash(md5);
                return GSS_S_FAILURE;
        }

        req = ahash_request_alloc(md5, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(hmac_md5);
                crypto_free_ahash(md5);
                return GSS_S_FAILURE;
        }

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        err = crypto_ahash_init(req);
        if (err)
                goto out;
        sg_init_one(sg, rc4salt, 4);
        ahash_request_set_crypt(req, sg, NULL, 4);
        err = crypto_ahash_update(req);
        if (err)
                goto out;

        sg_init_one(sg, header, hdrlen);
        ahash_request_set_crypt(req, sg, NULL, hdrlen);
        err = crypto_ahash_update(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, req);
        if (err)
                goto out;
        ahash_request_set_crypt(req, NULL, checksumdata, 0);
        err = crypto_ahash_final(req);
        if (err)
                goto out;

        ahash_request_free(req);
        req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(hmac_md5);
                crypto_free_ahash(md5);
                return GSS_S_FAILURE;
        }

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        err = crypto_ahash_init(req);
        if (err)
                goto out;
        err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
        if (err)
                goto out;

        sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
        ahash_request_set_crypt(req, sg, checksumdata,
                                crypto_ahash_digestsize(md5));
        err = crypto_ahash_digest(req);
        if (err)
                goto out;

        memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
        cksumout->len = kctx->gk5e->cksumlength;
out:
        ahash_request_free(req);
        crypto_free_ahash(md5);
        crypto_free_ahash(hmac_md5);
        return err ? GSS_S_FAILURE : 0;
}
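
/*
 * Restating the steps above as a formula (not an additional code path):
 * the arcfour checksum computed here is
 *
 *	cksum = HMAC-MD5(cksumkey, MD5(rc4salt || header || body))
 *
 * truncated to kctx->gk5e->cksumlength bytes.
 */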

/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
              struct xdr_buf *body, int body_offset, u8 *cksumkey,
              unsigned int usage, struct xdr_netobj *cksumout)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg[1];
        int err;
        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        unsigned int checksumlen;

        if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
                return make_checksum_hmac_md5(kctx, header, hdrlen,
                                              body, body_offset,
                                              cksumkey, usage, cksumout);

        if (cksumout->len < kctx->gk5e->cksumlength) {
                dprintk("%s: checksum buffer length, %u, too small for %s\n",
                        __func__, cksumout->len, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }

        tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return GSS_S_FAILURE;

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return GSS_S_FAILURE;
        }

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        checksumlen = crypto_ahash_digestsize(tfm);

        if (cksumkey != NULL) {
                err = crypto_ahash_setkey(tfm, cksumkey,
                                          kctx->gk5e->keylength);
                if (err)
                        goto out;
        }

        err = crypto_ahash_init(req);
        if (err)
                goto out;
        sg_init_one(sg, header, hdrlen);
        ahash_request_set_crypt(req, sg, NULL, hdrlen);
        err = crypto_ahash_update(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, req);
        if (err)
                goto out;
        ahash_request_set_crypt(req, NULL, checksumdata, 0);
        err = crypto_ahash_final(req);
        if (err)
                goto out;

        switch (kctx->gk5e->ctype) {
        case CKSUMTYPE_RSA_MD5:
                err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
                                          checksumdata, checksumlen);
                if (err)
                        goto out;
                memcpy(cksumout->data,
                       checksumdata + checksumlen - kctx->gk5e->cksumlength,
                       kctx->gk5e->cksumlength);
                break;
        case CKSUMTYPE_HMAC_SHA1_DES3:
                memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
                break;
        default:
                BUG();
                break;
        }
        cksumout->len = kctx->gk5e->cksumlength;
out:
        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return err ? GSS_S_FAILURE : 0;
}
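
/*
 * Illustrative only: a hedged sketch of a make_checksum() call as a MIC
 * path might issue it.  The token header pointer (ptr), the xdr_buf (text),
 * the key pointer and the cksumdata buffer are placeholders assumed for
 * this example; they are not defined in this file.
 *
 *	struct xdr_netobj md5cksum = { .len = sizeof(cksumdata),
 *				       .data = cksumdata };
 *
 *	if (make_checksum(kctx, ptr, 8, text, 0, cksumkey,
 *			  KG_USAGE_SIGN, &md5cksum))
 *		return GSS_S_FAILURE;
 */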

/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body then over the first 16 octets of the MIC token
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
                 struct xdr_buf *body, int body_offset, u8 *cksumkey,
                 unsigned int usage, struct xdr_netobj *cksumout)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg[1];
        int err;
        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        unsigned int checksumlen;

        if (kctx->gk5e->keyed_cksum == 0) {
                dprintk("%s: expected keyed hash for %s\n",
                        __func__, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }
        if (cksumkey == NULL) {
                dprintk("%s: no key supplied for %s\n",
                        __func__, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }

        tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return GSS_S_FAILURE;
        checksumlen = crypto_ahash_digestsize(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return GSS_S_FAILURE;
        }

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
        if (err)
                goto out;

        err = crypto_ahash_init(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, req);
        if (err)
                goto out;
        if (header != NULL) {
                sg_init_one(sg, header, hdrlen);
                ahash_request_set_crypt(req, sg, NULL, hdrlen);
                err = crypto_ahash_update(req);
                if (err)
                        goto out;
        }
        ahash_request_set_crypt(req, NULL, checksumdata, 0);
        err = crypto_ahash_final(req);
        if (err)
                goto out;

        cksumout->len = kctx->gk5e->cksumlength;

        switch (kctx->gk5e->ctype) {
        case CKSUMTYPE_HMAC_SHA1_96_AES128:
        case CKSUMTYPE_HMAC_SHA1_96_AES256:
                /* note that this truncates the hash */
                memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
                break;
        default:
                BUG();
                break;
        }
out:
        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return err ? GSS_S_FAILURE : 0;
}

struct encryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
        struct skcipher_request *req;
        int pos;
        struct xdr_buf *outbuf;
        struct page **pages;
        struct scatterlist infrags[4];
        struct scatterlist outfrags[4];
        int fragno;
        int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
        struct encryptor_desc *desc = data;
        struct xdr_buf *outbuf = desc->outbuf;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
        struct page *in_page;
        int thislen = desc->fraglen + sg->length;
        int fraglen, ret;
        int page_pos;

        /* Worst case is 4 fragments: head, end of page 1, start
         * of page 2, tail.  Anything more is a bug. */
        BUG_ON(desc->fragno > 3);

        page_pos = desc->pos - outbuf->head[0].iov_len;
        if (page_pos >= 0 && page_pos < outbuf->page_len) {
                /* pages are not in place: */
                int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
                in_page = desc->pages[i];
        } else {
                in_page = sg_page(sg);
        }
        sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
                    sg->offset);
        sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
                    sg->offset);
        desc->fragno++;
        desc->fraglen += sg->length;
        desc->pos += sg->length;

        fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
        thislen -= fraglen;

        if (thislen == 0)
                return 0;

        sg_mark_end(&desc->infrags[desc->fragno - 1]);
        sg_mark_end(&desc->outfrags[desc->fragno - 1]);

        skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
                                   thislen, desc->iv);

        ret = crypto_skcipher_encrypt(desc->req);
        if (ret)
                return ret;

        sg_init_table(desc->infrags, 4);
        sg_init_table(desc->outfrags, 4);

        if (fraglen) {
                sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
                            sg->offset + sg->length - fraglen);
                desc->infrags[0] = desc->outfrags[0];
                sg_assign_page(&desc->infrags[0], in_page);
                desc->fragno = 1;
                desc->fraglen = fraglen;
        } else {
                desc->fragno = 0;
                desc->fraglen = 0;
        }
        return 0;
}

int
gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
                    int offset, struct page **pages)
{
        int ret;
        struct encryptor_desc desc;
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);

        memset(desc.iv, 0, sizeof(desc.iv));
        desc.req = req;
        desc.pos = offset;
        desc.outbuf = buf;
        desc.pages = pages;
        desc.fragno = 0;
        desc.fraglen = 0;

        sg_init_table(desc.infrags, 4);
        sg_init_table(desc.outfrags, 4);

        ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
        skcipher_request_zero(req);
        return ret;
}
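
/*
 * Illustrative only: gss_encrypt_xdr_buf() encrypts buf in place from
 * `offset` to the end, so the caller must already have padded the data to
 * a whole number of cipher blocks (the BUG_ON above enforces this).  A
 * hedged sketch of a call, where the kctx->enc cipher, buf, offset and
 * pages values are placeholders assumed for the example:
 *
 *	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset, pages))
 *		return GSS_S_FAILURE;
 */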

struct decryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
        struct skcipher_request *req;
        struct scatterlist frags[4];
        int fragno;
        int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
        struct decryptor_desc *desc = data;
        int thislen = desc->fraglen + sg->length;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
        int fraglen, ret;

        /* Worst case is 4 fragments: head, end of page 1, start
         * of page 2, tail.  Anything more is a bug. */
        BUG_ON(desc->fragno > 3);
        sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
                    sg->offset);
        desc->fragno++;
        desc->fraglen += sg->length;

        fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
        thislen -= fraglen;

        if (thislen == 0)
                return 0;

        sg_mark_end(&desc->frags[desc->fragno - 1]);

        skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
                                   thislen, desc->iv);

        ret = crypto_skcipher_decrypt(desc->req);
        if (ret)
                return ret;

        sg_init_table(desc->frags, 4);

        if (fraglen) {
                sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
                            sg->offset + sg->length - fraglen);
                desc->fragno = 1;
                desc->fraglen = fraglen;
        } else {
                desc->fragno = 0;
                desc->fraglen = 0;
        }
        return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
                    int offset)
{
        int ret;
        struct decryptor_desc desc;
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        /* XXXJBF: */
        BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);

        memset(desc.iv, 0, sizeof(desc.iv));
        desc.req = req;
        desc.fragno = 0;
        desc.fraglen = 0;

        sg_init_table(desc.frags, 4);

        ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
        skcipher_request_zero(req);
        return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */
int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
        u8 *p;

        if (shiftlen == 0)
                return 0;

        BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
        BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

        p = buf->head[0].iov_base + base;

        memmove(p + shiftlen, p, buf->head[0].iov_len - base);

        buf->head[0].iov_len += shiftlen;
        buf->len += shiftlen;

        return 0;
}
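
/*
 * A small worked example (the values are illustrative): with base == 8 and
 * shiftlen == 16, the bytes from head[0].iov_base + 8 through the end of
 * the head are moved 16 bytes to the right, head[0].iov_len and buf->len
 * each grow by 16, and the 16-byte hole at offset 8 is left for the caller
 * to fill -- gss_krb5_aes_encrypt() below writes the confounder there.
 */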

static u32
gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
                   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
        u32 ret;
        struct scatterlist sg[1];
        SKCIPHER_REQUEST_ON_STACK(req, cipher);
        u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
        struct page **save_pages;
        u32 len = buf->len - offset;

        if (len > ARRAY_SIZE(data)) {
                WARN_ON(0);
                return -ENOMEM;
        }

        /*
         * For encryption, we want to read from the cleartext
         * page cache pages, and write the encrypted data to
         * the supplied xdr_buf pages.
         */
        save_pages = buf->pages;
        if (encrypt)
                buf->pages = pages;

        ret = read_bytes_from_xdr_buf(buf, offset, data, len);
        buf->pages = save_pages;
        if (ret)
                goto out;

        sg_init_one(sg, data, len);

        skcipher_request_set_tfm(req, cipher);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, len, iv);

        if (encrypt)
                ret = crypto_skcipher_encrypt(req);
        else
                ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);

        if (ret)
                goto out;

        ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
        return ret;
}

u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
                     struct xdr_buf *buf, struct page **pages)
{
        u32 err;
        struct xdr_netobj hmac;
        u8 *cksumkey;
        u8 *ecptr;
        struct crypto_skcipher *cipher, *aux_cipher;
        int blocksize;
        struct page **save_pages;
        int nblocks, nbytes;
        struct encryptor_desc desc;
        u32 cbcbytes;
        unsigned int usage;

        if (kctx->initiate) {
                cipher = kctx->initiator_enc;
                aux_cipher = kctx->initiator_enc_aux;
                cksumkey = kctx->initiator_integ;
                usage = KG_USAGE_INITIATOR_SEAL;
        } else {
                cipher = kctx->acceptor_enc;
                aux_cipher = kctx->acceptor_enc_aux;
                cksumkey = kctx->acceptor_integ;
                usage = KG_USAGE_ACCEPTOR_SEAL;
        }
        blocksize = crypto_skcipher_blocksize(cipher);

        /* hide the gss token header and insert the confounder */
        offset += GSS_KRB5_TOK_HDR_LEN;
        if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
                return GSS_S_FAILURE;
        gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
        offset -= GSS_KRB5_TOK_HDR_LEN;

        if (buf->tail[0].iov_base != NULL) {
                ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
        } else {
                buf->tail[0].iov_base = buf->head[0].iov_base
                                        + buf->head[0].iov_len;
                buf->tail[0].iov_len = 0;
                ecptr = buf->tail[0].iov_base;
        }

        /* copy plaintext gss token header after filler (if any) */
        memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
        buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
        buf->len += GSS_KRB5_TOK_HDR_LEN;

        /* Do the HMAC */
        hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
        hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

        /*
         * When we are called, pages points to the real page cache
         * data -- which we can't go and encrypt!  buf->pages points
         * to scratch pages which we are going to send off to the
         * client/server.  Swap in the plaintext pages to calculate
         * the hmac.
         */
        save_pages = buf->pages;
        buf->pages = pages;

        err = make_checksum_v2(kctx, NULL, 0, buf,
                               offset + GSS_KRB5_TOK_HDR_LEN,
                               cksumkey, usage, &hmac);
        buf->pages = save_pages;
        if (err)
                return GSS_S_FAILURE;

        nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
        nblocks = (nbytes + blocksize - 1) / blocksize;
        cbcbytes = 0;
        if (nblocks > 2)
                cbcbytes = (nblocks - 2) * blocksize;

        memset(desc.iv, 0, sizeof(desc.iv));

        if (cbcbytes) {
                SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

                desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
                desc.fragno = 0;
                desc.fraglen = 0;
                desc.pages = pages;
                desc.outbuf = buf;
                desc.req = req;

                skcipher_request_set_tfm(req, aux_cipher);
                skcipher_request_set_callback(req, 0, NULL, NULL);

                sg_init_table(desc.infrags, 4);
                sg_init_table(desc.outfrags, 4);

                err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
                                      cbcbytes, encryptor, &desc);
                skcipher_request_zero(req);
                if (err)
                        goto out_err;
        }

        /* Make sure IV carries forward from any CBC results. */
        err = gss_krb5_cts_crypt(cipher, buf,
                                 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
                                 desc.iv, pages, 1);
        if (err) {
                err = GSS_S_FAILURE;
                goto out_err;
        }

        /* Now update buf to account for HMAC */
        buf->tail[0].iov_len += kctx->gk5e->cksumlength;
        buf->len += kctx->gk5e->cksumlength;

out_err:
        if (err)
                err = GSS_S_FAILURE;
        return err;
}
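
/*
 * A worked example of the CBC/CTS split above (the numbers are
 * illustrative): with blocksize == 16 and nbytes == 100, nblocks == 7 and
 * cbcbytes == (7 - 2) * 16 == 80, so the first 80 bytes go through the aux
 * (plain CBC) cipher via encryptor(), and gss_krb5_cts_crypt() handles the
 * remaining 20 bytes -- the final two blocks, the last of which is partial
 * -- with desc.iv carrying the CBC chaining value forward.
 */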

u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
                     u32 *headskip, u32 *tailskip)
{
        struct xdr_buf subbuf;
        u32 ret = 0;
        u8 *cksum_key;
        struct crypto_skcipher *cipher, *aux_cipher;
        struct xdr_netobj our_hmac_obj;
        u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
        u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
        int nblocks, blocksize, cbcbytes;
        struct decryptor_desc desc;
        unsigned int usage;

        if (kctx->initiate) {
                cipher = kctx->acceptor_enc;
                aux_cipher = kctx->acceptor_enc_aux;
                cksum_key = kctx->acceptor_integ;
                usage = KG_USAGE_ACCEPTOR_SEAL;
        } else {
                cipher = kctx->initiator_enc;
                aux_cipher = kctx->initiator_enc_aux;
                cksum_key = kctx->initiator_integ;
                usage = KG_USAGE_INITIATOR_SEAL;
        }
        blocksize = crypto_skcipher_blocksize(cipher);

        /* create a segment skipping the header and leaving out the checksum */
        xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
                           (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
                            kctx->gk5e->cksumlength));

        nblocks = (subbuf.len + blocksize - 1) / blocksize;

        cbcbytes = 0;
        if (nblocks > 2)
                cbcbytes = (nblocks - 2) * blocksize;

        memset(desc.iv, 0, sizeof(desc.iv));

        if (cbcbytes) {
                SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

                desc.fragno = 0;
                desc.fraglen = 0;
                desc.req = req;

                skcipher_request_set_tfm(req, aux_cipher);
                skcipher_request_set_callback(req, 0, NULL, NULL);

                sg_init_table(desc.frags, 4);

                ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
                skcipher_request_zero(req);
                if (ret)
                        goto out_err;
        }

        /* Make sure IV carries forward from any CBC results. */
        ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
        if (ret)
                goto out_err;

        /* Calculate our hmac over the plaintext data */
        our_hmac_obj.len = sizeof(our_hmac);
        our_hmac_obj.data = our_hmac;

        ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
                               cksum_key, usage, &our_hmac_obj);
        if (ret)
                goto out_err;

        /* Get the packet's hmac value */
        ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
                                      pkt_hmac, kctx->gk5e->cksumlength);
        if (ret)
                goto out_err;

        if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
                ret = GSS_S_BAD_SIG;
                goto out_err;
        }
        *headskip = kctx->gk5e->conflen;
        *tailskip = kctx->gk5e->cksumlength;
out_err:
        if (ret && ret != GSS_S_BAD_SIG)
                ret = GSS_S_FAILURE;
        return ret;
}

/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                       unsigned char *cksum)
{
        struct crypto_shash *hmac;
        struct shash_desc *desc;
        u8 Kseq[GSS_KRB5_MAX_KEYLEN];
        u32 zeroconstant = 0;
        int err;

        dprintk("%s: entered\n", __func__);

        hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld, allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
                return PTR_ERR(hmac);
        }

        /* the descriptor must include room for the hash's private state */
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
                       GFP_KERNEL);
        if (!desc) {
                dprintk("%s: failed to allocate shash descriptor for '%s'\n",
                        __func__, kctx->gk5e->cksum_name);
                crypto_free_shash(hmac);
                return -ENOMEM;
        }

        desc->tfm = hmac;
        desc->flags = 0;

        /* Compute intermediate Kseq from session key */
        err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
        if (err)
                goto out_err;

        /* Compute final Kseq from the checksum and intermediate Kseq */
        err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = crypto_shash_digest(desc, cksum, 8, Kseq);
        if (err)
                goto out_err;

        err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = 0;

out_err:
        kzfree(desc);
        crypto_free_shash(hmac);
        dprintk("%s: returning %d\n", __func__, err);
        return err;
}

/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
 */
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                       s32 seqnum)
{
        struct crypto_shash *hmac;
        struct shash_desc *desc;
        u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
        u8 zeroconstant[4] = {0};
        u8 seqnumarray[4];
        int err, i;

        dprintk("%s: entered, seqnum %u\n", __func__, seqnum);

        hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld, allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
                return PTR_ERR(hmac);
        }

        /* the descriptor must include room for the hash's private state */
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
                       GFP_KERNEL);
        if (!desc) {
                dprintk("%s: failed to allocate shash descriptor for '%s'\n",
                        __func__, kctx->gk5e->cksum_name);
                crypto_free_shash(hmac);
                return -ENOMEM;
        }

        desc->tfm = hmac;
        desc->flags = 0;

        /* Compute intermediate Kcrypt from session key */
        for (i = 0; i < kctx->gk5e->keylength; i++)
                Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

        err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
        if (err)
                goto out_err;

        /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
        err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
        seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
        seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
        seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

        err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
        if (err)
                goto out_err;

        err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = 0;

out_err:
        kzfree(desc);
        crypto_free_shash(hmac);
        dprintk("%s: returning %d\n", __func__, err);
        return err;
}
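
/*
 * A worked example of the derivation above (illustrative values only):
 * seqnum == 0x12345678 gives seqnumarray[] == { 0x12, 0x34, 0x56, 0x78 },
 * so the key installed on `cipher` is
 *
 *	Kcrypt = HMAC(HMAC(Ksess ^ f0f0..f0, 00 00 00 00), seqnumarray)
 *
 * where the HMAC is the hash named by kctx->gk5e->cksum_name (assumed to
 * be hmac(md5) for the arcfour enctype).
 */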