gss_krb5_crypto.c

/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

u32
krb5_encrypt(
	struct crypto_skcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
			crypto_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
	return ret;
}

u32
krb5_decrypt(
	struct crypto_skcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
			crypto_skcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       gss_k5decrypt returns %d\n", ret);
	return ret;
}

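/*
 * Example (editor's sketch, not part of the original file): how the two
 * helpers above might be driven.  The "cbc(des3_ede)" algorithm name, the
 * example_krb5_roundtrip() function, and the caller-supplied key are
 * illustrative assumptions; real callers get their tfm from the krb5
 * context setup code.
 */
#if 0	/* illustrative only, not compiled */
static int example_krb5_roundtrip(const u8 *key, unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	u8 in[16] = "fifteen bytes..";	/* a multiple of the block size */
	u8 ct[16], pt[16];
	int err;

	/* ask for a synchronous cipher, as SKCIPHER_REQUEST_ON_STACK needs */
	tfm = crypto_alloc_skcipher("cbc(des3_ede)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;
	err = krb5_encrypt(tfm, NULL, in, ct, sizeof(in));	/* NULL => zero IV */
	if (err)
		goto out;
	err = krb5_decrypt(tfm, NULL, ct, pt, sizeof(ct));
out:
	crypto_free_skcipher(tfm);
	return err;
}
#endif
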
static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}

static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
	unsigned int ms_usage;

	switch (usage) {
	case KG_USAGE_SIGN:
		ms_usage = 15;
		break;
	case KG_USAGE_SEAL:
		ms_usage = 13;
		break;
	default:
		return -EINVAL;
	}
	salt[0] = (ms_usage >> 0) & 0xff;
	salt[1] = (ms_usage >> 8) & 0xff;
	salt[2] = (ms_usage >> 16) & 0xff;
	salt[3] = (ms_usage >> 24) & 0xff;

	return 0;
}

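/*
 * Editor's note: the shifts above store the Microsoft usage number as a
 * 32-bit little-endian value, so KG_USAGE_SEAL (ms_usage 13) produces the
 * salt bytes 0x0d 0x00 0x00 0x00.  This matches the RC4-HMAC usage
 * encoding described in RFC 4757.
 */
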
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
		       unsigned int usage, struct xdr_netobj *cksumout)
{
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	u8 rc4salt[4];
	struct crypto_ahash *md5;
	struct crypto_ahash *hmac_md5;
	struct ahash_request *req;

	if (cksumkey == NULL)
		return GSS_S_FAILURE;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
		dprintk("%s: invalid usage value %u\n", __func__, usage);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		return GSS_S_FAILURE;

	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(md5))
		goto out_free_cksum;

	hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac_md5))
		goto out_free_md5;

	req = ahash_request_alloc(md5, GFP_NOFS);
	if (!req)
		goto out_free_hmac_md5;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, rc4salt, 4);
	ahash_request_set_crypt(req, sg, NULL, 4);
	err = crypto_ahash_update(req);
	if (err)
		goto out;

	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	ahash_request_free(req);
	req = ahash_request_alloc(hmac_md5, GFP_NOFS);
	if (!req) {
		/* don't report success when the request can't be allocated */
		err = -ENOMEM;
		goto out_free_hmac_md5;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
	ahash_request_set_crypt(req, sg, checksumdata,
				crypto_ahash_digestsize(md5));
	err = crypto_ahash_digest(req);
	if (err)
		goto out;

	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_hmac_md5:
	crypto_free_ahash(hmac_md5);
out_free_md5:
	crypto_free_ahash(md5);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}

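/*
 * Editor's note: the routine above implements the RC4-HMAC checksum scheme
 * of RFC 4757: an unkeyed MD5 over (salt | header | body), followed by an
 * HMAC-MD5 of that digest under the checksum key.  Two separate tfms are
 * required because the inner hash is unkeyed while the outer one is keyed.
 */
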
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
		return make_checksum_hmac_md5(kctx, header, hdrlen,
					      body, body_offset,
					      cksumkey, usage, cksumout);

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (checksumdata == NULL)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	checksumlen = crypto_ahash_digestsize(tfm);

	if (cksumkey != NULL) {
		err = crypto_ahash_setkey(tfm, cksumkey,
					  kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}

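/*
 * Example (editor's sketch, not in the original file): how a MIC routine
 * might call make_checksum().  The example_mic_checksum() name, the token
 * header pointer, and the key argument are assumptions; real callers live
 * in the get_mic/verify_mic code.
 */
#if 0	/* illustrative only, not compiled */
static u32 example_mic_checksum(struct krb5_ctx *kctx, char *tokhdr,
				struct xdr_buf *text, u8 *key)
{
	u8 cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj md5cksum = { .len = sizeof(cksumdata),
				       .data = cksumdata };

	/* checksum 8 bytes of token header plus the whole body */
	return make_checksum(kctx, tokhdr, 8, text, 0, key,
			     KG_USAGE_SIGN, &md5cksum);
}
#endif
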
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body then over the first 16 octets of the MIC token
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	checksumlen = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out;
	}
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}

struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

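/*
 * Editor's note: encryptor() only ever feeds whole cipher blocks to the
 * tfm.  Any sub-block remainder (fraglen) is carried over as fragment 0
 * of the next invocation, so data that straddles a block boundary is
 * encrypted together with the bytes that complete the block.
 */
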
int
gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

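/*
 * Editor's note: gss_encrypt_xdr_buf() encrypts in place, so the caller
 * must already have padded (buf->len - offset) to the cipher block size;
 * the BUG_ON above enforces that.  When "pages" is non-NULL, cleartext is
 * read from those pages while the ciphertext lands in buf->pages.
 */
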
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	int ret;
	struct decryptor_desc desc;
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */
int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}

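/*
 * Editor's note: xdr_extend_head() opens a shiftlen-byte gap at "base" by
 * memmove()ing the rest of the head upward; gss_krb5_aes_encrypt() below
 * uses that gap for the confounder.  The caller relies on the head having
 * at least RPC_MAX_AUTH_SIZE of slack, per the comment above.
 */
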
static u32
gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 *data;
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
		WARN_ON(1);	/* unconditional: this path should be unreachable */
		return -ENOMEM;
	}
	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
	if (!data)
		return -ENOMEM;

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	skcipher_request_set_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	kfree(data);
	return ret;
}

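/*
 * Editor's note: despite the name, this helper just runs the cipher-text-
 * stealing tfm (allocated by the context setup code) over at most the
 * final two blocks; the bulk of the message is handled by the plain CBC
 * aux cipher, with the IV carried forward so the two stages chain
 * correctly.
 */
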
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_skcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_skcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
					+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.req = req;

		skcipher_request_set_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err) {
		err = GSS_S_FAILURE;
		goto out_err;
	}

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}

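/*
 * Editor's note: after gss_krb5_aes_encrypt() the buffer holds, in order,
 *
 *   | GSS token hdr | E(confounder | plaintext | copy of token hdr) | HMAC |
 *
 * with the HMAC computed over the plaintext before encryption.  This is
 * the RFC 4121 wrap-token layout built on the RFC 3962 AES-CTS enctype.
 */
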
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
		     u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_skcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_skcipher_blocksize(cipher);

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.fragno = 0;
		desc.fraglen = 0;
		desc.req = req;

		skcipher_request_set_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		skcipher_request_zero(req);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}

/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
		       unsigned char *cksum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_NOFS);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;
	desc->flags = 0;

	/* Compute intermediate Kseq from session key */
	err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
	if (err)
		goto out_err;

	/* Compute final Kseq from the checksum and intermediate Kseq */
	err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, cksum, 8, Kseq);
	if (err)
		goto out_err;

	err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kzfree(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}

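/*
 * Editor's note: the derivation above is, in effect,
 *
 *   Kseq = HMAC-MD5(HMAC-MD5(Ksess, 0x00000000), checksum[0..7])
 *
 * an intermediate key from the session key and a 32-bit zero constant,
 * then a final key from the first 8 bytes of the checksum, as used for
 * RC4-HMAC sequence-number encryption (cf. RFC 4757).
 */
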
/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
 */
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
		       s32 seqnum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	dprintk("%s: entered, seqnum %d\n", __func__, seqnum);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_NOFS);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;
	desc->flags = 0;

	/* Compute intermediate Kcrypt from session key */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
	if (err)
		goto out_err;

	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
	if (err)
		goto out_err;

	err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kzfree(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
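
/*
 * Editor's note: here the derivation is
 *
 *   Kcrypt = HMAC-MD5(HMAC-MD5(Ksess ^ 0xf0f0..., 0x00000000), seqnum)
 *
 * with every session-key byte XORed with 0xf0 first, and the sequence
 * number fed in as four bytes, most significant first.
 */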