zcrypt_api.c
// SPDX-License-Identifier: GPL-2.0+
/*
 *  zcrypt 2.1.0
 *
 *  Copyright IBM Corp. 2001, 2012
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);
/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/**
 * Process a rescan of the transport layer.
 *
 * Returns 1, if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		ap_bus_force_rescan();
		/* atomic_inc_return() both counts and reports this rescan */
		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
			   atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if ((zops->variant == variant) &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}

/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}
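
/*
 * zcrypt_pick_queue() / zcrypt_drop_queue() bracket every request that is
 * routed to a queue: picking takes a module reference, a queue reference
 * and a device reference and charges the scheduling weight of the request
 * to both the card and the queue load counters; dropping undoes all of
 * that in reverse order once the request has completed.
 */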
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     unsigned int weight)
{
	struct module *mod = zq->queue->ap_dev.drv->driver.owner;

	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}
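
/*
 * The compare helpers return true when the candidate card/queue, with the
 * weight of the new request added to its current load, would be more
 * loaded than the currently preferred one (ties are broken by the total
 * request count). The callers skip a candidate whenever a helper returns
 * true.
 */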
static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return false;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic_read(&zc->card->total_request_count) >
			atomic_read(&pref_zc->card->total_request_count);
	return weight > pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return false;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count >
			pref_zq->queue->total_request_count;
	return weight > pref_weight;
}
/*
 * zcrypt ioctls.
 */
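
/*
 * All request routing functions below follow the same pattern: under
 * zcrypt_list_lock, walk every online card that offers the required
 * function, derive a weight for the request from the card's speed rating,
 * and remember the least loaded eligible card/queue pair. The chosen
 * queue is then pinned with zcrypt_pick_queue(), the lock is dropped, the
 * message type op is invoked, and the queue is released again with
 * zcrypt_drop_queue().
 */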
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	unsigned short *domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* get weight index of the card device */
		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    ((*domain != (unsigned short) AUTOSELECT) &&
			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == (unsigned short) AUTOSELECT)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
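
/*
 * An EP11 request may carry an explicit list of target adapters/domains.
 * The two helpers below check whether a given card id or queue id appears
 * on that user supplied target list.
 */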
static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (dev_id == targets->ap_id)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
			return true;
		targets++;
	}
	return false;
}
static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	target_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
		if (copy_from_user(targets, uptr,
				   target_num * sizeof(*targets))) {
			rc = -EFAULT;
			/* free the target list, not just the urb */
			goto out_free;
		}
	}

	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
	if (rc)
		goto out_free;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online EP11 cards */
		if (!zc->online || !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* get weight index of the card device */
		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_ep11_cprb ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
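
/*
 * zcrypt_rng() routes a hardware RNG CPRB to the least loaded online CCA
 * card and fills @buffer. On success the message type op returns the
 * number of random bytes delivered, which zcrypt_rng_data_read() below
 * hands out in u32 portions.
 */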
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	rc = get_rng_fc(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rng)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		/* use the out path so the reply tracepoint still fires */
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
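
/*
 * The status helpers below snapshot the card/queue lists into flat,
 * ioctl-sized arrays. The *_ext variant covers the full card id range,
 * while the older variants are limited to the first 64 adapters.
 */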
static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);

	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(reqcnt, 0, sizeof(int) * max_adapters);

	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			reqcnt[card] = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}
static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}
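
/*
 * Main ioctl dispatcher. Every request-style ioctl runs its worker in a
 * retry loop: -EAGAIN from the message type op means "try again on a
 * (possibly different) device", and a final -ENODEV triggers one forced
 * AP bus rescan before the request is retried a last time.
 */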
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc = 0;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;

		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(&mex);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
			return rc;
		}
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;

		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(&crt);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
			return rc;
		}
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;

		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_cprb(&xcRB);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSECSENDCPRB rc=%d\n", rc);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case ZSENDEP11CPRB: {
		struct ep11_urb __user *uxcrb = (void __user *)arg;
		struct ep11_urb xcrb;

		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
			return -EFAULT;
		do {
			rc = zcrypt_send_ep11_cprb(&xcrb);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_ep11_cprb(&xcrb);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
			return -EFAULT;
		return rc;
	}
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		int *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		/* copy the whole array, not the sizeof() of the pointer */
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		int reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
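
/*
 * The 32-bit variants of the request structures carry compat_uptr_t
 * members instead of 64-bit pointers. Each trans_*32() helper copies the
 * 32-bit structure in, widens the pointers with compat_ptr(), reuses the
 * normal 64-bit worker and copies the results back.
 */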
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(&mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}
struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(&crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}
struct compat_ica_xcRB {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __attribute__((packed));

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_send_cprb(&xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}
static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif
/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);
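
/*
 * Random data is fetched from the hardware one page at a time and handed
 * out to the hwrng core in u32 portions from the end of the buffer; a new
 * page is requested only once the buffer index reaches zero.
 */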
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}
static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};
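
/*
 * The hwrng device exists only while at least one queue that can serve
 * RNG requests is around: zcrypt_rng_device_count reference counts the
 * add/remove calls so that the first caller registers and the last
 * caller unregisters the device.
 */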
int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		if (!zcrypt_hwrng_seed)
			zcrypt_rng_dev.quality = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}
int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}
/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out_debug_exit;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();
	return 0;

out_debug_exit:
	/* undo the debug feature registration on failure */
	zcrypt_debug_exit();
out:
	return rc;
}
/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);