tpm_ibmvtpm.c 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710
  1. /*
  2. * Copyright (C) 2012 IBM Corporation
  3. *
  4. * Author: Ashley Lai <ashleydlai@gmail.com>
  5. *
  6. * Maintained by: <tpmdd-devel@lists.sourceforge.net>
  7. *
  8. * Device driver for TCG/TCPA TPM (trusted platform module).
  9. * Specifications at www.trustedcomputinggroup.org
  10. *
  11. * This program is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public License as
  13. * published by the Free Software Foundation, version 2 of the
  14. * License.
  15. *
  16. */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include "tpm.h"
#include "tpm_ibmvtpm.h"
/* Name used both for the vio driver and for request_irq() below. */
static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

/* Device-tree type/compatible pairs this driver binds to;
 * the empty entry terminates the table. */
static struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
/**
 * ibmvtpm_send_crq - Send a CRQ request
 *
 * @vdev:	vio device struct
 * @w1:	first word
 * @w2:	second word
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
{
	/* H_SEND_CRQ queues one 16-byte CRQ entry (two 64-bit words) to
	 * the partner queue identified by this unit address. */
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
}
  51. /**
  52. * tpm_ibmvtpm_recv - Receive data after send
  53. *
  54. * @chip: tpm chip struct
  55. * @buf: buffer to read
  56. * @count: size of buffer
  57. *
  58. * Return:
  59. * Number of bytes read
  60. */
  61. static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
  62. {
  63. struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
  64. u16 len;
  65. int sig;
  66. if (!ibmvtpm->rtce_buf) {
  67. dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
  68. return 0;
  69. }
  70. sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
  71. if (sig)
  72. return -EINTR;
  73. len = ibmvtpm->res_len;
  74. if (count < len) {
  75. dev_err(ibmvtpm->dev,
  76. "Invalid size in recv: count=%zd, crq_size=%d\n",
  77. count, len);
  78. return -EIO;
  79. }
  80. spin_lock(&ibmvtpm->rtce_lock);
  81. memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
  82. memset(ibmvtpm->rtce_buf, 0, len);
  83. ibmvtpm->res_len = 0;
  84. spin_unlock(&ibmvtpm->rtce_lock);
  85. return len;
  86. }
  87. /**
  88. * tpm_ibmvtpm_send - Send tpm request
  89. *
  90. * @chip: tpm chip struct
  91. * @buf: buffer contains data to send
  92. * @count: size of buffer
  93. *
  94. * Return:
  95. * Number of bytes sent or < 0 on error.
  96. */
  97. static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
  98. {
  99. struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
  100. struct ibmvtpm_crq crq;
  101. __be64 *word = (__be64 *)&crq;
  102. int rc, sig;
  103. if (!ibmvtpm->rtce_buf) {
  104. dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
  105. return 0;
  106. }
  107. if (count > ibmvtpm->rtce_size) {
  108. dev_err(ibmvtpm->dev,
  109. "Invalid size in send: count=%zd, rtce_size=%d\n",
  110. count, ibmvtpm->rtce_size);
  111. return -EIO;
  112. }
  113. if (ibmvtpm->tpm_processing_cmd) {
  114. dev_info(ibmvtpm->dev,
  115. "Need to wait for TPM to finish\n");
  116. /* wait for previous command to finish */
  117. sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
  118. if (sig)
  119. return -EINTR;
  120. }
  121. spin_lock(&ibmvtpm->rtce_lock);
  122. ibmvtpm->res_len = 0;
  123. memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
  124. crq.valid = (u8)IBMVTPM_VALID_CMD;
  125. crq.msg = (u8)VTPM_TPM_COMMAND;
  126. crq.len = cpu_to_be16(count);
  127. crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
  128. /*
  129. * set the processing flag before the Hcall, since we may get the
  130. * result (interrupt) before even being able to check rc.
  131. */
  132. ibmvtpm->tpm_processing_cmd = true;
  133. rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
  134. be64_to_cpu(word[1]));
  135. if (rc != H_SUCCESS) {
  136. dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
  137. rc = 0;
  138. ibmvtpm->tpm_processing_cmd = false;
  139. } else
  140. rc = count;
  141. spin_unlock(&ibmvtpm->rtce_lock);
  142. return rc;
  143. }
  144. static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
  145. {
  146. return;
  147. }
  148. static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
  149. {
  150. return 0;
  151. }
  152. /**
  153. * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
  154. *
  155. * @ibmvtpm: vtpm device struct
  156. *
  157. * Return:
  158. * 0 on success.
  159. * Non-zero on failure.
  160. */
  161. static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
  162. {
  163. struct ibmvtpm_crq crq;
  164. u64 *buf = (u64 *) &crq;
  165. int rc;
  166. crq.valid = (u8)IBMVTPM_VALID_CMD;
  167. crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
  168. rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
  169. cpu_to_be64(buf[1]));
  170. if (rc != H_SUCCESS)
  171. dev_err(ibmvtpm->dev,
  172. "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
  173. return rc;
  174. }
  175. /**
  176. * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
  177. * - Note that this is vtpm version and not tpm version
  178. *
  179. * @ibmvtpm: vtpm device struct
  180. *
  181. * Return:
  182. * 0 on success.
  183. * Non-zero on failure.
  184. */
  185. static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
  186. {
  187. struct ibmvtpm_crq crq;
  188. u64 *buf = (u64 *) &crq;
  189. int rc;
  190. crq.valid = (u8)IBMVTPM_VALID_CMD;
  191. crq.msg = (u8)VTPM_GET_VERSION;
  192. rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
  193. cpu_to_be64(buf[1]));
  194. if (rc != H_SUCCESS)
  195. dev_err(ibmvtpm->dev,
  196. "ibmvtpm_crq_get_version failed rc=%d\n", rc);
  197. return rc;
  198. }
  199. /**
  200. * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
  201. * @ibmvtpm: vtpm device struct
  202. *
  203. * Return:
  204. * 0 on success.
  205. * Non-zero on failure.
  206. */
  207. static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
  208. {
  209. int rc;
  210. rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
  211. if (rc != H_SUCCESS)
  212. dev_err(ibmvtpm->dev,
  213. "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
  214. return rc;
  215. }
  216. /**
  217. * ibmvtpm_crq_send_init - Send a CRQ initialize message
  218. * @ibmvtpm: vtpm device struct
  219. *
  220. * Return:
  221. * 0 on success.
  222. * Non-zero on failure.
  223. */
  224. static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
  225. {
  226. int rc;
  227. rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
  228. if (rc != H_SUCCESS)
  229. dev_err(ibmvtpm->dev,
  230. "ibmvtpm_crq_send_init failed rc=%d\n", rc);
  231. return rc;
  232. }
/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:	vio device struct
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	/* Unregister from the TPM core first so no new commands arrive,
	 * then detach the interrupt handler before tearing down the CRQ. */
	tpm_chip_unregister(chip);
	free_irq(vdev->irq, ibmvtpm);

	/* Retry H_FREE_CRQ while the hypervisor reports busy, sleeping
	 * 100ms between attempts (rc == 0 skips the first sleep). */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	/* The rtce buffer only exists if the GET_RTCE_BUFFER_SIZE response
	 * was processed during probe. */
	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}
	kfree(ibmvtpm);
	/* For tpm_ibmvtpm_get_desired_dma */
	dev_set_drvdata(&vdev->dev, NULL);

	return 0;
}
  264. /**
  265. * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
  266. * @vdev: vio device struct
  267. *
  268. * Return:
  269. * Number of bytes the driver needs to DMA map.
  270. */
  271. static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
  272. {
  273. struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
  274. struct ibmvtpm_dev *ibmvtpm;
  275. /*
  276. * ibmvtpm initializes at probe time, so the data we are
  277. * asking for may not be set yet. Estimate that 4K required
  278. * for TCE-mapped buffer in addition to CRQ.
  279. */
  280. if (chip)
  281. ibmvtpm = dev_get_drvdata(&chip->dev);
  282. else
  283. return CRQ_RES_BUF_SIZE + PAGE_SIZE;
  284. return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
  285. }
  286. /**
  287. * tpm_ibmvtpm_suspend - Suspend
  288. * @dev: device struct
  289. *
  290. * Return: Always 0.
  291. */
  292. static int tpm_ibmvtpm_suspend(struct device *dev)
  293. {
  294. struct tpm_chip *chip = dev_get_drvdata(dev);
  295. struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
  296. struct ibmvtpm_crq crq;
  297. u64 *buf = (u64 *) &crq;
  298. int rc = 0;
  299. crq.valid = (u8)IBMVTPM_VALID_CMD;
  300. crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
  301. rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
  302. cpu_to_be64(buf[1]));
  303. if (rc != H_SUCCESS)
  304. dev_err(ibmvtpm->dev,
  305. "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
  306. return rc;
  307. }
/**
 * ibmvtpm_reset_crq - Reset CRQ
 *
 * @ibmvtpm:	ibm vtpm struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	/* Free the existing CRQ, sleeping 100ms between retries while the
	 * hypervisor reports busy (rc == 0 skips the first sleep). */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Start from a clean queue before registering it again. */
	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}
/**
 * tpm_ibmvtpm_resume - Resume from suspend
 *
 * @dev:	device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	/* Re-enable the CRQ, sleeping 100ms between retries while the
	 * hypervisor reports busy or in-progress. */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	/* Redo the CRQ init handshake with the hypervisor. */
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}
  363. static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
  364. {
  365. return (status == 0);
  366. }
/*
 * TPM operations. The vTPM has no status register or cancel mechanism,
 * so status/cancel/req_* are stubs; command completion is signalled
 * through the wait queue by the interrupt handler instead.
 */
static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};
/* Power-management callbacks, referenced by ibmvtpm_driver.pm below. */
static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};
/**
 * ibmvtpm_crq_get_next - Get next responded crq
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return: vtpm crq pointer or NULL.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	/* VTPM_MSG_RES in the valid byte marks an entry the hypervisor
	 * has filled in. */
	if (crq->valid & VTPM_MSG_RES) {
		/* Advance the ring cursor, wrapping at the end. */
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		/* Read barrier: don't read the entry's payload before the
		 * valid byte has been observed set (pairs with the
		 * hypervisor's publication of the entry). */
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}
  399. /**
  400. * ibmvtpm_crq_process - Process responded crq
  401. *
  402. * @crq: crq to be processed
  403. * @ibmvtpm: vtpm device struct
  404. *
  405. */
  406. static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
  407. struct ibmvtpm_dev *ibmvtpm)
  408. {
  409. int rc = 0;
  410. switch (crq->valid) {
  411. case VALID_INIT_CRQ:
  412. switch (crq->msg) {
  413. case INIT_CRQ_RES:
  414. dev_info(ibmvtpm->dev, "CRQ initialized\n");
  415. rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
  416. if (rc)
  417. dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
  418. return;
  419. case INIT_CRQ_COMP_RES:
  420. dev_info(ibmvtpm->dev,
  421. "CRQ initialization completed\n");
  422. return;
  423. default:
  424. dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
  425. return;
  426. }
  427. case IBMVTPM_VALID_CMD:
  428. switch (crq->msg) {
  429. case VTPM_GET_RTCE_BUFFER_SIZE_RES:
  430. if (be16_to_cpu(crq->len) <= 0) {
  431. dev_err(ibmvtpm->dev, "Invalid rtce size\n");
  432. return;
  433. }
  434. ibmvtpm->rtce_size = be16_to_cpu(crq->len);
  435. ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
  436. GFP_ATOMIC);
  437. if (!ibmvtpm->rtce_buf) {
  438. dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
  439. return;
  440. }
  441. ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
  442. ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
  443. DMA_BIDIRECTIONAL);
  444. if (dma_mapping_error(ibmvtpm->dev,
  445. ibmvtpm->rtce_dma_handle)) {
  446. kfree(ibmvtpm->rtce_buf);
  447. ibmvtpm->rtce_buf = NULL;
  448. dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
  449. }
  450. return;
  451. case VTPM_GET_VERSION_RES:
  452. ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
  453. return;
  454. case VTPM_TPM_COMMAND_RES:
  455. /* len of the data in rtce buffer */
  456. ibmvtpm->res_len = be16_to_cpu(crq->len);
  457. ibmvtpm->tpm_processing_cmd = false;
  458. wake_up_interruptible(&ibmvtpm->wq);
  459. return;
  460. default:
  461. return;
  462. }
  463. }
  464. return;
  465. }
/**
 * ibmvtpm_interrupt -	Interrupt handler
 *
 * @irq:		irq number to handle
 * @vtpm_instance:	vtpm that received interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 **/
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* while loop is needed for initial setup (get version and
	 * get rtce_size). There should be only one tpm request at any
	 * given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		/* Hand the ring slot back by clearing its valid byte. */
		crq->valid = 0;
		/* Write barrier: the slot must appear consumed only after
		 * the payload reads in ibmvtpm_crq_process() are done
		 * (pairs with the smp_rmb() in ibmvtpm_crq_get_next()). */
		smp_wmb();
	}
	return IRQ_HANDLED;
}
  490. /**
  491. * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
  492. *
  493. * @vio_dev: vio device struct
  494. * @id: vio device id struct
  495. *
  496. * Return:
  497. * 0 on success.
  498. * Non-zero on failure.
  499. */
  500. static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
  501. const struct vio_device_id *id)
  502. {
  503. struct ibmvtpm_dev *ibmvtpm;
  504. struct device *dev = &vio_dev->dev;
  505. struct ibmvtpm_crq_queue *crq_q;
  506. struct tpm_chip *chip;
  507. int rc = -ENOMEM, rc1;
  508. chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
  509. if (IS_ERR(chip))
  510. return PTR_ERR(chip);
  511. ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
  512. if (!ibmvtpm) {
  513. dev_err(dev, "kzalloc for ibmvtpm failed\n");
  514. goto cleanup;
  515. }
  516. ibmvtpm->dev = dev;
  517. ibmvtpm->vdev = vio_dev;
  518. crq_q = &ibmvtpm->crq_queue;
  519. crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
  520. if (!crq_q->crq_addr) {
  521. dev_err(dev, "Unable to allocate memory for crq_addr\n");
  522. goto cleanup;
  523. }
  524. crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
  525. ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
  526. CRQ_RES_BUF_SIZE,
  527. DMA_BIDIRECTIONAL);
  528. if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
  529. dev_err(dev, "dma mapping failed\n");
  530. goto cleanup;
  531. }
  532. rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
  533. ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
  534. if (rc == H_RESOURCE)
  535. rc = ibmvtpm_reset_crq(ibmvtpm);
  536. if (rc) {
  537. dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
  538. goto reg_crq_cleanup;
  539. }
  540. rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
  541. tpm_ibmvtpm_driver_name, ibmvtpm);
  542. if (rc) {
  543. dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
  544. goto init_irq_cleanup;
  545. }
  546. rc = vio_enable_interrupts(vio_dev);
  547. if (rc) {
  548. dev_err(dev, "Error %d enabling interrupts\n", rc);
  549. goto init_irq_cleanup;
  550. }
  551. init_waitqueue_head(&ibmvtpm->wq);
  552. crq_q->index = 0;
  553. dev_set_drvdata(&chip->dev, ibmvtpm);
  554. spin_lock_init(&ibmvtpm->rtce_lock);
  555. rc = ibmvtpm_crq_send_init(ibmvtpm);
  556. if (rc)
  557. goto init_irq_cleanup;
  558. rc = ibmvtpm_crq_get_version(ibmvtpm);
  559. if (rc)
  560. goto init_irq_cleanup;
  561. rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
  562. if (rc)
  563. goto init_irq_cleanup;
  564. return tpm_chip_register(chip);
  565. init_irq_cleanup:
  566. do {
  567. rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
  568. } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
  569. reg_crq_cleanup:
  570. dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
  571. DMA_BIDIRECTIONAL);
  572. cleanup:
  573. if (ibmvtpm) {
  574. if (crq_q->crq_addr)
  575. free_page((unsigned long)crq_q->crq_addr);
  576. kfree(ibmvtpm);
  577. }
  578. return rc;
  579. }
/* vio bus driver glue tying the probe/remove/PM entry points together. */
static struct vio_driver ibmvtpm_driver = {
	.id_table	 = tpm_ibmvtpm_device_table,
	.probe		 = tpm_ibmvtpm_probe,
	.remove		 = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name		 = tpm_ibmvtpm_driver_name,
	.pm		 = &tpm_ibmvtpm_pm_ops,
};
  588. /**
  589. * ibmvtpm_module_init - Initialize ibm vtpm module.
  590. *
  591. *
  592. * Return:
  593. * 0 on success.
  594. * Non-zero on failure.
  595. */
  596. static int __init ibmvtpm_module_init(void)
  597. {
  598. return vio_register_driver(&ibmvtpm_driver);
  599. }
/**
 * ibmvtpm_module_exit - Tear down ibm vtpm module.
 */
static void __exit ibmvtpm_module_exit(void)
{
	/* Unregistering triggers tpm_ibmvtpm_remove() for bound devices. */
	vio_unregister_driver(&ibmvtpm_driver);
}
/* Module entry/exit points and metadata. */
module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");