tpm_tis.c
/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"

enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};

enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};

enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};

enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};

#define TPM_ACCESS(l)		(0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)	(0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)	(0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)	(0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)	(0x0014 | ((l) << 12))
#define TPM_STS(l)		(0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l)	(0x0024 | ((l) << 12))
#define TPM_DID_VID(l)		(0x0F00 | ((l) << 12))
#define TPM_RID(l)		(0x0F04 | ((l) << 12))
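
/*
 * Each locality has its own 4 KB register page: the (l) << 12 term above
 * selects that page, so locality 0 sits at TIS_MEM_BASE + 0x0000, locality 1
 * at + 0x1000, and so on.  Five localities account for the 0x5000 mapping
 * length used by this driver.
 */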

static LIST_HEAD(tis_chips);
static DEFINE_MUTEX(tis_lock);

#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
	struct acpi_device *acpi = pnp_acpi_device(dev);
	struct acpi_hardware_id *id;

	if (!acpi)
		return 0;

	list_for_each_entry(id, &acpi->pnp.ids, list) {
		if (!strcmp("INTC0102", id->id))
			return 1;
	}

	return 0;
}
#else
static inline int is_itpm(struct pnp_dev *dev)
{
	return 0;
}
#endif

/* Before we attempt to access the TPM we must see that the valid bit is set.
 * The specification says that this bit is 0 at reset and remains 0 until the
 * 'TPM has gone through its self test and initialization and has established
 * correct values in the other bits.' */
static int wait_startup(struct tpm_chip *chip, int l)
{
	unsigned long stop = jiffies + chip->vendor.timeout_a;
	do {
		if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		    TPM_ACCESS_VALID)
			return 0;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -1;
}

static int check_locality(struct tpm_chip *chip, int l)
{
	if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
	     (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
		return chip->vendor.locality = l;

	return -1;
}

static void release_locality(struct tpm_chip *chip, int l, int force)
{
	if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
		      (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
	    (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
		iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
			 chip->vendor.iobase + TPM_ACCESS(l));
}

static int request_locality(struct tpm_chip *chip, int l)
{
	unsigned long stop, timeout;
	long rc;

	if (check_locality(chip, l) >= 0)
		return l;

	iowrite8(TPM_ACCESS_REQUEST_USE,
		 chip->vendor.iobase + TPM_ACCESS(l));

	stop = jiffies + chip->vendor.timeout_a;

	if (chip->vendor.irq) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -1;
		rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
						      (check_locality
						       (chip, l) >= 0),
						      timeout);
		if (rc > 0)
			return l;
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		/* wait for the locality to be granted */
		do {
			if (check_locality(chip, l) >= 0)
				return l;
			msleep(TPM_TIMEOUT);
		} while (time_before(jiffies, stop));
	}
	return -1;
}

static u8 tpm_tis_status(struct tpm_chip *chip)
{
	return ioread8(chip->vendor.iobase +
		       TPM_STS(chip->vendor.locality));
}

static void tpm_tis_ready(struct tpm_chip *chip)
{
	/* this causes the current command to be aborted */
	iowrite8(TPM_STS_COMMAND_READY,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}
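
/*
 * The burst count lives in bits 8..23 of the per-locality STS register (the
 * two bytes read at TPM_STS + 1 and + 2 below).  It reports how many bytes
 * the TPM can accept or return through the data FIFO without the driver
 * having to poll the status register again.
 */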
static int get_burstcount(struct tpm_chip *chip)
{
	unsigned long stop;
	int burstcnt;

	/* wait for burstcount */
	/* which timeout value, spec has 2 answers (c & d) */
	stop = jiffies + chip->vendor.timeout_d;
	do {
		burstcnt = ioread8(chip->vendor.iobase +
				   TPM_STS(chip->vendor.locality) + 1);
		burstcnt += ioread8(chip->vendor.iobase +
				    TPM_STS(chip->vendor.locality) +
				    2) << 8;
		if (burstcnt)
			return burstcnt;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));
	return -EBUSY;
}

static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0, burstcnt;
	while (size < count &&
	       wait_for_tpm_stat(chip,
				 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
				 chip->vendor.timeout_c,
				 &chip->vendor.read_queue, true)
	       == 0) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && size < count; burstcnt--)
			buf[size++] = ioread8(chip->vendor.iobase +
					      TPM_DATA_FIFO(chip->vendor.
							    locality));
	}
	return size;
}

static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int size = 0;
	int expected, status;

	if (count < TPM_HEADER_SIZE) {
		size = -EIO;
		goto out;
	}

	/* read first 10 bytes, including tag, paramsize, and result */
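	/* The TPM 1.2 response header is TPM_HEADER_SIZE (10) bytes: a 2-byte
	 * tag, a 4-byte big-endian total length ("paramsize", picked up from
	 * buf + 2 below) and a 4-byte return code. */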
	if ((size =
	     recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
		dev_err(chip->dev, "Unable to read header\n");
		goto out;
	}

	expected = be32_to_cpu(*(__be32 *) (buf + 2));
	if (expected > count) {
		size = -EIO;
		goto out;
	}

	if ((size +=
	     recv_data(chip, &buf[TPM_HEADER_SIZE],
		       expected - TPM_HEADER_SIZE)) < expected) {
		dev_err(chip->dev, "Unable to read remainder of result\n");
		size = -ETIME;
		goto out;
	}

	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			  &chip->vendor.int_queue, false);
	status = tpm_tis_status(chip);
	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
		dev_err(chip->dev, "Error left over data\n");
		size = -EIO;
		goto out;
	}

out:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return size;
}

static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
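
/*
 * When built as a module, the workaround can also be forced at load time,
 * e.g. "modprobe tpm_tis itpm=1" (or "tpm_tis.itpm=1" on the kernel command
 * line when built in); probe_itpm() below tries to detect the quirk
 * automatically when the flag is not set.
 */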

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc, status, burstcnt;
	size_t count = 0;

	if (request_locality(chip, 0) < 0)
		return -EBUSY;

	status = tpm_tis_status(chip);
	if ((status & TPM_STS_COMMAND_READY) == 0) {
		tpm_tis_ready(chip);
		if (wait_for_tpm_stat
		    (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
		     &chip->vendor.int_queue, false) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}

	while (count < len - 1) {
		burstcnt = get_burstcount(chip);
		for (; burstcnt > 0 && count < len - 1; burstcnt--) {
			iowrite8(buf[count], chip->vendor.iobase +
				 TPM_DATA_FIFO(chip->vendor.locality));
			count++;
		}

		wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
				  &chip->vendor.int_queue, false);
		status = tpm_tis_status(chip);
		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
			rc = -EIO;
			goto out_err;
		}
	}

	/* write last byte */
	iowrite8(buf[count],
		 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
			  &chip->vendor.int_queue, false);
	status = tpm_tis_status(chip);
	if ((status & TPM_STS_DATA_EXPECT) != 0) {
		rc = -EIO;
		goto out_err;
	}

	return 0;

out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

/*
 * If interrupts are used (signaled by an irq set in the vendor structure)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
	int rc;
	u32 ordinal;

	rc = tpm_tis_send_data(chip, buf, len);
	if (rc < 0)
		return rc;

	/* go and do it */
	iowrite8(TPM_STS_GO,
		 chip->vendor.iobase + TPM_STS(chip->vendor.locality));

	if (chip->vendor.irq) {
		ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
		if (wait_for_tpm_stat
		    (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
		     tpm_calc_ordinal_duration(chip, ordinal),
		     &chip->vendor.read_queue, false) < 0) {
			rc = -ETIME;
			goto out_err;
		}
	}
	return len;

out_err:
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);
	return rc;
}

/*
 * Early probing for iTPM with STS_DATA_EXPECT flaw.
 * Try sending command without itpm flag set and if that
 * fails, repeat with itpm flag set.
 */
static int probe_itpm(struct tpm_chip *chip)
{
	int rc = 0;
	u8 cmd_getticks[] = {
		0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
		0x00, 0x00, 0x00, 0xf1
	};
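	/* 0x00c1 = TPM_TAG_RQU_COMMAND, length 0x0000000a (10 bytes),
	 * ordinal 0x000000f1 = GetTicks: a small, side-effect-free command
	 * used here only to exercise the transmit path. */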
	size_t len = sizeof(cmd_getticks);
	bool rem_itpm = itpm;
	u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));

	/* probe only iTPMS */
	if (vendor != TPM_VID_INTEL)
		return 0;

	itpm = false;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0)
		goto out;

	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);

	itpm = true;

	rc = tpm_tis_send_data(chip, cmd_getticks, len);
	if (rc == 0) {
		dev_info(chip->dev, "Detected an iTPM.\n");
		rc = 1;
	} else
		rc = -EFAULT;

out:
	itpm = rem_itpm;
	tpm_tis_ready(chip);
	release_locality(chip, chip->vendor.locality, 0);

	return rc;
}

static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
{
	switch (chip->vendor.manufacturer_id) {
	case TPM_VID_WINBOND:
		return ((status == TPM_STS_VALID) ||
			(status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
	case TPM_VID_STM:
		return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
	default:
		return (status == TPM_STS_COMMAND_READY);
	}
}

static const struct tpm_class_ops tpm_tis = {
	.status = tpm_tis_status,
	.recv = tpm_tis_recv,
	.send = tpm_tis_send,
	.cancel = tpm_tis_ready,
	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
	.req_canceled = tpm_tis_req_canceled,
};

static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	chip->vendor.probed_irq = irq;

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
	struct tpm_chip *chip = dev_id;
	u32 interrupt;
	int i;

	interrupt = ioread32(chip->vendor.iobase +
			     TPM_INT_STATUS(chip->vendor.locality));

	if (interrupt == 0)
		return IRQ_NONE;

	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
		wake_up_interruptible(&chip->vendor.read_queue);
	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
		for (i = 0; i < 5; i++)
			if (check_locality(chip, i) >= 0)
				break;
	if (interrupt &
	    (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
	     TPM_INTF_CMD_READY_INT))
		wake_up_interruptible(&chip->vendor.int_queue);

	/* Clear interrupts handled with TPM_EOI */
	iowrite32(interrupt,
		  chip->vendor.iobase +
		  TPM_INT_STATUS(chip->vendor.locality));
	ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
	return IRQ_HANDLED;
}

static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
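
/*
 * Loading with interrupts=0 (e.g. "modprobe tpm_tis interrupts=0") leaves
 * chip->vendor.irq at zero, so the TPM core falls back to polling the status
 * register instead of sleeping on the wait queues above.
 */
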
static int tpm_tis_init(struct device *dev, resource_size_t start,
			resource_size_t len, unsigned int irq)
{
	u32 vendor, intfcaps, intmask;
	int rc, i, irq_s, irq_e, probe;
	struct tpm_chip *chip;

	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
		return -ENODEV;

	chip->vendor.iobase = ioremap(start, len);
	if (!chip->vendor.iobase) {
		rc = -EIO;
		goto out_err;
	}

	/* Default timeouts */
	chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
	chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);

	if (wait_startup(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	if (request_locality(chip, 0) != 0) {
		rc = -ENODEV;
		goto out_err;
	}

	vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
	chip->vendor.manufacturer_id = vendor;

	dev_info(dev,
		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));

	if (!itpm) {
		probe = probe_itpm(chip);
		if (probe < 0) {
			rc = -ENODEV;
			goto out_err;
		}
		itpm = !!probe;
	}

	if (itpm)
		dev_info(dev, "Intel iTPM workaround enabled\n");

	/* Figure out the capabilities */
	intfcaps =
	    ioread32(chip->vendor.iobase +
		     TPM_INTF_CAPS(chip->vendor.locality));
	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
		intfcaps);
	if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
		dev_dbg(dev, "\tBurst Count Static\n");
	if (intfcaps & TPM_INTF_CMD_READY_INT)
		dev_dbg(dev, "\tCommand Ready Int Support\n");
	if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
		dev_dbg(dev, "\tInterrupt Edge Falling\n");
	if (intfcaps & TPM_INTF_INT_EDGE_RISING)
		dev_dbg(dev, "\tInterrupt Edge Rising\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
		dev_dbg(dev, "\tInterrupt Level Low\n");
	if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
		dev_dbg(dev, "\tInterrupt Level High\n");
	if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
		dev_dbg(dev, "\tLocality Change Int Support\n");
	if (intfcaps & TPM_INTF_STS_VALID_INT)
		dev_dbg(dev, "\tSts Valid Int Support\n");
	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
		dev_dbg(dev, "\tData Avail Int Support\n");

	/* get the timeouts before testing for irqs */
	if (tpm_get_timeouts(chip)) {
		dev_err(dev, "Could not get TPM timeouts and durations\n");
		rc = -ENODEV;
		goto out_err;
	}

	if (tpm_do_selftest(chip)) {
		dev_err(dev, "TPM self test failed\n");
		rc = -ENODEV;
		goto out_err;
	}

	/* INTERRUPT Setup */
	init_waitqueue_head(&chip->vendor.read_queue);
	init_waitqueue_head(&chip->vendor.int_queue);

	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT;

	iowrite32(intmask,
		  chip->vendor.iobase +
		  TPM_INT_ENABLE(chip->vendor.locality));
	if (interrupts)
		chip->vendor.irq = irq;
	if (interrupts && !chip->vendor.irq) {
		irq_s =
		    ioread8(chip->vendor.iobase +
			    TPM_INT_VECTOR(chip->vendor.locality));
		if (irq_s) {
			irq_e = irq_s;
		} else {
			irq_s = 3;
			irq_e = 15;
		}
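
		/* Probe the candidate vector(s): install tis_int_probe() on
		 * each one, generate a test interrupt via tpm_gen_interrupt(),
		 * and keep the first vector that actually fires (recorded in
		 * probed_irq by tis_int_probe()). */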
		for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
			iowrite8(i, chip->vendor.iobase +
				 TPM_INT_VECTOR(chip->vendor.locality));
			if (request_irq
			    (i, tis_int_probe, IRQF_SHARED,
			     chip->vendor.miscdev.name, chip) != 0) {
				dev_info(chip->dev,
					 "Unable to request irq: %d for probe\n",
					 i);
				continue;
			}

			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));

			chip->vendor.probed_irq = 0;

			/* Generate Interrupts */
			tpm_gen_interrupt(chip);

			chip->vendor.irq = chip->vendor.probed_irq;

			/* free_irq will call into tis_int_probe;
			   clear all irqs we haven't seen while doing
			   tpm_gen_interrupt */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn off */
			iowrite32(intmask,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
			free_irq(i, chip);
		}
	}
	if (chip->vendor.irq) {
		iowrite8(chip->vendor.irq,
			 chip->vendor.iobase +
			 TPM_INT_VECTOR(chip->vendor.locality));
		if (request_irq
		    (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
		     chip->vendor.miscdev.name, chip) != 0) {
			dev_info(chip->dev,
				 "Unable to request irq: %d for use\n",
				 chip->vendor.irq);
			chip->vendor.irq = 0;
		} else {
			/* Clear all existing */
			iowrite32(ioread32
				  (chip->vendor.iobase +
				   TPM_INT_STATUS(chip->vendor.locality)),
				  chip->vendor.iobase +
				  TPM_INT_STATUS(chip->vendor.locality));

			/* Turn on */
			iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
				  chip->vendor.iobase +
				  TPM_INT_ENABLE(chip->vendor.locality));
		}
	}

	INIT_LIST_HEAD(&chip->vendor.list);
	mutex_lock(&tis_lock);
	list_add(&chip->vendor.list, &tis_chips);
	mutex_unlock(&tis_lock);

	return 0;
out_err:
	if (chip->vendor.iobase)
		iounmap(chip->vendor.iobase);
	tpm_remove_hardware(chip->dev);
	return rc;
}

#ifdef CONFIG_PM_SLEEP
static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
	u32 intmask;

	/* reenable interrupts that device may have lost or
	   BIOS/firmware may have disabled */
	iowrite8(chip->vendor.irq, chip->vendor.iobase +
		 TPM_INT_VECTOR(chip->vendor.locality));

	intmask =
	    ioread32(chip->vendor.iobase +
		     TPM_INT_ENABLE(chip->vendor.locality));

	intmask |= TPM_INTF_CMD_READY_INT
	    | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
	    | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;

	iowrite32(intmask,
		  chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}

static int tpm_tis_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	int ret;

	if (chip->vendor.irq)
		tpm_tis_reenable_interrupts(chip);

	ret = tpm_pm_resume(dev);
	if (!ret)
		tpm_do_selftest(chip);

	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);

#ifdef CONFIG_PNP
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
			    const struct pnp_device_id *pnp_id)
{
	resource_size_t start, len;
	unsigned int irq = 0;

	start = pnp_mem_start(pnp_dev, 0);
	len = pnp_mem_len(pnp_dev, 0);

	if (pnp_irq_valid(pnp_dev, 0))
		irq = pnp_irq(pnp_dev, 0);
	else
		interrupts = false;

	if (is_itpm(pnp_dev))
		itpm = true;

	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}

static struct pnp_device_id tpm_pnp_tbl[] = {
	{"PNP0C31", 0},		/* TPM */
	{"ATM1200", 0},		/* Atmel */
	{"IFX0102", 0},		/* Infineon */
	{"BCM0101", 0},		/* Broadcom */
	{"BCM0102", 0},		/* Broadcom */
	{"NSC1200", 0},		/* National */
	{"ICO0102", 0},		/* Intel */
	/* Add new here */
	{"", 0},		/* User Specified */
	{"", 0}			/* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);

static void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
	struct tpm_chip *chip = pnp_get_drvdata(dev);

	tpm_dev_vendor_release(chip);

	kfree(chip);
}

static struct pnp_driver tis_pnp_driver = {
	.name = "tpm_tis",
	.id_table = tpm_pnp_tbl,
	.probe = tpm_tis_pnp_init,
	.remove = tpm_tis_pnp_remove,
	.driver = {
		.pm = &tpm_tis_pm,
	},
};

#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
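
/*
 * The hid parameter fills the "User Specified" slot of tpm_pnp_tbl above, so
 * one extra PnP/ACPI ID can be matched at load time, e.g.
 * "modprobe tpm_tis hid=ABCD1234" (ABCD1234 being a placeholder ID).
 */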
#endif

static struct platform_driver tis_drv = {
	.driver = {
		.name = "tpm_tis",
		.owner = THIS_MODULE,
		.pm = &tpm_tis_pm,
	},
};

static struct platform_device *pdev;

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
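
/*
 * With force=1 the PnP/ACPI match is skipped and a bare platform device is
 * registered instead, so tpm_tis_init() probes the fixed TIS window at
 * TIS_MEM_BASE/TIS_MEM_LEN; no firmware-provided IRQ is passed in, leaving
 * any interrupt line to the probe loop in tpm_tis_init().
 */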

static int __init init_tis(void)
{
	int rc;
#ifdef CONFIG_PNP
	if (!force)
		return pnp_register_driver(&tis_pnp_driver);
#endif

	rc = platform_driver_register(&tis_drv);
	if (rc < 0)
		return rc;
	pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
	if (IS_ERR(pdev)) {
		rc = PTR_ERR(pdev);
		goto err_dev;
	}
	rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0);
	if (rc)
		goto err_init;
	return 0;
err_init:
	platform_device_unregister(pdev);
err_dev:
	platform_driver_unregister(&tis_drv);
	return rc;
}

static void __exit cleanup_tis(void)
{
	struct tpm_vendor_specific *i, *j;
	struct tpm_chip *chip;

	mutex_lock(&tis_lock);
	list_for_each_entry_safe(i, j, &tis_chips, list) {
		chip = to_tpm_chip(i);
		tpm_remove_hardware(chip->dev);
		iowrite32(~TPM_GLOBAL_INT_ENABLE &
			  ioread32(chip->vendor.iobase +
				   TPM_INT_ENABLE(chip->vendor.
						  locality)),
			  chip->vendor.iobase +
			  TPM_INT_ENABLE(chip->vendor.locality));
		release_locality(chip, chip->vendor.locality, 1);
		if (chip->vendor.irq)
			free_irq(chip->vendor.irq, chip);
		iounmap(i->iobase);
		list_del(&i->list);
	}
	mutex_unlock(&tis_lock);
#ifdef CONFIG_PNP
	if (!force) {
		pnp_unregister_driver(&tis_pnp_driver);
		return;
	}
#endif
	platform_device_unregister(pdev);
	platform_driver_unregister(&tis_drv);
}

module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");