tpm_ibmvtpm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2020 IBM Corporation
 *
 * Author: Ashley Lai <[email protected]>
 *
 * Maintained by: <[email protected]>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
        { "IBM,vtpm", "IBM,vtpm"},
        { "IBM,vtpm", "IBM,vtpm20"},
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
/**
 * ibmvtpm_send_crq_word() - Send a CRQ request
 * @vdev:	vio device struct
 * @w1:		pre-constructed first word of tpm crq (second word is reserved)
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
{
        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
}
/**
 * ibmvtpm_send_crq() - Send a CRQ request
 *
 * @vdev:	vio device struct
 * @valid:	Valid field
 * @msg:	Type field
 * @len:	Length field
 * @data:	Data field
 *
 * The ibmvtpm crq is defined as follows:
 *
 * Byte  |   0   |   1   |   2   |   3   |   4   |   5   |   6   |   7
 * -----------------------------------------------------------------------
 * Word0 | Valid | Type  |    Length     |              Data
 * -----------------------------------------------------------------------
 * Word1 |                           Reserved
 * -----------------------------------------------------------------------
 *
 * Which matches the following structure (on bigendian host):
 *
 * struct ibmvtpm_crq {
 *         u8 valid;
 *         u8 msg;
 *         __be16 len;
 *         __be32 data;
 *         __be64 reserved;
 * } __attribute__((packed, aligned(8)));
 *
 * However, the value is passed in a register so just compute the numeric value
 * to load into the register avoiding byteswap altogether. Endian only affects
 * memory loads and stores - registers are internally represented the same.
 *
 * Return:
 *	0 (H_SUCCESS) - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev,
                            u8 valid, u8 msg, u16 len, u32 data)
{
        u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
                 (u64)data;
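
        /*
         * Illustration only (hypothetical values, not taken from the driver):
         * valid = 0x80, msg = 0x04, len = 0x000a, data = 0x12345678 packs to
         * w1 = 0x8004000a12345678, i.e. the same bytes a big-endian store of
         * struct ibmvtpm_crq would place in memory.
         */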
        return ibmvtpm_send_crq_word(vdev, w1);
}
/**
 * tpm_ibmvtpm_recv - Receive data after send
 *
 * @chip:	tpm chip struct
 * @buf:	buffer to read
 * @count:	size of buffer
 *
 * Return:
 *	Number of bytes read
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        u16 len;

        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }

        len = ibmvtpm->res_len;

        if (count < len) {
                dev_err(ibmvtpm->dev,
                        "Invalid size in recv: count=%zd, crq_size=%d\n",
                        count, len);
                return -EIO;
        }

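        /*
         * The response was written back into the shared RTCE buffer and its
         * length recorded by ibmvtpm_crq_process(); copy it out and clear the
         * buffer for the next command.
         */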
        spin_lock(&ibmvtpm->rtce_lock);
        memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
        memset(ibmvtpm->rtce_buf, 0, len);
        ibmvtpm->res_len = 0;
        spin_unlock(&ibmvtpm->rtce_lock);
        return len;
}
/**
 * ibmvtpm_crq_send_init - Send a CRQ initialize message
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "%s failed rc=%d\n", __func__, rc);

        return rc;
}
/**
 * tpm_ibmvtpm_resume - Resume from suspend
 *
 * @dev:	device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        int rc = 0;

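        /* Re-enable the CRQ with the hypervisor, backing off while it reports busy. */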
        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_ENABLE_CRQ,
                                        ibmvtpm->vdev->unit_address);
        } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

        if (rc) {
                dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
                return rc;
        }

        rc = vio_enable_interrupts(ibmvtpm->vdev);
        if (rc) {
                dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
                return rc;
        }

        rc = ibmvtpm_crq_send_init(ibmvtpm);
        if (rc)
                dev_err(dev, "Error send_init rc=%d\n", rc);

        return rc;
}
/**
 * tpm_ibmvtpm_send() - Send a TPM command
 * @chip:	tpm chip struct
 * @buf:	buffer contains data to send
 * @count:	size of buffer
 *
 * Return:
 *	0 on success,
 *	-errno on error
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        bool retry = true;
        int rc, sig;

        if (!ibmvtpm->rtce_buf) {
                dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
                return 0;
        }

        if (count > ibmvtpm->rtce_size) {
                dev_err(ibmvtpm->dev,
                        "Invalid size in send: count=%zd, rtce_size=%d\n",
                        count, ibmvtpm->rtce_size);
                return -EIO;
        }

        if (ibmvtpm->tpm_processing_cmd) {
                dev_info(ibmvtpm->dev,
                         "Need to wait for TPM to finish\n");
                /* wait for previous command to finish */
                sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
                if (sig)
                        return -EINTR;
        }

        spin_lock(&ibmvtpm->rtce_lock);
        ibmvtpm->res_len = 0;
        memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);

        /*
         * set the processing flag before the Hcall, since we may get the
         * result (interrupt) before even being able to check rc.
         */
        ibmvtpm->tpm_processing_cmd = 1;
again:
        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                              IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
                              count, ibmvtpm->rtce_dma_handle);
        if (rc != H_SUCCESS) {
                /*
                 * H_CLOSED can be returned after LPM resume.  Call
                 * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
                 * ibmvtpm_send_crq() once before failing.
                 */
                if (rc == H_CLOSED && retry) {
                        tpm_ibmvtpm_resume(ibmvtpm->dev);
                        retry = false;
                        goto again;
                }
                dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
                ibmvtpm->tpm_processing_cmd = 0;
        }

        spin_unlock(&ibmvtpm->rtce_lock);
        return 0;
}
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
        return;
}

static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);

        return ibmvtpm->tpm_processing_cmd;
}
/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                              IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

        return rc;
}
/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
 *			   - Note that this is vtpm version and not tpm version
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                              IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_get_version failed rc=%d\n", rc);

        return rc;
}
/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
        int rc;

        rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

        return rc;
}
/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:	vio device struct
 */
static void tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
        struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        int rc = 0;

        tpm_chip_unregister(chip);

        free_irq(vdev->irq, ibmvtpm);

        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
                         CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
        free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

        if (ibmvtpm->rtce_buf) {
                dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
                                 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
                kfree(ibmvtpm->rtce_buf);
        }

        kfree(ibmvtpm);
        /* For tpm_ibmvtpm_get_desired_dma */
        dev_set_drvdata(&vdev->dev, NULL);
}
/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev:	vio device struct
 *
 * Return:
 *	Number of bytes the driver needs to DMA map.
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
        struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
        struct ibmvtpm_dev *ibmvtpm;

        /*
         * ibmvtpm initializes at probe time, so the data we are
         * asking for may not be set yet. Estimate that 4K is required
         * for the TCE-mapped buffer in addition to the CRQ.
         */
        if (chip)
                ibmvtpm = dev_get_drvdata(&chip->dev);
        else
                return CRQ_RES_BUF_SIZE + PAGE_SIZE;

        return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}
/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev:	device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
        struct tpm_chip *chip = dev_get_drvdata(dev);
        struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
        int rc = 0;

        rc = ibmvtpm_send_crq(ibmvtpm->vdev,
                              IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
        if (rc != H_SUCCESS)
                dev_err(ibmvtpm->dev,
                        "tpm_ibmvtpm_suspend failed rc=%d\n", rc);

        return rc;
}
/**
 * ibmvtpm_reset_crq - Reset CRQ
 *
 * @ibmvtpm:	ibm vtpm struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
        int rc = 0;

        do {
                if (rc)
                        msleep(100);
                rc = plpar_hcall_norets(H_FREE_CRQ,
                                        ibmvtpm->vdev->unit_address);
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

        memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
        ibmvtpm->crq_queue.index = 0;

        return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
                                  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}
static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
        return (status == 0);
}

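/*
 * The TPM core polls .status after .send and treats the command as complete
 * when (status & req_complete_mask) == req_complete_val.  With the values
 * below that means "complete" once tpm_processing_cmd has been cleared by
 * ibmvtpm_crq_process() on arrival of the VTPM_TPM_COMMAND_RES response.
 */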
static const struct tpm_class_ops tpm_ibmvtpm = {
        .recv = tpm_ibmvtpm_recv,
        .send = tpm_ibmvtpm_send,
        .cancel = tpm_ibmvtpm_cancel,
        .status = tpm_ibmvtpm_status,
        .req_complete_mask = 1,
        .req_complete_val = 0,
        .req_canceled = tpm_ibmvtpm_req_canceled,
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
        .suspend = tpm_ibmvtpm_suspend,
        .resume = tpm_ibmvtpm_resume,
};
/**
 * ibmvtpm_crq_get_next - Get next responded crq
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return: vtpm crq pointer or NULL.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
        struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
        struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

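        /*
         * Entries with VTPM_MSG_RES set in the valid field are responses from
         * the hypervisor; consume the entry and advance the ring index,
         * wrapping at num_entry.
         */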
        if (crq->valid & VTPM_MSG_RES) {
                if (++crq_q->index == crq_q->num_entry)
                        crq_q->index = 0;
                smp_rmb();
        } else
                crq = NULL;
        return crq;
}
/**
 * ibmvtpm_crq_process - Process responded crq
 *
 * @crq:	crq to be processed
 * @ibmvtpm:	vtpm device struct
 *
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
                                struct ibmvtpm_dev *ibmvtpm)
{
        int rc = 0;

        switch (crq->valid) {
        case VALID_INIT_CRQ:
                switch (crq->msg) {
                case INIT_CRQ_RES:
                        dev_info(ibmvtpm->dev, "CRQ initialized\n");
                        rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
                        if (rc)
                                dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
                        return;
                case INIT_CRQ_COMP_RES:
                        dev_info(ibmvtpm->dev,
                                 "CRQ initialization completed\n");
                        return;
                default:
                        dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
                        return;
                }
        case IBMVTPM_VALID_CMD:
                switch (crq->msg) {
                case VTPM_GET_RTCE_BUFFER_SIZE_RES:
                        if (be16_to_cpu(crq->len) <= 0) {
                                dev_err(ibmvtpm->dev, "Invalid rtce size\n");
                                return;
                        }
                        ibmvtpm->rtce_size = be16_to_cpu(crq->len);
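                        /* Called from the interrupt handler, hence GFP_ATOMIC. */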
                        ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
                                                    GFP_ATOMIC);
                        if (!ibmvtpm->rtce_buf) {
                                dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
                                return;
                        }

                        ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
                                        ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
                                        DMA_BIDIRECTIONAL);

                        if (dma_mapping_error(ibmvtpm->dev,
                                              ibmvtpm->rtce_dma_handle)) {
                                kfree(ibmvtpm->rtce_buf);
                                ibmvtpm->rtce_buf = NULL;
                                dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
                        }

                        return;
                case VTPM_GET_VERSION_RES:
                        ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
                        return;
                case VTPM_TPM_COMMAND_RES:
                        /* len of the data in rtce buffer */
                        ibmvtpm->res_len = be16_to_cpu(crq->len);
                        ibmvtpm->tpm_processing_cmd = 0;
                        wake_up_interruptible(&ibmvtpm->wq);
                        return;
                default:
                        return;
                }
        }
        return;
}
/**
 * ibmvtpm_interrupt - Interrupt handler
 *
 * @irq:		irq number to handle
 * @vtpm_instance:	vtpm that received interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 */
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
        struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
        struct ibmvtpm_crq *crq;

        /* while loop is needed for initial setup (get version and
         * get rtce_size). There should be only one tpm request at any
         * given time.
         */
        while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
                ibmvtpm_crq_process(crq, ibmvtpm);
                wake_up_interruptible(&ibmvtpm->crq_queue.wq);
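                /*
                 * Mark the entry as consumed so a later pass around the ring
                 * does not mistake it for a new response.
                 */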
                crq->valid = 0;
                smp_wmb();
        }

        return IRQ_HANDLED;
}
/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 *
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                             const struct vio_device_id *id)
{
        struct ibmvtpm_dev *ibmvtpm;
        struct device *dev = &vio_dev->dev;
        struct ibmvtpm_crq_queue *crq_q;
        struct tpm_chip *chip;
        int rc = -ENOMEM, rc1;

        chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
        if (IS_ERR(chip))
                return PTR_ERR(chip);

        ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
        if (!ibmvtpm) {
                dev_err(dev, "kzalloc for ibmvtpm failed\n");
                goto cleanup;
        }

        ibmvtpm->dev = dev;
        ibmvtpm->vdev = vio_dev;

        crq_q = &ibmvtpm->crq_queue;
        crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
        if (!crq_q->crq_addr) {
                dev_err(dev, "Unable to allocate memory for crq_addr\n");
                goto cleanup;
        }

        crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
        init_waitqueue_head(&crq_q->wq);
        ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
                                                 CRQ_RES_BUF_SIZE,
                                                 DMA_BIDIRECTIONAL);

        if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
                dev_err(dev, "dma mapping failed\n");
                goto cleanup;
        }

        rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
                                ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
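        /*
         * H_RESOURCE indicates a CRQ is already registered for this unit
         * address (e.g. left over from a previous instance); free it and
         * register again via ibmvtpm_reset_crq().
         */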
        if (rc == H_RESOURCE)
                rc = ibmvtpm_reset_crq(ibmvtpm);
        if (rc) {
                dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
                goto reg_crq_cleanup;
        }

        rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
                         tpm_ibmvtpm_driver_name, ibmvtpm);
        if (rc) {
                dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
                goto init_irq_cleanup;
        }

        rc = vio_enable_interrupts(vio_dev);
        if (rc) {
                dev_err(dev, "Error %d enabling interrupts\n", rc);
                goto init_irq_cleanup;
        }

        init_waitqueue_head(&ibmvtpm->wq);

        crq_q->index = 0;

        dev_set_drvdata(&chip->dev, ibmvtpm);

        spin_lock_init(&ibmvtpm->rtce_lock);

        rc = ibmvtpm_crq_send_init(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        rc = ibmvtpm_crq_get_version(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

        rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
        if (rc)
                goto init_irq_cleanup;

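        /*
         * The RTCE buffer is allocated by ibmvtpm_crq_process() when the
         * GET_RTCE_BUFFER_SIZE response arrives; give it up to a second.
         */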
        if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
                                ibmvtpm->rtce_buf != NULL,
                                HZ)) {
                rc = -ENODEV;
                dev_err(dev, "CRQ response timed out\n");
                goto init_irq_cleanup;
        }

        if (!strcmp(id->compat, "IBM,vtpm20"))
                chip->flags |= TPM_CHIP_FLAG_TPM2;

        rc = tpm_get_timeouts(chip);
        if (rc)
                goto init_irq_cleanup;

        if (chip->flags & TPM_CHIP_FLAG_TPM2) {
                rc = tpm2_get_cc_attrs_tbl(chip);
                if (rc)
                        goto init_irq_cleanup;
        }

        return tpm_chip_register(chip);
init_irq_cleanup:
        do {
                rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
        } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
        dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
                         DMA_BIDIRECTIONAL);
cleanup:
        if (ibmvtpm) {
                if (crq_q->crq_addr)
                        free_page((unsigned long)crq_q->crq_addr);
                kfree(ibmvtpm);
        }

        return rc;
}
static struct vio_driver ibmvtpm_driver = {
        .id_table        = tpm_ibmvtpm_device_table,
        .probe           = tpm_ibmvtpm_probe,
        .remove          = tpm_ibmvtpm_remove,
        .get_desired_dma = tpm_ibmvtpm_get_desired_dma,
        .name            = tpm_ibmvtpm_driver_name,
        .pm              = &tpm_ibmvtpm_pm_ops,
};
/**
 * ibmvtpm_module_init - Initialize ibm vtpm module.
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int __init ibmvtpm_module_init(void)
{
        return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Tear down ibm vtpm module.
 */
static void __exit ibmvtpm_module_exit(void)
{
        vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("[email protected]");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");