/* tpm-dev-common.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 IBM Corporation
 * Authors:
 * Leendert van Doorn <[email protected]>
 * Dave Safford <[email protected]>
 * Reiner Sailer <[email protected]>
 * Kylene Hall <[email protected]>
 *
 * Copyright (C) 2013 Obsidian Research Corp
 * Jason Gunthorpe <[email protected]>
 *
 * Device file system interface to the TPM
 */
  15. #include <linux/poll.h>
  16. #include <linux/slab.h>
  17. #include <linux/uaccess.h>
  18. #include <linux/workqueue.h>
  19. #include "tpm.h"
  20. #include "tpm-dev.h"
/* Workqueue used to run commands queued by non-blocking writes. */
static struct workqueue_struct *tpm_dev_wq;

/**
 * tpm_dev_transmit() - prepare the command space, transmit, and commit
 * @chip:	TPM chip the command is sent to
 * @space:	per-file handle/session space for the command
 * @buf:	command buffer; reused in place for the response
 * @bufsiz:	size of @buf
 *
 * Return: length of the response placed in @buf on success, a negative
 * error code otherwise.
 */
static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
				u8 *buf, size_t bufsiz)
{
	struct tpm_header *header = (void *)buf;
	ssize_t ret, len;

	ret = tpm2_prepare_space(chip, space, buf, bufsiz);
	/* If the command is not implemented by the TPM, synthesize a
	 * response with a TPM2_RC_COMMAND_CODE return for user-space.
	 */
	if (ret == -EOPNOTSUPP) {
		header->length = cpu_to_be32(sizeof(*header));
		header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
		header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
						  TSS2_RESMGR_TPM_RC_LAYER);
		/* Hand the synthesized header-only response back as success. */
		ret = sizeof(*header);
	}
	/* Any non-zero ret here (error or synthesized response) skips the
	 * actual transmit; out_rc then returns ret directly.
	 */
	if (ret)
		goto out_rc;

	len = tpm_transmit(chip, buf, bufsiz);
	if (len < 0)
		ret = len;

	/* Commit the space only when the transmit itself succeeded; commit
	 * may adjust len to the final response length.
	 */
	if (!ret)
		ret = tpm2_commit_space(chip, space, buf, &len);

out_rc:
	return ret ? ret : len;
}
/**
 * tpm_dev_async_work() - worker for commands queued by non-blocking writes
 * @work:	&file_priv.async_work
 *
 * Transmits the command stored in priv->data_buffer under buffer_mutex.
 * The outcome (response length or negative error) is stashed in
 * priv->response_length for a later read(), and anyone sleeping on
 * priv->async_wait (poll) is woken.
 */
static void tpm_dev_async_work(struct work_struct *work)
{
	struct file_priv *priv =
		container_of(work, struct file_priv, async_work);
	ssize_t ret;

	mutex_lock(&priv->buffer_mutex);
	priv->command_enqueued = false;

	/* NOTE(review): tpm_try_get_ops() presumably fails once the chip is
	 * being unregistered (see the comment in tpm_common_write()); the
	 * error is reported to user space via the next read().
	 */
	ret = tpm_try_get_ops(priv->chip);
	if (ret) {
		priv->response_length = ret;
		goto out;
	}

	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
			       sizeof(priv->data_buffer));
	tpm_put_ops(priv->chip);

	/*
	 * If ret is > 0 then tpm_dev_transmit returned the size of the
	 * response. If ret is < 0 then tpm_dev_transmit failed and
	 * returned an error code.
	 */
	if (ret != 0) {
		priv->response_length = ret;
		/* Give user space 120 s to collect the result before
		 * tpm_timeout_work() discards it.
		 */
		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
	}

out:
	mutex_unlock(&priv->buffer_mutex);
	wake_up_interruptible(&priv->async_wait);
}
  76. static void user_reader_timeout(struct timer_list *t)
  77. {
  78. struct file_priv *priv = from_timer(priv, t, user_read_timer);
  79. pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
  80. task_tgid_nr(current));
  81. schedule_work(&priv->timeout_work);
  82. }
  83. static void tpm_timeout_work(struct work_struct *work)
  84. {
  85. struct file_priv *priv = container_of(work, struct file_priv,
  86. timeout_work);
  87. mutex_lock(&priv->buffer_mutex);
  88. priv->response_read = true;
  89. priv->response_length = 0;
  90. memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
  91. mutex_unlock(&priv->buffer_mutex);
  92. wake_up_interruptible(&priv->async_wait);
  93. }
  94. void tpm_common_open(struct file *file, struct tpm_chip *chip,
  95. struct file_priv *priv, struct tpm_space *space)
  96. {
  97. priv->chip = chip;
  98. priv->space = space;
  99. priv->response_read = true;
  100. mutex_init(&priv->buffer_mutex);
  101. timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
  102. INIT_WORK(&priv->timeout_work, tpm_timeout_work);
  103. INIT_WORK(&priv->async_work, tpm_dev_async_work);
  104. init_waitqueue_head(&priv->async_wait);
  105. file->private_data = priv;
  106. }
/**
 * tpm_common_read() - read a (possibly partial) buffered TPM response
 * @file:	character device file
 * @buf:	user buffer receiving response bytes
 * @size:	maximum number of bytes to copy
 * @off:	current offset into the buffered response
 *
 * Copies up to @size bytes of the pending response starting at *@off and
 * wipes the bytes that were handed out. Once the response is fully
 * consumed the read timeout and its discard work are cancelled.
 *
 * Return: bytes copied, 0 when nothing is pending, -EFAULT on a failed
 * copy, or a negative error previously stashed in response_length by the
 * async worker.
 */
ssize_t tpm_common_read(struct file *file, char __user *buf,
			size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	ssize_t ret_size = 0;
	int rc;

	mutex_lock(&priv->buffer_mutex);

	if (priv->response_length) {
		priv->response_read = true;

		/* response_length may hold a negative error code from the
		 * async path; min_t() then yields ret_size <= 0 and that
		 * error is returned below.
		 */
		ret_size = min_t(ssize_t, size, priv->response_length);
		if (ret_size <= 0) {
			priv->response_length = 0;
			goto out;
		}

		rc = copy_to_user(buf, priv->data_buffer + *off, ret_size);
		if (rc) {
			/* Do not leave response data around after a failed
			 * copy to user space.
			 */
			memset(priv->data_buffer, 0, TPM_BUFSIZE);
			priv->response_length = 0;
			ret_size = -EFAULT;
		} else {
			/* Wipe what was handed out and advance the offset so
			 * a subsequent read continues where this one stopped.
			 */
			memset(priv->data_buffer + *off, 0, ret_size);
			priv->response_length -= ret_size;
			*off += ret_size;
		}
	}

out:
	if (!priv->response_length) {
		/* Response fully consumed (or dropped): rewind the offset
		 * and stop the deprecated read timeout plus any discard
		 * work it already scheduled.
		 */
		*off = 0;
		del_singleshot_timer_sync(&priv->user_read_timer);
		flush_work(&priv->timeout_work);
	}
	mutex_unlock(&priv->buffer_mutex);

	return ret_size;
}
/**
 * tpm_common_write() - submit a TPM command from user space
 * @file:	character device file
 * @buf:	user buffer holding the marshalled command
 * @size:	number of bytes in @buf
 * @off:	file offset; reset to 0 for the response phase
 *
 * Validates the command header, then either queues it for asynchronous
 * execution (O_NONBLOCK) or transmits it synchronously and buffers the
 * response for a later read().
 *
 * Return: @size on success; -E2BIG, -EBUSY, -EFAULT, -EINVAL or -EPIPE
 * on failure, or a negative code from the transmit path.
 */
ssize_t tpm_common_write(struct file *file, const char __user *buf,
			 size_t size, loff_t *off)
{
	struct file_priv *priv = file->private_data;
	int ret = 0;

	if (size > TPM_BUFSIZE)
		return -E2BIG;

	mutex_lock(&priv->buffer_mutex);

	/* Cannot perform a write until the read has cleared either via
	 * tpm_read or a user_read_timer timeout. This also prevents split
	 * buffered writes from blocking here.
	 */
	if ((!priv->response_read && priv->response_length) ||
	    priv->command_enqueued) {
		ret = -EBUSY;
		goto out;
	}

	if (copy_from_user(priv->data_buffer, buf, size)) {
		ret = -EFAULT;
		goto out;
	}

	/* Reject a write shorter than a command header, or shorter than the
	 * length field at offset 2 of the header claims the command is.
	 */
	if (size < 6 ||
	    size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
		ret = -EINVAL;
		goto out;
	}

	priv->response_length = 0;
	priv->response_read = false;
	*off = 0;

	/*
	 * If in nonblocking mode schedule an async job to send
	 * the command return the size.
	 * In case of error the err code will be returned in
	 * the subsequent read call.
	 */
	if (file->f_flags & O_NONBLOCK) {
		priv->command_enqueued = true;
		queue_work(tpm_dev_wq, &priv->async_work);
		mutex_unlock(&priv->buffer_mutex);
		return size;
	}

	/* atomic tpm command send and result receive. We only hold the ops
	 * lock during this period so that the tpm can be unregistered even if
	 * the char dev is held open.
	 */
	if (tpm_try_get_ops(priv->chip)) {
		ret = -EPIPE;
		goto out;
	}

	ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
			       sizeof(priv->data_buffer));
	tpm_put_ops(priv->chip);

	if (ret > 0) {
		/* Buffer the response and give user space 120 s to read it
		 * before tpm_timeout_work() discards it.
		 */
		priv->response_length = ret;
		mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
		ret = size;
	}

out:
	mutex_unlock(&priv->buffer_mutex);
	return ret;
}
  202. __poll_t tpm_common_poll(struct file *file, poll_table *wait)
  203. {
  204. struct file_priv *priv = file->private_data;
  205. __poll_t mask = 0;
  206. poll_wait(file, &priv->async_wait, wait);
  207. mutex_lock(&priv->buffer_mutex);
  208. /*
  209. * The response_length indicates if there is still response
  210. * (or part of it) to be consumed. Partial reads decrease it
  211. * by the number of bytes read, and write resets it the zero.
  212. */
  213. if (priv->response_length)
  214. mask = EPOLLIN | EPOLLRDNORM;
  215. else
  216. mask = EPOLLOUT | EPOLLWRNORM;
  217. mutex_unlock(&priv->buffer_mutex);
  218. return mask;
  219. }
/*
 * Called on file close
 */
void tpm_common_release(struct file *file, struct file_priv *priv)
{
	/* Let an in-flight async command finish before tearing down. */
	flush_work(&priv->async_work);
	/* Stop the read timeout, then any discard work it already queued. */
	del_singleshot_timer_sync(&priv->user_read_timer);
	flush_work(&priv->timeout_work);
	file->private_data = NULL;
	priv->response_length = 0;
}
  231. int __init tpm_dev_common_init(void)
  232. {
  233. tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
  234. return !tpm_dev_wq ? -ENOMEM : 0;
  235. }
  236. void __exit tpm_dev_common_exit(void)
  237. {
  238. if (tpm_dev_wq) {
  239. destroy_workqueue(tpm_dev_wq);
  240. tpm_dev_wq = NULL;
  241. }
  242. }