// SPDX-License-Identifier: MIT
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 */

#include <linux/device.h>
#include <linux/tee.h>
#include <linux/tee_drv.h>
#include <linux/psp-tee.h>
#include <linux/slab.h>
#include <linux/psp-sev.h>
#include "amdtee_if.h"
#include "amdtee_private.h"
  13. static int tee_params_to_amd_params(struct tee_param *tee, u32 count,
  14. struct tee_operation *amd)
  15. {
  16. int i, ret = 0;
  17. u32 type;
  18. if (!count)
  19. return 0;
  20. if (!tee || !amd || count > TEE_MAX_PARAMS)
  21. return -EINVAL;
  22. amd->param_types = 0;
  23. for (i = 0; i < count; i++) {
  24. /* AMD TEE does not support meta parameter */
  25. if (tee[i].attr > TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT)
  26. return -EINVAL;
  27. amd->param_types |= ((tee[i].attr & 0xF) << i * 4);
  28. }
  29. for (i = 0; i < count; i++) {
  30. type = TEE_PARAM_TYPE_GET(amd->param_types, i);
  31. pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
  32. if (type == TEE_OP_PARAM_TYPE_INVALID)
  33. return -EINVAL;
  34. if (type == TEE_OP_PARAM_TYPE_NONE)
  35. continue;
  36. /* It is assumed that all values are within 2^32-1 */
  37. if (type > TEE_OP_PARAM_TYPE_VALUE_INOUT) {
  38. u32 buf_id = get_buffer_id(tee[i].u.memref.shm);
  39. amd->params[i].mref.buf_id = buf_id;
  40. amd->params[i].mref.offset = tee[i].u.memref.shm_offs;
  41. amd->params[i].mref.size = tee[i].u.memref.size;
  42. pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
  43. __func__,
  44. i, amd->params[i].mref.buf_id,
  45. i, amd->params[i].mref.offset,
  46. i, amd->params[i].mref.size);
  47. } else {
  48. if (tee[i].u.value.c)
  49. pr_warn("%s: Discarding value c", __func__);
  50. amd->params[i].val.a = tee[i].u.value.a;
  51. amd->params[i].val.b = tee[i].u.value.b;
  52. pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n", __func__,
  53. i, amd->params[i].val.a,
  54. i, amd->params[i].val.b);
  55. }
  56. }
  57. return ret;
  58. }
  59. static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
  60. struct tee_operation *amd)
  61. {
  62. int i, ret = 0;
  63. u32 type;
  64. if (!count)
  65. return 0;
  66. if (!tee || !amd || count > TEE_MAX_PARAMS)
  67. return -EINVAL;
  68. /* Assumes amd->param_types is valid */
  69. for (i = 0; i < count; i++) {
  70. type = TEE_PARAM_TYPE_GET(amd->param_types, i);
  71. pr_debug("%s: type[%d] = 0x%x\n", __func__, i, type);
  72. if (type == TEE_OP_PARAM_TYPE_INVALID ||
  73. type > TEE_OP_PARAM_TYPE_MEMREF_INOUT)
  74. return -EINVAL;
  75. if (type == TEE_OP_PARAM_TYPE_NONE ||
  76. type == TEE_OP_PARAM_TYPE_VALUE_INPUT ||
  77. type == TEE_OP_PARAM_TYPE_MEMREF_INPUT)
  78. continue;
  79. /*
  80. * It is assumed that buf_id remains unchanged for
  81. * both open_session and invoke_cmd call
  82. */
  83. if (type > TEE_OP_PARAM_TYPE_MEMREF_INPUT) {
  84. tee[i].u.memref.shm_offs = amd->params[i].mref.offset;
  85. tee[i].u.memref.size = amd->params[i].mref.size;
  86. pr_debug("%s: bufid[%d] = 0x%x, offset[%d] = 0x%x, size[%d] = 0x%x\n",
  87. __func__,
  88. i, amd->params[i].mref.buf_id,
  89. i, amd->params[i].mref.offset,
  90. i, amd->params[i].mref.size);
  91. } else {
  92. /* field 'c' not supported by AMD TEE */
  93. tee[i].u.value.a = amd->params[i].val.a;
  94. tee[i].u.value.b = amd->params[i].val.b;
  95. tee[i].u.value.c = 0;
  96. pr_debug("%s: a[%d] = 0x%x, b[%d] = 0x%x\n",
  97. __func__,
  98. i, amd->params[i].val.a,
  99. i, amd->params[i].val.b);
  100. }
  101. }
  102. return ret;
  103. }
  104. static DEFINE_MUTEX(ta_refcount_mutex);
  105. static LIST_HEAD(ta_list);
  106. static u32 get_ta_refcount(u32 ta_handle)
  107. {
  108. struct amdtee_ta_data *ta_data;
  109. u32 count = 0;
  110. /* Caller must hold a mutex */
  111. list_for_each_entry(ta_data, &ta_list, list_node)
  112. if (ta_data->ta_handle == ta_handle)
  113. return ++ta_data->refcount;
  114. ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
  115. if (ta_data) {
  116. ta_data->ta_handle = ta_handle;
  117. ta_data->refcount = 1;
  118. count = ta_data->refcount;
  119. list_add(&ta_data->list_node, &ta_list);
  120. }
  121. return count;
  122. }
  123. static u32 put_ta_refcount(u32 ta_handle)
  124. {
  125. struct amdtee_ta_data *ta_data;
  126. u32 count = 0;
  127. /* Caller must hold a mutex */
  128. list_for_each_entry(ta_data, &ta_list, list_node)
  129. if (ta_data->ta_handle == ta_handle) {
  130. count = --ta_data->refcount;
  131. if (count == 0) {
  132. list_del(&ta_data->list_node);
  133. kfree(ta_data);
  134. break;
  135. }
  136. }
  137. return count;
  138. }
  139. int handle_unload_ta(u32 ta_handle)
  140. {
  141. struct tee_cmd_unload_ta cmd = {0};
  142. u32 status, count;
  143. int ret;
  144. if (!ta_handle)
  145. return -EINVAL;
  146. mutex_lock(&ta_refcount_mutex);
  147. count = put_ta_refcount(ta_handle);
  148. if (count) {
  149. pr_debug("unload ta: not unloading %u count %u\n",
  150. ta_handle, count);
  151. ret = -EBUSY;
  152. goto unlock;
  153. }
  154. cmd.ta_handle = ta_handle;
  155. ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
  156. sizeof(cmd), &status);
  157. if (!ret && status != 0) {
  158. pr_err("unload ta: status = 0x%x\n", status);
  159. ret = -EBUSY;
  160. } else {
  161. pr_debug("unloaded ta handle %u\n", ta_handle);
  162. }
  163. unlock:
  164. mutex_unlock(&ta_refcount_mutex);
  165. return ret;
  166. }
  167. int handle_close_session(u32 ta_handle, u32 info)
  168. {
  169. struct tee_cmd_close_session cmd = {0};
  170. u32 status;
  171. int ret;
  172. if (ta_handle == 0)
  173. return -EINVAL;
  174. cmd.ta_handle = ta_handle;
  175. cmd.session_info = info;
  176. ret = psp_tee_process_cmd(TEE_CMD_ID_CLOSE_SESSION, (void *)&cmd,
  177. sizeof(cmd), &status);
  178. if (!ret && status != 0) {
  179. pr_err("close session: status = 0x%x\n", status);
  180. ret = -EBUSY;
  181. }
  182. return ret;
  183. }
  184. void handle_unmap_shmem(u32 buf_id)
  185. {
  186. struct tee_cmd_unmap_shared_mem cmd = {0};
  187. u32 status;
  188. int ret;
  189. cmd.buf_id = buf_id;
  190. ret = psp_tee_process_cmd(TEE_CMD_ID_UNMAP_SHARED_MEM, (void *)&cmd,
  191. sizeof(cmd), &status);
  192. if (!ret)
  193. pr_debug("unmap shared memory: buf_id %u status = 0x%x\n",
  194. buf_id, status);
  195. }
  196. int handle_invoke_cmd(struct tee_ioctl_invoke_arg *arg, u32 sinfo,
  197. struct tee_param *p)
  198. {
  199. struct tee_cmd_invoke_cmd cmd = {0};
  200. int ret;
  201. if (!arg || (!p && arg->num_params))
  202. return -EINVAL;
  203. arg->ret_origin = TEEC_ORIGIN_COMMS;
  204. if (arg->session == 0) {
  205. arg->ret = TEEC_ERROR_BAD_PARAMETERS;
  206. return -EINVAL;
  207. }
  208. ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
  209. if (ret) {
  210. pr_err("invalid Params. Abort invoke command\n");
  211. arg->ret = TEEC_ERROR_BAD_PARAMETERS;
  212. return ret;
  213. }
  214. cmd.ta_handle = get_ta_handle(arg->session);
  215. cmd.cmd_id = arg->func;
  216. cmd.session_info = sinfo;
  217. ret = psp_tee_process_cmd(TEE_CMD_ID_INVOKE_CMD, (void *)&cmd,
  218. sizeof(cmd), &arg->ret);
  219. if (ret) {
  220. arg->ret = TEEC_ERROR_COMMUNICATION;
  221. } else {
  222. ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
  223. if (unlikely(ret)) {
  224. pr_err("invoke command: failed to copy output\n");
  225. arg->ret = TEEC_ERROR_GENERIC;
  226. return ret;
  227. }
  228. arg->ret_origin = cmd.return_origin;
  229. pr_debug("invoke command: RO = 0x%x ret = 0x%x\n",
  230. arg->ret_origin, arg->ret);
  231. }
  232. return ret;
  233. }
  234. int handle_map_shmem(u32 count, struct shmem_desc *start, u32 *buf_id)
  235. {
  236. struct tee_cmd_map_shared_mem *cmd;
  237. phys_addr_t paddr;
  238. int ret, i;
  239. u32 status;
  240. if (!count || !start || !buf_id)
  241. return -EINVAL;
  242. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  243. if (!cmd)
  244. return -ENOMEM;
  245. /* Size must be page aligned */
  246. for (i = 0; i < count ; i++) {
  247. if (!start[i].kaddr || (start[i].size & (PAGE_SIZE - 1))) {
  248. ret = -EINVAL;
  249. goto free_cmd;
  250. }
  251. if ((u64)start[i].kaddr & (PAGE_SIZE - 1)) {
  252. pr_err("map shared memory: page unaligned. addr 0x%llx",
  253. (u64)start[i].kaddr);
  254. ret = -EINVAL;
  255. goto free_cmd;
  256. }
  257. }
  258. cmd->sg_list.count = count;
  259. /* Create buffer list */
  260. for (i = 0; i < count ; i++) {
  261. paddr = __psp_pa(start[i].kaddr);
  262. cmd->sg_list.buf[i].hi_addr = upper_32_bits(paddr);
  263. cmd->sg_list.buf[i].low_addr = lower_32_bits(paddr);
  264. cmd->sg_list.buf[i].size = start[i].size;
  265. cmd->sg_list.size += cmd->sg_list.buf[i].size;
  266. pr_debug("buf[%d]:hi addr = 0x%x\n", i,
  267. cmd->sg_list.buf[i].hi_addr);
  268. pr_debug("buf[%d]:low addr = 0x%x\n", i,
  269. cmd->sg_list.buf[i].low_addr);
  270. pr_debug("buf[%d]:size = 0x%x\n", i, cmd->sg_list.buf[i].size);
  271. pr_debug("list size = 0x%x\n", cmd->sg_list.size);
  272. }
  273. *buf_id = 0;
  274. ret = psp_tee_process_cmd(TEE_CMD_ID_MAP_SHARED_MEM, (void *)cmd,
  275. sizeof(*cmd), &status);
  276. if (!ret && !status) {
  277. *buf_id = cmd->buf_id;
  278. pr_debug("mapped buffer ID = 0x%x\n", *buf_id);
  279. } else {
  280. pr_err("map shared memory: status = 0x%x\n", status);
  281. ret = -ENOMEM;
  282. }
  283. free_cmd:
  284. kfree(cmd);
  285. return ret;
  286. }
  287. int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
  288. struct tee_param *p)
  289. {
  290. struct tee_cmd_open_session cmd = {0};
  291. int ret;
  292. if (!arg || !info || (!p && arg->num_params))
  293. return -EINVAL;
  294. arg->ret_origin = TEEC_ORIGIN_COMMS;
  295. if (arg->session == 0) {
  296. arg->ret = TEEC_ERROR_GENERIC;
  297. return -EINVAL;
  298. }
  299. ret = tee_params_to_amd_params(p, arg->num_params, &cmd.op);
  300. if (ret) {
  301. pr_err("invalid Params. Abort open session\n");
  302. arg->ret = TEEC_ERROR_BAD_PARAMETERS;
  303. return ret;
  304. }
  305. cmd.ta_handle = get_ta_handle(arg->session);
  306. *info = 0;
  307. ret = psp_tee_process_cmd(TEE_CMD_ID_OPEN_SESSION, (void *)&cmd,
  308. sizeof(cmd), &arg->ret);
  309. if (ret) {
  310. arg->ret = TEEC_ERROR_COMMUNICATION;
  311. } else {
  312. ret = amd_params_to_tee_params(p, arg->num_params, &cmd.op);
  313. if (unlikely(ret)) {
  314. pr_err("open session: failed to copy output\n");
  315. arg->ret = TEEC_ERROR_GENERIC;
  316. return ret;
  317. }
  318. arg->ret_origin = cmd.return_origin;
  319. *info = cmd.session_info;
  320. pr_debug("open session: session info = 0x%x\n", *info);
  321. }
  322. pr_debug("open session: ret = 0x%x RO = 0x%x\n", arg->ret,
  323. arg->ret_origin);
  324. return ret;
  325. }
  326. int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
  327. {
  328. struct tee_cmd_unload_ta unload_cmd = {};
  329. struct tee_cmd_load_ta load_cmd = {};
  330. phys_addr_t blob;
  331. int ret;
  332. if (size == 0 || !data || !arg)
  333. return -EINVAL;
  334. blob = __psp_pa(data);
  335. if (blob & (PAGE_SIZE - 1)) {
  336. pr_err("load TA: page unaligned. blob 0x%llx", blob);
  337. return -EINVAL;
  338. }
  339. load_cmd.hi_addr = upper_32_bits(blob);
  340. load_cmd.low_addr = lower_32_bits(blob);
  341. load_cmd.size = size;
  342. mutex_lock(&ta_refcount_mutex);
  343. ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
  344. sizeof(load_cmd), &arg->ret);
  345. if (ret) {
  346. arg->ret_origin = TEEC_ORIGIN_COMMS;
  347. arg->ret = TEEC_ERROR_COMMUNICATION;
  348. } else {
  349. arg->ret_origin = load_cmd.return_origin;
  350. if (arg->ret == TEEC_SUCCESS) {
  351. ret = get_ta_refcount(load_cmd.ta_handle);
  352. if (!ret) {
  353. arg->ret_origin = TEEC_ORIGIN_COMMS;
  354. arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
  355. /* Unload the TA on error */
  356. unload_cmd.ta_handle = load_cmd.ta_handle;
  357. psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
  358. (void *)&unload_cmd,
  359. sizeof(unload_cmd), &ret);
  360. } else {
  361. set_session_id(load_cmd.ta_handle, 0, &arg->session);
  362. }
  363. }
  364. }
  365. mutex_unlock(&ta_refcount_mutex);
  366. pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
  367. load_cmd.ta_handle, arg->ret_origin, arg->ret);
  368. return 0;
  369. }