ffa_abi.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021, Linaro Limited
  4. */
  5. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  6. #include <linux/arm_ffa.h>
  7. #include <linux/errno.h>
  8. #include <linux/scatterlist.h>
  9. #include <linux/sched.h>
  10. #include <linux/slab.h>
  11. #include <linux/string.h>
  12. #include <linux/tee_drv.h>
  13. #include <linux/types.h>
  14. #include "optee_private.h"
  15. #include "optee_ffa.h"
  16. #include "optee_rpc_cmd.h"
/*
 * This file implements the FF-A ABI used when communicating with the
 * secure world OP-TEE OS via FF-A.
 * This file is divided into the following sections:
 * 1. Maintain a hash table for lookup of a global FF-A memory handle
 * 2. Convert between struct tee_param and struct optee_msg_param
 * 3. Low level support functions to register shared memory in secure world
 * 4. Dynamic shared memory pool based on alloc_pages()
 * 5. Do a normal scheduled call into secure world
 * 6. Driver initialization.
 */
/*
 * 1. Maintain a hash table for lookup of a global FF-A memory handle
 *
 * FF-A assigns a global memory handle for each piece of shared memory.
 * This handle is then used when communicating with secure world.
 *
 * Main functions are optee_shm_add_ffa_handle() and optee_shm_rem_ffa_handle()
 */
  36. struct shm_rhash {
  37. struct tee_shm *shm;
  38. u64 global_id;
  39. struct rhash_head linkage;
  40. };
  41. static void rh_free_fn(void *ptr, void *arg)
  42. {
  43. kfree(ptr);
  44. }
  45. static const struct rhashtable_params shm_rhash_params = {
  46. .head_offset = offsetof(struct shm_rhash, linkage),
  47. .key_len = sizeof(u64),
  48. .key_offset = offsetof(struct shm_rhash, global_id),
  49. .automatic_shrinking = true,
  50. };
  51. static struct tee_shm *optee_shm_from_ffa_handle(struct optee *optee,
  52. u64 global_id)
  53. {
  54. struct tee_shm *shm = NULL;
  55. struct shm_rhash *r;
  56. mutex_lock(&optee->ffa.mutex);
  57. r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
  58. shm_rhash_params);
  59. if (r)
  60. shm = r->shm;
  61. mutex_unlock(&optee->ffa.mutex);
  62. return shm;
  63. }
  64. static int optee_shm_add_ffa_handle(struct optee *optee, struct tee_shm *shm,
  65. u64 global_id)
  66. {
  67. struct shm_rhash *r;
  68. int rc;
  69. r = kmalloc(sizeof(*r), GFP_KERNEL);
  70. if (!r)
  71. return -ENOMEM;
  72. r->shm = shm;
  73. r->global_id = global_id;
  74. mutex_lock(&optee->ffa.mutex);
  75. rc = rhashtable_lookup_insert_fast(&optee->ffa.global_ids, &r->linkage,
  76. shm_rhash_params);
  77. mutex_unlock(&optee->ffa.mutex);
  78. if (rc)
  79. kfree(r);
  80. return rc;
  81. }
  82. static int optee_shm_rem_ffa_handle(struct optee *optee, u64 global_id)
  83. {
  84. struct shm_rhash *r;
  85. int rc = -ENOENT;
  86. mutex_lock(&optee->ffa.mutex);
  87. r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
  88. shm_rhash_params);
  89. if (r)
  90. rc = rhashtable_remove_fast(&optee->ffa.global_ids,
  91. &r->linkage, shm_rhash_params);
  92. mutex_unlock(&optee->ffa.mutex);
  93. if (!rc)
  94. kfree(r);
  95. return rc;
  96. }
  97. /*
  98. * 2. Convert between struct tee_param and struct optee_msg_param
  99. *
  100. * optee_ffa_from_msg_param() and optee_ffa_to_msg_param() are the main
  101. * functions.
  102. */
  103. static void from_msg_param_ffa_mem(struct optee *optee, struct tee_param *p,
  104. u32 attr, const struct optee_msg_param *mp)
  105. {
  106. struct tee_shm *shm = NULL;
  107. u64 offs_high = 0;
  108. u64 offs_low = 0;
  109. p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
  110. attr - OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
  111. p->u.memref.size = mp->u.fmem.size;
  112. if (mp->u.fmem.global_id != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
  113. shm = optee_shm_from_ffa_handle(optee, mp->u.fmem.global_id);
  114. p->u.memref.shm = shm;
  115. if (shm) {
  116. offs_low = mp->u.fmem.offs_low;
  117. offs_high = mp->u.fmem.offs_high;
  118. }
  119. p->u.memref.shm_offs = offs_low | offs_high << 32;
  120. }
  121. /**
  122. * optee_ffa_from_msg_param() - convert from OPTEE_MSG parameters to
  123. * struct tee_param
  124. * @optee: main service struct
  125. * @params: subsystem internal parameter representation
  126. * @num_params: number of elements in the parameter arrays
  127. * @msg_params: OPTEE_MSG parameters
  128. *
  129. * Returns 0 on success or <0 on failure
  130. */
  131. static int optee_ffa_from_msg_param(struct optee *optee,
  132. struct tee_param *params, size_t num_params,
  133. const struct optee_msg_param *msg_params)
  134. {
  135. size_t n;
  136. for (n = 0; n < num_params; n++) {
  137. struct tee_param *p = params + n;
  138. const struct optee_msg_param *mp = msg_params + n;
  139. u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
  140. switch (attr) {
  141. case OPTEE_MSG_ATTR_TYPE_NONE:
  142. p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
  143. memset(&p->u, 0, sizeof(p->u));
  144. break;
  145. case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
  146. case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
  147. case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
  148. optee_from_msg_param_value(p, attr, mp);
  149. break;
  150. case OPTEE_MSG_ATTR_TYPE_FMEM_INPUT:
  151. case OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT:
  152. case OPTEE_MSG_ATTR_TYPE_FMEM_INOUT:
  153. from_msg_param_ffa_mem(optee, p, attr, mp);
  154. break;
  155. default:
  156. return -EINVAL;
  157. }
  158. }
  159. return 0;
  160. }
  161. static int to_msg_param_ffa_mem(struct optee_msg_param *mp,
  162. const struct tee_param *p)
  163. {
  164. struct tee_shm *shm = p->u.memref.shm;
  165. mp->attr = OPTEE_MSG_ATTR_TYPE_FMEM_INPUT + p->attr -
  166. TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
  167. if (shm) {
  168. u64 shm_offs = p->u.memref.shm_offs;
  169. mp->u.fmem.internal_offs = shm->offset;
  170. mp->u.fmem.offs_low = shm_offs;
  171. mp->u.fmem.offs_high = shm_offs >> 32;
  172. /* Check that the entire offset could be stored. */
  173. if (mp->u.fmem.offs_high != shm_offs >> 32)
  174. return -EINVAL;
  175. mp->u.fmem.global_id = shm->sec_world_id;
  176. } else {
  177. memset(&mp->u, 0, sizeof(mp->u));
  178. mp->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
  179. }
  180. mp->u.fmem.size = p->u.memref.size;
  181. return 0;
  182. }
  183. /**
  184. * optee_ffa_to_msg_param() - convert from struct tee_params to OPTEE_MSG
  185. * parameters
  186. * @optee: main service struct
  187. * @msg_params: OPTEE_MSG parameters
  188. * @num_params: number of elements in the parameter arrays
  189. * @params: subsystem itnernal parameter representation
  190. * Returns 0 on success or <0 on failure
  191. */
  192. static int optee_ffa_to_msg_param(struct optee *optee,
  193. struct optee_msg_param *msg_params,
  194. size_t num_params,
  195. const struct tee_param *params)
  196. {
  197. size_t n;
  198. for (n = 0; n < num_params; n++) {
  199. const struct tee_param *p = params + n;
  200. struct optee_msg_param *mp = msg_params + n;
  201. switch (p->attr) {
  202. case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
  203. mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
  204. memset(&mp->u, 0, sizeof(mp->u));
  205. break;
  206. case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
  207. case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
  208. case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
  209. optee_to_msg_param_value(mp, p);
  210. break;
  211. case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
  212. case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
  213. case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
  214. if (to_msg_param_ffa_mem(mp, p))
  215. return -EINVAL;
  216. break;
  217. default:
  218. return -EINVAL;
  219. }
  220. }
  221. return 0;
  222. }
  223. /*
  224. * 3. Low level support functions to register shared memory in secure world
  225. *
  226. * Functions to register and unregister shared memory both for normal
  227. * clients and for tee-supplicant.
  228. */
  229. static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
  230. struct page **pages, size_t num_pages,
  231. unsigned long start)
  232. {
  233. struct optee *optee = tee_get_drvdata(ctx->teedev);
  234. struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
  235. const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
  236. struct ffa_mem_region_attributes mem_attr = {
  237. .receiver = ffa_dev->vm_id,
  238. .attrs = FFA_MEM_RW,
  239. };
  240. struct ffa_mem_ops_args args = {
  241. .use_txbuf = true,
  242. .attrs = &mem_attr,
  243. .nattrs = 1,
  244. };
  245. struct sg_table sgt;
  246. int rc;
  247. rc = optee_check_mem_type(start, num_pages);
  248. if (rc)
  249. return rc;
  250. rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
  251. num_pages * PAGE_SIZE, GFP_KERNEL);
  252. if (rc)
  253. return rc;
  254. args.sg = sgt.sgl;
  255. rc = mem_ops->memory_share(&args);
  256. sg_free_table(&sgt);
  257. if (rc)
  258. return rc;
  259. rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle);
  260. if (rc) {
  261. mem_ops->memory_reclaim(args.g_handle, 0);
  262. return rc;
  263. }
  264. shm->sec_world_id = args.g_handle;
  265. return 0;
  266. }
  267. static int optee_ffa_shm_unregister(struct tee_context *ctx,
  268. struct tee_shm *shm)
  269. {
  270. struct optee *optee = tee_get_drvdata(ctx->teedev);
  271. struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
  272. const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
  273. const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
  274. u64 global_handle = shm->sec_world_id;
  275. struct ffa_send_direct_data data = {
  276. .data0 = OPTEE_FFA_UNREGISTER_SHM,
  277. .data1 = (u32)global_handle,
  278. .data2 = (u32)(global_handle >> 32)
  279. };
  280. int rc;
  281. optee_shm_rem_ffa_handle(optee, global_handle);
  282. shm->sec_world_id = 0;
  283. rc = msg_ops->sync_send_receive(ffa_dev, &data);
  284. if (rc)
  285. pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc);
  286. rc = mem_ops->memory_reclaim(global_handle, 0);
  287. if (rc)
  288. pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
  289. return rc;
  290. }
  291. static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
  292. struct tee_shm *shm)
  293. {
  294. struct optee *optee = tee_get_drvdata(ctx->teedev);
  295. const struct ffa_mem_ops *mem_ops;
  296. u64 global_handle = shm->sec_world_id;
  297. int rc;
  298. /*
  299. * We're skipping the OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM call
  300. * since this is OP-TEE freeing via RPC so it has already retired
  301. * this ID.
  302. */
  303. optee_shm_rem_ffa_handle(optee, global_handle);
  304. mem_ops = optee->ffa.ffa_dev->ops->mem_ops;
  305. rc = mem_ops->memory_reclaim(global_handle, 0);
  306. if (rc)
  307. pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
  308. shm->sec_world_id = 0;
  309. return rc;
  310. }
  311. /*
  312. * 4. Dynamic shared memory pool based on alloc_pages()
  313. *
  314. * Implements an OP-TEE specific shared memory pool.
  315. * The main function is optee_ffa_shm_pool_alloc_pages().
  316. */
  317. static int pool_ffa_op_alloc(struct tee_shm_pool *pool,
  318. struct tee_shm *shm, size_t size, size_t align)
  319. {
  320. return optee_pool_op_alloc_helper(pool, shm, size, align,
  321. optee_ffa_shm_register);
  322. }
  323. static void pool_ffa_op_free(struct tee_shm_pool *pool,
  324. struct tee_shm *shm)
  325. {
  326. optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister);
  327. }
  328. static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool)
  329. {
  330. kfree(pool);
  331. }
  332. static const struct tee_shm_pool_ops pool_ffa_ops = {
  333. .alloc = pool_ffa_op_alloc,
  334. .free = pool_ffa_op_free,
  335. .destroy_pool = pool_ffa_op_destroy_pool,
  336. };
  337. /**
  338. * optee_ffa_shm_pool_alloc_pages() - create page-based allocator pool
  339. *
  340. * This pool is used with OP-TEE over FF-A. In this case command buffers
  341. * and such are allocated from kernel's own memory.
  342. */
  343. static struct tee_shm_pool *optee_ffa_shm_pool_alloc_pages(void)
  344. {
  345. struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
  346. if (!pool)
  347. return ERR_PTR(-ENOMEM);
  348. pool->ops = &pool_ffa_ops;
  349. return pool;
  350. }
/*
 * 5. Do a normal scheduled call into secure world
 *
 * The function optee_ffa_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call secure world may request help
 * from normal world using RPCs, Remote Procedure Calls. This includes
 * delivery of non-secure interrupts to for instance allow rescheduling of
 * the current task.
 */
  360. static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
  361. struct optee *optee,
  362. struct optee_msg_arg *arg)
  363. {
  364. struct tee_shm *shm;
  365. if (arg->num_params != 1 ||
  366. arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
  367. arg->ret = TEEC_ERROR_BAD_PARAMETERS;
  368. return;
  369. }
  370. switch (arg->params[0].u.value.a) {
  371. case OPTEE_RPC_SHM_TYPE_APPL:
  372. shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
  373. break;
  374. case OPTEE_RPC_SHM_TYPE_KERNEL:
  375. shm = tee_shm_alloc_priv_buf(optee->ctx,
  376. arg->params[0].u.value.b);
  377. break;
  378. default:
  379. arg->ret = TEEC_ERROR_BAD_PARAMETERS;
  380. return;
  381. }
  382. if (IS_ERR(shm)) {
  383. arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
  384. return;
  385. }
  386. arg->params[0] = (struct optee_msg_param){
  387. .attr = OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT,
  388. .u.fmem.size = tee_shm_get_size(shm),
  389. .u.fmem.global_id = shm->sec_world_id,
  390. .u.fmem.internal_offs = shm->offset,
  391. };
  392. arg->ret = TEEC_SUCCESS;
  393. }
  394. static void handle_ffa_rpc_func_cmd_shm_free(struct tee_context *ctx,
  395. struct optee *optee,
  396. struct optee_msg_arg *arg)
  397. {
  398. struct tee_shm *shm;
  399. if (arg->num_params != 1 ||
  400. arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
  401. goto err_bad_param;
  402. shm = optee_shm_from_ffa_handle(optee, arg->params[0].u.value.b);
  403. if (!shm)
  404. goto err_bad_param;
  405. switch (arg->params[0].u.value.a) {
  406. case OPTEE_RPC_SHM_TYPE_APPL:
  407. optee_rpc_cmd_free_suppl(ctx, shm);
  408. break;
  409. case OPTEE_RPC_SHM_TYPE_KERNEL:
  410. tee_shm_free(shm);
  411. break;
  412. default:
  413. goto err_bad_param;
  414. }
  415. arg->ret = TEEC_SUCCESS;
  416. return;
  417. err_bad_param:
  418. arg->ret = TEEC_ERROR_BAD_PARAMETERS;
  419. }
  420. static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
  421. struct optee *optee,
  422. struct optee_msg_arg *arg)
  423. {
  424. arg->ret_origin = TEEC_ORIGIN_COMMS;
  425. switch (arg->cmd) {
  426. case OPTEE_RPC_CMD_SHM_ALLOC:
  427. handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
  428. break;
  429. case OPTEE_RPC_CMD_SHM_FREE:
  430. handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
  431. break;
  432. default:
  433. optee_rpc_cmd(ctx, optee, arg);
  434. }
  435. }
  436. static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
  437. u32 cmd, struct optee_msg_arg *arg)
  438. {
  439. switch (cmd) {
  440. case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
  441. handle_ffa_rpc_func_cmd(ctx, optee, arg);
  442. break;
  443. case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
  444. /* Interrupt delivered by now */
  445. break;
  446. default:
  447. pr_warn("Unknown RPC func 0x%x\n", cmd);
  448. break;
  449. }
  450. }
  451. static int optee_ffa_yielding_call(struct tee_context *ctx,
  452. struct ffa_send_direct_data *data,
  453. struct optee_msg_arg *rpc_arg)
  454. {
  455. struct optee *optee = tee_get_drvdata(ctx->teedev);
  456. struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
  457. const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
  458. struct optee_call_waiter w;
  459. u32 cmd = data->data0;
  460. u32 w4 = data->data1;
  461. u32 w5 = data->data2;
  462. u32 w6 = data->data3;
  463. int rc;
  464. /* Initialize waiter */
  465. optee_cq_wait_init(&optee->call_queue, &w);
  466. while (true) {
  467. rc = msg_ops->sync_send_receive(ffa_dev, data);
  468. if (rc)
  469. goto done;
  470. switch ((int)data->data0) {
  471. case TEEC_SUCCESS:
  472. break;
  473. case TEEC_ERROR_BUSY:
  474. if (cmd == OPTEE_FFA_YIELDING_CALL_RESUME) {
  475. rc = -EIO;
  476. goto done;
  477. }
  478. /*
  479. * Out of threads in secure world, wait for a thread
  480. * become available.
  481. */
  482. optee_cq_wait_for_completion(&optee->call_queue, &w);
  483. data->data0 = cmd;
  484. data->data1 = w4;
  485. data->data2 = w5;
  486. data->data3 = w6;
  487. continue;
  488. default:
  489. rc = -EIO;
  490. goto done;
  491. }
  492. if (data->data1 == OPTEE_FFA_YIELDING_CALL_RETURN_DONE)
  493. goto done;
  494. /*
  495. * OP-TEE has returned with a RPC request.
  496. *
  497. * Note that data->data4 (passed in register w7) is already
  498. * filled in by ffa_mem_ops->sync_send_receive() returning
  499. * above.
  500. */
  501. cond_resched();
  502. optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
  503. cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
  504. data->data0 = cmd;
  505. data->data1 = 0;
  506. data->data2 = 0;
  507. data->data3 = 0;
  508. }
  509. done:
  510. /*
  511. * We're done with our thread in secure world, if there's any
  512. * thread waiters wake up one.
  513. */
  514. optee_cq_wait_final(&optee->call_queue, &w);
  515. return rc;
  516. }
  517. /**
  518. * optee_ffa_do_call_with_arg() - Do a FF-A call to enter OP-TEE in secure world
  519. * @ctx: calling context
  520. * @shm: shared memory holding the message to pass to secure world
  521. * @offs: offset of the message in @shm
  522. *
  523. * Does a FF-A call to OP-TEE in secure world and handles eventual resulting
  524. * Remote Procedure Calls (RPC) from OP-TEE.
  525. *
  526. * Returns return code from FF-A, 0 is OK
  527. */
  528. static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
  529. struct tee_shm *shm, u_int offs)
  530. {
  531. struct ffa_send_direct_data data = {
  532. .data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
  533. .data1 = (u32)shm->sec_world_id,
  534. .data2 = (u32)(shm->sec_world_id >> 32),
  535. .data3 = offs,
  536. };
  537. struct optee_msg_arg *arg;
  538. unsigned int rpc_arg_offs;
  539. struct optee_msg_arg *rpc_arg;
  540. /*
  541. * The shared memory object has to start on a page when passed as
  542. * an argument struct. This is also what the shm pool allocator
  543. * returns, but check this before calling secure world to catch
  544. * eventual errors early in case something changes.
  545. */
  546. if (shm->offset)
  547. return -EINVAL;
  548. arg = tee_shm_get_va(shm, offs);
  549. if (IS_ERR(arg))
  550. return PTR_ERR(arg);
  551. rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
  552. rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
  553. if (IS_ERR(rpc_arg))
  554. return PTR_ERR(rpc_arg);
  555. return optee_ffa_yielding_call(ctx, &data, rpc_arg);
  556. }
/*
 * 6. Driver initialization
 *
 * During driver initialization the OP-TEE Secure Partition is probed
 * to find out which features it supports so the driver can be initialized
 * with a matching configuration.
 */
  564. static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
  565. const struct ffa_ops *ops)
  566. {
  567. const struct ffa_msg_ops *msg_ops = ops->msg_ops;
  568. struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
  569. int rc;
  570. msg_ops->mode_32bit_set(ffa_dev);
  571. rc = msg_ops->sync_send_receive(ffa_dev, &data);
  572. if (rc) {
  573. pr_err("Unexpected error %d\n", rc);
  574. return false;
  575. }
  576. if (data.data0 != OPTEE_FFA_VERSION_MAJOR ||
  577. data.data1 < OPTEE_FFA_VERSION_MINOR) {
  578. pr_err("Incompatible OP-TEE API version %lu.%lu",
  579. data.data0, data.data1);
  580. return false;
  581. }
  582. data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
  583. rc = msg_ops->sync_send_receive(ffa_dev, &data);
  584. if (rc) {
  585. pr_err("Unexpected error %d\n", rc);
  586. return false;
  587. }
  588. if (data.data2)
  589. pr_info("revision %lu.%lu (%08lx)",
  590. data.data0, data.data1, data.data2);
  591. else
  592. pr_info("revision %lu.%lu", data.data0, data.data1);
  593. return true;
  594. }
  595. static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
  596. const struct ffa_ops *ops,
  597. u32 *sec_caps,
  598. unsigned int *rpc_param_count)
  599. {
  600. struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
  601. int rc;
  602. rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
  603. if (rc) {
  604. pr_err("Unexpected error %d", rc);
  605. return false;
  606. }
  607. if (data.data0) {
  608. pr_err("Unexpected exchange error %lu", data.data0);
  609. return false;
  610. }
  611. *rpc_param_count = (u8)data.data1;
  612. *sec_caps = data.data2;
  613. return true;
  614. }
  615. static void optee_ffa_get_version(struct tee_device *teedev,
  616. struct tee_ioctl_version_data *vers)
  617. {
  618. struct tee_ioctl_version_data v = {
  619. .impl_id = TEE_IMPL_ID_OPTEE,
  620. .impl_caps = TEE_OPTEE_CAP_TZ,
  621. .gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM |
  622. TEE_GEN_CAP_MEMREF_NULL,
  623. };
  624. *vers = v;
  625. }
  626. static int optee_ffa_open(struct tee_context *ctx)
  627. {
  628. return optee_open(ctx, true);
  629. }
  630. static const struct tee_driver_ops optee_ffa_clnt_ops = {
  631. .get_version = optee_ffa_get_version,
  632. .open = optee_ffa_open,
  633. .release = optee_release,
  634. .open_session = optee_open_session,
  635. .close_session = optee_close_session,
  636. .invoke_func = optee_invoke_func,
  637. .cancel_req = optee_cancel_req,
  638. .shm_register = optee_ffa_shm_register,
  639. .shm_unregister = optee_ffa_shm_unregister,
  640. };
  641. static const struct tee_desc optee_ffa_clnt_desc = {
  642. .name = DRIVER_NAME "-ffa-clnt",
  643. .ops = &optee_ffa_clnt_ops,
  644. .owner = THIS_MODULE,
  645. };
  646. static const struct tee_driver_ops optee_ffa_supp_ops = {
  647. .get_version = optee_ffa_get_version,
  648. .open = optee_ffa_open,
  649. .release = optee_release_supp,
  650. .supp_recv = optee_supp_recv,
  651. .supp_send = optee_supp_send,
  652. .shm_register = optee_ffa_shm_register, /* same as for clnt ops */
  653. .shm_unregister = optee_ffa_shm_unregister_supp,
  654. };
  655. static const struct tee_desc optee_ffa_supp_desc = {
  656. .name = DRIVER_NAME "-ffa-supp",
  657. .ops = &optee_ffa_supp_ops,
  658. .owner = THIS_MODULE,
  659. .flags = TEE_DESC_PRIVILEGED,
  660. };
  661. static const struct optee_ops optee_ffa_ops = {
  662. .do_call_with_arg = optee_ffa_do_call_with_arg,
  663. .to_msg_param = optee_ffa_to_msg_param,
  664. .from_msg_param = optee_ffa_from_msg_param,
  665. };
  666. static void optee_ffa_remove(struct ffa_device *ffa_dev)
  667. {
  668. struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
  669. optee_remove_common(optee);
  670. mutex_destroy(&optee->ffa.mutex);
  671. rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
  672. kfree(optee);
  673. }
  674. static int optee_ffa_probe(struct ffa_device *ffa_dev)
  675. {
  676. const struct ffa_ops *ffa_ops;
  677. unsigned int rpc_param_count;
  678. struct tee_shm_pool *pool;
  679. struct tee_device *teedev;
  680. struct tee_context *ctx;
  681. u32 arg_cache_flags = 0;
  682. struct optee *optee;
  683. u32 sec_caps;
  684. int rc;
  685. ffa_ops = ffa_dev->ops;
  686. if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
  687. return -EINVAL;
  688. if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
  689. &rpc_param_count))
  690. return -EINVAL;
  691. if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET)
  692. arg_cache_flags |= OPTEE_SHM_ARG_SHARED;
  693. optee = kzalloc(sizeof(*optee), GFP_KERNEL);
  694. if (!optee)
  695. return -ENOMEM;
  696. pool = optee_ffa_shm_pool_alloc_pages();
  697. if (IS_ERR(pool)) {
  698. rc = PTR_ERR(pool);
  699. goto err_free_optee;
  700. }
  701. optee->pool = pool;
  702. optee->ops = &optee_ffa_ops;
  703. optee->ffa.ffa_dev = ffa_dev;
  704. optee->rpc_param_count = rpc_param_count;
  705. teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
  706. optee);
  707. if (IS_ERR(teedev)) {
  708. rc = PTR_ERR(teedev);
  709. goto err_free_pool;
  710. }
  711. optee->teedev = teedev;
  712. teedev = tee_device_alloc(&optee_ffa_supp_desc, NULL, optee->pool,
  713. optee);
  714. if (IS_ERR(teedev)) {
  715. rc = PTR_ERR(teedev);
  716. goto err_unreg_teedev;
  717. }
  718. optee->supp_teedev = teedev;
  719. rc = tee_device_register(optee->teedev);
  720. if (rc)
  721. goto err_unreg_supp_teedev;
  722. rc = tee_device_register(optee->supp_teedev);
  723. if (rc)
  724. goto err_unreg_supp_teedev;
  725. rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
  726. if (rc)
  727. goto err_unreg_supp_teedev;
  728. mutex_init(&optee->ffa.mutex);
  729. mutex_init(&optee->call_queue.mutex);
  730. INIT_LIST_HEAD(&optee->call_queue.waiters);
  731. optee_supp_init(&optee->supp);
  732. optee_shm_arg_cache_init(optee, arg_cache_flags);
  733. ffa_dev_set_drvdata(ffa_dev, optee);
  734. ctx = teedev_open(optee->teedev);
  735. if (IS_ERR(ctx)) {
  736. rc = PTR_ERR(ctx);
  737. goto err_rhashtable_free;
  738. }
  739. optee->ctx = ctx;
  740. rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
  741. if (rc)
  742. goto err_close_ctx;
  743. rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
  744. if (rc)
  745. goto err_unregister_devices;
  746. pr_info("initialized driver\n");
  747. return 0;
  748. err_unregister_devices:
  749. optee_unregister_devices();
  750. optee_notif_uninit(optee);
  751. err_close_ctx:
  752. teedev_close_context(ctx);
  753. err_rhashtable_free:
  754. rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
  755. optee_supp_uninit(&optee->supp);
  756. mutex_destroy(&optee->call_queue.mutex);
  757. mutex_destroy(&optee->ffa.mutex);
  758. err_unreg_supp_teedev:
  759. tee_device_unregister(optee->supp_teedev);
  760. err_unreg_teedev:
  761. tee_device_unregister(optee->teedev);
  762. err_free_pool:
  763. tee_shm_pool_free(pool);
  764. err_free_optee:
  765. kfree(optee);
  766. return rc;
  767. }
  768. static const struct ffa_device_id optee_ffa_device_id[] = {
  769. /* 486178e0-e7f8-11e3-bc5e0002a5d5c51b */
  770. { UUID_INIT(0x486178e0, 0xe7f8, 0x11e3,
  771. 0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b) },
  772. {}
  773. };
  774. static struct ffa_driver optee_ffa_driver = {
  775. .name = "optee",
  776. .probe = optee_ffa_probe,
  777. .remove = optee_ffa_remove,
  778. .id_table = optee_ffa_device_id,
  779. };
  780. int optee_ffa_abi_register(void)
  781. {
  782. if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
  783. return ffa_register(&optee_ffa_driver);
  784. else
  785. return -EOPNOTSUPP;
  786. }
  787. void optee_ffa_abi_unregister(void)
  788. {
  789. if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
  790. ffa_unregister(&optee_ffa_driver);
  791. }