  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. */
  5. #include <linux/io.h>
  6. #include <linux/errno.h>
  7. #include <linux/delay.h>
  8. #include <linux/mutex.h>
  9. #include <linux/slab.h>
  10. #include <linux/types.h>
  11. #include <linux/qcom_scm.h>
  12. #include <linux/arm-smccc.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/qtee_shmbridge.h>
  15. #include <linux/qcom_scm_hab.h>
  16. #include "qcom_scm.h"
  17. static bool hab_calling_convention;
  18. static DEFINE_MUTEX(qcom_scm_lock);
  19. #define QCOM_SCM_EBUSY_WAIT_MS 30
  20. #define QCOM_SCM_EBUSY_MAX_RETRY 20
  21. #define SCM_SMC_N_REG_ARGS 4
  22. #define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1)
  23. #define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
  24. #define SCM_SMC_FIRST_REG_IDX 2
  25. #define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)
/*
 * __scm_smc_do_quirk() - issue one SCM call to the secure world.
 * @smc: packed SMC register arguments (args[0] is the function ID).
 * @res: output registers from the secure world.
 *
 * When the HAB calling convention was negotiated at init time
 * (hab_calling_convention set by __qcom_scm_init()), the call is routed
 * through the QCPE HAB channel instead of a native SMC instruction.
 * Otherwise the SMC is issued with the QCOM_A6 quirk, which lets the
 * SMCCC layer preserve register a6 across the call.
 */
static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
			       struct arm_smccc_res *res)
{
	unsigned long a0 = smc->args[0];
	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
	/* Fast (atomic) calls must not block in the secure world. */
	bool atomic = ARM_SMCCC_IS_FAST_CALL(smc->args[0]) ? true : false;

	quirk.state.a6 = 0;

	if (hab_calling_convention) {
		scm_call_qcpe(smc, res, atomic);
	} else {
		do {
			/*
			 * args[6] is deliberately replaced by quirk.state.a6:
			 * the quirk mechanism carries a6 between retries.
			 */
			arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
					    smc->args[3], smc->args[4],
					    smc->args[5], quirk.state.a6,
					    smc->args[7], res, &quirk);

			/*
			 * If firmware was interrupted, reissue the call with
			 * a0 = QCOM_SCM_INTERRUPTED (resume token convention)
			 * until the original call completes.
			 */
			if (res->a0 == QCOM_SCM_INTERRUPTED)
				a0 = res->a0;
		} while (res->a0 == QCOM_SCM_INTERRUPTED);
	}
}
  46. #define IS_WAITQ_SLEEP_OR_WAKE(res) \
  47. (res->a0 == QCOM_SCM_WAITQ_SLEEP || res->a0 == QCOM_SCM_WAITQ_WAKE)
  48. static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
  49. {
  50. memset(resume->args, 0, ARRAY_SIZE(resume->args));
  51. resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
  52. ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
  53. SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME));
  54. resume->args[1] = QCOM_SCM_ARGS(1);
  55. resume->args[2] = smc_call_ctx;
  56. }
  57. static void fill_wq_wake_ack_args(struct arm_smccc_args *wake_ack, u32 smc_call_ctx)
  58. {
  59. memset(wake_ack->args, 0, ARRAY_SIZE(wake_ack->args));
  60. wake_ack->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
  61. ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
  62. SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_ACK));
  63. wake_ack->args[1] = QCOM_SCM_ARGS(1);
  64. wake_ack->args[2] = smc_call_ctx;
  65. }
  66. static void fill_get_wq_ctx_args(struct arm_smccc_args *get_wq_ctx)
  67. {
  68. memset(get_wq_ctx->args, 0, ARRAY_SIZE(get_wq_ctx->args));
  69. get_wq_ctx->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,
  70. ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
  71. SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
  72. }
  73. int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
  74. {
  75. int ret;
  76. struct arm_smccc_args get_wq_ctx = {0};
  77. struct arm_smccc_res get_wq_res;
  78. fill_get_wq_ctx_args(&get_wq_ctx);
  79. __scm_smc_do_quirk(&get_wq_ctx, &get_wq_res);
  80. /* Guaranteed to return only success or error, no WAITQ_* */
  81. ret = get_wq_res.a0;
  82. if (ret)
  83. return ret;
  84. *wq_ctx = get_wq_res.a1;
  85. *flags = get_wq_res.a2;
  86. *more_pending = get_wq_res.a3;
  87. return 0;
  88. }
  89. static int scm_smc_do_quirk(struct device *dev, struct arm_smccc_args *smc,
  90. struct arm_smccc_res *res)
  91. {
  92. struct completion *wq = NULL;
  93. struct qcom_scm *qscm;
  94. struct arm_smccc_args original = *smc;
  95. u32 wq_ctx, smc_call_ctx, flags;
  96. do {
  97. __scm_smc_do_quirk(smc, res);
  98. if (IS_WAITQ_SLEEP_OR_WAKE(res)) {
  99. wq_ctx = res->a1;
  100. smc_call_ctx = res->a2;
  101. flags = res->a3;
  102. if (!dev)
  103. return -EPROBE_DEFER;
  104. qscm = dev_get_drvdata(dev);
  105. wq = qcom_scm_lookup_wq(qscm, wq_ctx);
  106. if (IS_ERR_OR_NULL(wq)) {
  107. pr_err("Did not find waitqueue for wq_ctx %d: %d\n",
  108. wq_ctx, PTR_ERR(wq));
  109. return PTR_ERR(wq);
  110. }
  111. if (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
  112. wait_for_completion(wq);
  113. fill_wq_resume_args(smc, smc_call_ctx);
  114. continue;
  115. } else {
  116. fill_wq_wake_ack_args(smc, smc_call_ctx);
  117. scm_waitq_flag_handler(wq, flags);
  118. continue;
  119. }
  120. } else if ((long)res->a0 < 0) {
  121. /* Error, return to caller with original SMC call */
  122. *smc = original;
  123. break;
  124. } else
  125. return 0;
  126. } while (IS_WAITQ_SLEEP_OR_WAKE(res));
  127. return 0;
  128. }
  129. static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc,
  130. struct arm_smccc_res *res,
  131. enum qcom_scm_call_type call_type,
  132. bool multicall_allowed)
  133. {
  134. int ret, retry_count = 0;
  135. bool multi_smc_call = qcom_scm_multi_call_allow(dev, multicall_allowed);
  136. if (call_type == QCOM_SCM_CALL_ATOMIC) {
  137. __scm_smc_do_quirk(smc, res);
  138. return 0;
  139. }
  140. do {
  141. if (!multi_smc_call)
  142. mutex_lock(&qcom_scm_lock);
  143. down(&qcom_scm_sem_lock);
  144. ret = scm_smc_do_quirk(dev, smc, res);
  145. up(&qcom_scm_sem_lock);
  146. if (!multi_smc_call)
  147. mutex_unlock(&qcom_scm_lock);
  148. if (ret)
  149. return ret;
  150. if (res->a0 == QCOM_SCM_V2_EBUSY) {
  151. if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY ||
  152. (call_type == QCOM_SCM_CALL_NORETRY))
  153. break;
  154. msleep(QCOM_SCM_EBUSY_WAIT_MS);
  155. }
  156. } while (res->a0 == QCOM_SCM_V2_EBUSY);
  157. return 0;
  158. }
/*
 * __scm_smc_call() - marshal a qcom_scm_desc into SMC registers and call
 * the secure world.
 * @dev: SCM device; required (non-NULL) when extra args need DMA.
 * @desc: service/command id, arginfo and up to MAX_QCOM_SCM_ARGS args.
 * @qcom_convention: SMC32 vs SMC64 register convention.
 * @res: optional out, first three firmware result registers.
 * @call_type: atomic / normal / no-retry classification.
 *
 * The first SCM_SMC_N_REG_ARGS arguments travel in registers; any extra
 * arguments are packed into a DMA-mapped buffer whose physical address
 * replaces the last register argument.
 *
 * Returns 0 on success, a negative errno on local failure, or the
 * remapped firmware error.
 */
int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
		   enum qcom_scm_convention qcom_convention,
		   struct qcom_scm_res *res, enum qcom_scm_call_type call_type)
{
	/* Low nibble of arginfo encodes the argument count. */
	int arglen = desc->arginfo & 0xf;
	int i, ret;
	struct qtee_shm shm = {0};
	bool use_qtee_shmbridge;
	size_t alloc_len;
	const bool atomic = (call_type == QCOM_SCM_CALL_ATOMIC);
	/* No sleeping allocations in atomic context. */
	gfp_t flag = atomic ? GFP_ATOMIC : GFP_NOIO;
	/* Atomic calls map to SMCCC FAST calls, others to STD calls. */
	u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
	u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
				    ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
	struct arm_smccc_res smc_res;
	struct arm_smccc_args smc = {0};

	smc.args[0] = ARM_SMCCC_CALL_VAL(
		smccc_call_type,
		qcom_smccc_convention,
		desc->owner,
		SCM_SMC_FNID(desc->svc, desc->cmd));
	smc.args[1] = desc->arginfo;
	/* First four args go directly in registers a2..a5. */
	for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
		smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];

	/* Overflow arguments are passed via a DMA-mapped buffer. */
	if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
		if (!dev)
			return -EPROBE_DEFER;

		alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
		/* Prefer the shmbridge allocator when it is available. */
		use_qtee_shmbridge = qtee_shmbridge_is_enabled();
		if (use_qtee_shmbridge) {
			ret = qtee_shmbridge_allocate_shm(alloc_len, &shm);
			if (ret)
				return ret;
		} else {
			shm.vaddr = kzalloc(PAGE_ALIGN(alloc_len), flag);
			if (!shm.vaddr)
				return -ENOMEM;
		}

		/* Pack extra args in the width the convention expects. */
		if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
			__le32 *args = shm.vaddr;

			for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
				args[i] = cpu_to_le32(desc->args[i +
						      SCM_SMC_FIRST_EXT_IDX]);
		} else {
			__le64 *args = shm.vaddr;

			for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
				args[i] = cpu_to_le64(desc->args[i +
						      SCM_SMC_FIRST_EXT_IDX]);
		}

		shm.paddr = dma_map_single(dev, shm.vaddr, alloc_len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, shm.paddr)) {
			/* Free with the allocator that produced the buffer. */
			if (use_qtee_shmbridge)
				qtee_shmbridge_free_shm(&shm);
			else
				kfree(shm.vaddr);
			return -ENOMEM;
		}

		/* Last register arg carries the buffer's physical address. */
		smc.args[SCM_SMC_LAST_REG_IDX] = shm.paddr;
	}

	ret = __scm_smc_do(dev, &smc, &smc_res, call_type, desc->multicall_allowed);

	/* ret error check follows shm cleanup */
	if (shm.vaddr) {
		/* use_qtee_shmbridge/alloc_len are set iff shm.vaddr is. */
		dma_unmap_single(dev, shm.paddr, alloc_len, DMA_TO_DEVICE);
		if (use_qtee_shmbridge)
			qtee_shmbridge_free_shm(&shm);
		else
			kfree(shm.vaddr);
	}

	if (ret)
		return ret;

	if (res) {
		res->result[0] = smc_res.a1;
		res->result[1] = smc_res.a2;
		res->result[2] = smc_res.a3;
	}

	/* Translate firmware's a0 status into a kernel errno. */
	ret = (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;

	return ret;
}
  238. void __qcom_scm_init(void)
  239. {
  240. int ret;
  241. /**
  242. * The HAB connection should be opened before first SMC call.
  243. * If not, there could be errors that might cause the
  244. * system to crash.
  245. */
  246. ret = scm_qcpe_hab_open();
  247. if (ret != -EOPNOTSUPP) {
  248. hab_calling_convention = true;
  249. pr_debug("using HAB channel communication ret = %d\n", ret);
  250. }
  251. }
/* __qcom_scm_qcpe_exit() - close the HAB channel opened at init time. */
void __qcom_scm_qcpe_exit(void)
{
	scm_qcpe_hab_close();
}