smcinvoke.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__
  6. #include <linux/module.h>
  7. #include <linux/mod_devicetable.h>
  8. #include <linux/device.h>
  9. #include <linux/platform_device.h>
  10. #include <linux/slab.h>
  11. #include <linux/file.h>
  12. #include <linux/fs.h>
  13. #include <linux/anon_inodes.h>
  14. #include <linux/hashtable.h>
  15. #include <linux/cdev.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/dma-buf.h>
  18. #include <linux/delay.h>
  19. #include <linux/kref.h>
  20. #include <linux/signal.h>
  21. #include <linux/msm_ion.h>
  22. #include <linux/mem-buf.h>
  23. #include <linux/of_platform.h>
  24. #include <linux/firmware.h>
  25. #include <linux/qcom_scm.h>
  26. #include <asm/cacheflush.h>
  27. #include <soc/qcom/qseecomi.h>
  28. #include <linux/qtee_shmbridge.h>
  29. #include "smcinvoke.h"
  30. #include "smcinvoke_object.h"
  31. #include "misc/qseecom_kernel.h"
  32. #define CREATE_TRACE_POINTS
  33. #include "trace_smcinvoke.h"
  34. #define SMCINVOKE_DEV "smcinvoke"
  35. #define SMCINVOKE_TZ_ROOT_OBJ 1
  36. #define SMCINVOKE_TZ_OBJ_NULL 0
  37. #define SMCINVOKE_TZ_MIN_BUF_SIZE 4096
  38. #define SMCINVOKE_ARGS_ALIGN_SIZE (sizeof(uint64_t))
  39. #define SMCINVOKE_NEXT_AVAILABLE_TXN 0
  40. #define SMCINVOKE_REQ_PLACED 1
  41. #define SMCINVOKE_REQ_PROCESSING 2
  42. #define SMCINVOKE_REQ_PROCESSED 3
  43. #define SMCINVOKE_INCREMENT 1
  44. #define SMCINVOKE_DECREMENT 0
  45. #define SMCINVOKE_OBJ_TYPE_TZ_OBJ 0
  46. #define SMCINVOKE_OBJ_TYPE_SERVER 1
  47. #define SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL 2
  48. #define SMCINVOKE_MEM_MAP_OBJ 0
  49. #define SMCINVOKE_MEM_RGN_OBJ 1
  50. #define SMCINVOKE_MEM_PERM_RW 6
  51. #define SMCINVOKE_SCM_EBUSY_WAIT_MS 30
  52. #define SMCINVOKE_SCM_EBUSY_MAX_RETRY 67
  53. /* TZ defined values - Start */
  54. #define SMCINVOKE_INVOKE_PARAM_ID 0x224
  55. #define SMCINVOKE_CB_RSP_PARAM_ID 0x22
  56. #define SMCINVOKE_INVOKE_CMD_LEGACY 0x32000600
  57. #define SMCINVOKE_INVOKE_CMD 0x32000602
  58. #define SMCINVOKE_CB_RSP_CMD 0x32000601
  59. #define SMCINVOKE_RESULT_INBOUND_REQ_NEEDED 3
  60. /* TZ defined values - End */
  61. /*
  62. * This is the state when the server FD has been closed but
  63. * TZ still holds refs to CBObjs served by this server
  64. */
  65. #define SMCINVOKE_SERVER_STATE_DEFUNCT 1
  66. #define CBOBJ_MAX_RETRIES 5
  67. #define FOR_ARGS(ndxvar, counts, section) \
  68. for (ndxvar = OBJECT_COUNTS_INDEX_##section(counts); \
  69. ndxvar < (OBJECT_COUNTS_INDEX_##section(counts) \
  70. + OBJECT_COUNTS_NUM_##section(counts)); \
  71. ++ndxvar)
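/*
 * Illustrative usage of FOR_ARGS (a sketch, not part of the driver): with
 * counts packed as 2 BI, 1 BO, 0 OI, 1 OO, the BO section starts right
 * after the two BI slots, so the loop below visits only index 2:
 *
 *     int i;
 *     FOR_ARGS(i, counts, BO)
 *             pr_debug("BO arg at index %d\n", i);
 */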
  72. #define TZCB_BUF_OFFSET(tzcb_req) (sizeof(tzcb_req->result) + \
  73. sizeof(struct smcinvoke_msg_hdr) + \
  74. sizeof(union smcinvoke_tz_args) * \
  75. OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts))
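/*
 * Layout sketch (illustrative only): an inbound callback buffer with N
 * packed args is laid out as
 *     [ result | msg_hdr | N x smcinvoke_tz_args | buffer payloads ... ]
 * so TZCB_BUF_OFFSET() gives the offset at which buffer payloads begin.
 */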
  76. /*
  77. * +ve uhandle : either remote obj or mem obj, decided by f_ops
  78. * -ve uhandle : either Obj NULL or CBObj
  79. * - -1: OBJ NULL
  80. * - < -1: CBObj
  81. */
  82. #define UHANDLE_IS_FD(h) ((h) >= 0)
  83. #define UHANDLE_IS_NULL(h) ((h) == SMCINVOKE_USERSPACE_OBJ_NULL)
  84. #define UHANDLE_IS_CB_OBJ(h) ((h) < SMCINVOKE_USERSPACE_OBJ_NULL)
  85. #define UHANDLE_NULL (SMCINVOKE_USERSPACE_OBJ_NULL)
  86. /*
  87. * MAKE => create handle for other domain i.e. TZ or userspace
  88. * GET => retrieve obj from incoming handle
  89. */
  90. #define UHANDLE_GET_CB_OBJ(h) (-2-(h))
  91. #define UHANDLE_MAKE_CB_OBJ(o) (-2-(o))
  92. #define UHANDLE_GET_FD(h) (h)
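/*
 * Worked example (illustrative only): a CBObj with local index 3 goes to
 * userspace as UHANDLE_MAKE_CB_OBJ(3) = -2 - 3 = -5; on the way back,
 * UHANDLE_GET_CB_OBJ(-5) = -2 - (-5) = 3 recovers the index. -1 stays
 * reserved for OBJ NULL, and any handle >= 0 is treated as a plain FD.
 */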
  93. /*
  94. * +ve tzhandle : remote object i.e. owned by TZ
  95. * -ve tzhandle : local object i.e. owned by linux
  96. * ---------------------------------------------------
  97. * | 1 (1 bit) | Obj Id (15 bits) | srvr id (16 bits) |
  98. * ---------------------------------------------------
  99. * Server ids are defined below for various local objects
  100. * server id 0 : Kernel Obj
  101. * server id 1 : Memory region Obj
  102. * server id 2 : Memory map Obj
  103. * server id 3-15: Reserved
  104. * server id 16 & up: Callback Objs
  105. */
  106. #define KRNL_SRVR_ID 0
  107. #define MEM_RGN_SRVR_ID 1
  108. #define MEM_MAP_SRVR_ID 2
  109. #define CBOBJ_SERVER_ID_START 0x10
  110. #define CBOBJ_SERVER_ID_END ((1<<16) - 1)
  111. /* local obj id is represented by 15 bits */
  112. #define MAX_LOCAL_OBJ_ID ((1<<15) - 1)
  113. /* CBOBJs will be served by server id 0x10 onwards */
  114. #define TZHANDLE_GET_SERVER(h) ((uint16_t)((h) & 0xFFFF))
  115. #define TZHANDLE_GET_OBJID(h) (((h) >> 16) & 0x7FFF)
  116. #define TZHANDLE_MAKE_LOCAL(s, o) (((0x8000 | (o)) << 16) | (s))
  117. #define TZHANDLE_IS_NULL(h) ((h) == SMCINVOKE_TZ_OBJ_NULL)
  118. #define TZHANDLE_IS_LOCAL(h) ((h) & 0x80000000)
  119. #define TZHANDLE_IS_REMOTE(h) (!TZHANDLE_IS_NULL(h) && !TZHANDLE_IS_LOCAL(h))
  120. #define TZHANDLE_IS_KERNEL_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
  121. TZHANDLE_GET_SERVER(h) == KRNL_SRVR_ID)
  122. #define TZHANDLE_IS_MEM_RGN_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
  123. TZHANDLE_GET_SERVER(h) == MEM_RGN_SRVR_ID)
  124. #define TZHANDLE_IS_MEM_MAP_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
  125. TZHANDLE_GET_SERVER(h) == MEM_MAP_SRVR_ID)
  126. #define TZHANDLE_IS_MEM_OBJ(h) (TZHANDLE_IS_MEM_RGN_OBJ(h) || \
  127. TZHANDLE_IS_MEM_MAP_OBJ(h))
  128. #define TZHANDLE_IS_CB_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
  129. TZHANDLE_GET_SERVER(h) >= CBOBJ_SERVER_ID_START)
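/*
 * Worked example (illustrative only): a CBObj with obj id 0x5 served by
 * server 0x10 gets tzhandle TZHANDLE_MAKE_LOCAL(0x10, 0x5)
 *     = ((0x8000 | 0x5) << 16) | 0x10 = 0x80050010.
 * Bit 31 marks it local, TZHANDLE_GET_SERVER() recovers 0x10 (a CBObj
 * server id, so TZHANDLE_IS_CB_OBJ() holds), and TZHANDLE_GET_OBJID()
 * recovers 0x5.
 */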
  130. #define FILE_IS_REMOTE_OBJ(f) ((f)->f_op && (f)->f_op == &g_smcinvoke_fops)
  131. static DEFINE_MUTEX(g_smcinvoke_lock);
  132. #define NO_LOCK 0
  133. #define TAKE_LOCK 1
  134. #define MUTEX_LOCK(x) { if (x) mutex_lock(&g_smcinvoke_lock); }
  135. #define MUTEX_UNLOCK(x) { if (x) mutex_unlock(&g_smcinvoke_lock); }
  136. static DEFINE_HASHTABLE(g_cb_servers, 8);
  137. static LIST_HEAD(g_mem_objs);
  138. static uint16_t g_last_cb_server_id = CBOBJ_SERVER_ID_START;
  139. static uint16_t g_last_mem_rgn_id, g_last_mem_map_obj_id;
  140. static size_t g_max_cb_buf_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
  141. static unsigned int cb_reqs_inflight;
  142. static bool legacy_smc_call;
  143. static int invoke_cmd;
  144. static long smcinvoke_ioctl(struct file *, unsigned int, unsigned long);
  145. static int smcinvoke_open(struct inode *, struct file *);
  146. static int smcinvoke_release(struct inode *, struct file *);
  147. static int release_cb_server(uint16_t);
  148. static const struct file_operations g_smcinvoke_fops = {
  149. .owner = THIS_MODULE,
  150. .unlocked_ioctl = smcinvoke_ioctl,
  151. .compat_ioctl = smcinvoke_ioctl,
  152. .open = smcinvoke_open,
  153. .release = smcinvoke_release,
  154. };
  155. static dev_t smcinvoke_device_no;
  156. static struct cdev smcinvoke_cdev;
  157. static struct class *driver_class;
  158. static struct device *class_dev;
  159. static struct platform_device *smcinvoke_pdev;
  160. struct smcinvoke_buf_hdr {
  161. uint32_t offset;
  162. uint32_t size;
  163. };
  164. union smcinvoke_tz_args {
  165. struct smcinvoke_buf_hdr b;
  166. int32_t handle;
  167. };
  168. struct smcinvoke_msg_hdr {
  169. uint32_t tzhandle;
  170. uint32_t op;
  171. uint32_t counts;
  172. };
  173. /* Inbound reqs from TZ */
  174. struct smcinvoke_tzcb_req {
  175. int32_t result;
  176. struct smcinvoke_msg_hdr hdr;
  177. union smcinvoke_tz_args args[0];
  178. };
  179. struct smcinvoke_file_data {
  180. uint32_t context_type;
  181. union {
  182. uint32_t tzhandle;
  183. uint16_t server_id;
  184. };
  185. };
  186. struct smcinvoke_piggyback_msg {
  187. uint32_t version;
  188. uint32_t op;
  189. uint32_t counts;
  190. int32_t objs[0];
  191. };
  192. /* Data structure to hold request coming from TZ */
  193. struct smcinvoke_cb_txn {
  194. uint32_t txn_id;
  195. int32_t state;
  196. struct smcinvoke_tzcb_req *cb_req;
  197. size_t cb_req_bytes;
  198. struct file **filp_to_release;
  199. struct hlist_node hash;
  200. struct kref ref_cnt;
  201. };
  202. struct smcinvoke_server_info {
  203. uint16_t server_id;
  204. uint16_t state;
  205. uint32_t txn_id;
  206. struct kref ref_cnt;
  207. wait_queue_head_t req_wait_q;
  208. wait_queue_head_t rsp_wait_q;
  209. size_t cb_buf_size;
  210. DECLARE_HASHTABLE(reqs_table, 4);
  211. DECLARE_HASHTABLE(responses_table, 4);
  212. struct hlist_node hash;
  213. struct list_head pending_cbobjs;
  214. };
  215. struct smcinvoke_cbobj {
  216. uint16_t cbobj_id;
  217. struct kref ref_cnt;
  218. struct smcinvoke_server_info *server;
  219. struct list_head list;
  220. };
  221. /*
  222. * We require a couple of objects: one for the mem region and another
  223. * for the mapped mem_obj once the mem region has been mapped. TZ may
  224. * release either one independently of the other.
  225. */
  226. struct smcinvoke_mem_obj {
  227. /* these ids are objid part of tzhandle */
  228. uint16_t mem_region_id;
  229. uint16_t mem_map_obj_id;
  230. struct dma_buf *dma_buf;
  231. struct dma_buf_attachment *buf_attach;
  232. struct sg_table *sgt;
  233. struct kref mem_regn_ref_cnt;
  234. struct kref mem_map_obj_ref_cnt;
  235. uint64_t p_addr;
  236. size_t p_addr_len;
  237. struct list_head list;
  238. bool bridge_created_by_others;
  239. uint64_t shmbridge_handle;
  240. };
  241. static void destroy_cb_server(struct kref *kref)
  242. {
  243. struct smcinvoke_server_info *server = container_of(kref,
  244. struct smcinvoke_server_info, ref_cnt);
  245. if (server) {
  246. hash_del(&server->hash);
  247. kfree(server);
  248. }
  249. }
  250. /*
  251. * A separate find func is required mainly for a couple of cases:
  252. * next_cb_server_id_locked, which checks whether a server id has been used.
  253. * - Taking a ref_cnt would be needless overhead for this case.
  254. * smcinvoke_release, which is called when a server is closed from userspace.
  255. * - During server creation we init the ref count; here we put it back.
  256. */
  257. static struct smcinvoke_server_info *find_cb_server_locked(uint16_t server_id)
  258. {
  259. struct smcinvoke_server_info *data = NULL;
  260. hash_for_each_possible(g_cb_servers, data, hash, server_id) {
  261. if (data->server_id == server_id)
  262. return data;
  263. }
  264. return NULL;
  265. }
  266. static struct smcinvoke_server_info *get_cb_server_locked(uint16_t server_id)
  267. {
  268. struct smcinvoke_server_info *server = find_cb_server_locked(server_id);
  269. if (server)
  270. kref_get(&server->ref_cnt);
  271. return server;
  272. }
  273. static uint16_t next_cb_server_id_locked(void)
  274. {
  275. if (g_last_cb_server_id == CBOBJ_SERVER_ID_END)
  276. g_last_cb_server_id = CBOBJ_SERVER_ID_START;
  277. while (find_cb_server_locked(++g_last_cb_server_id))
  278. ;
  279. return g_last_cb_server_id;
  280. }
  281. static inline void release_filp(struct file **filp_to_release, size_t arr_len)
  282. {
  283. size_t i = 0;
  284. for (i = 0; i < arr_len; i++) {
  285. if (filp_to_release[i]) {
  286. fput(filp_to_release[i]);
  287. filp_to_release[i] = NULL;
  288. }
  289. }
  290. }
  291. static struct smcinvoke_mem_obj *find_mem_obj_locked(uint16_t mem_obj_id,
  292. bool is_mem_rgn_obj)
  293. {
  294. struct smcinvoke_mem_obj *mem_obj = NULL;
  295. if (list_empty(&g_mem_objs))
  296. return NULL;
  297. list_for_each_entry(mem_obj, &g_mem_objs, list) {
  298. if ((is_mem_rgn_obj &&
  299. (mem_obj->mem_region_id == mem_obj_id)) ||
  300. (!is_mem_rgn_obj &&
  301. (mem_obj->mem_map_obj_id == mem_obj_id)))
  302. return mem_obj;
  303. }
  304. return NULL;
  305. }
  306. static uint32_t next_mem_region_obj_id_locked(void)
  307. {
  308. if (g_last_mem_rgn_id == MAX_LOCAL_OBJ_ID)
  309. g_last_mem_rgn_id = 0;
  310. while (find_mem_obj_locked(++g_last_mem_rgn_id, SMCINVOKE_MEM_RGN_OBJ))
  311. ;
  312. return g_last_mem_rgn_id;
  313. }
  314. static uint32_t next_mem_map_obj_id_locked(void)
  315. {
  316. if (g_last_mem_map_obj_id == MAX_LOCAL_OBJ_ID)
  317. g_last_mem_map_obj_id = 0;
  318. while (find_mem_obj_locked(++g_last_mem_map_obj_id,
  319. SMCINVOKE_MEM_MAP_OBJ))
  320. ;
  321. return g_last_mem_map_obj_id;
  322. }
  323. static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj)
  324. {
  325. list_del(&mem_obj->list);
  326. dma_buf_put(mem_obj->dma_buf);
  327. if (!mem_obj->bridge_created_by_others)
  328. qtee_shmbridge_deregister(mem_obj->shmbridge_handle);
  329. kfree(mem_obj);
  330. }
  331. static void del_mem_regn_obj_locked(struct kref *kref)
  332. {
  333. struct smcinvoke_mem_obj *mem_obj = container_of(kref,
  334. struct smcinvoke_mem_obj, mem_regn_ref_cnt);
  335. /*
  336. * mem_regn obj and mem_map obj are held inside the mem_obj structure,
  337. * which can't be released until both kinds of objs have been released.
  338. * So check whether the mem_map obj has ref 0 and only then release mem_obj.
  339. */
  340. if (kref_read(&mem_obj->mem_map_obj_ref_cnt) == 0)
  341. free_mem_obj_locked(mem_obj);
  342. }
  343. static void del_mem_map_obj_locked(struct kref *kref)
  344. {
  345. struct smcinvoke_mem_obj *mem_obj = container_of(kref,
  346. struct smcinvoke_mem_obj, mem_map_obj_ref_cnt);
  347. mem_obj->p_addr_len = 0;
  348. mem_obj->p_addr = 0;
  349. if (mem_obj->sgt)
  350. dma_buf_unmap_attachment(mem_obj->buf_attach,
  351. mem_obj->sgt, DMA_BIDIRECTIONAL);
  352. if (mem_obj->buf_attach)
  353. dma_buf_detach(mem_obj->dma_buf, mem_obj->buf_attach);
  354. /*
  355. * mem_regn obj and mem_map obj are held inside the mem_obj structure,
  356. * which can't be released until both kinds of objs have been released.
  357. * So check whether the mem_regn obj has ref 0 and only then release mem_obj.
  358. */
  359. if (kref_read(&mem_obj->mem_regn_ref_cnt) == 0)
  360. free_mem_obj_locked(mem_obj);
  361. }
  362. static int release_mem_obj_locked(int32_t tzhandle)
  363. {
  364. int is_mem_regn_obj = TZHANDLE_IS_MEM_RGN_OBJ(tzhandle);
  365. struct smcinvoke_mem_obj *mem_obj = find_mem_obj_locked(
  366. TZHANDLE_GET_OBJID(tzhandle), is_mem_regn_obj);
  367. if (!mem_obj) {
  368. pr_err("memory object not found\n");
  369. return OBJECT_ERROR_BADOBJ;
  370. }
  371. if (is_mem_regn_obj)
  372. kref_put(&mem_obj->mem_regn_ref_cnt, del_mem_regn_obj_locked);
  373. else
  374. kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
  375. return OBJECT_OK;
  376. }
  377. static void free_pending_cbobj_locked(struct kref *kref)
  378. {
  379. struct smcinvoke_server_info *server = NULL;
  380. struct smcinvoke_cbobj *obj = container_of(kref,
  381. struct smcinvoke_cbobj, ref_cnt);
  382. list_del(&obj->list);
  383. server = obj->server;
  384. kfree(obj);
  385. if (server)
  386. kref_put(&server->ref_cnt, destroy_cb_server);
  387. }
  388. static int get_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
  389. {
  390. int ret = 0;
  391. bool release_server = true;
  392. struct list_head *head = NULL;
  393. struct smcinvoke_cbobj *cbobj = NULL;
  394. struct smcinvoke_cbobj *obj = NULL;
  395. struct smcinvoke_server_info *server = get_cb_server_locked(srvr_id);
  396. if (!server) {
  397. pr_err("%s, server id : %u not found\n", __func__, srvr_id);
  398. return OBJECT_ERROR_BADOBJ;
  399. }
  400. head = &server->pending_cbobjs;
  401. list_for_each_entry(cbobj, head, list)
  402. if (cbobj->cbobj_id == obj_id) {
  403. kref_get(&cbobj->ref_cnt);
  404. goto out;
  405. }
  406. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  407. if (!obj) {
  408. ret = OBJECT_ERROR_KMEM;
  409. goto out;
  410. }
  411. obj->cbobj_id = obj_id;
  412. kref_init(&obj->ref_cnt);
  413. obj->server = server;
  414. /*
  415. * we are holding server ref in cbobj; we will
  416. * release server ref when cbobj is destroyed
  417. */
  418. release_server = false;
  419. list_add_tail(&obj->list, head);
  420. out:
  421. if (release_server)
  422. kref_put(&server->ref_cnt, destroy_cb_server);
  423. return ret;
  424. }
  425. static int put_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
  426. {
  427. int ret = -EINVAL;
  428. struct smcinvoke_server_info *srvr_info =
  429. get_cb_server_locked(srvr_id);
  430. struct list_head *head = NULL;
  431. struct smcinvoke_cbobj *cbobj = NULL;
  432. if (!srvr_info) {
  433. pr_err("%s, server id : %u not found\n", __func__, srvr_id);
  434. return ret;
  435. }
  436. trace_put_pending_cbobj_locked(srvr_id, obj_id);
  437. head = &srvr_info->pending_cbobjs;
  438. list_for_each_entry(cbobj, head, list)
  439. if (cbobj->cbobj_id == obj_id) {
  440. kref_put(&cbobj->ref_cnt, free_pending_cbobj_locked);
  441. ret = 0;
  442. break;
  443. }
  444. kref_put(&srvr_info->ref_cnt, destroy_cb_server);
  445. return ret;
  446. }
  447. static int release_tzhandle_locked(int32_t tzhandle)
  448. {
  449. if (TZHANDLE_IS_MEM_OBJ(tzhandle))
  450. return release_mem_obj_locked(tzhandle);
  451. else if (TZHANDLE_IS_CB_OBJ(tzhandle))
  452. return put_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle),
  453. TZHANDLE_GET_OBJID(tzhandle));
  454. return OBJECT_ERROR;
  455. }
  456. static void release_tzhandles(const int32_t *tzhandles, size_t len)
  457. {
  458. size_t i;
  459. mutex_lock(&g_smcinvoke_lock);
  460. for (i = 0; i < len; i++)
  461. release_tzhandle_locked(tzhandles[i]);
  462. mutex_unlock(&g_smcinvoke_lock);
  463. }
  464. static void delete_cb_txn(struct kref *kref)
  465. {
  466. struct smcinvoke_cb_txn *cb_txn = container_of(kref,
  467. struct smcinvoke_cb_txn, ref_cnt);
  468. if (OBJECT_OP_METHODID(cb_txn->cb_req->hdr.op) == OBJECT_OP_RELEASE)
  469. release_tzhandle_locked(cb_txn->cb_req->hdr.tzhandle);
  470. kfree(cb_txn->cb_req);
  471. hash_del(&cb_txn->hash);
  472. kfree(cb_txn);
  473. }
  474. static struct smcinvoke_cb_txn *find_cbtxn_locked(
  475. struct smcinvoke_server_info *server,
  476. uint32_t txn_id, int32_t state)
  477. {
  478. int i = 0;
  479. struct smcinvoke_cb_txn *cb_txn = NULL;
  480. /*
  481. * Since HASH_BITS() does not work on pointers, we can't select the hash
  482. * table based on state and loop over it.
  483. */
  484. if (state == SMCINVOKE_REQ_PLACED) {
  485. /* pick up 1st req */
  486. hash_for_each(server->reqs_table, i, cb_txn, hash) {
  487. kref_get(&cb_txn->ref_cnt);
  488. hash_del(&cb_txn->hash);
  489. return cb_txn;
  490. }
  491. } else if (state == SMCINVOKE_REQ_PROCESSING) {
  492. hash_for_each_possible(
  493. server->responses_table, cb_txn, hash, txn_id) {
  494. if (cb_txn->txn_id == txn_id) {
  495. kref_get(&cb_txn->ref_cnt);
  496. hash_del(&cb_txn->hash);
  497. return cb_txn;
  498. }
  499. }
  500. }
  501. return NULL;
  502. }
  503. /*
  504. * size_add saturates at SIZE_MAX. If integer overflow is detected,
  505. * this function returns SIZE_MAX; otherwise the normal a + b is returned.
  506. */
  507. static inline size_t size_add(size_t a, size_t b)
  508. {
  509. return (b > (SIZE_MAX - a)) ? SIZE_MAX : a + b;
  510. }
  511. /*
  512. * pad_size is used along with size_align to define a buffer overflow
  513. * protected version of ALIGN
  514. */
  515. static inline size_t pad_size(size_t a, size_t b)
  516. {
  517. return (~a + 1) % b;
  518. }
  519. /*
  520. * size_align saturates at SIZE_MAX. If integer overflow is detected, this
  521. * function returns SIZE_MAX; otherwise the next aligned size is returned.
  522. */
  523. static inline size_t size_align(size_t a, size_t b)
  524. {
  525. return size_add(a, pad_size(a, b));
  526. }
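/*
 * Worked example (illustrative only), for b a power of two:
 *     pad_size(13, 8)   = (~13 + 1) % 8 = 3
 *     size_align(13, 8) = size_add(13, 3) = 16
 * If a were within b - 1 of SIZE_MAX, size_add() would saturate and the
 * result would stay SIZE_MAX instead of wrapping around.
 */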
  527. static uint16_t get_server_id(int cb_server_fd)
  528. {
  529. uint16_t server_id = 0;
  530. struct smcinvoke_file_data *svr_cxt = NULL;
  531. struct file *tmp_filp = fget(cb_server_fd);
  532. if (!tmp_filp)
  533. return server_id;
  534. svr_cxt = tmp_filp->private_data;
  535. if (svr_cxt && svr_cxt->context_type == SMCINVOKE_OBJ_TYPE_SERVER)
  536. server_id = svr_cxt->server_id;
  537. if (tmp_filp)
  538. fput(tmp_filp);
  539. return server_id;
  540. }
  541. static bool is_dma_fd(int32_t uhandle, struct dma_buf **dma_buf)
  542. {
  543. *dma_buf = dma_buf_get(uhandle);
  544. return !IS_ERR_OR_NULL(*dma_buf);
  545. }
  546. static bool is_remote_obj(int32_t uhandle, struct smcinvoke_file_data **tzobj,
  547. struct file **filp)
  548. {
  549. bool ret = false;
  550. struct file *tmp_filp = fget(uhandle);
  551. if (!tmp_filp)
  552. return ret;
  553. if (FILE_IS_REMOTE_OBJ(tmp_filp)) {
  554. *tzobj = tmp_filp->private_data;
  555. if ((*tzobj)->context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
  556. *filp = tmp_filp;
  557. tmp_filp = NULL;
  558. ret = true;
  559. }
  560. }
  561. if (tmp_filp)
  562. fput(tmp_filp);
  563. return ret;
  564. }
  565. static int create_mem_obj(struct dma_buf *dma_buf, int32_t *mem_obj)
  566. {
  567. struct smcinvoke_mem_obj *t_mem_obj =
  568. kzalloc(sizeof(*t_mem_obj), GFP_KERNEL);
  569. if (!t_mem_obj) {
  570. dma_buf_put(dma_buf);
  571. return -ENOMEM;
  572. }
  573. kref_init(&t_mem_obj->mem_regn_ref_cnt);
  574. t_mem_obj->dma_buf = dma_buf;
  575. mutex_lock(&g_smcinvoke_lock);
  576. t_mem_obj->mem_region_id = next_mem_region_obj_id_locked();
  577. list_add_tail(&t_mem_obj->list, &g_mem_objs);
  578. mutex_unlock(&g_smcinvoke_lock);
  579. *mem_obj = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID,
  580. t_mem_obj->mem_region_id);
  581. return 0;
  582. }
  583. /*
  584. * This function retrieves the file pointer corresponding to the FD
  585. * provided and stores it until the IOCTL call concludes; once the call
  586. * completes, all stored file pointers are released. File pointers are
  587. * stored so other threads cannot release the FD while the IOCTL runs.
  588. */
  589. static int get_tzhandle_from_uhandle(int32_t uhandle, int32_t server_fd,
  590. struct file **filp, uint32_t *tzhandle)
  591. {
  592. int ret = -EBADF;
  593. uint16_t server_id = 0;
  594. if (UHANDLE_IS_NULL(uhandle)) {
  595. *tzhandle = SMCINVOKE_TZ_OBJ_NULL;
  596. ret = 0;
  597. } else if (UHANDLE_IS_CB_OBJ(uhandle)) {
  598. server_id = get_server_id(server_fd);
  599. if (server_id < CBOBJ_SERVER_ID_START)
  600. goto out;
  601. mutex_lock(&g_smcinvoke_lock);
  602. ret = get_pending_cbobj_locked(server_id,
  603. UHANDLE_GET_CB_OBJ(uhandle));
  604. mutex_unlock(&g_smcinvoke_lock);
  605. if (ret)
  606. goto out;
  607. *tzhandle = TZHANDLE_MAKE_LOCAL(server_id,
  608. UHANDLE_GET_CB_OBJ(uhandle));
  609. ret = 0;
  610. } else if (UHANDLE_IS_FD(uhandle)) {
  611. struct dma_buf *dma_buf = NULL;
  612. struct smcinvoke_file_data *tzobj = NULL;
  613. if (is_dma_fd(UHANDLE_GET_FD(uhandle), &dma_buf)) {
  614. ret = create_mem_obj(dma_buf, tzhandle);
  615. } else if (is_remote_obj(UHANDLE_GET_FD(uhandle),
  616. &tzobj, filp)) {
  617. *tzhandle = tzobj->tzhandle;
  618. ret = 0;
  619. }
  620. }
  621. out:
  622. return ret;
  623. }
  624. static int get_fd_for_obj(uint32_t obj_type, uint32_t obj, int32_t *fd)
  625. {
  626. int unused_fd = -1, ret = -EINVAL;
  627. struct file *f = NULL;
  628. struct smcinvoke_file_data *cxt = NULL;
  629. cxt = kzalloc(sizeof(*cxt), GFP_KERNEL);
  630. if (!cxt) {
  631. ret = -ENOMEM;
  632. goto out;
  633. }
  634. if (obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ ||
  635. obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
  636. cxt->context_type = obj_type;
  637. cxt->tzhandle = obj;
  638. } else if (obj_type == SMCINVOKE_OBJ_TYPE_SERVER) {
  639. cxt->context_type = SMCINVOKE_OBJ_TYPE_SERVER;
  640. cxt->server_id = obj;
  641. } else {
  642. goto out;
  643. }
  644. unused_fd = get_unused_fd_flags(O_RDWR);
  645. if (unused_fd < 0)
  646. goto out;
  647. if (fd == NULL)
  648. goto out;
  649. f = anon_inode_getfile(SMCINVOKE_DEV, &g_smcinvoke_fops, cxt, O_RDWR);
  650. if (IS_ERR(f))
  651. goto out;
  652. *fd = unused_fd;
  653. fd_install(*fd, f);
  654. return 0;
  655. out:
  656. if (unused_fd >= 0)
  657. put_unused_fd(unused_fd);
  658. kfree(cxt);
  659. return ret;
  660. }
  661. static int get_uhandle_from_tzhandle(int32_t tzhandle, int32_t srvr_id,
  662. int32_t *uhandle, bool lock, uint32_t context_type)
  663. {
  664. int ret = -1;
  665. if (TZHANDLE_IS_NULL(tzhandle)) {
  666. *uhandle = UHANDLE_NULL;
  667. ret = 0;
  668. } else if (TZHANDLE_IS_CB_OBJ(tzhandle)) {
  669. if (srvr_id != TZHANDLE_GET_SERVER(tzhandle))
  670. goto out;
  671. *uhandle = UHANDLE_MAKE_CB_OBJ(TZHANDLE_GET_OBJID(tzhandle));
  672. MUTEX_LOCK(lock)
  673. ret = get_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle),
  674. TZHANDLE_GET_OBJID(tzhandle));
  675. MUTEX_UNLOCK(lock)
  676. } else if (TZHANDLE_IS_MEM_RGN_OBJ(tzhandle)) {
  677. struct smcinvoke_mem_obj *mem_obj = NULL;
  678. MUTEX_LOCK(lock)
  679. mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(tzhandle),
  680. SMCINVOKE_MEM_RGN_OBJ);
  681. if (mem_obj != NULL) {
  682. int fd;
  683. fd = dma_buf_fd(mem_obj->dma_buf, O_CLOEXEC);
  684. if (fd < 0)
  685. goto exit_lock;
  686. *uhandle = fd;
  687. ret = 0;
  688. }
  689. exit_lock:
  690. MUTEX_UNLOCK(lock)
  691. } else if (TZHANDLE_IS_REMOTE(tzhandle)) {
  692. /* if execution comes here => tzhandle is an unsigned int */
  693. ret = get_fd_for_obj(context_type,
  694. (uint32_t)tzhandle, uhandle);
  695. }
  696. out:
  697. return ret;
  698. }
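/*
 * Mapping sketch (illustrative summary of the function above):
 *     TZ NULL handle           -> UHANDLE_NULL (-1)
 *     local CBObj tzhandle     -> negative CBObj uhandle (< -1)
 *     local mem region handle  -> dma-buf FD
 *     remote TZ handle         -> anon-inode FD via get_fd_for_obj()
 */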
  699. static int smcinvoke_create_bridge(struct smcinvoke_mem_obj *mem_obj)
  700. {
  701. int ret = 0;
  702. int tz_perm = PERM_READ|PERM_WRITE;
  703. uint32_t *vmid_list;
  704. uint32_t *perms_list;
  705. uint32_t nelems = 0;
  706. struct dma_buf *dmabuf = mem_obj->dma_buf;
  707. phys_addr_t phys = mem_obj->p_addr;
  708. size_t size = mem_obj->p_addr_len;
  709. if (!qtee_shmbridge_is_enabled())
  710. return 0;
  711. ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list,
  712. (int **)&perms_list, (int *)&nelems);
  713. if (ret) {
  714. pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret);
  715. return ret;
  716. }
  717. if (mem_buf_dma_buf_exclusive_owner(dmabuf))
  718. perms_list[0] = PERM_READ | PERM_WRITE;
  719. ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
  720. tz_perm, &mem_obj->shmbridge_handle);
  721. if (ret && ret != -EEXIST) {
  722. pr_err("creation of shm bridge for mem_region_id %d failed ret %d\n",
  723. mem_obj->mem_region_id, ret);
  724. goto exit;
  725. }
  726. if (ret == -EEXIST) {
  727. mem_obj->bridge_created_by_others = true;
  728. ret = 0;
  729. }
  730. trace_smcinvoke_create_bridge(mem_obj->shmbridge_handle, mem_obj->mem_region_id);
  731. exit:
  732. kfree(perms_list);
  733. kfree(vmid_list);
  734. return ret;
  735. }
  736. static int32_t smcinvoke_release_mem_obj_locked(void *buf, size_t buf_len)
  737. {
  738. struct smcinvoke_tzcb_req *msg = buf;
  739. if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 0, 0, 0)) {
  740. pr_err("Invalid object count in %s\n", __func__);
  741. return OBJECT_ERROR_INVALID;
  742. }
  743. trace_release_mem_obj_locked(msg->hdr.tzhandle, buf_len);
  744. return release_tzhandle_locked(msg->hdr.tzhandle);
  745. }
  746. static int32_t smcinvoke_map_mem_region(void *buf, size_t buf_len)
  747. {
  748. int ret = OBJECT_OK;
  749. struct smcinvoke_tzcb_req *msg = buf;
  750. struct {
  751. uint64_t p_addr;
  752. uint64_t len;
  753. uint32_t perms;
  754. } *ob = NULL;
  755. int32_t *oo = NULL;
  756. struct smcinvoke_mem_obj *mem_obj = NULL;
  757. struct dma_buf_attachment *buf_attach = NULL;
  758. struct sg_table *sgt = NULL;
  759. if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 1, 1, 1) ||
  760. (buf_len - msg->args[0].b.offset < msg->args[0].b.size)) {
  761. pr_err("Invalid counts received for mapping mem obj\n");
  762. return OBJECT_ERROR_INVALID;
  763. }
  764. /* args[0] = BO, args[1] = OI, args[2] = OO */
  765. ob = buf + msg->args[0].b.offset;
  766. oo = &msg->args[2].handle;
  767. mutex_lock(&g_smcinvoke_lock);
  768. mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(msg->args[1].handle),
  769. SMCINVOKE_MEM_RGN_OBJ);
  770. if (!mem_obj) {
  771. mutex_unlock(&g_smcinvoke_lock);
  772. pr_err("Memory object not found\n");
  773. return OBJECT_ERROR_BADOBJ;
  774. }
  775. if (!mem_obj->p_addr) {
  776. kref_init(&mem_obj->mem_map_obj_ref_cnt);
  777. buf_attach = dma_buf_attach(mem_obj->dma_buf,
  778. &smcinvoke_pdev->dev);
  779. if (IS_ERR(buf_attach)) {
  780. ret = OBJECT_ERROR_KMEM;
  781. pr_err("dma buf attach failed, ret: %d\n", ret);
  782. goto out;
  783. }
  784. mem_obj->buf_attach = buf_attach;
  785. sgt = dma_buf_map_attachment(buf_attach, DMA_BIDIRECTIONAL);
  786. if (IS_ERR(sgt)) {
  787. pr_err("mapping dma buffers failed, ret: %d\n",
  788. PTR_ERR(sgt));
  789. ret = OBJECT_ERROR_KMEM;
  790. goto out;
  791. }
  792. mem_obj->sgt = sgt;
  793. /* contiguous only => nents=1 */
  794. if (sgt->nents != 1) {
  795. ret = OBJECT_ERROR_INVALID;
  796. pr_err("sg enries are not contigous, ret: %d\n", ret);
  797. goto out;
  798. }
  799. mem_obj->p_addr = sg_dma_address(sgt->sgl);
  800. mem_obj->p_addr_len = sgt->sgl->length;
  801. if (!mem_obj->p_addr) {
  802. ret = OBJECT_ERROR_INVALID;
  803. pr_err("invalid physical address, ret: %d\n", ret);
  804. goto out;
  805. }
  806. ret = smcinvoke_create_bridge(mem_obj);
  807. if (ret) {
  808. ret = OBJECT_ERROR_INVALID;
  809. goto out;
  810. }
  811. mem_obj->mem_map_obj_id = next_mem_map_obj_id_locked();
  812. } else {
  813. kref_get(&mem_obj->mem_map_obj_ref_cnt);
  814. }
  815. ob->p_addr = mem_obj->p_addr;
  816. ob->len = mem_obj->p_addr_len;
  817. ob->perms = SMCINVOKE_MEM_PERM_RW;
  818. *oo = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj->mem_map_obj_id);
  819. out:
  820. if (ret != OBJECT_OK)
  821. kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
  822. mutex_unlock(&g_smcinvoke_lock);
  823. return ret;
  824. }
  825. static void process_kernel_obj(void *buf, size_t buf_len)
  826. {
  827. struct smcinvoke_tzcb_req *cb_req = buf;
  828. switch (cb_req->hdr.op) {
  829. case OBJECT_OP_MAP_REGION:
  830. cb_req->result = smcinvoke_map_mem_region(buf, buf_len);
  831. break;
  832. case OBJECT_OP_YIELD:
  833. cb_req->result = OBJECT_OK;
  834. break;
  835. default:
  836. pr_err(" invalid operation for tz kernel object\n");
  837. cb_req->result = OBJECT_ERROR_INVALID;
  838. break;
  839. }
  840. }
  841. static void process_mem_obj(void *buf, size_t buf_len)
  842. {
  843. struct smcinvoke_tzcb_req *cb_req = buf;
  844. mutex_lock(&g_smcinvoke_lock);
  845. cb_req->result = (cb_req->hdr.op == OBJECT_OP_RELEASE) ?
  846. smcinvoke_release_mem_obj_locked(buf, buf_len) :
  847. OBJECT_ERROR_INVALID;
  848. mutex_unlock(&g_smcinvoke_lock);
  849. }
  850. static int invoke_cmd_handler(int cmd, phys_addr_t in_paddr, size_t in_buf_len,
  851. uint8_t *out_buf, phys_addr_t out_paddr,
  852. size_t out_buf_len, int32_t *result, u64 *response_type,
  853. unsigned int *data, struct qtee_shm *in_shm,
  854. struct qtee_shm *out_shm)
  855. {
  856. int ret = 0;
  857. switch (cmd) {
  858. case SMCINVOKE_INVOKE_CMD_LEGACY:
  859. qtee_shmbridge_flush_shm_buf(in_shm);
  860. qtee_shmbridge_flush_shm_buf(out_shm);
  861. ret = qcom_scm_invoke_smc_legacy(in_paddr, in_buf_len, out_paddr, out_buf_len,
  862. result, response_type, data);
  863. qtee_shmbridge_inv_shm_buf(in_shm);
  864. qtee_shmbridge_inv_shm_buf(out_shm);
  865. break;
  866. case SMCINVOKE_INVOKE_CMD:
  867. ret = qcom_scm_invoke_smc(in_paddr, in_buf_len, out_paddr, out_buf_len,
  868. result, response_type, data);
  869. break;
  870. case SMCINVOKE_CB_RSP_CMD:
  871. ret = qcom_scm_invoke_callback_response(virt_to_phys(out_buf), out_buf_len,
  872. result, response_type, data);
  873. break;
  874. default:
  875. ret = -EINVAL;
  876. break;
  877. }
  878. trace_invoke_cmd_handler(cmd, *response_type, *result, ret);
  879. return ret;
  880. }
  881. /*
  882. * Buf should be aligned to struct smcinvoke_tzcb_req
  883. */
  884. static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
  885. {
  886. /* ret is going to TZ. Provide values from OBJECT_ERROR_<> */
  887. int ret = OBJECT_ERROR_DEFUNCT;
  888. int cbobj_retries = 0;
  889. long timeout_jiff;
  890. struct smcinvoke_cb_txn *cb_txn = NULL;
  891. struct smcinvoke_tzcb_req *cb_req = NULL, *tmp_cb_req = NULL;
  892. struct smcinvoke_server_info *srvr_info = NULL;
  893. if (buf_len < sizeof(struct smcinvoke_tzcb_req)) {
  894. pr_err("smaller buffer length : %u\n", buf_len);
  895. return;
  896. }
  897. cb_req = buf;
  898. /* check whether it is to be served by kernel or userspace */
  899. if (TZHANDLE_IS_KERNEL_OBJ(cb_req->hdr.tzhandle)) {
  900. return process_kernel_obj(buf, buf_len);
  901. } else if (TZHANDLE_IS_MEM_OBJ(cb_req->hdr.tzhandle)) {
  902. return process_mem_obj(buf, buf_len);
  903. } else if (!TZHANDLE_IS_CB_OBJ(cb_req->hdr.tzhandle)) {
  904. pr_err("Request object is not a callback object\n");
  905. cb_req->result = OBJECT_ERROR_INVALID;
  906. return;
  907. }
  908. /*
  909. * We need a copy of the req that can be sent to the server. Otherwise,
  910. * if someone kills the invoke caller, buf would go away and the server
  911. * would be working on an already freed buffer, causing a device crash.
  912. */
  913. tmp_cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
  914. if (!tmp_cb_req) {
  915. /* we need to return error to caller so fill up result */
  916. cb_req->result = OBJECT_ERROR_KMEM;
  917. pr_err("failed to create copy of request, set result: %d\n",
  918. cb_req->result);
  919. return;
  920. }
  921. cb_txn = kzalloc(sizeof(*cb_txn), GFP_KERNEL);
  922. if (!cb_txn) {
  923. cb_req->result = OBJECT_ERROR_KMEM;
  924. pr_err("failed to allocate memory for request, result: %d\n",
  925. cb_req->result);
  926. kfree(tmp_cb_req);
  927. return;
  928. }
  929. /* no need for memcpy as we did kmemdup() above */
  930. cb_req = tmp_cb_req;
  931. trace_process_tzcb_req_handle(cb_req->hdr.tzhandle, cb_req->hdr.op, cb_req->hdr.counts);
  932. cb_txn->state = SMCINVOKE_REQ_PLACED;
  933. cb_txn->cb_req = cb_req;
  934. cb_txn->cb_req_bytes = buf_len;
  935. cb_txn->filp_to_release = arr_filp;
  936. kref_init(&cb_txn->ref_cnt);
  937. mutex_lock(&g_smcinvoke_lock);
  938. ++cb_reqs_inflight;
  939. srvr_info = get_cb_server_locked(
  940. TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle));
  941. if (!srvr_info || srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
  942. /* ret equals OBJECT_ERROR_DEFUNCT at this point, so go to out */
  943. if (!srvr_info)
  944. pr_err("server is invalid\n");
  945. else {
  946. pr_err("server is defunct, state= %d tzhandle = %d\n",
  947. srvr_info->state, cb_req->hdr.tzhandle);
  948. }
  949. mutex_unlock(&g_smcinvoke_lock);
  950. goto out;
  951. }
  952. cb_txn->txn_id = ++srvr_info->txn_id;
  953. hash_add(srvr_info->reqs_table, &cb_txn->hash, cb_txn->txn_id);
  954. mutex_unlock(&g_smcinvoke_lock);
  955. trace_process_tzcb_req_wait(cb_req->hdr.tzhandle, cbobj_retries, cb_txn->txn_id,
  956. current->pid, current->tgid, srvr_info->state, srvr_info->server_id,
  957. cb_reqs_inflight);
  958. /*
  959. * we need not worry that server_info will be deleted because as long
  960. * as this CBObj is served by this server, srvr_info will be valid.
  961. */
  962. wake_up_interruptible_all(&srvr_info->req_wait_q);
  963. /* time out before 1s, otherwise TZ would report busy */
  964. timeout_jiff = msecs_to_jiffies(1000);
  965. while (cbobj_retries < CBOBJ_MAX_RETRIES) {
  966. ret = wait_event_interruptible_timeout(srvr_info->rsp_wait_q,
  967. (cb_txn->state == SMCINVOKE_REQ_PROCESSED) ||
  968. (srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT),
  969. timeout_jiff);
  970. if (ret == 0) {
  971. pr_err("CBobj timed out cb-tzhandle:%d, retry:%d, op:%d counts :%d\n",
  972. cb_req->hdr.tzhandle, cbobj_retries,
  973. cb_req->hdr.op, cb_req->hdr.counts);
  974. pr_err("CBobj %d timedout pid %x,tid %x, srvr state=%d, srvr id:%u\n",
  975. cb_req->hdr.tzhandle, current->pid,
  976. current->tgid, srvr_info->state,
  977. srvr_info->server_id);
  978. } else {
  979. break;
  980. }
  981. cbobj_retries++;
  982. }
  983. out:
  984. /*
  985. * we could be here because of either:
  986. * a. Req is PROCESSED
  987. * b. Server was killed
  988. * c. Invoke thread is killed
  989. * Sometimes the invoke thread and the server are part of the same process.
  990. */
  991. mutex_lock(&g_smcinvoke_lock);
  992. hash_del(&cb_txn->hash);
  993. if (ret == 0) {
  994. pr_err("CBObj timed out! No more retries\n");
  995. cb_req->result = Object_ERROR_TIMEOUT;
  996. } else if (ret == -ERESTARTSYS) {
  997. pr_err("wait event interruped, ret: %d\n", ret);
  998. cb_req->result = OBJECT_ERROR_ABORT;
  999. } else {
  1000. if (cb_txn->state == SMCINVOKE_REQ_PROCESSED) {
  1001. /*
  1002. * It is possible that the server was killed immediately
  1003. * after the CB req was processed, but that no longer matters here.
  1004. */
  1005. } else if (!srvr_info ||
  1006. srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
  1007. cb_req->result = OBJECT_ERROR_DEFUNCT;
  1008. pr_err("server invalid, res: %d\n", cb_req->result);
  1009. } else {
  1010. pr_err("%s: unexpected event happened, ret:%d\n", __func__, ret);
  1011. cb_req->result = OBJECT_ERROR_ABORT;
  1012. }
  1013. }
  1014. --cb_reqs_inflight;
  1015. trace_process_tzcb_req_result(cb_req->result, cb_req->hdr.tzhandle, cb_req->hdr.op,
  1016. cb_req->hdr.counts, cb_reqs_inflight);
  1017. memcpy(buf, cb_req, buf_len);
  1018. kref_put(&cb_txn->ref_cnt, delete_cb_txn);
  1019. if (srvr_info)
  1020. kref_put(&srvr_info->ref_cnt, destroy_cb_server);
  1021. mutex_unlock(&g_smcinvoke_lock);
  1022. }
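/*
 * Flow sketch (illustrative only; the userspace accept path is outside
 * this section): TZ returns SMCINVOKE_RESULT_INBOUND_REQ_NEEDED ->
 * process_tzcb_req() queues the txn on reqs_table and wakes req_wait_q
 * -> a server thread picks it up and runs the CBObj -> the response is
 * posted and rsp_wait_q wakes this path -> the result is copied back to
 * the out buffer, which the caller returns via SMCINVOKE_CB_RSP_CMD.
 */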
  1023. static int marshal_out_invoke_req(const uint8_t *buf, uint32_t buf_size,
  1024. struct smcinvoke_cmd_req *req,
  1025. union smcinvoke_arg *args_buf,
  1026. uint32_t context_type)
  1027. {
  1028. int ret = -EINVAL, i = 0;
  1029. int32_t temp_fd = UHANDLE_NULL;
  1030. union smcinvoke_tz_args *tz_args = NULL;
  1031. size_t offset = sizeof(struct smcinvoke_msg_hdr) +
  1032. OBJECT_COUNTS_TOTAL(req->counts) *
  1033. sizeof(union smcinvoke_tz_args);
  1034. if (offset > buf_size)
  1035. goto out;
  1036. tz_args = (union smcinvoke_tz_args *)
  1037. (buf + sizeof(struct smcinvoke_msg_hdr));
  1038. tz_args += OBJECT_COUNTS_NUM_BI(req->counts);
  1039. if (args_buf == NULL)
  1040. return 0;
  1041. FOR_ARGS(i, req->counts, BO) {
  1042. args_buf[i].b.size = tz_args->b.size;
  1043. if ((buf_size - tz_args->b.offset < tz_args->b.size) ||
  1044. tz_args->b.offset > buf_size) {
  1045. pr_err("%s: buffer overflow detected\n", __func__);
  1046. goto out;
  1047. }
  1048. if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
  1049. if (copy_to_user((void __user *)
  1050. (uintptr_t)(args_buf[i].b.addr),
  1051. (uint8_t *)(buf) + tz_args->b.offset,
  1052. tz_args->b.size)) {
  1053. pr_err("Error %d copying ctxt to user\n", ret);
  1054. goto out;
  1055. }
  1056. } else {
  1057. memcpy((uint8_t *)(args_buf[i].b.addr),
  1058. (uint8_t *)(buf) + tz_args->b.offset,
  1059. tz_args->b.size);
  1060. }
  1061. tz_args++;
  1062. }
  1063. tz_args += OBJECT_COUNTS_NUM_OI(req->counts);
  1064. FOR_ARGS(i, req->counts, OO) {
  1065. /*
  1066. * Create a new FD and assign it to the output object's context.
  1067. * We are passing cb_server_fd from the output param in case the OO
  1068. * is a CBObj. For a CBObj, we have to ensure that it is sent to the
  1069. * server that serves it, and that info comes from userspace.
  1070. */
  1071. temp_fd = UHANDLE_NULL;
  1072. ret = get_uhandle_from_tzhandle(tz_args->handle,
  1073. TZHANDLE_GET_SERVER(tz_args->handle),
  1074. &temp_fd, NO_LOCK, context_type);
  1075. args_buf[i].o.fd = temp_fd;
  1076. if (ret)
  1077. goto out;
  1078. trace_marshal_out_invoke_req(i, tz_args->handle,
  1079. TZHANDLE_GET_SERVER(tz_args->handle), temp_fd);
  1080. tz_args++;
  1081. }
  1082. ret = 0;
  1083. out:
  1084. return ret;
  1085. }
  1086. static bool is_inbound_req(int val)
  1087. {
  1088. return (val == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED ||
  1089. val == QSEOS_RESULT_INCOMPLETE ||
  1090. val == QSEOS_RESULT_BLOCKED_ON_LISTENER);
  1091. }
  1092. static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
  1093. size_t in_buf_len,
  1094. uint8_t *out_buf, phys_addr_t out_paddr,
  1095. size_t out_buf_len,
  1096. struct smcinvoke_cmd_req *req,
  1097. union smcinvoke_arg *args_buf,
  1098. bool *tz_acked, uint32_t context_type,
  1099. struct qtee_shm *in_shm, struct qtee_shm *out_shm)
  1100. {
  1101. int ret = 0, cmd, retry_count = 0;
  1102. u64 response_type;
  1103. unsigned int data;
  1104. struct file *arr_filp[OBJECT_COUNTS_MAX_OO] = {NULL};
  1105. *tz_acked = false;
  1106. /* buf size should be page aligned */
  1107. if ((in_buf_len % PAGE_SIZE) != 0 || (out_buf_len % PAGE_SIZE) != 0)
  1108. return -EINVAL;
  1109. cmd = invoke_cmd;
  1110. /*
  1111. * The purpose of the lock here is to ensure that any CB obj that may be
  1112. * going to the user as an OO is not released by a piggyback message on
  1113. * another invoke request. We should not move this lock into
  1114. * process_invoke_req() because that would either cause a deadlock or
  1115. * prevent any other invoke request from coming in. We release this lock
  1116. * when either a) TZ requires HLOS action to complete the ongoing invoke
  1117. * operation, or b) the final response to the invoke has been marshalled out.
  1118. */
  1119. while (1) {
  1120. mutex_lock(&g_smcinvoke_lock);
  1121. do {
  1122. ret = invoke_cmd_handler(cmd, in_paddr, in_buf_len, out_buf,
  1123. out_paddr, out_buf_len, &req->result,
  1124. &response_type, &data, in_shm, out_shm);
  1125. if (ret == -EBUSY) {
  1126. pr_err("Secure side is busy,will retry after 30 ms\n");
  1127. mutex_unlock(&g_smcinvoke_lock);
  1128. msleep(SMCINVOKE_SCM_EBUSY_WAIT_MS);
  1129. mutex_lock(&g_smcinvoke_lock);
  1130. }
  1131. } while ((ret == -EBUSY) &&
  1132. (retry_count++ < SMCINVOKE_SCM_EBUSY_MAX_RETRY));
  1133. if (!ret && !is_inbound_req(response_type)) {
  1134. /* don't marshal if the Obj returns an error */
  1135. if (!req->result) {
  1136. if (args_buf != NULL)
  1137. ret = marshal_out_invoke_req(in_buf,
  1138. in_buf_len, req, args_buf,
  1139. context_type);
  1140. }
  1141. *tz_acked = true;
  1142. }
  1143. mutex_unlock(&g_smcinvoke_lock);
  1144. if (cmd == SMCINVOKE_CB_RSP_CMD)
  1145. release_filp(arr_filp, OBJECT_COUNTS_MAX_OO);
  1146. if (ret || !is_inbound_req(response_type))
  1147. break;
  1148. /* process listener request */
  1149. if (response_type == QSEOS_RESULT_INCOMPLETE ||
  1150. response_type == QSEOS_RESULT_BLOCKED_ON_LISTENER) {
  1151. ret = qseecom_process_listener_from_smcinvoke(
  1152. &req->result, &response_type, &data);
  1153. trace_prepare_send_scm_msg(response_type, req->result);
  1154. if (!req->result &&
  1155. response_type != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) {
  1156. ret = marshal_out_invoke_req(in_buf,
  1157. in_buf_len, req, args_buf,
  1158. context_type);
  1159. }
  1160. *tz_acked = true;
  1161. }
  1162. /*
  1163. * qseecom does not understand smcinvoke's callback objects and
  1164. * erroneously sets the ret value to -EINVAL. We need to handle it.
  1165. */
  1166. if (response_type != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED)
  1167. break;
  1168. if (response_type == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) {
  1169. trace_status(__func__, "looks like inbnd req reqd");
  1170. process_tzcb_req(out_buf, out_buf_len, arr_filp);
  1171. cmd = SMCINVOKE_CB_RSP_CMD;
  1172. }
  1173. }
  1174. return ret;
  1175. }
  1176. /*
  1177. * SMC expects arguments in following format
  1178. * ---------------------------------------------------------------------------
  1179. * | cxt | op | counts | ptr|size |ptr|size...|ORef|ORef|...| rest of payload |
  1180. * ---------------------------------------------------------------------------
  1181. * cxt: target, op: operation, counts: total arguments
  1182. * offset: offset is from beginning of buffer i.e. cxt
  1183. * size: size is 8 bytes aligned value
  1184. */
  1185. static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req,
  1186. const union smcinvoke_arg *args_buf)
  1187. {
  1188. uint32_t i = 0;
  1189. size_t total_size = sizeof(struct smcinvoke_msg_hdr) +
  1190. OBJECT_COUNTS_TOTAL(req->counts) *
  1191. sizeof(union smcinvoke_tz_args);
  1192. /* Computed total_size should be 8-byte aligned from the start of buf */
  1193. total_size = ALIGN(total_size, SMCINVOKE_ARGS_ALIGN_SIZE);
  1194. /* each buffer has to be 8-byte aligned */
  1195. while (i < OBJECT_COUNTS_NUM_buffers(req->counts))
  1196. total_size = size_add(total_size,
  1197. size_align(args_buf[i++].b.size,
  1198. SMCINVOKE_ARGS_ALIGN_SIZE));
  1199. return PAGE_ALIGN(total_size);
  1200. }
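/*
 * Worked example (illustrative only, assuming 4K pages): one 100-byte BI
 * buffer plus one OI object gives
 *     total_size = sizeof(msg_hdr) + 2 * sizeof(tz_args)  = 12 + 16 = 28
 *                -> ALIGN(28, 8)                          = 32
 *                -> size_add(32, size_align(100, 8))      = 32 + 104 = 136
 *                -> PAGE_ALIGN(136)                       = 4096
 */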
  1201. static int marshal_in_invoke_req(const struct smcinvoke_cmd_req *req,
  1202. const union smcinvoke_arg *args_buf, uint32_t tzhandle,
  1203. uint8_t *buf, size_t buf_size, struct file **arr_filp,
  1204. int32_t *tzhandles_to_release, uint32_t context_type)
  1205. {
  1206. int ret = -EINVAL, i = 0, j = 0, k = 0;
  1207. const struct smcinvoke_msg_hdr msg_hdr = {
  1208. tzhandle, req->op, req->counts};
  1209. uint32_t offset = sizeof(struct smcinvoke_msg_hdr) +
  1210. sizeof(union smcinvoke_tz_args) *
  1211. OBJECT_COUNTS_TOTAL(req->counts);
  1212. union smcinvoke_tz_args *tz_args = NULL;
  1213. if (buf_size < offset)
  1214. goto out;
  1215. *(struct smcinvoke_msg_hdr *)buf = msg_hdr;
  1216. tz_args = (union smcinvoke_tz_args *)(buf +
  1217. sizeof(struct smcinvoke_msg_hdr));
  1218. if (args_buf == NULL)
  1219. return 0;
  1220. FOR_ARGS(i, req->counts, BI) {
  1221. offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
  1222. if ((offset > buf_size) ||
  1223. (args_buf[i].b.size > (buf_size - offset)))
  1224. goto out;
  1225. tz_args[i].b.offset = offset;
  1226. tz_args[i].b.size = args_buf[i].b.size;
  1227. if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
  1228. if (copy_from_user(buf + offset,
  1229. (void __user *)(uintptr_t)(args_buf[i].b.addr),
  1230. args_buf[i].b.size))
  1231. goto out;
  1232. } else {
  1233. memcpy(buf + offset, (void *)(args_buf[i].b.addr),
  1234. args_buf[i].b.size);
  1235. }
  1236. offset += args_buf[i].b.size;
  1237. }
  1238. FOR_ARGS(i, req->counts, BO) {
  1239. offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
  1240. if ((offset > buf_size) ||
  1241. (args_buf[i].b.size > (buf_size - offset)))
  1242. goto out;
  1243. tz_args[i].b.offset = offset;
  1244. tz_args[i].b.size = args_buf[i].b.size;
  1245. offset += args_buf[i].b.size;
  1246. }
  1247. FOR_ARGS(i, req->counts, OI) {
  1248. ret = get_tzhandle_from_uhandle(args_buf[i].o.fd,
  1249. args_buf[i].o.cb_server_fd, &arr_filp[j++],
  1250. &(tz_args[i].handle));
  1251. if (ret)
  1252. goto out;
  1253. trace_marshal_in_invoke_req(i, args_buf[i].o.fd,
  1254. args_buf[i].o.cb_server_fd, tz_args[i].handle);
  1255. tzhandles_to_release[k++] = tz_args[i].handle;
  1256. }
  1257. ret = 0;
  1258. out:
  1259. return ret;
  1260. }
static int marshal_in_tzcb_req(const struct smcinvoke_cb_txn *cb_txn,
		struct smcinvoke_accept *user_req, int srvr_id)
{
	int ret = 0, i = 0;
	int32_t temp_fd = UHANDLE_NULL;
	union smcinvoke_arg tmp_arg;
	struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req;
	union smcinvoke_tz_args *tz_args = tzcb_req->args;
	size_t tzcb_req_len = cb_txn->cb_req_bytes;
	size_t tz_buf_offset = TZCB_BUF_OFFSET(tzcb_req);
	size_t user_req_buf_offset = sizeof(union smcinvoke_arg) *
			OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts);

	if (tz_buf_offset > tzcb_req_len) {
		ret = -EINVAL;
		goto out;
	}

	user_req->txn_id = cb_txn->txn_id;
	if (get_uhandle_from_tzhandle(tzcb_req->hdr.tzhandle, srvr_id,
			&user_req->cbobj_id, TAKE_LOCK,
			SMCINVOKE_OBJ_TYPE_TZ_OBJ)) {
		ret = -EINVAL;
		goto out;
	}
	user_req->op = tzcb_req->hdr.op;
	user_req->counts = tzcb_req->hdr.counts;
	user_req->argsize = sizeof(union smcinvoke_arg);

	trace_marshal_in_tzcb_req_handle(tzcb_req->hdr.tzhandle, srvr_id,
			user_req->cbobj_id, user_req->op, user_req->counts);

	FOR_ARGS(i, tzcb_req->hdr.counts, BI) {
		user_req_buf_offset = size_align(user_req_buf_offset,
				SMCINVOKE_ARGS_ALIGN_SIZE);
		tmp_arg.b.size = tz_args[i].b.size;
		if ((tz_args[i].b.offset > tzcb_req_len) ||
			(tz_args[i].b.size > tzcb_req_len - tz_args[i].b.offset) ||
			(user_req_buf_offset > user_req->buf_len) ||
			(tmp_arg.b.size >
			user_req->buf_len - user_req_buf_offset)) {
			ret = -EINVAL;
			pr_err("%s: buffer overflow detected\n", __func__);
			goto out;
		}
		tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset;

		if (copy_to_user(u64_to_user_ptr
				(user_req->buf_addr + i * sizeof(tmp_arg)),
				&tmp_arg, sizeof(tmp_arg)) ||
			copy_to_user(u64_to_user_ptr(tmp_arg.b.addr),
				(uint8_t *)(tzcb_req) + tz_args[i].b.offset,
				tz_args[i].b.size)) {
			ret = -EFAULT;
			goto out;
		}
		user_req_buf_offset += tmp_arg.b.size;
	}
	FOR_ARGS(i, tzcb_req->hdr.counts, BO) {
		user_req_buf_offset = size_align(user_req_buf_offset,
				SMCINVOKE_ARGS_ALIGN_SIZE);
		tmp_arg.b.size = tz_args[i].b.size;
		if ((user_req_buf_offset > user_req->buf_len) ||
			(tmp_arg.b.size >
			user_req->buf_len - user_req_buf_offset)) {
			ret = -EINVAL;
			pr_err("%s: buffer overflow detected\n", __func__);
			goto out;
		}
		tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset;

		if (copy_to_user(u64_to_user_ptr
				(user_req->buf_addr + i * sizeof(tmp_arg)),
				&tmp_arg, sizeof(tmp_arg))) {
			ret = -EFAULT;
			goto out;
		}
		user_req_buf_offset += tmp_arg.b.size;
	}
	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
		/*
		 * create a new FD and assign to output object's
		 * context
		 */
		temp_fd = UHANDLE_NULL;
		ret = get_uhandle_from_tzhandle(tz_args[i].handle, srvr_id,
				&temp_fd, TAKE_LOCK, SMCINVOKE_OBJ_TYPE_TZ_OBJ);
		tmp_arg.o.fd = temp_fd;
		if (ret) {
			ret = -EINVAL;
			goto out;
		}
		if (copy_to_user(u64_to_user_ptr
				(user_req->buf_addr + i * sizeof(tmp_arg)),
				&tmp_arg, sizeof(tmp_arg))) {
			ret = -EFAULT;
			goto out;
		}
		trace_marshal_in_tzcb_req_fd(i, tz_args[i].handle, srvr_id, temp_fd);
	}
out:
	return ret;
}
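
/*
 * Fold the server's response back into the TZ callback request: BO
 * payloads are copied in from userspace (never exceeding the size TZ
 * advertised), OO FDs are converted to TZ handles, and the handles
 * taken for OI args are dropped. On failure, every handle allotted
 * here is released again.
 */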
static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
		struct smcinvoke_cb_txn *cb_txn,
		struct file **arr_filp)
{
	int ret = -EINVAL, i = 0;
	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
	struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req;
	union smcinvoke_tz_args *tz_args = tzcb_req->args;

	release_tzhandles(&cb_txn->cb_req->hdr.tzhandle, 1);
	tzcb_req->result = user_req->result;
	FOR_ARGS(i, tzcb_req->hdr.counts, BO) {
		union smcinvoke_arg tmp_arg;

		if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr(
				user_req->buf_addr + i * sizeof(union smcinvoke_arg)),
				sizeof(union smcinvoke_arg))) {
			ret = -EFAULT;
			goto out;
		}
		if (tmp_arg.b.size > tz_args[i].b.size)
			goto out;
		if (copy_from_user((uint8_t *)(tzcb_req) + tz_args[i].b.offset,
				u64_to_user_ptr(tmp_arg.b.addr),
				tmp_arg.b.size)) {
			ret = -EFAULT;
			goto out;
		}
	}

	FOR_ARGS(i, tzcb_req->hdr.counts, OO) {
		union smcinvoke_arg tmp_arg;

		if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr(
				user_req->buf_addr + i * sizeof(union smcinvoke_arg)),
				sizeof(union smcinvoke_arg))) {
			ret = -EFAULT;
			goto out;
		}
		ret = get_tzhandle_from_uhandle(tmp_arg.o.fd,
				tmp_arg.o.cb_server_fd, &arr_filp[i],
				&(tz_args[i].handle));
		if (ret)
			goto out;
		tzhandles_to_release[i] = tz_args[i].handle;
		trace_marshal_out_tzcb_req(i, tmp_arg.o.fd,
				tmp_arg.o.cb_server_fd, tz_args[i].handle);
	}
	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
		if (TZHANDLE_IS_CB_OBJ(tz_args[i].handle))
			release_tzhandles(&tz_args[i].handle, 1);
	}
	ret = 0;
out:
	if (ret)
		release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO);
	return ret;
}
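
/*
 * TZ may piggyback, on the out-buffer of an invoke, a list of object
 * handles it wants released. Replay each one as a zero-argument
 * callback request through process_tzcb_req().
 */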
static void process_piggyback_data(void *buf, size_t buf_size)
{
	int i;
	struct smcinvoke_tzcb_req req = {0};
	struct smcinvoke_piggyback_msg *msg = buf;
	int32_t *objs = msg->objs;

	for (i = 0; i < msg->counts; i++) {
		req.hdr.op = msg->op;
		req.hdr.counts = 0; /* release op does not require any args */
		req.hdr.tzhandle = objs[i];
		process_tzcb_req(&req, sizeof(struct smcinvoke_tzcb_req), NULL);
		/* cbobjs_in_flight will be adjusted during CB processing */
	}
}
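
/*
 * Handle SMCINVOKE_IOCTL_ACK_LOCAL_OBJ: userspace acknowledges a local
 * (callback) object handle it received, so the pending reference held
 * on its behalf can be dropped.
 */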
static long process_ack_local_obj(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1;
	int32_t local_obj = SMCINVOKE_USERSPACE_OBJ_NULL;
	struct smcinvoke_file_data *filp_data = filp->private_data;

	if (_IOC_SIZE(cmd) != sizeof(int32_t))
		return -EINVAL;

	ret = copy_from_user(&local_obj, (void __user *)(uintptr_t)arg,
			sizeof(int32_t));
	if (ret)
		return -EFAULT;

	mutex_lock(&g_smcinvoke_lock);
	if (UHANDLE_IS_CB_OBJ(local_obj))
		ret = put_pending_cbobj_locked(filp_data->server_id,
				UHANDLE_GET_CB_OBJ(local_obj));
	mutex_unlock(&g_smcinvoke_lock);

	return ret;
}
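
/*
 * Handle SMCINVOKE_IOCTL_SERVER_REQ: register a new callback server.
 * A server_info is allocated, given a fresh server id, added to the
 * global server table, and exposed to userspace as an FD. Note that on
 * success the new FD is returned rather than 0.
 */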
static long process_server_req(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1;
	int32_t server_fd = -1;
	struct smcinvoke_server server_req = {0};
	struct smcinvoke_server_info *server_info = NULL;

	if (_IOC_SIZE(cmd) != sizeof(server_req)) {
		pr_err("invalid command size received for server request\n");
		return -EINVAL;
	}
	ret = copy_from_user(&server_req, (void __user *)(uintptr_t)arg,
			sizeof(server_req));
	if (ret) {
		pr_err("copying server request from user failed\n");
		return -EFAULT;
	}
	server_info = kzalloc(sizeof(*server_info), GFP_KERNEL);
	if (!server_info)
		return -ENOMEM;

	kref_init(&server_info->ref_cnt);
	init_waitqueue_head(&server_info->req_wait_q);
	init_waitqueue_head(&server_info->rsp_wait_q);
	server_info->cb_buf_size = server_req.cb_buf_size;
	hash_init(server_info->reqs_table);
	hash_init(server_info->responses_table);
	INIT_LIST_HEAD(&server_info->pending_cbobjs);

	mutex_lock(&g_smcinvoke_lock);

	server_info->server_id = next_cb_server_id_locked();
	hash_add(g_cb_servers, &server_info->hash,
			server_info->server_id);
	if (g_max_cb_buf_size < server_req.cb_buf_size)
		g_max_cb_buf_size = server_req.cb_buf_size;

	mutex_unlock(&g_smcinvoke_lock);
	ret = get_fd_for_obj(SMCINVOKE_OBJ_TYPE_SERVER,
			server_info->server_id, &server_fd);

	if (ret)
		release_cb_server(server_info->server_id);

	return server_fd;
}
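
/*
 * Handle SMCINVOKE_IOCTL_ACCEPT_REQ: the worker loop of a callback
 * server thread. If the caller carries a response to an earlier
 * transaction, marshal it back into the TZ request and wake the invoke
 * thread; then block until the next callback request is queued, marshal
 * it into user_args, and hand it to userspace for processing.
 */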
static long process_accept_req(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1;
	struct smcinvoke_file_data *server_obj = filp->private_data;
	struct smcinvoke_accept user_args = {0};
	struct smcinvoke_cb_txn *cb_txn = NULL;
	struct smcinvoke_server_info *server_info = NULL;

	if (_IOC_SIZE(cmd) != sizeof(struct smcinvoke_accept)) {
		pr_err("command size invalid for accept request\n");
		return -EINVAL;
	}

	if (copy_from_user(&user_args, (void __user *)arg,
			sizeof(struct smcinvoke_accept))) {
		pr_err("copying accept request from user failed\n");
		return -EFAULT;
	}

	if (user_args.argsize != sizeof(union smcinvoke_arg)) {
		pr_err("arguments size is invalid for accept thread\n");
		return -EINVAL;
	}

	/* ACCEPT is available only on server obj */
	if (server_obj->context_type != SMCINVOKE_OBJ_TYPE_SERVER) {
		pr_err("invalid object type received for accept req\n");
		return -EPERM;
	}

	mutex_lock(&g_smcinvoke_lock);
	server_info = get_cb_server_locked(server_obj->server_id);

	if (!server_info) {
		pr_err("No matching server with server id %u found\n",
				server_obj->server_id);
		mutex_unlock(&g_smcinvoke_lock);
		return -EINVAL;
	}

	if (server_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT)
		server_info->state = 0;
	mutex_unlock(&g_smcinvoke_lock);

	/* First check if it has a response; otherwise wait for a req */
	if (user_args.has_resp) {
		trace_process_accept_req_has_response(current->pid, current->tgid);

		mutex_lock(&g_smcinvoke_lock);
		cb_txn = find_cbtxn_locked(server_info, user_args.txn_id,
				SMCINVOKE_REQ_PROCESSING);
		mutex_unlock(&g_smcinvoke_lock);

		/*
		 * cb_txn can be NULL if userspace provides a wrong txn id OR
		 * the invoke thread died while the server was processing the
		 * cb req. If the invoke thread dies, it removes the req from
		 * the Q, so no matching cb_txn would be on the Q and hence
		 * cb_txn is NULL. In this case, we want this thread to come
		 * back and start waiting for new cb requests, hence return
		 * -EAGAIN here.
		 */
		if (!cb_txn) {
			pr_err("%s txn %d either invalid or removed from Q\n",
					__func__, user_args.txn_id);
			ret = -EAGAIN;
			goto out;
		}

		ret = marshal_out_tzcb_req(&user_args, cb_txn,
				cb_txn->filp_to_release);
		/*
		 * If the client did not set an error but we hit one locally,
		 * return the local error to the TA.
		 */
		if (ret && cb_txn->cb_req->result == 0)
			cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;

		cb_txn->state = SMCINVOKE_REQ_PROCESSED;
		kref_put(&cb_txn->ref_cnt, delete_cb_txn);
		wake_up(&server_info->rsp_wait_q);
		/*
		 * If marshal_out fails, we should let userspace release
		 * any ref/obj it created for CB processing.
		 */
		if (ret && OBJECT_COUNTS_NUM_OO(user_args.counts))
			goto out;
	}

	/*
	 * Once the response has been delivered, the thread will wait for
	 * another callback req to process.
	 */
	do {
		ret = wait_event_interruptible(server_info->req_wait_q,
				!hash_empty(server_info->reqs_table));
		if (ret) {
			trace_process_accept_req_ret(current->pid, current->tgid, ret);
			/*
			 * Ideally, we should destroy the server if accept
			 * threads are returning due to the client being
			 * killed or the device going down (shutdown/reboot),
			 * but that would make server_info invalid. Other
			 * accept/invoke threads are using server_info and
			 * would crash, so don't do that.
			 */
			mutex_lock(&g_smcinvoke_lock);
			server_info->state = SMCINVOKE_SERVER_STATE_DEFUNCT;
			mutex_unlock(&g_smcinvoke_lock);
			wake_up_interruptible(&server_info->rsp_wait_q);
			goto out;
		}
		mutex_lock(&g_smcinvoke_lock);
		cb_txn = find_cbtxn_locked(server_info,
				SMCINVOKE_NEXT_AVAILABLE_TXN,
				SMCINVOKE_REQ_PLACED);
		mutex_unlock(&g_smcinvoke_lock);
		if (cb_txn) {
			cb_txn->state = SMCINVOKE_REQ_PROCESSING;
			ret = marshal_in_tzcb_req(cb_txn, &user_args,
					server_obj->server_id);
			if (ret) {
				pr_err("failed to marshal in the callback request\n");
				cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;
				cb_txn->state = SMCINVOKE_REQ_PROCESSED;
				kref_put(&cb_txn->ref_cnt, delete_cb_txn);
				wake_up_interruptible(&server_info->rsp_wait_q);
				continue;
			}
			mutex_lock(&g_smcinvoke_lock);
			hash_add(server_info->responses_table, &cb_txn->hash,
					cb_txn->txn_id);
			kref_put(&cb_txn->ref_cnt, delete_cb_txn);
			mutex_unlock(&g_smcinvoke_lock);
			trace_process_accept_req_placed(current->pid, current->tgid);
			ret = copy_to_user((void __user *)arg, &user_args,
					sizeof(struct smcinvoke_accept));
		}
	} while (!cb_txn);
out:
	if (server_info)
		kref_put(&server_info->ref_cnt, destroy_cb_server);

	if (ret && ret != -ERESTARTSYS)
		pr_err("accept thread returning with ret: %d\n", ret);

	return ret;
}
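
/*
 * Handle SMCINVOKE_IOCTL_INVOKE_REQ (also reused by kernel clients):
 * copy in the request and its arguments, marshal them into a shmbridge
 * in-buffer, hand the message to TZ via prepare_send_scm_msg(), and on
 * success marshal the results back to the caller. The out-buffer may
 * carry piggybacked release requests, which are processed before
 * returning.
 */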
static long process_invoke_req(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1, nr_args = 0;
	struct smcinvoke_cmd_req req = {0};
	void *in_msg = NULL, *out_msg = NULL;
	size_t inmsg_size = 0, outmsg_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
	union smcinvoke_arg *args_buf = NULL;
	struct smcinvoke_file_data *tzobj = filp->private_data;
	struct qtee_shm in_shm = {0}, out_shm = {0};
	/*
	 * Hold a reference to the remote object until the invoke op is
	 * completed. Release it once the invoke is done.
	 */
	struct file *filp_to_release[OBJECT_COUNTS_MAX_OO] = {NULL};
	/*
	 * If anything goes wrong, release the allotted tzhandles for
	 * local objs, which could be either CBObjs or MemObjs.
	 */
	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
	bool tz_acked = false;
	uint32_t context_type = tzobj->context_type;

	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
		_IOC_SIZE(cmd) != sizeof(req)) {
		pr_err("command size for invoke req is invalid\n");
		return -EINVAL;
	}

	if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
		context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
		pr_err("invalid context_type %d\n", context_type);
		return -EPERM;
	}

	if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
		ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
		if (ret) {
			pr_err("copying invoke req failed\n");
			return -EFAULT;
		}
	} else {
		req = *(struct smcinvoke_cmd_req *)arg;
	}

	if (req.argsize != sizeof(union smcinvoke_arg)) {
		pr_err("arguments size for invoke req is invalid\n");
		return -EINVAL;
	}

	nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) +
			OBJECT_COUNTS_NUM_objects(req.counts);

	if (nr_args) {
		args_buf = kcalloc(nr_args, req.argsize, GFP_KERNEL);
		if (!args_buf)
			return -ENOMEM;

		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
			ret = copy_from_user(args_buf,
					u64_to_user_ptr(req.args),
					nr_args * req.argsize);
			if (ret) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			memcpy(args_buf, (void *)(req.args),
					nr_args * req.argsize);
		}
	}

	inmsg_size = compute_in_msg_size(&req, args_buf);
	ret = qtee_shmbridge_allocate_shm(inmsg_size, &in_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for in msg in invoke req\n");
		goto out;
	}
	in_msg = in_shm.vaddr;

	mutex_lock(&g_smcinvoke_lock);
	outmsg_size = PAGE_ALIGN(g_max_cb_buf_size);
	mutex_unlock(&g_smcinvoke_lock);
	ret = qtee_shmbridge_allocate_shm(outmsg_size, &out_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for out msg in invoke req\n");
		goto out;
	}
	out_msg = out_shm.vaddr;

	trace_process_invoke_req_tzhandle(tzobj->tzhandle, req.op, req.counts);

	ret = marshal_in_invoke_req(&req, args_buf, tzobj->tzhandle, in_msg,
			inmsg_size, filp_to_release, tzhandles_to_release,
			context_type);
	if (ret) {
		pr_err("failed to marshal in invoke req, ret: %d\n", ret);
		goto out;
	}

	ret = prepare_send_scm_msg(in_msg, in_shm.paddr, inmsg_size,
			out_msg, out_shm.paddr, outmsg_size,
			&req, args_buf, &tz_acked, context_type,
			&in_shm, &out_shm);

	/*
	 * If the scm call succeeded, TZ owns the responsibility to release
	 * refs for local objs. If TZ did not ack the request, bail out here
	 * so the handles allotted above are released on the error path.
	 */
	if (!tz_acked) {
		trace_status(__func__, "scm call was not acked");
		goto out;
	}
	memset(tzhandles_to_release, 0, sizeof(tzhandles_to_release));

	/*
	 * If the invoke op results in an error, there is no need to marshal
	 * out and copy the args buf to user space.
	 */
	if (!req.result) {
		/*
		 * Don't check the ret of marshal_out because there might be
		 * an FD for an OO which userspace must release even if an
		 * error occurs. Releasing the FD from user space is much
		 * simpler than doing it here. OR-ing into ret is required so
		 * that a past error is not lost.
		 */
		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ)
			ret |= copy_to_user(u64_to_user_ptr(req.args),
					args_buf, nr_args * req.argsize);
		else
			memcpy((void *)(req.args), args_buf,
					nr_args * req.argsize);
	}

	/* copy result of invoke op */
	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
		ret |= copy_to_user((void __user *)arg, &req, sizeof(req));
		if (ret)
			goto out;
	} else {
		memcpy((void *)arg, (void *)&req, sizeof(req));
	}

	/* The outbuf could be carrying local objs to be released. */
	process_piggyback_data(out_msg, outmsg_size);
out:
	trace_process_invoke_req_result(ret, req.result, tzobj->tzhandle,
			req.op, req.counts);

	release_filp(filp_to_release, OBJECT_COUNTS_MAX_OO);
	if (ret)
		release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO);

	qtee_shmbridge_free_shm(&in_shm);
	qtee_shmbridge_free_shm(&out_shm);
	kfree(args_buf);

	if (ret)
		pr_err("invoke thread returning with ret = %d\n", ret);

	return ret;
}
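
/*
 * Handle SMCINVOKE_IOCTL_LOG: copy a fixed-size log string from
 * userspace, force NUL termination, and emit it through the smcinvoke
 * trace infrastructure.
 */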
static long process_log_info(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = 0;
	char buf[SMCINVOKE_LOG_BUF_SIZE];
	struct smcinvoke_file_data *tzobj = filp->private_data;

	ret = copy_from_user(buf, (void __user *)arg, SMCINVOKE_LOG_BUF_SIZE);
	if (ret) {
		pr_err("logging HLOS info copy failed\n");
		return -EFAULT;
	}
	buf[SMCINVOKE_LOG_BUF_SIZE - 1] = '\0';

	trace_process_log_info(buf, tzobj->context_type, tzobj->tzhandle);

	return ret;
}
static long smcinvoke_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	long ret = 0;

	switch (cmd) {
	case SMCINVOKE_IOCTL_INVOKE_REQ:
		ret = process_invoke_req(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_ACCEPT_REQ:
		ret = process_accept_req(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_SERVER_REQ:
		ret = process_server_req(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_ACK_LOCAL_OBJ:
		ret = process_ack_local_obj(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_LOG:
		ret = process_log_info(filp, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	trace_smcinvoke_ioctl(cmd, ret);
	return ret;
}
int get_root_fd(int *root_fd)
{
	if (!root_fd)
		return -EINVAL;
	else
		return get_fd_for_obj(SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL,
				SMCINVOKE_TZ_ROOT_OBJ, root_fd);
}
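
/*
 * Kernel-client entry point: resolve @fd to its struct file and funnel
 * the request through the same process_invoke_req() path used by the
 * ioctl interface.
 */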
int process_invoke_request_from_kernel_client(int fd,
		struct smcinvoke_cmd_req *req)
{
	struct file *filp = NULL;
	int ret = 0;

	if (!req) {
		pr_err("NULL req\n");
		return -EINVAL;
	}

	filp = fget(fd);
	if (!filp) {
		pr_err("Invalid fd %d\n", fd);
		return -EINVAL;
	}
	ret = process_invoke_req(filp, 0, (uintptr_t)req);
	/* trace before fput() so filp is not referenced after its ref is dropped */
	trace_process_invoke_request_from_kernel_client(fd, filp, file_count(filp));
	fput(filp);
	return ret;
}
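
/*
 * Load a split TZ app image ("<appname>.b00" through "<appname>.b07")
 * into a single shmbridge buffer. The .b00 ELF header must describe
 * exactly 8 program headers, whose p_offset fields give the placement
 * of each split bin inside the assembled image. Returns a pointer to
 * the image (owned by *shm), or NULL on failure.
 */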
char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm)
{
	int rc = 0;
	const struct firmware *fw_entry = NULL, *fw_entry00 = NULL, *fw_entry07 = NULL;
	char fw_name[MAX_APP_NAME_SIZE] = "\0";
	int num_images = 0, phi = 0;
	unsigned char app_arch = 0;
	u8 *img_data_ptr = NULL;
	size_t offset[8], bufferOffset = 0, phdr_table_offset = 0;
	Elf32_Phdr phdr32;
	Elf64_Phdr phdr64;
	struct elf32_hdr *ehdr = NULL;
	struct elf64_hdr *ehdr64 = NULL;

	/* Load b00 */
	snprintf(fw_name, sizeof(fw_name), "%s.b00", appname);
	rc = firmware_request_nowarn(&fw_entry00, fw_name, class_dev);
	if (rc) {
		pr_err("Load %s failed, ret:%d\n", fw_name, rc);
		return NULL;
	}

	app_arch = *(unsigned char *)(fw_entry00->data + EI_CLASS);

	/* Get the offsets of the split images from the program headers */
	offset[0] = 0;
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry00->data;
		num_images = ehdr->e_phnum;
		if (num_images != 8) {
			pr_err("Number of images %d is not valid\n", num_images);
			goto release_fw_entry00;
		}
		phdr_table_offset = (size_t)ehdr->e_phoff;
		for (phi = 1; phi < num_images; ++phi) {
			bufferOffset = phdr_table_offset + phi * sizeof(Elf32_Phdr);
			phdr32 = *(Elf32_Phdr *)(fw_entry00->data + bufferOffset);
			offset[phi] = (size_t)phdr32.p_offset;
		}
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry00->data;
		num_images = ehdr64->e_phnum;
		if (num_images != 8) {
			pr_err("Number of images %d is not valid\n", num_images);
			goto release_fw_entry00;
		}
		phdr_table_offset = (size_t)ehdr64->e_phoff;
		for (phi = 1; phi < num_images; ++phi) {
			bufferOffset = phdr_table_offset + phi * sizeof(Elf64_Phdr);
			phdr64 = *(Elf64_Phdr *)(fw_entry00->data + bufferOffset);
			offset[phi] = (size_t)phdr64.p_offset;
		}
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n", appname, app_arch);
		goto release_fw_entry00;
	}

	/* Find the size of the last split bin image */
	snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, num_images - 1);
	rc = firmware_request_nowarn(&fw_entry07, fw_name, class_dev);
	if (rc) {
		pr_err("Failed to locate blob %s\n", fw_name);
		goto release_fw_entry00;
	}

	/* Total image size is the offset of the last image plus its size */
	*fw_size = fw_entry07->size + offset[num_images - 1];

	/* Allocate memory for the buffer that will hold the assembled image */
	rc = qtee_shmbridge_allocate_shm((*fw_size), shm);
	if (rc) {
		pr_err("shmbridge alloc failed for size: %zu\n", *fw_size);
		goto release_fw_entry07;
	}
	img_data_ptr = shm->vaddr;

	/* Copy the contents of the split bins into the buffer */
	memcpy(img_data_ptr, fw_entry00->data, fw_entry00->size);
	for (phi = 1; phi < num_images - 1; phi++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, phi);
		rc = firmware_request_nowarn(&fw_entry, fw_name, class_dev);
		if (rc) {
			pr_err("Failed to locate blob %s\n", fw_name);
			qtee_shmbridge_free_shm(shm);
			img_data_ptr = NULL;
			goto release_fw_entry07;
		}
		memcpy(img_data_ptr + offset[phi], fw_entry->data, fw_entry->size);
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	memcpy(img_data_ptr + offset[phi], fw_entry07->data, fw_entry07->size);

release_fw_entry07:
	release_firmware(fw_entry07);
release_fw_entry00:
	release_firmware(fw_entry00);
	return img_data_ptr;
}
EXPORT_SYMBOL(firmware_request_from_smcinvoke);
static int smcinvoke_open(struct inode *nodp, struct file *filp)
{
	struct smcinvoke_file_data *tzcxt = NULL;

	tzcxt = kzalloc(sizeof(*tzcxt), GFP_KERNEL);
	if (!tzcxt)
		return -ENOMEM;

	tzcxt->tzhandle = SMCINVOKE_TZ_ROOT_OBJ;
	tzcxt->context_type = SMCINVOKE_OBJ_TYPE_TZ_OBJ;
	filp->private_data = tzcxt;

	return 0;
}
static int release_cb_server(uint16_t server_id)
{
	struct smcinvoke_server_info *server = NULL;

	mutex_lock(&g_smcinvoke_lock);
	server = find_cb_server_locked(server_id);
	if (server)
		kref_put(&server->ref_cnt, destroy_cb_server);
	mutex_unlock(&g_smcinvoke_lock);
	return 0;
}
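
/*
 * Tear down a per-fd context: server fds release their callback server,
 * while TZ object fds (other than the indestructible root object) send
 * an OBJECT_OP_RELEASE message to TZ so the remote reference is dropped
 * as well.
 */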
int smcinvoke_release_filp(struct file *filp)
{
	int ret = 0;
	bool release_handles;
	uint8_t *in_buf = NULL;
	uint8_t *out_buf = NULL;
	struct smcinvoke_msg_hdr hdr = {0};
	struct smcinvoke_file_data *file_data = filp->private_data;
	struct smcinvoke_cmd_req req = {0};
	uint32_t tzhandle = 0;
	struct qtee_shm in_shm = {0}, out_shm = {0};

	trace_smcinvoke_release_filp(current->files, filp,
			file_count(filp), file_data->context_type);

	if (file_data->context_type == SMCINVOKE_OBJ_TYPE_SERVER) {
		ret = release_cb_server(file_data->server_id);
		goto out;
	}

	tzhandle = file_data->tzhandle;
	/* The root object is special in the sense that it is indestructible */
	if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ)
		goto out;

	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for in msg in release\n");
		goto out;
	}

	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for out msg in release\n");
		goto out;
	}

	in_buf = in_shm.vaddr;
	out_buf = out_shm.vaddr;
	hdr.tzhandle = tzhandle;
	hdr.op = OBJECT_OP_RELEASE;
	hdr.counts = 0;
	*(struct smcinvoke_msg_hdr *)in_buf = hdr;

	ret = prepare_send_scm_msg(in_buf, in_shm.paddr,
			SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm.paddr,
			SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL, &release_handles,
			file_data->context_type, &in_shm, &out_shm);

	process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE);
out:
	kfree(filp->private_data);
	filp->private_data = NULL;

	qtee_shmbridge_free_shm(&in_shm);
	qtee_shmbridge_free_shm(&out_shm);

	return ret;
}
int smcinvoke_release_from_kernel_client(int fd)
{
	struct file *filp = NULL;

	/*
	 * fget() takes a reference on the file; it is dropped again with
	 * fput() once the release has been processed.
	 */
	filp = fget(fd);
	if (!filp) {
		pr_err("invalid fd %d to release\n", fd);
		return -EINVAL;
	}
	trace_smcinvoke_release_from_kernel_client(current->files, filp,
			file_count(filp));
	/* free filp and notify TZ to release the object */
	smcinvoke_release_filp(filp);
	fput(filp);
	return 0;
}
static int smcinvoke_release(struct inode *nodp, struct file *filp)
{
	trace_smcinvoke_release(current->files, filp, file_count(filp),
			filp->private_data);

	if (filp->private_data)
		return smcinvoke_release_filp(filp);
	else
		return 0;
}
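
/*
 * Platform-driver probe: create the smcinvoke char device, set a 64-bit
 * DMA mask, and pick the legacy or current SMC invoke command based on
 * the "qcom,support-legacy_smc" DT property.
 */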
static int smcinvoke_probe(struct platform_device *pdev)
{
	unsigned int baseminor = 0;
	unsigned int count = 1;
	int rc = 0;

	rc = alloc_chrdev_region(&smcinvoke_device_no, baseminor, count,
			SMCINVOKE_DEV);
	if (rc < 0) {
		pr_err("chrdev_region failed %d for %s\n", rc, SMCINVOKE_DEV);
		return rc;
	}
	driver_class = class_create(THIS_MODULE, SMCINVOKE_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}
	class_dev = device_create(driver_class, NULL, smcinvoke_device_no,
			NULL, SMCINVOKE_DEV);
	/* device_create() returns an ERR_PTR on failure, never NULL */
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&smcinvoke_cdev, &g_smcinvoke_fops);
	smcinvoke_cdev.owner = THIS_MODULE;

	rc = cdev_add(&smcinvoke_cdev, MKDEV(MAJOR(smcinvoke_device_no), 0),
			count);
	if (rc < 0) {
		pr_err("cdev_add failed %d for %s\n", rc, SMCINVOKE_DEV);
		goto exit_destroy_device;
	}
	smcinvoke_pdev = pdev;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		pr_err("dma_set_mask_and_coherent failed %d\n", rc);
		goto exit_destroy_device;
	}
	legacy_smc_call = of_property_read_bool((&pdev->dev)->of_node,
			"qcom,support-legacy_smc");
	invoke_cmd = legacy_smc_call ? SMCINVOKE_INVOKE_CMD_LEGACY : SMCINVOKE_INVOKE_CMD;

	return 0;

exit_destroy_device:
	device_destroy(driver_class, smcinvoke_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(smcinvoke_device_no, count);
	return rc;
}
static int smcinvoke_remove(struct platform_device *pdev)
{
	int count = 1;

	cdev_del(&smcinvoke_cdev);
	device_destroy(driver_class, smcinvoke_device_no);
	class_destroy(driver_class);
	unregister_chrdev_region(smcinvoke_device_no, count);
	return 0;
}
static int __maybe_unused smcinvoke_suspend(struct platform_device *pdev,
		pm_message_t state)
{
	int ret = 0;

	mutex_lock(&g_smcinvoke_lock);
	if (cb_reqs_inflight) {
		pr_err("Failed to suspend smcinvoke driver\n");
		ret = -EIO;
	}
	mutex_unlock(&g_smcinvoke_lock);
	return ret;
}

static int __maybe_unused smcinvoke_resume(struct platform_device *pdev)
{
	return 0;
}
static const struct of_device_id smcinvoke_match[] = {
	{
		.compatible = "qcom,smcinvoke",
	},
	{},
};

static struct platform_driver smcinvoke_plat_driver = {
	.probe = smcinvoke_probe,
	.remove = smcinvoke_remove,
	.suspend = smcinvoke_suspend,
	.resume = smcinvoke_resume,
	.driver = {
		.name = "smcinvoke",
		.of_match_table = smcinvoke_match,
	},
};
static int smcinvoke_init(void)
{
	return platform_driver_register(&smcinvoke_plat_driver);
}

static void smcinvoke_exit(void)
{
	platform_driver_unregister(&smcinvoke_plat_driver);
}

module_init(smcinvoke_init);
module_exit(smcinvoke_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SMC Invoke driver");