smcinvoke.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/hashtable.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/dma-buf.h>
#include <linux/delay.h>
#include <linux/kref.h>
#include <linux/signal.h>
#include <linux/msm_ion.h>
#include <linux/mem-buf.h>
#include <linux/of_platform.h>
#include <linux/firmware.h>
#include <linux/qcom_scm.h>
#include <linux/freezer.h>
#include <asm/cacheflush.h>
#include <soc/qcom/qseecomi.h>
#include <linux/qtee_shmbridge.h>
#include <linux/kthread.h>

#include "smcinvoke.h"
#include "smcinvoke_object.h"
#include "IClientEnv.h"

#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
#include <linux/qseecom_kernel.h>
#include "misc/qseecom_priv.h"
#else
#include "misc/qseecom_kernel.h"
#endif

#define CREATE_TRACE_POINTS
#include "trace_smcinvoke.h"

#define SMCINVOKE_DEV "smcinvoke"
#define SMCINVOKE_TZ_ROOT_OBJ 1
#define SMCINVOKE_TZ_OBJ_NULL 0
#define SMCINVOKE_TZ_MIN_BUF_SIZE 4096
#define SMCINVOKE_ARGS_ALIGN_SIZE (sizeof(uint64_t))
#define SMCINVOKE_NEXT_AVAILABLE_TXN 0
#define SMCINVOKE_REQ_PLACED 1
#define SMCINVOKE_REQ_PROCESSING 2
#define SMCINVOKE_REQ_PROCESSED 3
#define SMCINVOKE_INCREMENT 1
#define SMCINVOKE_DECREMENT 0
#define SMCINVOKE_OBJ_TYPE_TZ_OBJ 0
#define SMCINVOKE_OBJ_TYPE_SERVER 1
#define SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL 2
#define SMCINVOKE_MEM_MAP_OBJ 0
#define SMCINVOKE_MEM_RGN_OBJ 1
#define SMCINVOKE_MEM_PERM_RW 6
#define SMCINVOKE_SCM_EBUSY_WAIT_MS 30
#define SMCINVOKE_SCM_EBUSY_MAX_RETRY 200

/* TZ defined values - Start */
#define SMCINVOKE_INVOKE_PARAM_ID 0x224
#define SMCINVOKE_CB_RSP_PARAM_ID 0x22
#define SMCINVOKE_INVOKE_CMD_LEGACY 0x32000600
#define SMCINVOKE_INVOKE_CMD 0x32000602
#define SMCINVOKE_CB_RSP_CMD 0x32000601
#define SMCINVOKE_RESULT_INBOUND_REQ_NEEDED 3
/* TZ defined values - End */

/* Asynchronous protocol values */
/*
 * The driver async version is set to match the minimal TZ version that
 * supports async memory objects.
 */
#define SMCINVOKE_ASYNC_VERSION (0x00010002)
#define SMCINVOKE_ASYNC_OP_MEMORY_OBJECT (0x00000003)

/*
 * This is the state when the server FD has been closed but
 * TZ still holds refs to CBObjs served by this server.
 */
#define SMCINVOKE_SERVER_STATE_DEFUNCT 1

#define CBOBJ_MAX_RETRIES 50

#define FOR_ARGS(ndxvar, counts, section) \
	for (ndxvar = OBJECT_COUNTS_INDEX_##section(counts); \
		ndxvar < (OBJECT_COUNTS_INDEX_##section(counts) \
		+ OBJECT_COUNTS_NUM_##section(counts)); \
		++ndxvar)

#define TZCB_BUF_OFFSET(tzcb_req) (sizeof(tzcb_req->result) + \
	sizeof(struct smcinvoke_msg_hdr) + \
	sizeof(union smcinvoke_tz_args) * \
	OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts))
  89. /*
  90. * +ve uhandle : either remote obj or mem obj, decided by f_ops
  91. * -ve uhandle : either Obj NULL or CBObj
  92. * - -1: OBJ NULL
  93. * - < -1: CBObj
  94. */
  95. #define UHANDLE_IS_FD(h) ((h) >= 0)
  96. #define UHANDLE_IS_NULL(h) ((h) == SMCINVOKE_USERSPACE_OBJ_NULL)
  97. #define UHANDLE_IS_CB_OBJ(h) (h < SMCINVOKE_USERSPACE_OBJ_NULL)
  98. #define UHANDLE_NULL (SMCINVOKE_USERSPACE_OBJ_NULL)
  99. /*
  100. * MAKE => create handle for other domain i.e. TZ or userspace
  101. * GET => retrieve obj from incoming handle
  102. */
  103. #define UHANDLE_GET_CB_OBJ(h) (-2-(h))
  104. #define UHANDLE_MAKE_CB_OBJ(o) (-2-(o))
  105. #define UHANDLE_GET_FD(h) (h)
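
/*
 * Illustrative sketch, not part of the driver: round-trip of a callback
 * object through the uhandle helpers above. CBObj index 3 travels to
 * userspace as uhandle -5 (-2 - 3) and decodes back to 3, while a plain
 * FD such as 7 is its own uhandle.
 */
static inline void __maybe_unused uhandle_encoding_example(void)
{
	int32_t uh = UHANDLE_MAKE_CB_OBJ(3);	/* -2 - 3 == -5 */

	WARN_ON(!UHANDLE_IS_CB_OBJ(uh));
	WARN_ON(UHANDLE_GET_CB_OBJ(uh) != 3);	/* -2 - (-5) == 3 */
	WARN_ON(!UHANDLE_IS_FD(UHANDLE_GET_FD(7)));
}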
/*
 * +ve tzhandle : remote object, i.e. owned by TZ
 * -ve tzhandle : local object, i.e. owned by Linux
 * ---------------------------------------------------
 * | 1 (1 bit) | Obj Id (15 bits) | srvr id (16 bits) |
 * ---------------------------------------------------
 * Server ids are defined below for various local objects:
 * server id 0      : Kernel Obj
 * server id 1      : Memory region Obj
 * server id 2      : Memory map Obj
 * server id 3-15   : Reserved
 * server id 16 & up: Callback Objs
 */
#define KRNL_SRVR_ID 0
#define MEM_RGN_SRVR_ID 1
#define MEM_MAP_SRVR_ID 2
#define CBOBJ_SERVER_ID_START 0x10
#define CBOBJ_SERVER_ID_END ((1 << 16) - 1)
/* local obj id is represented by 15 bits */
#define MAX_LOCAL_OBJ_ID ((1 << 15) - 1)
/* CBOBJs will be served by server id 0x10 onwards */
#define TZHANDLE_GET_SERVER(h) ((uint16_t)((h) & 0xFFFF))
#define TZHANDLE_GET_OBJID(h) (((h) >> 16) & 0x7FFF)
#define TZHANDLE_MAKE_LOCAL(s, o) (((0x8000 | (o)) << 16) | (s))
#define SET_BIT(s, b) ((s) | (1 << (b)))
#define UNSET_BIT(s, b) ((s) & ~(1 << (b)))
#define TZHANDLE_IS_NULL(h) ((h) == SMCINVOKE_TZ_OBJ_NULL)
#define TZHANDLE_IS_LOCAL(h) ((h) & 0x80000000)
#define TZHANDLE_IS_REMOTE(h) (!TZHANDLE_IS_NULL(h) && !TZHANDLE_IS_LOCAL(h))
#define TZHANDLE_IS_KERNEL_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
	TZHANDLE_GET_SERVER(h) == KRNL_SRVR_ID)
#define TZHANDLE_IS_MEM_RGN_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
	TZHANDLE_GET_SERVER(h) == MEM_RGN_SRVR_ID)
#define TZHANDLE_IS_MEM_MAP_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
	TZHANDLE_GET_SERVER(h) == MEM_MAP_SRVR_ID)
#define TZHANDLE_IS_MEM_OBJ(h) (TZHANDLE_IS_MEM_RGN_OBJ(h) || \
	TZHANDLE_IS_MEM_MAP_OBJ(h))
#define TZHANDLE_IS_CB_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
	TZHANDLE_GET_SERVER(h) >= CBOBJ_SERVER_ID_START)
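
/*
 * Illustrative sketch, not part of the driver: encoding a local tzhandle
 * with the helpers above. A memory-region object with obj id 5 encodes as
 * 0x80050001: bit 31 = local, obj id 5 in bits 16..30 and server id
 * MEM_RGN_SRVR_ID (1) in the low 16 bits.
 */
static inline void __maybe_unused tzhandle_encoding_example(void)
{
	uint32_t h = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID, 5);

	WARN_ON(h != 0x80050001);
	WARN_ON(!TZHANDLE_IS_MEM_RGN_OBJ(h));
	WARN_ON(TZHANDLE_GET_OBJID(h) != 5);
	WARN_ON(TZHANDLE_GET_SERVER(h) != MEM_RGN_SRVR_ID);
}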
#define FILE_IS_REMOTE_OBJ(f) ((f)->f_op && (f)->f_op == &g_smcinvoke_fops)

static DEFINE_MUTEX(g_smcinvoke_lock);

#define NO_LOCK 0
#define TAKE_LOCK 1
#define MUTEX_LOCK(x) { if (x) mutex_lock(&g_smcinvoke_lock); }
#define MUTEX_UNLOCK(x) { if (x) mutex_unlock(&g_smcinvoke_lock); }

#define POST_KT_SLEEP 0
#define POST_KT_WAKEUP 1
#define MAX_CHAR_NAME 50

enum worker_thread_type {
	SHMB_WORKER_THREAD = 0,
	OBJECT_WORKER_THREAD,
	ADCI_WORKER_THREAD,
	MAX_THREAD_NUMBER
};

static DEFINE_HASHTABLE(g_cb_servers, 8);
static LIST_HEAD(g_mem_objs);
static uint16_t g_last_cb_server_id = CBOBJ_SERVER_ID_START;
static uint16_t g_last_mem_rgn_id, g_last_mem_map_obj_id;
static size_t g_max_cb_buf_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
static unsigned int cb_reqs_inflight;
static bool legacy_smc_call;
static int invoke_cmd;

static long smcinvoke_ioctl(struct file *, unsigned int, unsigned long);
static int smcinvoke_open(struct inode *, struct file *);
static int smcinvoke_release(struct inode *, struct file *);
static int release_cb_server(uint16_t);

static const struct file_operations g_smcinvoke_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = smcinvoke_ioctl,
	.compat_ioctl = smcinvoke_ioctl,
	.open = smcinvoke_open,
	.release = smcinvoke_release,
};

static dev_t smcinvoke_device_no;
static struct cdev smcinvoke_cdev;
static struct class *driver_class;
static struct device *class_dev;
static struct platform_device *smcinvoke_pdev;

/* Async memory object support is disabled by default and stays off until
 * the first message from TZ arrives over the async channel and lets us
 * determine the TZ async version.
 */
static bool mem_obj_async_support;
static uint32_t tz_async_version;

struct smcinvoke_buf_hdr {
	uint32_t offset;
	uint32_t size;
};

union smcinvoke_tz_args {
	struct smcinvoke_buf_hdr b;
	int32_t handle;
};

struct smcinvoke_msg_hdr {
	uint32_t tzhandle;
	uint32_t op;
	uint32_t counts;
};

/* Inbound reqs from TZ */
struct smcinvoke_tzcb_req {
	int32_t result;
	struct smcinvoke_msg_hdr hdr;
	union smcinvoke_tz_args args[];
};
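
/*
 * Illustrative sketch, not part of the driver: how FOR_ARGS() and
 * TZCB_BUF_OFFSET() (defined above) walk an inbound callback request. It
 * assumes the OBJECT_COUNTS_INDEX_BI()/OBJECT_COUNTS_NUM_BI() accessors
 * from smcinvoke_object.h. For a request with two args in total,
 * TZCB_BUF_OFFSET() evaluates to sizeof(int32_t) +
 * sizeof(struct smcinvoke_msg_hdr) + 2 * sizeof(union smcinvoke_tz_args),
 * i.e. the offset at which the packed payload buffers begin.
 */
static inline void __maybe_unused tzcb_req_layout_example(struct smcinvoke_tzcb_req *req)
{
	int i;

	/* each buffer-in (BI) arg records where its payload lives in the buf */
	FOR_ARGS(i, req->hdr.counts, BI)
		pr_debug("BI arg %d: offset %u size %u, payload base %zu\n",
				i, req->args[i].b.offset, req->args[i].b.size,
				TZCB_BUF_OFFSET(req));
}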
struct smcinvoke_file_data {
	uint32_t context_type;
	union {
		uint32_t tzhandle;
		uint16_t server_id;
	};
};

struct smcinvoke_piggyback_msg {
	uint32_t version;
	uint32_t op;
	uint32_t counts;
	int32_t objs[];
};

/* Mapped memory object data
 *
 * memObjRef	Handle reference for the memory object
 * mapObjRef	Handle reference for the map object
 * addr		Mapped memory address
 * size		Size of mapped memory
 * perm		Access rights for the memory
 */
struct smcinvoke_mem_obj_info {
	uint32_t memObjRef;
	uint32_t mapObjRef;
	uint64_t addr;
	uint64_t size;
	uint32_t perm;
};

/* Memory object info to be written into the async buffer
 *
 * version	Async protocol version
 * op		Async protocol operation
 * count	Number of memory objects passed
 * mo		Mapped memory object data
 */
struct smcinvoke_mem_obj_msg {
	uint32_t version;
	uint32_t op;
	uint32_t count;
	struct smcinvoke_mem_obj_info mo[];
};
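
/*
 * Illustrative sketch, not part of the driver: the shape of a single-entry
 * async memory-object message as described by the two structs above. The
 * handle and address values here are hypothetical placeholders.
 */
static inline void __maybe_unused mem_obj_msg_example(struct smcinvoke_mem_obj_msg *msg)
{
	msg->version = SMCINVOKE_ASYNC_VERSION;
	msg->op = SMCINVOKE_ASYNC_OP_MEMORY_OBJECT;
	msg->count = 1;
	msg->mo[0].memObjRef = 0x80050001;	/* hypothetical mem region tzhandle */
	msg->mo[0].mapObjRef = 0x80010002;	/* hypothetical mem map tzhandle */
	msg->mo[0].addr = 0x90000000;		/* hypothetical physical address */
	msg->mo[0].size = 4096;
	msg->mo[0].perm = SMCINVOKE_MEM_PERM_RW;
}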
struct smcinvoke_mem_obj_pending_async {
	struct smcinvoke_mem_obj *mem_obj;
	struct list_head list;
};

/* Data structure to hold a request coming from TZ */
struct smcinvoke_cb_txn {
	uint32_t txn_id;
	int32_t state;
	struct smcinvoke_tzcb_req *cb_req;
	size_t cb_req_bytes;
	struct file **filp_to_release;
	struct hlist_node hash;
	struct kref ref_cnt;
};

struct smcinvoke_server_info {
	uint16_t server_id;
	uint16_t state;
	uint32_t txn_id;
	struct kref ref_cnt;
	wait_queue_head_t req_wait_q;
	wait_queue_head_t rsp_wait_q;
	size_t cb_buf_size;
	DECLARE_HASHTABLE(reqs_table, 4);
	DECLARE_HASHTABLE(responses_table, 4);
	struct hlist_node hash;
	struct list_head pending_cbobjs;
	uint8_t is_server_suspended;
};

struct smcinvoke_cbobj {
	uint16_t cbobj_id;
	struct kref ref_cnt;
	struct smcinvoke_server_info *server;
	struct list_head list;
};

/*
 * We require a couple of objects, one for the mem region and another for
 * the mapped mem_obj once the mem region has been mapped. It is possible
 * that TZ releases either one independently of the other.
 */
struct smcinvoke_mem_obj {
	/* these ids are the objid part of the tzhandle */
	uint16_t mem_region_id;
	uint16_t mem_map_obj_id;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *buf_attach;
	struct sg_table *sgt;
	struct kref mem_regn_ref_cnt;
	struct kref mem_map_obj_ref_cnt;
	uint64_t p_addr;
	size_t p_addr_len;
	struct list_head list;
	bool is_smcinvoke_created_shmbridge;
	uint64_t shmbridge_handle;
	struct smcinvoke_server_info *server;
	int32_t mem_obj_user_fd;
};

static LIST_HEAD(g_bridge_postprocess);
DEFINE_MUTEX(bridge_postprocess_lock);
static LIST_HEAD(g_object_postprocess);
DEFINE_MUTEX(object_postprocess_lock);

struct bridge_deregister {
	uint64_t shmbridge_handle;
	struct dma_buf *dmabuf_to_free;
};

struct object_release {
	uint32_t tzhandle;
	uint32_t context_type;
};

struct smcinvoke_shmbridge_deregister_pending_list {
	struct list_head list;
	struct bridge_deregister data;
};

struct smcinvoke_object_release_pending_list {
	struct list_head list;
	struct object_release data;
};

struct smcinvoke_worker_thread {
	enum worker_thread_type type;
	atomic_t postprocess_kthread_state;
	wait_queue_head_t postprocess_kthread_wq;
	struct task_struct *postprocess_kthread_task;
};

static struct smcinvoke_worker_thread smcinvoke[MAX_THREAD_NUMBER];
static const char thread_name[MAX_THREAD_NUMBER][MAX_CHAR_NAME] = {
	"smcinvoke_shmbridge_postprocess",
	"smcinvoke_object_postprocess",
	"smcinvoke_adci_thread"
};

static struct Object adci_clientEnv = Object_NULL;

static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
		size_t in_buf_len,
		uint8_t *out_buf, phys_addr_t out_paddr,
		size_t out_buf_len,
		struct smcinvoke_cmd_req *req,
		union smcinvoke_arg *args_buf,
		bool *tz_acked, uint32_t context_type,
		struct qtee_shm *in_shm, struct qtee_shm *out_shm);
static void process_piggyback_data(void *buf, size_t buf_size);

static void destroy_cb_server(struct kref *kref)
{
	struct smcinvoke_server_info *server = container_of(kref,
			struct smcinvoke_server_info, ref_cnt);

	if (server) {
		hash_del(&server->hash);
		kfree(server);
	}
}

/*
 * A separate find function is required mainly for a couple of cases:
 * next_cb_server_id_locked, which checks whether a server id has been used.
 * - It would be overhead to take a ref_cnt for this case.
 * smcinvoke_release, which is called when a server is closed from userspace.
 * - During server creation we init the ref count; here we put it back.
 */
static struct smcinvoke_server_info *find_cb_server_locked(uint16_t server_id)
{
	struct smcinvoke_server_info *data = NULL;

	hash_for_each_possible(g_cb_servers, data, hash, server_id) {
		if (data->server_id == server_id)
			return data;
	}
	return NULL;
}

static struct smcinvoke_server_info *get_cb_server_locked(uint16_t server_id)
{
	struct smcinvoke_server_info *server = find_cb_server_locked(server_id);

	if (server)
		kref_get(&server->ref_cnt);

	return server;
}

static uint16_t next_cb_server_id_locked(void)
{
	if (g_last_cb_server_id == CBOBJ_SERVER_ID_END)
		g_last_cb_server_id = CBOBJ_SERVER_ID_START;

	while (find_cb_server_locked(++g_last_cb_server_id))
		;

	return g_last_cb_server_id;
}

static inline void release_filp(struct file **filp_to_release, size_t arr_len)
{
	size_t i = 0;

	for (i = 0; i < arr_len; i++) {
		if (filp_to_release[i]) {
			fput(filp_to_release[i]);
			filp_to_release[i] = NULL;
		}
	}
}

static struct smcinvoke_mem_obj *find_mem_obj_locked(uint16_t mem_obj_id,
		bool is_mem_rgn_obj)
{
	struct smcinvoke_mem_obj *mem_obj = NULL;

	if (list_empty(&g_mem_objs))
		return NULL;

	list_for_each_entry(mem_obj, &g_mem_objs, list) {
		if ((is_mem_rgn_obj &&
			(mem_obj->mem_region_id == mem_obj_id)) ||
			(!is_mem_rgn_obj &&
			(mem_obj->mem_map_obj_id == mem_obj_id)))
			return mem_obj;
	}
	return NULL;
}

static uint32_t next_mem_region_obj_id_locked(void)
{
	if (g_last_mem_rgn_id == MAX_LOCAL_OBJ_ID)
		g_last_mem_rgn_id = 0;

	while (find_mem_obj_locked(++g_last_mem_rgn_id, SMCINVOKE_MEM_RGN_OBJ))
		;

	return g_last_mem_rgn_id;
}

static uint32_t next_mem_map_obj_id_locked(void)
{
	if (g_last_mem_map_obj_id == MAX_LOCAL_OBJ_ID)
		g_last_mem_map_obj_id = 0;

	while (find_mem_obj_locked(++g_last_mem_map_obj_id,
			SMCINVOKE_MEM_MAP_OBJ))
		;

	return g_last_mem_map_obj_id;
}

static void smcinvoke_shmbridge_post_process(void)
{
	struct smcinvoke_shmbridge_deregister_pending_list *entry = NULL;
	struct list_head *pos;
	int ret = 0;
	uint64_t handle = 0;
	struct dma_buf *dmabuf_to_free = NULL;

	do {
		mutex_lock(&bridge_postprocess_lock);
		if (list_empty(&g_bridge_postprocess)) {
			mutex_unlock(&bridge_postprocess_lock);
			break;
		}
		pos = g_bridge_postprocess.next;
		entry = list_entry(pos,
				struct smcinvoke_shmbridge_deregister_pending_list,
				list);
		if (entry) {
			handle = entry->data.shmbridge_handle;
			dmabuf_to_free = entry->data.dmabuf_to_free;
		} else {
			pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos);
		}
		list_del(pos);
		kfree_sensitive(entry);
		mutex_unlock(&bridge_postprocess_lock);

		if (entry) {
			do {
				ret = qtee_shmbridge_deregister(handle);
				if (unlikely(ret)) {
					pr_err("SHM failed: ret:%d ptr:%p h:%#llx\n",
							ret, dmabuf_to_free,
							handle);
				} else {
					pr_debug("SHM deletion: Handle:%#llx\n",
							handle);
					dma_buf_put(dmabuf_to_free);
				}
			} while (-EBUSY == ret);
		}
	} while (1);
}
static int smcinvoke_object_post_process(void)
{
	struct smcinvoke_object_release_pending_list *entry = NULL;
	struct list_head *pos;
	int ret = 0;
	bool release_handles;
	uint32_t context_type;
	uint8_t *in_buf = NULL;
	uint8_t *out_buf = NULL;
	struct smcinvoke_cmd_req req = {0};
	struct smcinvoke_msg_hdr hdr = {0};
	struct qtee_shm in_shm = {0}, out_shm = {0};

	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for in msg in object release\n");
		goto out;
	}
	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for out msg in object release\n");
		goto out;
	}

	do {
		mutex_lock(&object_postprocess_lock);
		if (list_empty(&g_object_postprocess)) {
			mutex_unlock(&object_postprocess_lock);
			break;
		}
		pos = g_object_postprocess.next;
		entry = list_entry(pos,
				struct smcinvoke_object_release_pending_list, list);
		if (entry) {
			in_buf = in_shm.vaddr;
			out_buf = out_shm.vaddr;
			hdr.tzhandle = entry->data.tzhandle;
			hdr.op = OBJECT_OP_RELEASE;
			hdr.counts = 0;
			*(struct smcinvoke_msg_hdr *)in_buf = hdr;
			context_type = entry->data.context_type;
		} else {
			pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos);
		}
		list_del(pos);
		kfree_sensitive(entry);
		mutex_unlock(&object_postprocess_lock);

		if (entry) {
			do {
				ret = prepare_send_scm_msg(in_buf, in_shm.paddr,
					SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm.paddr,
					SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL,
					&release_handles, context_type, &in_shm, &out_shm);
				process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE);
				if (ret) {
					pr_err("Failed to release object(0x%x), ret:%d\n",
							hdr.tzhandle, ret);
				} else {
					pr_debug("Released object(0x%x) successfully.\n",
							hdr.tzhandle);
				}
			} while (-EBUSY == ret);
		}
	} while (1);

out:
	qtee_shmbridge_free_shm(&in_shm);
	qtee_shmbridge_free_shm(&out_shm);
	return ret;
}
static void smcinvoke_start_adci_thread(void)
{
	int32_t ret = OBJECT_ERROR;
	int retry_count = 0;

	ret = get_client_env_object(&adci_clientEnv);
	if (ret) {
		pr_err("failed to get clientEnv for ADCI invoke thread. ret = %d\n", ret);
		/* Mark it Object_NULL in the failure case to avoid undefined
		 * behavior while releasing a garbage adci_clientEnv object.
		 */
		adci_clientEnv = Object_NULL;
		goto out;
	}
	/* Invoke call to QTEE which should never return if ADCI is supported */
	do {
		ret = IClientEnv_adciAccept(adci_clientEnv);
		if (ret == OBJECT_ERROR_BUSY) {
			pr_err("Secure side is busy, will retry after 5 ms, retry_count = %d\n",
					retry_count);
			msleep(SMCINVOKE_INTERFACE_BUSY_WAIT_MS);
		}
	} while ((ret == OBJECT_ERROR_BUSY) &&
			(retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));

	if (ret == OBJECT_ERROR_INVALID)
		pr_err("ADCI feature is not supported on this chipset, ret = %d\n", ret);
	else
		pr_debug("Received response from QTEE, ret = %d\n", ret);
out:
	/*
	 * Control reaches this point only if the ADCI feature is not supported
	 * by QTEE (or) the ADCI thread held in QTEE has been released.
	 */
	Object_ASSIGN_NULL(adci_clientEnv);
}
static void __wakeup_postprocess_kthread(struct smcinvoke_worker_thread *smcinvoke)
{
	if (smcinvoke) {
		atomic_set(&smcinvoke->postprocess_kthread_state,
				POST_KT_WAKEUP);
		wake_up_interruptible(&smcinvoke->postprocess_kthread_wq);
	} else {
		pr_err("Invalid smcinvoke pointer.\n");
	}
}

static int smcinvoke_postprocess_kthread_func(void *data)
{
	struct smcinvoke_worker_thread *smcinvoke_wrk_trd = data;
	/* initialized so the exit message below never prints garbage */
	const char *tag = "unknown";

	if (!smcinvoke_wrk_trd) {
		pr_err("Bad input.\n");
		return -EINVAL;
	}

	while (!kthread_should_stop()) {
		wait_event_interruptible(
				smcinvoke_wrk_trd->postprocess_kthread_wq,
				kthread_should_stop() ||
				(atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state)
				== POST_KT_WAKEUP));
		switch (smcinvoke_wrk_trd->type) {
		case SHMB_WORKER_THREAD:
			tag = "shmbridge";
			pr_debug("kthread to %s postprocess is called %d\n",
					tag, atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
			smcinvoke_shmbridge_post_process();
			break;
		case OBJECT_WORKER_THREAD:
			tag = "object";
			pr_debug("kthread to %s postprocess is called %d\n",
					tag, atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
			smcinvoke_object_post_process();
			break;
		case ADCI_WORKER_THREAD:
			tag = "adci";
			pr_debug("kthread to %s postprocess is called %d\n",
					tag, atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
			smcinvoke_start_adci_thread();
			break;
		default:
			pr_err("Invalid thread type(%d), do nothing.\n",
					(int)smcinvoke_wrk_trd->type);
			break;
		}
		/*
		 * For the ADCI thread, reaching here indicates that the thread is
		 * either not supported or has been released by QTEE. Since the
		 * ADCI thread is signaled only during smcinvoke driver
		 * initialization, there is no point in putting it back to sleep;
		 * all required post-processing is handled by the object and
		 * shmbridge threads.
		 */
		if (smcinvoke_wrk_trd->type == ADCI_WORKER_THREAD)
			break;
		atomic_set(&smcinvoke_wrk_trd->postprocess_kthread_state,
				POST_KT_SLEEP);
	}
	pr_warn("kthread to %s postprocess stopped\n", tag);

	return 0;
}
static int smcinvoke_create_kthreads(void)
{
	int i, rc = 0;
	const enum worker_thread_type thread_type[MAX_THREAD_NUMBER] = {
		SHMB_WORKER_THREAD, OBJECT_WORKER_THREAD, ADCI_WORKER_THREAD};

	for (i = 0; i < MAX_THREAD_NUMBER; i++) {
		init_waitqueue_head(&smcinvoke[i].postprocess_kthread_wq);
		smcinvoke[i].type = thread_type[i];
		smcinvoke[i].postprocess_kthread_task = kthread_run(
				smcinvoke_postprocess_kthread_func,
				&smcinvoke[i], thread_name[i]);
		if (IS_ERR(smcinvoke[i].postprocess_kthread_task)) {
			rc = PTR_ERR(smcinvoke[i].postprocess_kthread_task);
			pr_err("fail to create kthread to postprocess, rc = %x\n",
					rc);
			return rc;
		}
		atomic_set(&smcinvoke[i].postprocess_kthread_state,
				POST_KT_SLEEP);
	}

	return rc;
}

static void smcinvoke_destroy_kthreads(void)
{
	int i;
	int32_t ret = OBJECT_ERROR;
	int retry_count = 0;

	if (!Object_isNull(adci_clientEnv)) {
		do {
			ret = IClientEnv_adciShutdown(adci_clientEnv);
			if (ret == OBJECT_ERROR_BUSY) {
				pr_err("Secure side is busy, will retry after 5 ms, retry_count = %d\n",
						retry_count);
				msleep(SMCINVOKE_INTERFACE_BUSY_WAIT_MS);
			}
		} while ((ret == OBJECT_ERROR_BUSY) &&
				(retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));
		if (OBJECT_isERROR(ret))
			pr_err("adciShutdown in QTEE failed with error = %d\n", ret);
		Object_ASSIGN_NULL(adci_clientEnv);
	}

	for (i = 0; i < MAX_THREAD_NUMBER; i++)
		kthread_stop(smcinvoke[i].postprocess_kthread_task);
}
/* Queue a newly created memory object onto the l_pending_mem_obj list.
 * Later, the mapping information for objects on this list is sent to TZ
 * over the async side channel.
 *
 * No return value: TZ can always ask for this information explicitly if
 * this function fails and the memory object is not added to the list.
 */
static void queue_mem_obj_pending_async_locked(struct smcinvoke_mem_obj *mem_obj,
		struct list_head *l_pending_mem_obj)
{
	struct smcinvoke_mem_obj_pending_async *t_mem_obj_pending =
			kzalloc(sizeof(*t_mem_obj_pending), GFP_KERNEL);

	/*
	 * We do not fail execution on an allocation failure here, since TZ
	 * can always ask for this information explicitly if it is not
	 * available in the side channel.
	 */
	if (!t_mem_obj_pending) {
		pr_err("Unable to allocate memory\n");
		return;
	}

	t_mem_obj_pending->mem_obj = mem_obj;
	list_add(&t_mem_obj_pending->list, l_pending_mem_obj);
}

static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj)
{
	int ret = 0;
	bool is_bridge_created = mem_obj->is_smcinvoke_created_shmbridge;
	struct dma_buf *dmabuf_to_free = mem_obj->dma_buf;
	uint64_t shmbridge_handle = mem_obj->shmbridge_handle;
	struct smcinvoke_shmbridge_deregister_pending_list *entry = NULL;

	list_del(&mem_obj->list);
	kfree(mem_obj->server);
	kfree(mem_obj);
	mem_obj = NULL;
	mutex_unlock(&g_smcinvoke_lock);

	if (is_bridge_created)
		ret = qtee_shmbridge_deregister(shmbridge_handle);
	if (ret) {
		pr_err("Error:%d delete bridge failed leaking memory %p\n",
				ret, dmabuf_to_free);
		if (ret == -EBUSY) {
			pr_err("EBUSY: we postpone it %p\n", dmabuf_to_free);
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (entry) {
				entry->data.shmbridge_handle = shmbridge_handle;
				entry->data.dmabuf_to_free = dmabuf_to_free;
				mutex_lock(&bridge_postprocess_lock);
				list_add_tail(&entry->list, &g_bridge_postprocess);
				mutex_unlock(&bridge_postprocess_lock);
				pr_debug("SHMBridge list: added a Handle:%#llx\n",
						shmbridge_handle);
				__wakeup_postprocess_kthread(
						&smcinvoke[SHMB_WORKER_THREAD]);
			}
		}
	} else {
		dma_buf_put(dmabuf_to_free);
	}

	mutex_lock(&g_smcinvoke_lock);
}
static void del_mem_regn_obj_locked(struct kref *kref)
{
	struct smcinvoke_mem_obj *mem_obj = container_of(kref,
			struct smcinvoke_mem_obj, mem_regn_ref_cnt);

	/*
	 * mem_regn obj and mem_map obj are held in the mem_obj structure,
	 * which can't be released until both kinds of objs have been
	 * released. So release mem_obj only when the mem_map obj's ref is 0.
	 */
	if (kref_read(&mem_obj->mem_map_obj_ref_cnt) == 0)
		free_mem_obj_locked(mem_obj);
}

static void del_mem_map_obj_locked(struct kref *kref)
{
	struct smcinvoke_mem_obj *mem_obj = container_of(kref,
			struct smcinvoke_mem_obj, mem_map_obj_ref_cnt);

	mem_obj->p_addr_len = 0;
	mem_obj->p_addr = 0;
	if (mem_obj->sgt)
		dma_buf_unmap_attachment(mem_obj->buf_attach,
				mem_obj->sgt, DMA_BIDIRECTIONAL);
	if (mem_obj->buf_attach)
		dma_buf_detach(mem_obj->dma_buf, mem_obj->buf_attach);

	/*
	 * mem_regn obj and mem_map obj are held in the mem_obj structure,
	 * which can't be released until both kinds of objs have been
	 * released. So release mem_obj only when the mem_regn obj's ref is 0.
	 */
	if (kref_read(&mem_obj->mem_regn_ref_cnt) == 0)
		free_mem_obj_locked(mem_obj);
}

static int release_mem_obj_locked(int32_t tzhandle)
{
	int is_mem_regn_obj = TZHANDLE_IS_MEM_RGN_OBJ(tzhandle);
	struct smcinvoke_mem_obj *mem_obj = find_mem_obj_locked(
			TZHANDLE_GET_OBJID(tzhandle), is_mem_regn_obj);

	if (!mem_obj) {
		pr_err("memory object not found\n");
		return OBJECT_ERROR_BADOBJ;
	}

	if (is_mem_regn_obj)
		kref_put(&mem_obj->mem_regn_ref_cnt, del_mem_regn_obj_locked);
	else
		kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);

	return OBJECT_OK;
}
static void free_pending_cbobj_locked(struct kref *kref)
{
	struct smcinvoke_server_info *server = NULL;
	struct smcinvoke_cbobj *obj = container_of(kref,
			struct smcinvoke_cbobj, ref_cnt);

	list_del(&obj->list);
	server = obj->server;
	kfree(obj);
	if (server)
		kref_put(&server->ref_cnt, destroy_cb_server);
}

static int get_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
{
	int ret = 0;
	bool release_server = true;
	struct list_head *head = NULL;
	struct smcinvoke_cbobj *cbobj = NULL;
	struct smcinvoke_cbobj *obj = NULL;
	struct smcinvoke_server_info *server = get_cb_server_locked(srvr_id);

	if (!server) {
		pr_err("%s, server id : %u not found\n", __func__, srvr_id);
		return OBJECT_ERROR_BADOBJ;
	}

	head = &server->pending_cbobjs;
	list_for_each_entry(cbobj, head, list)
		if (cbobj->cbobj_id == obj_id) {
			kref_get(&cbobj->ref_cnt);
			goto out;
		}

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		ret = OBJECT_ERROR_KMEM;
		goto out;
	}

	obj->cbobj_id = obj_id;
	kref_init(&obj->ref_cnt);
	obj->server = server;
	/*
	 * we are holding a server ref in cbobj; we will
	 * release the server ref when the cbobj is destroyed
	 */
	release_server = false;
	list_add_tail(&obj->list, head);
out:
	if (release_server)
		kref_put(&server->ref_cnt, destroy_cb_server);
	return ret;
}

static int put_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
{
	int ret = -EINVAL;
	struct smcinvoke_server_info *srvr_info =
			get_cb_server_locked(srvr_id);
	struct list_head *head = NULL;
	struct smcinvoke_cbobj *cbobj = NULL;

	if (!srvr_info) {
		pr_err("%s, server id : %u not found\n", __func__, srvr_id);
		return ret;
	}

	trace_put_pending_cbobj_locked(srvr_id, obj_id);

	head = &srvr_info->pending_cbobjs;
	list_for_each_entry(cbobj, head, list)
		if (cbobj->cbobj_id == obj_id) {
			kref_put(&cbobj->ref_cnt, free_pending_cbobj_locked);
			ret = 0;
			break;
		}
	kref_put(&srvr_info->ref_cnt, destroy_cb_server);
	return ret;
}
static int release_tzhandle_locked(int32_t tzhandle)
{
	if (TZHANDLE_IS_MEM_OBJ(tzhandle))
		return release_mem_obj_locked(tzhandle);
	else if (TZHANDLE_IS_CB_OBJ(tzhandle))
		return put_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle),
				TZHANDLE_GET_OBJID(tzhandle));
	return OBJECT_ERROR;
}

static void release_tzhandles(const int32_t *tzhandles, size_t len)
{
	size_t i;

	mutex_lock(&g_smcinvoke_lock);
	for (i = 0; i < len; i++)
		release_tzhandle_locked(tzhandles[i]);
	mutex_unlock(&g_smcinvoke_lock);
}

static void delete_cb_txn_locked(struct kref *kref)
{
	struct smcinvoke_cb_txn *cb_txn = container_of(kref,
			struct smcinvoke_cb_txn, ref_cnt);

	if (OBJECT_OP_METHODID(cb_txn->cb_req->hdr.op) == OBJECT_OP_RELEASE)
		release_tzhandle_locked(cb_txn->cb_req->hdr.tzhandle);

	kfree(cb_txn->cb_req);
	hash_del(&cb_txn->hash);
	kfree(cb_txn);
}

static struct smcinvoke_cb_txn *find_cbtxn_locked(
		struct smcinvoke_server_info *server,
		uint32_t txn_id, int32_t state)
{
	int i = 0;
	struct smcinvoke_cb_txn *cb_txn = NULL;
	struct smcinvoke_mem_obj *mem_obj = NULL;
	int32_t tzhandle = 0;

	/*
	 * Since HASH_BITS() does not work on pointers, we can't select a hash
	 * table using state and loop over it.
	 */
	if (state == SMCINVOKE_REQ_PLACED) {
		/* pick up the 1st req */
		hash_for_each(server->reqs_table, i, cb_txn, hash) {
			kref_get(&cb_txn->ref_cnt);
			tzhandle = (cb_txn->cb_req)->hdr.tzhandle;
			if (TZHANDLE_IS_MEM_OBJ(tzhandle)) {
				mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(tzhandle),
						SMCINVOKE_MEM_RGN_OBJ);
				/* guard against a concurrently released mem obj */
				if (mem_obj)
					kref_get(&mem_obj->mem_regn_ref_cnt);
			}
			hash_del(&cb_txn->hash);
			return cb_txn;
		}
	} else if (state == SMCINVOKE_REQ_PROCESSING) {
		hash_for_each_possible(
				server->responses_table, cb_txn, hash, txn_id) {
			if (cb_txn->txn_id == txn_id) {
				kref_get(&cb_txn->ref_cnt);
				tzhandle = (cb_txn->cb_req)->hdr.tzhandle;
				if (TZHANDLE_IS_MEM_OBJ(tzhandle)) {
					mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(tzhandle),
							SMCINVOKE_MEM_RGN_OBJ);
					/* guard against a concurrently released mem obj */
					if (mem_obj)
						kref_get(&mem_obj->mem_regn_ref_cnt);
				}
				hash_del(&cb_txn->hash);
				return cb_txn;
			}
		}
	}
	return NULL;
}
/*
 * size_add_ saturates at SIZE_MAX. If integer overflow is detected,
 * this function returns SIZE_MAX; otherwise the normal a + b is returned.
 */
static inline size_t size_add_(size_t a, size_t b)
{
	return (b > (SIZE_MAX - a)) ? SIZE_MAX : a + b;
}

/*
 * pad_size is used along with size_align to define a buffer-overflow-
 * protected version of ALIGN.
 */
static inline size_t pad_size(size_t a, size_t b)
{
	return (~a + 1) % b;
}

/*
 * size_align saturates at SIZE_MAX. If integer overflow is detected, this
 * function returns SIZE_MAX; otherwise the next aligned size is returned.
 */
static inline size_t size_align(size_t a, size_t b)
{
	return size_add_(a, pad_size(a, b));
}
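
/*
 * Illustrative sketch, not part of the driver: worked values for the
 * saturating size helpers above. Aligning 10 bytes to an 8-byte boundary
 * needs pad_size(10, 8) == 6, so size_align(10, 8) == 16, while
 * size_add_(SIZE_MAX, 1) saturates to SIZE_MAX instead of wrapping to 0.
 */
static inline void __maybe_unused size_helpers_example(void)
{
	WARN_ON(pad_size(10, 8) != 6);
	WARN_ON(size_align(10, 8) != 16);
	WARN_ON(size_add_(SIZE_MAX, 1) != SIZE_MAX);
}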
static uint16_t get_server_id(int cb_server_fd)
{
	uint16_t server_id = 0;
	struct smcinvoke_file_data *svr_cxt = NULL;
	struct file *tmp_filp = fget(cb_server_fd);

	if (!tmp_filp || !FILE_IS_REMOTE_OBJ(tmp_filp)) {
		/* drop the ref taken by fget() before bailing out */
		if (tmp_filp)
			fput(tmp_filp);
		return server_id;
	}

	svr_cxt = tmp_filp->private_data;
	if (svr_cxt && svr_cxt->context_type == SMCINVOKE_OBJ_TYPE_SERVER)
		server_id = svr_cxt->server_id;
	fput(tmp_filp);

	return server_id;
}

static bool is_dma_fd(int32_t uhandle, struct dma_buf **dma_buf)
{
	*dma_buf = dma_buf_get(uhandle);
	return !IS_ERR_OR_NULL(*dma_buf);
}

static bool is_remote_obj(int32_t uhandle, struct smcinvoke_file_data **tzobj,
		struct file **filp)
{
	bool ret = false;
	struct file *tmp_filp = fget(uhandle);

	if (!tmp_filp)
		return ret;

	if (FILE_IS_REMOTE_OBJ(tmp_filp)) {
		*tzobj = tmp_filp->private_data;
		if ((*tzobj)->context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
			*filp = tmp_filp;
			tmp_filp = NULL;
			ret = true;
		}
	}

	if (tmp_filp)
		fput(tmp_filp);
	return ret;
}

static int smcinvoke_create_bridge(struct smcinvoke_mem_obj *mem_obj)
{
	int ret = 0;
	int tz_perm = PERM_READ | PERM_WRITE;
	uint32_t *vmid_list;
	uint32_t *perms_list;
	uint32_t nelems = 0;
	struct dma_buf *dmabuf = mem_obj->dma_buf;
	phys_addr_t phys = mem_obj->p_addr;
	size_t size = mem_obj->p_addr_len;

	if (!qtee_shmbridge_is_enabled())
		return 0;

	ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list,
			(int **)&perms_list, (int *)&nelems);
	if (ret) {
		pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret);
		return ret;
	}

	if (mem_buf_dma_buf_exclusive_owner(dmabuf))
		perms_list[0] = PERM_READ | PERM_WRITE;

	ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
			tz_perm, &mem_obj->shmbridge_handle);
	if (ret == 0) {
		/* On success the handle has to be freed in memobj release */
		mem_obj->is_smcinvoke_created_shmbridge = true;
	} else if (ret == -EEXIST) {
		ret = 0;
		goto exit;
	} else {
		pr_err("creation of shm bridge for mem_region_id %d failed ret %d\n",
				mem_obj->mem_region_id, ret);
		goto exit;
	}

	trace_smcinvoke_create_bridge(mem_obj->shmbridge_handle, mem_obj->mem_region_id);
exit:
	kfree(perms_list);
	kfree(vmid_list);
	return ret;
}
/* Map the memory region for a given memory object.
 * Mapping information is saved as part of the memory object structure.
 */
static int32_t smcinvoke_map_mem_region_locked(struct smcinvoke_mem_obj *mem_obj)
{
	int ret = OBJECT_OK;
	struct dma_buf_attachment *buf_attach = NULL;
	struct sg_table *sgt = NULL;

	if (!mem_obj) {
		pr_err("Invalid memory object\n");
		return OBJECT_ERROR_BADOBJ;
	}

	if (!mem_obj->p_addr) {
		kref_init(&mem_obj->mem_map_obj_ref_cnt);
		buf_attach = dma_buf_attach(mem_obj->dma_buf,
				&smcinvoke_pdev->dev);
		if (IS_ERR(buf_attach)) {
			ret = OBJECT_ERROR_KMEM;
			pr_err("dma buf attach failed, ret: %d\n", ret);
			goto out;
		}
		mem_obj->buf_attach = buf_attach;

		sgt = dma_buf_map_attachment(buf_attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt)) {
			pr_err("mapping dma buffers failed, ret: %ld\n",
					PTR_ERR(sgt));
			ret = OBJECT_ERROR_KMEM;
			goto out;
		}
		mem_obj->sgt = sgt;

		/* contiguous only => nents=1 */
		if (sgt->nents != 1) {
			ret = OBJECT_ERROR_INVALID;
			pr_err("sg entries are not contiguous, ret: %d\n", ret);
			goto out;
		}
		mem_obj->p_addr = sg_dma_address(sgt->sgl);
		mem_obj->p_addr_len = sgt->sgl->length;
		if (!mem_obj->p_addr) {
			ret = OBJECT_ERROR_INVALID;
			pr_err("invalid physical address, ret: %d\n", ret);
			goto out;
		}

		/* Increase the reference count as we are feeding the memobj to
		 * smcinvoke and unlock the mutex. No need to hold the mutex
		 * during shmbridge creation.
		 */
		kref_get(&mem_obj->mem_map_obj_ref_cnt);
		mutex_unlock(&g_smcinvoke_lock);

		ret = smcinvoke_create_bridge(mem_obj);

		/* Take the lock again and decrease the reference count which
		 * we increased for shmbridge, but before proceeding further we
		 * have to check again whether the memobj is still valid after
		 * decreasing the reference.
		 */
		mutex_lock(&g_smcinvoke_lock);
		kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
		if (ret) {
			ret = OBJECT_ERROR_INVALID;
			pr_err("Unable to create shm bridge, ret: %d\n", ret);
			goto out;
		}

		if (!find_mem_obj_locked(mem_obj->mem_region_id,
				SMCINVOKE_MEM_RGN_OBJ)) {
			mutex_unlock(&g_smcinvoke_lock);
			pr_err("Memory object not found\n");
			return OBJECT_ERROR_BADOBJ;
		}

		mem_obj->mem_map_obj_id = next_mem_map_obj_id_locked();
	}

out:
	if (ret != OBJECT_OK)
		kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
	return ret;
}
static int create_mem_obj(struct dma_buf *dma_buf, int32_t *tzhandle,
		struct smcinvoke_mem_obj **mem_obj, int32_t server_id, int32_t user_handle)
{
	struct smcinvoke_mem_obj *t_mem_obj = NULL;
	struct smcinvoke_server_info *server_i = NULL;

	t_mem_obj = kzalloc(sizeof(struct smcinvoke_mem_obj), GFP_KERNEL);
	if (!t_mem_obj) {
		dma_buf_put(dma_buf);
		return -ENOMEM;
	}
	server_i = kzalloc(sizeof(struct smcinvoke_server_info), GFP_KERNEL);
	if (!server_i) {
		kfree(t_mem_obj);
		dma_buf_put(dma_buf);
		return -ENOMEM;
	}

	kref_init(&t_mem_obj->mem_regn_ref_cnt);
	t_mem_obj->dma_buf = dma_buf;
	mutex_lock(&g_smcinvoke_lock);
	t_mem_obj->mem_region_id = next_mem_region_obj_id_locked();
	server_i->server_id = server_id;
	t_mem_obj->server = server_i;
	t_mem_obj->mem_obj_user_fd = user_handle;
	list_add_tail(&t_mem_obj->list, &g_mem_objs);
	mutex_unlock(&g_smcinvoke_lock);
	*mem_obj = t_mem_obj;
	*tzhandle = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID,
			t_mem_obj->mem_region_id);
	return 0;
}
/*
 * This function retrieves the file pointer corresponding to the FD
 * provided. It stores the retrieved file pointer until the IOCTL call is
 * concluded. Once the call is completed, all stored file pointers are
 * released. File pointers are stored to prevent other threads from
 * releasing that FD while the IOCTL is in progress.
 */
static int get_tzhandle_from_uhandle(int32_t uhandle, int32_t server_fd,
		struct file **filp, uint32_t *tzhandle, struct list_head *l_pending_mem_obj)
{
	int ret = -EBADF;
	uint16_t server_id = 0;
	struct smcinvoke_mem_obj *mem_obj = NULL;

	if (UHANDLE_IS_NULL(uhandle)) {
		*tzhandle = SMCINVOKE_TZ_OBJ_NULL;
		ret = 0;
	} else if (UHANDLE_IS_CB_OBJ(uhandle)) {
		server_id = get_server_id(server_fd);
		if (server_id < CBOBJ_SERVER_ID_START)
			goto out;

		mutex_lock(&g_smcinvoke_lock);
		ret = get_pending_cbobj_locked(server_id,
				UHANDLE_GET_CB_OBJ(uhandle));
		mutex_unlock(&g_smcinvoke_lock);
		if (ret)
			goto out;
		*tzhandle = TZHANDLE_MAKE_LOCAL(server_id,
				UHANDLE_GET_CB_OBJ(uhandle));
		ret = 0;
	} else if (UHANDLE_IS_FD(uhandle)) {
		struct dma_buf *dma_buf = NULL;
		struct smcinvoke_file_data *tzobj = NULL;

		if (is_dma_fd(UHANDLE_GET_FD(uhandle), &dma_buf)) {
			server_id = get_server_id(server_fd);
			ret = create_mem_obj(dma_buf, tzhandle, &mem_obj, server_id, uhandle);
			if (!ret && mem_obj_async_support && l_pending_mem_obj) {
				mutex_lock(&g_smcinvoke_lock);
				/* Map the newly created memory object and add
				 * it to the l_pending_mem_obj list.
				 * Before returning to TZ, add the mapping data
				 * to the async side channel so it's available
				 * to TZ together with the memory object.
				 */
				if (!smcinvoke_map_mem_region_locked(mem_obj)) {
					queue_mem_obj_pending_async_locked(mem_obj, l_pending_mem_obj);
				} else {
					pr_err("Failed to map memory region\n");
				}
				mutex_unlock(&g_smcinvoke_lock);
			}
		} else if (is_remote_obj(UHANDLE_GET_FD(uhandle),
				&tzobj, filp)) {
			*tzhandle = tzobj->tzhandle;
			ret = 0;
		}
	}
out:
	return ret;
}
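
/*
 * Illustrative usage sketch, not part of the driver: a hypothetical caller
 * translating the NULL uhandle (-1). This path needs no server FD, takes
 * no lock and simply yields SMCINVOKE_TZ_OBJ_NULL.
 */
static inline int __maybe_unused tzhandle_from_null_uhandle_example(void)
{
	uint32_t tzh = 0;
	LIST_HEAD(pending);	/* would collect mem objs for the async channel */

	return get_tzhandle_from_uhandle(SMCINVOKE_USERSPACE_OBJ_NULL, -1,
			NULL, &tzh, &pending);
}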
static int get_fd_for_obj(uint32_t obj_type, uint32_t obj, int32_t *fd)
{
	int unused_fd = -1, ret = -EINVAL;
	struct file *f = NULL;
	struct smcinvoke_file_data *cxt = NULL;

	cxt = kzalloc(sizeof(*cxt), GFP_KERNEL);
	if (!cxt) {
		ret = -ENOMEM;
		goto out;
	}
	if (obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ ||
			obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
		cxt->context_type = obj_type;
		cxt->tzhandle = obj;
	} else if (obj_type == SMCINVOKE_OBJ_TYPE_SERVER) {
		cxt->context_type = SMCINVOKE_OBJ_TYPE_SERVER;
		cxt->server_id = obj;
	} else {
		goto out;
	}

	unused_fd = get_unused_fd_flags(O_RDWR);
	if (unused_fd < 0)
		goto out;

	if (fd == NULL)
		goto out;

	f = anon_inode_getfile(SMCINVOKE_DEV, &g_smcinvoke_fops, cxt, O_RDWR);
	if (IS_ERR(f))
		goto out;

	*fd = unused_fd;
	fd_install(*fd, f);
	return 0;
out:
	if (unused_fd >= 0)
		put_unused_fd(unused_fd);
	kfree(cxt);
	return ret;
}
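
/*
 * Illustrative usage sketch (assumed, not part of the driver): wrapping a TZ
 * handle in an anonymous-inode fd so userspace can later invoke on it.
 *
 *   int32_t fd;
 *
 *   if (!get_fd_for_obj(SMCINVOKE_OBJ_TYPE_TZ_OBJ, tzhandle, &fd))
 *           return fd;   // fd now carries the TZ object in its private data
 */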
static int get_uhandle_from_tzhandle(int32_t tzhandle, int32_t srvr_id,
		int32_t *uhandle, bool lock, uint32_t context_type)
{
	int ret = -1;

	if (TZHANDLE_IS_NULL(tzhandle)) {
		*uhandle = UHANDLE_NULL;
		ret = 0;
	} else if (TZHANDLE_IS_CB_OBJ(tzhandle)) {
		if (srvr_id != TZHANDLE_GET_SERVER(tzhandle))
			goto out;
		*uhandle = UHANDLE_MAKE_CB_OBJ(TZHANDLE_GET_OBJID(tzhandle));
		MUTEX_LOCK(lock)
		ret = get_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle),
				TZHANDLE_GET_OBJID(tzhandle));
		MUTEX_UNLOCK(lock)
	} else if (TZHANDLE_IS_MEM_RGN_OBJ(tzhandle)) {
		struct smcinvoke_mem_obj *mem_obj = NULL;

		MUTEX_LOCK(lock)
		mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(tzhandle),
				SMCINVOKE_MEM_RGN_OBJ);
		if (mem_obj != NULL) {
			int fd;

			fd = mem_obj->mem_obj_user_fd;
			if (fd < 0)
				goto exit_lock;
			*uhandle = fd;
			ret = 0;
		}
exit_lock:
		MUTEX_UNLOCK(lock)
	} else if (TZHANDLE_IS_REMOTE(tzhandle)) {
		/* if execution comes here => tzhandle is an unsigned int */
		ret = get_fd_for_obj(context_type,
				(uint32_t)tzhandle, uhandle);
	}
out:
	return ret;
}
static int32_t smcinvoke_release_mem_obj_locked(void *buf, size_t buf_len)
{
	struct smcinvoke_tzcb_req *msg = buf;

	if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 0, 0, 0)) {
		pr_err("Invalid object count in %s\n", __func__);
		return OBJECT_ERROR_INVALID;
	}
	trace_release_mem_obj_locked(msg->hdr.tzhandle, buf_len);
	return release_tzhandle_locked(msg->hdr.tzhandle);
}

static int32_t smcinvoke_process_map_mem_region_req(void *buf, size_t buf_len)
{
	int ret = OBJECT_OK;
	struct smcinvoke_tzcb_req *msg = buf;
	struct {
		uint64_t p_addr;
		uint64_t len;
		uint32_t perms;
	} *ob = NULL;
	int32_t *oo = NULL;
	struct smcinvoke_mem_obj *mem_obj = NULL;

	if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 1, 1, 1) ||
			(buf_len - msg->args[0].b.offset < msg->args[0].b.size)) {
		pr_err("Invalid counts received for mapping mem obj\n");
		return OBJECT_ERROR_INVALID;
	}
	/* args[0] = BO, args[1] = OI, args[2] = OO */
	ob = buf + msg->args[0].b.offset;
	oo = &msg->args[2].handle;

	mutex_lock(&g_smcinvoke_lock);
	mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(msg->args[1].handle),
			SMCINVOKE_MEM_RGN_OBJ);
	if (!mem_obj) {
		mutex_unlock(&g_smcinvoke_lock);
		pr_err("Memory object not found\n");
		return OBJECT_ERROR_BADOBJ;
	}

	if (!mem_obj->p_addr) {
		ret = smcinvoke_map_mem_region_locked(mem_obj);
	} else {
		kref_get(&mem_obj->mem_map_obj_ref_cnt);
	}

	if (!ret) {
		ob->p_addr = mem_obj->p_addr;
		ob->len = mem_obj->p_addr_len;
		ob->perms = SMCINVOKE_MEM_PERM_RW;
		*oo = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj->mem_map_obj_id);
	}
	mutex_unlock(&g_smcinvoke_lock);
	return ret;
}

static int32_t smcinvoke_sleep(void *buf, size_t buf_len)
{
	struct smcinvoke_tzcb_req *msg = buf;
	uint32_t sleepTimeMs_val = 0;

	if (msg->hdr.counts != OBJECT_COUNTS_PACK(1, 0, 0, 0) ||
			(buf_len - msg->args[0].b.offset < msg->args[0].b.size)) {
		pr_err("Invalid counts received for sleeping in HLOS\n");
		return OBJECT_ERROR_INVALID;
	}

	/* Time in milliseconds is expected from TZ */
	sleepTimeMs_val = *((uint32_t *)(buf + msg->args[0].b.offset));
	msleep(sleepTimeMs_val);
	return OBJECT_OK;
}
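
/*
 * Worked example (illustrative, assuming the usual OBJECT_COUNTS_PACK(BI, BO,
 * OI, OO) encoding): the sleep request above carries exactly one input buffer
 * and nothing else, so its header counts must equal
 * OBJECT_COUNTS_PACK(1, 0, 0, 0), and args[0].b points at a single uint32_t
 * holding the requested sleep time in milliseconds.
 */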
static void process_kernel_obj(void *buf, size_t buf_len)
{
	struct smcinvoke_tzcb_req *cb_req = buf;

	switch (cb_req->hdr.op) {
	case OBJECT_OP_MAP_REGION:
		pr_debug("Received a request to map memory region\n");
		cb_req->result = smcinvoke_process_map_mem_region_req(buf, buf_len);
		break;
	case OBJECT_OP_YIELD:
		cb_req->result = OBJECT_OK;
		break;
	case OBJECT_OP_SLEEP:
		cb_req->result = smcinvoke_sleep(buf, buf_len);
		break;
	default:
		pr_err("invalid operation for TZ kernel object\n");
		cb_req->result = OBJECT_ERROR_INVALID;
		break;
	}
}

static void process_mem_obj(void *buf, size_t buf_len)
{
	struct smcinvoke_tzcb_req *cb_req = buf;

	mutex_lock(&g_smcinvoke_lock);
	cb_req->result = (cb_req->hdr.op == OBJECT_OP_RELEASE) ?
			smcinvoke_release_mem_obj_locked(buf, buf_len) :
			OBJECT_ERROR_INVALID;
	mutex_unlock(&g_smcinvoke_lock);
}

static int invoke_cmd_handler(int cmd, phys_addr_t in_paddr, size_t in_buf_len,
		uint8_t *out_buf, phys_addr_t out_paddr,
		size_t out_buf_len, int32_t *result, u64 *response_type,
		unsigned int *data, struct qtee_shm *in_shm,
		struct qtee_shm *out_shm)
{
	int ret = 0;

	switch (cmd) {
	case SMCINVOKE_INVOKE_CMD_LEGACY:
		qtee_shmbridge_flush_shm_buf(in_shm);
		qtee_shmbridge_flush_shm_buf(out_shm);
		ret = qcom_scm_invoke_smc_legacy(in_paddr, in_buf_len, out_paddr, out_buf_len,
				result, response_type, data);
		qtee_shmbridge_inv_shm_buf(in_shm);
		qtee_shmbridge_inv_shm_buf(out_shm);
		break;
	case SMCINVOKE_INVOKE_CMD:
		ret = qcom_scm_invoke_smc(in_paddr, in_buf_len, out_paddr, out_buf_len,
				result, response_type, data);
		break;
	case SMCINVOKE_CB_RSP_CMD:
		if (legacy_smc_call)
			qtee_shmbridge_flush_shm_buf(out_shm);
		ret = qcom_scm_invoke_callback_response(virt_to_phys(out_buf), out_buf_len,
				result, response_type, data);
		if (legacy_smc_call) {
			qtee_shmbridge_inv_shm_buf(in_shm);
			qtee_shmbridge_inv_shm_buf(out_shm);
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	trace_invoke_cmd_handler(cmd, *response_type, *result, ret);
	return ret;
}
/*
 * Buf should be aligned to struct smcinvoke_tzcb_req
 */
static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
{
	/* ret is going to TZ. Provide values from OBJECT_ERROR_<> */
	int ret = OBJECT_ERROR_DEFUNCT;
	int cbobj_retries = 0;
	long timeout_jiff;
	bool wait_interrupted = false;
	struct smcinvoke_cb_txn *cb_txn = NULL;
	struct smcinvoke_tzcb_req *cb_req = NULL, *tmp_cb_req = NULL;
	struct smcinvoke_server_info *srvr_info = NULL;
	struct smcinvoke_mem_obj *mem_obj = NULL;
	uint16_t server_id = 0;

	if (buf_len < sizeof(struct smcinvoke_tzcb_req)) {
		pr_err("smaller buffer length : %zu\n", buf_len);
		return;
	}

	cb_req = buf;

	/* check whether it is to be served by kernel or userspace */
	if (TZHANDLE_IS_KERNEL_OBJ(cb_req->hdr.tzhandle)) {
		return process_kernel_obj(buf, buf_len);
	} else if (TZHANDLE_IS_MEM_MAP_OBJ(cb_req->hdr.tzhandle)) {
		/*
		 * MEM_MAP memory object is created and owned by kernel,
		 * hence its processing (handling deletion) is done in
		 * kernel context.
		 */
		return process_mem_obj(buf, buf_len);
	} else if (TZHANDLE_IS_MEM_RGN_OBJ(cb_req->hdr.tzhandle)) {
		/*
		 * MEM_RGN memory objects are created and owned by userspace,
		 * and hence their deletion/handling requires going back to
		 * userspace, similar to that of callback objects. If we enter
		 * this 'if' condition, it's a no-op here, and we proceed as in
		 * the case of callback objects.
		 */
	} else if (!TZHANDLE_IS_CB_OBJ(cb_req->hdr.tzhandle)) {
		pr_err("Request object is not a callback object\n");
		cb_req->result = OBJECT_ERROR_INVALID;
		return;
	}

	/*
	 * We need a copy of req that could be sent to server. Otherwise, if
	 * someone kills invoke caller, buf would go away and server would be
	 * working on already freed buffer, causing a device crash.
	 */
	tmp_cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
	if (!tmp_cb_req) {
		/* we need to return error to caller so fill up result */
		cb_req->result = OBJECT_ERROR_KMEM;
		pr_err("failed to create copy of request, set result: %d\n",
				cb_req->result);
		return;
	}

	cb_txn = kzalloc(sizeof(*cb_txn), GFP_KERNEL);
	if (!cb_txn) {
		cb_req->result = OBJECT_ERROR_KMEM;
		pr_err("failed to allocate memory for request, result: %d\n",
				cb_req->result);
		kfree(tmp_cb_req);
		return;
	}
	/* no need for memcpy as we did kmemdup() above */
	cb_req = tmp_cb_req;

	trace_process_tzcb_req_handle(cb_req->hdr.tzhandle, cb_req->hdr.op, cb_req->hdr.counts);

	cb_txn->state = SMCINVOKE_REQ_PLACED;
	cb_txn->cb_req = cb_req;
	cb_txn->cb_req_bytes = buf_len;
	cb_txn->filp_to_release = arr_filp;
	kref_init(&cb_txn->ref_cnt);

	mutex_lock(&g_smcinvoke_lock);
	++cb_reqs_inflight;

	if (TZHANDLE_IS_MEM_RGN_OBJ(cb_req->hdr.tzhandle)) {
		mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(cb_req->hdr.tzhandle),
				SMCINVOKE_MEM_RGN_OBJ);
		if (!mem_obj) {
			pr_err("mem obj with tzhandle : %d not found\n",
					cb_req->hdr.tzhandle);
			mutex_unlock(&g_smcinvoke_lock);
			goto out;
		}
		server_id = mem_obj->server->server_id;
	} else {
		server_id = TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle);
	}

	srvr_info = get_cb_server_locked(server_id);
	if (!srvr_info || srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
		/* ret equals OBJECT_ERROR_DEFUNCT at this point; go to out */
		if (!srvr_info)
			pr_err("server is invalid\n");
		else {
			pr_err("server is defunct, state= %d tzhandle = %d\n",
					srvr_info->state, cb_req->hdr.tzhandle);
		}
		mutex_unlock(&g_smcinvoke_lock);
		goto out;
	}

	cb_txn->txn_id = ++srvr_info->txn_id;
	hash_add(srvr_info->reqs_table, &cb_txn->hash, cb_txn->txn_id);
	mutex_unlock(&g_smcinvoke_lock);

	trace_process_tzcb_req_wait(cb_req->hdr.tzhandle, cbobj_retries, cb_txn->txn_id,
			current->pid, current->tgid, srvr_info->state, srvr_info->server_id,
			cb_reqs_inflight);
	/*
	 * we need not worry that server_info will be deleted because as long
	 * as this CBObj is served by this server, srvr_info will be valid.
	 */
	wake_up_interruptible_all(&srvr_info->req_wait_q);
	/* time out in well under 1 s, otherwise TZ would report itself busy */
	timeout_jiff = msecs_to_jiffies(100);

	while (cbobj_retries < CBOBJ_MAX_RETRIES) {
		if (wait_interrupted) {
			ret = wait_event_timeout(srvr_info->rsp_wait_q,
					(cb_txn->state == SMCINVOKE_REQ_PROCESSED) ||
					(srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT),
					timeout_jiff);
		} else {
			ret = wait_event_interruptible_timeout(srvr_info->rsp_wait_q,
					(cb_txn->state == SMCINVOKE_REQ_PROCESSED) ||
					(srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT),
					timeout_jiff);
		}
		if (ret == 0) {
			if (srvr_info->is_server_suspended == 0) {
				pr_err("CBobj timed out waiting on cbtxn :%d, cb-tzhandle:%d, retry:%d, op:%d counts :%d\n",
						cb_txn->txn_id, cb_req->hdr.tzhandle, cbobj_retries,
						cb_req->hdr.op, cb_req->hdr.counts);
				pr_err("CBobj %d timed out pid %x, tid %x, srvr state=%d, srvr id:%u\n",
						cb_req->hdr.tzhandle, current->pid,
						current->tgid, srvr_info->state,
						srvr_info->server_id);
			}
		} else {
			/* wait_event returned due to a signal */
			if (srvr_info->state != SMCINVOKE_SERVER_STATE_DEFUNCT &&
					cb_txn->state != SMCINVOKE_REQ_PROCESSED) {
				wait_interrupted = true;
			} else {
				break;
			}
		}
		/*
		 * If the bit corresponding to any accept thread is set, invoke
		 * threads should wait indefinitely for the accept thread to
		 * come back with a response.
		 */
		if (srvr_info->is_server_suspended > 0) {
			cbobj_retries = 0;
		} else {
			cbobj_retries++;
		}
	}

out:
	/*
	 * we could be here because of either:
	 * a. Req is PROCESSED
	 * b. Server was killed
	 * c. Invoke thread is killed
	 * sometimes invoke thread and server are part of the same process.
	 */
	mutex_lock(&g_smcinvoke_lock);
	hash_del(&cb_txn->hash);
	if (ret == 0) {
		pr_err("CBObj timed out! No more retries\n");
		cb_req->result = Object_ERROR_TIMEOUT;
	} else if (ret == -ERESTARTSYS) {
		pr_err("wait event interrupted, ret: %d\n", ret);
		cb_req->result = OBJECT_ERROR_ABORT;
	} else {
		if (cb_txn->state == SMCINVOKE_REQ_PROCESSED) {
			/*
			 * it is possible that server was killed immediately
			 * after CB Req was processed but who cares now!
			 */
		} else if (!srvr_info ||
				srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
			cb_req->result = OBJECT_ERROR_DEFUNCT;
			pr_err("server invalid, res: %d\n", cb_req->result);
		} else {
			pr_err("%s: unexpected event happened, ret:%d\n", __func__, ret);
			cb_req->result = OBJECT_ERROR_ABORT;
		}
	}
	--cb_reqs_inflight;

	trace_process_tzcb_req_result(cb_req->result, cb_req->hdr.tzhandle, cb_req->hdr.op,
			cb_req->hdr.counts, cb_reqs_inflight);

	memcpy(buf, cb_req, buf_len);
	if (TZHANDLE_IS_MEM_RGN_OBJ(cb_req->hdr.tzhandle)) {
		mutex_unlock(&g_smcinvoke_lock);
		process_mem_obj(buf, buf_len);
		pr_err("pid : %x, mem obj deleted\n", current->pid);
		mutex_lock(&g_smcinvoke_lock);
	}

	kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
	if (srvr_info)
		kref_put(&srvr_info->ref_cnt, destroy_cb_server);
	mutex_unlock(&g_smcinvoke_lock);
}
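
/*
 * Illustrative timing sketch (derived from the loop above, exact totals
 * depend on the CBOBJ_MAX_RETRIES value): each wait times out after 100 ms,
 * so an unresponsive server gets roughly CBOBJ_MAX_RETRIES * 100 ms before
 * the request fails with Object_ERROR_TIMEOUT. A suspended server resets the
 * retry counter each pass, which makes the wait effectively unbounded until
 * the accept thread resumes.
 */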
static int marshal_out_invoke_req(const uint8_t *buf, uint32_t buf_size,
		struct smcinvoke_cmd_req *req,
		union smcinvoke_arg *args_buf,
		uint32_t context_type)
{
	int ret = -EINVAL, i = 0;
	int32_t temp_fd = UHANDLE_NULL;
	union smcinvoke_tz_args *tz_args = NULL;
	size_t offset = sizeof(struct smcinvoke_msg_hdr) +
			OBJECT_COUNTS_TOTAL(req->counts) *
			sizeof(union smcinvoke_tz_args);

	if (offset > buf_size)
		goto out;

	tz_args = (union smcinvoke_tz_args *)
			(buf + sizeof(struct smcinvoke_msg_hdr));
	tz_args += OBJECT_COUNTS_NUM_BI(req->counts);

	if (args_buf == NULL)
		return 0;

	FOR_ARGS(i, req->counts, BO) {
		args_buf[i].b.size = tz_args->b.size;
		if ((buf_size - tz_args->b.offset < tz_args->b.size) ||
				tz_args->b.offset > buf_size) {
			pr_err("%s: buffer overflow detected\n", __func__);
			goto out;
		}
		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
			if (copy_to_user((void __user *)
					(uintptr_t)(args_buf[i].b.addr),
					(uint8_t *)(buf) + tz_args->b.offset,
					tz_args->b.size)) {
				pr_err("Error %d copying context to user\n", ret);
				goto out;
			}
		} else {
			memcpy((uint8_t *)(args_buf[i].b.addr),
					(uint8_t *)(buf) + tz_args->b.offset,
					tz_args->b.size);
		}
		tz_args++;
	}
	tz_args += OBJECT_COUNTS_NUM_OI(req->counts);

	FOR_ARGS(i, req->counts, OO) {
		/*
		 * create a new FD and assign it to the output object's context.
		 * We are passing cb_server_fd from the output param in case OO
		 * is a CBObj. For a CBObj, we have to ensure that it is sent
		 * to the server which serves it, and that info comes from
		 * userspace.
		 */
		temp_fd = UHANDLE_NULL;
		ret = get_uhandle_from_tzhandle(tz_args->handle,
				TZHANDLE_GET_SERVER(tz_args->handle),
				&temp_fd, NO_LOCK, context_type);
		args_buf[i].o.fd = temp_fd;
		if (ret)
			goto out;
		trace_marshal_out_invoke_req(i, tz_args->handle,
				TZHANDLE_GET_SERVER(tz_args->handle), temp_fd);
		tz_args++;
	}
	ret = 0;
out:
	return ret;
}
static bool is_inbound_req(int val)
{
	return (val == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED ||
			val == QSEOS_RESULT_INCOMPLETE ||
			val == QSEOS_RESULT_BLOCKED_ON_LISTENER);
}

static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
		size_t in_buf_len,
		uint8_t *out_buf, phys_addr_t out_paddr,
		size_t out_buf_len,
		struct smcinvoke_cmd_req *req,
		union smcinvoke_arg *args_buf,
		bool *tz_acked, uint32_t context_type,
		struct qtee_shm *in_shm, struct qtee_shm *out_shm)
{
	int ret = 0, cmd, retry_count = 0;
	u64 response_type;
	unsigned int data;
	struct file *arr_filp[OBJECT_COUNTS_MAX_OO] = {NULL};

	*tz_acked = false;

	/* buf size should be page aligned */
	if ((in_buf_len % PAGE_SIZE) != 0 || (out_buf_len % PAGE_SIZE) != 0)
		return -EINVAL;

	cmd = invoke_cmd;

	while (1) {
		do {
			ret = invoke_cmd_handler(cmd, in_paddr, in_buf_len, out_buf,
					out_paddr, out_buf_len, &req->result,
					&response_type, &data, in_shm, out_shm);
			if (ret == -EBUSY) {
				pr_err("Secure side is busy, will retry after 30 ms, retry_count = %d\n",
						retry_count);
				msleep(SMCINVOKE_SCM_EBUSY_WAIT_MS);
			}
		} while ((ret == -EBUSY) &&
				(retry_count++ < SMCINVOKE_SCM_EBUSY_MAX_RETRY));

		if (!ret && !is_inbound_req(response_type)) {
			/* don't marshal if the object returned an error */
			if (!req->result) {
				if (args_buf != NULL)
					ret = marshal_out_invoke_req(in_buf,
							in_buf_len, req, args_buf,
							context_type);
			}
			*tz_acked = true;
		}

		if (cmd == SMCINVOKE_CB_RSP_CMD)
			release_filp(arr_filp, OBJECT_COUNTS_MAX_OO);

		if (ret || !is_inbound_req(response_type))
			break;

		/* process listener request */
		if (response_type == QSEOS_RESULT_INCOMPLETE ||
				response_type == QSEOS_RESULT_BLOCKED_ON_LISTENER) {
			ret = qseecom_process_listener_from_smcinvoke(
					&req->result, &response_type, &data);
			trace_prepare_send_scm_msg(response_type, req->result);

			if (!req->result &&
					response_type != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) {
				ret = marshal_out_invoke_req(in_buf,
						in_buf_len, req, args_buf,
						context_type);
			}
			*tz_acked = true;
		}
		/*
		 * qseecom does not understand smcinvoke's callback object and
		 * erroneously sets the ret value to -EINVAL. We need to handle it.
		 */
		if (response_type != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED)
			break;

		if (response_type == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) {
			trace_status(__func__, "looks like inbound req needed");
			process_tzcb_req(out_buf, out_buf_len, arr_filp);
			cmd = SMCINVOKE_CB_RSP_CMD;
		}
	}
	return ret;
}
/*
 * SMC expects arguments in the following format
 * ---------------------------------------------------------------------------
 * | cxt | op | counts | ptr|size |ptr|size...|ORef|ORef|...| rest of payload |
 * ---------------------------------------------------------------------------
 * cxt: target, op: operation, counts: total arguments
 * offset: offset is from beginning of buffer i.e. cxt
 * size: size is an 8-byte aligned value
 */
static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req,
		const union smcinvoke_arg *args_buf)
{
	uint32_t i = 0;
	size_t total_size = sizeof(struct smcinvoke_msg_hdr) +
			OBJECT_COUNTS_TOTAL(req->counts) *
			sizeof(union smcinvoke_tz_args);

	/* Computed total_size should be 8 bytes aligned from start of buf */
	total_size = ALIGN(total_size, SMCINVOKE_ARGS_ALIGN_SIZE);

	/* each buffer has to be 8 bytes aligned */
	while (i < OBJECT_COUNTS_NUM_buffers(req->counts))
		total_size = size_add_(total_size,
				size_align(args_buf[i++].b.size,
				SMCINVOKE_ARGS_ALIGN_SIZE));

	return PAGE_ALIGN(total_size);
}
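
/*
 * Worked example (illustrative, with assumed argument sizes): for a request
 * with two buffer args of 100 and 4096 bytes plus one object arg, the fixed
 * part is sizeof(msg_hdr) + 3 * sizeof(tz_args). Each buffer is then rounded
 * up to the 8-byte SMCINVOKE_ARGS_ALIGN_SIZE (100 -> 104, 4096 -> 4096), and
 * the grand total is rounded up to a whole page by PAGE_ALIGN() before the
 * shared-memory allocation.
 */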
static int marshal_in_invoke_req(const struct smcinvoke_cmd_req *req,
		const union smcinvoke_arg *args_buf, uint32_t tzhandle,
		uint8_t *buf, size_t buf_size, struct file **arr_filp,
		int32_t *tzhandles_to_release, uint32_t context_type,
		struct list_head *l_pending_mem_obj)
{
	int ret = -EINVAL, i = 0, j = 0, k = 0;
	const struct smcinvoke_msg_hdr msg_hdr = {
			tzhandle, req->op, req->counts};
	uint32_t offset = sizeof(struct smcinvoke_msg_hdr) +
			sizeof(union smcinvoke_tz_args) *
			OBJECT_COUNTS_TOTAL(req->counts);
	union smcinvoke_tz_args *tz_args = NULL;

	if (buf_size < offset)
		goto out;

	*(struct smcinvoke_msg_hdr *)buf = msg_hdr;
	tz_args = (union smcinvoke_tz_args *)(buf +
			sizeof(struct smcinvoke_msg_hdr));

	if (args_buf == NULL)
		return 0;

	FOR_ARGS(i, req->counts, BI) {
		offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
		if ((offset > buf_size) ||
				(args_buf[i].b.size > (buf_size - offset)))
			goto out;

		tz_args[i].b.offset = offset;
		tz_args[i].b.size = args_buf[i].b.size;
		if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
			if (copy_from_user(buf + offset,
					(void __user *)(uintptr_t)(args_buf[i].b.addr),
					args_buf[i].b.size))
				goto out;
		} else {
			memcpy(buf + offset, (void *)(args_buf[i].b.addr),
					args_buf[i].b.size);
		}
		offset += args_buf[i].b.size;
	}
	FOR_ARGS(i, req->counts, BO) {
		offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
		if ((offset > buf_size) ||
				(args_buf[i].b.size > (buf_size - offset)))
			goto out;

		tz_args[i].b.offset = offset;
		tz_args[i].b.size = args_buf[i].b.size;
		offset += args_buf[i].b.size;
	}
	FOR_ARGS(i, req->counts, OI) {
		ret = get_tzhandle_from_uhandle(args_buf[i].o.fd,
				args_buf[i].o.cb_server_fd, &arr_filp[j++],
				&(tz_args[i].handle), l_pending_mem_obj);
		if (ret)
			goto out;
		trace_marshal_in_invoke_req(i, args_buf[i].o.fd,
				args_buf[i].o.cb_server_fd, tz_args[i].handle);
		tzhandles_to_release[k++] = tz_args[i].handle;
	}
	ret = 0;
out:
	return ret;
}
static int marshal_in_tzcb_req(const struct smcinvoke_cb_txn *cb_txn,
		struct smcinvoke_accept *user_req, int srvr_id)
{
	int ret = 0, i = 0;
	int32_t temp_fd = UHANDLE_NULL;
	union smcinvoke_arg tmp_arg;
	struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req;
	union smcinvoke_tz_args *tz_args = tzcb_req->args;
	size_t tzcb_req_len = cb_txn->cb_req_bytes;
	size_t tz_buf_offset = TZCB_BUF_OFFSET(tzcb_req);
	size_t user_req_buf_offset = sizeof(union smcinvoke_arg) *
			OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts);

	if (tz_buf_offset > tzcb_req_len) {
		ret = -EINVAL;
		goto out;
	}

	user_req->txn_id = cb_txn->txn_id;
	if (get_uhandle_from_tzhandle(tzcb_req->hdr.tzhandle, srvr_id,
			&user_req->cbobj_id, TAKE_LOCK,
			SMCINVOKE_OBJ_TYPE_TZ_OBJ)) {
		ret = -EINVAL;
		goto out;
	}
	user_req->op = tzcb_req->hdr.op;
	user_req->counts = tzcb_req->hdr.counts;
	user_req->argsize = sizeof(union smcinvoke_arg);

	trace_marshal_in_tzcb_req_handle(tzcb_req->hdr.tzhandle, srvr_id,
			user_req->cbobj_id, user_req->op, user_req->counts);

	FOR_ARGS(i, tzcb_req->hdr.counts, BI) {
		user_req_buf_offset = size_align(user_req_buf_offset,
				SMCINVOKE_ARGS_ALIGN_SIZE);
		tmp_arg.b.size = tz_args[i].b.size;
		if ((tz_args[i].b.offset > tzcb_req_len) ||
				(tz_args[i].b.size > tzcb_req_len - tz_args[i].b.offset) ||
				(user_req_buf_offset > user_req->buf_len) ||
				(tmp_arg.b.size >
				user_req->buf_len - user_req_buf_offset)) {
			ret = -EINVAL;
			pr_err("%s: buffer overflow detected\n", __func__);
			goto out;
		}
		tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset;

		if (copy_to_user(u64_to_user_ptr
				(user_req->buf_addr + i * sizeof(tmp_arg)),
				&tmp_arg, sizeof(tmp_arg)) ||
				copy_to_user(u64_to_user_ptr(tmp_arg.b.addr),
				(uint8_t *)(tzcb_req) + tz_args[i].b.offset,
				tz_args[i].b.size)) {
			ret = -EFAULT;
			goto out;
		}
		user_req_buf_offset += tmp_arg.b.size;
	}
	FOR_ARGS(i, tzcb_req->hdr.counts, BO) {
		user_req_buf_offset = size_align(user_req_buf_offset,
				SMCINVOKE_ARGS_ALIGN_SIZE);
		tmp_arg.b.size = tz_args[i].b.size;
		if ((user_req_buf_offset > user_req->buf_len) ||
				(tmp_arg.b.size >
				user_req->buf_len - user_req_buf_offset)) {
			ret = -EINVAL;
			pr_err("%s: buffer overflow detected\n", __func__);
			goto out;
		}
		tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset;

		if (copy_to_user(u64_to_user_ptr
				(user_req->buf_addr + i * sizeof(tmp_arg)),
				&tmp_arg, sizeof(tmp_arg))) {
			ret = -EFAULT;
			goto out;
		}
		user_req_buf_offset += tmp_arg.b.size;
	}
	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
		/*
		 * create a new FD and assign it to the output object's
		 * context
		 */
		temp_fd = UHANDLE_NULL;
		ret = get_uhandle_from_tzhandle(tz_args[i].handle, srvr_id,
				&temp_fd, TAKE_LOCK, SMCINVOKE_OBJ_TYPE_TZ_OBJ);
		tmp_arg.o.fd = temp_fd;
		if (ret) {
			ret = -EINVAL;
			goto out;
		}
		if (copy_to_user(u64_to_user_ptr
				(user_req->buf_addr + i * sizeof(tmp_arg)),
				&tmp_arg, sizeof(tmp_arg))) {
			ret = -EFAULT;
			goto out;
		}
		trace_marshal_in_tzcb_req_fd(i, tz_args[i].handle, srvr_id, temp_fd);
	}
out:
	return ret;
}
static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
		struct smcinvoke_cb_txn *cb_txn,
		struct file **arr_filp)
{
	int ret = -EINVAL, i = 0;
	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
	struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req;
	union smcinvoke_tz_args *tz_args = tzcb_req->args;

	release_tzhandles(&cb_txn->cb_req->hdr.tzhandle, 1);
	tzcb_req->result = user_req->result;
	/*
	 * Return without marshaling user args if the destination callback
	 * invocation was unsuccessful.
	 */
	if (tzcb_req->result != 0) {
		ret = 0;
		goto out;
	}
	FOR_ARGS(i, tzcb_req->hdr.counts, BO) {
		union smcinvoke_arg tmp_arg;

		if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr(
				user_req->buf_addr + i * sizeof(union smcinvoke_arg)),
				sizeof(union smcinvoke_arg))) {
			ret = -EFAULT;
			goto out;
		}
		if (tmp_arg.b.size > tz_args[i].b.size)
			goto out;
		if (copy_from_user((uint8_t *)(tzcb_req) + tz_args[i].b.offset,
				u64_to_user_ptr(tmp_arg.b.addr),
				tmp_arg.b.size)) {
			ret = -EFAULT;
			goto out;
		}
	}

	FOR_ARGS(i, tzcb_req->hdr.counts, OO) {
		union smcinvoke_arg tmp_arg;

		if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr(
				user_req->buf_addr + i * sizeof(union smcinvoke_arg)),
				sizeof(union smcinvoke_arg))) {
			ret = -EFAULT;
			goto out;
		}
		ret = get_tzhandle_from_uhandle(tmp_arg.o.fd,
				tmp_arg.o.cb_server_fd, &arr_filp[i],
				&(tz_args[i].handle), NULL);
		if (ret)
			goto out;
		tzhandles_to_release[i] = tz_args[i].handle;

		trace_marshal_out_tzcb_req(i, tmp_arg.o.fd,
				tmp_arg.o.cb_server_fd, tz_args[i].handle);
	}
	ret = 0;
out:
	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
		if (TZHANDLE_IS_CB_OBJ(tz_args[i].handle))
			release_tzhandles(&tz_args[i].handle, 1);
	}
	if (ret)
		release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO);
	return ret;
}
static void set_tz_version(uint32_t tz_version)
{
	tz_async_version = tz_version;

	/* We enable async memory object support when the TZ async
	 * version is equal to or greater than the driver version.
	 * It is expected that if the protocol changes in later
	 * TZ versions, TZ will support backward compatibility,
	 * so this condition should still be valid.
	 */
	if (tz_version >= SMCINVOKE_ASYNC_VERSION) {
		mem_obj_async_support = true;
		pr_debug("Enabled asynchronous memory object support\n");
	}
}
static void process_piggyback_data(void *buf, size_t buf_size)
{
	int i;
	struct smcinvoke_tzcb_req req = {0};
	struct smcinvoke_piggyback_msg *msg = buf;
	int32_t *objs = msg->objs;

	for (i = 0; i < msg->counts; i++) {
		req.hdr.op = msg->op;
		req.hdr.counts = 0; /* release op does not require any args */
		req.hdr.tzhandle = objs[i];
		if (tz_async_version == 0)
			set_tz_version(msg->version);
		process_tzcb_req(&req, sizeof(struct smcinvoke_tzcb_req), NULL);
		/* cbobjs_in_flight will be adjusted during CB processing */
	}
}
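
/*
 * Illustrative layout sketch (inferred from the parsing above): the piggyback
 * area of the out buffer carries a version, an op (typically a release), a
 * count, and then `counts` packed int32_t tzhandles. Each handle is replayed
 * through process_tzcb_req() as a standalone zero-argument request.
 */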
/* Add memory object mapped data to the async side channel, so it's available
 * to TZ together with the memory object.
 *
 * No return value, as TZ is always able to explicitly ask for this
 * information in case this function fails.
 */
static void add_mem_obj_info_to_async_side_channel_locked(void *buf, size_t buf_size,
		struct list_head *l_pending_mem_obj)
{
	struct smcinvoke_mem_obj_msg *msg = buf;
	struct smcinvoke_mem_obj_pending_async *mem_obj_pending = NULL;
	size_t header_size = 0;
	size_t mo_size = 0;
	size_t used = 0;
	size_t index = 0;

	if (list_empty(l_pending_mem_obj))
		return;

	header_size = sizeof(struct smcinvoke_mem_obj_msg);
	mo_size = sizeof(struct smcinvoke_mem_obj_info);

	/* Minimal size required is the header data + one mem obj info */
	if (buf_size < header_size + mo_size) {
		pr_err("Unable to add memory object info to async channel\n");
		return;
	}

	msg->version = SMCINVOKE_ASYNC_VERSION;
	msg->op = SMCINVOKE_ASYNC_OP_MEMORY_OBJECT;
	msg->count = 0;

	used = header_size;
	index = 0;

	list_for_each_entry(mem_obj_pending, l_pending_mem_obj, list) {
		if (mem_obj_pending->mem_obj == NULL) {
			pr_err("Memory object is no longer valid\n");
			continue;
		}
		if (used + mo_size > buf_size) {
			pr_err("Not all memory object info was added to the async channel\n");
			break;
		}
		msg->mo[index].memObjRef = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID,
				mem_obj_pending->mem_obj->mem_region_id);
		msg->mo[index].mapObjRef = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID,
				mem_obj_pending->mem_obj->mem_map_obj_id);
		msg->mo[index].addr = mem_obj_pending->mem_obj->p_addr;
		msg->mo[index].size = mem_obj_pending->mem_obj->p_addr_len;
		msg->mo[index].perm = SMCINVOKE_MEM_PERM_RW;
		used += sizeof(msg->mo[index]);
		index++;
	}
	msg->count = index;
	pr_debug("Added %zu memory objects to the side channel, total size = %zu\n",
			index, used);
}
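
/*
 * Worked capacity example (illustrative, assuming a one-page out buffer): the
 * side channel can carry at most
 *     (4096 - sizeof(struct smcinvoke_mem_obj_msg)) /
 *             sizeof(struct smcinvoke_mem_obj_info)
 * entries. Anything beyond that is dropped here; per the comment above, TZ
 * can still fetch the mapping data for the remaining objects explicitly
 * (e.g. via OBJECT_OP_MAP_REGION).
 */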
/*
 * Delete the entire pending async list.
 */
static void delete_pending_async_list_locked(struct list_head *l_pending_mem_obj)
{
	struct smcinvoke_mem_obj_pending_async *mem_obj_pending = NULL;
	struct smcinvoke_mem_obj_pending_async *temp = NULL;

	if (list_empty(l_pending_mem_obj))
		return;

	list_for_each_entry_safe(mem_obj_pending, temp, l_pending_mem_obj, list) {
		mem_obj_pending->mem_obj = NULL;
		list_del(&mem_obj_pending->list);
		kfree(mem_obj_pending);
	}
}
static long process_ack_local_obj(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1;
	int32_t local_obj = SMCINVOKE_USERSPACE_OBJ_NULL;
	struct smcinvoke_file_data *filp_data = filp->private_data;

	if (_IOC_SIZE(cmd) != sizeof(int32_t))
		return -EINVAL;

	ret = copy_from_user(&local_obj, (void __user *)(uintptr_t)arg,
			sizeof(int32_t));
	if (ret)
		return -EFAULT;

	mutex_lock(&g_smcinvoke_lock);
	if (UHANDLE_IS_CB_OBJ(local_obj))
		ret = put_pending_cbobj_locked(filp_data->server_id,
				UHANDLE_GET_CB_OBJ(local_obj));
	mutex_unlock(&g_smcinvoke_lock);

	return ret;
}
static long process_server_req(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1;
	int32_t server_fd = -1;
	struct smcinvoke_server server_req = {0};
	struct smcinvoke_server_info *server_info = NULL;

	if (_IOC_SIZE(cmd) != sizeof(server_req)) {
		pr_err("invalid command size received for server request\n");
		return -EINVAL;
	}
	ret = copy_from_user(&server_req, (void __user *)(uintptr_t)arg,
			sizeof(server_req));
	if (ret) {
		pr_err("copying server request from user failed\n");
		return -EFAULT;
	}
	server_info = kzalloc(sizeof(*server_info), GFP_KERNEL);
	if (!server_info)
		return -ENOMEM;

	kref_init(&server_info->ref_cnt);
	init_waitqueue_head(&server_info->req_wait_q);
	init_waitqueue_head(&server_info->rsp_wait_q);
	server_info->cb_buf_size = server_req.cb_buf_size;
	hash_init(server_info->reqs_table);
	hash_init(server_info->responses_table);
	INIT_LIST_HEAD(&server_info->pending_cbobjs);
	server_info->is_server_suspended = 0;

	mutex_lock(&g_smcinvoke_lock);

	server_info->server_id = next_cb_server_id_locked();
	hash_add(g_cb_servers, &server_info->hash,
			server_info->server_id);
	if (g_max_cb_buf_size < server_req.cb_buf_size)
		g_max_cb_buf_size = server_req.cb_buf_size;

	mutex_unlock(&g_smcinvoke_lock);
	ret = get_fd_for_obj(SMCINVOKE_OBJ_TYPE_SERVER,
			server_info->server_id, &server_fd);

	if (ret)
		release_cb_server(server_info->server_id);

	return server_fd;
}
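
/*
 * Illustrative userspace sketch (assumed caller code, using the ioctl handled
 * above): registering a callback server and obtaining its server fd.
 *
 *   struct smcinvoke_server srv = { .cb_buf_size = 4096 };
 *   int server_fd = ioctl(smcinvoke_fd, SMCINVOKE_IOCTL_SERVER_REQ, &srv);
 *
 *   // server_fd < 0 on error; otherwise pass it as o.cb_server_fd when
 *   // sending callback objects to TZ.
 */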
static long process_accept_req(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1;
	struct smcinvoke_file_data *server_obj = filp->private_data;
	struct smcinvoke_accept user_args = {0};
	struct smcinvoke_cb_txn *cb_txn = NULL;
	struct smcinvoke_server_info *server_info = NULL;

	if (_IOC_SIZE(cmd) != sizeof(struct smcinvoke_accept)) {
		pr_err("command size invalid for accept request\n");
		return -EINVAL;
	}

	if (copy_from_user(&user_args, (void __user *)arg,
			sizeof(struct smcinvoke_accept))) {
		pr_err("copying accept request from user failed\n");
		return -EFAULT;
	}

	if (user_args.argsize != sizeof(union smcinvoke_arg)) {
		pr_err("arguments size is invalid for accept thread\n");
		return -EINVAL;
	}

	/* ACCEPT is available only on server obj */
	if (server_obj->context_type != SMCINVOKE_OBJ_TYPE_SERVER) {
		pr_err("invalid object type received for accept req\n");
		return -EPERM;
	}

	mutex_lock(&g_smcinvoke_lock);
	server_info = get_cb_server_locked(server_obj->server_id);
	if (!server_info) {
		pr_err("No matching server with server id : %u found\n",
				server_obj->server_id);
		mutex_unlock(&g_smcinvoke_lock);
		return -EINVAL;
	}

	if (server_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT)
		server_info->state = 0;

	server_info->is_server_suspended = UNSET_BIT(server_info->is_server_suspended,
			(current->pid) % DEFAULT_CB_OBJ_THREAD_CNT);

	mutex_unlock(&g_smcinvoke_lock);

	/* First check if it has a response; otherwise wait for a req */
	if (user_args.has_resp) {
		trace_process_accept_req_has_response(current->pid, current->tgid);

		mutex_lock(&g_smcinvoke_lock);
		cb_txn = find_cbtxn_locked(server_info, user_args.txn_id,
				SMCINVOKE_REQ_PROCESSING);
		mutex_unlock(&g_smcinvoke_lock);

		/*
		 * cb_txn can be NULL if userspace provides a wrong txn id OR
		 * the invoke thread died while the server was processing the
		 * cb req. If the invoke thread dies, it removes the req from
		 * the Q, so no matching cb_txn would be on the Q and hence
		 * cb_txn is NULL. In this case, we want this thread to start
		 * waiting for new cb requests.
		 */
		if (!cb_txn) {
			pr_err("%s txn %d either invalid or removed from Q\n",
					__func__, user_args.txn_id);
			goto start_waiting_for_requests;
		}

		ret = marshal_out_tzcb_req(&user_args, cb_txn,
				cb_txn->filp_to_release);
		/*
		 * if the client did not set an error and we get an error
		 * locally, we return the local error to the TA
		 */
		if (ret && cb_txn->cb_req->result == 0)
			cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;

		cb_txn->state = SMCINVOKE_REQ_PROCESSED;

		mutex_lock(&g_smcinvoke_lock);
		kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
		mutex_unlock(&g_smcinvoke_lock);
		wake_up(&server_info->rsp_wait_q);
		/*
		 * if marshal_out fails, we should let userspace release
		 * any ref/obj it created for CB processing
		 */
		if (ret && OBJECT_COUNTS_NUM_OO(user_args.counts))
			goto out;
	}

start_waiting_for_requests:
	/*
	 * Once the response has been delivered, the thread will wait for
	 * another callback req to process.
	 */
	do {
		ret = wait_event_interruptible(server_info->req_wait_q,
				!hash_empty(server_info->reqs_table));
		if (ret) {
			trace_process_accept_req_ret(current->pid, current->tgid, ret);
			/*
			 * Ideally, we should destroy the server if accept
			 * threads are returning due to the client being killed
			 * or the device going down (shutdown/reboot), but that
			 * would make server_info invalid. Other accept/invoke
			 * threads are using server_info and would crash, so
			 * don't do that.
			 */
			mutex_lock(&g_smcinvoke_lock);

			if (freezing(current)) {
				pr_err("Server id :%d interrupted probably due to suspend, pid:%d\n",
						server_info->server_id, current->pid);
				/*
				 * Each accept thread is identified by bits
				 * ranging from 0 to DEFAULT_CB_OBJ_THREAD_CNT-1.
				 * When an accept thread is interrupted by a
				 * signal other than SIGUSR1, SIGKILL, or
				 * SIGTERM, set the corresponding bit for that
				 * accept thread, marking its state as
				 * "suspended", i.e. one that requires an
				 * infinite timeout on the invoke thread side.
				 */
				server_info->is_server_suspended =
						SET_BIT(server_info->is_server_suspended,
						(current->pid) % DEFAULT_CB_OBJ_THREAD_CNT);
			} else {
				pr_err("Setting pid:%d, server id : %d state to defunct\n",
						current->pid, server_info->server_id);
				server_info->state = SMCINVOKE_SERVER_STATE_DEFUNCT;
			}
			mutex_unlock(&g_smcinvoke_lock);
			wake_up_interruptible(&server_info->rsp_wait_q);
			goto out;
		}

		mutex_lock(&g_smcinvoke_lock);
		cb_txn = find_cbtxn_locked(server_info,
				SMCINVOKE_NEXT_AVAILABLE_TXN,
				SMCINVOKE_REQ_PLACED);
		mutex_unlock(&g_smcinvoke_lock);

		if (cb_txn) {
			cb_txn->state = SMCINVOKE_REQ_PROCESSING;
			ret = marshal_in_tzcb_req(cb_txn, &user_args,
					server_obj->server_id);
			if (ret) {
				pr_err("failed to marshal in the callback request\n");
				cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;
				cb_txn->state = SMCINVOKE_REQ_PROCESSED;
				mutex_lock(&g_smcinvoke_lock);
				kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
				mutex_unlock(&g_smcinvoke_lock);
				wake_up_interruptible(&server_info->rsp_wait_q);
				continue;
			}
			mutex_lock(&g_smcinvoke_lock);
			hash_add(server_info->responses_table, &cb_txn->hash,
					cb_txn->txn_id);
			kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
			mutex_unlock(&g_smcinvoke_lock);

			trace_process_accept_req_placed(current->pid, current->tgid);

			ret = copy_to_user((void __user *)arg, &user_args,
					sizeof(struct smcinvoke_accept));
		}
	} while (!cb_txn);
out:
	if (server_info)
		kref_put(&server_info->ref_cnt, destroy_cb_server);

	if (ret && ret != -ERESTARTSYS)
		pr_err("accept thread returning with ret: %d\n", ret);

	return ret;
}
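
/*
 * Illustrative userspace server-loop sketch (assumed caller code and a
 * hypothetical handle_op() handler): the first accept carries no response;
 * every later accept returns the previous result and then blocks for the
 * next callback request.
 *
 *   struct smcinvoke_accept acc = {
 *           .argsize  = sizeof(union smcinvoke_arg),
 *           .buf_addr = (uintptr_t)cb_buf,
 *           .buf_len  = sizeof(cb_buf),
 *           .has_resp = 0,
 *   };
 *
 *   for (;;) {
 *           if (ioctl(server_fd, SMCINVOKE_IOCTL_ACCEPT_REQ, &acc))
 *                   break;
 *           acc.result = handle_op(&acc);   // hypothetical dispatcher
 *           acc.has_resp = 1;
 *   }
 */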
static long process_invoke_req(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1, nr_args = 0;
	struct smcinvoke_cmd_req req = {0};
	void *in_msg = NULL, *out_msg = NULL;
	size_t inmsg_size = 0, outmsg_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
	union smcinvoke_arg *args_buf = NULL;
	struct smcinvoke_file_data *tzobj = filp->private_data;
	struct qtee_shm in_shm = {0}, out_shm = {0};
	LIST_HEAD(l_mem_objs_pending_async); /* Holds new memory objects, to be later sent to TZ */

	/*
	 * Hold a reference to the remote object until the invoke op is
	 * completed. Release it once the invoke is done.
	 */
	struct file *filp_to_release[OBJECT_COUNTS_MAX_OO] = {NULL};
	/*
	 * If anything goes wrong, release the allotted tzhandles for
	 * local objs, which could be either CBObj or MemObj.
	 */
	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
	bool tz_acked = false;
	uint32_t context_type = tzobj->context_type;

	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
			_IOC_SIZE(cmd) != sizeof(req)) {
		pr_err("command size for invoke req is invalid\n");
		return -EINVAL;
	}

	if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
			context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
		pr_err("invalid context_type %d\n", context_type);
		return -EPERM;
	}

	if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
		ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
		if (ret) {
			pr_err("copying invoke req failed\n");
			return -EFAULT;
		}
	} else {
		req = *(struct smcinvoke_cmd_req *)arg;
	}

	if (req.argsize != sizeof(union smcinvoke_arg)) {
		pr_err("arguments size for invoke req is invalid\n");
		return -EINVAL;
	}

	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
			tzobj->tzhandle == SMCINVOKE_TZ_ROOT_OBJ &&
			(req.op == IClientEnv_OP_notifyDomainChange ||
			req.op == IClientEnv_OP_registerWithCredentials ||
			req.op == IClientEnv_OP_adciAccept ||
			req.op == IClientEnv_OP_adciShutdown)) {
		pr_err("invalid rootenv op\n");
		return -EINVAL;
	}

	nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) +
			OBJECT_COUNTS_NUM_objects(req.counts);

	if (nr_args) {
		args_buf = kcalloc(nr_args, req.argsize, GFP_KERNEL);
		if (!args_buf)
			return -ENOMEM;

		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
			ret = copy_from_user(args_buf,
					u64_to_user_ptr(req.args),
					nr_args * req.argsize);
			if (ret) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			memcpy(args_buf, (void *)(req.args),
					nr_args * req.argsize);
		}
	}

	inmsg_size = compute_in_msg_size(&req, args_buf);
	ret = qtee_shmbridge_allocate_shm(inmsg_size, &in_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for in msg in invoke req\n");
		goto out;
	}
	in_msg = in_shm.vaddr;

	mutex_lock(&g_smcinvoke_lock);
	outmsg_size = PAGE_ALIGN(g_max_cb_buf_size);
	mutex_unlock(&g_smcinvoke_lock);
	ret = qtee_shmbridge_allocate_shm(outmsg_size, &out_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for out msg in invoke req\n");
		goto out;
	}
	out_msg = out_shm.vaddr;

	trace_process_invoke_req_tzhandle(tzobj->tzhandle, req.op, req.counts);

	ret = marshal_in_invoke_req(&req, args_buf, tzobj->tzhandle, in_msg,
			inmsg_size, filp_to_release, tzhandles_to_release,
			context_type, &l_mem_objs_pending_async);
	if (ret) {
		pr_err("failed to marshal in invoke req, ret :%d\n", ret);
		goto out;
	}

	if (mem_obj_async_support) {
		mutex_lock(&g_smcinvoke_lock);
		add_mem_obj_info_to_async_side_channel_locked(out_msg, outmsg_size,
				&l_mem_objs_pending_async);
		delete_pending_async_list_locked(&l_mem_objs_pending_async);
		mutex_unlock(&g_smcinvoke_lock);
	}

	ret = prepare_send_scm_msg(in_msg, in_shm.paddr, inmsg_size,
			out_msg, out_shm.paddr, outmsg_size,
			&req, args_buf, &tz_acked, context_type,
			&in_shm, &out_shm);

	/*
	 * If the scm_call succeeded, TZ owns the responsibility to release
	 * refs for local objs.
	 */
	if (!tz_acked) {
		trace_status(__func__, "scm call was not acked");
		goto out;
	}
	memset(tzhandles_to_release, 0, sizeof(tzhandles_to_release));

	/*
	 * if the invoke op results in an error, there is no need to
	 * marshal_out and copy the args buf to user space
	 */
	if (!req.result) {
		/*
		 * Don't check the ret of marshal_out, because there might be
		 * an FD for OO which userspace must release even if an error
		 * occurs. Releasing the FD from user space is much simpler
		 * than doing it here. ORing into ret is required so as not to
		 * miss a past error.
		 */
		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ)
			ret |= copy_to_user(u64_to_user_ptr(req.args),
					args_buf, nr_args * req.argsize);
		else
			memcpy((void *)(req.args), args_buf,
					nr_args * req.argsize);
	}

	/* copy result of invoke op */
	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
		ret |= copy_to_user((void __user *)arg, &req, sizeof(req));
		if (ret)
			goto out;
	} else {
		memcpy((void *)arg, (void *)&req, sizeof(req));
	}

	/* Outbuf could be carrying local objs to be released. */
	process_piggyback_data(out_msg, outmsg_size);
out:
	trace_process_invoke_req_result(ret, req.result, tzobj->tzhandle,
			req.op, req.counts);

	release_filp(filp_to_release, OBJECT_COUNTS_MAX_OO);
	if (ret)
		release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO);
	qtee_shmbridge_free_shm(&in_shm);
	qtee_shmbridge_free_shm(&out_shm);
	kfree(args_buf);

	if (ret)
		pr_err("invoke thread returning with ret = %d\n", ret);
	return ret;
}
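
/*
 * Illustrative userspace sketch (assumed caller code and op number): invoking
 * op 0 with one input buffer on an object fd obtained from this driver.
 *
 *   union smcinvoke_arg args[1] = {0};
 *
 *   args[0].b.addr = (uintptr_t)buf;
 *   args[0].b.size = sizeof(buf);
 *
 *   struct smcinvoke_cmd_req req = {
 *           .op      = 0,
 *           .counts  = OBJECT_COUNTS_PACK(1, 0, 0, 0),
 *           .argsize = sizeof(union smcinvoke_arg),
 *           .args    = (uintptr_t)args,
 *   };
 *   ret = ioctl(obj_fd, SMCINVOKE_IOCTL_INVOKE_REQ, &req);
 */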
static long process_log_info(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = 0;
	char buf[SMCINVOKE_LOG_BUF_SIZE];
	struct smcinvoke_file_data *tzobj = filp->private_data;

	ret = copy_from_user(buf, (void __user *)arg, SMCINVOKE_LOG_BUF_SIZE);
	if (ret) {
		pr_err("logging HLOS info copy failed\n");
		return -EFAULT;
	}
	buf[SMCINVOKE_LOG_BUF_SIZE - 1] = '\0';

	trace_process_log_info(buf, tzobj->context_type, tzobj->tzhandle);

	return ret;
}
static long smcinvoke_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	long ret = 0;

	switch (cmd) {
	case SMCINVOKE_IOCTL_INVOKE_REQ:
		ret = process_invoke_req(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_ACCEPT_REQ:
		ret = process_accept_req(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_SERVER_REQ:
		ret = process_server_req(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_ACK_LOCAL_OBJ:
		ret = process_ack_local_obj(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_LOG:
		ret = process_log_info(filp, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	trace_smcinvoke_ioctl(cmd, ret);
	return ret;
}
int get_root_fd(int *root_fd)
{
	if (!root_fd)
		return -EINVAL;
	else
		return get_fd_for_obj(SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL,
				SMCINVOKE_TZ_ROOT_OBJ, root_fd);
}
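
/*
 * Illustrative in-kernel usage sketch (assumed caller context): obtaining the
 * root object fd and invoking through it without a userspace round trip.
 *
 *   int root_fd, rc;
 *   struct smcinvoke_cmd_req req = { ... };   // filled in by the caller
 *
 *   rc = get_root_fd(&root_fd);
 *   if (!rc)
 *           rc = process_invoke_request_from_kernel_client(root_fd, &req);
 */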
int process_invoke_request_from_kernel_client(int fd,
		struct smcinvoke_cmd_req *req)
{
	struct file *filp = NULL;
	int ret = 0;

	if (!req) {
		pr_err("NULL req\n");
		return -EINVAL;
	}

	filp = fget(fd);
	if (!filp) {
		pr_err("Invalid fd %d\n", fd);
		return -EINVAL;
	}
	ret = process_invoke_req(filp, 0, (uintptr_t)req);
	trace_process_invoke_request_from_kernel_client(fd, filp, file_count(filp));
	fput(filp);
	return ret;
}
char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm)
{
	int rc = 0;
	const struct firmware *fw_entry = NULL, *fw_entry00 = NULL, *fw_entrylast = NULL;
	char fw_name[MAX_APP_NAME_SIZE] = "\0";
	int num_images = 0, phi = 0;
	unsigned char app_arch = 0;
	u8 *img_data_ptr = NULL;
	size_t bufferOffset = 0, phdr_table_offset = 0;
	size_t *offset = NULL;
	Elf32_Phdr phdr32;
	Elf64_Phdr phdr64;
	struct elf32_hdr *ehdr = NULL;
	struct elf64_hdr *ehdr64 = NULL;

	/* load b00 */
	snprintf(fw_name, sizeof(fw_name), "%s.b00", appname);
	rc = firmware_request_nowarn(&fw_entry00, fw_name, class_dev);
	if (rc) {
		pr_err("Load %s failed, ret:%d\n", fw_name, rc);
		return NULL;
	}

	app_arch = *(unsigned char *)(fw_entry00->data + EI_CLASS);

	/* Get the offsets of the split images from the program header table */
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry00->data;
		num_images = ehdr->e_phnum;
		offset = kcalloc(num_images, sizeof(size_t), GFP_KERNEL);
		if (offset == NULL)
			goto release_fw_entry00;
		phdr_table_offset = (size_t)ehdr->e_phoff;
		for (phi = 1; phi < num_images; ++phi) {
			bufferOffset = phdr_table_offset + phi * sizeof(Elf32_Phdr);
			phdr32 = *(Elf32_Phdr *)(fw_entry00->data + bufferOffset);
			offset[phi] = (size_t)phdr32.p_offset;
		}
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry00->data;
		num_images = ehdr64->e_phnum;
		offset = kcalloc(num_images, sizeof(size_t), GFP_KERNEL);
		if (offset == NULL)
			goto release_fw_entry00;
		phdr_table_offset = (size_t)ehdr64->e_phoff;
		for (phi = 1; phi < num_images; ++phi) {
			bufferOffset = phdr_table_offset + phi * sizeof(Elf64_Phdr);
			phdr64 = *(Elf64_Phdr *)(fw_entry00->data + bufferOffset);
			offset[phi] = (size_t)phdr64.p_offset;
		}
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n", appname, app_arch);
		goto release_fw_entry00;
	}

	/* Find the size of the last split bin image */
	snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, num_images - 1);
	rc = firmware_request_nowarn(&fw_entrylast, fw_name, class_dev);
	if (rc) {
		pr_err("Failed to locate blob %s\n", fw_name);
		goto release_fw_entry00;
	}

	/* Total size of the image is the offset of the last image plus its size */
	*fw_size = fw_entrylast->size + offset[num_images - 1];

	/* Allocate memory for the buffer that will hold the stitched image */
	rc = qtee_shmbridge_allocate_shm((*fw_size), shm);
	if (rc) {
		pr_err("shmbridge alloc failed for size: %zu\n", *fw_size);
		goto release_fw_entrylast;
	}
	img_data_ptr = shm->vaddr;

	/* Copy the contents of the split bins into the buffer */
	memcpy(img_data_ptr, fw_entry00->data, fw_entry00->size);
	for (phi = 1; phi < num_images - 1; phi++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, phi);
		rc = firmware_request_nowarn(&fw_entry, fw_name, class_dev);
		if (rc) {
			pr_err("Failed to locate blob %s\n", fw_name);
			qtee_shmbridge_free_shm(shm);
			img_data_ptr = NULL;
			goto release_fw_entrylast;
		}
		memcpy(img_data_ptr + offset[phi], fw_entry->data, fw_entry->size);
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	memcpy(img_data_ptr + offset[phi], fw_entrylast->data, fw_entrylast->size);

release_fw_entrylast:
	release_firmware(fw_entrylast);
release_fw_entry00:
	release_firmware(fw_entry00);
	kfree(offset);
	return img_data_ptr;
}
EXPORT_SYMBOL(firmware_request_from_smcinvoke);
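
/*
 * Worked example (illustrative, assumed file names): an app "foo" split into
 * three blobs is stitched as
 *
 *   foo.b00 -> img_data_ptr + 0          (ELF header + program header table)
 *   foo.b01 -> img_data_ptr + offset[1]  (p_offset of phdr 1)
 *   foo.b02 -> img_data_ptr + offset[2]  (p_offset of phdr 2, the last blob)
 *
 * with *fw_size = offset[2] + size(foo.b02).
 */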
static int smcinvoke_open(struct inode *nodp, struct file *filp)
{
	struct smcinvoke_file_data *tzcxt = NULL;

	tzcxt = kzalloc(sizeof(*tzcxt), GFP_KERNEL);
	if (!tzcxt)
		return -ENOMEM;

	tzcxt->tzhandle = SMCINVOKE_TZ_ROOT_OBJ;
	tzcxt->context_type = SMCINVOKE_OBJ_TYPE_TZ_OBJ;
	filp->private_data = tzcxt;

	return 0;
}

static int release_cb_server(uint16_t server_id)
{
	struct smcinvoke_server_info *server = NULL;

	mutex_lock(&g_smcinvoke_lock);
	server = find_cb_server_locked(server_id);
	if (server)
		kref_put(&server->ref_cnt, destroy_cb_server);
	mutex_unlock(&g_smcinvoke_lock);
	return 0;
}
int smcinvoke_release_filp(struct file *filp)
{
	int ret = 0;
	struct smcinvoke_file_data *file_data = filp->private_data;
	uint32_t tzhandle = 0;
	struct smcinvoke_object_release_pending_list *entry = NULL;

	trace_smcinvoke_release_filp(current->files, filp,
			file_count(filp), file_data->context_type);

	if (file_data->context_type == SMCINVOKE_OBJ_TYPE_SERVER) {
		ret = release_cb_server(file_data->server_id);
		goto out;
	}

	tzhandle = file_data->tzhandle;
	/* The root object is special in the sense that it is indestructible */
	if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ)
		goto out;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		ret = -ENOMEM;
		goto out;
	}
	entry->data.tzhandle = tzhandle;
	entry->data.context_type = file_data->context_type;

	mutex_lock(&object_postprocess_lock);
	list_add_tail(&entry->list, &g_object_postprocess);
	mutex_unlock(&object_postprocess_lock);
	pr_debug("Object release list: added a handle:0x%x\n", tzhandle);
	__wakeup_postprocess_kthread(&smcinvoke[OBJECT_WORKER_THREAD]);

out:
	kfree(filp->private_data);
	filp->private_data = NULL;
	return ret;
}
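
/* Release an smcinvoke object held by a kernel client, identified by fd. */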
int smcinvoke_release_from_kernel_client(int fd)
{
	struct file *filp = NULL;

	/*
	 * fget() takes an extra reference on the file; it is balanced by
	 * the fput() below once the object release has been queued.
	 */
	filp = fget(fd);
	if (!filp) {
		pr_err("invalid fd %d to release\n", fd);
		return -EINVAL;
	}
	trace_smcinvoke_release_from_kernel_client(current->files, filp,
			file_count(filp));
	/* Free the per-fd context and notify TZ to release the object */
	smcinvoke_release_filp(filp);
	fput(filp);
	return 0;
}
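
/* release() handler for the device node; wraps smcinvoke_release_filp(). */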
static int smcinvoke_release(struct inode *nodp, struct file *filp)
{
	trace_smcinvoke_release(current->files, filp, file_count(filp),
			filp->private_data);

	if (filp->private_data)
		return smcinvoke_release_filp(filp);
	else
		return 0;
}
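
/*
 * Probe: sets a 64-bit DMA mask, picks the legacy or current SMC invoke
 * command based on the "qcom,support-legacy_smc" DT property, spawns the
 * worker kthreads, and registers the character device.
 */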
static int smcinvoke_probe(struct platform_device *pdev)
{
	unsigned int baseminor = 0;
	unsigned int count = 1;
	int rc = 0;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		pr_err("dma_set_mask_and_coherent failed %d\n", rc);
		return rc;
	}
	legacy_smc_call = of_property_read_bool(pdev->dev.of_node,
			"qcom,support-legacy_smc");
	invoke_cmd = legacy_smc_call ? SMCINVOKE_INVOKE_CMD_LEGACY : SMCINVOKE_INVOKE_CMD;

	rc = smcinvoke_create_kthreads();
	if (rc) {
		pr_err("smcinvoke_create_kthreads failed %d\n", rc);
		return rc;
	}

	rc = alloc_chrdev_region(&smcinvoke_device_no, baseminor, count,
			SMCINVOKE_DEV);
	if (rc < 0) {
		pr_err("chrdev_region failed %d for %s\n", rc, SMCINVOKE_DEV);
		goto exit_destroy_wkthread;
	}
	driver_class = class_create(THIS_MODULE, SMCINVOKE_DEV);
	if (IS_ERR(driver_class)) {
		rc = PTR_ERR(driver_class);
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}
	class_dev = device_create(driver_class, NULL, smcinvoke_device_no,
			NULL, SMCINVOKE_DEV);
	if (IS_ERR(class_dev)) {
		rc = PTR_ERR(class_dev);
		pr_err("class_device_create failed %d\n", rc);
		goto exit_destroy_class;
	}
	cdev_init(&smcinvoke_cdev, &g_smcinvoke_fops);
	smcinvoke_cdev.owner = THIS_MODULE;

	rc = cdev_add(&smcinvoke_cdev, MKDEV(MAJOR(smcinvoke_device_no), 0),
			count);
	if (rc < 0) {
		pr_err("cdev_add failed %d for %s\n", rc, SMCINVOKE_DEV);
		goto exit_destroy_device;
	}
	smcinvoke_pdev = pdev;

#if !IS_ENABLED(CONFIG_QSEECOM) && IS_ENABLED(CONFIG_QSEECOM_PROXY)
	/*
	 * If the API fails to get the func ops, log the error and continue;
	 * do not treat it as fatal.
	 */
	rc = get_qseecom_kernel_fun_ops();
	if (rc)
		pr_err("failed to get qseecom kernel func ops %d\n", rc);
#endif

	__wakeup_postprocess_kthread(&smcinvoke[ADCI_WORKER_THREAD]);
	return 0;

exit_destroy_device:
	device_destroy(driver_class, smcinvoke_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(smcinvoke_device_no, count);
exit_destroy_wkthread:
	smcinvoke_destroy_kthreads();
	return rc;
}
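
/* Remove: stops the worker kthreads and unregisters the character device. */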
static int smcinvoke_remove(struct platform_device *pdev)
{
	int count = 1;

	smcinvoke_destroy_kthreads();
	cdev_del(&smcinvoke_cdev);
	device_destroy(driver_class, smcinvoke_device_no);
	class_destroy(driver_class);
	unregister_chrdev_region(smcinvoke_device_no, count);
	return 0;
}
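
/*
 * Refuse to suspend while callback requests are still in flight, since
 * outstanding TZ-to-HLOS callbacks still need to be serviced.
 */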
static int __maybe_unused smcinvoke_suspend(struct platform_device *pdev,
		pm_message_t state)
{
	int ret = 0;

	mutex_lock(&g_smcinvoke_lock);
	if (cb_reqs_inflight) {
		pr_err("Failed to suspend smcinvoke driver\n");
		ret = -EIO;
	}
	mutex_unlock(&g_smcinvoke_lock);
	return ret;
}

static int __maybe_unused smcinvoke_resume(struct platform_device *pdev)
{
	return 0;
}
static const struct of_device_id smcinvoke_match[] = {
	{
		.compatible = "qcom,smcinvoke",
	},
	{},
};

static struct platform_driver smcinvoke_plat_driver = {
	.probe = smcinvoke_probe,
	.remove = smcinvoke_remove,
	.suspend = smcinvoke_suspend,
	.resume = smcinvoke_resume,
	.driver = {
		.name = "smcinvoke",
		.of_match_table = smcinvoke_match,
	},
};

static int smcinvoke_init(void)
{
	return platform_driver_register(&smcinvoke_plat_driver);
}

static void smcinvoke_exit(void)
{
	platform_driver_unregister(&smcinvoke_plat_driver);
}

module_init(smcinvoke_init);
module_exit(smcinvoke_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SMC Invoke driver");
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
MODULE_IMPORT_NS(DMA_BUF);