// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/hashtable.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/dma-buf.h>
#include <linux/delay.h>
#include <linux/kref.h>
#include <linux/signal.h>
#include <linux/msm_ion.h>
#include <linux/mem-buf.h>
#include <linux/of_platform.h>
#include <linux/firmware.h>
#include <linux/qcom_scm.h>
#include <linux/freezer.h>
#include <asm/cacheflush.h>
#include <soc/qcom/qseecomi.h>
#include <linux/qtee_shmbridge.h>
#include <linux/kthread.h>

#include "smcinvoke.h"
#include "smcinvoke_object.h"
#include "IClientEnv.h"

#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
#include <linux/qseecom_kernel.h>
#include "misc/qseecom_priv.h"
#else
#include "misc/qseecom_kernel.h"
#endif

#define CREATE_TRACE_POINTS
#include "trace_smcinvoke.h"

#define SMCINVOKE_DEV			"smcinvoke"
#define SMCINVOKE_TZ_ROOT_OBJ		1
#define SMCINVOKE_TZ_OBJ_NULL		0
#define SMCINVOKE_TZ_MIN_BUF_SIZE	4096
#define SMCINVOKE_ARGS_ALIGN_SIZE	(sizeof(uint64_t))
#define SMCINVOKE_NEXT_AVAILABLE_TXN	0
#define SMCINVOKE_REQ_PLACED		1
#define SMCINVOKE_REQ_PROCESSING	2
#define SMCINVOKE_REQ_PROCESSED		3
#define SMCINVOKE_INCREMENT		1
#define SMCINVOKE_DECREMENT		0
#define SMCINVOKE_OBJ_TYPE_TZ_OBJ	0
#define SMCINVOKE_OBJ_TYPE_SERVER	1
#define SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL	2
#define SMCINVOKE_MEM_MAP_OBJ		0
#define SMCINVOKE_MEM_RGN_OBJ		1
#define SMCINVOKE_MEM_PERM_RW		6
#define SMCINVOKE_SCM_EBUSY_WAIT_MS	30
#define SMCINVOKE_SCM_EBUSY_MAX_RETRY	200

/* TZ defined values - Start */
#define SMCINVOKE_INVOKE_PARAM_ID	0x224
#define SMCINVOKE_CB_RSP_PARAM_ID	0x22
#define SMCINVOKE_INVOKE_CMD_LEGACY	0x32000600
#define SMCINVOKE_INVOKE_CMD		0x32000602
#define SMCINVOKE_CB_RSP_CMD		0x32000601
#define SMCINVOKE_RESULT_INBOUND_REQ_NEEDED	3
/* TZ defined values - End */

/* Asynchronous protocol values */
/* Driver async version is set to match the minimal TZ version that supports async memory objects */
#define SMCINVOKE_ASYNC_VERSION		(0x00010002)
#define SMCINVOKE_ASYNC_OP_MEMORY_OBJECT	(0x00000003)

/*
 * This is the state when the server FD has been closed but
 * TZ still holds refs on CBObjs served by this server.
 */
#define SMCINVOKE_SERVER_STATE_DEFUNCT	1

#define CBOBJ_MAX_RETRIES 50

#define FOR_ARGS(ndxvar, counts, section) \
	for (ndxvar = OBJECT_COUNTS_INDEX_##section(counts); \
		ndxvar < (OBJECT_COUNTS_INDEX_##section(counts) \
		+ OBJECT_COUNTS_NUM_##section(counts)); \
		++ndxvar)
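/*
 * Illustration (not part of the driver): the OBJECT_COUNTS_* packing from
 * smcinvoke_object.h gives each argument class (BI/BO/OI/OO) a contiguous
 * index range, so iterating, say, all output-buffer args of a request is
 * sketched below; process_buf() is a hypothetical stand-in:
 *
 *	int i;
 *
 *	FOR_ARGS(i, tzcb_req->hdr.counts, BO)
 *		process_buf(&tzcb_req->args[i].b);
 */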
#define TZCB_BUF_OFFSET(tzcb_req) (sizeof(tzcb_req->result) + \
			sizeof(struct smcinvoke_msg_hdr) + \
			sizeof(union smcinvoke_tz_args) * \
			OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts))

/*
 * +ve uhandle : either a remote obj or a mem obj, decided by f_ops
 * -ve uhandle : either Obj NULL or a CBObj
 *	- -1: OBJ NULL
 *	- < -1: CBObj
 */
#define UHANDLE_IS_FD(h)	((h) >= 0)
#define UHANDLE_IS_NULL(h)	((h) == SMCINVOKE_USERSPACE_OBJ_NULL)
#define UHANDLE_IS_CB_OBJ(h)	((h) < SMCINVOKE_USERSPACE_OBJ_NULL)
#define UHANDLE_NULL		(SMCINVOKE_USERSPACE_OBJ_NULL)
/*
 * MAKE => create a handle for the other domain, i.e. TZ or userspace
 * GET  => retrieve the obj from an incoming handle
 */
#define UHANDLE_GET_CB_OBJ(h)	(-2-(h))
#define UHANDLE_MAKE_CB_OBJ(o)	(-2-(o))
#define UHANDLE_GET_FD(h)	(h)
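/*
 * Illustration (not part of the driver): a userspace callback object is
 * encoded by folding it below -1, so MAKE and GET are inverses. For example,
 * local CBObj index 5 becomes uhandle -7:
 *
 *	int32_t uh = UHANDLE_MAKE_CB_OBJ(5);	// -2 - 5 == -7
 *	UHANDLE_IS_CB_OBJ(uh);			// true: -7 < -1
 *	UHANDLE_GET_CB_OBJ(uh);			// -2 - (-7) == 5
 */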
/*
 * +ve tzhandle : remote object, i.e. owned by TZ
 * -ve tzhandle : local object, i.e. owned by linux
 * ---------------------------------------------------
 * | 1 (1 bit) | Obj Id (15 bits) | srvr id (16 bits) |
 * ---------------------------------------------------
 * Server ids are defined below for various local objects
 * server id 0	   : Kernel Obj
 * server id 1	   : Memory region Obj
 * server id 2	   : Memory map Obj
 * server id 3-15  : Reserved
 * server id 16 & up: Callback Objs
 */
#define KRNL_SRVR_ID		0
#define MEM_RGN_SRVR_ID		1
#define MEM_MAP_SRVR_ID		2
#define CBOBJ_SERVER_ID_START	0x10
#define CBOBJ_SERVER_ID_END	((1<<16) - 1)
/* local obj id is represented by 15 bits */
#define MAX_LOCAL_OBJ_ID	((1<<15) - 1)
/* CBOBJs will be served by server id 0x10 onwards */
#define TZHANDLE_GET_SERVER(h)	((uint16_t)((h) & 0xFFFF))
#define TZHANDLE_GET_OBJID(h)	(((h) >> 16) & 0x7FFF)
#define TZHANDLE_MAKE_LOCAL(s, o)	(((0x8000 | (o)) << 16) | (s))
#define SET_BIT(s, b)	((s) | (1 << (b)))
#define UNSET_BIT(s, b)	((s) & (~(1 << (b))))
#define TZHANDLE_IS_NULL(h)	((h) == SMCINVOKE_TZ_OBJ_NULL)
#define TZHANDLE_IS_LOCAL(h)	((h) & 0x80000000)
#define TZHANDLE_IS_REMOTE(h)	(!TZHANDLE_IS_NULL(h) && !TZHANDLE_IS_LOCAL(h))

#define TZHANDLE_IS_KERNEL_OBJ(h)	(TZHANDLE_IS_LOCAL(h) && \
					TZHANDLE_GET_SERVER(h) == KRNL_SRVR_ID)
#define TZHANDLE_IS_MEM_RGN_OBJ(h)	(TZHANDLE_IS_LOCAL(h) && \
					TZHANDLE_GET_SERVER(h) == MEM_RGN_SRVR_ID)
#define TZHANDLE_IS_MEM_MAP_OBJ(h)	(TZHANDLE_IS_LOCAL(h) && \
					TZHANDLE_GET_SERVER(h) == MEM_MAP_SRVR_ID)
#define TZHANDLE_IS_MEM_OBJ(h)		(TZHANDLE_IS_MEM_RGN_OBJ(h) || \
					TZHANDLE_IS_MEM_MAP_OBJ(h))
#define TZHANDLE_IS_CB_OBJ(h)		(TZHANDLE_IS_LOCAL(h) && \
					TZHANDLE_GET_SERVER(h) >= CBOBJ_SERVER_ID_START)
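/*
 * Illustration (not part of the driver): packing obj id 3 for callback
 * server 0x10 with the macros above sets the sign bit and yields 0x80030010:
 *
 *	uint32_t h = TZHANDLE_MAKE_LOCAL(0x10, 3);	// (0x8003 << 16) | 0x0010
 *	TZHANDLE_IS_LOCAL(h);				// true: bit 31 is set
 *	TZHANDLE_GET_SERVER(h);				// 0x0010, i.e. a CBObj server
 *	TZHANDLE_GET_OBJID(h);				// 3
 */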
#define FILE_IS_REMOTE_OBJ(f)	((f)->f_op && (f)->f_op == &g_smcinvoke_fops)

static DEFINE_MUTEX(g_smcinvoke_lock);
#define NO_LOCK		0
#define TAKE_LOCK	1
#define MUTEX_LOCK(x)	{ if (x) mutex_lock(&g_smcinvoke_lock); }
#define MUTEX_UNLOCK(x)	{ if (x) mutex_unlock(&g_smcinvoke_lock); }

#define POST_KT_SLEEP	0
#define POST_KT_WAKEUP	1
#define MAX_CHAR_NAME	50

enum worker_thread_type {
	SHMB_WORKER_THREAD = 0,
	OBJECT_WORKER_THREAD,
	ADCI_WORKER_THREAD,
	MAX_THREAD_NUMBER
};

static DEFINE_HASHTABLE(g_cb_servers, 8);
static LIST_HEAD(g_mem_objs);
static uint16_t g_last_cb_server_id = CBOBJ_SERVER_ID_START;
static uint16_t g_last_mem_rgn_id, g_last_mem_map_obj_id;
static size_t g_max_cb_buf_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
static unsigned int cb_reqs_inflight;
static bool legacy_smc_call;
static int invoke_cmd;

static long smcinvoke_ioctl(struct file *, unsigned int, unsigned long);
static int smcinvoke_open(struct inode *, struct file *);
static int smcinvoke_release(struct inode *, struct file *);
static int release_cb_server(uint16_t);

static const struct file_operations g_smcinvoke_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= smcinvoke_ioctl,
	.compat_ioctl	= smcinvoke_ioctl,
	.open		= smcinvoke_open,
	.release	= smcinvoke_release,
};

static dev_t smcinvoke_device_no;
static struct cdev smcinvoke_cdev;
static struct class *driver_class;
static struct device *class_dev;
static struct platform_device *smcinvoke_pdev;

/* We disable async memory object support by default,
 * until we receive the first message from TZ over the
 * async channel and can determine the TZ async version.
 */
static bool mem_obj_async_support = false;
static uint32_t tz_async_version = 0x0;

struct smcinvoke_buf_hdr {
	uint32_t offset;
	uint32_t size;
};

union smcinvoke_tz_args {
	struct smcinvoke_buf_hdr b;
	int32_t handle;
};

struct smcinvoke_msg_hdr {
	uint32_t tzhandle;
	uint32_t op;
	uint32_t counts;
};

/* Inbound reqs from TZ */
struct smcinvoke_tzcb_req {
	int32_t result;
	struct smcinvoke_msg_hdr hdr;
	union smcinvoke_tz_args args[0];
};
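/*
 * Illustration (not part of the driver): a callback request buffer from TZ
 * is laid out as [result][msg_hdr][args[0..n-1]][payload buffers], so
 * TZCB_BUF_OFFSET() lands on the first payload byte. With one buffer
 * argument in total, the offset works out to:
 *
 *	sizeof(int32_t)				// result: 4 bytes
 *	+ sizeof(struct smcinvoke_msg_hdr)	// tzhandle/op/counts: 12 bytes
 *	+ 1 * sizeof(union smcinvoke_tz_args)	// one arg descriptor: 8 bytes
 *
 * i.e. 24 bytes. Note that args[i].b.offset values are relative to the start
 * of the whole request, not to this payload area.
 */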
struct smcinvoke_file_data {
	uint32_t context_type;
	union {
		uint32_t tzhandle;
		uint16_t server_id;
	};
};

struct smcinvoke_piggyback_msg {
	uint32_t version;
	uint32_t op;
	uint32_t counts;
	int32_t objs[0];
};

/* Mapped memory object data
 *
 * memObjRef	Handle reference for the memory object
 * mapObjRef	Handle reference for the map object
 * addr		Mapped memory address
 * size		Size of mapped memory
 * perm		Access rights for the memory
 */
struct smcinvoke_mem_obj_info {
	uint32_t memObjRef;
	uint32_t mapObjRef;
	uint64_t addr;
	uint64_t size;
	uint32_t perm;
};

/* Memory object info to be written into the async buffer
 *
 * version	Async protocol version
 * op		Async protocol operation
 * count	Number of memory objects passed
 * mo		Mapped memory object data
 */
struct smcinvoke_mem_obj_msg {
	uint32_t version;
	uint32_t op;
	uint32_t count;
	struct smcinvoke_mem_obj_info mo[];
};
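/*
 * Illustration (not part of the driver): filling the async side channel for
 * one pre-mapped memory object would look roughly like this, with msg
 * pointing at spare space in the outgoing buffer:
 *
 *	msg->version = SMCINVOKE_ASYNC_VERSION;
 *	msg->op      = SMCINVOKE_ASYNC_OP_MEMORY_OBJECT;
 *	msg->count   = 1;
 *	msg->mo[0].memObjRef = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID,
 *						   mem_obj->mem_region_id);
 *	msg->mo[0].mapObjRef = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID,
 *						   mem_obj->mem_map_obj_id);
 *	msg->mo[0].addr = mem_obj->p_addr;
 *	msg->mo[0].size = mem_obj->p_addr_len;
 *	msg->mo[0].perm = SMCINVOKE_MEM_PERM_RW;
 */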
struct smcinvoke_mem_obj_pending_async {
	struct smcinvoke_mem_obj *mem_obj;
	struct list_head list;
};

/* Data structure to hold a request coming from TZ */
struct smcinvoke_cb_txn {
	uint32_t txn_id;
	int32_t state;
	struct smcinvoke_tzcb_req *cb_req;
	size_t cb_req_bytes;
	struct file **filp_to_release;
	struct hlist_node hash;
	struct kref ref_cnt;
};

struct smcinvoke_server_info {
	uint16_t server_id;
	uint16_t state;
	uint32_t txn_id;
	struct kref ref_cnt;
	wait_queue_head_t req_wait_q;
	wait_queue_head_t rsp_wait_q;
	size_t cb_buf_size;
	DECLARE_HASHTABLE(reqs_table, 4);
	DECLARE_HASHTABLE(responses_table, 4);
	struct hlist_node hash;
	struct list_head pending_cbobjs;
	uint8_t is_server_suspended;
};

struct smcinvoke_cbobj {
	uint16_t cbobj_id;
	struct kref ref_cnt;
	struct smcinvoke_server_info *server;
	struct list_head list;
};

/*
 * We require a couple of objects: one for the mem region and another
 * for the mapped mem_obj once the mem region has been mapped. It is
 * possible that TZ can release either one independently of the other.
 */
struct smcinvoke_mem_obj {
	/* these ids are the objid part of the tzhandle */
	uint16_t mem_region_id;
	uint16_t mem_map_obj_id;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *buf_attach;
	struct sg_table *sgt;
	struct kref mem_regn_ref_cnt;
	struct kref mem_map_obj_ref_cnt;
	uint64_t p_addr;
	size_t p_addr_len;
	struct list_head list;
	bool is_smcinvoke_created_shmbridge;
	uint64_t shmbridge_handle;
	struct smcinvoke_server_info *server;
	int32_t mem_obj_user_fd;
};
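/*
 * Illustration (not part of the driver): TZ may drop the two refs in either
 * order; free_mem_obj_locked() only runs once both kref counts reach zero:
 *
 *	kref_put(&mem_obj->mem_regn_ref_cnt, del_mem_regn_obj_locked);
 *	// mem_obj survives: mem_map_obj_ref_cnt is still held
 *	kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
 *	// now the map-side release sees mem_regn_ref_cnt == 0 and frees mem_obj
 */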
static LIST_HEAD(g_bridge_postprocess);
DEFINE_MUTEX(bridge_postprocess_lock);

static LIST_HEAD(g_object_postprocess);
DEFINE_MUTEX(object_postprocess_lock);

struct bridge_deregister {
	uint64_t shmbridge_handle;
	struct dma_buf *dmabuf_to_free;
};

struct object_release {
	uint32_t tzhandle;
	uint32_t context_type;
};

struct smcinvoke_shmbridge_deregister_pending_list {
	struct list_head list;
	struct bridge_deregister data;
};

struct smcinvoke_object_release_pending_list {
	struct list_head list;
	struct object_release data;
};

struct smcinvoke_worker_thread {
	enum worker_thread_type type;
	atomic_t postprocess_kthread_state;
	wait_queue_head_t postprocess_kthread_wq;
	struct task_struct *postprocess_kthread_task;
};

static struct smcinvoke_worker_thread smcinvoke[MAX_THREAD_NUMBER];
static const char thread_name[MAX_THREAD_NUMBER][MAX_CHAR_NAME] = {
	"smcinvoke_shmbridge_postprocess",
	"smcinvoke_object_postprocess",
	"smcinvoke_adci_thread"};

static struct Object adci_clientEnv = Object_NULL;

static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
		size_t in_buf_len,
		uint8_t *out_buf, phys_addr_t out_paddr,
		size_t out_buf_len,
		struct smcinvoke_cmd_req *req,
		union smcinvoke_arg *args_buf,
		bool *tz_acked, uint32_t context_type,
		struct qtee_shm *in_shm, struct qtee_shm *out_shm);

static void process_piggyback_data(void *buf, size_t buf_size);

static void destroy_cb_server(struct kref *kref)
{
	struct smcinvoke_server_info *server = container_of(kref,
			struct smcinvoke_server_info, ref_cnt);

	if (server) {
		hash_del(&server->hash);
		kfree(server);
	}
}

/*
 * A separate find function is required mainly for a couple of cases:
 * next_cb_server_id_locked, which checks whether a server id has already
 * been used.
 * - Taking a ref_cnt for that check would be needless overhead.
 * smcinvoke_release, which is called when a server is closed from userspace.
 * - During server creation we init the ref count; here we put it back.
 */
static struct smcinvoke_server_info *find_cb_server_locked(uint16_t server_id)
{
	struct smcinvoke_server_info *data = NULL;

	hash_for_each_possible(g_cb_servers, data, hash, server_id) {
		if (data->server_id == server_id)
			return data;
	}
	return NULL;
}
static struct smcinvoke_server_info *get_cb_server_locked(uint16_t server_id)
{
	struct smcinvoke_server_info *server = find_cb_server_locked(server_id);

	if (server)
		kref_get(&server->ref_cnt);

	return server;
}

static uint16_t next_cb_server_id_locked(void)
{
	if (g_last_cb_server_id == CBOBJ_SERVER_ID_END)
		g_last_cb_server_id = CBOBJ_SERVER_ID_START;

	while (find_cb_server_locked(++g_last_cb_server_id))
		;

	return g_last_cb_server_id;
}

static inline void release_filp(struct file **filp_to_release, size_t arr_len)
{
	size_t i = 0;

	for (i = 0; i < arr_len; i++) {
		if (filp_to_release[i]) {
			fput(filp_to_release[i]);
			filp_to_release[i] = NULL;
		}
	}
}

static struct smcinvoke_mem_obj *find_mem_obj_locked(uint16_t mem_obj_id,
		bool is_mem_rgn_obj)
{
	struct smcinvoke_mem_obj *mem_obj = NULL;

	if (list_empty(&g_mem_objs))
		return NULL;

	list_for_each_entry(mem_obj, &g_mem_objs, list) {
		if ((is_mem_rgn_obj &&
			(mem_obj->mem_region_id == mem_obj_id)) ||
			(!is_mem_rgn_obj &&
			(mem_obj->mem_map_obj_id == mem_obj_id)))
			return mem_obj;
	}
	return NULL;
}

static uint32_t next_mem_region_obj_id_locked(void)
{
	if (g_last_mem_rgn_id == MAX_LOCAL_OBJ_ID)
		g_last_mem_rgn_id = 0;

	while (find_mem_obj_locked(++g_last_mem_rgn_id, SMCINVOKE_MEM_RGN_OBJ))
		;

	return g_last_mem_rgn_id;
}

static uint32_t next_mem_map_obj_id_locked(void)
{
	if (g_last_mem_map_obj_id == MAX_LOCAL_OBJ_ID)
		g_last_mem_map_obj_id = 0;

	while (find_mem_obj_locked(++g_last_mem_map_obj_id,
			SMCINVOKE_MEM_MAP_OBJ))
		;

	return g_last_mem_map_obj_id;
}

static void smcinvoke_shmbridge_post_process(void)
{
	struct smcinvoke_shmbridge_deregister_pending_list *entry = NULL;
	struct list_head *pos;
	int ret = 0;
	uint64_t handle = 0;
	struct dma_buf *dmabuf_to_free = NULL;

	do {
		mutex_lock(&bridge_postprocess_lock);
		if (list_empty(&g_bridge_postprocess)) {
			mutex_unlock(&bridge_postprocess_lock);
			break;
		}
		pos = g_bridge_postprocess.next;
		entry = list_entry(pos,
				struct smcinvoke_shmbridge_deregister_pending_list,
				list);
		if (entry) {
			handle = entry->data.shmbridge_handle;
			dmabuf_to_free = entry->data.dmabuf_to_free;
		} else {
			pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos);
		}
		list_del(pos);
		kfree_sensitive(entry);
		mutex_unlock(&bridge_postprocess_lock);

		if (entry) {
			do {
				ret = qtee_shmbridge_deregister(handle);
				if (unlikely(ret)) {
					pr_err("SHM failed: ret:%d dmabuf:%p h:%#llx\n",
							ret,
							dmabuf_to_free,
							handle);
				} else {
					pr_debug("SHM deletion: Handle:%#llx\n",
							handle);
					dma_buf_put(dmabuf_to_free);
				}
			} while (-EBUSY == ret);
		}
	} while (1);
}
static int smcinvoke_object_post_process(void)
{
	struct smcinvoke_object_release_pending_list *entry = NULL;
	struct list_head *pos;
	int ret = 0;
	bool release_handles;
	uint32_t context_type;
	uint8_t *in_buf = NULL;
	uint8_t *out_buf = NULL;
	struct smcinvoke_cmd_req req = {0};
	struct smcinvoke_msg_hdr hdr = {0};
	struct qtee_shm in_shm = {0}, out_shm = {0};

	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for in msg in object release\n");
		goto out;
	}
	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for out msg in object release\n");
		goto out;
	}

	do {
		mutex_lock(&object_postprocess_lock);
		if (list_empty(&g_object_postprocess)) {
			mutex_unlock(&object_postprocess_lock);
			break;
		}
		pos = g_object_postprocess.next;
		entry = list_entry(pos,
				struct smcinvoke_object_release_pending_list, list);
		if (entry) {
			in_buf = in_shm.vaddr;
			out_buf = out_shm.vaddr;
			hdr.tzhandle = entry->data.tzhandle;
			hdr.op = OBJECT_OP_RELEASE;
			hdr.counts = 0;
			*(struct smcinvoke_msg_hdr *)in_buf = hdr;
			context_type = entry->data.context_type;
		} else {
			pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos);
		}
		list_del(pos);
		kfree_sensitive(entry);
		mutex_unlock(&object_postprocess_lock);

		if (entry) {
			do {
				ret = prepare_send_scm_msg(in_buf, in_shm.paddr,
						SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm.paddr,
						SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL,
						&release_handles, context_type, &in_shm, &out_shm);
				process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE);
				if (ret) {
					pr_err("Failed to release object(0x%x), ret:%d\n",
							hdr.tzhandle, ret);
				} else {
					pr_debug("Released object(0x%x) successfully.\n",
							hdr.tzhandle);
				}
			} while (-EBUSY == ret);
		}
	} while (1);

out:
	qtee_shmbridge_free_shm(&in_shm);
	qtee_shmbridge_free_shm(&out_shm);

	return ret;
}

static void smcinvoke_start_adci_thread(void)
{
	int32_t ret = OBJECT_ERROR;
	int retry_count = 0;

	ret = get_client_env_object(&adci_clientEnv);
	if (ret) {
		pr_err("failed to get clientEnv for ADCI invoke thread. ret = %d\n", ret);
		adci_clientEnv = Object_NULL;
		goto out;
	}
	/* Invoke call to QTEE which should never return if ADCI is supported */
	do {
		ret = IClientEnv_accept(adci_clientEnv);
		if (ret == OBJECT_ERROR_BUSY) {
			pr_err("Secure side is busy, will retry after 5 ms, retry_count = %d", retry_count);
			msleep(5);
		}
	} while ((ret == OBJECT_ERROR_BUSY) &&
			(retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));

	if (ret == OBJECT_ERROR_INVALID)
		pr_err("ADCI feature is not supported on this chipset, ret = %d\n", ret);
	/* Need to take a decision here if we want to restart the ADCI thread */
	else
		pr_err("Received response from QTEE, ret = %d\n", ret);
out:
	/*
	 * Control should reach this point only if the ADCI feature is not
	 * supported by QTEE (or) the ADCI thread held in QTEE is released.
	 */
	Object_ASSIGN_NULL(adci_clientEnv);
}
static void __wakeup_postprocess_kthread(struct smcinvoke_worker_thread *smcinvoke)
{
	if (smcinvoke) {
		atomic_set(&smcinvoke->postprocess_kthread_state,
				POST_KT_WAKEUP);
		wake_up_interruptible(&smcinvoke->postprocess_kthread_wq);
	} else {
		pr_err("Invalid smcinvoke pointer.\n");
	}
}

static int smcinvoke_postprocess_kthread_func(void *data)
{
	struct smcinvoke_worker_thread *smcinvoke_wrk_trd = data;
	const char *tag = "";

	if (!smcinvoke_wrk_trd) {
		pr_err("Bad input.\n");
		return -EINVAL;
	}

	while (!kthread_should_stop()) {
		wait_event_interruptible(
			smcinvoke_wrk_trd->postprocess_kthread_wq,
			kthread_should_stop() ||
			(atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state)
			== POST_KT_WAKEUP));
		switch (smcinvoke_wrk_trd->type) {
		case SHMB_WORKER_THREAD:
			tag = "shmbridge";
			pr_debug("kthread to %s postprocess is called %d\n",
				tag, atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
			smcinvoke_shmbridge_post_process();
			break;
		case OBJECT_WORKER_THREAD:
			tag = "object";
			pr_debug("kthread to %s postprocess is called %d\n",
				tag, atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
			smcinvoke_object_post_process();
			break;
		case ADCI_WORKER_THREAD:
			tag = "adci";
			pr_debug("kthread to %s postprocess is called %d\n",
				tag, atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
			smcinvoke_start_adci_thread();
			break;
		default:
			pr_err("Invalid thread type(%d), do nothing.\n",
					(int)smcinvoke_wrk_trd->type);
			break;
		}
		/*
		 * For the ADCI thread, if control reaches here, that indicates
		 * the ADCI thread is either not supported or released by QTEE.
		 * Since the ADCI thread gets signaled only during smcinvoke
		 * driver initialization, there is no point in putting the
		 * thread back to sleep. All the required post-processing will
		 * be taken care of by the object and shmbridge threads.
		 */
		if (smcinvoke_wrk_trd->type == ADCI_WORKER_THREAD)
			break;

		atomic_set(&smcinvoke_wrk_trd->postprocess_kthread_state,
				POST_KT_SLEEP);
	}
	pr_warn("kthread to %s postprocess stopped\n", tag);

	return 0;
}

static int smcinvoke_create_kthreads(void)
{
	int i, rc = 0;
	const enum worker_thread_type thread_type[MAX_THREAD_NUMBER] = {
		SHMB_WORKER_THREAD, OBJECT_WORKER_THREAD, ADCI_WORKER_THREAD};

	for (i = 0; i < MAX_THREAD_NUMBER; i++) {
		init_waitqueue_head(&smcinvoke[i].postprocess_kthread_wq);
		smcinvoke[i].type = thread_type[i];
		smcinvoke[i].postprocess_kthread_task = kthread_run(
				smcinvoke_postprocess_kthread_func,
				&smcinvoke[i], thread_name[i]);
		if (IS_ERR(smcinvoke[i].postprocess_kthread_task)) {
			rc = PTR_ERR(smcinvoke[i].postprocess_kthread_task);
			pr_err("fail to create kthread to postprocess, rc = %x\n",
					rc);
			return rc;
		}
		atomic_set(&smcinvoke[i].postprocess_kthread_state,
				POST_KT_SLEEP);
	}

	return rc;
}

static void smcinvoke_destroy_kthreads(void)
{
	int i;
	int32_t ret = OBJECT_ERROR;
	int retry_count = 0;

	if (!Object_isNull(adci_clientEnv)) {
		do {
			ret = IClientEnv_adciShutdown(adci_clientEnv);
			if (ret == OBJECT_ERROR_BUSY) {
				pr_err("Secure side is busy, will retry after 5 ms, retry_count = %d", retry_count);
				msleep(5);
			}
		} while ((ret == OBJECT_ERROR_BUSY) &&
				(retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));
		if (OBJECT_isERROR(ret)) {
			pr_err("adciShutdown in QTEE failed with error = %d\n", ret);
		}
		Object_ASSIGN_NULL(adci_clientEnv);
	}

	for (i = 0; i < MAX_THREAD_NUMBER; i++) {
		kthread_stop(smcinvoke[i].postprocess_kthread_task);
	}
}
/*
 * Queue a newly created memory object to the l_pending_mem_obj list.
 * Later, the mapping information for objects in this list will be sent to TZ
 * over the async side channel.
 *
 * No return value, as TZ is always able to explicitly ask for this
 * information in case this function fails and the memory object is not
 * added to the list.
 */
static void queue_mem_obj_pending_async_locked(struct smcinvoke_mem_obj *mem_obj,
		struct list_head *l_pending_mem_obj)
{
	struct smcinvoke_mem_obj_pending_async *t_mem_obj_pending =
			kzalloc(sizeof(*t_mem_obj_pending), GFP_KERNEL);

	/*
	 * We are not failing execution in case of a failure here,
	 * since TZ can always ask for this information explicitly
	 * if it's not available in the side channel.
	 */
	if (!t_mem_obj_pending) {
		pr_err("Unable to allocate memory\n");
		return;
	}

	t_mem_obj_pending->mem_obj = mem_obj;
	list_add(&t_mem_obj_pending->list, l_pending_mem_obj);
}

static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj)
{
	int ret = 0;
	bool is_bridge_created = mem_obj->is_smcinvoke_created_shmbridge;
	struct dma_buf *dmabuf_to_free = mem_obj->dma_buf;
	uint64_t shmbridge_handle = mem_obj->shmbridge_handle;
	struct smcinvoke_shmbridge_deregister_pending_list *entry = NULL;

	list_del(&mem_obj->list);
	kfree(mem_obj->server);
	kfree(mem_obj);
	mem_obj = NULL;
	mutex_unlock(&g_smcinvoke_lock);

	if (is_bridge_created)
		ret = qtee_shmbridge_deregister(shmbridge_handle);
	if (ret) {
		pr_err("Error:%d delete bridge failed leaking memory %p\n",
				ret, dmabuf_to_free);
		if (ret == -EBUSY) {
			pr_err("EBUSY: we postpone it %p\n",
					dmabuf_to_free);
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (entry) {
				entry->data.shmbridge_handle = shmbridge_handle;
				entry->data.dmabuf_to_free = dmabuf_to_free;
				mutex_lock(&bridge_postprocess_lock);
				list_add_tail(&entry->list, &g_bridge_postprocess);
				mutex_unlock(&bridge_postprocess_lock);
				pr_debug("SHMBridge list: added a Handle:%#llx\n",
						shmbridge_handle);
				__wakeup_postprocess_kthread(
						&smcinvoke[SHMB_WORKER_THREAD]);
			}
		}
	} else {
		dma_buf_put(dmabuf_to_free);
	}

	mutex_lock(&g_smcinvoke_lock);
}

static void del_mem_regn_obj_locked(struct kref *kref)
{
	struct smcinvoke_mem_obj *mem_obj = container_of(kref,
			struct smcinvoke_mem_obj, mem_regn_ref_cnt);

	/*
	 * mem_regn obj and mem_map obj are held in the mem_obj structure,
	 * which can't be released until both kinds of objs have been released.
	 * So check whether the mem_map obj has ref 0, and only then release
	 * the mem_obj.
	 */
	if (kref_read(&mem_obj->mem_map_obj_ref_cnt) == 0)
		free_mem_obj_locked(mem_obj);
}

static void del_mem_map_obj_locked(struct kref *kref)
{
	struct smcinvoke_mem_obj *mem_obj = container_of(kref,
			struct smcinvoke_mem_obj, mem_map_obj_ref_cnt);

	mem_obj->p_addr_len = 0;
	mem_obj->p_addr = 0;
	if (mem_obj->sgt)
		dma_buf_unmap_attachment(mem_obj->buf_attach,
				mem_obj->sgt, DMA_BIDIRECTIONAL);
	if (mem_obj->buf_attach)
		dma_buf_detach(mem_obj->dma_buf, mem_obj->buf_attach);

	/*
	 * mem_regn obj and mem_map obj are held in the mem_obj structure,
	 * which can't be released until both kinds of objs have been released.
	 * So check if the mem_regn obj has ref 0, and only then release
	 * the mem_obj.
	 */
	if (kref_read(&mem_obj->mem_regn_ref_cnt) == 0)
		free_mem_obj_locked(mem_obj);
}

static int release_mem_obj_locked(int32_t tzhandle)
{
	int is_mem_regn_obj = TZHANDLE_IS_MEM_RGN_OBJ(tzhandle);
	struct smcinvoke_mem_obj *mem_obj = find_mem_obj_locked(
			TZHANDLE_GET_OBJID(tzhandle), is_mem_regn_obj);

	if (!mem_obj) {
		pr_err("memory object not found\n");
		return OBJECT_ERROR_BADOBJ;
	}

	if (is_mem_regn_obj)
		kref_put(&mem_obj->mem_regn_ref_cnt, del_mem_regn_obj_locked);
	else
		kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);

	return OBJECT_OK;
}
static void free_pending_cbobj_locked(struct kref *kref)
{
	struct smcinvoke_server_info *server = NULL;
	struct smcinvoke_cbobj *obj = container_of(kref,
			struct smcinvoke_cbobj, ref_cnt);

	list_del(&obj->list);
	server = obj->server;
	kfree(obj);
	if (server)
		kref_put(&server->ref_cnt, destroy_cb_server);
}

static int get_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
{
	int ret = 0;
	bool release_server = true;
	struct list_head *head = NULL;
	struct smcinvoke_cbobj *cbobj = NULL;
	struct smcinvoke_cbobj *obj = NULL;
	struct smcinvoke_server_info *server = get_cb_server_locked(srvr_id);

	if (!server) {
		pr_err("%s, server id : %u not found\n", __func__, srvr_id);
		return OBJECT_ERROR_BADOBJ;
	}

	head = &server->pending_cbobjs;
	list_for_each_entry(cbobj, head, list)
		if (cbobj->cbobj_id == obj_id) {
			kref_get(&cbobj->ref_cnt);
			goto out;
		}

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		ret = OBJECT_ERROR_KMEM;
		goto out;
	}

	obj->cbobj_id = obj_id;
	kref_init(&obj->ref_cnt);
	obj->server = server;
	/*
	 * we are holding server ref in cbobj; we will
	 * release server ref when cbobj is destroyed
	 */
	release_server = false;
	list_add_tail(&obj->list, head);
out:
	if (release_server)
		kref_put(&server->ref_cnt, destroy_cb_server);
	return ret;
}

static int put_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
{
	int ret = -EINVAL;
	struct smcinvoke_server_info *srvr_info =
			get_cb_server_locked(srvr_id);
	struct list_head *head = NULL;
	struct smcinvoke_cbobj *cbobj = NULL;

	if (!srvr_info) {
		pr_err("%s, server id : %u not found\n", __func__, srvr_id);
		return ret;
	}

	trace_put_pending_cbobj_locked(srvr_id, obj_id);

	head = &srvr_info->pending_cbobjs;
	list_for_each_entry(cbobj, head, list)
		if (cbobj->cbobj_id == obj_id) {
			kref_put(&cbobj->ref_cnt, free_pending_cbobj_locked);
			ret = 0;
			break;
		}
	kref_put(&srvr_info->ref_cnt, destroy_cb_server);
	return ret;
}

static int release_tzhandle_locked(int32_t tzhandle)
{
	if (TZHANDLE_IS_MEM_OBJ(tzhandle))
		return release_mem_obj_locked(tzhandle);
	else if (TZHANDLE_IS_CB_OBJ(tzhandle))
		return put_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle),
				TZHANDLE_GET_OBJID(tzhandle));
	return OBJECT_ERROR;
}

static void release_tzhandles(const int32_t *tzhandles, size_t len)
{
	size_t i;

	mutex_lock(&g_smcinvoke_lock);
	for (i = 0; i < len; i++)
		release_tzhandle_locked(tzhandles[i]);
	mutex_unlock(&g_smcinvoke_lock);
}

static void delete_cb_txn_locked(struct kref *kref)
{
	struct smcinvoke_cb_txn *cb_txn = container_of(kref,
			struct smcinvoke_cb_txn, ref_cnt);

	if (OBJECT_OP_METHODID(cb_txn->cb_req->hdr.op) == OBJECT_OP_RELEASE)
		release_tzhandle_locked(cb_txn->cb_req->hdr.tzhandle);

	kfree(cb_txn->cb_req);
	hash_del(&cb_txn->hash);
	kfree(cb_txn);
}

static struct smcinvoke_cb_txn *find_cbtxn_locked(
		struct smcinvoke_server_info *server,
		uint32_t txn_id, int32_t state)
{
	int i = 0;
	struct smcinvoke_cb_txn *cb_txn = NULL;
	struct smcinvoke_mem_obj *mem_obj = NULL;
	int32_t tzhandle = 0;

	/*
	 * Since HASH_BITS() does not work on pointers, we can't select hash
	 * table using state and loop over it.
	 */
	if (state == SMCINVOKE_REQ_PLACED) {
		/* pick up 1st req */
		hash_for_each(server->reqs_table, i, cb_txn, hash) {
			kref_get(&cb_txn->ref_cnt);
			tzhandle = (cb_txn->cb_req)->hdr.tzhandle;
			if (TZHANDLE_IS_MEM_OBJ(tzhandle)) {
				mem_obj = find_mem_obj_locked(
						TZHANDLE_GET_OBJID(tzhandle),
						SMCINVOKE_MEM_RGN_OBJ);
				if (mem_obj)
					kref_get(&mem_obj->mem_regn_ref_cnt);
			}
			hash_del(&cb_txn->hash);
			return cb_txn;
		}
	} else if (state == SMCINVOKE_REQ_PROCESSING) {
		hash_for_each_possible(
				server->responses_table, cb_txn, hash, txn_id) {
			if (cb_txn->txn_id == txn_id) {
				kref_get(&cb_txn->ref_cnt);
				tzhandle = (cb_txn->cb_req)->hdr.tzhandle;
				if (TZHANDLE_IS_MEM_OBJ(tzhandle)) {
					mem_obj = find_mem_obj_locked(
							TZHANDLE_GET_OBJID(tzhandle),
							SMCINVOKE_MEM_RGN_OBJ);
					if (mem_obj)
						kref_get(&mem_obj->mem_regn_ref_cnt);
				}
				hash_del(&cb_txn->hash);
				return cb_txn;
			}
		}
	}
	return NULL;
}
/*
 * size_add_ saturates at SIZE_MAX. If integer overflow is detected,
 * this function returns SIZE_MAX; otherwise the normal a+b is returned.
 */
static inline size_t size_add_(size_t a, size_t b)
{
	return (b > (SIZE_MAX - a)) ? SIZE_MAX : a + b;
}

/*
 * pad_size is used along with size_align to define a buffer-overflow
 * protected version of ALIGN
 */
static inline size_t pad_size(size_t a, size_t b)
{
	return (~a + 1) % b;
}

/*
 * size_align saturates at SIZE_MAX. If integer overflow is detected, this
 * function returns SIZE_MAX; otherwise the next aligned size is returned.
 */
static inline size_t size_align(size_t a, size_t b)
{
	return size_add_(a, pad_size(a, b));
}
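/*
 * Illustration (not part of the driver): for a = 13, b = 8 (b must be a
 * power of two for the two's-complement wraparound to work out):
 *
 *	pad_size(13, 8)   == (~13 + 1) % 8 == (-13 mod 2^64) % 8 == 3
 *	size_align(13, 8) == size_add_(13, 3) == 16
 *
 * and near the top of the range the saturation kicks in:
 *
 *	size_add_(SIZE_MAX - 1, 3) == SIZE_MAX	// instead of wrapping to 1
 */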
static uint16_t get_server_id(int cb_server_fd)
{
	uint16_t server_id = 0;
	struct smcinvoke_file_data *svr_cxt = NULL;
	struct file *tmp_filp = fget(cb_server_fd);

	if (!tmp_filp)
		return server_id;

	if (!FILE_IS_REMOTE_OBJ(tmp_filp)) {
		fput(tmp_filp);
		return server_id;
	}

	svr_cxt = tmp_filp->private_data;
	if (svr_cxt && svr_cxt->context_type == SMCINVOKE_OBJ_TYPE_SERVER)
		server_id = svr_cxt->server_id;

	fput(tmp_filp);

	return server_id;
}

static bool is_dma_fd(int32_t uhandle, struct dma_buf **dma_buf)
{
	*dma_buf = dma_buf_get(uhandle);
	return IS_ERR_OR_NULL(*dma_buf) ? false : true;
}

static bool is_remote_obj(int32_t uhandle, struct smcinvoke_file_data **tzobj,
		struct file **filp)
{
	bool ret = false;
	struct file *tmp_filp = fget(uhandle);

	if (!tmp_filp)
		return ret;

	if (FILE_IS_REMOTE_OBJ(tmp_filp)) {
		*tzobj = tmp_filp->private_data;
		if ((*tzobj)->context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
			*filp = tmp_filp;
			tmp_filp = NULL;
			ret = true;
		}
	}

	if (tmp_filp)
		fput(tmp_filp);
	return ret;
}

static int smcinvoke_create_bridge(struct smcinvoke_mem_obj *mem_obj)
{
	int ret = 0;
	int tz_perm = PERM_READ | PERM_WRITE;
	uint32_t *vmid_list;
	uint32_t *perms_list;
	uint32_t nelems = 0;
	struct dma_buf *dmabuf = mem_obj->dma_buf;
	phys_addr_t phys = mem_obj->p_addr;
	size_t size = mem_obj->p_addr_len;

	if (!qtee_shmbridge_is_enabled())
		return 0;

	ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list,
			(int **)&perms_list, (int *)&nelems);
	if (ret) {
		pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret);
		return ret;
	}

	if (mem_buf_dma_buf_exclusive_owner(dmabuf))
		perms_list[0] = PERM_READ | PERM_WRITE;

	ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
			tz_perm, &mem_obj->shmbridge_handle);
	if (ret == 0) {
		/* On success the handle has to be freed in memobj release */
		mem_obj->is_smcinvoke_created_shmbridge = true;
	} else if (ret == -EEXIST) {
		ret = 0;
		goto exit;
	} else {
		pr_err("creation of shm bridge for mem_region_id %d failed ret %d\n",
				mem_obj->mem_region_id, ret);
		goto exit;
	}

	trace_smcinvoke_create_bridge(mem_obj->shmbridge_handle, mem_obj->mem_region_id);
exit:
	kfree(perms_list);
	kfree(vmid_list);
	return ret;
}

/*
 * Map the memory region for a given memory object.
 * Mapping information will be saved as part of the memory object structure.
 */
static int32_t smcinvoke_map_mem_region_locked(struct smcinvoke_mem_obj *mem_obj)
{
	int ret = OBJECT_OK;
	struct dma_buf_attachment *buf_attach = NULL;
	struct sg_table *sgt = NULL;

	if (!mem_obj) {
		pr_err("Invalid memory object\n");
		return OBJECT_ERROR_BADOBJ;
	}

	if (!mem_obj->p_addr) {
		kref_init(&mem_obj->mem_map_obj_ref_cnt);
		buf_attach = dma_buf_attach(mem_obj->dma_buf,
				&smcinvoke_pdev->dev);
		if (IS_ERR(buf_attach)) {
			ret = OBJECT_ERROR_KMEM;
			pr_err("dma buf attach failed, ret: %d\n", ret);
			goto out;
		}
		mem_obj->buf_attach = buf_attach;

		sgt = dma_buf_map_attachment(buf_attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt)) {
			pr_err("mapping dma buffers failed, ret: %ld\n",
					PTR_ERR(sgt));
			ret = OBJECT_ERROR_KMEM;
			goto out;
		}
		mem_obj->sgt = sgt;

		/* contiguous only => nents=1 */
		if (sgt->nents != 1) {
			ret = OBJECT_ERROR_INVALID;
			pr_err("sg entries are not contiguous, ret: %d\n", ret);
			goto out;
		}
		mem_obj->p_addr = sg_dma_address(sgt->sgl);
		mem_obj->p_addr_len = sgt->sgl->length;
		if (!mem_obj->p_addr) {
			ret = OBJECT_ERROR_INVALID;
			pr_err("invalid physical address, ret: %d\n", ret);
			goto out;
		}

		/*
		 * Increase the reference count as we are feeding the memobj to
		 * smcinvoke and unlock the mutex. No need to hold the mutex in
		 * case of shmbridge creation.
		 */
		kref_get(&mem_obj->mem_map_obj_ref_cnt);
		mutex_unlock(&g_smcinvoke_lock);

		ret = smcinvoke_create_bridge(mem_obj);

		/*
		 * Take the lock again and decrease the reference count which
		 * we increased for shmbridge, but before proceeding further we
		 * have to check again whether the memobj is still valid or not
		 * after decreasing the reference.
		 */
		mutex_lock(&g_smcinvoke_lock);
		kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
		if (ret) {
			ret = OBJECT_ERROR_INVALID;
			pr_err("Unable to create shm bridge, ret: %d\n", ret);
			goto out;
		}

		if (!find_mem_obj_locked(mem_obj->mem_region_id,
				SMCINVOKE_MEM_RGN_OBJ)) {
			mutex_unlock(&g_smcinvoke_lock);
			pr_err("Memory object not found\n");
			return OBJECT_ERROR_BADOBJ;
		}

		mem_obj->mem_map_obj_id = next_mem_map_obj_id_locked();
	}

out:
	if (ret != OBJECT_OK)
		kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
	return ret;
}
  1095. static int create_mem_obj(struct dma_buf *dma_buf, int32_t *tzhandle,
  1096. struct smcinvoke_mem_obj **mem_obj, int32_t server_id, int32_t user_handle)
  1097. {
  1098. struct smcinvoke_mem_obj *t_mem_obj = NULL;
  1099. struct smcinvoke_server_info *server_i = NULL;
  1100. t_mem_obj = kzalloc(sizeof(struct smcinvoke_mem_obj), GFP_KERNEL);
  1101. if (!t_mem_obj) {
  1102. dma_buf_put(dma_buf);
  1103. return -ENOMEM;
  1104. }
  1105. server_i = kzalloc(sizeof(struct smcinvoke_server_info),GFP_KERNEL);
  1106. if (!server_i) {
  1107. kfree(t_mem_obj);
  1108. dma_buf_put(dma_buf);
  1109. return -ENOMEM;
  1110. }
  1111. kref_init(&t_mem_obj->mem_regn_ref_cnt);
  1112. t_mem_obj->dma_buf = dma_buf;
  1113. mutex_lock(&g_smcinvoke_lock);
  1114. t_mem_obj->mem_region_id = next_mem_region_obj_id_locked();
  1115. server_i->server_id = server_id;
  1116. t_mem_obj->server = server_i;
  1117. t_mem_obj->mem_obj_user_fd = user_handle;
  1118. list_add_tail(&t_mem_obj->list, &g_mem_objs);
  1119. mutex_unlock(&g_smcinvoke_lock);
  1120. *mem_obj = t_mem_obj;
  1121. *tzhandle = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID,
  1122. t_mem_obj->mem_region_id);
  1123. return 0;
  1124. }
  1125. /*
  1126. * This function retrieves file pointer corresponding to FD provided. It stores
  1127. * retrieved file pointer until IOCTL call is concluded. Once call is completed,
  1128. * all stored file pointers are released. file pointers are stored to prevent
  1129. * other threads from releasing that FD while IOCTL is in progress.
  1130. */
  1131. static int get_tzhandle_from_uhandle(int32_t uhandle, int32_t server_fd,
  1132. struct file **filp, uint32_t *tzhandle, struct list_head *l_pending_mem_obj)
  1133. {
  1134. int ret = -EBADF;
  1135. uint16_t server_id = 0;
  1136. struct smcinvoke_mem_obj *mem_obj = NULL;
  1137. if (UHANDLE_IS_NULL(uhandle)) {
  1138. *tzhandle = SMCINVOKE_TZ_OBJ_NULL;
  1139. ret = 0;
  1140. } else if (UHANDLE_IS_CB_OBJ(uhandle)) {
  1141. server_id = get_server_id(server_fd);
  1142. if (server_id < CBOBJ_SERVER_ID_START)
  1143. goto out;
  1144. mutex_lock(&g_smcinvoke_lock);
  1145. ret = get_pending_cbobj_locked(server_id,
  1146. UHANDLE_GET_CB_OBJ(uhandle));
  1147. mutex_unlock(&g_smcinvoke_lock);
  1148. if (ret)
  1149. goto out;
  1150. *tzhandle = TZHANDLE_MAKE_LOCAL(server_id,
  1151. UHANDLE_GET_CB_OBJ(uhandle));
  1152. ret = 0;
  1153. } else if (UHANDLE_IS_FD(uhandle)) {
  1154. struct dma_buf *dma_buf = NULL;
  1155. struct smcinvoke_file_data *tzobj = NULL;
  1156. if (is_dma_fd(UHANDLE_GET_FD(uhandle), &dma_buf)) {
  1157. server_id = get_server_id(server_fd);
  1158. ret = create_mem_obj(dma_buf, tzhandle, &mem_obj, server_id, uhandle);
  1159. if (!ret && mem_obj_async_support && l_pending_mem_obj) {
  1160. mutex_lock(&g_smcinvoke_lock);
  1161. /* Map the newly created memory object and add it
  1162. * to l_pending_mem_obj list.
  1163. * Before returning to TZ, add the mapping data
  1164. * to the async side channel so it's available to TZ
  1165. * together with the memory object.
  1166. */
  1167. if (!smcinvoke_map_mem_region_locked(mem_obj)) {
  1168. queue_mem_obj_pending_async_locked(mem_obj, l_pending_mem_obj);
  1169. } else {
  1170. pr_err("Failed to map memory region\n");
  1171. }
  1172. mutex_unlock(&g_smcinvoke_lock);
  1173. }
  1174. } else if (is_remote_obj(UHANDLE_GET_FD(uhandle),
  1175. &tzobj, filp)) {
  1176. *tzhandle = tzobj->tzhandle;
  1177. ret = 0;
  1178. }
  1179. }
  1180. out:
  1181. return ret;
  1182. }
static int get_fd_for_obj(uint32_t obj_type, uint32_t obj, int32_t *fd)
{
	int unused_fd = -1, ret = -EINVAL;
	struct file *f = NULL;
	struct smcinvoke_file_data *cxt = NULL;

	cxt = kzalloc(sizeof(*cxt), GFP_KERNEL);
	if (!cxt) {
		ret = -ENOMEM;
		goto out;
	}
	if (obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ ||
			obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
		cxt->context_type = obj_type;
		cxt->tzhandle = obj;
	} else if (obj_type == SMCINVOKE_OBJ_TYPE_SERVER) {
		cxt->context_type = SMCINVOKE_OBJ_TYPE_SERVER;
		cxt->server_id = obj;
	} else {
		goto out;
	}

	unused_fd = get_unused_fd_flags(O_RDWR);
	if (unused_fd < 0)
		goto out;

	if (fd == NULL)
		goto out;

	f = anon_inode_getfile(SMCINVOKE_DEV, &g_smcinvoke_fops, cxt, O_RDWR);
	if (IS_ERR(f)) {
		ret = PTR_ERR(f);
		goto out;
	}

	*fd = unused_fd;
	fd_install(*fd, f);
	return 0;
out:
	if (unused_fd >= 0)
		put_unused_fd(unused_fd);
	kfree(cxt);
	return ret;
}
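
/*
 * Illustrative note (not part of the original driver): a minimal usage
 * sketch for get_fd_for_obj(), mirroring how this file hands a callback
 * server id back to userspace as a file descriptor:
 *
 *	int32_t srv_fd = -1;
 *
 *	if (!get_fd_for_obj(SMCINVOKE_OBJ_TYPE_SERVER, server_id, &srv_fd)) {
 *		// srv_fd now owns a reference; closing it releases the
 *		// server object via smcinvoke_release().
 *	}
 */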
static int get_uhandle_from_tzhandle(int32_t tzhandle, int32_t srvr_id,
		int32_t *uhandle, bool lock, uint32_t context_type)
{
	int ret = -1;

	if (TZHANDLE_IS_NULL(tzhandle)) {
		*uhandle = UHANDLE_NULL;
		ret = 0;
	} else if (TZHANDLE_IS_CB_OBJ(tzhandle)) {
		if (srvr_id != TZHANDLE_GET_SERVER(tzhandle))
			goto out;
		*uhandle = UHANDLE_MAKE_CB_OBJ(TZHANDLE_GET_OBJID(tzhandle));
		MUTEX_LOCK(lock)
		ret = get_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle),
				TZHANDLE_GET_OBJID(tzhandle));
		MUTEX_UNLOCK(lock)
	} else if (TZHANDLE_IS_MEM_RGN_OBJ(tzhandle)) {
		struct smcinvoke_mem_obj *mem_obj = NULL;

		MUTEX_LOCK(lock)
		mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(tzhandle),
				SMCINVOKE_MEM_RGN_OBJ);
		if (mem_obj != NULL) {
			int fd;

			fd = mem_obj->mem_obj_user_fd;
			if (fd < 0)
				goto exit_lock;
			*uhandle = fd;
			ret = 0;
		}
exit_lock:
		MUTEX_UNLOCK(lock)
	} else if (TZHANDLE_IS_REMOTE(tzhandle)) {
		/* reaching here means tzhandle is a remote handle; pass it on as an unsigned int */
		ret = get_fd_for_obj(context_type,
				(uint32_t)tzhandle, uhandle);
	}
out:
	return ret;
}
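
/*
 * Illustrative note (not part of the original driver): the reverse mapping
 * of get_tzhandle_from_uhandle(). A minimal sketch, assuming a CB-object
 * tzhandle owned by srvr_id:
 *
 *	int32_t uh = UHANDLE_NULL;
 *
 *	if (!get_uhandle_from_tzhandle(tzh, srvr_id, &uh, TAKE_LOCK,
 *			SMCINVOKE_OBJ_TYPE_TZ_OBJ)) {
 *		// uh is a CB-obj uhandle, a mem-obj fd, or a fresh fd for a
 *		// remote object, depending on the tzhandle class.
 *	}
 */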
static int32_t smcinvoke_release_mem_obj_locked(void *buf, size_t buf_len)
{
	struct smcinvoke_tzcb_req *msg = buf;

	if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 0, 0, 0)) {
		pr_err("Invalid object count in %s\n", __func__);
		return OBJECT_ERROR_INVALID;
	}
	trace_release_mem_obj_locked(msg->hdr.tzhandle, buf_len);
	return release_tzhandle_locked(msg->hdr.tzhandle);
}
static int32_t smcinvoke_process_map_mem_region_req(void *buf, size_t buf_len)
{
	int ret = OBJECT_OK;
	struct smcinvoke_tzcb_req *msg = buf;
	struct {
		uint64_t p_addr;
		uint64_t len;
		uint32_t perms;
	} *ob = NULL;
	int32_t *oo = NULL;
	struct smcinvoke_mem_obj *mem_obj = NULL;

	if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 1, 1, 1) ||
			(buf_len - msg->args[0].b.offset < msg->args[0].b.size)) {
		pr_err("Invalid counts received for mapping mem obj\n");
		return OBJECT_ERROR_INVALID;
	}
	/* args[0] = BO, args[1] = OI, args[2] = OO */
	ob = buf + msg->args[0].b.offset;
	oo = &msg->args[2].handle;

	mutex_lock(&g_smcinvoke_lock);
	mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(msg->args[1].handle),
			SMCINVOKE_MEM_RGN_OBJ);
	if (!mem_obj) {
		mutex_unlock(&g_smcinvoke_lock);
		pr_err("Memory object not found\n");
		return OBJECT_ERROR_BADOBJ;
	}

	if (!mem_obj->p_addr)
		ret = smcinvoke_map_mem_region_locked(mem_obj);
	else
		kref_get(&mem_obj->mem_map_obj_ref_cnt);

	if (!ret) {
		ob->p_addr = mem_obj->p_addr;
		ob->len = mem_obj->p_addr_len;
		ob->perms = SMCINVOKE_MEM_PERM_RW;
		*oo = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj->mem_map_obj_id);
	}
	mutex_unlock(&g_smcinvoke_lock);
	return ret;
}
static int32_t smcinvoke_sleep(void *buf, size_t buf_len)
{
	struct smcinvoke_tzcb_req *msg = buf;
	uint32_t sleepTimeMs_val = 0;

	if (msg->hdr.counts != OBJECT_COUNTS_PACK(1, 0, 0, 0) ||
			(buf_len - msg->args[0].b.offset < msg->args[0].b.size)) {
		pr_err("Invalid counts received for sleeping in hlos\n");
		return OBJECT_ERROR_INVALID;
	}

	/* Time in milliseconds is expected from TZ */
	sleepTimeMs_val = *((uint32_t *)(buf + msg->args[0].b.offset));
	msleep(sleepTimeMs_val);
	return OBJECT_OK;
}
static void process_kernel_obj(void *buf, size_t buf_len)
{
	struct smcinvoke_tzcb_req *cb_req = buf;

	switch (cb_req->hdr.op) {
	case OBJECT_OP_MAP_REGION:
		pr_debug("Received a request to map memory region\n");
		cb_req->result = smcinvoke_process_map_mem_region_req(buf, buf_len);
		break;
	case OBJECT_OP_YIELD:
		cb_req->result = OBJECT_OK;
		break;
	case OBJECT_OP_SLEEP:
		cb_req->result = smcinvoke_sleep(buf, buf_len);
		break;
	default:
		pr_err("invalid operation for tz kernel object\n");
		cb_req->result = OBJECT_ERROR_INVALID;
		break;
	}
}
static void process_mem_obj(void *buf, size_t buf_len)
{
	struct smcinvoke_tzcb_req *cb_req = buf;

	mutex_lock(&g_smcinvoke_lock);
	cb_req->result = (cb_req->hdr.op == OBJECT_OP_RELEASE) ?
			smcinvoke_release_mem_obj_locked(buf, buf_len) :
			OBJECT_ERROR_INVALID;
	mutex_unlock(&g_smcinvoke_lock);
}
static int invoke_cmd_handler(int cmd, phys_addr_t in_paddr, size_t in_buf_len,
		uint8_t *out_buf, phys_addr_t out_paddr,
		size_t out_buf_len, int32_t *result, u64 *response_type,
		unsigned int *data, struct qtee_shm *in_shm,
		struct qtee_shm *out_shm)
{
	int ret = 0;

	switch (cmd) {
	case SMCINVOKE_INVOKE_CMD_LEGACY:
		qtee_shmbridge_flush_shm_buf(in_shm);
		qtee_shmbridge_flush_shm_buf(out_shm);
		ret = qcom_scm_invoke_smc_legacy(in_paddr, in_buf_len, out_paddr, out_buf_len,
				result, response_type, data);
		qtee_shmbridge_inv_shm_buf(in_shm);
		qtee_shmbridge_inv_shm_buf(out_shm);
		break;
	case SMCINVOKE_INVOKE_CMD:
		ret = qcom_scm_invoke_smc(in_paddr, in_buf_len, out_paddr, out_buf_len,
				result, response_type, data);
		break;
	case SMCINVOKE_CB_RSP_CMD:
		if (legacy_smc_call)
			qtee_shmbridge_flush_shm_buf(out_shm);
		ret = qcom_scm_invoke_callback_response(virt_to_phys(out_buf), out_buf_len,
				result, response_type, data);
		if (legacy_smc_call) {
			qtee_shmbridge_inv_shm_buf(in_shm);
			qtee_shmbridge_inv_shm_buf(out_shm);
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	trace_invoke_cmd_handler(cmd, *response_type, *result, ret);
	return ret;
}
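
/*
 * Illustrative note (not part of the original driver): on targets that set
 * "qcom,support-legacy_smc" the legacy paths above bracket the SCM call with
 * explicit cache flushes and invalidates of the shared buffers. A minimal
 * sketch of the dispatch, assuming the globals set up in smcinvoke_probe():
 *
 *	int cmd = legacy_smc_call ? SMCINVOKE_INVOKE_CMD_LEGACY :
 *			SMCINVOKE_INVOKE_CMD;
 *
 *	ret = invoke_cmd_handler(cmd, in_shm.paddr, in_len, out_buf,
 *			out_shm.paddr, out_len, &result, &resp_type, &data,
 *			&in_shm, &out_shm);
 */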
/*
 * Buf should be aligned to struct smcinvoke_tzcb_req
 */
static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
{
	/* ret is going to TZ. Provide values from OBJECT_ERROR_<> */
	int ret = OBJECT_ERROR_DEFUNCT;
	int cbobj_retries = 0;
	long timeout_jiff;
	bool wait_interrupted = false;
	struct smcinvoke_cb_txn *cb_txn = NULL;
	struct smcinvoke_tzcb_req *cb_req = NULL, *tmp_cb_req = NULL;
	struct smcinvoke_server_info *srvr_info = NULL;
	struct smcinvoke_mem_obj *mem_obj = NULL;
	uint16_t server_id = 0;

	if (buf_len < sizeof(struct smcinvoke_tzcb_req)) {
		pr_err("buffer length too small: %zu\n", buf_len);
		return;
	}

	cb_req = buf;

	/* check whether it is to be served by kernel or userspace */
	if (TZHANDLE_IS_KERNEL_OBJ(cb_req->hdr.tzhandle)) {
		return process_kernel_obj(buf, buf_len);
	} else if (TZHANDLE_IS_MEM_MAP_OBJ(cb_req->hdr.tzhandle)) {
		/*
		 * MEM_MAP memory objects are created and owned by the kernel,
		 * hence their processing (handling deletion) is done in
		 * kernel context.
		 */
		return process_mem_obj(buf, buf_len);
	} else if (TZHANDLE_IS_MEM_RGN_OBJ(cb_req->hdr.tzhandle)) {
		/*
		 * MEM_RGN memory objects are created and owned by userspace,
		 * so their deletion/handling requires going back to
		 * userspace, similar to callback objects. Entering this
		 * branch is a no-op here; proceed as for callback objects.
		 */
	} else if (!TZHANDLE_IS_CB_OBJ(cb_req->hdr.tzhandle)) {
		pr_err("Request object is not a callback object\n");
		cb_req->result = OBJECT_ERROR_INVALID;
		return;
	}

	/*
	 * We need a copy of req that could be sent to server. Otherwise, if
	 * someone kills invoke caller, buf would go away and server would be
	 * working on already freed buffer, causing a device crash.
	 */
	tmp_cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
	if (!tmp_cb_req) {
		/* we need to return error to caller so fill up result */
		cb_req->result = OBJECT_ERROR_KMEM;
		pr_err("failed to create copy of request, set result: %d\n",
				cb_req->result);
		return;
	}

	cb_txn = kzalloc(sizeof(*cb_txn), GFP_KERNEL);
	if (!cb_txn) {
		cb_req->result = OBJECT_ERROR_KMEM;
		pr_err("failed to allocate memory for request, result: %d\n",
				cb_req->result);
		kfree(tmp_cb_req);
		return;
	}
	/* no need for memcpy as we did kmemdup() above */
	cb_req = tmp_cb_req;

	trace_process_tzcb_req_handle(cb_req->hdr.tzhandle, cb_req->hdr.op, cb_req->hdr.counts);

	cb_txn->state = SMCINVOKE_REQ_PLACED;
	cb_txn->cb_req = cb_req;
	cb_txn->cb_req_bytes = buf_len;
	cb_txn->filp_to_release = arr_filp;
	kref_init(&cb_txn->ref_cnt);

	mutex_lock(&g_smcinvoke_lock);
	++cb_reqs_inflight;

	if (TZHANDLE_IS_MEM_RGN_OBJ(cb_req->hdr.tzhandle)) {
		mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(cb_req->hdr.tzhandle),
				SMCINVOKE_MEM_RGN_OBJ);
		if (!mem_obj) {
			pr_err("mem obj with tzhandle : %d not found\n",
					cb_req->hdr.tzhandle);
			/* drop the lock before out:, which re-acquires it */
			mutex_unlock(&g_smcinvoke_lock);
			goto out;
		}
		server_id = mem_obj->server->server_id;
	} else {
		server_id = TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle);
	}

	srvr_info = get_cb_server_locked(server_id);
	if (!srvr_info || srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
		/* ret equals OBJECT_ERROR_DEFUNCT at this point; go to out */
		if (!srvr_info)
			pr_err("server is invalid\n");
		else {
			pr_err("server is defunct, state= %d tzhandle = %d\n",
					srvr_info->state, cb_req->hdr.tzhandle);
		}
		mutex_unlock(&g_smcinvoke_lock);
		goto out;
	}

	cb_txn->txn_id = ++srvr_info->txn_id;
	hash_add(srvr_info->reqs_table, &cb_txn->hash, cb_txn->txn_id);
	mutex_unlock(&g_smcinvoke_lock);

	trace_process_tzcb_req_wait(cb_req->hdr.tzhandle, cbobj_retries, cb_txn->txn_id,
			current->pid, current->tgid, srvr_info->state, srvr_info->server_id,
			cb_reqs_inflight);
	/*
	 * we need not worry that server_info will be deleted because as long
	 * as this CBObj is served by this server, srvr_info will be valid.
	 */
	wake_up_interruptible_all(&srvr_info->req_wait_q);
	/* time out well before 1 s, otherwise TZ would report itself busy */
	timeout_jiff = msecs_to_jiffies(100);

	while (cbobj_retries < CBOBJ_MAX_RETRIES) {
		if (wait_interrupted) {
			ret = wait_event_timeout(srvr_info->rsp_wait_q,
					(cb_txn->state == SMCINVOKE_REQ_PROCESSED) ||
					(srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT),
					timeout_jiff);
		} else {
			ret = wait_event_interruptible_timeout(srvr_info->rsp_wait_q,
					(cb_txn->state == SMCINVOKE_REQ_PROCESSED) ||
					(srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT),
					timeout_jiff);
		}
		if (ret == 0) {
			if (srvr_info->is_server_suspended == 0) {
				pr_err("CBobj timed out waiting on cbtxn: %d, cb-tzhandle: %d, retry: %d, op: %d, counts: %d\n",
						cb_txn->txn_id, cb_req->hdr.tzhandle, cbobj_retries,
						cb_req->hdr.op, cb_req->hdr.counts);
				pr_err("CBobj %d timed out, pid %x, tid %x, srvr state=%d, srvr id: %u\n",
						cb_req->hdr.tzhandle, current->pid,
						current->tgid, srvr_info->state,
						srvr_info->server_id);
			}
		} else {
			/* wait_event returned due to a signal */
			if (srvr_info->state != SMCINVOKE_SERVER_STATE_DEFUNCT &&
					cb_txn->state != SMCINVOKE_REQ_PROCESSED) {
				wait_interrupted = true;
			} else {
				break;
			}
		}
		/*
		 * If the bit corresponding to any accept thread is set, invoke
		 * threads should wait indefinitely for that accept thread to
		 * come back with a response.
		 */
		if (srvr_info->is_server_suspended > 0)
			cbobj_retries = 0;
		else
			cbobj_retries++;
	}

out:
	/*
	 * we could be here because of either:
	 * a. Req is PROCESSED
	 * b. Server was killed
	 * c. Invoke thread is killed
	 * sometimes invoke thread and server are part of same process.
	 */
	mutex_lock(&g_smcinvoke_lock);
	hash_del(&cb_txn->hash);
	if (ret == 0) {
		pr_err("CBObj timed out! No more retries\n");
		cb_req->result = Object_ERROR_TIMEOUT;
	} else if (ret == -ERESTARTSYS) {
		pr_err("wait event interrupted, ret: %d\n", ret);
		cb_req->result = OBJECT_ERROR_ABORT;
	} else {
		if (cb_txn->state == SMCINVOKE_REQ_PROCESSED) {
			/*
			 * it is possible that server was killed immediately
			 * after CB Req was processed but who cares now!
			 */
		} else if (!srvr_info ||
				srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
			cb_req->result = OBJECT_ERROR_DEFUNCT;
			pr_err("server invalid, res: %d\n", cb_req->result);
		} else {
			pr_err("%s: unexpected event happened, ret:%d\n", __func__, ret);
			cb_req->result = OBJECT_ERROR_ABORT;
		}
	}
	--cb_reqs_inflight;

	trace_process_tzcb_req_result(cb_req->result, cb_req->hdr.tzhandle, cb_req->hdr.op,
			cb_req->hdr.counts, cb_reqs_inflight);

	memcpy(buf, cb_req, buf_len);

	if (TZHANDLE_IS_MEM_RGN_OBJ(cb_req->hdr.tzhandle)) {
		mutex_unlock(&g_smcinvoke_lock);
		process_mem_obj(buf, buf_len);
		pr_err("pid: %x, mem obj deleted\n", current->pid);
		mutex_lock(&g_smcinvoke_lock);
	}

	kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
	if (srvr_info)
		kref_put(&srvr_info->ref_cnt, destroy_cb_server);
	mutex_unlock(&g_smcinvoke_lock);
}
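
/*
 * Illustrative note (not part of the original driver): a callback
 * transaction as driven above moves through three states. A compact sketch
 * of the handshake with the userspace accept threads:
 *
 *	TZ -> process_tzcb_req():   txn state = SMCINVOKE_REQ_PLACED,
 *	                            queued on srvr_info->reqs_table,
 *	                            req_wait_q woken
 *	accept thread (ioctl):      picks txn, state = SMCINVOKE_REQ_PROCESSING,
 *	                            serves the request in userspace
 *	accept thread (next ioctl): marshal_out_tzcb_req(), state =
 *	                            SMCINVOKE_REQ_PROCESSED, rsp_wait_q woken
 *	process_tzcb_req():         copies the result back into buf for TZ
 */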
static int marshal_out_invoke_req(const uint8_t *buf, uint32_t buf_size,
		struct smcinvoke_cmd_req *req,
		union smcinvoke_arg *args_buf,
		uint32_t context_type)
{
	int ret = -EINVAL, i = 0;
	int32_t temp_fd = UHANDLE_NULL;
	union smcinvoke_tz_args *tz_args = NULL;
	size_t offset = sizeof(struct smcinvoke_msg_hdr) +
			OBJECT_COUNTS_TOTAL(req->counts) *
			sizeof(union smcinvoke_tz_args);

	if (offset > buf_size)
		goto out;

	tz_args = (union smcinvoke_tz_args *)
			(buf + sizeof(struct smcinvoke_msg_hdr));
	tz_args += OBJECT_COUNTS_NUM_BI(req->counts);

	if (args_buf == NULL)
		return 0;

	FOR_ARGS(i, req->counts, BO) {
		args_buf[i].b.size = tz_args->b.size;
		if ((buf_size - tz_args->b.offset < tz_args->b.size) ||
				tz_args->b.offset > buf_size) {
			pr_err("%s: buffer overflow detected\n", __func__);
			goto out;
		}
		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
			if (copy_to_user((void __user *)
					(uintptr_t)(args_buf[i].b.addr),
					(uint8_t *)(buf) + tz_args->b.offset,
					tz_args->b.size)) {
				pr_err("Error copying BO arg to user\n");
				goto out;
			}
		} else {
			memcpy((uint8_t *)(args_buf[i].b.addr),
					(uint8_t *)(buf) + tz_args->b.offset,
					tz_args->b.size);
		}
		tz_args++;
	}
	tz_args += OBJECT_COUNTS_NUM_OI(req->counts);

	FOR_ARGS(i, req->counts, OO) {
		/*
		 * create a new FD and assign to output object's context.
		 * We are passing cb_server_fd from output param in case OO
		 * is a CBObj. For CBObj, we have to ensure that it is sent
		 * to the server who serves it, and that info comes from
		 * userspace.
		 */
		temp_fd = UHANDLE_NULL;
		ret = get_uhandle_from_tzhandle(tz_args->handle,
				TZHANDLE_GET_SERVER(tz_args->handle),
				&temp_fd, NO_LOCK, context_type);
		args_buf[i].o.fd = temp_fd;
		if (ret)
			goto out;
		trace_marshal_out_invoke_req(i, tz_args->handle,
				TZHANDLE_GET_SERVER(tz_args->handle), temp_fd);
		tz_args++;
	}
	ret = 0;
out:
	return ret;
}
static bool is_inbound_req(int val)
{
	return (val == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED ||
			val == QSEOS_RESULT_INCOMPLETE ||
			val == QSEOS_RESULT_BLOCKED_ON_LISTENER);
}
static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
		size_t in_buf_len,
		uint8_t *out_buf, phys_addr_t out_paddr,
		size_t out_buf_len,
		struct smcinvoke_cmd_req *req,
		union smcinvoke_arg *args_buf,
		bool *tz_acked, uint32_t context_type,
		struct qtee_shm *in_shm, struct qtee_shm *out_shm)
{
	int ret = 0, cmd, retry_count = 0;
	u64 response_type;
	unsigned int data;
	struct file *arr_filp[OBJECT_COUNTS_MAX_OO] = {NULL};

	*tz_acked = false;
	/* buf size should be page aligned */
	if ((in_buf_len % PAGE_SIZE) != 0 || (out_buf_len % PAGE_SIZE) != 0)
		return -EINVAL;

	cmd = invoke_cmd;

	while (1) {
		do {
			ret = invoke_cmd_handler(cmd, in_paddr, in_buf_len, out_buf,
					out_paddr, out_buf_len, &req->result,
					&response_type, &data, in_shm, out_shm);
			if (ret == -EBUSY) {
				pr_err("Secure side is busy, will retry after 30 ms, retry_count = %d\n",
						retry_count);
				msleep(SMCINVOKE_SCM_EBUSY_WAIT_MS);
			}
		} while ((ret == -EBUSY) &&
				(retry_count++ < SMCINVOKE_SCM_EBUSY_MAX_RETRY));

		if (!ret && !is_inbound_req(response_type)) {
			/* don't marshal if the object returned an error */
			if (!req->result) {
				if (args_buf != NULL)
					ret = marshal_out_invoke_req(in_buf,
							in_buf_len, req, args_buf,
							context_type);
			}
			*tz_acked = true;
		}

		if (cmd == SMCINVOKE_CB_RSP_CMD)
			release_filp(arr_filp, OBJECT_COUNTS_MAX_OO);

		if (ret || !is_inbound_req(response_type))
			break;

		/* process listener request */
		if (response_type == QSEOS_RESULT_INCOMPLETE ||
				response_type == QSEOS_RESULT_BLOCKED_ON_LISTENER) {
			ret = qseecom_process_listener_from_smcinvoke(
					&req->result, &response_type, &data);

			trace_prepare_send_scm_msg(response_type, req->result);

			if (!req->result &&
					response_type != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) {
				ret = marshal_out_invoke_req(in_buf,
						in_buf_len, req, args_buf,
						context_type);
			}
			*tz_acked = true;
		}
		/*
		 * qseecom does not understand smcinvoke's callback object and
		 * erroneously sets the ret value to -EINVAL. We need to handle it.
		 */
		if (response_type != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED)
			break;

		trace_status(__func__, "looks like inbnd req reqd");
		process_tzcb_req(out_buf, out_buf_len, arr_filp);
		cmd = SMCINVOKE_CB_RSP_CMD;
	}
	return ret;
}
/*
 * SMC expects arguments in the following format:
 * ---------------------------------------------------------------------------
 * | cxt | op | counts | ptr|size |ptr|size...|ORef|ORef|...| rest of payload |
 * ---------------------------------------------------------------------------
 * cxt: target, op: operation, counts: total arguments
 * offset: offset is from beginning of buffer i.e. cxt
 * size: size is 8 bytes aligned value
 */
static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req,
		const union smcinvoke_arg *args_buf)
{
	uint32_t i = 0;
	size_t total_size = sizeof(struct smcinvoke_msg_hdr) +
			OBJECT_COUNTS_TOTAL(req->counts) *
			sizeof(union smcinvoke_tz_args);

	/* Computed total_size should be 8 bytes aligned from start of buf */
	total_size = ALIGN(total_size, SMCINVOKE_ARGS_ALIGN_SIZE);

	/* each buffer has to be 8 bytes aligned */
	while (i < OBJECT_COUNTS_NUM_buffers(req->counts))
		total_size = size_add_(total_size,
				size_align(args_buf[i++].b.size,
				SMCINVOKE_ARGS_ALIGN_SIZE));

	return PAGE_ALIGN(total_size);
}
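
/*
 * Illustrative note (not part of the original driver): a worked example of
 * the size computation above, assuming a request with two buffer args of
 * 100 and 17 bytes plus one object arg (three tz_args entries total):
 *
 *	total  = sizeof(hdr) + 3 * sizeof(tz_args)	// fixed part
 *	total  = ALIGN(total, 8)
 *	total += size_align(100, 8)			// +104
 *	total += size_align(17, 8)			// +24
 *	return PAGE_ALIGN(total)			// 4096 with 4 KB pages
 */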
static int marshal_in_invoke_req(const struct smcinvoke_cmd_req *req,
		const union smcinvoke_arg *args_buf, uint32_t tzhandle,
		uint8_t *buf, size_t buf_size, struct file **arr_filp,
		int32_t *tzhandles_to_release, uint32_t context_type,
		struct list_head *l_pending_mem_obj)
{
	int ret = -EINVAL, i = 0, j = 0, k = 0;
	const struct smcinvoke_msg_hdr msg_hdr = {
			tzhandle, req->op, req->counts};
	uint32_t offset = sizeof(struct smcinvoke_msg_hdr) +
			sizeof(union smcinvoke_tz_args) *
			OBJECT_COUNTS_TOTAL(req->counts);
	union smcinvoke_tz_args *tz_args = NULL;

	if (buf_size < offset)
		goto out;

	*(struct smcinvoke_msg_hdr *)buf = msg_hdr;
	tz_args = (union smcinvoke_tz_args *)(buf +
			sizeof(struct smcinvoke_msg_hdr));

	if (args_buf == NULL)
		return 0;

	FOR_ARGS(i, req->counts, BI) {
		offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
		if ((offset > buf_size) ||
				(args_buf[i].b.size > (buf_size - offset)))
			goto out;

		tz_args[i].b.offset = offset;
		tz_args[i].b.size = args_buf[i].b.size;
		if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
			if (copy_from_user(buf + offset,
					(void __user *)(uintptr_t)(args_buf[i].b.addr),
					args_buf[i].b.size))
				goto out;
		} else {
			memcpy(buf + offset, (void *)(args_buf[i].b.addr),
					args_buf[i].b.size);
		}
		offset += args_buf[i].b.size;
	}
	FOR_ARGS(i, req->counts, BO) {
		offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
		if ((offset > buf_size) ||
				(args_buf[i].b.size > (buf_size - offset)))
			goto out;

		tz_args[i].b.offset = offset;
		tz_args[i].b.size = args_buf[i].b.size;
		offset += args_buf[i].b.size;
	}
	FOR_ARGS(i, req->counts, OI) {
		ret = get_tzhandle_from_uhandle(args_buf[i].o.fd,
				args_buf[i].o.cb_server_fd, &arr_filp[j++],
				&(tz_args[i].handle), l_pending_mem_obj);
		if (ret)
			goto out;
		trace_marshal_in_invoke_req(i, args_buf[i].o.fd,
				args_buf[i].o.cb_server_fd, tz_args[i].handle);
		tzhandles_to_release[k++] = tz_args[i].handle;
	}
	ret = 0;
out:
	return ret;
}
static int marshal_in_tzcb_req(const struct smcinvoke_cb_txn *cb_txn,
		struct smcinvoke_accept *user_req, int srvr_id)
{
	int ret = 0, i = 0;
	int32_t temp_fd = UHANDLE_NULL;
	union smcinvoke_arg tmp_arg;
	struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req;
	union smcinvoke_tz_args *tz_args = tzcb_req->args;
	size_t tzcb_req_len = cb_txn->cb_req_bytes;
	size_t tz_buf_offset = TZCB_BUF_OFFSET(tzcb_req);
	size_t user_req_buf_offset = sizeof(union smcinvoke_arg) *
			OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts);

	if (tz_buf_offset > tzcb_req_len) {
		ret = -EINVAL;
		goto out;
	}

	user_req->txn_id = cb_txn->txn_id;
	if (get_uhandle_from_tzhandle(tzcb_req->hdr.tzhandle, srvr_id,
			&user_req->cbobj_id, TAKE_LOCK,
			SMCINVOKE_OBJ_TYPE_TZ_OBJ)) {
		ret = -EINVAL;
		goto out;
	}
	user_req->op = tzcb_req->hdr.op;
	user_req->counts = tzcb_req->hdr.counts;
	user_req->argsize = sizeof(union smcinvoke_arg);

	trace_marshal_in_tzcb_req_handle(tzcb_req->hdr.tzhandle, srvr_id,
			user_req->cbobj_id, user_req->op, user_req->counts);

	FOR_ARGS(i, tzcb_req->hdr.counts, BI) {
		user_req_buf_offset = size_align(user_req_buf_offset,
				SMCINVOKE_ARGS_ALIGN_SIZE);
		tmp_arg.b.size = tz_args[i].b.size;
		if ((tz_args[i].b.offset > tzcb_req_len) ||
				(tz_args[i].b.size > tzcb_req_len - tz_args[i].b.offset) ||
				(user_req_buf_offset > user_req->buf_len) ||
				(tmp_arg.b.size >
				user_req->buf_len - user_req_buf_offset)) {
			ret = -EINVAL;
			pr_err("%s: buffer overflow detected\n", __func__);
			goto out;
		}
		tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset;

		if (copy_to_user(u64_to_user_ptr
				(user_req->buf_addr + i * sizeof(tmp_arg)),
				&tmp_arg, sizeof(tmp_arg)) ||
				copy_to_user(u64_to_user_ptr(tmp_arg.b.addr),
				(uint8_t *)(tzcb_req) + tz_args[i].b.offset,
				tz_args[i].b.size)) {
			ret = -EFAULT;
			goto out;
		}
		user_req_buf_offset += tmp_arg.b.size;
	}
	FOR_ARGS(i, tzcb_req->hdr.counts, BO) {
		user_req_buf_offset = size_align(user_req_buf_offset,
				SMCINVOKE_ARGS_ALIGN_SIZE);
		tmp_arg.b.size = tz_args[i].b.size;
		if ((user_req_buf_offset > user_req->buf_len) ||
				(tmp_arg.b.size >
				user_req->buf_len - user_req_buf_offset)) {
			ret = -EINVAL;
			pr_err("%s: buffer overflow detected\n", __func__);
			goto out;
		}
		tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset;

		if (copy_to_user(u64_to_user_ptr
				(user_req->buf_addr + i * sizeof(tmp_arg)),
				&tmp_arg, sizeof(tmp_arg))) {
			ret = -EFAULT;
			goto out;
		}
		user_req_buf_offset += tmp_arg.b.size;
	}
	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
		/*
		 * create a new FD and assign to output object's
		 * context
		 */
		temp_fd = UHANDLE_NULL;
		ret = get_uhandle_from_tzhandle(tz_args[i].handle, srvr_id,
				&temp_fd, TAKE_LOCK, SMCINVOKE_OBJ_TYPE_TZ_OBJ);
		tmp_arg.o.fd = temp_fd;
		if (ret) {
			ret = -EINVAL;
			goto out;
		}
		if (copy_to_user(u64_to_user_ptr
				(user_req->buf_addr + i * sizeof(tmp_arg)),
				&tmp_arg, sizeof(tmp_arg))) {
			ret = -EFAULT;
			goto out;
		}
		trace_marshal_in_tzcb_req_fd(i, tz_args[i].handle, srvr_id, temp_fd);
	}
out:
	return ret;
}
static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
		struct smcinvoke_cb_txn *cb_txn,
		struct file **arr_filp)
{
	int ret = -EINVAL, i = 0;
	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
	struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req;
	union smcinvoke_tz_args *tz_args = tzcb_req->args;

	release_tzhandles(&cb_txn->cb_req->hdr.tzhandle, 1);
	tzcb_req->result = user_req->result;
	/*
	 * Return without marshaling user args if the destination callback
	 * invocation was unsuccessful.
	 */
	if (tzcb_req->result != 0) {
		ret = 0;
		goto out;
	}
	FOR_ARGS(i, tzcb_req->hdr.counts, BO) {
		union smcinvoke_arg tmp_arg;

		if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr(
				user_req->buf_addr + i * sizeof(union smcinvoke_arg)),
				sizeof(union smcinvoke_arg))) {
			ret = -EFAULT;
			goto out;
		}
		if (tmp_arg.b.size > tz_args[i].b.size)
			goto out;
		if (copy_from_user((uint8_t *)(tzcb_req) + tz_args[i].b.offset,
				u64_to_user_ptr(tmp_arg.b.addr),
				tmp_arg.b.size)) {
			ret = -EFAULT;
			goto out;
		}
	}
	FOR_ARGS(i, tzcb_req->hdr.counts, OO) {
		union smcinvoke_arg tmp_arg;

		if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr(
				user_req->buf_addr + i * sizeof(union smcinvoke_arg)),
				sizeof(union smcinvoke_arg))) {
			ret = -EFAULT;
			goto out;
		}
		ret = get_tzhandle_from_uhandle(tmp_arg.o.fd,
				tmp_arg.o.cb_server_fd, &arr_filp[i],
				&(tz_args[i].handle), NULL);
		if (ret)
			goto out;
		tzhandles_to_release[i] = tz_args[i].handle;
		trace_marshal_out_tzcb_req(i, tmp_arg.o.fd,
				tmp_arg.o.cb_server_fd, tz_args[i].handle);
	}
	ret = 0;
out:
	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
		if (TZHANDLE_IS_CB_OBJ(tz_args[i].handle))
			release_tzhandles(&tz_args[i].handle, 1);
	}
	if (ret)
		release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO);
	return ret;
}
static void set_tz_version(uint32_t tz_version)
{
	tz_async_version = tz_version;

	/* We enable async memory object support when the TZ async version
	 * is equal to or later than the driver version. If the protocol
	 * changes in later TZ versions, TZ is expected to remain backward
	 * compatible, so this condition should still hold.
	 */
	if (tz_version >= SMCINVOKE_ASYNC_VERSION) {
		mem_obj_async_support = true;
		pr_debug("Enabled asynchronous memory object support\n");
	}
}
static void process_piggyback_data(void *buf, size_t buf_size)
{
	int i;
	struct smcinvoke_tzcb_req req = {0};
	struct smcinvoke_piggyback_msg *msg = buf;
	int32_t *objs = msg->objs;

	for (i = 0; i < msg->counts; i++) {
		req.hdr.op = msg->op;
		req.hdr.counts = 0; /* release op does not require any args */
		req.hdr.tzhandle = objs[i];
		if (tz_async_version == 0)
			set_tz_version(msg->version);
		process_tzcb_req(&req, sizeof(struct smcinvoke_tzcb_req), NULL);
		/* cbobjs_in_flight will be adjusted during CB processing */
	}
}
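
/*
 * Illustrative note (not part of the original driver): the piggyback area
 * of the outbound buffer lets TZ batch object releases onto the tail of a
 * normal invoke response. A sketch of the layout consumed above, assuming
 * the struct smcinvoke_piggyback_msg fields used in this file:
 *
 *	| version | op (release) | counts = N | objs[0] ... objs[N-1] |
 *
 * Each objs[i] is a tzhandle; every entry is turned into a standalone
 * release request and fed through process_tzcb_req().
 */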
/* Add memory object mapping data to the async side channel, so it is
 * available to TZ together with the memory object.
 *
 * No return value, as TZ is always able to explicitly ask for this
 * information in case this function fails.
 */
static void add_mem_obj_info_to_async_side_channel_locked(void *buf, size_t buf_size, struct list_head *l_pending_mem_obj)
{
	struct smcinvoke_mem_obj_msg *msg = buf;
	struct smcinvoke_mem_obj_pending_async *mem_obj_pending = NULL;
	size_t header_size = 0;
	size_t mo_size = 0;
	size_t used = 0;
	size_t index = 0;

	if (list_empty(l_pending_mem_obj))
		return;

	header_size = sizeof(struct smcinvoke_mem_obj_msg);
	mo_size = sizeof(struct smcinvoke_mem_obj_info);

	/* Minimal size required is the header data + one mem obj info */
	if (buf_size < header_size + mo_size) {
		pr_err("Unable to add memory object info to async channel\n");
		return;
	}

	msg->version = SMCINVOKE_ASYNC_VERSION;
	msg->op = SMCINVOKE_ASYNC_OP_MEMORY_OBJECT;
	msg->count = 0;

	used = header_size;
	index = 0;

	list_for_each_entry(mem_obj_pending, l_pending_mem_obj, list) {
		if (!mem_obj_pending->mem_obj) {
			pr_err("Memory object is no longer valid\n");
			continue;
		}
		if (used + mo_size > buf_size) {
			pr_err("Not all memory object info was added to the async channel\n");
			break;
		}
		msg->mo[index].memObjRef = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID, mem_obj_pending->mem_obj->mem_region_id);
		msg->mo[index].mapObjRef = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj_pending->mem_obj->mem_map_obj_id);
		msg->mo[index].addr = mem_obj_pending->mem_obj->p_addr;
		msg->mo[index].size = mem_obj_pending->mem_obj->p_addr_len;
		msg->mo[index].perm = SMCINVOKE_MEM_PERM_RW;
		used += sizeof(msg->mo[index]);
		index++;
	}
	msg->count = index;
	pr_debug("Added %zu memory objects to the side channel, total size = %zu\n",
			index, used);
}
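
/*
 * Illustrative note (not part of the original driver): the side-channel
 * message built above, assuming a single pending memory object:
 *
 *	| version | op = MEMORY_OBJECT | count = 1 |
 *	| memObjRef | mapObjRef | addr | size | perm |
 *
 * TZ can consume this instead of issuing a separate OBJECT_OP_MAP_REGION
 * callback for each new memory object, saving one world switch per object.
 */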
/*
 * Delete the entire pending async list.
 */
static void delete_pending_async_list_locked(struct list_head *l_pending_mem_obj)
{
	struct smcinvoke_mem_obj_pending_async *mem_obj_pending = NULL;
	struct smcinvoke_mem_obj_pending_async *temp = NULL;

	if (list_empty(l_pending_mem_obj))
		return;

	list_for_each_entry_safe(mem_obj_pending, temp, l_pending_mem_obj, list) {
		mem_obj_pending->mem_obj = NULL;
		list_del(&mem_obj_pending->list);
		kfree(mem_obj_pending);
	}
}
static long process_ack_local_obj(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1;
	int32_t local_obj = SMCINVOKE_USERSPACE_OBJ_NULL;
	struct smcinvoke_file_data *filp_data = filp->private_data;

	if (_IOC_SIZE(cmd) != sizeof(int32_t))
		return -EINVAL;

	ret = copy_from_user(&local_obj, (void __user *)(uintptr_t)arg,
			sizeof(int32_t));
	if (ret)
		return -EFAULT;

	mutex_lock(&g_smcinvoke_lock);
	if (UHANDLE_IS_CB_OBJ(local_obj))
		ret = put_pending_cbobj_locked(filp_data->server_id,
				UHANDLE_GET_CB_OBJ(local_obj));
	mutex_unlock(&g_smcinvoke_lock);

	return ret;
}
static long process_server_req(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1;
	int32_t server_fd = -1;
	struct smcinvoke_server server_req = {0};
	struct smcinvoke_server_info *server_info = NULL;

	if (_IOC_SIZE(cmd) != sizeof(server_req)) {
		pr_err("invalid command size received for server request\n");
		return -EINVAL;
	}
	ret = copy_from_user(&server_req, (void __user *)(uintptr_t)arg,
			sizeof(server_req));
	if (ret) {
		pr_err("copying server request from user failed\n");
		return -EFAULT;
	}
	server_info = kzalloc(sizeof(*server_info), GFP_KERNEL);
	if (!server_info)
		return -ENOMEM;

	kref_init(&server_info->ref_cnt);
	init_waitqueue_head(&server_info->req_wait_q);
	init_waitqueue_head(&server_info->rsp_wait_q);
	server_info->cb_buf_size = server_req.cb_buf_size;
	hash_init(server_info->reqs_table);
	hash_init(server_info->responses_table);
	INIT_LIST_HEAD(&server_info->pending_cbobjs);
	server_info->is_server_suspended = 0;

	mutex_lock(&g_smcinvoke_lock);

	server_info->server_id = next_cb_server_id_locked();
	hash_add(g_cb_servers, &server_info->hash,
			server_info->server_id);
	if (g_max_cb_buf_size < server_req.cb_buf_size)
		g_max_cb_buf_size = server_req.cb_buf_size;

	mutex_unlock(&g_smcinvoke_lock);
	ret = get_fd_for_obj(SMCINVOKE_OBJ_TYPE_SERVER,
			server_info->server_id, &server_fd);

	if (ret)
		release_cb_server(server_info->server_id);

	return server_fd;
}
static long process_accept_req(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1;
	struct smcinvoke_file_data *server_obj = filp->private_data;
	struct smcinvoke_accept user_args = {0};
	struct smcinvoke_cb_txn *cb_txn = NULL;
	struct smcinvoke_server_info *server_info = NULL;

	if (_IOC_SIZE(cmd) != sizeof(struct smcinvoke_accept)) {
		pr_err("command size invalid for accept request\n");
		return -EINVAL;
	}

	if (copy_from_user(&user_args, (void __user *)arg,
			sizeof(struct smcinvoke_accept))) {
		pr_err("copying accept request from user failed\n");
		return -EFAULT;
	}

	if (user_args.argsize != sizeof(union smcinvoke_arg)) {
		pr_err("arguments size is invalid for accept thread\n");
		return -EINVAL;
	}

	/* ACCEPT is available only on server obj */
	if (server_obj->context_type != SMCINVOKE_OBJ_TYPE_SERVER) {
		pr_err("invalid object type received for accept req\n");
		return -EPERM;
	}

	mutex_lock(&g_smcinvoke_lock);
	server_info = get_cb_server_locked(server_obj->server_id);

	if (!server_info) {
		pr_err("No matching server with server id : %u found\n",
				server_obj->server_id);
		mutex_unlock(&g_smcinvoke_lock);
		return -EINVAL;
	}

	if (server_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT)
		server_info->state = 0;

	server_info->is_server_suspended = UNSET_BIT(server_info->is_server_suspended,
			(current->pid) % DEFAULT_CB_OBJ_THREAD_CNT);

	mutex_unlock(&g_smcinvoke_lock);

	/* First check if it has a response, otherwise wait for a request */
	if (user_args.has_resp) {
		trace_process_accept_req_has_response(current->pid, current->tgid);

		mutex_lock(&g_smcinvoke_lock);
		cb_txn = find_cbtxn_locked(server_info, user_args.txn_id,
				SMCINVOKE_REQ_PROCESSING);
		mutex_unlock(&g_smcinvoke_lock);
		/*
		 * cb_txn can be null if userspace provides wrong txn id OR
		 * invoke thread died while server was processing cb req.
		 * if invoke thread dies, it would remove req from Q. So
		 * no matching cb_txn would be on Q and hence NULL cb_txn.
		 * In this case, we want this thread to start waiting
		 * for new cb requests.
		 */
		if (!cb_txn) {
			pr_err("%s txn %d either invalid or removed from Q\n",
					__func__, user_args.txn_id);
			goto start_waiting_for_requests;
		}
		ret = marshal_out_tzcb_req(&user_args, cb_txn,
				cb_txn->filp_to_release);
		/*
		 * if client did not set error and we get error locally,
		 * we return local error to TA
		 */
		if (ret && cb_txn->cb_req->result == 0)
			cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;

		cb_txn->state = SMCINVOKE_REQ_PROCESSED;

		mutex_lock(&g_smcinvoke_lock);
		kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
		mutex_unlock(&g_smcinvoke_lock);
		wake_up(&server_info->rsp_wait_q);
		/*
		 * if marshal_out fails, we should let userspace release
		 * any ref/obj it created for CB processing
		 */
		if (ret && OBJECT_COUNTS_NUM_OO(user_args.counts))
			goto out;
	}

start_waiting_for_requests:
	/*
	 * Once the response has been delivered, the thread will wait for
	 * another callback req to process.
	 */
	do {
		ret = wait_event_interruptible(server_info->req_wait_q,
				!hash_empty(server_info->reqs_table));
		if (ret) {
			trace_process_accept_req_ret(current->pid, current->tgid, ret);
			/*
			 * Ideally, we should destroy server if accept threads
			 * are returning due to client being killed or device
			 * going down (Shutdown/Reboot) but that would make
			 * server_info invalid. Other accept/invoke threads are
			 * using server_info and would crash. So don't do that.
			 */
			mutex_lock(&g_smcinvoke_lock);

			if (freezing(current)) {
				pr_err("Server id: %d interrupted, probably due to suspend, pid: %d\n",
						server_info->server_id, current->pid);
				/*
				 * Each accept thread is identified by bits ranging from
				 * 0 to DEFAULT_CB_OBJ_THREAD_CNT-1. When an accept thread
				 * is interrupted by a signal other than SIGUSR1, SIGKILL
				 * or SIGTERM, set the corresponding bit for that accept
				 * thread, marking its state as "suspended", i.e. needing
				 * an infinite timeout on the invoke thread side.
				 */
				server_info->is_server_suspended =
						SET_BIT(server_info->is_server_suspended,
						(current->pid) % DEFAULT_CB_OBJ_THREAD_CNT);
			} else {
				pr_err("Setting pid: %d, server id: %d state to defunct\n",
						current->pid, server_info->server_id);
				server_info->state = SMCINVOKE_SERVER_STATE_DEFUNCT;
			}
			mutex_unlock(&g_smcinvoke_lock);
			wake_up_interruptible(&server_info->rsp_wait_q);
			goto out;
		}
		mutex_lock(&g_smcinvoke_lock);
		cb_txn = find_cbtxn_locked(server_info,
				SMCINVOKE_NEXT_AVAILABLE_TXN,
				SMCINVOKE_REQ_PLACED);
		mutex_unlock(&g_smcinvoke_lock);
		if (cb_txn) {
			cb_txn->state = SMCINVOKE_REQ_PROCESSING;
			ret = marshal_in_tzcb_req(cb_txn, &user_args,
					server_obj->server_id);
			if (ret) {
				pr_err("failed to marshal in the callback request\n");
				cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;
				cb_txn->state = SMCINVOKE_REQ_PROCESSED;
				mutex_lock(&g_smcinvoke_lock);
				kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
				mutex_unlock(&g_smcinvoke_lock);
				wake_up_interruptible(&server_info->rsp_wait_q);
				continue;
			}
			mutex_lock(&g_smcinvoke_lock);
			hash_add(server_info->responses_table, &cb_txn->hash,
					cb_txn->txn_id);
			kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
			mutex_unlock(&g_smcinvoke_lock);
			trace_process_accept_req_placed(current->pid, current->tgid);
			ret = copy_to_user((void __user *)arg, &user_args,
					sizeof(struct smcinvoke_accept));
		}
	} while (!cb_txn);
out:
	if (server_info)
		kref_put(&server_info->ref_cnt, destroy_cb_server);

	if (ret && ret != -ERESTARTSYS)
		pr_err("accept thread returning with ret: %d\n", ret);

	return ret;
}
static long process_invoke_req(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = -1, nr_args = 0;
	struct smcinvoke_cmd_req req = {0};
	void *in_msg = NULL, *out_msg = NULL;
	size_t inmsg_size = 0, outmsg_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
	union smcinvoke_arg *args_buf = NULL;
	struct smcinvoke_file_data *tzobj = filp->private_data;
	struct qtee_shm in_shm = {0}, out_shm = {0};
	LIST_HEAD(l_mem_objs_pending_async);	/* Holds new memory objects, to be later sent to TZ */

	/*
	 * Hold reference to remote object until invoke op is
	 * completed. Release once invoke is done.
	 */
	struct file *filp_to_release[OBJECT_COUNTS_MAX_OO] = {NULL};
	/*
	 * If anything goes wrong, release allotted tzhandles for
	 * local objs which could be either CBObj or MemObj.
	 */
	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
	bool tz_acked = false;
	uint32_t context_type = tzobj->context_type;

	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
			_IOC_SIZE(cmd) != sizeof(req)) {
		pr_err("command size for invoke req is invalid\n");
		return -EINVAL;
	}

	if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
			context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
		pr_err("invalid context_type %d\n", context_type);
		return -EPERM;
	}
	if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
		ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
		if (ret) {
			pr_err("copying invoke req failed\n");
			return -EFAULT;
		}
	} else {
		req = *(struct smcinvoke_cmd_req *)arg;
	}

	if (req.argsize != sizeof(union smcinvoke_arg)) {
		pr_err("arguments size for invoke req is invalid\n");
		return -EINVAL;
	}

	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
			tzobj->tzhandle == SMCINVOKE_TZ_ROOT_OBJ &&
			(req.op == IClientEnv_OP_notifyDomainChange ||
			req.op == IClientEnv_OP_registerWithCredentials ||
			req.op == IClientEnv_OP_accept ||
			req.op == IClientEnv_OP_adciShutdown)) {
		pr_err("invalid rootenv op\n");
		return -EINVAL;
	}

	nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) +
			OBJECT_COUNTS_NUM_objects(req.counts);

	if (nr_args) {
		args_buf = kcalloc(nr_args, req.argsize, GFP_KERNEL);
		if (!args_buf)
			return -ENOMEM;

		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
			ret = copy_from_user(args_buf,
					u64_to_user_ptr(req.args),
					nr_args * req.argsize);
			if (ret) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			memcpy(args_buf, (void *)(req.args),
					nr_args * req.argsize);
		}
	}

	inmsg_size = compute_in_msg_size(&req, args_buf);
	ret = qtee_shmbridge_allocate_shm(inmsg_size, &in_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for in msg in invoke req\n");
		goto out;
	}
	in_msg = in_shm.vaddr;

	mutex_lock(&g_smcinvoke_lock);
	outmsg_size = PAGE_ALIGN(g_max_cb_buf_size);
	mutex_unlock(&g_smcinvoke_lock);
	ret = qtee_shmbridge_allocate_shm(outmsg_size, &out_shm);
	if (ret) {
		ret = -ENOMEM;
		pr_err("shmbridge alloc failed for out msg in invoke req\n");
		goto out;
	}
	out_msg = out_shm.vaddr;

	trace_process_invoke_req_tzhandle(tzobj->tzhandle, req.op, req.counts);

	ret = marshal_in_invoke_req(&req, args_buf, tzobj->tzhandle, in_msg,
			inmsg_size, filp_to_release, tzhandles_to_release,
			context_type, &l_mem_objs_pending_async);
	if (ret) {
		pr_err("failed to marshal in invoke req, ret :%d\n", ret);
		goto out;
	}

	if (mem_obj_async_support) {
		mutex_lock(&g_smcinvoke_lock);
		add_mem_obj_info_to_async_side_channel_locked(out_msg, outmsg_size, &l_mem_objs_pending_async);
		delete_pending_async_list_locked(&l_mem_objs_pending_async);
		mutex_unlock(&g_smcinvoke_lock);
	}

	ret = prepare_send_scm_msg(in_msg, in_shm.paddr, inmsg_size,
			out_msg, out_shm.paddr, outmsg_size,
			&req, args_buf, &tz_acked, context_type,
			&in_shm, &out_shm);

	/*
	 * If the scm_call succeeded, TZ owns responsibility to release
	 * refs for local objs.
	 */
	if (!tz_acked) {
		trace_status(__func__, "scm call was not acked by TZ");
		goto out;
	}
	memset(tzhandles_to_release, 0, sizeof(tzhandles_to_release));

	/*
	 * if invoke op results in an err, no need to marshal_out and
	 * copy args buf to user space
	 */
	if (!req.result) {
		/*
		 * Don't check ret of marshal_out because there might be a
		 * FD for OO which userspace must release even if an error
		 * occurs. Releasing FD from userspace is much simpler than
		 * doing it here. ORing into ret is required so past errors
		 * are not lost.
		 */
		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ)
			ret |= copy_to_user(u64_to_user_ptr(req.args),
					args_buf, nr_args * req.argsize);
		else
			memcpy((void *)(req.args), args_buf,
					nr_args * req.argsize);
	}

	/* copy result of invoke op */
	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
		ret |= copy_to_user((void __user *)arg, &req, sizeof(req));
		if (ret)
			goto out;
	} else {
		memcpy((void *)arg, (void *)&req, sizeof(req));
	}

	/* Outbuf could be carrying local objs to be released. */
	process_piggyback_data(out_msg, outmsg_size);
out:
	trace_process_invoke_req_result(ret, req.result, tzobj->tzhandle,
			req.op, req.counts);

	release_filp(filp_to_release, OBJECT_COUNTS_MAX_OO);
	if (ret)
		release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO);
	qtee_shmbridge_free_shm(&in_shm);
	qtee_shmbridge_free_shm(&out_shm);
	kfree(args_buf);

	if (ret)
		pr_err("invoke thread returning with ret = %d\n", ret);

	return ret;
}
static long process_log_info(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int ret = 0;
	char buf[SMCINVOKE_LOG_BUF_SIZE];
	struct smcinvoke_file_data *tzobj = filp->private_data;

	ret = copy_from_user(buf, (void __user *)arg, SMCINVOKE_LOG_BUF_SIZE);
	if (ret) {
		pr_err("logging HLOS info copy failed\n");
		return -EFAULT;
	}
	buf[SMCINVOKE_LOG_BUF_SIZE - 1] = '\0';

	trace_process_log_info(buf, tzobj->context_type, tzobj->tzhandle);

	return ret;
}
static long smcinvoke_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	long ret = 0;

	switch (cmd) {
	case SMCINVOKE_IOCTL_INVOKE_REQ:
		ret = process_invoke_req(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_ACCEPT_REQ:
		ret = process_accept_req(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_SERVER_REQ:
		ret = process_server_req(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_ACK_LOCAL_OBJ:
		ret = process_ack_local_obj(filp, cmd, arg);
		break;
	case SMCINVOKE_IOCTL_LOG:
		ret = process_log_info(filp, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	trace_smcinvoke_ioctl(cmd, ret);
	return ret;
}
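
/*
 * Illustrative note (not part of the original driver): a minimal userspace
 * sketch of driving this ioctl interface, assuming the uapi definitions
 * shipped with this driver and a device node named after SMCINVOKE_DEV:
 *
 *	int fd = open("/dev/smcinvoke", O_RDWR);	// root object
 *	struct smcinvoke_cmd_req req = {
 *		.op = op, .counts = counts,
 *		.args = (uintptr_t)args,
 *		.argsize = sizeof(union smcinvoke_arg),
 *	};
 *	ioctl(fd, SMCINVOKE_IOCTL_INVOKE_REQ, &req);
 *	// req.result carries the Object_* status from TZ
 */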
int get_root_fd(int *root_fd)
{
	if (!root_fd)
		return -EINVAL;
	else
		return get_fd_for_obj(SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL,
				SMCINVOKE_TZ_ROOT_OBJ, root_fd);
}
int process_invoke_request_from_kernel_client(int fd,
		struct smcinvoke_cmd_req *req)
{
	struct file *filp = NULL;
	int ret = 0;

	if (!req) {
		pr_err("NULL req\n");
		return -EINVAL;
	}

	filp = fget(fd);
	if (!filp) {
		pr_err("Invalid fd %d\n", fd);
		return -EINVAL;
	}
	ret = process_invoke_req(filp, 0, (uintptr_t)req);
	/* trace before fput() so filp is not referenced after the ref is dropped */
	trace_process_invoke_request_from_kernel_client(fd, filp, file_count(filp));
	fput(filp);
	return ret;
}
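
/*
 * Illustrative note (not part of the original driver): the intended kernel
 * client flow, pairing the helpers above. A minimal sketch:
 *
 *	int root_fd, ret;
 *	struct smcinvoke_cmd_req req = { ... };
 *
 *	ret = get_root_fd(&root_fd);
 *	if (!ret)
 *		ret = process_invoke_request_from_kernel_client(root_fd, &req);
 *	smcinvoke_release_from_kernel_client(root_fd);
 */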
char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm)
{
	int rc = 0;
	const struct firmware *fw_entry = NULL, *fw_entry00 = NULL, *fw_entrylast = NULL;
	char fw_name[MAX_APP_NAME_SIZE] = "\0";
	int num_images = 0, phi = 0;
	unsigned char app_arch = 0;
	u8 *img_data_ptr = NULL;
	size_t bufferOffset = 0, phdr_table_offset = 0;
	size_t *offset = NULL;
	Elf32_Phdr phdr32;
	Elf64_Phdr phdr64;
	struct elf32_hdr *ehdr = NULL;
	struct elf64_hdr *ehdr64 = NULL;

	/* load b00 */
	snprintf(fw_name, sizeof(fw_name), "%s.b00", appname);
	rc = firmware_request_nowarn(&fw_entry00, fw_name, class_dev);
	if (rc) {
		pr_err("Load %s failed, ret:%d\n", fw_name, rc);
		return NULL;
	}

	app_arch = *(unsigned char *)(fw_entry00->data + EI_CLASS);

	/* Get the offsets for the split images from the program headers */
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry00->data;
		num_images = ehdr->e_phnum;
		offset = kcalloc(num_images, sizeof(size_t), GFP_KERNEL);
		if (offset == NULL)
			goto release_fw_entry00;
		phdr_table_offset = (size_t)ehdr->e_phoff;
		for (phi = 1; phi < num_images; ++phi) {
			bufferOffset = phdr_table_offset + phi * sizeof(Elf32_Phdr);
			phdr32 = *(Elf32_Phdr *)(fw_entry00->data + bufferOffset);
			offset[phi] = (size_t)phdr32.p_offset;
		}
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry00->data;
		num_images = ehdr64->e_phnum;
		offset = kcalloc(num_images, sizeof(size_t), GFP_KERNEL);
		if (offset == NULL)
			goto release_fw_entry00;
		phdr_table_offset = (size_t)ehdr64->e_phoff;
		for (phi = 1; phi < num_images; ++phi) {
			bufferOffset = phdr_table_offset + phi * sizeof(Elf64_Phdr);
			phdr64 = *(Elf64_Phdr *)(fw_entry00->data + bufferOffset);
			offset[phi] = (size_t)phdr64.p_offset;
		}
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n", appname, app_arch);
		goto release_fw_entry00;
	}

	/* Find the size of the last split bin image */
	snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, num_images - 1);
	rc = firmware_request_nowarn(&fw_entrylast, fw_name, class_dev);
	if (rc) {
		pr_err("Failed to locate blob %s\n", fw_name);
		goto release_fw_entry00;
	}

	/* Total image size is the offset of the last image plus its size */
	*fw_size = fw_entrylast->size + offset[num_images - 1];

	/* Allocate memory for the buffer that will hold the stitched image */
	rc = qtee_shmbridge_allocate_shm((*fw_size), shm);
	if (rc) {
		pr_err("shmbridge alloc failed for size: %zu\n", *fw_size);
		goto release_fw_entrylast;
	}
	img_data_ptr = shm->vaddr;
	/*
	 * Copy contents of split bins to the buffer
	 */
	memcpy(img_data_ptr, fw_entry00->data, fw_entry00->size);
	for (phi = 1; phi < num_images - 1; phi++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, phi);
		rc = firmware_request_nowarn(&fw_entry, fw_name, class_dev);
		if (rc) {
			pr_err("Failed to locate blob %s\n", fw_name);
			qtee_shmbridge_free_shm(shm);
			img_data_ptr = NULL;
			goto release_fw_entrylast;
		}
		memcpy(img_data_ptr + offset[phi], fw_entry->data, fw_entry->size);
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	memcpy(img_data_ptr + offset[phi], fw_entrylast->data, fw_entrylast->size);

release_fw_entrylast:
	release_firmware(fw_entrylast);
release_fw_entry00:
	release_firmware(fw_entry00);
	kfree(offset);
	return img_data_ptr;
}
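
/*
 * Illustrative note (not part of the original driver): split TA images are
 * stitched back into one contiguous buffer using the ELF program-header
 * offsets. For a hypothetical app "foo" split into 3 blobs:
 *
 *	foo.b00  (ELF + program headers)  -> copied at offset 0
 *	foo.b01                           -> copied at offset[1] (p_offset)
 *	foo.b02  (last blob)              -> copied at offset[2]
 *
 *	*fw_size = offset[2] + sizeof(foo.b02 contents)
 */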
EXPORT_SYMBOL(firmware_request_from_smcinvoke);

static int smcinvoke_open(struct inode *nodp, struct file *filp)
{
	struct smcinvoke_file_data *tzcxt = NULL;

	tzcxt = kzalloc(sizeof(*tzcxt), GFP_KERNEL);
	if (!tzcxt)
		return -ENOMEM;

	tzcxt->tzhandle = SMCINVOKE_TZ_ROOT_OBJ;
	tzcxt->context_type = SMCINVOKE_OBJ_TYPE_TZ_OBJ;
	filp->private_data = tzcxt;

	return 0;
}
static int release_cb_server(uint16_t server_id)
{
	struct smcinvoke_server_info *server = NULL;

	mutex_lock(&g_smcinvoke_lock);
	server = find_cb_server_locked(server_id);
	if (server)
		kref_put(&server->ref_cnt, destroy_cb_server);
	mutex_unlock(&g_smcinvoke_lock);
	return 0;
}
int smcinvoke_release_filp(struct file *filp)
{
	int ret = 0;
	struct smcinvoke_file_data *file_data = filp->private_data;
	uint32_t tzhandle = 0;
	struct smcinvoke_object_release_pending_list *entry = NULL;

	trace_smcinvoke_release_filp(current->files, filp,
			file_count(filp), file_data->context_type);

	if (file_data->context_type == SMCINVOKE_OBJ_TYPE_SERVER) {
		ret = release_cb_server(file_data->server_id);
		goto out;
	}

	tzhandle = file_data->tzhandle;
	/* The root object is special in that it is indestructible */
	if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ)
		goto out;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		ret = -ENOMEM;
		goto out;
	}
	entry->data.tzhandle = tzhandle;
	entry->data.context_type = file_data->context_type;

	mutex_lock(&object_postprocess_lock);
	list_add_tail(&entry->list, &g_object_postprocess);
	mutex_unlock(&object_postprocess_lock);

	pr_debug("Object release list: added a handle:0x%x\n", tzhandle);
	__wakeup_postprocess_kthread(&smcinvoke[OBJECT_WORKER_THREAD]);

out:
	kfree(filp->private_data);
	filp->private_data = NULL;

	return ret;
}
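
/*
 * Kernel-client variant of release. fget()/fput() only bracket the filp
 * lookup so the struct file cannot vanish mid-call; the TZ teardown is
 * done explicitly via smcinvoke_release_filp() rather than waiting for
 * the last fput() to trigger ->release().
 */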
int smcinvoke_release_from_kernel_client(int fd)
{
	int ret = 0;
	struct file *filp = NULL;

	/*
	 * fget() takes an extra reference on the filp so it stays valid
	 * for the duration of this call; fput() below drops that same
	 * reference.
	 */
	filp = fget(fd);
	if (!filp) {
		pr_err("invalid fd %d to release\n", fd);
		return -EINVAL;
	}

	trace_smcinvoke_release_from_kernel_client(current->files, filp,
			file_count(filp));

	/* Free the per-fd state and notify TZ to release the object */
	ret = smcinvoke_release_filp(filp);
	fput(filp);

	return ret;
}
static int smcinvoke_release(struct inode *nodp, struct file *filp)
{
	trace_smcinvoke_release(current->files, filp, file_count(filp),
			filp->private_data);

	if (filp->private_data)
		return smcinvoke_release_filp(filp);

	return 0;
}
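
/*
 * Probe brings the driver up in dependency order: DMA mask, DT config,
 * worker kthreads, then the char device, so the device node only becomes
 * visible once everything behind it is ready. The exit labels below
 * unwind exactly the steps that succeeded, in reverse order.
 */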
static int smcinvoke_probe(struct platform_device *pdev)
{
	unsigned int baseminor = 0;
	unsigned int count = 1;
	int rc = 0;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		pr_err("dma_set_mask_and_coherent failed %d\n", rc);
		return rc;
	}

	legacy_smc_call = of_property_read_bool(pdev->dev.of_node,
			"qcom,support-legacy_smc");
	invoke_cmd = legacy_smc_call ? SMCINVOKE_INVOKE_CMD_LEGACY : SMCINVOKE_INVOKE_CMD;

	rc = smcinvoke_create_kthreads();
	if (rc) {
		pr_err("smcinvoke_create_kthreads failed %d\n", rc);
		return rc;
	}

	rc = alloc_chrdev_region(&smcinvoke_device_no, baseminor, count,
			SMCINVOKE_DEV);
	if (rc < 0) {
		pr_err("chrdev_region failed %d for %s\n", rc, SMCINVOKE_DEV);
		goto exit_destroy_wkthread;
	}

	driver_class = class_create(THIS_MODULE, SMCINVOKE_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, smcinvoke_device_no,
			NULL, SMCINVOKE_DEV);
	/* device_create() returns ERR_PTR() on failure, never NULL */
	if (IS_ERR(class_dev)) {
		rc = -ENOMEM;
		pr_err("class_device_create failed %d\n", rc);
		goto exit_destroy_class;
	}

	cdev_init(&smcinvoke_cdev, &g_smcinvoke_fops);
	smcinvoke_cdev.owner = THIS_MODULE;

	rc = cdev_add(&smcinvoke_cdev, MKDEV(MAJOR(smcinvoke_device_no), 0),
			count);
	if (rc < 0) {
		pr_err("cdev_add failed %d for %s\n", rc, SMCINVOKE_DEV);
		goto exit_destroy_device;
	}
	smcinvoke_pdev = pdev;

#if !IS_ENABLED(CONFIG_QSEECOM) && IS_ENABLED(CONFIG_QSEECOM_PROXY)
	/*
	 * If the API fails to get the func ops, print the error and
	 * continue; do not treat it as fatal.
	 */
	rc = get_qseecom_kernel_fun_ops();
	if (rc)
		pr_err("failed to get qseecom kernel func ops %d\n", rc);
#endif

	__wakeup_postprocess_kthread(&smcinvoke[ADCI_WORKER_THREAD]);
	return 0;

exit_destroy_device:
	device_destroy(driver_class, smcinvoke_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(smcinvoke_device_no, count);
exit_destroy_wkthread:
	smcinvoke_destroy_kthreads();

	return rc;
}
static int smcinvoke_remove(struct platform_device *pdev)
{
	int count = 1;

	smcinvoke_destroy_kthreads();
	cdev_del(&smcinvoke_cdev);
	device_destroy(driver_class, smcinvoke_device_no);
	class_destroy(driver_class);
	unregister_chrdev_region(smcinvoke_device_no, count);

	return 0;
}
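
/*
 * Power management: suspend is refused while any callback request is
 * outstanding, presumably because a suspended driver could not service
 * the pending TZ callback and the requester would hang; resume has
 * nothing to restore.
 */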
static int __maybe_unused smcinvoke_suspend(struct platform_device *pdev,
		pm_message_t state)
{
	int ret = 0;

	mutex_lock(&g_smcinvoke_lock);
	if (cb_reqs_inflight) {
		pr_err("Failed to suspend smcinvoke driver: callback requests are in flight\n");
		ret = -EIO;
	}
	mutex_unlock(&g_smcinvoke_lock);

	return ret;
}

static int __maybe_unused smcinvoke_resume(struct platform_device *pdev)
{
	return 0;
}
static const struct of_device_id smcinvoke_match[] = {
	{
		.compatible = "qcom,smcinvoke",
	},
	{},
};

static struct platform_driver smcinvoke_plat_driver = {
	.probe = smcinvoke_probe,
	.remove = smcinvoke_remove,
	.suspend = smcinvoke_suspend,
	.resume = smcinvoke_resume,
	.driver = {
		.name = "smcinvoke",
		.of_match_table = smcinvoke_match,
	},
};

static int smcinvoke_init(void)
{
	return platform_driver_register(&smcinvoke_plat_driver);
}

static void smcinvoke_exit(void)
{
	platform_driver_unregister(&smcinvoke_plat_driver);
}

module_init(smcinvoke_init);
module_exit(smcinvoke_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SMC Invoke driver");
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
MODULE_IMPORT_NS(DMA_BUF);