fastrpc.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/qcom_scm.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4 /* adsp, mdsp, slpi, cdsp */
#define FASTRPC_MAX_SESSIONS	14
#define FASTRPC_MAX_VMIDS	16
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_DSP_UTILITIES_HANDLE	2
#define FASTRPC_CTXID_MASK	(0xFF0)
#define INIT_FILELEN_MAX	(2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME	"fastrpc"
#define ADSP_MMAP_ADD_PAGES	0x1000
#define DSP_UNSUPPORTED_API	(0x80000414)
/* MAX NUMBER of DSP ATTRIBUTES SUPPORTED */
#define FASTRPC_MAX_DSP_ATTRIBUTES (256)
#define FASTRPC_MAX_DSP_ATTRIBUTES_LEN (sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)

/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +    \
					 REMOTE_SCALARS_OUTBUFS(sc) +   \
					 REMOTE_SCALARS_INHANDLES(sc) + \
					 REMOTE_SCALARS_OUTHANDLES(sc))

#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)	\
				(((attr & 0x07) << 29) |	\
				((method & 0x1f) << 24) |	\
				((in & 0xff) << 16) |		\
				((out & 0xff) << 8) |		\
				((oin & 0x0f) << 4) |		\
				(oout & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
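/*
 * Example: FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0) packs method 6
 * with four input buffers and no output buffers or handles into 0x06040000,
 * the same layout decoded by the REMOTE_SCALARS_* helpers above.
 */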
#define FASTRPC_CREATE_PROCESS_NARGS	6
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_MMAP		4
#define FASTRPC_RMID_INIT_MUNMAP	5
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8
#define FASTRPC_RMID_INIT_MEM_MAP	10
#define FASTRPC_RMID_INIT_MEM_UNMAP	11

/* Protection Domain (PD) ids */
#define AUDIO_PD	(0) /* also GUEST_OS PD? */
#define USER_PD		(1)
#define SENSORS_PD	(2)

#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp"};

struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};

struct fastrpc_remote_dmahandle {
	s32 fd;			/* dma handle fd */
	u32 offset;		/* dma handle offset */
	u32 len;		/* dma handle length */
};

struct fastrpc_remote_buf {
	u64 pv;			/* buffer pointer */
	u64 len;		/* length of buffer */
};

union fastrpc_remote_arg {
	struct fastrpc_remote_buf buf;
	struct fastrpc_remote_dmahandle dma;
};

struct fastrpc_mmap_rsp_msg {
	u64 vaddr;
};

struct fastrpc_mmap_req_msg {
	s32 pgid;
	u32 flags;
	u64 vaddr;
	s32 num;
};

struct fastrpc_mem_map_req_msg {
	s32 pgid;
	s32 fd;
	s32 offset;
	u32 flags;
	u64 vaddrin;
	s32 num;
	s32 data_len;
};

struct fastrpc_munmap_req_msg {
	s32 pgid;
	u64 vaddr;
	u64 size;
};

struct fastrpc_mem_unmap_req_msg {
	s32 pgid;
	s32 fd;
	u64 vaddrin;
	u64 len;
};

struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};

struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
	/* mmap support */
	struct list_head node; /* list of user requested mmaps */
	uintptr_t raddr;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	u64 raddr;
	u32 attr;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	union fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	int vmcount;
	u64 perms;
	struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct kref refcount;
	/* Flag if dsp attributes are cached */
	bool valid_attributes;
	u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
	struct fastrpc_device *secure_fdevice;
	struct fastrpc_device *fdevice;
	bool secure;
	bool unsigned_support;
};

struct fastrpc_device {
	struct fastrpc_channel_ctx *cctx;
	struct miscdevice miscdev;
	bool secure;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;
	struct list_head mmaps;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	bool is_secure_dev;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for allocations */
	struct mutex mutex;
};
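/*
 * kref release callback for a fastrpc_map: hands ownership of a
 * FASTRPC_ATTR_SECUREMAP buffer back to HLOS, tears down the dma-buf
 * attachment and drops the map from its owner's list before freeing it.
 */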
static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		if (map->attr & FASTRPC_ATTR_SECUREMAP) {
			struct qcom_scm_vmperm perm;
			int err = 0;

			perm.vmid = QCOM_SCM_VMID_HLOS;
			perm.perm = QCOM_SCM_PERM_RWX;
			err = qcom_scm_assign_mem(map->phys, map->size,
						  &map->fl->cctx->perms, &perm, 1);
			if (err) {
				dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
					map->phys, map->size, err);
				return;
			}
		}
		dma_buf_unmap_attachment_unlocked(map->attach, map->table,
						  DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	if (map->fl) {
		spin_lock(&map->fl->lock);
		list_del(&map->node);
		spin_unlock(&map->fl->lock);
		map->fl = NULL;
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static int fastrpc_map_get(struct fastrpc_map *map)
{
	if (!map)
		return -ENOENT;

	return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
}

static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
			      struct fastrpc_map **ppmap, bool take_ref)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int ret = -ENOENT;

	spin_lock(&fl->lock);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd != fd)
			continue;

		if (take_ref) {
			ret = fastrpc_map_get(map);
			if (ret) {
				dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
					__func__, fd, ret);
				break;
			}
		}

		*ppmap = map;
		ret = 0;
		break;
	}
	spin_unlock(&fl->lock);

	return ret;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}
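/*
 * Allocate a DMA-coherent buffer for this user. The session id (sid) is
 * encoded into the upper 32 bits of the physical address so the DSP-side
 * context can be identified; FASTRPC_PHYS() strips it again for CPU use.
 */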
static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	INIT_LIST_HEAD(&buf->node);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;
	buf->raddr = 0;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}

static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nbufs; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}

#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}
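/*
 * Compute, for each input/output buffer, the sub-range (mstart/mend) that
 * does not overlap a previously seen buffer, so overlapping user buffers are
 * only copied once into the inline payload; offset records how far into the
 * buffer that non-overlapping part starts.
 */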
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}
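/*
 * Allocate and register an invoke context: the map and overlap arrays are
 * sized for all scalars, the context is queued on the user's pending list,
 * and an id allocated from the channel IDR is shifted left by 4 (matching
 * FASTRPC_CTXID_MASK) so the DSP response can be matched back to it.
 */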
static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	/* Released in fastrpc_context_put() */
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}

static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = &a->sgt;

	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
	if (ret)
		table = ERR_PTR(ret);
	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sgtable(attach->dev, table, dir, 0);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
				    struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}

static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	iosys_map_set_vaddr(map, buf->virt);

	return 0;
}

static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}

static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detatch,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};
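/*
 * Look up or create a mapping for a caller-supplied dma-buf fd. On first use
 * the buffer is attached and DMA-mapped on the session device; for
 * FASTRPC_ATTR_SECUREMAP buffers, ownership is also assigned to the remote
 * VM(s) described in the channel's vmperms table.
 */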
static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, u32 attr, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	struct sg_table *table;
	int err = 0;

	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(table)) {
		err = PTR_ERR(table);
		goto map_err;
	}
	map->table = table;

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	if (attr & FASTRPC_ATTR_SECUREMAP) {
		/*
		 * If subsystem VMIDs are defined in DTSI, then do
		 * hyp_assign from HLOS to those VM(s)
		 */
		map->attr = attr;
		err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms,
					  fl->cctx->vmperms, fl->cctx->vmcount);
		if (err) {
			dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
				map->phys, map->size, err);
			goto map_err;
		}
	}
	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}
/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(union fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */
static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_buf) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int oix;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (oix = 0; oix < ctx->nbufs; oix++) {
		int i = ctx->olaps[oix].raix;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[oix].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
		}
	}

	return size;
}

static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, ctx->args[i].attr,
					 &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}

	}
	return 0;
}

static struct fastrpc_invoke_buf *fastrpc_invoke_buf_start(union fastrpc_remote_arg *pra, int len)
{
	return (struct fastrpc_invoke_buf *)(&pra[len]);
}

static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf *buf, int len)
{
	return (struct fastrpc_phy_page *)(&buf[len]);
}
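/*
 * Build the payload buffer described above: the metadata block (remote args,
 * invoke buffer list, page list, fd/crc lists) followed by inline copies of
 * the non-fd-backed input buffers, each aligned to FASTRPC_ALIGN.
 */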
static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	union fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	memset(ctx->buf->virt, 0, pkt_size);
	rpra = ctx->buf->virt;
	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
	pages = fastrpc_phy_page_start(list, ctx->nscalars);
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].buf.pv = 0;
		rpra[i].buf.len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].buf.pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			mmap_read_lock(current->mm);
			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;
			mmap_read_unlock(current->mm);

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].buf.pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		if (ctx->maps[i]) {
			pages[i].addr = ctx->maps[i]->phys;
			pages[i].size = ctx->maps[i]->size;
		}
		rpra[i].dma.fd = ctx->args[i].fd;
		rpra[i].dma.len = ctx->args[i].length;
		rpra[i].dma.offset = (u64) ctx->args[i].ptr;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}
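/*
 * Copy output buffers that were passed inline back to user (or kernel)
 * memory after the DSP has written its results, then drop any maps the DSP
 * asked to release via the fd list in the metadata area.
 */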
static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	union fastrpc_remote_arg *rpra = ctx->rpra;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_map *mmap = NULL;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	u64 *fdlist;
	int i, inbufs, outbufs, handles;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
	handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
	list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
	pages = fastrpc_phy_page_start(list, ctx->nscalars);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		if (!ctx->maps[i]) {
			void *src = (void *)(uintptr_t)rpra[i].buf.pv;
			void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
			u64 len = rpra[i].buf.len;

			if (!kernel) {
				if (copy_to_user((void __user *)dst, src, len))
					return -EFAULT;
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	/* Clean up fdlist which is updated by DSP */
	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
		if (!fdlist[i])
			break;
		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
			fastrpc_map_put(mmap);
	}

	return 0;
}

static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;
	int ret;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));

	if (ret)
		fastrpc_context_put(ctx);

	return ret;
}
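/*
 * Core invocation path shared by the ioctl handlers and in-kernel callers:
 * marshal the arguments, send the message over rpmsg, wait for the DSP
 * response to complete ctx->work, then unmarshal the results. A minimal
 * in-kernel call, mirroring fastrpc_init_attach() below, looks like:
 *
 *	struct fastrpc_invoke_args args[1];
 *	int tgid = fl->tgid;
 *
 *	args[0].ptr = (u64)(uintptr_t)&tgid;
 *	args[0].length = sizeof(tgid);
 *	args[0].fd = -1;
 *	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
 *			FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0),
 *			&args[0]);
 */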
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	if (handle == FASTRPC_INIT_HANDLE && !kernel) {
		dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
		return -EPERM;
	}

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = fastrpc_get_args(kernel, ctx);
	if (err)
		goto bail;

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	if (kernel) {
		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
			err = -ETIMEDOUT;
	} else {
		err = wait_for_completion_interruptible(&ctx->work);
	}

	if (err)
		goto bail;

	/* make sure that all memory writes by DSP are seen by CPU */
	dma_rmb();
	/* populate all the output buffers with results */
	err = fastrpc_put_args(ctx, kernel);
	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

bail:
	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
		/* We are done with this compute context */
		spin_lock(&fl->lock);
		list_del(&ctx->node);
		spin_unlock(&fl->lock);
		fastrpc_context_put(ctx);
	}
	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}

static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
{
	/* Check if the device node is non-secure and channel is secure */
	if (!fl->is_secure_dev && fl->cctx->secure) {
		/*
		 * Allow untrusted applications to offload only to Unsigned PD when
		 * channel is configured as secure and block untrusted apps on channel
		 * that does not support unsigned PD offload
		 */
		if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
			dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
			return true;
		}
	}

	return false;
}
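/*
 * Spawn a signed (or, with FASTRPC_MODE_UNSIGNED_MODULE, unsigned) user
 * process on the DSP. The shell ELF is passed in via init.filefd and a
 * scratch buffer of at least INIT_FILELEN_MAX is handed to the remote
 * loader through the page list.
 */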
static int fastrpc_init_create_process(struct fastrpc_user *fl,
					char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;
	bool unsigned_module = false;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
		unsigned_module = true;

	if (is_session_rejected(fl, unsigned_module)) {
		err = -ECONNREFUSED;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = USER_PD;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	fastrpc_map_put(map);
err:
	kfree(args);

	return err;
}

static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}

static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	struct fastrpc_buf *buf, *b;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node)
		fastrpc_map_put(map);

	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		list_del(&buf->node);
		fastrpc_buf_free(buf);
	}

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}
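/*
 * open() handler: create a fastrpc_user, take a reference on the channel
 * context and reserve one of the channel's session contexts for this
 * client; fails with -EBUSY when all sessions are in use.
 */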
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_device *fdevice;
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fdevice = miscdev_to_fdevice(filp->private_data);
	cctx = fdevice->cctx;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	/* Released in fastrpc_device_release() */
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->mmaps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;
	fl->is_secure_dev = fdevice->secure;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}

static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		/*
		 * The usercopy failed, but we can't do much about it, as
		 * dma_buf_fd() already called fd_install() and made the
		 * file descriptor accessible for the current process. It
		 * might already be closed and dmabuf no longer valid when
		 * we reach this point. Therefore "leak" the fd and rely on
		 * the process exit path to do any required cleanup.
		 */
		return -EFAULT;
	}

	return 0;
}

static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = pd;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}

static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}

static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
				     uint32_t dsp_attr_buf_len)
{
	struct fastrpc_invoke_args args[2] = { 0 };

	/* Capability filled in userspace */
	dsp_attr_buf[0] = 0;

	args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
	args[0].length = sizeof(dsp_attr_buf_len);
	args[0].fd = -1;
	args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
	args[1].length = dsp_attr_buf_len;
	args[1].fd = -1;
	fl->pd = USER_PD;

	return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
				       FASTRPC_SCALARS(0, 1, 1), args);
}
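/*
 * Return a DSP capability value to the caller, querying the DSP once per
 * channel and caching the whole attribute table in cctx->dsp_attributes
 * thereafter.
 */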
static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
					struct fastrpc_user *fl)
{
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	uint32_t attribute_id = cap->attribute_id;
	uint32_t *dsp_attributes;
	unsigned long flags;
	uint32_t domain = cap->domain;
	int err;

	spin_lock_irqsave(&cctx->lock, flags);
	/* check if we have already queried the DSP for attributes */
	if (cctx->valid_attributes) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto done;
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
	if (!dsp_attributes)
		return -ENOMEM;

	err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
	if (err == DSP_UNSUPPORTED_API) {
		dev_info(&cctx->rpdev->dev,
			 "Warning: DSP capabilities not supported on domain: %d\n", domain);
		kfree(dsp_attributes);
		return -EOPNOTSUPP;
	} else if (err) {
		dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
		kfree(dsp_attributes);
		return err;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
	cctx->valid_attributes = true;
	spin_unlock_irqrestore(&cctx->lock, flags);
	kfree(dsp_attributes);
done:
	cap->capability = cctx->dsp_attributes[attribute_id];
	return 0;
}

static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_ioctl_capability cap = {0};
	int err = 0;

	if (copy_from_user(&cap, argp, sizeof(cap)))
		return -EFAULT;

	cap.capability = 0;
	if (cap.domain >= FASTRPC_DEV_MAX) {
		dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
			cap.domain, err);
		return -ECHRNG;
	}

	/* Fastrpc capabilities are not supported on the modem domain */
	if (cap.domain == MDSP_DOMAIN_ID) {
		dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
		return -ECHRNG;
	}

	if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
		dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
			cap.attribute_id, err);
		return -EOVERFLOW;
	}

	err = fastrpc_get_info_from_kernel(&cap, fl);
	if (err)
		return err;

	if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
		return -EFAULT;

	return 0;
}
static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
				   struct fastrpc_req_munmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_buf *buf = NULL, *iter, *b;
	struct fastrpc_munmap_req_msg req_msg;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
		if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
			buf = iter;
			break;
		}
	}
	spin_unlock(&fl->lock);

	if (!buf) {
		dev_err(dev, "mmap not in list\n");
		return -EINVAL;
	}

	req_msg.pgid = fl->tgid;
	req_msg.size = buf->size;
	req_msg.vaddr = buf->raddr;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (!err) {
		dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
		spin_lock(&fl->lock);
		list_del(&buf->node);
		spin_unlock(&fl->lock);
		fastrpc_buf_free(buf);
	} else {
		dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
	}

	return err;
}

static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_req_munmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_munmap_impl(fl, &req);
}
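/*
 * mmap request handler for ADSP_MMAP_ADD_PAGES: allocate kernel-backed
 * pages, ask the DSP to map them and report the remote address back to the
 * caller; the buffer is tracked on fl->mmaps so it can be unmapped later
 * via fastrpc_req_munmap().
 */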
static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
        struct fastrpc_buf *buf = NULL;
        struct fastrpc_mmap_req_msg req_msg;
        struct fastrpc_mmap_rsp_msg rsp_msg;
        struct fastrpc_req_munmap req_unmap;
        struct fastrpc_phy_page pages;
        struct fastrpc_req_mmap req;
        struct device *dev = fl->sctx->dev;
        int err;
        u32 sc;

        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;

        if (req.flags != ADSP_MMAP_ADD_PAGES) {
                dev_err(dev, "flag not supported 0x%x\n", req.flags);
                return -EINVAL;
        }

        if (req.vaddrin) {
                dev_err(dev, "adding user allocated pages is not supported\n");
                return -EINVAL;
        }

        err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
        if (err) {
                dev_err(dev, "failed to allocate buffer\n");
                return err;
        }

        req_msg.pgid = fl->tgid;
        req_msg.flags = req.flags;
        req_msg.vaddr = req.vaddrin;
        req_msg.num = sizeof(pages);

        args[0].ptr = (u64) (uintptr_t) &req_msg;
        args[0].length = sizeof(req_msg);

        pages.addr = buf->phys;
        pages.size = buf->size;

        args[1].ptr = (u64) (uintptr_t) &pages;
        args[1].length = sizeof(pages);

        args[2].ptr = (u64) (uintptr_t) &rsp_msg;
        args[2].length = sizeof(rsp_msg);

        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
                                      &args[0]);
        if (err) {
                dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
                goto err_invoke;
        }

        /* update the buffer to be able to deallocate the memory on the DSP */
        buf->raddr = (uintptr_t) rsp_msg.vaddr;

        /* let the client know the address to use */
        req.vaddrout = rsp_msg.vaddr;

        spin_lock(&fl->lock);
        list_add_tail(&buf->node, &fl->mmaps);
        spin_unlock(&fl->lock);

        if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
                /* unmap the memory and release the buffer */
                req_unmap.vaddrout = buf->raddr;
                req_unmap.size = buf->size;
                fastrpc_req_munmap_impl(fl, &req_unmap);
                return -EFAULT;
        }

        dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
                buf->raddr, buf->size);

        return 0;

err_invoke:
        fastrpc_buf_free(buf);

        return err;
}

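/*
 * Tear down a mapping created by FASTRPC_IOCTL_MEM_MAP: look up the map
 * by fd and remote address, issue FASTRPC_RMID_INIT_MEM_UNMAP and drop
 * the map reference on success.
 */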
static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
        struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
        struct fastrpc_map *map = NULL, *iter, *m;
        struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
        int err = 0;
        u32 sc;
        struct device *dev = fl->sctx->dev;

        spin_lock(&fl->lock);
        list_for_each_entry_safe(iter, m, &fl->maps, node) {
                if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
                        map = iter;
                        break;
                }
        }
        spin_unlock(&fl->lock);

        if (!map) {
                dev_err(dev, "map not in list\n");
                return -EINVAL;
        }

        req_msg.pgid = fl->tgid;
        req_msg.len = map->len;
        req_msg.vaddrin = map->raddr;
        req_msg.fd = map->fd;

        args[0].ptr = (u64) (uintptr_t) &req_msg;
        args[0].length = sizeof(req_msg);

        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
                                      &args[0]);
        if (err) {
                dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
                return err;
        }
        fastrpc_map_put(map);

        return 0;
}

static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_mem_unmap req;

        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;

        return fastrpc_req_mem_unmap_impl(fl, &req);
}

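/*
 * FASTRPC_IOCTL_MEM_MAP handler: create an SMMU mapping for the supplied
 * dma-buf fd, describe it to the DSP with FASTRPC_RMID_INIT_MEM_MAP and
 * report the remote address back to userspace.
 */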
static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
{
        struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
        struct fastrpc_mem_map_req_msg req_msg = { 0 };
        struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
        struct fastrpc_mem_unmap req_unmap = { 0 };
        struct fastrpc_phy_page pages = { 0 };
        struct fastrpc_mem_map req;
        struct device *dev = fl->sctx->dev;
        struct fastrpc_map *map = NULL;
        int err;
        u32 sc;

        if (copy_from_user(&req, argp, sizeof(req)))
                return -EFAULT;

        /* create SMMU mapping */
        err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
        if (err) {
                dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
                return err;
        }

        req_msg.pgid = fl->tgid;
        req_msg.fd = req.fd;
        req_msg.offset = req.offset;
        req_msg.vaddrin = req.vaddrin;
        map->va = (void *) (uintptr_t) req.vaddrin;
        req_msg.flags = req.flags;
        req_msg.num = sizeof(pages);
        req_msg.data_len = 0;

        args[0].ptr = (u64) (uintptr_t) &req_msg;
        args[0].length = sizeof(req_msg);

        pages.addr = map->phys;
        pages.size = map->size;

        args[1].ptr = (u64) (uintptr_t) &pages;
        args[1].length = sizeof(pages);

        args[2].ptr = (u64) (uintptr_t) &pages;
        args[2].length = 0;

        args[3].ptr = (u64) (uintptr_t) &rsp_msg;
        args[3].length = sizeof(rsp_msg);

        sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
        err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
        if (err) {
                dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
                        req.fd, req.vaddrin, map->size);
                goto err_invoke;
        }

        /* update the buffer to be able to deallocate the memory on the DSP */
        map->raddr = rsp_msg.vaddr;

        /* let the client know the address to use */
        req.vaddrout = rsp_msg.vaddr;

        if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
                /* unmap the memory and release the buffer */
                req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
                req_unmap.length = map->size;
                fastrpc_req_mem_unmap_impl(fl, &req_unmap);
                return -EFAULT;
        }

        return 0;

err_invoke:
        fastrpc_map_put(map);

        return err;
}

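/* Dispatch the /dev/fastrpc-* ioctls to their handlers. */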
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
        struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
        char __user *argp = (char __user *)arg;
        int err;

        switch (cmd) {
        case FASTRPC_IOCTL_INVOKE:
                err = fastrpc_invoke(fl, argp);
                break;
        case FASTRPC_IOCTL_INIT_ATTACH:
                err = fastrpc_init_attach(fl, AUDIO_PD);
                break;
        case FASTRPC_IOCTL_INIT_ATTACH_SNS:
                err = fastrpc_init_attach(fl, SENSORS_PD);
                break;
        case FASTRPC_IOCTL_INIT_CREATE:
                err = fastrpc_init_create_process(fl, argp);
                break;
        case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
                err = fastrpc_dmabuf_alloc(fl, argp);
                break;
        case FASTRPC_IOCTL_MMAP:
                err = fastrpc_req_mmap(fl, argp);
                break;
        case FASTRPC_IOCTL_MUNMAP:
                err = fastrpc_req_munmap(fl, argp);
                break;
        case FASTRPC_IOCTL_MEM_MAP:
                err = fastrpc_req_mem_map(fl, argp);
                break;
        case FASTRPC_IOCTL_MEM_UNMAP:
                err = fastrpc_req_mem_unmap(fl, argp);
                break;
        case FASTRPC_IOCTL_GET_DSP_INFO:
                err = fastrpc_get_dsp_info(fl, argp);
                break;
        default:
                err = -ENOTTY;
                break;
        }

        return err;
}

static const struct file_operations fastrpc_fops = {
        .open = fastrpc_device_open,
        .release = fastrpc_device_release,
        .unlocked_ioctl = fastrpc_device_ioctl,
        .compat_ioctl = fastrpc_device_ioctl,
};

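/*
 * Probe a compute context-bank sub-device: register its session(s) with
 * the parent channel context and enable 32-bit DMA for it.
 */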
static int fastrpc_cb_probe(struct platform_device *pdev)
{
        struct fastrpc_channel_ctx *cctx;
        struct fastrpc_session_ctx *sess;
        struct device *dev = &pdev->dev;
        int i, sessions = 0;
        unsigned long flags;
        int rc;

        cctx = dev_get_drvdata(dev->parent);
        if (!cctx)
                return -EINVAL;

        of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

        spin_lock_irqsave(&cctx->lock, flags);
        if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
                dev_err(&pdev->dev, "too many sessions\n");
                spin_unlock_irqrestore(&cctx->lock, flags);
                return -ENOSPC;
        }
        sess = &cctx->session[cctx->sesscount++];
        sess->used = false;
        sess->valid = true;
        sess->dev = dev;
        dev_set_drvdata(dev, sess);

        if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
                dev_info(dev, "FastRPC Session ID not specified in DT\n");

        if (sessions > 0) {
                struct fastrpc_session_ctx *dup_sess;

                for (i = 1; i < sessions; i++) {
                        if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
                                break;
                        dup_sess = &cctx->session[cctx->sesscount++];
                        memcpy(dup_sess, sess, sizeof(*dup_sess));
                }
        }
        spin_unlock_irqrestore(&cctx->lock, flags);

        rc = dma_set_mask(dev, DMA_BIT_MASK(32));
        if (rc) {
                dev_err(dev, "32-bit DMA enable failed\n");
                return rc;
        }

        return 0;
}

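/* Invalidate the sessions that belong to the departing context bank. */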
static int fastrpc_cb_remove(struct platform_device *pdev)
{
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
        struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
        unsigned long flags;
        int i;

        spin_lock_irqsave(&cctx->lock, flags);
        for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
                if (cctx->session[i].sid == sess->sid) {
                        cctx->session[i].valid = false;
                        cctx->sesscount--;
                }
        }
        spin_unlock_irqrestore(&cctx->lock, flags);

        return 0;
}

static const struct of_device_id fastrpc_match_table[] = {
        { .compatible = "qcom,fastrpc-compute-cb", },
        {}
};

static struct platform_driver fastrpc_cb_driver = {
        .probe = fastrpc_cb_probe,
        .remove = fastrpc_cb_remove,
        .driver = {
                .name = "qcom,fastrpc-cb",
                .of_match_table = fastrpc_match_table,
                .suppress_bind_attrs = true,
        },
};

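/*
 * Register a misc character device ("fastrpc-<domain>" or
 * "fastrpc-<domain>-secure") for one channel.
 */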
static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
                                   bool is_secured, const char *domain)
{
        struct fastrpc_device *fdev;
        int err;

        fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
        if (!fdev)
                return -ENOMEM;

        fdev->secure = is_secured;
        fdev->cctx = cctx;
        fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
        fdev->miscdev.fops = &fastrpc_fops;
        fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
                                            domain, is_secured ? "-secure" : "");
        if (!fdev->miscdev.name)
                return -ENOMEM;

        err = misc_register(&fdev->miscdev);
        if (!err) {
                if (is_secured)
                        cctx->secure_fdevice = fdev;
                else
                        cctx->fdevice = fdev;
        }

        return err;
}

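/*
 * rpmsg probe for a FastRPC channel: resolve the domain from the "label"
 * DT property, read the optional VM permission list, create the device
 * node(s) and populate the context-bank child devices.
 */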
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
        struct device *rdev = &rpdev->dev;
        struct fastrpc_channel_ctx *data;
        int i, err, domain_id = -1, vmcount;
        const char *domain;
        bool secure_dsp;
        unsigned int vmids[FASTRPC_MAX_VMIDS];

        err = of_property_read_string(rdev->of_node, "label", &domain);
        if (err) {
                dev_info(rdev, "FastRPC Domain not specified in DT\n");
                return err;
        }

        for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
                if (!strcmp(domains[i], domain)) {
                        domain_id = i;
                        break;
                }
        }

        if (domain_id < 0) {
                dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
                return -EINVAL;
        }

        vmcount = of_property_read_variable_u32_array(rdev->of_node,
                                "qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
        if (vmcount < 0)
                vmcount = 0;
        else if (!qcom_scm_is_available())
                return -EPROBE_DEFER;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        if (vmcount) {
                data->vmcount = vmcount;
                data->perms = BIT(QCOM_SCM_VMID_HLOS);
                for (i = 0; i < data->vmcount; i++) {
                        data->vmperms[i].vmid = vmids[i];
                        data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
                }
        }

        secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
        data->secure = secure_dsp;

        switch (domain_id) {
        case ADSP_DOMAIN_ID:
        case MDSP_DOMAIN_ID:
        case SDSP_DOMAIN_ID:
                /* Unsigned PD offloading is only supported on CDSP */
                data->unsigned_support = false;
                err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
                if (err)
                        goto fdev_error;
                break;
        case CDSP_DOMAIN_ID:
                data->unsigned_support = true;
                /* Create both device nodes so that we can allow both Signed and Unsigned PD */
                err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
                if (err)
                        goto fdev_error;

                err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
                if (err)
                        goto fdev_error;
                break;
        default:
                err = -EINVAL;
                goto fdev_error;
        }

        kref_init(&data->refcount);

        dev_set_drvdata(&rpdev->dev, data);
        dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
        INIT_LIST_HEAD(&data->users);
        spin_lock_init(&data->lock);
        idr_init(&data->ctx_idr);
        data->domain_id = domain_id;
        data->rpdev = rpdev;
        err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
        if (err)
                goto populate_error;

        return 0;

populate_error:
        if (data->fdevice)
                misc_deregister(&data->fdevice->miscdev);
        if (data->secure_fdevice)
                misc_deregister(&data->secure_fdevice->miscdev);

fdev_error:
        kfree(data);
        return err;
}

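/* Fail all pending invocations of a user with -EPIPE when the channel goes away. */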
static void fastrpc_notify_users(struct fastrpc_user *user)
{
        struct fastrpc_invoke_ctx *ctx;

        spin_lock(&user->lock);
        list_for_each_entry(ctx, &user->pending, node) {
                ctx->retval = -EPIPE;
                complete(&ctx->work);
        }
        spin_unlock(&user->lock);
}

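/*
 * Channel teardown: block further invocations, wake up pending users,
 * remove the device nodes and drop the channel reference.
 */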
static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
        struct fastrpc_user *user;
        unsigned long flags;

        /* No invocations past this point */
        spin_lock_irqsave(&cctx->lock, flags);
        cctx->rpdev = NULL;
        list_for_each_entry(user, &cctx->users, user)
                fastrpc_notify_users(user);
        spin_unlock_irqrestore(&cctx->lock, flags);

        if (cctx->fdevice)
                misc_deregister(&cctx->fdevice->miscdev);

        if (cctx->secure_fdevice)
                misc_deregister(&cctx->secure_fdevice->miscdev);

        of_platform_depopulate(&rpdev->dev);

        fastrpc_channel_ctx_put(cctx);
}

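/*
 * rpmsg receive callback: match the response to its invoke context by
 * ctxid, store the return value and complete the waiter.
 */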
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
                                  int len, void *priv, u32 addr)
{
        struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
        struct fastrpc_invoke_rsp *rsp = data;
        struct fastrpc_invoke_ctx *ctx;
        unsigned long flags;
        unsigned long ctxid;

        if (len < sizeof(*rsp))
                return -EINVAL;

        ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

        spin_lock_irqsave(&cctx->lock, flags);
        ctx = idr_find(&cctx->ctx_idr, ctxid);
        spin_unlock_irqrestore(&cctx->lock, flags);

        if (!ctx) {
                dev_err(&rpdev->dev, "No context ID matches response\n");
                return -ENOENT;
        }

        ctx->retval = rsp->retval;
        complete(&ctx->work);

        /*
         * The DMA buffer associated with the context cannot be freed in
         * interrupt context so schedule it through a worker thread to
         * avoid a kernel BUG.
         */
        schedule_work(&ctx->put_work);

        return 0;
}

static const struct of_device_id fastrpc_rpmsg_of_match[] = {
        { .compatible = "qcom,fastrpc" },
        { },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
        .probe = fastrpc_rpmsg_probe,
        .remove = fastrpc_rpmsg_remove,
        .callback = fastrpc_rpmsg_callback,
        .drv = {
                .name = "qcom,fastrpc",
                .of_match_table = fastrpc_rpmsg_of_match,
        },
};

static int fastrpc_init(void)
{
        int ret;

        ret = platform_driver_register(&fastrpc_cb_driver);
        if (ret < 0) {
                pr_err("fastrpc: failed to register cb driver\n");
                return ret;
        }

        ret = register_rpmsg_driver(&fastrpc_driver);
        if (ret < 0) {
                pr_err("fastrpc: failed to register rpmsg driver\n");
                platform_driver_unregister(&fastrpc_cb_driver);
                return ret;
        }

        return 0;
}
module_init(fastrpc_init);

static void fastrpc_exit(void)
{
        platform_driver_unregister(&fastrpc_cb_driver);
        unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);