q6usm.c

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <dsp/msm_audio_ion.h>
#include <dsp/apr_audio-v2.h>
#include <ipc/apr_us.h>
#include "q6usm.h"

#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3

#define MEM_4K_OFFSET 4095
#define MEM_4K_MASK 0xfffff000

#define USM_SESSION_MAX 0x02 /* aDSP:USM limit */

#define READDONE_IDX_STATUS 0
#define WRITEDONE_IDX_STATUS 0

/* Standard timeout in the asynchronous ops */
#define Q6USM_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */

static DEFINE_MUTEX(session_lock);
static struct us_client *session[USM_SESSION_MAX];

static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv);
static int32_t q6usm_callback(struct apr_client_data *data, void *priv);
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
			  uint32_t pkt_size, bool cmd_flg);

struct usm_mmap {
	atomic_t ref_cnt;
	atomic_t cmd_state;
	wait_queue_head_t cmd_wait;
	void *apr;
	int mem_handle;
};

static struct usm_mmap this_mmap;

static void q6usm_add_mmaphdr(struct apr_hdr *hdr,
			      uint32_t pkt_size, bool cmd_flg, u32 token)
{
	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				       APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	hdr->src_port = 0;
	hdr->dest_port = 0;
	if (cmd_flg) {
		hdr->token = token;
		atomic_set(&this_mmap.cmd_state, 1);
	}
	hdr->pkt_size = pkt_size;
}
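
/*
 * Map a contiguous shared-memory region (bufsz * bufcnt bytes at buf_add)
 * to the aDSP over the common memory-map APR port and wait for the
 * USM_CMDRSP_SHARED_MEM_MAP_REGION response; on success the returned
 * handle is stored in *mem_handle.
 */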
static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz,
			    uint32_t bufcnt, uint32_t session,
			    uint32_t *mem_handle)
{
	struct usm_cmd_memory_map_region mem_region_map;
	int rc = 0;

	if (this_mmap.apr == NULL) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_mmaphdr(&mem_region_map.hdr,
			  sizeof(struct usm_cmd_memory_map_region), true,
			  ((session << 8) | dir));

	mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
	mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
	mem_region_map.num_regions = 1;
	mem_region_map.flags = 0;
	mem_region_map.shm_addr_lsw = lower_32_bits(buf_add);
	mem_region_map.shm_addr_msw =
			msm_audio_populate_upper_32_bits(buf_add);
	mem_region_map.mem_size_bytes = bufsz * bufcnt;

	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map);
	if (rc < 0) {
		pr_err("%s: mem_map op[0x%x]rc[%d]\n",
		       __func__, mem_region_map.hdr.opcode, rc);
		rc = -EINVAL;
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_mmap.cmd_wait,
				(atomic_read(&this_mmap.cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for memory_map\n", __func__);
	} else {
		*mem_handle = this_mmap.mem_handle;
		rc = 0;
	}
fail_cmd:
	return rc;
}

int q6usm_memory_unmap(phys_addr_t buf_add, int dir, uint32_t session,
		       uint32_t mem_handle)
{
	struct usm_cmd_memory_unmap_region mem_unmap;
	int rc = 0;

	if (this_mmap.apr == NULL) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_mmaphdr(&mem_unmap.hdr,
			  sizeof(struct usm_cmd_memory_unmap_region), true,
			  ((session << 8) | dir));
	mem_unmap.hdr.opcode = USM_CMD_SHARED_MEM_UNMAP_REGION;
	mem_unmap.mem_map_handle = mem_handle;

	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
	if (rc < 0) {
		pr_err("%s: mem_unmap op[0x%x] rc[%d]\n",
		       __func__, mem_unmap.hdr.opcode, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_mmap.cmd_wait,
				(atomic_read(&this_mmap.cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
	} else
		rc = 0;
fail_cmd:
	return rc;
}
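
/*
 * Session bookkeeping: the aDSP USM service supports up to USM_SESSION_MAX
 * concurrent sessions. Session IDs handed to clients are the array index
 * plus one, since session ID 0 is reserved.
 */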
static int q6usm_session_alloc(struct us_client *usc)
{
	int ind = 0;

	mutex_lock(&session_lock);
	for (ind = 0; ind < USM_SESSION_MAX; ++ind) {
		if (!session[ind]) {
			session[ind] = usc;
			mutex_unlock(&session_lock);
			++ind; /* session id: 0 reserved */
			pr_debug("%s: session[%d] was allocated\n",
				 __func__, ind);
			return ind;
		}
	}
	mutex_unlock(&session_lock);
	return -ENOMEM;
}

static void q6usm_session_free(struct us_client *usc)
{
	/* Session index was incremented during allocation */
	uint16_t ind = (uint16_t)usc->session - 1;

	pr_debug("%s: to free session[%d]\n", __func__, ind);
	if (ind < USM_SESSION_MAX) {
		mutex_lock(&session_lock);
		session[ind] = NULL;
		mutex_unlock(&session_lock);
	}
}

static int q6usm_us_client_buf_free(unsigned int dir,
				    struct us_client *usc)
{
	struct us_port_data *port;
	int rc = 0;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)))
		return -EINVAL;

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];
	if (port == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return -EINVAL;
	}

	if (port->data == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return 0;
	}

	rc = q6usm_memory_unmap(port->phys, dir, usc->session,
				*((uint32_t *)port->ext));
	pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
		 (void *)port->data, (u64)port->phys, (void *)&port->phys);

	msm_audio_ion_free(port->dma_buf);

	port->data = NULL;
	port->phys = 0;
	port->buf_size = 0;
	port->buf_cnt = 0;
	port->dma_buf = NULL;

	mutex_unlock(&usc->cmd_lock);
	return rc;
}

int q6usm_us_param_buf_free(unsigned int dir,
			    struct us_client *usc)
{
	struct us_port_data *port;
	int rc = 0;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)))
		return -EINVAL;

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];
	if (port == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return -EINVAL;
	}

	if (port->param_buf == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return 0;
	}

	rc = q6usm_memory_unmap(port->param_phys, dir, usc->session,
				*((uint32_t *)port->param_buf_mem_handle));
	pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
		 (void *)port->param_buf, (u64)port->param_phys,
		 (void *)&port->param_phys);

	msm_audio_ion_free(port->param_dma_buf);

	port->param_buf = NULL;
	port->param_phys = 0;
	port->param_buf_size = 0;
	port->param_dma_buf = NULL;

	mutex_unlock(&usc->cmd_lock);
	return rc;
}

void q6usm_us_client_free(struct us_client *usc)
{
	int loopcnt = 0;
	struct us_port_data *port;
	uint32_t *p_mem_handle = NULL;

	if ((usc == NULL) ||
	    !(usc->session))
		return;

	for (loopcnt = 0; loopcnt <= OUT; ++loopcnt) {
		port = &usc->port[loopcnt];
		if (port->data == NULL)
			continue;
		pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
		q6usm_us_client_buf_free(loopcnt, usc);
		q6usm_us_param_buf_free(loopcnt, usc);
	}

	q6usm_session_free(usc);
	apr_deregister(usc->apr);
	pr_debug("%s: APR De-Register\n", __func__);

	if (atomic_read(&this_mmap.ref_cnt) <= 0) {
		pr_err("%s: APR Common Port Already Closed\n", __func__);
		goto done;
	}

	atomic_dec(&this_mmap.ref_cnt);
	if (atomic_read(&this_mmap.ref_cnt) == 0) {
		apr_deregister(this_mmap.apr);
		pr_debug("%s: APR De-Register common port\n", __func__);
	}
done:
	p_mem_handle = (uint32_t *)usc->port[IN].ext;
	kfree(p_mem_handle);
	kfree(usc);
	pr_debug("%s:\n", __func__);
}
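
/*
 * Allocate a USM client: reserve a session slot, register a per-session
 * APR port plus (on first use) the common memory-map port, and hand out a
 * single allocation of four uint32_t memory-map handles shared between the
 * IN/OUT data buffers and their parameter buffers.
 */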
struct us_client *q6usm_us_client_alloc(
	void (*cb)(uint32_t, uint32_t, uint32_t *, void *),
	void *priv)
{
	struct us_client *usc;
	uint32_t *p_mem_handle = NULL;
	int n;
	int lcnt = 0;

	usc = kzalloc(sizeof(struct us_client), GFP_KERNEL);
	if (usc == NULL)
		return NULL;

	p_mem_handle = kzalloc(sizeof(uint32_t) * 4, GFP_KERNEL);
	if (p_mem_handle == NULL) {
		kfree(usc);
		return NULL;
	}

	n = q6usm_session_alloc(usc);
	if (n <= 0)
		goto fail_session;
	usc->session = n;
	usc->cb = cb;
	usc->priv = priv;
	usc->apr = apr_register("ADSP", "USM",
				(apr_fn)q6usm_callback,
				((usc->session) << 8 | 0x0001),
				usc);
	if (usc->apr == NULL) {
		pr_err("%s: Registration with APR failed\n", __func__);
		goto fail;
	}
	pr_debug("%s: Registering the common port with APR\n", __func__);
	if (atomic_read(&this_mmap.ref_cnt) == 0) {
		this_mmap.apr = apr_register("ADSP", "USM",
					     (apr_fn)q6usm_mmapcallback,
					     0x0FFFFFFFF, &this_mmap);
		if (this_mmap.apr == NULL) {
			pr_err("%s: USM port registration failed\n",
			       __func__);
			goto fail;
		}
	}

	atomic_inc(&this_mmap.ref_cnt);
	init_waitqueue_head(&usc->cmd_wait);
	mutex_init(&usc->cmd_lock);
	for (lcnt = 0; lcnt <= OUT; ++lcnt) {
		mutex_init(&usc->port[lcnt].lock);
		spin_lock_init(&usc->port[lcnt].dsp_lock);
		usc->port[lcnt].ext = (void *)p_mem_handle++;
		usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++;
		pr_err("%s: usc->port[%d].ext=%pK;\n",
		       __func__, lcnt, usc->port[lcnt].ext);
	}
	atomic_set(&usc->cmd_state, 0);

	return usc;

fail:
	kfree(p_mem_handle);
	q6usm_us_client_free(usc);
	return NULL;

fail_session:
	kfree(p_mem_handle);
	kfree(usc);
	return NULL;
}
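
/*
 * Allocate the data buffer for one direction (IN or OUT): a single ION
 * allocation of bufsz * bufcnt bytes, rounded up to a 4K page multiple,
 * which is then mapped to the aDSP as one shared-memory region.
 */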
int q6usm_us_client_buf_alloc(unsigned int dir,
			      struct us_client *usc,
			      unsigned int bufsz,
			      unsigned int bufcnt)
{
	int rc = 0;
	struct us_port_data *port = NULL;
	unsigned int size = bufsz*bufcnt;
	size_t len;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)) || (size == 0) ||
	    (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
		pr_err("%s: wrong parameters: size=%d; bufcnt=%d\n",
		       __func__, size, bufcnt);
		return -EINVAL;
	}

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];

	/* The size to allocate should be multiple of 4K bytes */
	size = PAGE_ALIGN(size);

	rc = msm_audio_ion_alloc(&port->dma_buf,
				 size, &port->phys,
				 &len, &port->data);
	if (rc) {
		pr_err("%s: US ION allocation failed, rc = %d\n",
		       __func__, rc);
		mutex_unlock(&usc->cmd_lock);
		return -ENOMEM;
	}

	port->buf_cnt = bufcnt;
	port->buf_size = bufsz;
	pr_debug("%s: data[%pK]; phys[%llx]; [%pK]\n", __func__,
		 (void *)port->data,
		 (u64)port->phys,
		 (void *)&port->phys);

	rc = q6usm_memory_map(port->phys, dir, size, 1, usc->session,
			      (uint32_t *)port->ext);
	if (rc < 0) {
		pr_err("%s: CMD Memory_map failed\n", __func__);
		mutex_unlock(&usc->cmd_lock);
		q6usm_us_client_buf_free(dir, usc);
		q6usm_us_param_buf_free(dir, usc);
	} else {
		mutex_unlock(&usc->cmd_lock);
		rc = 0;
	}

	return rc;
}

int q6usm_us_param_buf_alloc(unsigned int dir,
			     struct us_client *usc,
			     unsigned int bufsz)
{
	int rc = 0;
	struct us_port_data *port = NULL;
	unsigned int size = bufsz;
	size_t len;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)) ||
	    (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
		pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n",
		       __func__, dir, bufsz);
		return -EINVAL;
	}

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];

	if (bufsz == 0) {
		pr_debug("%s: bufsz=0, get/set param commands are forbidden\n",
			 __func__);
		port->param_buf = NULL;
		mutex_unlock(&usc->cmd_lock);
		return rc;
	}

	/* The size to allocate should be multiple of 4K bytes */
	size = PAGE_ALIGN(size);

	rc = msm_audio_ion_alloc(&port->param_dma_buf,
				 size, &port->param_phys,
				 &len, &port->param_buf);
	if (rc) {
		pr_err("%s: US ION allocation failed, rc = %d\n",
		       __func__, rc);
		mutex_unlock(&usc->cmd_lock);
		return -ENOMEM;
	}

	port->param_buf_size = bufsz;
	pr_debug("%s: param_buf[%pK]; param_phys[%llx]; [%pK]\n", __func__,
		 (void *)port->param_buf,
		 (u64)port->param_phys,
		 (void *)&port->param_phys);

	rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1,
			      usc->session,
			      (uint32_t *)port->param_buf_mem_handle);
	if (rc < 0) {
		pr_err("%s: CMD Memory_map failed\n", __func__);
		mutex_unlock(&usc->cmd_lock);
		q6usm_us_client_buf_free(dir, usc);
		q6usm_us_param_buf_free(dir, usc);
	} else {
		mutex_unlock(&usc->cmd_lock);
		rc = 0;
	}

	return rc;
}
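
/*
 * APR callback for the common memory-map port. A basic response for an
 * unmap completes the pending command immediately; for a map the waiter
 * is only woken by USM_CMDRSP_SHARED_MEM_MAP_REGION, which also carries
 * the memory-map handle.
 */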
static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv)
{
	uint32_t token;
	uint32_t *payload = data->payload;

	pr_debug("%s: ptr0[0x%x]; ptr1[0x%x]; opcode[0x%x]\n",
		 __func__, payload[0], payload[1], data->opcode);
	pr_debug("%s: token[0x%x]; payload_size[%d]; src[%d]; dest[%d];\n",
		 __func__, data->token, data->payload_size,
		 data->src_port, data->dest_port);

	if (data->opcode == APR_BASIC_RSP_RESULT) {
		/* status field check */
		if (payload[1]) {
			pr_err("%s: wrong response[%d] on cmd [%d]\n",
			       __func__, payload[1], payload[0]);
		} else {
			token = data->token;
			switch (payload[0]) {
			case USM_CMD_SHARED_MEM_UNMAP_REGION:
				if (atomic_read(&this_mmap.cmd_state)) {
					atomic_set(&this_mmap.cmd_state, 0);
					wake_up(&this_mmap.cmd_wait);
				}
				/* fallthrough */
			case USM_CMD_SHARED_MEM_MAP_REGION:
				/* For MEM_MAP an additional response is */
				/* awaited, therefore no wake-up here */
				pr_debug("%s: cmd[0x%x]; result[0x%x]\n",
					 __func__, payload[0], payload[1]);
				break;
			default:
				pr_debug("%s: wrong command[0x%x]\n",
					 __func__, payload[0]);
				break;
			}
		}
	} else {
		if (data->opcode == USM_CMDRSP_SHARED_MEM_MAP_REGION) {
			this_mmap.mem_handle = payload[0];
			pr_debug("%s: memory map handle = 0x%x",
				 __func__, payload[0]);
			if (atomic_read(&this_mmap.cmd_state)) {
				atomic_set(&this_mmap.cmd_state, 0);
				wake_up(&this_mmap.cmd_wait);
			}
		}
	}
	return 0;
}
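
/*
 * Per-session APR callback. Command acknowledgements wake the waiter in
 * the synchronous helpers below; READ_DONE/WRITE_DONE events update the
 * ring-buffer indices under the port dsp_lock and are then forwarded,
 * like signal-detect results and reset events, to the client callback.
 */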
static int32_t q6usm_callback(struct apr_client_data *data, void *priv)
{
	struct us_client *usc = (struct us_client *)priv;
	unsigned long dsp_flags;
	uint32_t *payload = data->payload;
	uint32_t token = data->token;
	uint32_t opcode = Q6USM_EVENT_UNDEF;

	if (usc == NULL) {
		pr_err("%s: client info is NULL\n", __func__);
		return -EINVAL;
	}

	if (data->opcode == APR_BASIC_RSP_RESULT) {
		/* status field check */
		if (payload[1]) {
			pr_err("%s: wrong response[%d] on cmd [%d]\n",
			       __func__, payload[1], payload[0]);
			if (usc->cb)
				usc->cb(data->opcode, token,
					(uint32_t *)data->payload, usc->priv);
		} else {
			switch (payload[0]) {
			case USM_SESSION_CMD_RUN:
			case USM_STREAM_CMD_CLOSE:
				if (token != usc->session) {
					pr_err("%s: wrong token[%d]",
					       __func__, token);
					break;
				}
				/* fallthrough */
			case USM_STREAM_CMD_OPEN_READ:
			case USM_STREAM_CMD_OPEN_WRITE:
			case USM_STREAM_CMD_SET_ENC_PARAM:
			case USM_DATA_CMD_MEDIA_FORMAT_UPDATE:
			case USM_SESSION_CMD_SIGNAL_DETECT_MODE:
			case USM_STREAM_CMD_SET_PARAM:
			case USM_STREAM_CMD_GET_PARAM:
				if (atomic_read(&usc->cmd_state)) {
					atomic_set(&usc->cmd_state, 0);
					wake_up(&usc->cmd_wait);
				}
				if (usc->cb)
					usc->cb(data->opcode, token,
						(uint32_t *)data->payload,
						usc->priv);
				break;
			default:
				break;
			}
		}
		return 0;
	}

	switch (data->opcode) {
	case RESET_EVENTS: {
		pr_err("%s: Reset event is received: %d %d\n",
		       __func__,
		       data->reset_event,
		       data->reset_proc);

		opcode = RESET_EVENTS;

		apr_reset(this_mmap.apr);
		this_mmap.apr = NULL;

		apr_reset(usc->apr);
		usc->apr = NULL;

		break;
	}
	case USM_DATA_EVENT_READ_DONE: {
		struct us_port_data *port = &usc->port[OUT];

		opcode = Q6USM_EVENT_READ_DONE;
		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
		if (payload[READDONE_IDX_STATUS]) {
			pr_err("%s: wrong READDONE[%d]; token[%d]\n",
			       __func__,
			       payload[READDONE_IDX_STATUS],
			       token);
			token = USM_WRONG_TOKEN;
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			break;
		}

		if (port->expected_token != token) {
			u32 cpu_buf = port->cpu_buf;

			pr_err("%s: expected[%d] != token[%d]\n",
			       __func__, port->expected_token, token);
			pr_debug("%s: dsp_buf=%d; cpu_buf=%d;\n",
				 __func__, port->dsp_buf, cpu_buf);

			token = USM_WRONG_TOKEN;
			/* To prevent continued data handling */
			port->expected_token = USM_WRONG_TOKEN;
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			break;
		} /* port->expected_token != data->token */

		port->expected_token = token + 1;
		if (port->expected_token == port->buf_cnt)
			port->expected_token = 0;

		/* gap support */
		if (port->expected_token != port->cpu_buf) {
			port->dsp_buf = port->expected_token;
			token = port->dsp_buf; /* for callback */
		} else
			port->dsp_buf = token;

		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
		break;
	} /* case USM_DATA_EVENT_READ_DONE */
	case USM_DATA_EVENT_WRITE_DONE: {
		struct us_port_data *port = &usc->port[IN];

		opcode = Q6USM_EVENT_WRITE_DONE;
		if (payload[WRITEDONE_IDX_STATUS]) {
			pr_err("%s: wrong WRITEDONE_IDX_STATUS[%d]\n",
			       __func__,
			       payload[WRITEDONE_IDX_STATUS]);
			break;
		}

		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
		port->dsp_buf = token + 1;
		if (port->dsp_buf == port->buf_cnt)
			port->dsp_buf = 0;
		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);

		break;
	} /* case USM_DATA_EVENT_WRITE_DONE */
	case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT: {
		pr_debug("%s: US detect result: result=%d",
			 __func__,
			 payload[0]);

		opcode = Q6USM_EVENT_SIGNAL_DETECT_RESULT;

		break;
	} /* case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT */
	default:
		return 0;
	} /* switch */

	if (usc->cb)
		usc->cb(opcode, token,
			data->payload, usc->priv);

	return 0;
}
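
/*
 * Map the port's shared data buffer into the given user vm_area via
 * msm_audio_ion_mmap(); returns 0xffffffff on invalid arguments.
 */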
uint32_t q6usm_get_virtual_address(int dir,
				   struct us_client *usc,
				   struct vm_area_struct *vms)
{
	uint32_t ret = 0xffffffff;

	if (vms && (usc != NULL) && ((dir == IN) || (dir == OUT))) {
		struct us_port_data *port = &usc->port[dir];
		int size = PAGE_ALIGN(port->buf_size * port->buf_cnt);
		struct audio_buffer ab;

		ab.phys = port->phys;
		ab.data = port->data;
		ab.used = 1;
		ab.size = size;
		ab.actual_size = size;
		ab.dma_buf = port->dma_buf;
		ret = msm_audio_ion_mmap(&ab, vms);
	}
	return ret;
}

static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
			  uint32_t pkt_size, bool cmd_flg)
{
	mutex_lock(&usc->cmd_lock);
	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				       APR_HDR_LEN(sizeof(struct apr_hdr)),
				       APR_PKT_VER);
	hdr->src_svc = ((struct apr_svc *)usc->apr)->id;
	hdr->src_domain = APR_DOMAIN_APPS;
	hdr->dest_svc = APR_SVC_USM;
	hdr->dest_domain = APR_DOMAIN_ADSP;
	hdr->src_port = (usc->session << 8) | 0x0001;
	hdr->dest_port = (usc->session << 8) | 0x0001;
	if (cmd_flg) {
		hdr->token = usc->session;
		atomic_set(&usc->cmd_state, 1);
	}
	hdr->pkt_size = pkt_size;
	mutex_unlock(&usc->cmd_lock);
}

static uint32_t q6usm_ext2int_format(uint32_t ext_format)
{
	uint32_t int_format = INVALID_FORMAT;

	switch (ext_format) {
	case FORMAT_USPS_EPOS:
		int_format = US_POINT_EPOS_FORMAT_V2;
		break;
	case FORMAT_USRAW:
		int_format = US_RAW_FORMAT_V2;
		break;
	case FORMAT_USPROX:
		int_format = US_PROX_FORMAT_V4;
		break;
	case FORMAT_USGES_SYNC:
		int_format = US_GES_SYNC_FORMAT;
		break;
	case FORMAT_USRAW_SYNC:
		int_format = US_RAW_SYNC_FORMAT;
		break;
	default:
		pr_err("%s: Invalid format[%d]\n", __func__, ext_format);
		break;
	}

	return int_format;
}
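
/*
 * Open the session's capture (read) stream on the aDSP with the requested
 * format and wait for the acknowledgement.
 */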
int q6usm_open_read(struct us_client *usc,
		    uint32_t format)
{
	uint32_t int_format = INVALID_FORMAT;
	int rc = 0x00;
	struct usm_stream_cmd_open_read open;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: client or its apr is NULL\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: session[%d]", __func__, usc->session);

	q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
	open.hdr.opcode = USM_STREAM_CMD_OPEN_READ;
	open.src_endpoint = 0; /* AFE */
	open.pre_proc_top = 0; /* No preprocessing required */

	int_format = q6usm_ext2int_format(format);
	if (int_format == INVALID_FORMAT)
		return -EINVAL;

	open.uMode = STREAM_PRIORITY_NORMAL;
	open.format = int_format;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
	if (rc < 0) {
		pr_err("%s: open failed op[0x%x]rc[%d]\n",
		       __func__, open.hdr.opcode, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout, waited for OPEN_READ rc[%d]\n",
		       __func__, rc);
		goto fail_cmd;
	} else
		rc = 0;
fail_cmd:
	return rc;
}
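
/*
 * Send the encoder configuration block. Client-specific ("transparent")
 * parameters follow the common config; their size is rounded up to a
 * whole number of u32s, and if it exceeds the static USM_MAX_CFG_DATA_SIZE
 * area the command packet is allocated dynamically to hold the overflow.
 */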
int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_cmd_encdec_cfg_blk enc_cfg_obj;
	struct usm_stream_cmd_encdec_cfg_blk *enc_cfg = &enc_cfg_obj;
	int rc = 0;
	uint32_t total_cfg_size =
		sizeof(struct usm_stream_cmd_encdec_cfg_blk);
	uint32_t round_params_size = 0;
	uint8_t is_allocated = 0;

	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Transparent configuration data is after enc_cfg */
	/* Integer number of u32s is required */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* Dynamic allocated encdec_cfg_blk is required */
		/* static part use */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (enc_cfg == NULL) {
			pr_err("%s: enc_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else
		round_params_size = 0;

	q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size, true);

	enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM;
	enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK;
	enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk) +
			      round_params_size;
	enc_cfg->enc_blk.frames_per_buf = 1;
	enc_cfg->enc_blk.format_id = int_format;
	enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common) +
				    USM_MAX_CFG_DATA_SIZE +
				    round_params_size;
	memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));

	/* Transparent data copy */
	memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params,
	       us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]\n",
		 __func__,
		 enc_cfg->enc_blk.cfg_size,
		 us_cfg->params_size);
	pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n",
		 __func__,
		 enc_cfg->enc_blk.transp_data[0],
		 enc_cfg->enc_blk.transp_data[1],
		 enc_cfg->enc_blk.transp_data[2],
		 enc_cfg->enc_blk.transp_data[3],
		 enc_cfg->enc_blk.transp_data[4],
		 enc_cfg->enc_blk.transp_data[5],
		 enc_cfg->enc_blk.transp_data[6],
		 enc_cfg->enc_blk.transp_data[7]
		);
	pr_debug("%s: srate:%d, ch=%d, bps= %d;\n",
		 __func__, enc_cfg->enc_blk.cfg_common.sample_rate,
		 enc_cfg->enc_blk.cfg_common.ch_cfg,
		 enc_cfg->enc_blk.cfg_common.bits_per_sample);
	pr_debug("dmap:[0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x]; dev_id=0x%x\n",
		 enc_cfg->enc_blk.cfg_common.data_map[0],
		 enc_cfg->enc_blk.cfg_common.data_map[1],
		 enc_cfg->enc_blk.cfg_common.data_map[2],
		 enc_cfg->enc_blk.cfg_common.data_map[3],
		 enc_cfg->enc_blk.cfg_common.data_map[4],
		 enc_cfg->enc_blk.cfg_common.data_map[5],
		 enc_cfg->enc_blk.cfg_common.data_map[6],
		 enc_cfg->enc_blk.cfg_common.data_map[7],
		 enc_cfg->enc_blk.cfg_common.dev_id);

	rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg);
	if (rc < 0) {
		pr_err("%s: Command open failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, enc_cfg->hdr.opcode);
	} else
		rc = 0;

fail_cmd:
	if (is_allocated == 1)
		kfree(enc_cfg);

	return rc;
}

int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_media_format_update dec_cfg_obj;
	struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj;
	int rc = 0;
	uint32_t total_cfg_size =
		sizeof(struct usm_stream_media_format_update);
	uint32_t round_params_size = 0;
	uint8_t is_allocated = 0;

	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Transparent configuration data is after dec_cfg */
	/* Integer number of u32s is required */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* Dynamic allocated encdec_cfg_blk is required */
		/* static part use */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (dec_cfg == NULL) {
			pr_err("%s:dec_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else { /* static transp_data is enough */
		round_params_size = 0;
	}

	q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size, true);

	dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE;
	dec_cfg->format_id = int_format;
	dec_cfg->cfg_size = sizeof(struct usm_cfg_common) +
			    USM_MAX_CFG_DATA_SIZE +
			    round_params_size;
	memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));
	/* Transparent data copy */
	memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n",
		 __func__,
		 dec_cfg->cfg_size,
		 us_cfg->params_size,
		 dec_cfg->transp_data[0],
		 dec_cfg->transp_data[1],
		 dec_cfg->transp_data[2],
		 dec_cfg->transp_data[3]
		);

	rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg);
	if (rc < 0) {
		pr_err("%s: Command open failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, dec_cfg->hdr.opcode);
	} else
		rc = 0;

fail_cmd:
	if (is_allocated == 1)
		kfree(dec_cfg);

	return rc;
}

int q6usm_open_write(struct us_client *usc,
		     uint32_t format)
{
	int rc = 0;
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_cmd_open_write open;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: session[%d]", __func__, usc->session);

	q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
	open.hdr.opcode = USM_STREAM_CMD_OPEN_WRITE;

	int_format = q6usm_ext2int_format(format);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong format[%d]", __func__, format);
		return -EINVAL;
	}

	open.format = int_format;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
	if (rc < 0) {
		pr_err("%s:open failed op[0x%x]rc[%d]\n",
		       __func__, open.hdr.opcode, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s:timeout. waited for OPEN_WRITE rc[%d]\n",
		       __func__, rc);
		goto fail_cmd;
	} else
		rc = 0;

fail_cmd:
	return rc;
}

int q6usm_run(struct us_client *usc, uint32_t flags,
	      uint32_t msw_ts, uint32_t lsw_ts)
{
	struct usm_stream_cmd_run run;
	int rc = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_hdr(usc, &run.hdr, sizeof(run), true);
	run.hdr.opcode = USM_SESSION_CMD_RUN;
	run.flags = flags;
	run.msw_ts = msw_ts;
	run.lsw_ts = lsw_ts;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &run);
	if (rc < 0) {
		pr_err("%s: Command run failed[%d]\n", __func__, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for run success rc[%d]\n",
		       __func__, rc);
	} else
		rc = 0;

fail_cmd:
	return rc;
}
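
/*
 * Queue capture buffers to the DSP. The OUT port is a ring of buf_cnt
 * buffers indexed by cpu_buf (next buffer owned by the CPU) and dsp_buf
 * (last buffer reported by the DSP); one USM_DATA_CMD_READ is sent per
 * buffer from cpu_buf up to, but not including, read_ind, wrapping at
 * buf_cnt.
 */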
int q6usm_read(struct us_client *usc, uint32_t read_ind)
{
	struct usm_stream_cmd_read read;
	struct us_port_data *port = NULL;
	int rc = 0;
	u32 read_counter = 0;
	u32 loop_ind = 0;
	u64 buf_addr = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[OUT];

	if (read_ind > port->buf_cnt) {
		pr_err("%s: wrong read_ind[%d]\n",
		       __func__, read_ind);
		return -EINVAL;
	}
	if (read_ind == port->cpu_buf) {
		pr_err("%s: no free region\n", __func__);
		return 0;
	}

	if (read_ind > port->cpu_buf) { /* 1 range */
		read_counter = read_ind - port->cpu_buf;
	} else { /* 2 ranges */
		read_counter = (port->buf_cnt - port->cpu_buf) + read_ind;
	}

	q6usm_add_hdr(usc, &read.hdr, sizeof(read), false);

	read.hdr.opcode = USM_DATA_CMD_READ;
	read.buf_size = port->buf_size;
	buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
	read.buf_addr_lsw = lower_32_bits(buf_addr);
	read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
	read.mem_map_handle = *((uint32_t *)(port->ext));

	for (loop_ind = 0; loop_ind < read_counter; ++loop_ind) {
		u32 temp_cpu_buf = port->cpu_buf;

		buf_addr = (u64)(port->phys) +
			   port->buf_size * (port->cpu_buf);
		read.buf_addr_lsw = lower_32_bits(buf_addr);
		read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
		read.seq_id = port->cpu_buf;
		read.hdr.token = port->cpu_buf;
		read.counter = 1;
		++(port->cpu_buf);
		if (port->cpu_buf == port->buf_cnt)
			port->cpu_buf = 0;

		rc = apr_send_pkt(usc->apr, (uint32_t *) &read);
		if (rc < 0) {
			port->cpu_buf = temp_cpu_buf;
			pr_err("%s:read op[0x%x]rc[%d]\n",
			       __func__, read.hdr.opcode, rc);
			break;
		}

		rc = 0;
	} /* bufs loop */

	return rc;
}
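
/*
 * Queue playback buffers to the DSP: one USM_DATA_CMD_WRITE per buffer
 * from cpu_buf up to write_ind. write_ind is first validated against the
 * free region bounded by dsp_buf, which only grows as WRITE_DONE events
 * arrive.
 */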
int q6usm_write(struct us_client *usc, uint32_t write_ind)
{
	int rc = 0;
	struct usm_stream_cmd_write cmd_write;
	struct us_port_data *port = NULL;
	u32 current_dsp_buf = 0;
	u64 buf_addr = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[IN];

	current_dsp_buf = port->dsp_buf;
	/* free region, caused by new dsp_buf report from DSP, */
	/* can be only extended */
	if (port->cpu_buf >= current_dsp_buf) {
		/* 2 -part free region, including empty buffer */
		if ((write_ind <= port->cpu_buf) &&
		    (write_ind > current_dsp_buf)) {
			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
			       __func__, write_ind,
			       current_dsp_buf, port->cpu_buf);
			return -EINVAL;
		}
	} else {
		/* 1 -part free region */
		if ((write_ind <= port->cpu_buf) ||
		    (write_ind > current_dsp_buf)) {
			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
			       __func__, write_ind,
			       current_dsp_buf, port->cpu_buf);
			return -EINVAL;
		}
	}

	q6usm_add_hdr(usc, &cmd_write.hdr, sizeof(cmd_write), false);

	cmd_write.hdr.opcode = USM_DATA_CMD_WRITE;
	cmd_write.buf_size = port->buf_size;
	buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
	cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
	cmd_write.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
	cmd_write.mem_map_handle = *((uint32_t *)(port->ext));
	cmd_write.res0 = 0;
	cmd_write.res1 = 0;
	cmd_write.res2 = 0;

	while (port->cpu_buf != write_ind) {
		u32 temp_cpu_buf = port->cpu_buf;

		buf_addr = (u64)(port->phys) +
			   port->buf_size * (port->cpu_buf);
		cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
		cmd_write.buf_addr_msw =
				msm_audio_populate_upper_32_bits(buf_addr);
		cmd_write.seq_id = port->cpu_buf;
		cmd_write.hdr.token = port->cpu_buf;

		++(port->cpu_buf);
		if (port->cpu_buf == port->buf_cnt)
			port->cpu_buf = 0;

		rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_write);
		if (rc < 0) {
			port->cpu_buf = temp_cpu_buf;
			pr_err("%s:write op[0x%x];rc[%d];cpu_buf[%d]\n",
			       __func__, cmd_write.hdr.opcode,
			       rc, port->cpu_buf);
			break;
		}

		rc = 0;
	}

	return rc;
}

bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region)
{
	struct us_port_data *port = NULL;
	u32 cpu_buf = 0;

	if ((usc == NULL) || !free_region) {
		pr_err("%s: input data wrong\n", __func__);
		return false;
	}
	port = &usc->port[IN];
	cpu_buf = port->cpu_buf + 1;
	if (cpu_buf == port->buf_cnt)
		cpu_buf = 0;

	*free_region = port->dsp_buf;

	return cpu_buf == *free_region;
}
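
/*
 * Send a simple session command (currently only CMD_CLOSE, mapped to
 * USM_STREAM_CMD_CLOSE) and wait for its acknowledgement.
 */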
int q6usm_cmd(struct us_client *usc, int cmd)
{
	struct apr_hdr hdr;
	int rc = 0;
	atomic_t *state;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	q6usm_add_hdr(usc, &hdr, sizeof(hdr), true);
	switch (cmd) {
	case CMD_CLOSE:
		hdr.opcode = USM_STREAM_CMD_CLOSE;
		state = &usc->cmd_state;
		break;
	default:
		pr_err("%s:Invalid format[%d]\n", __func__, cmd);
		goto fail_cmd;
	}

	rc = apr_send_pkt(usc->apr, (uint32_t *) &hdr);
	if (rc < 0) {
		pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode);
		goto fail_cmd;
	}
	rc = wait_event_timeout(usc->cmd_wait, (atomic_read(state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s:timeout. waited for response opcode[0x%x]\n",
		       __func__, hdr.opcode);
	} else
		rc = 0;

fail_cmd:
	return rc;
}

int q6usm_set_us_detection(struct us_client *usc,
			   struct usm_session_cmd_detect_info *detect_info,
			   uint16_t detect_info_size)
{
	int rc = 0;

	if ((usc == NULL) ||
	    (detect_info_size == 0) ||
	    (detect_info == NULL)) {
		pr_err("%s: wrong input: usc=0x%pK, inf_size=%d; info=0x%pK",
		       __func__,
		       usc,
		       detect_info_size,
		       detect_info);
		return -EINVAL;
	}

	q6usm_add_hdr(usc, &detect_info->hdr, detect_info_size, true);

	detect_info->hdr.opcode = USM_SESSION_CMD_SIGNAL_DETECT_MODE;

	rc = apr_send_pkt(usc->apr, (uint32_t *)detect_info);
	if (rc < 0) {
		pr_err("%s: Command signal detect failed\n", __func__);
		return -EINVAL;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_SIGNAL_DETECT_MODE: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else
		rc = 0;

	return rc;
}
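
/*
 * Set/get a stream parameter (module_id/param_id) through the port's
 * pre-mapped parameter buffer: the command carries the buffer's physical
 * address and memory-map handle, and the call blocks until the DSP
 * acknowledges.
 */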
int q6usm_set_us_stream_param(int dir, struct us_client *usc,
			      uint32_t module_id, uint32_t param_id,
			      uint32_t buf_size)
{
	int rc = 0;
	struct usm_stream_cmd_set_param cmd_set_param;
	struct us_port_data *port = NULL;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[dir];

	q6usm_add_hdr(usc, &cmd_set_param.hdr, sizeof(cmd_set_param), true);

	cmd_set_param.hdr.opcode = USM_STREAM_CMD_SET_PARAM;
	cmd_set_param.buf_size = buf_size;
	cmd_set_param.buf_addr_msw =
			msm_audio_populate_upper_32_bits(port->param_phys);
	cmd_set_param.buf_addr_lsw = lower_32_bits(port->param_phys);
	cmd_set_param.mem_map_handle =
			*((uint32_t *)(port->param_buf_mem_handle));
	cmd_set_param.module_id = module_id;
	cmd_set_param.param_id = param_id;
	cmd_set_param.hdr.token = 0;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_set_param);
	if (rc < 0) {
		pr_err("%s:write op[0x%x];rc[%d]\n",
		       __func__, cmd_set_param.hdr.opcode, rc);
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_SET_PARAM: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else
		rc = 0;

	return rc;
}

int q6usm_get_us_stream_param(int dir, struct us_client *usc,
			      uint32_t module_id, uint32_t param_id,
			      uint32_t buf_size)
{
	int rc = 0;
	struct usm_stream_cmd_get_param cmd_get_param;
	struct us_port_data *port = NULL;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[dir];

	q6usm_add_hdr(usc, &cmd_get_param.hdr, sizeof(cmd_get_param), true);

	cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
	cmd_get_param.buf_size = buf_size;
	cmd_get_param.buf_addr_msw =
			msm_audio_populate_upper_32_bits(port->param_phys);
	cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
	cmd_get_param.mem_map_handle =
			*((uint32_t *)(port->param_buf_mem_handle));
	cmd_get_param.module_id = module_id;
	cmd_get_param.param_id = param_id;
	cmd_get_param.hdr.token = 0;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);
	if (rc < 0) {
		pr_err("%s:write op[0x%x];rc[%d]\n",
		       __func__, cmd_get_param.hdr.opcode, rc);
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else
		rc = 0;

	return rc;
}

int __init q6usm_init(void)
{
	pr_debug("%s\n", __func__);
	init_waitqueue_head(&this_mmap.cmd_wait);
	memset(session, 0, sizeof(session));
	return 0;
}