// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <dsp/msm_audio_ion.h>
#include <dsp/apr_audio-v2.h>
#include <ipc/apr_us.h>
#include "q6usm.h"

#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3

#define MEM_4K_OFFSET 4095
#define MEM_4K_MASK 0xfffff000

#define USM_SESSION_MAX 0x02 /* aDSP:USM limit */

#define READDONE_IDX_STATUS 0
#define WRITEDONE_IDX_STATUS 0

/* Standard timeout in the asynchronous ops */
#define Q6USM_TIMEOUT_JIFFIES	(1*HZ) /* 1 sec */

static DEFINE_MUTEX(session_lock);
static struct us_client *session[USM_SESSION_MAX];

static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv);
static int32_t q6usm_callback(struct apr_client_data *data, void *priv);
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
			  uint32_t pkt_size, bool cmd_flg);

struct usm_mmap {
	atomic_t ref_cnt;
	atomic_t cmd_state;
	wait_queue_head_t cmd_wait;
	void *apr;
	int mem_handle;
};

static struct usm_mmap this_mmap;

static void q6usm_add_mmaphdr(struct apr_hdr *hdr,
			      uint32_t pkt_size, bool cmd_flg, u32 token)
{
	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				       APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	hdr->src_port = 0;
	hdr->dest_port = 0;
	if (cmd_flg) {
		hdr->token = token;
		atomic_set(&this_mmap.cmd_state, 1);
	}
	hdr->pkt_size = pkt_size;
}

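/*
 * q6usm_memory_map() - map a shared memory region with the aDSP over the
 * common USM APR port.  The (session << 8 | dir) value travels in the APR
 * token so the response can be matched; the call blocks on cmd_wait until
 * q6usm_mmapcallback() clears cmd_state or the 1 s timeout expires.
 */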
static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz,
			    uint32_t bufcnt, uint32_t session,
			    uint32_t *mem_handle)
{
	struct usm_cmd_memory_map_region mem_region_map;
	int rc = 0;

	if (this_mmap.apr == NULL) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_mmaphdr(&mem_region_map.hdr,
			  sizeof(struct usm_cmd_memory_map_region), true,
			  ((session << 8) | dir));

	mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
	mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
	mem_region_map.num_regions = 1;
	mem_region_map.flags = 0;
	mem_region_map.shm_addr_lsw = lower_32_bits(buf_add);
	mem_region_map.shm_addr_msw =
			msm_audio_populate_upper_32_bits(buf_add);
	mem_region_map.mem_size_bytes = bufsz * bufcnt;

	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map);
	if (rc < 0) {
		pr_err("%s: mem_map op[0x%x]rc[%d]\n",
		       __func__, mem_region_map.hdr.opcode, rc);
		rc = -EINVAL;
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_mmap.cmd_wait,
				(atomic_read(&this_mmap.cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for memory_map\n", __func__);
	} else {
		*mem_handle = this_mmap.mem_handle;
		rc = 0;
	}
fail_cmd:
	return rc;
}

int q6usm_memory_unmap(phys_addr_t buf_add, int dir, uint32_t session,
		       uint32_t mem_handle)
{
	struct usm_cmd_memory_unmap_region mem_unmap;
	int rc = 0;

	if (this_mmap.apr == NULL) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_mmaphdr(&mem_unmap.hdr,
			  sizeof(struct usm_cmd_memory_unmap_region), true,
			  ((session << 8) | dir));
	mem_unmap.hdr.opcode = USM_CMD_SHARED_MEM_UNMAP_REGION;
	mem_unmap.mem_map_handle = mem_handle;

	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
	if (rc < 0) {
		pr_err("%s: mem_unmap op[0x%x] rc[%d]\n",
		       __func__, mem_unmap.hdr.opcode, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_mmap.cmd_wait,
				(atomic_read(&this_mmap.cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
	} else
		rc = 0;
fail_cmd:
	return rc;
}

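/*
 * Session bookkeeping: the aDSP USM service supports at most USM_SESSION_MAX
 * concurrent sessions.  Slot 0 of the table corresponds to session id 1,
 * since session id 0 is reserved.
 */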
static int q6usm_session_alloc(struct us_client *usc)
{
	int ind = 0;

	mutex_lock(&session_lock);
	for (ind = 0; ind < USM_SESSION_MAX; ++ind) {
		if (!session[ind]) {
			session[ind] = usc;
			mutex_unlock(&session_lock);
			++ind; /* session id: 0 reserved */
			pr_debug("%s: session[%d] was allocated\n",
				 __func__, ind);
			return ind;
		}
	}
	mutex_unlock(&session_lock);
	return -ENOMEM;
}

static void q6usm_session_free(struct us_client *usc)
{
	/* Session index was incremented during allocation */
	uint16_t ind = (uint16_t)usc->session - 1;

	pr_debug("%s: to free session[%d]\n", __func__, ind);
	if (ind < USM_SESSION_MAX) {
		mutex_lock(&session_lock);
		session[ind] = NULL;
		mutex_unlock(&session_lock);
	}
}

static int q6usm_us_client_buf_free(unsigned int dir,
				    struct us_client *usc)
{
	struct us_port_data *port;
	int rc = 0;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)))
		return -EINVAL;

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];
	if (port == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return -EINVAL;
	}

	if (port->data == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return 0;
	}

	rc = q6usm_memory_unmap(port->phys, dir, usc->session,
				*((uint32_t *)port->ext));
	pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
		 (void *)port->data, (u64)port->phys, (void *)&port->phys);

	msm_audio_ion_free(port->dma_buf);

	port->data = NULL;
	port->phys = 0;
	port->buf_size = 0;
	port->buf_cnt = 0;
	port->dma_buf = NULL;

	mutex_unlock(&usc->cmd_lock);
	return rc;
}

int q6usm_us_param_buf_free(unsigned int dir,
			    struct us_client *usc)
{
	struct us_port_data *port;
	int rc = 0;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)))
		return -EINVAL;

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];
	if (port == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return -EINVAL;
	}

	if (port->param_buf == NULL) {
		mutex_unlock(&usc->cmd_lock);
		return 0;
	}

	rc = q6usm_memory_unmap(port->param_phys, dir, usc->session,
				*((uint32_t *)port->param_buf_mem_handle));
	pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
		 (void *)port->param_buf, (u64)port->param_phys,
		 (void *)&port->param_phys);

	msm_audio_ion_free(port->param_dma_buf);

	port->param_buf = NULL;
	port->param_phys = 0;
	port->param_buf_size = 0;
	port->param_dma_buf = NULL;

	mutex_unlock(&usc->cmd_lock);
	return rc;
}

void q6usm_us_client_free(struct us_client *usc)
{
	int loopcnt = 0;
	struct us_port_data *port;
	uint32_t *p_mem_handle = NULL;

	if ((usc == NULL) ||
	    !(usc->session))
		return;

	for (loopcnt = 0; loopcnt <= OUT; ++loopcnt) {
		port = &usc->port[loopcnt];
		if (port->data == NULL)
			continue;
		pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
		q6usm_us_client_buf_free(loopcnt, usc);
		q6usm_us_param_buf_free(loopcnt, usc);
	}

	q6usm_session_free(usc);
	apr_deregister(usc->apr);
	pr_debug("%s: APR De-Register\n", __func__);

	if (atomic_read(&this_mmap.ref_cnt) <= 0) {
		pr_err("%s: APR Common Port Already Closed\n", __func__);
		goto done;
	}

	atomic_dec(&this_mmap.ref_cnt);
	if (atomic_read(&this_mmap.ref_cnt) == 0) {
		apr_deregister(this_mmap.apr);
		pr_debug("%s: APR De-Register common port\n", __func__);
	}
done:
	p_mem_handle = (uint32_t *)usc->port[IN].ext;
	kfree(p_mem_handle);
	kfree(usc);
	pr_debug("%s:\n", __func__);
}

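/*
 * q6usm_us_client_alloc() - create a USM client: allocate a session slot,
 * register a per-session APR handle and (on first use) the common mmap
 * port, and hand each port its slice of the mem-handle array.  The four
 * uint32_t mem handles come from a single allocation that is later freed
 * through usc->port[IN].ext in q6usm_us_client_free().
 */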
struct us_client *q6usm_us_client_alloc(
	void (*cb)(uint32_t, uint32_t, uint32_t *, void *),
	void *priv)
{
	struct us_client *usc;
	uint32_t *p_mem_handle = NULL;
	int n;
	int lcnt = 0;

	usc = kzalloc(sizeof(struct us_client), GFP_KERNEL);
	if (usc == NULL)
		return NULL;

	p_mem_handle = kzalloc(sizeof(uint32_t) * 4, GFP_KERNEL);
	if (p_mem_handle == NULL) {
		kfree(usc);
		return NULL;
	}

	n = q6usm_session_alloc(usc);
	if (n <= 0)
		goto fail_session;
	usc->session = n;
	usc->cb = cb;
	usc->priv = priv;
	usc->apr = apr_register("ADSP", "USM",
				(apr_fn)q6usm_callback,
				((usc->session) << 8 | 0x0001),
				usc);
	if (usc->apr == NULL) {
		pr_err("%s: Registration with APR failed\n", __func__);
		goto fail;
	}

	pr_debug("%s: Registering the common port with APR\n", __func__);
	if (atomic_read(&this_mmap.ref_cnt) == 0) {
		this_mmap.apr = apr_register("ADSP", "USM",
					     (apr_fn)q6usm_mmapcallback,
					     0x0FFFFFFFF, &this_mmap);
		if (this_mmap.apr == NULL) {
			pr_err("%s: USM port registration failed\n",
			       __func__);
			goto fail;
		}
	}
	atomic_inc(&this_mmap.ref_cnt);

	init_waitqueue_head(&usc->cmd_wait);
	mutex_init(&usc->cmd_lock);
	for (lcnt = 0; lcnt <= OUT; ++lcnt) {
		mutex_init(&usc->port[lcnt].lock);
		spin_lock_init(&usc->port[lcnt].dsp_lock);
		usc->port[lcnt].ext = (void *)p_mem_handle++;
		usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++;
		pr_err("%s: usc->port[%d].ext=%pK;\n",
		       __func__, lcnt, usc->port[lcnt].ext);
	}
	atomic_set(&usc->cmd_state, 0);

	return usc;

fail:
	kfree(p_mem_handle);
	q6usm_us_client_free(usc);
	return NULL;

fail_session:
	kfree(p_mem_handle);
	kfree(usc);
	return NULL;
}

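/*
 * Buffer allocation: both helpers below carve out an ION region, page-align
 * the requested size and map it to the aDSP with q6usm_memory_map().  The
 * data buffer is a ring of bufcnt buffers of bufsz bytes each; the parameter
 * buffer is a single region shared by the get/set-param commands.
 */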
int q6usm_us_client_buf_alloc(unsigned int dir,
			      struct us_client *usc,
			      unsigned int bufsz,
			      unsigned int bufcnt)
{
	int rc = 0;
	struct us_port_data *port = NULL;
	unsigned int size = bufsz*bufcnt;
	size_t len;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)) || (size == 0) ||
	    (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
		pr_err("%s: wrong parameters: size=%d; bufcnt=%d\n",
		       __func__, size, bufcnt);
		return -EINVAL;
	}

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];

	/* The size to allocate should be a multiple of 4K bytes */
	size = PAGE_ALIGN(size);
	rc = msm_audio_ion_alloc(&port->dma_buf,
				 size, &port->phys,
				 &len, &port->data);
	if (rc) {
		pr_err("%s: US ION allocation failed, rc = %d\n",
		       __func__, rc);
		mutex_unlock(&usc->cmd_lock);
		return -ENOMEM;
	}

	port->buf_cnt = bufcnt;
	port->buf_size = bufsz;
	pr_debug("%s: data[%pK]; phys[%llx]; [%pK]\n", __func__,
		 (void *)port->data,
		 (u64)port->phys,
		 (void *)&port->phys);

	rc = q6usm_memory_map(port->phys, dir, size, 1, usc->session,
			      (uint32_t *)port->ext);
	if (rc < 0) {
		pr_err("%s: CMD Memory_map failed\n", __func__);
		mutex_unlock(&usc->cmd_lock);
		q6usm_us_client_buf_free(dir, usc);
		q6usm_us_param_buf_free(dir, usc);
	} else {
		mutex_unlock(&usc->cmd_lock);
		rc = 0;
	}

	return rc;
}

int q6usm_us_param_buf_alloc(unsigned int dir,
			     struct us_client *usc,
			     unsigned int bufsz)
{
	int rc = 0;
	struct us_port_data *port = NULL;
	unsigned int size = bufsz;
	size_t len;

	if ((usc == NULL) ||
	    ((dir != IN) && (dir != OUT)) ||
	    (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
		pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n",
		       __func__, dir, bufsz);
		return -EINVAL;
	}

	mutex_lock(&usc->cmd_lock);
	port = &usc->port[dir];

	if (bufsz == 0) {
		pr_debug("%s: bufsz=0, get/set param commands are forbidden\n",
			 __func__);
		port->param_buf = NULL;
		mutex_unlock(&usc->cmd_lock);
		return rc;
	}

	/* The size to allocate should be a multiple of 4K bytes */
	size = PAGE_ALIGN(size);
	rc = msm_audio_ion_alloc(&port->param_dma_buf,
				 size, &port->param_phys,
				 &len, &port->param_buf);
	if (rc) {
		pr_err("%s: US ION allocation failed, rc = %d\n",
		       __func__, rc);
		mutex_unlock(&usc->cmd_lock);
		return -ENOMEM;
	}

	port->param_buf_size = bufsz;
	pr_debug("%s: param_buf[%pK]; param_phys[%llx]; [%pK]\n", __func__,
		 (void *)port->param_buf,
		 (u64)port->param_phys,
		 (void *)&port->param_phys);

	rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1,
			      usc->session,
			      (uint32_t *)port->param_buf_mem_handle);
	if (rc < 0) {
		pr_err("%s: CMD Memory_map failed\n", __func__);
		mutex_unlock(&usc->cmd_lock);
		q6usm_us_client_buf_free(dir, usc);
		q6usm_us_param_buf_free(dir, usc);
	} else {
		mutex_unlock(&usc->cmd_lock);
		rc = 0;
	}

	return rc;
}

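/*
 * APR callbacks.  q6usm_mmapcallback() serves the common memory-map port:
 * it records the handle returned in USM_CMDRSP_SHARED_MEM_MAP_REGION and
 * wakes the waiter in q6usm_memory_map()/q6usm_memory_unmap().
 * q6usm_callback() serves a single session and dispatches command acks and
 * data events to the client's registered callback.
 */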
static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv)
{
	uint32_t token;
	uint32_t *payload = data->payload;

	pr_debug("%s: ptr0[0x%x]; ptr1[0x%x]; opcode[0x%x]\n",
		 __func__, payload[0], payload[1], data->opcode);
	pr_debug("%s: token[0x%x]; payload_size[%d]; src[%d]; dest[%d];\n",
		 __func__, data->token, data->payload_size,
		 data->src_port, data->dest_port);

	if (data->opcode == APR_BASIC_RSP_RESULT) {
		/* status field check */
		if (payload[1]) {
			pr_err("%s: wrong response[%d] on cmd [%d]\n",
			       __func__, payload[1], payload[0]);
		} else {
			token = data->token;
			switch (payload[0]) {
			case USM_CMD_SHARED_MEM_UNMAP_REGION:
				if (atomic_read(&this_mmap.cmd_state)) {
					atomic_set(&this_mmap.cmd_state, 0);
					wake_up(&this_mmap.cmd_wait);
				}
				/* fallthrough */
			case USM_CMD_SHARED_MEM_MAP_REGION:
				/* For MEM_MAP an additional answer is awaited, */
				/* therefore, no wake-up here */
				pr_debug("%s: cmd[0x%x]; result[0x%x]\n",
					 __func__, payload[0], payload[1]);
				break;
			default:
				pr_debug("%s: wrong command[0x%x]\n",
					 __func__, payload[0]);
				break;
			}
		}
	} else {
		if (data->opcode == USM_CMDRSP_SHARED_MEM_MAP_REGION) {
			this_mmap.mem_handle = payload[0];
			pr_debug("%s: memory map handle = 0x%x",
				 __func__, payload[0]);
			if (atomic_read(&this_mmap.cmd_state)) {
				atomic_set(&this_mmap.cmd_state, 0);
				wake_up(&this_mmap.cmd_wait);
			}
		}
	}
	return 0;
}

static int32_t q6usm_callback(struct apr_client_data *data, void *priv)
{
	struct us_client *usc = (struct us_client *)priv;
	unsigned long dsp_flags;
	uint32_t *payload = data->payload;
	uint32_t token = data->token;
	uint32_t opcode = Q6USM_EVENT_UNDEF;

	if (usc == NULL) {
		pr_err("%s: client info is NULL\n", __func__);
		return -EINVAL;
	}

	if (data->opcode == APR_BASIC_RSP_RESULT) {
		/* status field check */
		if (payload[1]) {
			pr_err("%s: wrong response[%d] on cmd [%d]\n",
			       __func__, payload[1], payload[0]);
			if (usc->cb)
				usc->cb(data->opcode, token,
					(uint32_t *)data->payload, usc->priv);
		} else {
			switch (payload[0]) {
			case USM_SESSION_CMD_RUN:
			case USM_STREAM_CMD_CLOSE:
				if (token != usc->session) {
					pr_err("%s: wrong token[%d]",
					       __func__, token);
					break;
				}
				/* fallthrough */
			case USM_STREAM_CMD_OPEN_READ:
			case USM_STREAM_CMD_OPEN_WRITE:
			case USM_STREAM_CMD_SET_ENC_PARAM:
			case USM_DATA_CMD_MEDIA_FORMAT_UPDATE:
			case USM_SESSION_CMD_SIGNAL_DETECT_MODE:
			case USM_STREAM_CMD_SET_PARAM:
			case USM_STREAM_CMD_GET_PARAM:
				if (atomic_read(&usc->cmd_state)) {
					atomic_set(&usc->cmd_state, 0);
					wake_up(&usc->cmd_wait);
				}
				if (usc->cb)
					usc->cb(data->opcode, token,
						(uint32_t *)data->payload,
						usc->priv);
				break;
			default:
				break;
			}
		}
		return 0;
	}

	switch (data->opcode) {
	case RESET_EVENTS: {
		pr_err("%s: Reset event is received: %d %d\n",
		       __func__,
		       data->reset_event,
		       data->reset_proc);

		opcode = RESET_EVENTS;

		apr_reset(this_mmap.apr);
		this_mmap.apr = NULL;

		apr_reset(usc->apr);
		usc->apr = NULL;

		break;
	}
	case USM_DATA_EVENT_READ_DONE: {
		struct us_port_data *port = &usc->port[OUT];

		opcode = Q6USM_EVENT_READ_DONE;
		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
		if (payload[READDONE_IDX_STATUS]) {
			pr_err("%s: wrong READDONE[%d]; token[%d]\n",
			       __func__,
			       payload[READDONE_IDX_STATUS],
			       token);
			token = USM_WRONG_TOKEN;
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			break;
		}

		if (port->expected_token != token) {
			u32 cpu_buf = port->cpu_buf;

			pr_err("%s: expected[%d] != token[%d]\n",
			       __func__, port->expected_token, token);
			pr_debug("%s: dsp_buf=%d; cpu_buf=%d;\n",
				 __func__, port->dsp_buf, cpu_buf);

			token = USM_WRONG_TOKEN;
			/* To prevent data handling from continuing */
			port->expected_token = USM_WRONG_TOKEN;
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			break;
		} /* port->expected_token != data->token */

		port->expected_token = token + 1;
		if (port->expected_token == port->buf_cnt)
			port->expected_token = 0;

		/* gap support */
		if (port->expected_token != port->cpu_buf) {
			port->dsp_buf = port->expected_token;
			token = port->dsp_buf; /* for callback */
		} else
			port->dsp_buf = token;

		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
		break;
	} /* case USM_DATA_EVENT_READ_DONE */
	case USM_DATA_EVENT_WRITE_DONE: {
		struct us_port_data *port = &usc->port[IN];

		opcode = Q6USM_EVENT_WRITE_DONE;
		if (payload[WRITEDONE_IDX_STATUS]) {
			pr_err("%s: wrong WRITEDONE_IDX_STATUS[%d]\n",
			       __func__,
			       payload[WRITEDONE_IDX_STATUS]);
			break;
		}

		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
		port->dsp_buf = token + 1;
		if (port->dsp_buf == port->buf_cnt)
			port->dsp_buf = 0;
		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
		break;
	} /* case USM_DATA_EVENT_WRITE_DONE */
	case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT: {
		pr_debug("%s: US detect result: result=%d",
			 __func__,
			 payload[0]);

		opcode = Q6USM_EVENT_SIGNAL_DETECT_RESULT;
		break;
	} /* case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT */
	default:
		return 0;
	} /* switch */

	if (usc->cb)
		usc->cb(opcode, token,
			data->payload, usc->priv);

	return 0;
}

uint32_t q6usm_get_virtual_address(int dir,
				   struct us_client *usc,
				   struct vm_area_struct *vms)
{
	uint32_t ret = 0xffffffff;

	if (vms && (usc != NULL) && ((dir == IN) || (dir == OUT))) {
		struct us_port_data *port = &usc->port[dir];
		int size = PAGE_ALIGN(port->buf_size * port->buf_cnt);
		struct audio_buffer ab;

		ab.phys = port->phys;
		ab.data = port->data;
		ab.used = 1;
		ab.size = size;
		ab.actual_size = size;
		ab.dma_buf = port->dma_buf;

		ret = msm_audio_ion_mmap(&ab, vms);
	}
	return ret;
}

static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
			  uint32_t pkt_size, bool cmd_flg)
{
	mutex_lock(&usc->cmd_lock);
	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				       APR_HDR_LEN(sizeof(struct apr_hdr)),
				       APR_PKT_VER);
	hdr->src_svc = ((struct apr_svc *)usc->apr)->id;
	hdr->src_domain = APR_DOMAIN_APPS;
	hdr->dest_svc = APR_SVC_USM;
	hdr->dest_domain = APR_DOMAIN_ADSP;
	hdr->src_port = (usc->session << 8) | 0x0001;
	hdr->dest_port = (usc->session << 8) | 0x0001;
	if (cmd_flg) {
		hdr->token = usc->session;
		atomic_set(&usc->cmd_state, 1);
	}
	hdr->pkt_size = pkt_size;
	mutex_unlock(&usc->cmd_lock);
}

static uint32_t q6usm_ext2int_format(uint32_t ext_format)
{
	uint32_t int_format = INVALID_FORMAT;

	switch (ext_format) {
	case FORMAT_USPS_EPOS:
		int_format = US_POINT_EPOS_FORMAT_V2;
		break;
	case FORMAT_USRAW:
		int_format = US_RAW_FORMAT_V2;
		break;
	case FORMAT_USPROX:
		int_format = US_PROX_FORMAT_V4;
		break;
	case FORMAT_USGES_SYNC:
		int_format = US_GES_SYNC_FORMAT;
		break;
	case FORMAT_USRAW_SYNC:
		int_format = US_RAW_SYNC_FORMAT;
		break;
	default:
		pr_err("%s: Invalid format[%d]\n", __func__, ext_format);
		break;
	}

	return int_format;
}

int q6usm_open_read(struct us_client *usc,
		    uint32_t format)
{
	uint32_t int_format = INVALID_FORMAT;
	int rc = 0x00;
	struct usm_stream_cmd_open_read open;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: client or its apr is NULL\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: session[%d]", __func__, usc->session);

	q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
	open.hdr.opcode = USM_STREAM_CMD_OPEN_READ;
	open.src_endpoint = 0; /* AFE */
	open.pre_proc_top = 0; /* No preprocessing required */

	int_format = q6usm_ext2int_format(format);
	if (int_format == INVALID_FORMAT)
		return -EINVAL;

	open.uMode = STREAM_PRIORITY_NORMAL;
	open.format = int_format;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
	if (rc < 0) {
		pr_err("%s: open failed op[0x%x]rc[%d]\n",
		       __func__, open.hdr.opcode, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout, waited for OPEN_READ rc[%d]\n",
		       __func__, rc);
		goto fail_cmd;
	} else
		rc = 0;
fail_cmd:
	return rc;
}

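/*
 * Encoder/decoder configuration.  Transparent (format-specific) parameter
 * data follows the static part of the packet; its size is rounded up to a
 * whole number of u32 words, and when it does not fit into the built-in
 * USM_MAX_CFG_DATA_SIZE area the whole command block is reallocated with
 * the extra room appended.
 */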
int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_cmd_encdec_cfg_blk enc_cfg_obj;
	struct usm_stream_cmd_encdec_cfg_blk *enc_cfg = &enc_cfg_obj;
	int rc = 0;
	uint32_t total_cfg_size =
		sizeof(struct usm_stream_cmd_encdec_cfg_blk);
	uint32_t round_params_size = 0;
	uint8_t is_allocated = 0;

	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Transparent configuration data is after enc_cfg */
	/* An integer number of u32s is required */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* Dynamically allocated encdec_cfg_blk is required */
		/* static part use */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (enc_cfg == NULL) {
			pr_err("%s: enc_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else
		round_params_size = 0;

	q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size, true);

	enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM;
	enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK;
	enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk) +
			      round_params_size;
	enc_cfg->enc_blk.frames_per_buf = 1;
	enc_cfg->enc_blk.format_id = int_format;
	enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common) +
				    USM_MAX_CFG_DATA_SIZE +
				    round_params_size;
	memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));

	/* Transparent data copy */
	memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params,
	       us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]\n",
		 __func__,
		 enc_cfg->enc_blk.cfg_size,
		 us_cfg->params_size);
	pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n",
		 __func__,
		 enc_cfg->enc_blk.transp_data[0],
		 enc_cfg->enc_blk.transp_data[1],
		 enc_cfg->enc_blk.transp_data[2],
		 enc_cfg->enc_blk.transp_data[3],
		 enc_cfg->enc_blk.transp_data[4],
		 enc_cfg->enc_blk.transp_data[5],
		 enc_cfg->enc_blk.transp_data[6],
		 enc_cfg->enc_blk.transp_data[7]);
	pr_debug("%s: srate:%d, ch=%d, bps= %d;\n",
		 __func__, enc_cfg->enc_blk.cfg_common.sample_rate,
		 enc_cfg->enc_blk.cfg_common.ch_cfg,
		 enc_cfg->enc_blk.cfg_common.bits_per_sample);
	pr_debug("dmap:[0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x]; dev_id=0x%x\n",
		 enc_cfg->enc_blk.cfg_common.data_map[0],
		 enc_cfg->enc_blk.cfg_common.data_map[1],
		 enc_cfg->enc_blk.cfg_common.data_map[2],
		 enc_cfg->enc_blk.cfg_common.data_map[3],
		 enc_cfg->enc_blk.cfg_common.data_map[4],
		 enc_cfg->enc_blk.cfg_common.data_map[5],
		 enc_cfg->enc_blk.cfg_common.data_map[6],
		 enc_cfg->enc_blk.cfg_common.data_map[7],
		 enc_cfg->enc_blk.cfg_common.dev_id);

	rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg);
	if (rc < 0) {
		pr_err("%s: Command open failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, enc_cfg->hdr.opcode);
	} else
		rc = 0;

fail_cmd:
	if (is_allocated == 1)
		kfree(enc_cfg);

	return rc;
}

int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_media_format_update dec_cfg_obj;
	struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj;
	int rc = 0;
	uint32_t total_cfg_size =
		sizeof(struct usm_stream_media_format_update);
	uint32_t round_params_size = 0;
	uint8_t is_allocated = 0;

	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Transparent configuration data is after dec_cfg */
	/* An integer number of u32s is required */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* Dynamically allocated encdec_cfg_blk is required */
		/* static part use */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (dec_cfg == NULL) {
			pr_err("%s: dec_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else { /* static transp_data is enough */
		round_params_size = 0;
	}

	q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size, true);

	dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE;
	dec_cfg->format_id = int_format;
	dec_cfg->cfg_size = sizeof(struct usm_cfg_common) +
			    USM_MAX_CFG_DATA_SIZE +
			    round_params_size;
	memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));

	/* Transparent data copy */
	memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n",
		 __func__,
		 dec_cfg->cfg_size,
		 us_cfg->params_size,
		 dec_cfg->transp_data[0],
		 dec_cfg->transp_data[1],
		 dec_cfg->transp_data[2],
		 dec_cfg->transp_data[3]);

	rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg);
	if (rc < 0) {
		pr_err("%s: Command open failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, dec_cfg->hdr.opcode);
	} else
		rc = 0;

fail_cmd:
	if (is_allocated == 1)
		kfree(dec_cfg);

	return rc;
}

int q6usm_open_write(struct us_client *usc,
		     uint32_t format)
{
	int rc = 0;
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_cmd_open_write open;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: session[%d]", __func__, usc->session);

	q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
	open.hdr.opcode = USM_STREAM_CMD_OPEN_WRITE;

	int_format = q6usm_ext2int_format(format);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong format[%d]", __func__, format);
		return -EINVAL;
	}

	open.format = int_format;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
	if (rc < 0) {
		pr_err("%s: open failed op[0x%x]rc[%d]\n",
		       __func__, open.hdr.opcode, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for OPEN_WRITE rc[%d]\n",
		       __func__, rc);
		goto fail_cmd;
	} else
		rc = 0;
fail_cmd:
	return rc;
}

int q6usm_run(struct us_client *usc, uint32_t flags,
	      uint32_t msw_ts, uint32_t lsw_ts)
{
	struct usm_stream_cmd_run run;
	int rc = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_hdr(usc, &run.hdr, sizeof(run), true);
	run.hdr.opcode = USM_SESSION_CMD_RUN;
	run.flags = flags;
	run.msw_ts = msw_ts;
	run.lsw_ts = lsw_ts;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &run);
	if (rc < 0) {
		pr_err("%s: Command run failed[%d]\n", __func__, rc);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for run success rc[%d]\n",
		       __func__, rc);
	} else
		rc = 0;

fail_cmd:
	return rc;
}

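/*
 * Data path.  Each port keeps a ring of buf_cnt buffers: cpu_buf is the next
 * buffer the host hands to the DSP and dsp_buf tracks the DSP side.
 * q6usm_read() queues one USM_DATA_CMD_READ per buffer from cpu_buf up to
 * (but not including) read_ind, wrapping at buf_cnt.
 */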
int q6usm_read(struct us_client *usc, uint32_t read_ind)
{
	struct usm_stream_cmd_read read;
	struct us_port_data *port = NULL;
	int rc = 0;
	u32 read_counter = 0;
	u32 loop_ind = 0;
	u64 buf_addr = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[OUT];

	if (read_ind > port->buf_cnt) {
		pr_err("%s: wrong read_ind[%d]\n",
		       __func__, read_ind);
		return -EINVAL;
	}
	if (read_ind == port->cpu_buf) {
		pr_err("%s: no free region\n", __func__);
		return 0;
	}

	if (read_ind > port->cpu_buf) { /* 1 range */
		read_counter = read_ind - port->cpu_buf;
	} else { /* 2 ranges */
		read_counter = (port->buf_cnt - port->cpu_buf) + read_ind;
	}

	q6usm_add_hdr(usc, &read.hdr, sizeof(read), false);

	read.hdr.opcode = USM_DATA_CMD_READ;
	read.buf_size = port->buf_size;
	buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
	read.buf_addr_lsw = lower_32_bits(buf_addr);
	read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
	read.mem_map_handle = *((uint32_t *)(port->ext));

	for (loop_ind = 0; loop_ind < read_counter; ++loop_ind) {
		u32 temp_cpu_buf = port->cpu_buf;

		buf_addr = (u64)(port->phys) +
			   port->buf_size * (port->cpu_buf);
		read.buf_addr_lsw = lower_32_bits(buf_addr);
		read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
		read.seq_id = port->cpu_buf;
		read.hdr.token = port->cpu_buf;
		read.counter = 1;

		++(port->cpu_buf);
		if (port->cpu_buf == port->buf_cnt)
			port->cpu_buf = 0;

		rc = apr_send_pkt(usc->apr, (uint32_t *) &read);
		if (rc < 0) {
			port->cpu_buf = temp_cpu_buf;
			pr_err("%s: read op[0x%x]rc[%d]\n",
			       __func__, read.hdr.opcode, rc);
			break;
		}

		rc = 0;
	} /* bufs loop */

	return rc;
}

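/*
 * q6usm_write() queues one USM_DATA_CMD_WRITE per buffer from cpu_buf up to
 * write_ind.  Before queuing it validates write_ind against the current free
 * region, which is bounded by dsp_buf and can only grow as the DSP reports
 * completed writes.
 */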
int q6usm_write(struct us_client *usc, uint32_t write_ind)
{
	int rc = 0;
	struct usm_stream_cmd_write cmd_write;
	struct us_port_data *port = NULL;
	u32 current_dsp_buf = 0;
	u64 buf_addr = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[IN];

	current_dsp_buf = port->dsp_buf;
	/* free region, caused by new dsp_buf report from DSP, */
	/* can be only extended */
	if (port->cpu_buf >= current_dsp_buf) {
		/* 2-part free region, including empty buffer */
		if ((write_ind <= port->cpu_buf)  &&
		    (write_ind > current_dsp_buf)) {
			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
			       __func__, write_ind,
			       current_dsp_buf, port->cpu_buf);
			return -EINVAL;
		}
	} else {
		/* 1-part free region */
		if ((write_ind <= port->cpu_buf)  ||
		    (write_ind > current_dsp_buf)) {
			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
			       __func__, write_ind,
			       current_dsp_buf, port->cpu_buf);
			return -EINVAL;
		}
	}

	q6usm_add_hdr(usc, &cmd_write.hdr, sizeof(cmd_write), false);

	cmd_write.hdr.opcode = USM_DATA_CMD_WRITE;
	cmd_write.buf_size = port->buf_size;
	buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
	cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
	cmd_write.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
	cmd_write.mem_map_handle = *((uint32_t *)(port->ext));
	cmd_write.res0 = 0;
	cmd_write.res1 = 0;
	cmd_write.res2 = 0;

	while (port->cpu_buf != write_ind) {
		u32 temp_cpu_buf = port->cpu_buf;

		buf_addr = (u64)(port->phys) +
			   port->buf_size * (port->cpu_buf);
		cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
		cmd_write.buf_addr_msw =
			msm_audio_populate_upper_32_bits(buf_addr);
		cmd_write.seq_id = port->cpu_buf;
		cmd_write.hdr.token = port->cpu_buf;

		++(port->cpu_buf);
		if (port->cpu_buf == port->buf_cnt)
			port->cpu_buf = 0;

		rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_write);
		if (rc < 0) {
			port->cpu_buf = temp_cpu_buf;
			pr_err("%s: write op[0x%x]; rc[%d]; cpu_buf[%d]\n",
			       __func__, cmd_write.hdr.opcode,
			       rc, port->cpu_buf);
			break;
		}

		rc = 0;
	}

	return rc;
}

bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region)
{
	struct us_port_data *port = NULL;
	u32 cpu_buf = 0;

	if ((usc == NULL) || !free_region) {
		pr_err("%s: input data wrong\n", __func__);
		return false;
	}
	port = &usc->port[IN];
	cpu_buf = port->cpu_buf + 1;
	if (cpu_buf == port->buf_cnt)
		cpu_buf = 0;

	*free_region = port->dsp_buf;

	return cpu_buf == *free_region;
}

int q6usm_cmd(struct us_client *usc, int cmd)
{
	struct apr_hdr hdr;
	int rc = 0;
	atomic_t *state;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	q6usm_add_hdr(usc, &hdr, sizeof(hdr), true);
	switch (cmd) {
	case CMD_CLOSE:
		hdr.opcode = USM_STREAM_CMD_CLOSE;
		state = &usc->cmd_state;
		break;
	default:
		pr_err("%s: Invalid command[%d]\n", __func__, cmd);
		goto fail_cmd;
	}

	rc = apr_send_pkt(usc->apr, (uint32_t *) &hdr);
	if (rc < 0) {
		pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode);
		goto fail_cmd;
	}

	rc = wait_event_timeout(usc->cmd_wait, (atomic_read(state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for response opcode[0x%x]\n",
		       __func__, hdr.opcode);
	} else
		rc = 0;
fail_cmd:
	return rc;
}

int q6usm_set_us_detection(struct us_client *usc,
			   struct usm_session_cmd_detect_info *detect_info,
			   uint16_t detect_info_size)
{
	int rc = 0;

	if ((usc == NULL) ||
	    (detect_info_size == 0) ||
	    (detect_info == NULL)) {
		pr_err("%s: wrong input: usc=0x%pK, inf_size=%d; info=0x%pK",
		       __func__,
		       usc,
		       detect_info_size,
		       detect_info);
		return -EINVAL;
	}

	q6usm_add_hdr(usc, &detect_info->hdr, detect_info_size, true);
	detect_info->hdr.opcode = USM_SESSION_CMD_SIGNAL_DETECT_MODE;

	rc = apr_send_pkt(usc->apr, (uint32_t *)detect_info);
	if (rc < 0) {
		pr_err("%s: Command signal detect failed\n", __func__);
		return -EINVAL;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_SIGNAL_DETECT_MODE: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else
		rc = 0;

	return rc;
}

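/*
 * Stream parameter access.  The set/get param commands point the aDSP at the
 * previously mapped parameter buffer (param_phys/param_buf_mem_handle), so
 * q6usm_us_param_buf_alloc() must have succeeded for the given port before
 * these are used.
 */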
int q6usm_set_us_stream_param(int dir, struct us_client *usc,
			      uint32_t module_id, uint32_t param_id,
			      uint32_t buf_size)
{
	int rc = 0;
	struct usm_stream_cmd_set_param cmd_set_param;
	struct us_port_data *port = NULL;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[dir];

	q6usm_add_hdr(usc, &cmd_set_param.hdr, sizeof(cmd_set_param), true);

	cmd_set_param.hdr.opcode = USM_STREAM_CMD_SET_PARAM;
	cmd_set_param.buf_size = buf_size;
	cmd_set_param.buf_addr_msw =
		msm_audio_populate_upper_32_bits(port->param_phys);
	cmd_set_param.buf_addr_lsw = lower_32_bits(port->param_phys);
	cmd_set_param.mem_map_handle =
		*((uint32_t *)(port->param_buf_mem_handle));
	cmd_set_param.module_id = module_id;
	cmd_set_param.param_id = param_id;
	cmd_set_param.hdr.token = 0;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_set_param);
	if (rc < 0) {
		pr_err("%s: write op[0x%x]; rc[%d]\n",
		       __func__, cmd_set_param.hdr.opcode, rc);
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_SET_PARAM: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else
		rc = 0;

	return rc;
}

int q6usm_get_us_stream_param(int dir, struct us_client *usc,
			      uint32_t module_id, uint32_t param_id,
			      uint32_t buf_size)
{
	int rc = 0;
	struct usm_stream_cmd_get_param cmd_get_param;
	struct us_port_data *port = NULL;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[dir];

	q6usm_add_hdr(usc, &cmd_get_param.hdr, sizeof(cmd_get_param), true);

	cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
	cmd_get_param.buf_size = buf_size;
	cmd_get_param.buf_addr_msw =
		msm_audio_populate_upper_32_bits(port->param_phys);
	cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
	cmd_get_param.mem_map_handle =
		*((uint32_t *)(port->param_buf_mem_handle));
	cmd_get_param.module_id = module_id;
	cmd_get_param.param_id = param_id;
	cmd_get_param.hdr.token = 0;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);
	if (rc < 0) {
		pr_err("%s: write op[0x%x]; rc[%d]\n",
		       __func__, cmd_get_param.hdr.opcode, rc);
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else
		rc = 0;

	return rc;
}

int __init q6usm_init(void)
{
	pr_debug("%s\n", __func__);
	init_waitqueue_head(&this_mmap.cmd_wait);
	memset(session, 0, sizeof(session));
	return 0;
}