q6usm.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/mutex.h>
  6. #include <linux/wait.h>
  7. #include <linux/sched.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/slab.h>
  10. #include <dsp/msm_audio_ion.h>
  11. #include <dsp/apr_audio-v2.h>
  12. #include <ipc/apr_us.h>
  13. #include "q6usm.h"
/* ADSP memory-pool id used for all USM shared-memory mappings */
#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3
#define MEM_4K_OFFSET 4095
#define MEM_4K_MASK 0xfffff000
#define USM_SESSION_MAX 0x02 /* aDSP:USM limit */
/* Index of the status word in READ_DONE/WRITE_DONE payloads */
#define READDONE_IDX_STATUS 0
#define WRITEDONE_IDX_STATUS 0
/* Standard timeout in the asynchronous ops */
#define Q6USM_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */

/* Protects the session[] slot table below */
static DEFINE_MUTEX(session_lock);
/* Session slots; slot index + 1 is the session id handed to clients */
static struct us_client *session[USM_SESSION_MAX];

static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv);
static int32_t q6usm_callback(struct apr_client_data *data, void *priv);
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
			  uint32_t pkt_size, bool cmd_flg);

/*
 * State of the single shared APR "common" port used for memory
 * map/unmap commands on behalf of all USM clients.
 */
struct usm_mmap {
	atomic_t ref_cnt;	/* number of clients sharing the common port */
	atomic_t cmd_state;	/* 1 while a map/unmap command is in flight */
	wait_queue_head_t cmd_wait;	/* woken when cmd_state drops to 0 */
	void *apr;		/* APR handle of the common port */
	int mem_handle;		/* last handle from MEM_MAP_REGION response */
};

static struct usm_mmap this_mmap;
  36. static void q6usm_add_mmaphdr(struct apr_hdr *hdr,
  37. uint32_t pkt_size, bool cmd_flg, u32 token)
  38. {
  39. hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  40. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  41. hdr->src_port = 0;
  42. hdr->dest_port = 0;
  43. if (cmd_flg) {
  44. hdr->token = token;
  45. atomic_set(&this_mmap.cmd_state, 1);
  46. }
  47. hdr->pkt_size = pkt_size;
  48. }
  49. static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz,
  50. uint32_t bufcnt, uint32_t session, uint32_t *mem_handle)
  51. {
  52. struct usm_cmd_memory_map_region mem_region_map;
  53. int rc = 0;
  54. if (this_mmap.apr == NULL) {
  55. pr_err("%s: APR handle NULL\n", __func__);
  56. return -EINVAL;
  57. }
  58. q6usm_add_mmaphdr(&mem_region_map.hdr,
  59. sizeof(struct usm_cmd_memory_map_region), true,
  60. ((session << 8) | dir));
  61. mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
  62. mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
  63. mem_region_map.num_regions = 1;
  64. mem_region_map.flags = 0;
  65. mem_region_map.shm_addr_lsw = lower_32_bits(buf_add);
  66. mem_region_map.shm_addr_msw =
  67. msm_audio_populate_upper_32_bits(buf_add);
  68. mem_region_map.mem_size_bytes = bufsz * bufcnt;
  69. rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map);
  70. if (rc < 0) {
  71. pr_err("%s: mem_map op[0x%x]rc[%d]\n",
  72. __func__, mem_region_map.hdr.opcode, rc);
  73. rc = -EINVAL;
  74. goto fail_cmd;
  75. }
  76. rc = wait_event_timeout(this_mmap.cmd_wait,
  77. (atomic_read(&this_mmap.cmd_state) == 0),
  78. Q6USM_TIMEOUT_JIFFIES);
  79. if (!rc) {
  80. rc = -ETIME;
  81. pr_err("%s: timeout. waited for memory_map\n", __func__);
  82. } else {
  83. *mem_handle = this_mmap.mem_handle;
  84. rc = 0;
  85. }
  86. fail_cmd:
  87. return rc;
  88. }
  89. int q6usm_memory_unmap(phys_addr_t buf_add, int dir, uint32_t session,
  90. uint32_t mem_handle)
  91. {
  92. struct usm_cmd_memory_unmap_region mem_unmap;
  93. int rc = 0;
  94. if (this_mmap.apr == NULL) {
  95. pr_err("%s: APR handle NULL\n", __func__);
  96. return -EINVAL;
  97. }
  98. q6usm_add_mmaphdr(&mem_unmap.hdr,
  99. sizeof(struct usm_cmd_memory_unmap_region), true,
  100. ((session << 8) | dir));
  101. mem_unmap.hdr.opcode = USM_CMD_SHARED_MEM_UNMAP_REGION;
  102. mem_unmap.mem_map_handle = mem_handle;
  103. rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
  104. if (rc < 0) {
  105. pr_err("%s: mem_unmap op[0x%x] rc[%d]\n",
  106. __func__, mem_unmap.hdr.opcode, rc);
  107. goto fail_cmd;
  108. }
  109. rc = wait_event_timeout(this_mmap.cmd_wait,
  110. (atomic_read(&this_mmap.cmd_state) == 0),
  111. Q6USM_TIMEOUT_JIFFIES);
  112. if (!rc) {
  113. rc = -ETIME;
  114. pr_err("%s: timeout. waited for memory_unmap\n", __func__);
  115. } else
  116. rc = 0;
  117. fail_cmd:
  118. return rc;
  119. }
  120. static int q6usm_session_alloc(struct us_client *usc)
  121. {
  122. int ind = 0;
  123. mutex_lock(&session_lock);
  124. for (ind = 0; ind < USM_SESSION_MAX; ++ind) {
  125. if (!session[ind]) {
  126. session[ind] = usc;
  127. mutex_unlock(&session_lock);
  128. ++ind; /* session id: 0 reserved */
  129. pr_debug("%s: session[%d] was allocated\n",
  130. __func__, ind);
  131. return ind;
  132. }
  133. }
  134. mutex_unlock(&session_lock);
  135. return -ENOMEM;
  136. }
  137. static void q6usm_session_free(struct us_client *usc)
  138. {
  139. /* Session index was incremented during allocation */
  140. uint16_t ind = (uint16_t)usc->session - 1;
  141. pr_debug("%s: to free session[%d]\n", __func__, ind);
  142. if (ind < USM_SESSION_MAX) {
  143. mutex_lock(&session_lock);
  144. session[ind] = NULL;
  145. mutex_unlock(&session_lock);
  146. }
  147. }
  148. static int q6usm_us_client_buf_free(unsigned int dir,
  149. struct us_client *usc)
  150. {
  151. struct us_port_data *port;
  152. int rc = 0;
  153. if ((usc == NULL) ||
  154. ((dir != IN) && (dir != OUT)))
  155. return -EINVAL;
  156. mutex_lock(&usc->cmd_lock);
  157. port = &usc->port[dir];
  158. if (port == NULL) {
  159. mutex_unlock(&usc->cmd_lock);
  160. return -EINVAL;
  161. }
  162. if (port->data == NULL) {
  163. mutex_unlock(&usc->cmd_lock);
  164. return 0;
  165. }
  166. rc = q6usm_memory_unmap(port->phys, dir, usc->session,
  167. *((uint32_t *)port->ext));
  168. pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
  169. (void *)port->data, (u64)port->phys, (void *)&port->phys);
  170. msm_audio_ion_free(port->dma_buf);
  171. port->data = NULL;
  172. port->phys = 0;
  173. port->buf_size = 0;
  174. port->buf_cnt = 0;
  175. port->dma_buf = NULL;
  176. mutex_unlock(&usc->cmd_lock);
  177. return rc;
  178. }
  179. int q6usm_us_param_buf_free(unsigned int dir,
  180. struct us_client *usc)
  181. {
  182. struct us_port_data *port;
  183. int rc = 0;
  184. if ((usc == NULL) ||
  185. ((dir != IN) && (dir != OUT)))
  186. return -EINVAL;
  187. mutex_lock(&usc->cmd_lock);
  188. port = &usc->port[dir];
  189. if (port == NULL) {
  190. mutex_unlock(&usc->cmd_lock);
  191. return -EINVAL;
  192. }
  193. if (port->param_buf == NULL) {
  194. mutex_unlock(&usc->cmd_lock);
  195. return 0;
  196. }
  197. rc = q6usm_memory_unmap(port->param_phys, dir, usc->session,
  198. *((uint32_t *)port->param_buf_mem_handle));
  199. pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
  200. (void *)port->param_buf, (u64)port->param_phys,
  201. (void *)&port->param_phys);
  202. msm_audio_ion_free(port->param_dma_buf);
  203. port->param_buf = NULL;
  204. port->param_phys = 0;
  205. port->param_buf_size = 0;
  206. port->param_dma_buf = NULL;
  207. mutex_unlock(&usc->cmd_lock);
  208. return rc;
  209. }
  210. void q6usm_us_client_free(struct us_client *usc)
  211. {
  212. int loopcnt = 0;
  213. struct us_port_data *port;
  214. uint32_t *p_mem_handle = NULL;
  215. if ((usc == NULL) ||
  216. !(usc->session))
  217. return;
  218. for (loopcnt = 0; loopcnt <= OUT; ++loopcnt) {
  219. port = &usc->port[loopcnt];
  220. if (port->data == NULL)
  221. continue;
  222. pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
  223. q6usm_us_client_buf_free(loopcnt, usc);
  224. q6usm_us_param_buf_free(loopcnt, usc);
  225. }
  226. q6usm_session_free(usc);
  227. apr_deregister(usc->apr);
  228. pr_debug("%s: APR De-Register\n", __func__);
  229. if (atomic_read(&this_mmap.ref_cnt) <= 0) {
  230. pr_err("%s: APR Common Port Already Closed\n", __func__);
  231. goto done;
  232. }
  233. atomic_dec(&this_mmap.ref_cnt);
  234. if (atomic_read(&this_mmap.ref_cnt) == 0) {
  235. apr_deregister(this_mmap.apr);
  236. pr_debug("%s: APR De-Register common port\n", __func__);
  237. }
  238. done:
  239. p_mem_handle = (uint32_t *)usc->port[IN].ext;
  240. kfree(p_mem_handle);
  241. kfree(usc);
  242. pr_debug("%s:\n", __func__);
  243. }
/*
 * Allocate and register a new USM client.
 * @cb:   event callback invoked from q6usm_callback() on APR messages
 * @priv: opaque pointer passed back to @cb
 *
 * Allocates a session slot, registers a per-session APR port and, for
 * the first client, the shared common port used for memory map/unmap.
 * Returns the new client, or NULL on any failure.
 */
struct us_client *q6usm_us_client_alloc(
	void (*cb)(uint32_t, uint32_t, uint32_t *, void *),
	void *priv)
{
	struct us_client *usc;
	uint32_t *p_mem_handle = NULL;
	int n;
	int lcnt = 0;

	usc = kzalloc(sizeof(struct us_client), GFP_KERNEL);
	if (usc == NULL)
		return NULL;

	/* One array of 4 handles: {IN.ext, IN.param, OUT.ext, OUT.param} */
	p_mem_handle = kzalloc(sizeof(uint32_t) * 4, GFP_KERNEL);
	if (p_mem_handle == NULL) {
		kfree(usc);
		return NULL;
	}

	n = q6usm_session_alloc(usc);
	if (n <= 0)
		goto fail_session;
	usc->session = n;
	usc->cb = cb;
	usc->priv = priv;
	/* src_port encodes the session id in the upper byte */
	usc->apr = apr_register("ADSP", "USM",
				(apr_fn)q6usm_callback,
				((usc->session) << 8 | 0x0001),
				usc);
	if (usc->apr == NULL) {
		pr_err("%s: Registration with APR failed\n", __func__);
		goto fail;
	}
	pr_debug("%s: Registering the common port with APR\n", __func__);
	if (atomic_read(&this_mmap.ref_cnt) == 0) {
		/* First client: bring up the shared mmap port. */
		this_mmap.apr = apr_register("ADSP", "USM",
					     (apr_fn)q6usm_mmapcallback,
					     0x0FFFFFFFF, &this_mmap);
		if (this_mmap.apr == NULL) {
			pr_err("%s: USM port registration failed\n",
			       __func__);
			goto fail;
		}
	}
	atomic_inc(&this_mmap.ref_cnt);

	init_waitqueue_head(&usc->cmd_wait);
	mutex_init(&usc->cmd_lock);
	for (lcnt = 0; lcnt <= OUT; ++lcnt) {
		mutex_init(&usc->port[lcnt].lock);
		spin_lock_init(&usc->port[lcnt].dsp_lock);
		/* Hand out two handle slots per port from the shared array. */
		usc->port[lcnt].ext = (void *)p_mem_handle++;
		usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++;
		pr_err("%s: usc->port[%d].ext=%pK;\n",
		       __func__, lcnt, usc->port[lcnt].ext);
	}
	atomic_set(&usc->cmd_state, 0);
	return usc;

fail:
	/*
	 * port[IN].ext is only assigned after both registrations succeed,
	 * so q6usm_us_client_free() frees NULL here and this kfree() is
	 * the sole owner of p_mem_handle — no double free.
	 * NOTE(review): usc->cmd_lock is not yet mutex_init()ed on this
	 * path, yet the free helpers lock it — presumably harmless on a
	 * zeroed mutex, but confirm.
	 */
	kfree(p_mem_handle);
	q6usm_us_client_free(usc);
	return NULL;
fail_session:
	kfree(p_mem_handle);
	kfree(usc);
	return NULL;
}
  307. int q6usm_us_client_buf_alloc(unsigned int dir,
  308. struct us_client *usc,
  309. unsigned int bufsz,
  310. unsigned int bufcnt)
  311. {
  312. int rc = 0;
  313. struct us_port_data *port = NULL;
  314. unsigned int size = bufsz*bufcnt;
  315. size_t len;
  316. if ((usc == NULL) ||
  317. ((dir != IN) && (dir != OUT)) || (size == 0) ||
  318. (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
  319. pr_err("%s: wrong parameters: size=%d; bufcnt=%d\n",
  320. __func__, size, bufcnt);
  321. return -EINVAL;
  322. }
  323. mutex_lock(&usc->cmd_lock);
  324. port = &usc->port[dir];
  325. /* The size to allocate should be multiple of 4K bytes */
  326. size = PAGE_ALIGN(size);
  327. rc = msm_audio_ion_alloc(&port->dma_buf,
  328. size, &port->phys,
  329. &len, &port->data);
  330. if (rc) {
  331. pr_err("%s: US ION allocation failed, rc = %d\n",
  332. __func__, rc);
  333. mutex_unlock(&usc->cmd_lock);
  334. return -ENOMEM;
  335. }
  336. port->buf_cnt = bufcnt;
  337. port->buf_size = bufsz;
  338. pr_debug("%s: data[%pK]; phys[%llx]; [%pK]\n", __func__,
  339. (void *)port->data,
  340. (u64)port->phys,
  341. (void *)&port->phys);
  342. rc = q6usm_memory_map(port->phys, dir, size, 1, usc->session,
  343. (uint32_t *)port->ext);
  344. if (rc < 0) {
  345. pr_err("%s: CMD Memory_map failed\n", __func__);
  346. mutex_unlock(&usc->cmd_lock);
  347. q6usm_us_client_buf_free(dir, usc);
  348. q6usm_us_param_buf_free(dir, usc);
  349. } else {
  350. mutex_unlock(&usc->cmd_lock);
  351. rc = 0;
  352. }
  353. return rc;
  354. }
  355. int q6usm_us_param_buf_alloc(unsigned int dir,
  356. struct us_client *usc,
  357. unsigned int bufsz)
  358. {
  359. int rc = 0;
  360. struct us_port_data *port = NULL;
  361. unsigned int size = bufsz;
  362. size_t len;
  363. if ((usc == NULL) ||
  364. ((dir != IN) && (dir != OUT)) ||
  365. (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
  366. pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n",
  367. __func__, dir, bufsz);
  368. return -EINVAL;
  369. }
  370. mutex_lock(&usc->cmd_lock);
  371. port = &usc->port[dir];
  372. if (bufsz == 0) {
  373. pr_debug("%s: bufsz=0, get/set param commands are forbidden\n",
  374. __func__);
  375. port->param_buf = NULL;
  376. mutex_unlock(&usc->cmd_lock);
  377. return rc;
  378. }
  379. /* The size to allocate should be multiple of 4K bytes */
  380. size = PAGE_ALIGN(size);
  381. rc = msm_audio_ion_alloc(&port->param_dma_buf,
  382. size, &port->param_phys,
  383. &len, &port->param_buf);
  384. if (rc) {
  385. pr_err("%s: US ION allocation failed, rc = %d\n",
  386. __func__, rc);
  387. mutex_unlock(&usc->cmd_lock);
  388. return -ENOMEM;
  389. }
  390. port->param_buf_size = bufsz;
  391. pr_debug("%s: param_buf[%pK]; param_phys[%llx]; [%pK]\n", __func__,
  392. (void *)port->param_buf,
  393. (u64)port->param_phys,
  394. (void *)&port->param_phys);
  395. rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1,
  396. usc->session, (uint32_t *)port->param_buf_mem_handle);
  397. if (rc < 0) {
  398. pr_err("%s: CMD Memory_map failed\n", __func__);
  399. mutex_unlock(&usc->cmd_lock);
  400. q6usm_us_client_buf_free(dir, usc);
  401. q6usm_us_param_buf_free(dir, usc);
  402. } else {
  403. mutex_unlock(&usc->cmd_lock);
  404. rc = 0;
  405. }
  406. return rc;
  407. }
/*
 * APR callback for the shared common port (this_mmap.apr).
 *
 * Handles basic command acks and the MEM_MAP_REGION response carrying
 * the memory handle; wakes the waiter in q6usm_memory_map()/
 * q6usm_memory_unmap() by clearing this_mmap.cmd_state.
 */
static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv)
{
	uint32_t token;
	uint32_t *payload = data->payload;

	/* Every message handled below reads payload[0] and payload[1]. */
	if (data->payload_size < (2 * sizeof(uint32_t))) {
		pr_err("%s: payload has invalid size[%d]\n", __func__,
		       data->payload_size);
		return -EINVAL;
	}

	pr_debug("%s: ptr0[0x%x]; ptr1[0x%x]; opcode[0x%x]\n",
		 __func__, payload[0], payload[1], data->opcode);
	pr_debug("%s: token[0x%x]; payload_size[%d]; src[%d]; dest[%d];\n",
		 __func__, data->token, data->payload_size,
		 data->src_port, data->dest_port);

	if (data->opcode == APR_BASIC_RSP_RESULT) {
		/* status field check */
		if (payload[1]) {
			/* command failed; waiter is left to time out */
			pr_err("%s: wrong response[%d] on cmd [%d]\n",
			       __func__, payload[1], payload[0]);
		} else {
			token = data->token;
			switch (payload[0]) {
			case USM_CMD_SHARED_MEM_UNMAP_REGION:
				/* unmap has no follow-up: wake the waiter */
				if (atomic_read(&this_mmap.cmd_state)) {
					atomic_set(&this_mmap.cmd_state, 0);
					wake_up(&this_mmap.cmd_wait);
				}
				/* fallthrough */
			case USM_CMD_SHARED_MEM_MAP_REGION:
				/* For MEM_MAP, additional answer is waited, */
				/* therfore, no wake-up here */
				pr_debug("%s: cmd[0x%x]; result[0x%x]\n",
					 __func__, payload[0], payload[1]);
				break;
			default:
				pr_debug("%s: wrong command[0x%x]\n",
					 __func__, payload[0]);
				break;
			}
		}
	} else {
		if (data->opcode == USM_CMDRSP_SHARED_MEM_MAP_REGION) {
			/* map response: stash the handle, then wake waiter */
			this_mmap.mem_handle = payload[0];
			pr_debug("%s: memory map handle = 0x%x",
				 __func__, payload[0]);
			if (atomic_read(&this_mmap.cmd_state)) {
				atomic_set(&this_mmap.cmd_state, 0);
				wake_up(&this_mmap.cmd_wait);
			}
		}
	}
	return 0;
}
/*
 * APR callback for a client's per-session port.
 *
 * Dispatches command acks (waking waiters blocked in the q6usm_* command
 * helpers), tracks read/write buffer positions under port->dsp_lock, and
 * forwards events to the client's registered callback.  @priv is the
 * struct us_client passed to apr_register().
 */
static int32_t q6usm_callback(struct apr_client_data *data, void *priv)
{
	struct us_client *usc = (struct us_client *)priv;
	unsigned long dsp_flags;
	uint32_t *payload = data->payload;
	uint32_t token = data->token;
	uint32_t opcode = Q6USM_EVENT_UNDEF;

	if (usc == NULL) {
		pr_err("%s: client info is NULL\n", __func__);
		return -EINVAL;
	}

	if (data->opcode == APR_BASIC_RSP_RESULT) {
		/* ack path: payload is {cmd opcode, status} */
		if (data->payload_size < (2 * sizeof(uint32_t))) {
			pr_err("%s: payload has invalid size[%d]\n", __func__,
			       data->payload_size);
			return -EINVAL;
		}
		/* status field check */
		if (payload[1]) {
			/* failed command: report to the client, no wake-up */
			pr_err("%s: wrong response[%d] on cmd [%d]\n",
			       __func__, payload[1], payload[0]);
			if (usc->cb)
				usc->cb(data->opcode, token,
					(uint32_t *)data->payload, usc->priv);
		} else {
			switch (payload[0]) {
			case USM_SESSION_CMD_RUN:
			case USM_STREAM_CMD_CLOSE:
				if (token != usc->session) {
					pr_err("%s: wrong token[%d]",
					       __func__, token);
					break;
				}
				/* fallthrough on a matching token so the
				 * wake-up below runs for RUN/CLOSE too */
			case USM_STREAM_CMD_OPEN_READ:
			case USM_STREAM_CMD_OPEN_WRITE:
			case USM_STREAM_CMD_SET_ENC_PARAM:
			case USM_DATA_CMD_MEDIA_FORMAT_UPDATE:
			case USM_SESSION_CMD_SIGNAL_DETECT_MODE:
			case USM_STREAM_CMD_SET_PARAM:
			case USM_STREAM_CMD_GET_PARAM:
				/* wake whoever is blocked on this command */
				if (atomic_read(&usc->cmd_state)) {
					atomic_set(&usc->cmd_state, 0);
					wake_up(&usc->cmd_wait);
				}
				if (usc->cb)
					usc->cb(data->opcode, token,
						(uint32_t *)data->payload,
						usc->priv);
				break;
			default:
				break;
			}
		}
		return 0;
	}

	switch (data->opcode) {
	case RESET_EVENTS: {
		/* aDSP went down: invalidate both APR handles */
		pr_err("%s: Reset event is received: %d %d\n",
		       __func__,
		       data->reset_event,
		       data->reset_proc);
		opcode = RESET_EVENTS;
		apr_reset(this_mmap.apr);
		this_mmap.apr = NULL;
		apr_reset(usc->apr);
		usc->apr = NULL;
		break;
	}
	case USM_DATA_EVENT_READ_DONE: {
		/* A capture buffer was filled by the DSP. */
		struct us_port_data *port = &usc->port[OUT];

		opcode = Q6USM_EVENT_READ_DONE;
		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
		if (data->payload_size <
		    (sizeof(uint32_t)*(READDONE_IDX_STATUS + 1))) {
			pr_err("%s: Invalid payload size for READDONE[%d]\n",
			       __func__, data->payload_size);
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			return -EINVAL;
		}
		if (payload[READDONE_IDX_STATUS]) {
			/* DSP reported an error: flag it via the token */
			pr_err("%s: wrong READDONE[%d]; token[%d]\n",
			       __func__,
			       payload[READDONE_IDX_STATUS],
			       token);
			token = USM_WRONG_TOKEN;
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			break;
		}

		if (port->expected_token != token) {
			u32 cpu_buf = port->cpu_buf;

			pr_err("%s: expected[%d] != token[%d]\n",
			       __func__, port->expected_token, token);
			pr_debug("%s: dsp_buf=%d; cpu_buf=%d;\n",
				 __func__, port->dsp_buf, cpu_buf);
			token = USM_WRONG_TOKEN;
			/* To prevent data handle continiue */
			port->expected_token = USM_WRONG_TOKEN;
			spin_unlock_irqrestore(&port->dsp_lock,
					       dsp_flags);
			break;
		} /* port->expected_token != data->token */

		/* advance to the next buffer index, wrapping at buf_cnt */
		port->expected_token = token + 1;
		if (port->expected_token == port->buf_cnt)
			port->expected_token = 0;

		/* gap support */
		if (port->expected_token != port->cpu_buf) {
			port->dsp_buf = port->expected_token;
			token = port->dsp_buf; /* for callback */
		} else
			port->dsp_buf = token;

		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
		break;
	} /* case USM_DATA_EVENT_READ_DONE */
	case USM_DATA_EVENT_WRITE_DONE: {
		/* A playback buffer was consumed by the DSP. */
		struct us_port_data *port = &usc->port[IN];

		opcode = Q6USM_EVENT_WRITE_DONE;
		if (data->payload_size <
		    (sizeof(uint32_t)*(WRITEDONE_IDX_STATUS + 1))) {
			pr_err("%s: Invalid payload size for WRITEDONE[%d]\n",
			       __func__, data->payload_size);
			return -EINVAL;
		}
		if (payload[WRITEDONE_IDX_STATUS]) {
			pr_err("%s: wrong WRITEDONE_IDX_STATUS[%d]\n",
			       __func__,
			       payload[WRITEDONE_IDX_STATUS]);
			break;
		}

		spin_lock_irqsave(&port->dsp_lock, dsp_flags);
		/* advance dsp_buf past the completed buffer, wrapping */
		port->dsp_buf = token + 1;
		if (port->dsp_buf == port->buf_cnt)
			port->dsp_buf = 0;
		spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
		break;
	} /* case USM_DATA_EVENT_WRITE_DONE */
	case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT: {
		pr_debug("%s: US detect result: result=%d",
			 __func__,
			 payload[0]);
		opcode = Q6USM_EVENT_SIGNAL_DETECT_RESULT;
		break;
	} /* case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT */
	default:
		return 0;
	} /* switch */

	/* forward the translated event to the client */
	if (usc->cb)
		usc->cb(opcode, token,
			data->payload, usc->priv);
	return 0;
}
  613. uint32_t q6usm_get_virtual_address(int dir,
  614. struct us_client *usc,
  615. struct vm_area_struct *vms)
  616. {
  617. uint32_t ret = 0xffffffff;
  618. if (vms && (usc != NULL) && ((dir == IN) || (dir == OUT))) {
  619. struct us_port_data *port = &usc->port[dir];
  620. int size = PAGE_ALIGN(port->buf_size * port->buf_cnt);
  621. struct audio_buffer ab;
  622. ab.phys = port->phys;
  623. ab.data = port->data;
  624. ab.used = 1;
  625. ab.size = size;
  626. ab.actual_size = size;
  627. ab.dma_buf = port->dma_buf;
  628. ret = msm_audio_ion_mmap(&ab, vms);
  629. }
  630. return ret;
  631. }
  632. static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
  633. uint32_t pkt_size, bool cmd_flg)
  634. {
  635. mutex_lock(&usc->cmd_lock);
  636. hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  637. APR_HDR_LEN(sizeof(struct apr_hdr)),
  638. APR_PKT_VER);
  639. hdr->src_svc = ((struct apr_svc *)usc->apr)->id;
  640. hdr->src_domain = APR_DOMAIN_APPS;
  641. hdr->dest_svc = APR_SVC_USM;
  642. hdr->dest_domain = APR_DOMAIN_ADSP;
  643. hdr->src_port = (usc->session << 8) | 0x0001;
  644. hdr->dest_port = (usc->session << 8) | 0x0001;
  645. if (cmd_flg) {
  646. hdr->token = usc->session;
  647. atomic_set(&usc->cmd_state, 1);
  648. }
  649. hdr->pkt_size = pkt_size;
  650. mutex_unlock(&usc->cmd_lock);
  651. }
  652. static uint32_t q6usm_ext2int_format(uint32_t ext_format)
  653. {
  654. uint32_t int_format = INVALID_FORMAT;
  655. switch (ext_format) {
  656. case FORMAT_USPS_EPOS:
  657. int_format = US_POINT_EPOS_FORMAT_V2;
  658. break;
  659. case FORMAT_USRAW:
  660. int_format = US_RAW_FORMAT_V2;
  661. break;
  662. case FORMAT_USPROX:
  663. int_format = US_PROX_FORMAT_V4;
  664. break;
  665. case FORMAT_USGES_SYNC:
  666. int_format = US_GES_SYNC_FORMAT;
  667. break;
  668. case FORMAT_USRAW_SYNC:
  669. int_format = US_RAW_SYNC_FORMAT;
  670. break;
  671. default:
  672. pr_err("%s: Invalid format[%d]\n", __func__, ext_format);
  673. break;
  674. }
  675. return int_format;
  676. }
  677. int q6usm_open_read(struct us_client *usc,
  678. uint32_t format)
  679. {
  680. uint32_t int_format = INVALID_FORMAT;
  681. int rc = 0x00;
  682. struct usm_stream_cmd_open_read open;
  683. if ((usc == NULL) || (usc->apr == NULL)) {
  684. pr_err("%s: client or its apr is NULL\n", __func__);
  685. return -EINVAL;
  686. }
  687. pr_debug("%s: session[%d]", __func__, usc->session);
  688. q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
  689. open.hdr.opcode = USM_STREAM_CMD_OPEN_READ;
  690. open.src_endpoint = 0; /* AFE */
  691. open.pre_proc_top = 0; /* No preprocessing required */
  692. int_format = q6usm_ext2int_format(format);
  693. if (int_format == INVALID_FORMAT)
  694. return -EINVAL;
  695. open.uMode = STREAM_PRIORITY_NORMAL;
  696. open.format = int_format;
  697. rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
  698. if (rc < 0) {
  699. pr_err("%s: open failed op[0x%x]rc[%d]\n",
  700. __func__, open.hdr.opcode, rc);
  701. goto fail_cmd;
  702. }
  703. rc = wait_event_timeout(usc->cmd_wait,
  704. (atomic_read(&usc->cmd_state) == 0),
  705. Q6USM_TIMEOUT_JIFFIES);
  706. if (!rc) {
  707. rc = -ETIME;
  708. pr_err("%s: timeout, waited for OPEN_READ rc[%d]\n",
  709. __func__, rc);
  710. goto fail_cmd;
  711. } else
  712. rc = 0;
  713. fail_cmd:
  714. return rc;
  715. }
/*
 * Send the encoder configuration block for @usc's stream.
 *
 * The packet ends with USM_MAX_CFG_DATA_SIZE bytes of transparent
 * (format-specific) data; if us_cfg->params_size exceeds that, a larger
 * packet is allocated dynamically for the overflow.  Blocks up to
 * Q6USM_TIMEOUT_JIFFIES for the ack.
 *
 * Returns 0 on success, -EINVAL on bad input or send failure, -ENOMEM
 * on allocation failure, -ETIME on timeout.
 */
int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_cmd_encdec_cfg_blk enc_cfg_obj;
	struct usm_stream_cmd_encdec_cfg_blk *enc_cfg = &enc_cfg_obj;
	int rc = 0;
	uint32_t total_cfg_size =
		sizeof(struct usm_stream_cmd_encdec_cfg_blk);
	uint32_t round_params_size = 0;
	uint8_t is_allocated = 0;	/* 1 when enc_cfg was kzalloc'ed */

	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Transparent configuration data is after enc_cfg */
	/* Integer number of u32s is required */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* Dynamic allocated encdec_cfg_blk is required */
		/* static part use */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (enc_cfg == NULL) {
			pr_err("%s: enc_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else
		/* fits the static payload; no extra bytes in the packet */
		round_params_size = 0;

	q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size, true);

	enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM;
	enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK;
	enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk)+
			      round_params_size;
	enc_cfg->enc_blk.frames_per_buf = 1;
	enc_cfg->enc_blk.format_id = int_format;
	enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common)+
				    USM_MAX_CFG_DATA_SIZE +
				    round_params_size;
	memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));

	/* Transparent data copy */
	memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params,
	       us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]\n",
		 __func__,
		 enc_cfg->enc_blk.cfg_size,
		 us_cfg->params_size);
	pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n",
		 __func__,
		 enc_cfg->enc_blk.transp_data[0],
		 enc_cfg->enc_blk.transp_data[1],
		 enc_cfg->enc_blk.transp_data[2],
		 enc_cfg->enc_blk.transp_data[3],
		 enc_cfg->enc_blk.transp_data[4],
		 enc_cfg->enc_blk.transp_data[5],
		 enc_cfg->enc_blk.transp_data[6],
		 enc_cfg->enc_blk.transp_data[7]
		);
	pr_debug("%s: srate:%d, ch=%d, bps= %d;\n",
		 __func__, enc_cfg->enc_blk.cfg_common.sample_rate,
		 enc_cfg->enc_blk.cfg_common.ch_cfg,
		 enc_cfg->enc_blk.cfg_common.bits_per_sample);
	pr_debug("dmap:[0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x]; dev_id=0x%x\n",
		 enc_cfg->enc_blk.cfg_common.data_map[0],
		 enc_cfg->enc_blk.cfg_common.data_map[1],
		 enc_cfg->enc_blk.cfg_common.data_map[2],
		 enc_cfg->enc_blk.cfg_common.data_map[3],
		 enc_cfg->enc_blk.cfg_common.data_map[4],
		 enc_cfg->enc_blk.cfg_common.data_map[5],
		 enc_cfg->enc_blk.cfg_common.data_map[6],
		 enc_cfg->enc_blk.cfg_common.data_map[7],
		 enc_cfg->enc_blk.cfg_common.dev_id);

	rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg);
	if (rc < 0) {
		pr_err("%s:Comamnd open failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}

	/* q6usm_callback() clears cmd_state on the ack. */
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, enc_cfg->hdr.opcode);
	} else
		rc = 0;
fail_cmd:
	if (is_allocated == 1)
		kfree(enc_cfg);

	return rc;
}
  817. int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
  818. {
  819. uint32_t int_format = INVALID_FORMAT;
  820. struct usm_stream_media_format_update dec_cfg_obj;
  821. struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj;
  822. int rc = 0;
  823. uint32_t total_cfg_size = sizeof(struct usm_stream_media_format_update);
  824. uint32_t round_params_size = 0;
  825. uint8_t is_allocated = 0;
  826. if ((usc == NULL) || (us_cfg == NULL)) {
  827. pr_err("%s: wrong input", __func__);
  828. return -EINVAL;
  829. }
  830. int_format = q6usm_ext2int_format(us_cfg->format_id);
  831. if (int_format == INVALID_FORMAT) {
  832. pr_err("%s: wrong input format[%d]",
  833. __func__, us_cfg->format_id);
  834. return -EINVAL;
  835. }
  836. /* Transparent configuration data is after enc_cfg */
  837. /* Integer number of u32s is required */
  838. round_params_size = ((us_cfg->params_size + 3)/4) * 4;
  839. if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
  840. /* Dynamic allocated encdec_cfg_blk is required */
  841. /* static part use */
  842. round_params_size -= USM_MAX_CFG_DATA_SIZE;
  843. total_cfg_size += round_params_size;
  844. dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
  845. if (dec_cfg == NULL) {
  846. pr_err("%s:dec_cfg[%d] allocation failed\n",
  847. __func__, total_cfg_size);
  848. return -ENOMEM;
  849. }
  850. is_allocated = 1;
  851. } else { /* static transp_data is enough */
  852. round_params_size = 0;
  853. }
  854. q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size, true);
  855. dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE;
  856. dec_cfg->format_id = int_format;
  857. dec_cfg->cfg_size = sizeof(struct usm_cfg_common) +
  858. USM_MAX_CFG_DATA_SIZE +
  859. round_params_size;
  860. memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common),
  861. sizeof(struct usm_cfg_common));
  862. /* Transparent data copy */
  863. memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size);
  864. pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n",
  865. __func__,
  866. dec_cfg->cfg_size,
  867. us_cfg->params_size,
  868. dec_cfg->transp_data[0],
  869. dec_cfg->transp_data[1],
  870. dec_cfg->transp_data[2],
  871. dec_cfg->transp_data[3]
  872. );
  873. rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg);
  874. if (rc < 0) {
  875. pr_err("%s:Comamnd open failed\n", __func__);
  876. rc = -EINVAL;
  877. goto fail_cmd;
  878. }
  879. rc = wait_event_timeout(usc->cmd_wait,
  880. (atomic_read(&usc->cmd_state) == 0),
  881. Q6USM_TIMEOUT_JIFFIES);
  882. if (!rc) {
  883. rc = -ETIME;
  884. pr_err("%s: timeout opcode[0x%x]\n",
  885. __func__, dec_cfg->hdr.opcode);
  886. } else
  887. rc = 0;
  888. fail_cmd:
  889. if (is_allocated == 1)
  890. kfree(dec_cfg);
  891. return rc;
  892. }
  893. int q6usm_open_write(struct us_client *usc,
  894. uint32_t format)
  895. {
  896. int rc = 0;
  897. uint32_t int_format = INVALID_FORMAT;
  898. struct usm_stream_cmd_open_write open;
  899. if ((usc == NULL) || (usc->apr == NULL)) {
  900. pr_err("%s: APR handle NULL\n", __func__);
  901. return -EINVAL;
  902. }
  903. pr_debug("%s: session[%d]", __func__, usc->session);
  904. q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
  905. open.hdr.opcode = USM_STREAM_CMD_OPEN_WRITE;
  906. int_format = q6usm_ext2int_format(format);
  907. if (int_format == INVALID_FORMAT) {
  908. pr_err("%s: wrong format[%d]", __func__, format);
  909. return -EINVAL;
  910. }
  911. open.format = int_format;
  912. rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
  913. if (rc < 0) {
  914. pr_err("%s:open failed op[0x%x]rc[%d]\n",
  915. __func__, open.hdr.opcode, rc);
  916. goto fail_cmd;
  917. }
  918. rc = wait_event_timeout(usc->cmd_wait,
  919. (atomic_read(&usc->cmd_state) == 0),
  920. Q6USM_TIMEOUT_JIFFIES);
  921. if (!rc) {
  922. rc = -ETIME;
  923. pr_err("%s:timeout. waited for OPEN_WRITR rc[%d]\n",
  924. __func__, rc);
  925. goto fail_cmd;
  926. } else
  927. rc = 0;
  928. fail_cmd:
  929. return rc;
  930. }
  931. int q6usm_run(struct us_client *usc, uint32_t flags,
  932. uint32_t msw_ts, uint32_t lsw_ts)
  933. {
  934. struct usm_stream_cmd_run run;
  935. int rc = 0;
  936. if ((usc == NULL) || (usc->apr == NULL)) {
  937. pr_err("%s: APR handle NULL\n", __func__);
  938. return -EINVAL;
  939. }
  940. q6usm_add_hdr(usc, &run.hdr, sizeof(run), true);
  941. run.hdr.opcode = USM_SESSION_CMD_RUN;
  942. run.flags = flags;
  943. run.msw_ts = msw_ts;
  944. run.lsw_ts = lsw_ts;
  945. rc = apr_send_pkt(usc->apr, (uint32_t *) &run);
  946. if (rc < 0) {
  947. pr_err("%s: Commmand run failed[%d]\n", __func__, rc);
  948. goto fail_cmd;
  949. }
  950. rc = wait_event_timeout(usc->cmd_wait,
  951. (atomic_read(&usc->cmd_state) == 0),
  952. Q6USM_TIMEOUT_JIFFIES);
  953. if (!rc) {
  954. rc = -ETIME;
  955. pr_err("%s: timeout. waited for run success rc[%d]\n",
  956. __func__, rc);
  957. } else
  958. rc = 0;
  959. fail_cmd:
  960. return rc;
  961. }
/*
 * q6usm_read() - queue OUT-port buffers to the DSP for reading.
 * @usc:      US client handle (must have a valid APR handle).
 * @read_ind: target buffer index; buffers from cpu_buf up to (not including)
 *            read_ind are handed to the DSP, wrapping at buf_cnt.
 *
 * Sends one USM_DATA_CMD_READ per buffer, advancing port->cpu_buf circularly.
 * Return: 0 on success or when nothing needs queueing, negative errno on
 * failure (cpu_buf is rolled back on a failed send).
 */
int q6usm_read(struct us_client *usc, uint32_t read_ind)
{
	struct usm_stream_cmd_read read;
	struct us_port_data *port = NULL;
	int rc = 0;
	u32 read_counter = 0;
	u32 loop_ind = 0;
	u64 buf_addr = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[OUT];

	/* NOTE(review): read_ind == buf_cnt passes this check even though
	 * cpu_buf wraps inside [0, buf_cnt) — presumably callers never pass
	 * buf_cnt; confirm whether this should be >=. */
	if (read_ind > port->buf_cnt) {
		pr_err("%s: wrong read_ind[%d]\n",
		       __func__, read_ind);
		return -EINVAL;
	}
	/* Target index equals current CPU index: no free region to queue. */
	if (read_ind == port->cpu_buf) {
		pr_err("%s: no free region\n", __func__);
		return 0;
	}

	/* Number of buffers between cpu_buf and read_ind (circular). */
	if (read_ind > port->cpu_buf) { /* 1 range */
		read_counter = read_ind - port->cpu_buf;
	} else { /* 2 ranges */
		read_counter = (port->buf_cnt - port->cpu_buf) + read_ind;
	}

	q6usm_add_hdr(usc, &read.hdr, sizeof(read), false);
	read.hdr.opcode = USM_DATA_CMD_READ;
	read.buf_size = port->buf_size;
	buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
	read.buf_addr_lsw = lower_32_bits(buf_addr);
	read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
	read.mem_map_handle = *((uint32_t *)(port->ext));

	for (loop_ind = 0; loop_ind < read_counter; ++loop_ind) {
		u32 temp_cpu_buf = port->cpu_buf;

		/* Physical address of the buffer being handed to the DSP. */
		buf_addr = (u64)(port->phys) +
			   port->buf_size * (port->cpu_buf);
		read.buf_addr_lsw = lower_32_bits(buf_addr);
		read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
		read.seq_id = port->cpu_buf;
		read.hdr.token = port->cpu_buf;
		read.counter = 1;

		/* Advance the circular CPU index before sending. */
		++(port->cpu_buf);
		if (port->cpu_buf == port->buf_cnt)
			port->cpu_buf = 0;

		rc = apr_send_pkt(usc->apr, (uint32_t *) &read);
		if (rc < 0) {
			/* Send failed: restore the index so this buffer can
			 * be re-queued later. */
			port->cpu_buf = temp_cpu_buf;
			pr_err("%s:read op[0x%x]rc[%d]\n",
			       __func__, read.hdr.opcode, rc);
			break;
		}
		rc = 0;
	} /* bufs loop */

	return rc;
}
/*
 * q6usm_write() - queue IN-port buffers to the DSP for writing.
 * @usc:       US client handle (must have a valid APR handle).
 * @write_ind: target buffer index; buffers from cpu_buf up to (not
 *             including) write_ind are handed to the DSP, wrapping at
 *             buf_cnt.
 *
 * Validates that write_ind lies inside the free region delimited by the
 * DSP's last-reported position (dsp_buf) and the CPU position (cpu_buf),
 * then sends one USM_DATA_CMD_WRITE per buffer.
 * Return: 0 on success, negative errno on failure (cpu_buf is rolled back
 * on a failed send).
 */
int q6usm_write(struct us_client *usc, uint32_t write_ind)
{
	int rc = 0;
	struct usm_stream_cmd_write cmd_write;
	struct us_port_data *port = NULL;
	u32 current_dsp_buf = 0;
	u64 buf_addr = 0;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[IN];

	/* Snapshot dsp_buf once: the DSP may update it concurrently, and
	 * a newer report can only grow the free region. */
	current_dsp_buf = port->dsp_buf;
	/* free region, caused by new dsp_buf report from DSP, */
	/* can be only extended */
	if (port->cpu_buf >= current_dsp_buf) {
		/* 2 -part free region, including empty buffer:
		 * free = (cpu_buf, buf_cnt) ++ [0, dsp_buf] */
		if ((write_ind <= port->cpu_buf)  &&
		    (write_ind > current_dsp_buf)) {
			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
			       __func__, write_ind,
			       current_dsp_buf, port->cpu_buf);
			return -EINVAL;
		}
	} else {
		/* 1 -part free region: free = (cpu_buf, dsp_buf] */
		if ((write_ind <= port->cpu_buf) ||
		    (write_ind > current_dsp_buf)) {
			pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
			       __func__, write_ind,
			       current_dsp_buf, port->cpu_buf);
			return -EINVAL;
		}
	}

	q6usm_add_hdr(usc, &cmd_write.hdr, sizeof(cmd_write), false);

	cmd_write.hdr.opcode = USM_DATA_CMD_WRITE;
	cmd_write.buf_size = port->buf_size;
	buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
	cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
	cmd_write.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
	cmd_write.mem_map_handle = *((uint32_t *)(port->ext));
	cmd_write.res0 = 0;
	cmd_write.res1 = 0;
	cmd_write.res2 = 0;

	while (port->cpu_buf != write_ind) {
		u32 temp_cpu_buf = port->cpu_buf;

		/* Physical address of the buffer being handed to the DSP. */
		buf_addr = (u64)(port->phys) +
			   port->buf_size * (port->cpu_buf);
		cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
		cmd_write.buf_addr_msw =
			msm_audio_populate_upper_32_bits(buf_addr);
		cmd_write.seq_id = port->cpu_buf;
		cmd_write.hdr.token = port->cpu_buf;

		/* Advance the circular CPU index before sending. */
		++(port->cpu_buf);
		if (port->cpu_buf == port->buf_cnt)
			port->cpu_buf = 0;

		rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_write);
		if (rc < 0) {
			/* Send failed: roll the index back so this buffer
			 * can be re-queued later. */
			port->cpu_buf = temp_cpu_buf;
			pr_err("%s:write op[0x%x];rc[%d];cpu_buf[%d]\n",
			       __func__, cmd_write.hdr.opcode,
			       rc, port->cpu_buf);
			break;
		}
		rc = 0;
	}

	return rc;
}
  1087. bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region)
  1088. {
  1089. struct us_port_data *port = NULL;
  1090. u32 cpu_buf = 0;
  1091. if ((usc == NULL) || !free_region) {
  1092. pr_err("%s: input data wrong\n", __func__);
  1093. return false;
  1094. }
  1095. port = &usc->port[IN];
  1096. cpu_buf = port->cpu_buf + 1;
  1097. if (cpu_buf == port->buf_cnt)
  1098. cpu_buf = 0;
  1099. *free_region = port->dsp_buf;
  1100. return cpu_buf == *free_region;
  1101. }
  1102. int q6usm_cmd(struct us_client *usc, int cmd)
  1103. {
  1104. struct apr_hdr hdr;
  1105. int rc = 0;
  1106. atomic_t *state;
  1107. if ((usc == NULL) || (usc->apr == NULL)) {
  1108. pr_err("%s: APR handle NULL\n", __func__);
  1109. return -EINVAL;
  1110. }
  1111. q6usm_add_hdr(usc, &hdr, sizeof(hdr), true);
  1112. switch (cmd) {
  1113. case CMD_CLOSE:
  1114. hdr.opcode = USM_STREAM_CMD_CLOSE;
  1115. state = &usc->cmd_state;
  1116. break;
  1117. default:
  1118. pr_err("%s:Invalid format[%d]\n", __func__, cmd);
  1119. goto fail_cmd;
  1120. }
  1121. rc = apr_send_pkt(usc->apr, (uint32_t *) &hdr);
  1122. if (rc < 0) {
  1123. pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode);
  1124. goto fail_cmd;
  1125. }
  1126. rc = wait_event_timeout(usc->cmd_wait, (atomic_read(state) == 0),
  1127. Q6USM_TIMEOUT_JIFFIES);
  1128. if (!rc) {
  1129. rc = -ETIME;
  1130. pr_err("%s:timeout. waited for response opcode[0x%x]\n",
  1131. __func__, hdr.opcode);
  1132. } else
  1133. rc = 0;
  1134. fail_cmd:
  1135. return rc;
  1136. }
  1137. int q6usm_set_us_detection(struct us_client *usc,
  1138. struct usm_session_cmd_detect_info *detect_info,
  1139. uint16_t detect_info_size)
  1140. {
  1141. int rc = 0;
  1142. if ((usc == NULL) ||
  1143. (detect_info_size == 0) ||
  1144. (detect_info == NULL)) {
  1145. pr_err("%s: wrong input: usc=0x%pK, inf_size=%d; info=0x%pK",
  1146. __func__,
  1147. usc,
  1148. detect_info_size,
  1149. detect_info);
  1150. return -EINVAL;
  1151. }
  1152. q6usm_add_hdr(usc, &detect_info->hdr, detect_info_size, true);
  1153. detect_info->hdr.opcode = USM_SESSION_CMD_SIGNAL_DETECT_MODE;
  1154. rc = apr_send_pkt(usc->apr, (uint32_t *)detect_info);
  1155. if (rc < 0) {
  1156. pr_err("%s:Comamnd signal detect failed\n", __func__);
  1157. return -EINVAL;
  1158. }
  1159. rc = wait_event_timeout(usc->cmd_wait,
  1160. (atomic_read(&usc->cmd_state) == 0),
  1161. Q6USM_TIMEOUT_JIFFIES);
  1162. if (!rc) {
  1163. rc = -ETIME;
  1164. pr_err("%s: CMD_SIGNAL_DETECT_MODE: timeout=%d\n",
  1165. __func__, Q6USM_TIMEOUT_JIFFIES);
  1166. } else
  1167. rc = 0;
  1168. return rc;
  1169. }
  1170. int q6usm_set_us_stream_param(int dir, struct us_client *usc,
  1171. uint32_t module_id, uint32_t param_id, uint32_t buf_size)
  1172. {
  1173. int rc = 0;
  1174. struct usm_stream_cmd_set_param cmd_set_param;
  1175. struct us_port_data *port = NULL;
  1176. if ((usc == NULL) || (usc->apr == NULL)) {
  1177. pr_err("%s: APR handle NULL\n", __func__);
  1178. return -EINVAL;
  1179. }
  1180. port = &usc->port[dir];
  1181. q6usm_add_hdr(usc, &cmd_set_param.hdr, sizeof(cmd_set_param), true);
  1182. cmd_set_param.hdr.opcode = USM_STREAM_CMD_SET_PARAM;
  1183. cmd_set_param.buf_size = buf_size;
  1184. cmd_set_param.buf_addr_msw =
  1185. msm_audio_populate_upper_32_bits(port->param_phys);
  1186. cmd_set_param.buf_addr_lsw = lower_32_bits(port->param_phys);
  1187. cmd_set_param.mem_map_handle =
  1188. *((uint32_t *)(port->param_buf_mem_handle));
  1189. cmd_set_param.module_id = module_id;
  1190. cmd_set_param.param_id = param_id;
  1191. cmd_set_param.hdr.token = 0;
  1192. rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_set_param);
  1193. if (rc < 0) {
  1194. pr_err("%s:write op[0x%x];rc[%d]\n",
  1195. __func__, cmd_set_param.hdr.opcode, rc);
  1196. }
  1197. rc = wait_event_timeout(usc->cmd_wait,
  1198. (atomic_read(&usc->cmd_state) == 0),
  1199. Q6USM_TIMEOUT_JIFFIES);
  1200. if (!rc) {
  1201. rc = -ETIME;
  1202. pr_err("%s: CMD_SET_PARAM: timeout=%d\n",
  1203. __func__, Q6USM_TIMEOUT_JIFFIES);
  1204. } else
  1205. rc = 0;
  1206. return rc;
  1207. }
  1208. int q6usm_get_us_stream_param(int dir, struct us_client *usc,
  1209. uint32_t module_id, uint32_t param_id, uint32_t buf_size)
  1210. {
  1211. int rc = 0;
  1212. struct usm_stream_cmd_get_param cmd_get_param;
  1213. struct us_port_data *port = NULL;
  1214. if ((usc == NULL) || (usc->apr == NULL)) {
  1215. pr_err("%s: APR handle NULL\n", __func__);
  1216. return -EINVAL;
  1217. }
  1218. port = &usc->port[dir];
  1219. q6usm_add_hdr(usc, &cmd_get_param.hdr, sizeof(cmd_get_param), true);
  1220. cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
  1221. cmd_get_param.buf_size = buf_size;
  1222. cmd_get_param.buf_addr_msw =
  1223. msm_audio_populate_upper_32_bits(port->param_phys);
  1224. cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
  1225. cmd_get_param.mem_map_handle =
  1226. *((uint32_t *)(port->param_buf_mem_handle));
  1227. cmd_get_param.module_id = module_id;
  1228. cmd_get_param.param_id = param_id;
  1229. cmd_get_param.hdr.token = 0;
  1230. rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);
  1231. if (rc < 0) {
  1232. pr_err("%s:write op[0x%x];rc[%d]\n",
  1233. __func__, cmd_get_param.hdr.opcode, rc);
  1234. }
  1235. rc = wait_event_timeout(usc->cmd_wait,
  1236. (atomic_read(&usc->cmd_state) == 0),
  1237. Q6USM_TIMEOUT_JIFFIES);
  1238. if (!rc) {
  1239. rc = -ETIME;
  1240. pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
  1241. __func__, Q6USM_TIMEOUT_JIFFIES);
  1242. } else
  1243. rc = 0;
  1244. return rc;
  1245. }
  1246. int __init q6usm_init(void)
  1247. {
  1248. pr_debug("%s\n", __func__);
  1249. init_waitqueue_head(&this_mmap.cmd_wait);
  1250. memset(session, 0, sizeof(session));
  1251. return 0;
  1252. }