q6usm.c

/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <dsp/msm_audio_ion.h>
#include <dsp/apr_audio-v2.h>
#include <ipc/apr_us.h>
#include "q6usm.h"

#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3
#define MEM_4K_OFFSET 4095
#define MEM_4K_MASK 0xfffff000

#define USM_SESSION_MAX 0x02 /* aDSP:USM limit */

#define READDONE_IDX_STATUS 0
#define WRITEDONE_IDX_STATUS 0

/* Standard timeout in the asynchronous ops */
#define Q6USM_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */

static DEFINE_MUTEX(session_lock);
static struct us_client *session[USM_SESSION_MAX];

static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv);
static int32_t q6usm_callback(struct apr_client_data *data, void *priv);
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
                          uint32_t pkt_size, bool cmd_flg);

struct usm_mmap {
        atomic_t ref_cnt;
        atomic_t cmd_state;
        wait_queue_head_t cmd_wait;
        void *apr;
        int mem_handle;
};

static struct usm_mmap this_mmap;

static void q6usm_add_mmaphdr(struct apr_hdr *hdr,
                              uint32_t pkt_size, bool cmd_flg, u32 token)
{
        hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                       APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
        hdr->src_port = 0;
        hdr->dest_port = 0;
        if (cmd_flg) {
                hdr->token = token;
                atomic_set(&this_mmap.cmd_state, 1);
        }
        hdr->pkt_size = pkt_size;
}

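/*
 * Map the client's shared buffer to the aDSP over the common APR port and
 * wait (up to Q6USM_TIMEOUT_JIFFIES) for the response that carries the new
 * memory map handle.
 */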
static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz,
                            uint32_t bufcnt, uint32_t session,
                            uint32_t *mem_handle)
{
        struct usm_cmd_memory_map_region mem_region_map;
        int rc = 0;

        if (this_mmap.apr == NULL) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }

        q6usm_add_mmaphdr(&mem_region_map.hdr,
                          sizeof(struct usm_cmd_memory_map_region), true,
                          ((session << 8) | dir));

        mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
        mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
        mem_region_map.num_regions = 1;
        mem_region_map.flags = 0;
        mem_region_map.shm_addr_lsw = lower_32_bits(buf_add);
        mem_region_map.shm_addr_msw =
                        msm_audio_populate_upper_32_bits(buf_add);
        mem_region_map.mem_size_bytes = bufsz * bufcnt;

        rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map);
        if (rc < 0) {
                pr_err("%s: mem_map op[0x%x]rc[%d]\n",
                       __func__, mem_region_map.hdr.opcode, rc);
                rc = -EINVAL;
                goto fail_cmd;
        }

        rc = wait_event_timeout(this_mmap.cmd_wait,
                                (atomic_read(&this_mmap.cmd_state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout. waited for memory_map\n", __func__);
        } else {
                *mem_handle = this_mmap.mem_handle;
                rc = 0;
        }
fail_cmd:
        return rc;
}

int q6usm_memory_unmap(phys_addr_t buf_add, int dir, uint32_t session,
                       uint32_t mem_handle)
{
        struct usm_cmd_memory_unmap_region mem_unmap;
        int rc = 0;

        if (this_mmap.apr == NULL) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }

        q6usm_add_mmaphdr(&mem_unmap.hdr,
                          sizeof(struct usm_cmd_memory_unmap_region), true,
                          ((session << 8) | dir));
        mem_unmap.hdr.opcode = USM_CMD_SHARED_MEM_UNMAP_REGION;
        mem_unmap.mem_map_handle = mem_handle;

        rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
        if (rc < 0) {
                pr_err("%s: mem_unmap op[0x%x] rc[%d]\n",
                       __func__, mem_unmap.hdr.opcode, rc);
                goto fail_cmd;
        }

        rc = wait_event_timeout(this_mmap.cmd_wait,
                                (atomic_read(&this_mmap.cmd_state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout. waited for memory_unmap\n", __func__);
        } else
                rc = 0;
fail_cmd:
        return rc;
}

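/*
 * Reserve a free slot in the session[] table for the client. The returned
 * session id is 1-based (index + 1), since session id 0 is reserved.
 */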
static int q6usm_session_alloc(struct us_client *usc)
{
        int ind = 0;

        mutex_lock(&session_lock);
        for (ind = 0; ind < USM_SESSION_MAX; ++ind) {
                if (!session[ind]) {
                        session[ind] = usc;
                        mutex_unlock(&session_lock);
                        ++ind; /* session id: 0 reserved */
                        pr_debug("%s: session[%d] was allocated\n",
                                 __func__, ind);
                        return ind;
                }
        }
        mutex_unlock(&session_lock);
        return -ENOMEM;
}

static void q6usm_session_free(struct us_client *usc)
{
        /* Session index was incremented during allocation */
        uint16_t ind = (uint16_t)usc->session - 1;

        pr_debug("%s: to free session[%d]\n", __func__, ind);
        if (ind < USM_SESSION_MAX) {
                mutex_lock(&session_lock);
                session[ind] = NULL;
                mutex_unlock(&session_lock);
        }
}

static int q6usm_us_client_buf_free(unsigned int dir,
                                    struct us_client *usc)
{
        struct us_port_data *port;
        int rc = 0;

        if ((usc == NULL) ||
            ((dir != IN) && (dir != OUT)))
                return -EINVAL;

        mutex_lock(&usc->cmd_lock);
        port = &usc->port[dir];
        if (port == NULL) {
                mutex_unlock(&usc->cmd_lock);
                return -EINVAL;
        }

        if (port->data == NULL) {
                mutex_unlock(&usc->cmd_lock);
                return 0;
        }

        rc = q6usm_memory_unmap(port->phys, dir, usc->session,
                                *((uint32_t *)port->ext));
        pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
                 (void *)port->data, (u64)port->phys, (void *)&port->phys);

        msm_audio_ion_free(port->client, port->handle);

        port->data = NULL;
        port->phys = 0;
        port->buf_size = 0;
        port->buf_cnt = 0;
        port->client = NULL;
        port->handle = NULL;

        mutex_unlock(&usc->cmd_lock);
        return rc;
}

int q6usm_us_param_buf_free(unsigned int dir,
                            struct us_client *usc)
{
        struct us_port_data *port;
        int rc = 0;

        if ((usc == NULL) ||
            ((dir != IN) && (dir != OUT)))
                return -EINVAL;

        mutex_lock(&usc->cmd_lock);
        port = &usc->port[dir];
        if (port == NULL) {
                mutex_unlock(&usc->cmd_lock);
                return -EINVAL;
        }

        if (port->param_buf == NULL) {
                mutex_unlock(&usc->cmd_lock);
                return 0;
        }

        rc = q6usm_memory_unmap(port->param_phys, dir, usc->session,
                                *((uint32_t *)port->param_buf_mem_handle));
        pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
                 (void *)port->param_buf, (u64)port->param_phys,
                 (void *)&port->param_phys);

        msm_audio_ion_free(port->param_client, port->param_handle);

        port->param_buf = NULL;
        port->param_phys = 0;
        port->param_buf_size = 0;
        port->param_client = NULL;
        port->param_handle = NULL;

        mutex_unlock(&usc->cmd_lock);
        return rc;
}

void q6usm_us_client_free(struct us_client *usc)
{
        int loopcnt = 0;
        struct us_port_data *port;
        uint32_t *p_mem_handle = NULL;

        if ((usc == NULL) ||
            !(usc->session))
                return;

        for (loopcnt = 0; loopcnt <= OUT; ++loopcnt) {
                port = &usc->port[loopcnt];
                if (port->data == NULL)
                        continue;

                pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
                q6usm_us_client_buf_free(loopcnt, usc);
                q6usm_us_param_buf_free(loopcnt, usc);
        }

        q6usm_session_free(usc);
        apr_deregister(usc->apr);
        pr_debug("%s: APR De-Register\n", __func__);

        if (atomic_read(&this_mmap.ref_cnt) <= 0) {
                pr_err("%s: APR Common Port Already Closed\n", __func__);
                goto done;
        }

        atomic_dec(&this_mmap.ref_cnt);
        if (atomic_read(&this_mmap.ref_cnt) == 0) {
                apr_deregister(this_mmap.apr);
                pr_debug("%s: APR De-Register common port\n", __func__);
        }
done:
        p_mem_handle = (uint32_t *)usc->port[IN].ext;
        kfree(p_mem_handle);
        kfree(usc);
        pr_debug("%s:\n", __func__);
}

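/*
 * Allocate a USM client: reserve a session, register the per-session APR
 * handle, and (for the first client) register the common mmap port whose
 * lifetime is tracked by this_mmap.ref_cnt. The four mem-handle words in
 * p_mem_handle are split between the IN/OUT ports (data and param buffers).
 */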
struct us_client *q6usm_us_client_alloc(
        void (*cb)(uint32_t, uint32_t, uint32_t *, void *),
        void *priv)
{
        struct us_client *usc;
        uint32_t *p_mem_handle = NULL;
        int n;
        int lcnt = 0;

        usc = kzalloc(sizeof(struct us_client), GFP_KERNEL);
        if (usc == NULL)
                return NULL;

        p_mem_handle = kzalloc(sizeof(uint32_t) * 4, GFP_KERNEL);
        if (p_mem_handle == NULL) {
                kfree(usc);
                return NULL;
        }

        n = q6usm_session_alloc(usc);
        if (n <= 0)
                goto fail_session;
        usc->session = n;
        usc->cb = cb;
        usc->priv = priv;
        usc->apr = apr_register("ADSP", "USM",
                                (apr_fn)q6usm_callback,
                                ((usc->session) << 8 | 0x0001),
                                usc);
        if (usc->apr == NULL) {
                pr_err("%s: Registration with APR failed\n", __func__);
                goto fail;
        }

        pr_debug("%s: Registering the common port with APR\n", __func__);
        if (atomic_read(&this_mmap.ref_cnt) == 0) {
                this_mmap.apr = apr_register("ADSP", "USM",
                                             (apr_fn)q6usm_mmapcallback,
                                             0x0FFFFFFFF, &this_mmap);
                if (this_mmap.apr == NULL) {
                        pr_err("%s: USM port registration failed\n",
                               __func__);
                        goto fail;
                }
        }
        atomic_inc(&this_mmap.ref_cnt);

        init_waitqueue_head(&usc->cmd_wait);
        mutex_init(&usc->cmd_lock);
        for (lcnt = 0; lcnt <= OUT; ++lcnt) {
                mutex_init(&usc->port[lcnt].lock);
                spin_lock_init(&usc->port[lcnt].dsp_lock);
                usc->port[lcnt].ext = (void *)p_mem_handle++;
                usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++;
                pr_err("%s: usc->port[%d].ext=%pK;\n",
                       __func__, lcnt, usc->port[lcnt].ext);
        }
        atomic_set(&usc->cmd_state, 0);

        return usc;

fail:
        kfree(p_mem_handle);
        q6usm_us_client_free(usc);
        return NULL;

fail_session:
        kfree(p_mem_handle);
        kfree(usc);
        return NULL;
}

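/*
 * Allocate the data buffer for one direction: ION memory of bufsz * bufcnt
 * bytes (rounded up to a page) that is then mapped to the aDSP; the returned
 * mem handle is stored behind port->ext.
 */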
int q6usm_us_client_buf_alloc(unsigned int dir,
                              struct us_client *usc,
                              unsigned int bufsz,
                              unsigned int bufcnt)
{
        int rc = 0;
        struct us_port_data *port = NULL;
        unsigned int size = bufsz*bufcnt;
        size_t len;

        if ((usc == NULL) ||
            ((dir != IN) && (dir != OUT)) || (size == 0) ||
            (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
                pr_err("%s: wrong parameters: size=%d; bufcnt=%d\n",
                       __func__, size, bufcnt);
                return -EINVAL;
        }

        mutex_lock(&usc->cmd_lock);
        port = &usc->port[dir];

        /* The size to allocate should be multiple of 4K bytes */
        size = PAGE_ALIGN(size);
        rc = msm_audio_ion_alloc("ultrasound_client",
                                 &port->client, &port->handle,
                                 size, &port->phys,
                                 &len, &port->data);
        if (rc) {
                pr_err("%s: US ION allocation failed, rc = %d\n",
                       __func__, rc);
                mutex_unlock(&usc->cmd_lock);
                return -ENOMEM;
        }

        port->buf_cnt = bufcnt;
        port->buf_size = bufsz;
        pr_debug("%s: data[%pK]; phys[%llx]; [%pK]\n", __func__,
                 (void *)port->data,
                 (u64)port->phys,
                 (void *)&port->phys);

        rc = q6usm_memory_map(port->phys, dir, size, 1, usc->session,
                              (uint32_t *)port->ext);
        if (rc < 0) {
                pr_err("%s: CMD Memory_map failed\n", __func__);
                mutex_unlock(&usc->cmd_lock);
                q6usm_us_client_buf_free(dir, usc);
                q6usm_us_param_buf_free(dir, usc);
        } else {
                mutex_unlock(&usc->cmd_lock);
                rc = 0;
        }

        return rc;
}

int q6usm_us_param_buf_alloc(unsigned int dir,
                             struct us_client *usc,
                             unsigned int bufsz)
{
        int rc = 0;
        struct us_port_data *port = NULL;
        unsigned int size = bufsz;
        size_t len;

        if ((usc == NULL) ||
            ((dir != IN) && (dir != OUT)) ||
            (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
                pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n",
                       __func__, dir, bufsz);
                return -EINVAL;
        }

        mutex_lock(&usc->cmd_lock);
        port = &usc->port[dir];

        if (bufsz == 0) {
                pr_debug("%s: bufsz=0, get/set param commands are forbidden\n",
                         __func__);
                port->param_buf = NULL;
                mutex_unlock(&usc->cmd_lock);
                return rc;
        }

        /* The size to allocate should be multiple of 4K bytes */
        size = PAGE_ALIGN(size);
        rc = msm_audio_ion_alloc("ultrasound_client",
                                 &port->param_client, &port->param_handle,
                                 size, &port->param_phys,
                                 &len, &port->param_buf);
        if (rc) {
                pr_err("%s: US ION allocation failed, rc = %d\n",
                       __func__, rc);
                mutex_unlock(&usc->cmd_lock);
                return -ENOMEM;
        }

        port->param_buf_size = bufsz;
        pr_debug("%s: param_buf[%pK]; param_phys[%llx]; [%pK]\n", __func__,
                 (void *)port->param_buf,
                 (u64)port->param_phys,
                 (void *)&port->param_phys);

        rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1,
                              usc->session,
                              (uint32_t *)port->param_buf_mem_handle);
        if (rc < 0) {
                pr_err("%s: CMD Memory_map failed\n", __func__);
                mutex_unlock(&usc->cmd_lock);
                q6usm_us_client_buf_free(dir, usc);
                q6usm_us_param_buf_free(dir, usc);
        } else {
                mutex_unlock(&usc->cmd_lock);
                rc = 0;
        }

        return rc;
}

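/* APR callback for the common (shared memory map/unmap) port. */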
static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv)
{
        uint32_t token;
        uint32_t *payload = data->payload;

        pr_debug("%s: ptr0[0x%x]; ptr1[0x%x]; opcode[0x%x]\n",
                 __func__, payload[0], payload[1], data->opcode);
        pr_debug("%s: token[0x%x]; payload_size[%d]; src[%d]; dest[%d];\n",
                 __func__, data->token, data->payload_size,
                 data->src_port, data->dest_port);

        if (data->opcode == APR_BASIC_RSP_RESULT) {
                /* status field check */
                if (payload[1]) {
                        pr_err("%s: wrong response[%d] on cmd [%d]\n",
                               __func__, payload[1], payload[0]);
                } else {
                        token = data->token;
                        switch (payload[0]) {
                        case USM_CMD_SHARED_MEM_UNMAP_REGION:
                                if (atomic_read(&this_mmap.cmd_state)) {
                                        atomic_set(&this_mmap.cmd_state, 0);
                                        wake_up(&this_mmap.cmd_wait);
                                }
                                /* fallthrough */
                        case USM_CMD_SHARED_MEM_MAP_REGION:
                                /* For MEM_MAP, an additional answer is */
                                /* awaited; therefore, no wake-up here */
                                pr_debug("%s: cmd[0x%x]; result[0x%x]\n",
                                         __func__, payload[0], payload[1]);
                                break;
                        default:
                                pr_debug("%s: wrong command[0x%x]\n",
                                         __func__, payload[0]);
                                break;
                        }
                }
        } else {
                if (data->opcode == USM_CMDRSP_SHARED_MEM_MAP_REGION) {
                        this_mmap.mem_handle = payload[0];
                        pr_debug("%s: memory map handle = 0x%x",
                                 __func__, payload[0]);
                        if (atomic_read(&this_mmap.cmd_state)) {
                                atomic_set(&this_mmap.cmd_state, 0);
                                wake_up(&this_mmap.cmd_wait);
                        }
                }
        }
        return 0;
}

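/*
 * Per-session APR callback: completes pending commands by clearing
 * cmd_state, tracks the read/write buffer indices on READ_DONE/WRITE_DONE,
 * and forwards events to the client's callback.
 */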
static int32_t q6usm_callback(struct apr_client_data *data, void *priv)
{
        struct us_client *usc = (struct us_client *)priv;
        unsigned long dsp_flags;
        uint32_t *payload = data->payload;
        uint32_t token = data->token;
        uint32_t opcode = Q6USM_EVENT_UNDEF;

        if (usc == NULL) {
                pr_err("%s: client info is NULL\n", __func__);
                return -EINVAL;
        }

        if (data->opcode == APR_BASIC_RSP_RESULT) {
                /* status field check */
                if (payload[1]) {
                        pr_err("%s: wrong response[%d] on cmd [%d]\n",
                               __func__, payload[1], payload[0]);
                        if (usc->cb)
                                usc->cb(data->opcode, token,
                                        (uint32_t *)data->payload, usc->priv);
                } else {
                        switch (payload[0]) {
                        case USM_SESSION_CMD_RUN:
                        case USM_STREAM_CMD_CLOSE:
                                if (token != usc->session) {
                                        pr_err("%s: wrong token[%d]",
                                               __func__, token);
                                        break;
                                }
                                /* fallthrough */
                        case USM_STREAM_CMD_OPEN_READ:
                        case USM_STREAM_CMD_OPEN_WRITE:
                        case USM_STREAM_CMD_SET_ENC_PARAM:
                        case USM_DATA_CMD_MEDIA_FORMAT_UPDATE:
                        case USM_SESSION_CMD_SIGNAL_DETECT_MODE:
                        case USM_STREAM_CMD_SET_PARAM:
                        case USM_STREAM_CMD_GET_PARAM:
                                if (atomic_read(&usc->cmd_state)) {
                                        atomic_set(&usc->cmd_state, 0);
                                        wake_up(&usc->cmd_wait);
                                }
                                if (usc->cb)
                                        usc->cb(data->opcode, token,
                                                (uint32_t *)data->payload,
                                                usc->priv);
                                break;
                        default:
                                break;
                        }
                }
                return 0;
        }

        switch (data->opcode) {
        case RESET_EVENTS: {
                pr_err("%s: Reset event is received: %d %d\n",
                       __func__,
                       data->reset_event,
                       data->reset_proc);

                opcode = RESET_EVENTS;

                apr_reset(this_mmap.apr);
                this_mmap.apr = NULL;

                apr_reset(usc->apr);
                usc->apr = NULL;
                break;
        }
        case USM_DATA_EVENT_READ_DONE: {
                struct us_port_data *port = &usc->port[OUT];

                opcode = Q6USM_EVENT_READ_DONE;
                spin_lock_irqsave(&port->dsp_lock, dsp_flags);
                if (payload[READDONE_IDX_STATUS]) {
                        pr_err("%s: wrong READDONE[%d]; token[%d]\n",
                               __func__,
                               payload[READDONE_IDX_STATUS],
                               token);
                        token = USM_WRONG_TOKEN;
                        spin_unlock_irqrestore(&port->dsp_lock,
                                               dsp_flags);
                        break;
                }

                if (port->expected_token != token) {
                        u32 cpu_buf = port->cpu_buf;

                        pr_err("%s: expected[%d] != token[%d]\n",
                               __func__, port->expected_token, token);
                        pr_debug("%s: dsp_buf=%d; cpu_buf=%d;\n",
                                 __func__, port->dsp_buf, cpu_buf);

                        token = USM_WRONG_TOKEN;
                        /* To prevent continuing with the data handling */
                        port->expected_token = USM_WRONG_TOKEN;
                        spin_unlock_irqrestore(&port->dsp_lock,
                                               dsp_flags);
                        break;
                } /* port->expected_token != data->token */

                port->expected_token = token + 1;
                if (port->expected_token == port->buf_cnt)
                        port->expected_token = 0;

                /* gap support */
                if (port->expected_token != port->cpu_buf) {
                        port->dsp_buf = port->expected_token;
                        token = port->dsp_buf; /* for callback */
                } else
                        port->dsp_buf = token;

                spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
                break;
        } /* case USM_DATA_EVENT_READ_DONE */
        case USM_DATA_EVENT_WRITE_DONE: {
                struct us_port_data *port = &usc->port[IN];

                opcode = Q6USM_EVENT_WRITE_DONE;
                if (payload[WRITEDONE_IDX_STATUS]) {
                        pr_err("%s: wrong WRITEDONE_IDX_STATUS[%d]\n",
                               __func__,
                               payload[WRITEDONE_IDX_STATUS]);
                        break;
                }

                spin_lock_irqsave(&port->dsp_lock, dsp_flags);
                port->dsp_buf = token + 1;
                if (port->dsp_buf == port->buf_cnt)
                        port->dsp_buf = 0;
                spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
                break;
        } /* case USM_DATA_EVENT_WRITE_DONE */
        case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT: {
                pr_debug("%s: US detect result: result=%d",
                         __func__,
                         payload[0]);

                opcode = Q6USM_EVENT_SIGNAL_DETECT_RESULT;
                break;
        } /* case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT */
        default:
                return 0;
        } /* switch */

        if (usc->cb)
                usc->cb(opcode, token,
                        data->payload, usc->priv);

        return 0;
}

uint32_t q6usm_get_virtual_address(int dir,
                                   struct us_client *usc,
                                   struct vm_area_struct *vms)
{
        uint32_t ret = 0xffffffff;

        if (vms && (usc != NULL) && ((dir == IN) || (dir == OUT))) {
                struct us_port_data *port = &usc->port[dir];
                int size = PAGE_ALIGN(port->buf_size * port->buf_cnt);
                struct audio_buffer ab;

                ab.phys = port->phys;
                ab.data = port->data;
                ab.used = 1;
                ab.size = size;
                ab.actual_size = size;
                ab.handle = port->handle;
                ab.client = port->client;
                ret = msm_audio_ion_mmap(&ab, vms);
        }
        return ret;
}

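/* Fill an APR header for a per-session USM command or data packet. */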
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
                          uint32_t pkt_size, bool cmd_flg)
{
        mutex_lock(&usc->cmd_lock);
        hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                       APR_HDR_LEN(sizeof(struct apr_hdr)),
                                       APR_PKT_VER);
        hdr->src_svc = ((struct apr_svc *)usc->apr)->id;
        hdr->src_domain = APR_DOMAIN_APPS;
        hdr->dest_svc = APR_SVC_USM;
        hdr->dest_domain = APR_DOMAIN_ADSP;
        hdr->src_port = (usc->session << 8) | 0x0001;
        hdr->dest_port = (usc->session << 8) | 0x0001;
        if (cmd_flg) {
                hdr->token = usc->session;
                atomic_set(&usc->cmd_state, 1);
        }
        hdr->pkt_size = pkt_size;
        mutex_unlock(&usc->cmd_lock);
}

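/* Translate an external (user-facing) format id to the aDSP format id. */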
static uint32_t q6usm_ext2int_format(uint32_t ext_format)
{
        uint32_t int_format = INVALID_FORMAT;

        switch (ext_format) {
        case FORMAT_USPS_EPOS:
                int_format = US_POINT_EPOS_FORMAT_V2;
                break;
        case FORMAT_USRAW:
                int_format = US_RAW_FORMAT_V2;
                break;
        case FORMAT_USPROX:
                int_format = US_PROX_FORMAT_V4;
                break;
        case FORMAT_USGES_SYNC:
                int_format = US_GES_SYNC_FORMAT;
                break;
        case FORMAT_USRAW_SYNC:
                int_format = US_RAW_SYNC_FORMAT;
                break;
        default:
                pr_err("%s: Invalid format[%d]\n", __func__, ext_format);
                break;
        }

        return int_format;
}

int q6usm_open_read(struct us_client *usc,
                    uint32_t format)
{
        uint32_t int_format = INVALID_FORMAT;
        int rc = 0x00;
        struct usm_stream_cmd_open_read open;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: client or its apr is NULL\n", __func__);
                return -EINVAL;
        }

        pr_debug("%s: session[%d]", __func__, usc->session);

        q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
        open.hdr.opcode = USM_STREAM_CMD_OPEN_READ;
        open.src_endpoint = 0; /* AFE */
        open.pre_proc_top = 0; /* No preprocessing required */

        int_format = q6usm_ext2int_format(format);
        if (int_format == INVALID_FORMAT)
                return -EINVAL;

        open.uMode = STREAM_PRIORITY_NORMAL;
        open.format = int_format;

        rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
        if (rc < 0) {
                pr_err("%s: open failed op[0x%x]rc[%d]\n",
                       __func__, open.hdr.opcode, rc);
                goto fail_cmd;
        }

        rc = wait_event_timeout(usc->cmd_wait,
                                (atomic_read(&usc->cmd_state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout, waited for OPEN_READ rc[%d]\n",
                       __func__, rc);
                goto fail_cmd;
        } else
                rc = 0;
fail_cmd:
        return rc;
}

int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
        uint32_t int_format = INVALID_FORMAT;
        struct usm_stream_cmd_encdec_cfg_blk enc_cfg_obj;
        struct usm_stream_cmd_encdec_cfg_blk *enc_cfg = &enc_cfg_obj;
        int rc = 0;
        uint32_t total_cfg_size =
                sizeof(struct usm_stream_cmd_encdec_cfg_blk);
        uint32_t round_params_size = 0;
        uint8_t is_allocated = 0;

        if ((usc == NULL) || (us_cfg == NULL)) {
                pr_err("%s: wrong input", __func__);
                return -EINVAL;
        }

        int_format = q6usm_ext2int_format(us_cfg->format_id);
        if (int_format == INVALID_FORMAT) {
                pr_err("%s: wrong input format[%d]",
                       __func__, us_cfg->format_id);
                return -EINVAL;
        }

        /* Transparent configuration data is after enc_cfg */
        /* Integer number of u32s is required */
        round_params_size = ((us_cfg->params_size + 3)/4) * 4;
        if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
                /* Dynamically allocated encdec_cfg_blk is required */
                /* static part use */
                round_params_size -= USM_MAX_CFG_DATA_SIZE;
                total_cfg_size += round_params_size;
                enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
                if (enc_cfg == NULL) {
                        pr_err("%s: enc_cfg[%d] allocation failed\n",
                               __func__, total_cfg_size);
                        return -ENOMEM;
                }
                is_allocated = 1;
        } else
                round_params_size = 0;

        q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size, true);

        enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM;
        enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK;
        enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk) +
                              round_params_size;
        enc_cfg->enc_blk.frames_per_buf = 1;
        enc_cfg->enc_blk.format_id = int_format;
        enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common) +
                                    USM_MAX_CFG_DATA_SIZE +
                                    round_params_size;
        memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common),
               sizeof(struct usm_cfg_common));

        /* Transparent data copy */
        memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params,
               us_cfg->params_size);
        pr_debug("%s: cfg_size[%d], params_size[%d]\n",
                 __func__,
                 enc_cfg->enc_blk.cfg_size,
                 us_cfg->params_size);
        pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n",
                 __func__,
                 enc_cfg->enc_blk.transp_data[0],
                 enc_cfg->enc_blk.transp_data[1],
                 enc_cfg->enc_blk.transp_data[2],
                 enc_cfg->enc_blk.transp_data[3],
                 enc_cfg->enc_blk.transp_data[4],
                 enc_cfg->enc_blk.transp_data[5],
                 enc_cfg->enc_blk.transp_data[6],
                 enc_cfg->enc_blk.transp_data[7]
                );
        pr_debug("%s: srate:%d, ch=%d, bps= %d;\n",
                 __func__, enc_cfg->enc_blk.cfg_common.sample_rate,
                 enc_cfg->enc_blk.cfg_common.ch_cfg,
                 enc_cfg->enc_blk.cfg_common.bits_per_sample);
        pr_debug("dmap:[0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x]; dev_id=0x%x\n",
                 enc_cfg->enc_blk.cfg_common.data_map[0],
                 enc_cfg->enc_blk.cfg_common.data_map[1],
                 enc_cfg->enc_blk.cfg_common.data_map[2],
                 enc_cfg->enc_blk.cfg_common.data_map[3],
                 enc_cfg->enc_blk.cfg_common.data_map[4],
                 enc_cfg->enc_blk.cfg_common.data_map[5],
                 enc_cfg->enc_blk.cfg_common.data_map[6],
                 enc_cfg->enc_blk.cfg_common.data_map[7],
                 enc_cfg->enc_blk.cfg_common.dev_id);

        rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg);
        if (rc < 0) {
                pr_err("%s: Command open failed\n", __func__);
                rc = -EINVAL;
                goto fail_cmd;
        }

        rc = wait_event_timeout(usc->cmd_wait,
                                (atomic_read(&usc->cmd_state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout opcode[0x%x]\n",
                       __func__, enc_cfg->hdr.opcode);
        } else
                rc = 0;

fail_cmd:
        if (is_allocated == 1)
                kfree(enc_cfg);

        return rc;
}

int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
        uint32_t int_format = INVALID_FORMAT;
        struct usm_stream_media_format_update dec_cfg_obj;
        struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj;
        int rc = 0;
        uint32_t total_cfg_size = sizeof(struct usm_stream_media_format_update);
        uint32_t round_params_size = 0;
        uint8_t is_allocated = 0;

        if ((usc == NULL) || (us_cfg == NULL)) {
                pr_err("%s: wrong input", __func__);
                return -EINVAL;
        }

        int_format = q6usm_ext2int_format(us_cfg->format_id);
        if (int_format == INVALID_FORMAT) {
                pr_err("%s: wrong input format[%d]",
                       __func__, us_cfg->format_id);
                return -EINVAL;
        }

        /* Transparent configuration data is after dec_cfg */
        /* Integer number of u32s is required */
        round_params_size = ((us_cfg->params_size + 3)/4) * 4;
        if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
                /* Dynamically allocated encdec_cfg_blk is required */
                /* static part use */
                round_params_size -= USM_MAX_CFG_DATA_SIZE;
                total_cfg_size += round_params_size;
                dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
                if (dec_cfg == NULL) {
                        pr_err("%s: dec_cfg[%d] allocation failed\n",
                               __func__, total_cfg_size);
                        return -ENOMEM;
                }
                is_allocated = 1;
        } else { /* static transp_data is enough */
                round_params_size = 0;
        }

        q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size, true);

        dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE;
        dec_cfg->format_id = int_format;
        dec_cfg->cfg_size = sizeof(struct usm_cfg_common) +
                            USM_MAX_CFG_DATA_SIZE +
                            round_params_size;
        memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common),
               sizeof(struct usm_cfg_common));

        /* Transparent data copy */
        memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size);
        pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n",
                 __func__,
                 dec_cfg->cfg_size,
                 us_cfg->params_size,
                 dec_cfg->transp_data[0],
                 dec_cfg->transp_data[1],
                 dec_cfg->transp_data[2],
                 dec_cfg->transp_data[3]
                );

        rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg);
        if (rc < 0) {
                pr_err("%s: Command open failed\n", __func__);
                rc = -EINVAL;
                goto fail_cmd;
        }

        rc = wait_event_timeout(usc->cmd_wait,
                                (atomic_read(&usc->cmd_state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout opcode[0x%x]\n",
                       __func__, dec_cfg->hdr.opcode);
        } else
                rc = 0;

fail_cmd:
        if (is_allocated == 1)
                kfree(dec_cfg);

        return rc;
}

int q6usm_open_write(struct us_client *usc,
                     uint32_t format)
{
        int rc = 0;
        uint32_t int_format = INVALID_FORMAT;
        struct usm_stream_cmd_open_write open;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }

        pr_debug("%s: session[%d]", __func__, usc->session);

        q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
        open.hdr.opcode = USM_STREAM_CMD_OPEN_WRITE;

        int_format = q6usm_ext2int_format(format);
        if (int_format == INVALID_FORMAT) {
                pr_err("%s: wrong format[%d]", __func__, format);
                return -EINVAL;
        }

        open.format = int_format;

        rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
        if (rc < 0) {
                pr_err("%s: open failed op[0x%x]rc[%d]\n",
                       __func__, open.hdr.opcode, rc);
                goto fail_cmd;
        }

        rc = wait_event_timeout(usc->cmd_wait,
                                (atomic_read(&usc->cmd_state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout. waited for OPEN_WRITE rc[%d]\n",
                       __func__, rc);
                goto fail_cmd;
        } else
                rc = 0;
fail_cmd:
        return rc;
}

int q6usm_run(struct us_client *usc, uint32_t flags,
              uint32_t msw_ts, uint32_t lsw_ts)
{
        struct usm_stream_cmd_run run;
        int rc = 0;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }

        q6usm_add_hdr(usc, &run.hdr, sizeof(run), true);
        run.hdr.opcode = USM_SESSION_CMD_RUN;
        run.flags = flags;
        run.msw_ts = msw_ts;
        run.lsw_ts = lsw_ts;

        rc = apr_send_pkt(usc->apr, (uint32_t *) &run);
        if (rc < 0) {
                pr_err("%s: Command run failed[%d]\n", __func__, rc);
                goto fail_cmd;
        }

        rc = wait_event_timeout(usc->cmd_wait,
                                (atomic_read(&usc->cmd_state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: timeout. waited for run success rc[%d]\n",
                       __func__, rc);
        } else
                rc = 0;

fail_cmd:
        return rc;
}

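/*
 * Queue READ commands for every free buffer between cpu_buf and read_ind,
 * advancing cpu_buf (with wrap-around) as each command is sent.
 */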
int q6usm_read(struct us_client *usc, uint32_t read_ind)
{
        struct usm_stream_cmd_read read;
        struct us_port_data *port = NULL;
        int rc = 0;
        u32 read_counter = 0;
        u32 loop_ind = 0;
        u64 buf_addr = 0;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        port = &usc->port[OUT];

        if (read_ind > port->buf_cnt) {
                pr_err("%s: wrong read_ind[%d]\n",
                       __func__, read_ind);
                return -EINVAL;
        }
        if (read_ind == port->cpu_buf) {
                pr_err("%s: no free region\n", __func__);
                return 0;
        }

        if (read_ind > port->cpu_buf) { /* 1 range */
                read_counter = read_ind - port->cpu_buf;
        } else { /* 2 ranges */
                read_counter = (port->buf_cnt - port->cpu_buf) + read_ind;
        }

        q6usm_add_hdr(usc, &read.hdr, sizeof(read), false);

        read.hdr.opcode = USM_DATA_CMD_READ;
        read.buf_size = port->buf_size;
        buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
        read.buf_addr_lsw = lower_32_bits(buf_addr);
        read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
        read.mem_map_handle = *((uint32_t *)(port->ext));

        for (loop_ind = 0; loop_ind < read_counter; ++loop_ind) {
                u32 temp_cpu_buf = port->cpu_buf;

                buf_addr = (u64)(port->phys) +
                           port->buf_size * (port->cpu_buf);
                read.buf_addr_lsw = lower_32_bits(buf_addr);
                read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
                read.seq_id = port->cpu_buf;
                read.hdr.token = port->cpu_buf;
                read.counter = 1;

                ++(port->cpu_buf);
                if (port->cpu_buf == port->buf_cnt)
                        port->cpu_buf = 0;

                rc = apr_send_pkt(usc->apr, (uint32_t *) &read);
                if (rc < 0) {
                        port->cpu_buf = temp_cpu_buf;
                        pr_err("%s:read op[0x%x]rc[%d]\n",
                               __func__, read.hdr.opcode, rc);
                        break;
                }

                rc = 0;
        } /* bufs loop */

        return rc;
}

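/*
 * Queue WRITE commands from cpu_buf up to (but not including) write_ind,
 * after validating that write_ind lies inside the current free region
 * reported by the DSP (dsp_buf).
 */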
int q6usm_write(struct us_client *usc, uint32_t write_ind)
{
        int rc = 0;
        struct usm_stream_cmd_write cmd_write;
        struct us_port_data *port = NULL;
        u32 current_dsp_buf = 0;
        u64 buf_addr = 0;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        port = &usc->port[IN];

        current_dsp_buf = port->dsp_buf;
        /* free region, caused by new dsp_buf report from DSP, */
        /* can be only extended */
        if (port->cpu_buf >= current_dsp_buf) {
                /* 2 -part free region, including empty buffer */
                if ((write_ind <= port->cpu_buf) &&
                    (write_ind > current_dsp_buf)) {
                        pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
                               __func__, write_ind,
                               current_dsp_buf, port->cpu_buf);
                        return -EINVAL;
                }
        } else {
                /* 1 -part free region */
                if ((write_ind <= port->cpu_buf) ||
                    (write_ind > current_dsp_buf)) {
                        pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
                               __func__, write_ind,
                               current_dsp_buf, port->cpu_buf);
                        return -EINVAL;
                }
        }

        q6usm_add_hdr(usc, &cmd_write.hdr, sizeof(cmd_write), false);

        cmd_write.hdr.opcode = USM_DATA_CMD_WRITE;
        cmd_write.buf_size = port->buf_size;
        buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
        cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
        cmd_write.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
        cmd_write.mem_map_handle = *((uint32_t *)(port->ext));
        cmd_write.res0 = 0;
        cmd_write.res1 = 0;
        cmd_write.res2 = 0;

        while (port->cpu_buf != write_ind) {
                u32 temp_cpu_buf = port->cpu_buf;

                buf_addr = (u64)(port->phys) +
                           port->buf_size * (port->cpu_buf);
                cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
                cmd_write.buf_addr_msw =
                                msm_audio_populate_upper_32_bits(buf_addr);
                cmd_write.seq_id = port->cpu_buf;
                cmd_write.hdr.token = port->cpu_buf;

                ++(port->cpu_buf);
                if (port->cpu_buf == port->buf_cnt)
                        port->cpu_buf = 0;

                rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_write);
                if (rc < 0) {
                        port->cpu_buf = temp_cpu_buf;
                        pr_err("%s:write op[0x%x];rc[%d];cpu_buf[%d]\n",
                               __func__, cmd_write.hdr.opcode,
                               rc, port->cpu_buf);
                        break;
                }

                rc = 0;
        }

        return rc;
}

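/*
 * The write queue is considered full when advancing cpu_buf by one would
 * collide with dsp_buf, which is also returned through free_region.
 */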
bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region)
{
        struct us_port_data *port = NULL;
        u32 cpu_buf = 0;

        if ((usc == NULL) || !free_region) {
                pr_err("%s: input data wrong\n", __func__);
                return false;
        }
        port = &usc->port[IN];
        cpu_buf = port->cpu_buf + 1;
        if (cpu_buf == port->buf_cnt)
                cpu_buf = 0;

        *free_region = port->dsp_buf;

        return cpu_buf == *free_region;
}

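/* Send a simple stream command (currently only CMD_CLOSE) and wait for it. */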
int q6usm_cmd(struct us_client *usc, int cmd)
{
        struct apr_hdr hdr;
        int rc = 0;
        atomic_t *state;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        q6usm_add_hdr(usc, &hdr, sizeof(hdr), true);
        switch (cmd) {
        case CMD_CLOSE:
                hdr.opcode = USM_STREAM_CMD_CLOSE;
                state = &usc->cmd_state;
                break;
        default:
                pr_err("%s: Invalid command[%d]\n", __func__, cmd);
                rc = -EINVAL;
                goto fail_cmd;
        }

        rc = apr_send_pkt(usc->apr, (uint32_t *) &hdr);
        if (rc < 0) {
                pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode);
                goto fail_cmd;
        }
        rc = wait_event_timeout(usc->cmd_wait, (atomic_read(state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s:timeout. waited for response opcode[0x%x]\n",
                       __func__, hdr.opcode);
        } else
                rc = 0;
fail_cmd:
        return rc;
}

int q6usm_set_us_detection(struct us_client *usc,
                           struct usm_session_cmd_detect_info *detect_info,
                           uint16_t detect_info_size)
{
        int rc = 0;

        if ((usc == NULL) ||
            (detect_info_size == 0) ||
            (detect_info == NULL)) {
                pr_err("%s: wrong input: usc=0x%pK, inf_size=%d; info=0x%pK",
                       __func__,
                       usc,
                       detect_info_size,
                       detect_info);
                return -EINVAL;
        }

        q6usm_add_hdr(usc, &detect_info->hdr, detect_info_size, true);

        detect_info->hdr.opcode = USM_SESSION_CMD_SIGNAL_DETECT_MODE;

        rc = apr_send_pkt(usc->apr, (uint32_t *)detect_info);
        if (rc < 0) {
                pr_err("%s: Command signal detect failed\n", __func__);
                return -EINVAL;
        }
        rc = wait_event_timeout(usc->cmd_wait,
                                (atomic_read(&usc->cmd_state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: CMD_SIGNAL_DETECT_MODE: timeout=%d\n",
                       __func__, Q6USM_TIMEOUT_JIFFIES);
        } else
                rc = 0;

        return rc;
}

int q6usm_set_us_stream_param(int dir, struct us_client *usc,
                              uint32_t module_id, uint32_t param_id,
                              uint32_t buf_size)
{
        int rc = 0;
        struct usm_stream_cmd_set_param cmd_set_param;
        struct us_port_data *port = NULL;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        port = &usc->port[dir];

        q6usm_add_hdr(usc, &cmd_set_param.hdr, sizeof(cmd_set_param), true);

        cmd_set_param.hdr.opcode = USM_STREAM_CMD_SET_PARAM;
        cmd_set_param.buf_size = buf_size;
        cmd_set_param.buf_addr_msw =
                        msm_audio_populate_upper_32_bits(port->param_phys);
        cmd_set_param.buf_addr_lsw = lower_32_bits(port->param_phys);
        cmd_set_param.mem_map_handle =
                        *((uint32_t *)(port->param_buf_mem_handle));
        cmd_set_param.module_id = module_id;
        cmd_set_param.param_id = param_id;
        cmd_set_param.hdr.token = 0;

        rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_set_param);
        if (rc < 0) {
                pr_err("%s:write op[0x%x];rc[%d]\n",
                       __func__, cmd_set_param.hdr.opcode, rc);
        }

        rc = wait_event_timeout(usc->cmd_wait,
                                (atomic_read(&usc->cmd_state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: CMD_SET_PARAM: timeout=%d\n",
                       __func__, Q6USM_TIMEOUT_JIFFIES);
        } else
                rc = 0;

        return rc;
}

int q6usm_get_us_stream_param(int dir, struct us_client *usc,
                              uint32_t module_id, uint32_t param_id,
                              uint32_t buf_size)
{
        int rc = 0;
        struct usm_stream_cmd_get_param cmd_get_param;
        struct us_port_data *port = NULL;

        if ((usc == NULL) || (usc->apr == NULL)) {
                pr_err("%s: APR handle NULL\n", __func__);
                return -EINVAL;
        }
        port = &usc->port[dir];

        q6usm_add_hdr(usc, &cmd_get_param.hdr, sizeof(cmd_get_param), true);

        cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
        cmd_get_param.buf_size = buf_size;
        cmd_get_param.buf_addr_msw =
                        msm_audio_populate_upper_32_bits(port->param_phys);
        cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
        cmd_get_param.mem_map_handle =
                        *((uint32_t *)(port->param_buf_mem_handle));
        cmd_get_param.module_id = module_id;
        cmd_get_param.param_id = param_id;
        cmd_get_param.hdr.token = 0;

        rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);
        if (rc < 0) {
                pr_err("%s:write op[0x%x];rc[%d]\n",
                       __func__, cmd_get_param.hdr.opcode, rc);
        }

        rc = wait_event_timeout(usc->cmd_wait,
                                (atomic_read(&usc->cmd_state) == 0),
                                Q6USM_TIMEOUT_JIFFIES);
        if (!rc) {
                rc = -ETIME;
                pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
                       __func__, Q6USM_TIMEOUT_JIFFIES);
        } else
                rc = 0;

        return rc;
}

int __init q6usm_init(void)
{
        pr_debug("%s\n", __func__);
        init_waitqueue_head(&this_mmap.cmd_wait);
        memset(session, 0, sizeof(session));
        return 0;
}