q6usm.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/mutex.h>
  6. #include <linux/wait.h>
  7. #include <linux/sched.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/slab.h>
  10. #include <dsp/msm_audio_ion.h>
  11. #include <dsp/apr_audio-v2.h>
  12. #include <ipc/apr_us.h>
  13. #include "q6usm.h"
/* ADSP memory pool used for USM shared-memory mappings */
#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3
#define MEM_4K_OFFSET 4095
#define MEM_4K_MASK 0xfffff000
#define USM_SESSION_MAX 0x02 /* aDSP:USM limit */
/* Index of the status word in READ_DONE/WRITE_DONE payloads */
#define READDONE_IDX_STATUS 0
#define WRITEDONE_IDX_STATUS 0
/* Standard timeout in the asynchronous ops */
#define Q6USM_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */

/* Protects the session[] table below */
static DEFINE_MUTEX(session_lock);
/* One slot per aDSP USM session; NULL means the slot is free.
 * Session ids handed to clients are index + 1 (0 is reserved). */
static struct us_client *session[USM_SESSION_MAX];

static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv);
static int32_t q6usm_callback(struct apr_client_data *data, void *priv);
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
	uint32_t pkt_size, bool cmd_flg);

/*
 * State of the common APR port shared by all clients for
 * memory map/unmap commands.
 */
struct usm_mmap {
	atomic_t ref_cnt;		/* number of allocated clients */
	atomic_t cmd_state;		/* 1 while a map/unmap cmd is in flight */
	wait_queue_head_t cmd_wait;	/* woken when cmd_state drops to 0 */
	void *apr;			/* common APR port handle */
	int mem_handle;			/* handle reported by the last MAP rsp */
};

static struct usm_mmap this_mmap;
  36. static void q6usm_add_mmaphdr(struct apr_hdr *hdr,
  37. uint32_t pkt_size, bool cmd_flg, u32 token)
  38. {
  39. hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  40. APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
  41. hdr->src_port = 0;
  42. hdr->dest_port = 0;
  43. if (cmd_flg) {
  44. hdr->token = token;
  45. atomic_set(&this_mmap.cmd_state, 1);
  46. }
  47. hdr->pkt_size = pkt_size;
  48. }
  49. static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz,
  50. uint32_t bufcnt, uint32_t session, uint32_t *mem_handle)
  51. {
  52. struct usm_cmd_memory_map_region mem_region_map;
  53. int rc = 0;
  54. if (this_mmap.apr == NULL) {
  55. pr_err("%s: APR handle NULL\n", __func__);
  56. return -EINVAL;
  57. }
  58. q6usm_add_mmaphdr(&mem_region_map.hdr,
  59. sizeof(struct usm_cmd_memory_map_region), true,
  60. ((session << 8) | dir));
  61. mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
  62. mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
  63. mem_region_map.num_regions = 1;
  64. mem_region_map.flags = 0;
  65. mem_region_map.shm_addr_lsw = lower_32_bits(buf_add);
  66. mem_region_map.shm_addr_msw =
  67. msm_audio_populate_upper_32_bits(buf_add);
  68. mem_region_map.mem_size_bytes = bufsz * bufcnt;
  69. rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map);
  70. if (rc < 0) {
  71. pr_err("%s: mem_map op[0x%x]rc[%d]\n",
  72. __func__, mem_region_map.hdr.opcode, rc);
  73. rc = -EINVAL;
  74. goto fail_cmd;
  75. }
  76. rc = wait_event_timeout(this_mmap.cmd_wait,
  77. (atomic_read(&this_mmap.cmd_state) == 0),
  78. Q6USM_TIMEOUT_JIFFIES);
  79. if (!rc) {
  80. rc = -ETIME;
  81. pr_err("%s: timeout. waited for memory_map\n", __func__);
  82. } else {
  83. *mem_handle = this_mmap.mem_handle;
  84. rc = 0;
  85. }
  86. fail_cmd:
  87. return rc;
  88. }
  89. int q6usm_memory_unmap(phys_addr_t buf_add, int dir, uint32_t session,
  90. uint32_t mem_handle)
  91. {
  92. struct usm_cmd_memory_unmap_region mem_unmap;
  93. int rc = 0;
  94. if (this_mmap.apr == NULL) {
  95. pr_err("%s: APR handle NULL\n", __func__);
  96. return -EINVAL;
  97. }
  98. q6usm_add_mmaphdr(&mem_unmap.hdr,
  99. sizeof(struct usm_cmd_memory_unmap_region), true,
  100. ((session << 8) | dir));
  101. mem_unmap.hdr.opcode = USM_CMD_SHARED_MEM_UNMAP_REGION;
  102. mem_unmap.mem_map_handle = mem_handle;
  103. rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
  104. if (rc < 0) {
  105. pr_err("%s: mem_unmap op[0x%x] rc[%d]\n",
  106. __func__, mem_unmap.hdr.opcode, rc);
  107. goto fail_cmd;
  108. }
  109. rc = wait_event_timeout(this_mmap.cmd_wait,
  110. (atomic_read(&this_mmap.cmd_state) == 0),
  111. Q6USM_TIMEOUT_JIFFIES);
  112. if (!rc) {
  113. rc = -ETIME;
  114. pr_err("%s: timeout. waited for memory_unmap\n", __func__);
  115. } else
  116. rc = 0;
  117. fail_cmd:
  118. return rc;
  119. }
  120. static int q6usm_session_alloc(struct us_client *usc)
  121. {
  122. int ind = 0;
  123. mutex_lock(&session_lock);
  124. for (ind = 0; ind < USM_SESSION_MAX; ++ind) {
  125. if (!session[ind]) {
  126. session[ind] = usc;
  127. mutex_unlock(&session_lock);
  128. ++ind; /* session id: 0 reserved */
  129. pr_debug("%s: session[%d] was allocated\n",
  130. __func__, ind);
  131. return ind;
  132. }
  133. }
  134. mutex_unlock(&session_lock);
  135. return -ENOMEM;
  136. }
  137. static void q6usm_session_free(struct us_client *usc)
  138. {
  139. /* Session index was incremented during allocation */
  140. uint16_t ind = (uint16_t)usc->session - 1;
  141. pr_debug("%s: to free session[%d]\n", __func__, ind);
  142. if (ind < USM_SESSION_MAX) {
  143. mutex_lock(&session_lock);
  144. session[ind] = NULL;
  145. mutex_unlock(&session_lock);
  146. }
  147. }
  148. static int q6usm_us_client_buf_free(unsigned int dir,
  149. struct us_client *usc)
  150. {
  151. struct us_port_data *port;
  152. int rc = 0;
  153. if ((usc == NULL) ||
  154. ((dir != IN) && (dir != OUT)))
  155. return -EINVAL;
  156. mutex_lock(&usc->cmd_lock);
  157. port = &usc->port[dir];
  158. if (port == NULL) {
  159. mutex_unlock(&usc->cmd_lock);
  160. return -EINVAL;
  161. }
  162. if (port->data == NULL) {
  163. mutex_unlock(&usc->cmd_lock);
  164. return 0;
  165. }
  166. rc = q6usm_memory_unmap(port->phys, dir, usc->session,
  167. *((uint32_t *)port->ext));
  168. pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
  169. (void *)port->data, (u64)port->phys, (void *)&port->phys);
  170. msm_audio_ion_free(port->dma_buf);
  171. port->data = NULL;
  172. port->phys = 0;
  173. port->buf_size = 0;
  174. port->buf_cnt = 0;
  175. port->dma_buf = NULL;
  176. mutex_unlock(&usc->cmd_lock);
  177. return rc;
  178. }
  179. int q6usm_us_param_buf_free(unsigned int dir,
  180. struct us_client *usc)
  181. {
  182. struct us_port_data *port;
  183. int rc = 0;
  184. if ((usc == NULL) ||
  185. ((dir != IN) && (dir != OUT)))
  186. return -EINVAL;
  187. mutex_lock(&usc->cmd_lock);
  188. port = &usc->port[dir];
  189. if (port == NULL) {
  190. mutex_unlock(&usc->cmd_lock);
  191. return -EINVAL;
  192. }
  193. if (port->param_buf == NULL) {
  194. mutex_unlock(&usc->cmd_lock);
  195. return 0;
  196. }
  197. rc = q6usm_memory_unmap(port->param_phys, dir, usc->session,
  198. *((uint32_t *)port->param_buf_mem_handle));
  199. pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__,
  200. (void *)port->param_buf, (u64)port->param_phys,
  201. (void *)&port->param_phys);
  202. msm_audio_ion_free(port->param_dma_buf);
  203. port->param_buf = NULL;
  204. port->param_phys = 0;
  205. port->param_buf_size = 0;
  206. port->param_dma_buf = NULL;
  207. mutex_unlock(&usc->cmd_lock);
  208. return rc;
  209. }
  210. void q6usm_us_client_free(struct us_client *usc)
  211. {
  212. int loopcnt = 0;
  213. struct us_port_data *port;
  214. uint32_t *p_mem_handle = NULL;
  215. if ((usc == NULL) ||
  216. !(usc->session))
  217. return;
  218. for (loopcnt = 0; loopcnt <= OUT; ++loopcnt) {
  219. port = &usc->port[loopcnt];
  220. if (port->data == NULL)
  221. continue;
  222. pr_debug("%s: loopcnt = %d\n", __func__, loopcnt);
  223. q6usm_us_client_buf_free(loopcnt, usc);
  224. q6usm_us_param_buf_free(loopcnt, usc);
  225. }
  226. q6usm_session_free(usc);
  227. apr_deregister(usc->apr);
  228. pr_debug("%s: APR De-Register\n", __func__);
  229. if (atomic_read(&this_mmap.ref_cnt) <= 0) {
  230. pr_err("%s: APR Common Port Already Closed\n", __func__);
  231. goto done;
  232. }
  233. atomic_dec(&this_mmap.ref_cnt);
  234. if (atomic_read(&this_mmap.ref_cnt) == 0) {
  235. apr_deregister(this_mmap.apr);
  236. pr_debug("%s: APR De-Register common port\n", __func__);
  237. }
  238. done:
  239. p_mem_handle = (uint32_t *)usc->port[IN].ext;
  240. kfree(p_mem_handle);
  241. kfree(usc);
  242. pr_debug("%s:\n", __func__);
  243. }
  244. struct us_client *q6usm_us_client_alloc(
  245. void (*cb)(uint32_t, uint32_t, uint32_t *, void *),
  246. void *priv)
  247. {
  248. struct us_client *usc;
  249. uint32_t *p_mem_handle = NULL;
  250. int n;
  251. int lcnt = 0;
  252. usc = kzalloc(sizeof(struct us_client), GFP_KERNEL);
  253. if (usc == NULL)
  254. return NULL;
  255. p_mem_handle = kzalloc(sizeof(uint32_t) * 4, GFP_KERNEL);
  256. if (p_mem_handle == NULL) {
  257. kfree(usc);
  258. return NULL;
  259. }
  260. n = q6usm_session_alloc(usc);
  261. if (n <= 0)
  262. goto fail_session;
  263. usc->session = n;
  264. usc->cb = cb;
  265. usc->priv = priv;
  266. usc->apr = apr_register("ADSP", "USM",
  267. (apr_fn)q6usm_callback,
  268. ((usc->session) << 8 | 0x0001),
  269. usc);
  270. if (usc->apr == NULL) {
  271. pr_err("%s: Registration with APR failed\n", __func__);
  272. goto fail;
  273. }
  274. pr_debug("%s: Registering the common port with APR\n", __func__);
  275. if (atomic_read(&this_mmap.ref_cnt) == 0) {
  276. this_mmap.apr = apr_register("ADSP", "USM",
  277. (apr_fn)q6usm_mmapcallback,
  278. 0x0FFFFFFFF, &this_mmap);
  279. if (this_mmap.apr == NULL) {
  280. pr_err("%s: USM port registration failed\n",
  281. __func__);
  282. goto fail;
  283. }
  284. }
  285. atomic_inc(&this_mmap.ref_cnt);
  286. init_waitqueue_head(&usc->cmd_wait);
  287. mutex_init(&usc->cmd_lock);
  288. for (lcnt = 0; lcnt <= OUT; ++lcnt) {
  289. mutex_init(&usc->port[lcnt].lock);
  290. spin_lock_init(&usc->port[lcnt].dsp_lock);
  291. usc->port[lcnt].ext = (void *)p_mem_handle++;
  292. usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++;
  293. pr_err("%s: usc->port[%d].ext=%pK;\n",
  294. __func__, lcnt, usc->port[lcnt].ext);
  295. }
  296. atomic_set(&usc->cmd_state, 0);
  297. return usc;
  298. fail:
  299. kfree(p_mem_handle);
  300. q6usm_us_client_free(usc);
  301. return NULL;
  302. fail_session:
  303. kfree(p_mem_handle);
  304. kfree(usc);
  305. return NULL;
  306. }
  307. int q6usm_us_client_buf_alloc(unsigned int dir,
  308. struct us_client *usc,
  309. unsigned int bufsz,
  310. unsigned int bufcnt)
  311. {
  312. int rc = 0;
  313. struct us_port_data *port = NULL;
  314. unsigned int size = bufsz*bufcnt;
  315. size_t len;
  316. if ((usc == NULL) ||
  317. ((dir != IN) && (dir != OUT)) || (size == 0) ||
  318. (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
  319. pr_err("%s: wrong parameters: size=%d; bufcnt=%d\n",
  320. __func__, size, bufcnt);
  321. return -EINVAL;
  322. }
  323. mutex_lock(&usc->cmd_lock);
  324. port = &usc->port[dir];
  325. /* The size to allocate should be multiple of 4K bytes */
  326. size = PAGE_ALIGN(size);
  327. rc = msm_audio_ion_alloc(&port->dma_buf,
  328. size, &port->phys,
  329. &len, &port->data);
  330. if (rc) {
  331. pr_err("%s: US ION allocation failed, rc = %d\n",
  332. __func__, rc);
  333. mutex_unlock(&usc->cmd_lock);
  334. return -ENOMEM;
  335. }
  336. port->buf_cnt = bufcnt;
  337. port->buf_size = bufsz;
  338. pr_debug("%s: data[%pK]; phys[%llx]; [%pK]\n", __func__,
  339. (void *)port->data,
  340. (u64)port->phys,
  341. (void *)&port->phys);
  342. rc = q6usm_memory_map(port->phys, dir, size, 1, usc->session,
  343. (uint32_t *)port->ext);
  344. if (rc < 0) {
  345. pr_err("%s: CMD Memory_map failed\n", __func__);
  346. mutex_unlock(&usc->cmd_lock);
  347. q6usm_us_client_buf_free(dir, usc);
  348. q6usm_us_param_buf_free(dir, usc);
  349. } else {
  350. mutex_unlock(&usc->cmd_lock);
  351. rc = 0;
  352. }
  353. return rc;
  354. }
  355. int q6usm_us_param_buf_alloc(unsigned int dir,
  356. struct us_client *usc,
  357. unsigned int bufsz)
  358. {
  359. int rc = 0;
  360. struct us_port_data *port = NULL;
  361. unsigned int size = bufsz;
  362. size_t len;
  363. if ((usc == NULL) ||
  364. ((dir != IN) && (dir != OUT)) ||
  365. (usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
  366. pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n",
  367. __func__, dir, bufsz);
  368. return -EINVAL;
  369. }
  370. mutex_lock(&usc->cmd_lock);
  371. port = &usc->port[dir];
  372. if (bufsz == 0) {
  373. pr_debug("%s: bufsz=0, get/set param commands are forbidden\n",
  374. __func__);
  375. port->param_buf = NULL;
  376. mutex_unlock(&usc->cmd_lock);
  377. return rc;
  378. }
  379. /* The size to allocate should be multiple of 4K bytes */
  380. size = PAGE_ALIGN(size);
  381. rc = msm_audio_ion_alloc(&port->param_dma_buf,
  382. size, &port->param_phys,
  383. &len, &port->param_buf);
  384. if (rc) {
  385. pr_err("%s: US ION allocation failed, rc = %d\n",
  386. __func__, rc);
  387. mutex_unlock(&usc->cmd_lock);
  388. return -ENOMEM;
  389. }
  390. port->param_buf_size = bufsz;
  391. pr_debug("%s: param_buf[%pK]; param_phys[%llx]; [%pK]\n", __func__,
  392. (void *)port->param_buf,
  393. (u64)port->param_phys,
  394. (void *)&port->param_phys);
  395. rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1,
  396. usc->session, (uint32_t *)port->param_buf_mem_handle);
  397. if (rc < 0) {
  398. pr_err("%s: CMD Memory_map failed\n", __func__);
  399. mutex_unlock(&usc->cmd_lock);
  400. q6usm_us_client_buf_free(dir, usc);
  401. q6usm_us_param_buf_free(dir, usc);
  402. } else {
  403. mutex_unlock(&usc->cmd_lock);
  404. rc = 0;
  405. }
  406. return rc;
  407. }
  408. static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv)
  409. {
  410. uint32_t token;
  411. uint32_t *payload = data->payload;
  412. if (data->payload_size < (2 * sizeof(uint32_t))) {
  413. pr_err("%s: payload has invalid size[%d]\n", __func__,
  414. data->payload_size);
  415. return -EINVAL;
  416. }
  417. pr_debug("%s: ptr0[0x%x]; ptr1[0x%x]; opcode[0x%x]\n",
  418. __func__, payload[0], payload[1], data->opcode);
  419. pr_debug("%s: token[0x%x]; payload_size[%d]; src[%d]; dest[%d];\n",
  420. __func__, data->token, data->payload_size,
  421. data->src_port, data->dest_port);
  422. if (data->opcode == APR_BASIC_RSP_RESULT) {
  423. /* status field check */
  424. if (payload[1]) {
  425. pr_err("%s: wrong response[%d] on cmd [%d]\n",
  426. __func__, payload[1], payload[0]);
  427. } else {
  428. token = data->token;
  429. switch (payload[0]) {
  430. case USM_CMD_SHARED_MEM_UNMAP_REGION:
  431. if (atomic_read(&this_mmap.cmd_state)) {
  432. atomic_set(&this_mmap.cmd_state, 0);
  433. wake_up(&this_mmap.cmd_wait);
  434. }
  435. /* fallthrough */
  436. case USM_CMD_SHARED_MEM_MAP_REGION:
  437. /* For MEM_MAP, additional answer is waited, */
  438. /* therfore, no wake-up here */
  439. pr_debug("%s: cmd[0x%x]; result[0x%x]\n",
  440. __func__, payload[0], payload[1]);
  441. break;
  442. default:
  443. pr_debug("%s: wrong command[0x%x]\n",
  444. __func__, payload[0]);
  445. break;
  446. }
  447. }
  448. } else {
  449. if (data->opcode == USM_CMDRSP_SHARED_MEM_MAP_REGION) {
  450. this_mmap.mem_handle = payload[0];
  451. pr_debug("%s: memory map handle = 0x%x",
  452. __func__, payload[0]);
  453. if (atomic_read(&this_mmap.cmd_state)) {
  454. atomic_set(&this_mmap.cmd_state, 0);
  455. wake_up(&this_mmap.cmd_wait);
  456. }
  457. }
  458. }
  459. return 0;
  460. }
  461. static int32_t q6usm_callback(struct apr_client_data *data, void *priv)
  462. {
  463. struct us_client *usc = (struct us_client *)priv;
  464. unsigned long dsp_flags;
  465. uint32_t *payload = data->payload;
  466. uint32_t token = data->token;
  467. uint32_t opcode = Q6USM_EVENT_UNDEF;
  468. if (usc == NULL) {
  469. pr_err("%s: client info is NULL\n", __func__);
  470. return -EINVAL;
  471. }
  472. if (data->opcode == APR_BASIC_RSP_RESULT) {
  473. if (data->payload_size < (2 * sizeof(uint32_t))) {
  474. pr_err("%s: payload has invalid size[%d]\n", __func__,
  475. data->payload_size);
  476. return -EINVAL;
  477. }
  478. /* status field check */
  479. if (payload[1]) {
  480. pr_err("%s: wrong response[%d] on cmd [%d]\n",
  481. __func__, payload[1], payload[0]);
  482. if (usc->cb)
  483. usc->cb(data->opcode, token,
  484. (uint32_t *)data->payload, usc->priv);
  485. } else {
  486. switch (payload[0]) {
  487. case USM_SESSION_CMD_RUN:
  488. case USM_STREAM_CMD_CLOSE:
  489. if (token != usc->session) {
  490. pr_err("%s: wrong token[%d]",
  491. __func__, token);
  492. break;
  493. }
  494. case USM_STREAM_CMD_OPEN_READ:
  495. case USM_STREAM_CMD_OPEN_WRITE:
  496. case USM_STREAM_CMD_SET_ENC_PARAM:
  497. case USM_DATA_CMD_MEDIA_FORMAT_UPDATE:
  498. case USM_SESSION_CMD_SIGNAL_DETECT_MODE:
  499. case USM_STREAM_CMD_SET_PARAM:
  500. case USM_STREAM_CMD_GET_PARAM:
  501. if (atomic_read(&usc->cmd_state)) {
  502. atomic_set(&usc->cmd_state, 0);
  503. wake_up(&usc->cmd_wait);
  504. }
  505. if (usc->cb)
  506. usc->cb(data->opcode, token,
  507. (uint32_t *)data->payload,
  508. usc->priv);
  509. break;
  510. default:
  511. break;
  512. }
  513. }
  514. return 0;
  515. }
  516. switch (data->opcode) {
  517. case RESET_EVENTS: {
  518. pr_err("%s: Reset event is received: %d %d\n",
  519. __func__,
  520. data->reset_event,
  521. data->reset_proc);
  522. opcode = RESET_EVENTS;
  523. apr_reset(this_mmap.apr);
  524. this_mmap.apr = NULL;
  525. apr_reset(usc->apr);
  526. usc->apr = NULL;
  527. break;
  528. }
  529. case USM_DATA_EVENT_READ_DONE: {
  530. struct us_port_data *port = &usc->port[OUT];
  531. opcode = Q6USM_EVENT_READ_DONE;
  532. spin_lock_irqsave(&port->dsp_lock, dsp_flags);
  533. if (data->payload_size <
  534. (sizeof(uint32_t)*(READDONE_IDX_STATUS + 1))) {
  535. pr_err("%s: Invalid payload size for READDONE[%d]\n",
  536. __func__, data->payload_size);
  537. return -EINVAL;
  538. }
  539. if (payload[READDONE_IDX_STATUS]) {
  540. pr_err("%s: wrong READDONE[%d]; token[%d]\n",
  541. __func__,
  542. payload[READDONE_IDX_STATUS],
  543. token);
  544. token = USM_WRONG_TOKEN;
  545. spin_unlock_irqrestore(&port->dsp_lock,
  546. dsp_flags);
  547. break;
  548. }
  549. if (port->expected_token != token) {
  550. u32 cpu_buf = port->cpu_buf;
  551. pr_err("%s: expected[%d] != token[%d]\n",
  552. __func__, port->expected_token, token);
  553. pr_debug("%s: dsp_buf=%d; cpu_buf=%d;\n",
  554. __func__, port->dsp_buf, cpu_buf);
  555. token = USM_WRONG_TOKEN;
  556. /* To prevent data handle continiue */
  557. port->expected_token = USM_WRONG_TOKEN;
  558. spin_unlock_irqrestore(&port->dsp_lock,
  559. dsp_flags);
  560. break;
  561. } /* port->expected_token != data->token */
  562. port->expected_token = token + 1;
  563. if (port->expected_token == port->buf_cnt)
  564. port->expected_token = 0;
  565. /* gap support */
  566. if (port->expected_token != port->cpu_buf) {
  567. port->dsp_buf = port->expected_token;
  568. token = port->dsp_buf; /* for callback */
  569. } else
  570. port->dsp_buf = token;
  571. spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
  572. break;
  573. } /* case USM_DATA_EVENT_READ_DONE */
  574. case USM_DATA_EVENT_WRITE_DONE: {
  575. struct us_port_data *port = &usc->port[IN];
  576. opcode = Q6USM_EVENT_WRITE_DONE;
  577. if (data->payload_size <
  578. (sizeof(uint32_t)*(WRITEDONE_IDX_STATUS + 1))) {
  579. pr_err("%s: Invalid payload size for WRITEDONE[%d]\n",
  580. __func__, data->payload_size);
  581. return -EINVAL;
  582. }
  583. if (payload[WRITEDONE_IDX_STATUS]) {
  584. pr_err("%s: wrong WRITEDONE_IDX_STATUS[%d]\n",
  585. __func__,
  586. payload[WRITEDONE_IDX_STATUS]);
  587. break;
  588. }
  589. spin_lock_irqsave(&port->dsp_lock, dsp_flags);
  590. port->dsp_buf = token + 1;
  591. if (port->dsp_buf == port->buf_cnt)
  592. port->dsp_buf = 0;
  593. spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
  594. break;
  595. } /* case USM_DATA_EVENT_WRITE_DONE */
  596. case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT: {
  597. pr_debug("%s: US detect result: result=%d",
  598. __func__,
  599. payload[0]);
  600. opcode = Q6USM_EVENT_SIGNAL_DETECT_RESULT;
  601. break;
  602. } /* case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT */
  603. default:
  604. return 0;
  605. } /* switch */
  606. if (usc->cb)
  607. usc->cb(opcode, token,
  608. data->payload, usc->priv);
  609. return 0;
  610. }
  611. uint32_t q6usm_get_virtual_address(int dir,
  612. struct us_client *usc,
  613. struct vm_area_struct *vms)
  614. {
  615. uint32_t ret = 0xffffffff;
  616. if (vms && (usc != NULL) && ((dir == IN) || (dir == OUT))) {
  617. struct us_port_data *port = &usc->port[dir];
  618. int size = PAGE_ALIGN(port->buf_size * port->buf_cnt);
  619. struct audio_buffer ab;
  620. ab.phys = port->phys;
  621. ab.data = port->data;
  622. ab.used = 1;
  623. ab.size = size;
  624. ab.actual_size = size;
  625. ab.dma_buf = port->dma_buf;
  626. ret = msm_audio_ion_mmap(&ab, vms);
  627. }
  628. return ret;
  629. }
  630. static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
  631. uint32_t pkt_size, bool cmd_flg)
  632. {
  633. mutex_lock(&usc->cmd_lock);
  634. hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
  635. APR_HDR_LEN(sizeof(struct apr_hdr)),
  636. APR_PKT_VER);
  637. hdr->src_svc = ((struct apr_svc *)usc->apr)->id;
  638. hdr->src_domain = APR_DOMAIN_APPS;
  639. hdr->dest_svc = APR_SVC_USM;
  640. hdr->dest_domain = APR_DOMAIN_ADSP;
  641. hdr->src_port = (usc->session << 8) | 0x0001;
  642. hdr->dest_port = (usc->session << 8) | 0x0001;
  643. if (cmd_flg) {
  644. hdr->token = usc->session;
  645. atomic_set(&usc->cmd_state, 1);
  646. }
  647. hdr->pkt_size = pkt_size;
  648. mutex_unlock(&usc->cmd_lock);
  649. }
  650. static uint32_t q6usm_ext2int_format(uint32_t ext_format)
  651. {
  652. uint32_t int_format = INVALID_FORMAT;
  653. switch (ext_format) {
  654. case FORMAT_USPS_EPOS:
  655. int_format = US_POINT_EPOS_FORMAT_V2;
  656. break;
  657. case FORMAT_USRAW:
  658. int_format = US_RAW_FORMAT_V2;
  659. break;
  660. case FORMAT_USPROX:
  661. int_format = US_PROX_FORMAT_V4;
  662. break;
  663. case FORMAT_USGES_SYNC:
  664. int_format = US_GES_SYNC_FORMAT;
  665. break;
  666. case FORMAT_USRAW_SYNC:
  667. int_format = US_RAW_SYNC_FORMAT;
  668. break;
  669. default:
  670. pr_err("%s: Invalid format[%d]\n", __func__, ext_format);
  671. break;
  672. }
  673. return int_format;
  674. }
  675. int q6usm_open_read(struct us_client *usc,
  676. uint32_t format)
  677. {
  678. uint32_t int_format = INVALID_FORMAT;
  679. int rc = 0x00;
  680. struct usm_stream_cmd_open_read open;
  681. if ((usc == NULL) || (usc->apr == NULL)) {
  682. pr_err("%s: client or its apr is NULL\n", __func__);
  683. return -EINVAL;
  684. }
  685. pr_debug("%s: session[%d]", __func__, usc->session);
  686. q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
  687. open.hdr.opcode = USM_STREAM_CMD_OPEN_READ;
  688. open.src_endpoint = 0; /* AFE */
  689. open.pre_proc_top = 0; /* No preprocessing required */
  690. int_format = q6usm_ext2int_format(format);
  691. if (int_format == INVALID_FORMAT)
  692. return -EINVAL;
  693. open.uMode = STREAM_PRIORITY_NORMAL;
  694. open.format = int_format;
  695. rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
  696. if (rc < 0) {
  697. pr_err("%s: open failed op[0x%x]rc[%d]\n",
  698. __func__, open.hdr.opcode, rc);
  699. goto fail_cmd;
  700. }
  701. rc = wait_event_timeout(usc->cmd_wait,
  702. (atomic_read(&usc->cmd_state) == 0),
  703. Q6USM_TIMEOUT_JIFFIES);
  704. if (!rc) {
  705. rc = -ETIME;
  706. pr_err("%s: timeout, waited for OPEN_READ rc[%d]\n",
  707. __func__, rc);
  708. goto fail_cmd;
  709. } else
  710. rc = 0;
  711. fail_cmd:
  712. return rc;
  713. }
  714. int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
  715. {
  716. uint32_t int_format = INVALID_FORMAT;
  717. struct usm_stream_cmd_encdec_cfg_blk enc_cfg_obj;
  718. struct usm_stream_cmd_encdec_cfg_blk *enc_cfg = &enc_cfg_obj;
  719. int rc = 0;
  720. uint32_t total_cfg_size =
  721. sizeof(struct usm_stream_cmd_encdec_cfg_blk);
  722. uint32_t round_params_size = 0;
  723. uint8_t is_allocated = 0;
  724. if ((usc == NULL) || (us_cfg == NULL)) {
  725. pr_err("%s: wrong input", __func__);
  726. return -EINVAL;
  727. }
  728. int_format = q6usm_ext2int_format(us_cfg->format_id);
  729. if (int_format == INVALID_FORMAT) {
  730. pr_err("%s: wrong input format[%d]",
  731. __func__, us_cfg->format_id);
  732. return -EINVAL;
  733. }
  734. /* Transparent configuration data is after enc_cfg */
  735. /* Integer number of u32s is required */
  736. round_params_size = ((us_cfg->params_size + 3)/4) * 4;
  737. if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
  738. /* Dynamic allocated encdec_cfg_blk is required */
  739. /* static part use */
  740. round_params_size -= USM_MAX_CFG_DATA_SIZE;
  741. total_cfg_size += round_params_size;
  742. enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
  743. if (enc_cfg == NULL) {
  744. pr_err("%s: enc_cfg[%d] allocation failed\n",
  745. __func__, total_cfg_size);
  746. return -ENOMEM;
  747. }
  748. is_allocated = 1;
  749. } else
  750. round_params_size = 0;
  751. q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size, true);
  752. enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM;
  753. enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK;
  754. enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk)+
  755. round_params_size;
  756. enc_cfg->enc_blk.frames_per_buf = 1;
  757. enc_cfg->enc_blk.format_id = int_format;
  758. enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common)+
  759. USM_MAX_CFG_DATA_SIZE +
  760. round_params_size;
  761. memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common),
  762. sizeof(struct usm_cfg_common));
  763. /* Transparent data copy */
  764. memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params,
  765. us_cfg->params_size);
  766. pr_debug("%s: cfg_size[%d], params_size[%d]\n",
  767. __func__,
  768. enc_cfg->enc_blk.cfg_size,
  769. us_cfg->params_size);
  770. pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n",
  771. __func__,
  772. enc_cfg->enc_blk.transp_data[0],
  773. enc_cfg->enc_blk.transp_data[1],
  774. enc_cfg->enc_blk.transp_data[2],
  775. enc_cfg->enc_blk.transp_data[3],
  776. enc_cfg->enc_blk.transp_data[4],
  777. enc_cfg->enc_blk.transp_data[5],
  778. enc_cfg->enc_blk.transp_data[6],
  779. enc_cfg->enc_blk.transp_data[7]
  780. );
  781. pr_debug("%s: srate:%d, ch=%d, bps= %d;\n",
  782. __func__, enc_cfg->enc_blk.cfg_common.sample_rate,
  783. enc_cfg->enc_blk.cfg_common.ch_cfg,
  784. enc_cfg->enc_blk.cfg_common.bits_per_sample);
  785. pr_debug("dmap:[0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x]; dev_id=0x%x\n",
  786. enc_cfg->enc_blk.cfg_common.data_map[0],
  787. enc_cfg->enc_blk.cfg_common.data_map[1],
  788. enc_cfg->enc_blk.cfg_common.data_map[2],
  789. enc_cfg->enc_blk.cfg_common.data_map[3],
  790. enc_cfg->enc_blk.cfg_common.data_map[4],
  791. enc_cfg->enc_blk.cfg_common.data_map[5],
  792. enc_cfg->enc_blk.cfg_common.data_map[6],
  793. enc_cfg->enc_blk.cfg_common.data_map[7],
  794. enc_cfg->enc_blk.cfg_common.dev_id);
  795. rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg);
  796. if (rc < 0) {
  797. pr_err("%s:Comamnd open failed\n", __func__);
  798. rc = -EINVAL;
  799. goto fail_cmd;
  800. }
  801. rc = wait_event_timeout(usc->cmd_wait,
  802. (atomic_read(&usc->cmd_state) == 0),
  803. Q6USM_TIMEOUT_JIFFIES);
  804. if (!rc) {
  805. rc = -ETIME;
  806. pr_err("%s: timeout opcode[0x%x]\n",
  807. __func__, enc_cfg->hdr.opcode);
  808. } else
  809. rc = 0;
  810. fail_cmd:
  811. if (is_allocated == 1)
  812. kfree(enc_cfg);
  813. return rc;
  814. }
  815. int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
  816. {
  817. uint32_t int_format = INVALID_FORMAT;
  818. struct usm_stream_media_format_update dec_cfg_obj;
  819. struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj;
  820. int rc = 0;
  821. uint32_t total_cfg_size = sizeof(struct usm_stream_media_format_update);
  822. uint32_t round_params_size = 0;
  823. uint8_t is_allocated = 0;
  824. if ((usc == NULL) || (us_cfg == NULL)) {
  825. pr_err("%s: wrong input", __func__);
  826. return -EINVAL;
  827. }
  828. int_format = q6usm_ext2int_format(us_cfg->format_id);
  829. if (int_format == INVALID_FORMAT) {
  830. pr_err("%s: wrong input format[%d]",
  831. __func__, us_cfg->format_id);
  832. return -EINVAL;
  833. }
  834. /* Transparent configuration data is after enc_cfg */
  835. /* Integer number of u32s is required */
  836. round_params_size = ((us_cfg->params_size + 3)/4) * 4;
  837. if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
  838. /* Dynamic allocated encdec_cfg_blk is required */
  839. /* static part use */
  840. round_params_size -= USM_MAX_CFG_DATA_SIZE;
  841. total_cfg_size += round_params_size;
  842. dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
  843. if (dec_cfg == NULL) {
  844. pr_err("%s:dec_cfg[%d] allocation failed\n",
  845. __func__, total_cfg_size);
  846. return -ENOMEM;
  847. }
  848. is_allocated = 1;
  849. } else { /* static transp_data is enough */
  850. round_params_size = 0;
  851. }
  852. q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size, true);
  853. dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE;
  854. dec_cfg->format_id = int_format;
  855. dec_cfg->cfg_size = sizeof(struct usm_cfg_common) +
  856. USM_MAX_CFG_DATA_SIZE +
  857. round_params_size;
  858. memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common),
  859. sizeof(struct usm_cfg_common));
  860. /* Transparent data copy */
  861. memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size);
  862. pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n",
  863. __func__,
  864. dec_cfg->cfg_size,
  865. us_cfg->params_size,
  866. dec_cfg->transp_data[0],
  867. dec_cfg->transp_data[1],
  868. dec_cfg->transp_data[2],
  869. dec_cfg->transp_data[3]
  870. );
  871. rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg);
  872. if (rc < 0) {
  873. pr_err("%s:Comamnd open failed\n", __func__);
  874. rc = -EINVAL;
  875. goto fail_cmd;
  876. }
  877. rc = wait_event_timeout(usc->cmd_wait,
  878. (atomic_read(&usc->cmd_state) == 0),
  879. Q6USM_TIMEOUT_JIFFIES);
  880. if (!rc) {
  881. rc = -ETIME;
  882. pr_err("%s: timeout opcode[0x%x]\n",
  883. __func__, dec_cfg->hdr.opcode);
  884. } else
  885. rc = 0;
  886. fail_cmd:
  887. if (is_allocated == 1)
  888. kfree(dec_cfg);
  889. return rc;
  890. }
  891. int q6usm_open_write(struct us_client *usc,
  892. uint32_t format)
  893. {
  894. int rc = 0;
  895. uint32_t int_format = INVALID_FORMAT;
  896. struct usm_stream_cmd_open_write open;
  897. if ((usc == NULL) || (usc->apr == NULL)) {
  898. pr_err("%s: APR handle NULL\n", __func__);
  899. return -EINVAL;
  900. }
  901. pr_debug("%s: session[%d]", __func__, usc->session);
  902. q6usm_add_hdr(usc, &open.hdr, sizeof(open), true);
  903. open.hdr.opcode = USM_STREAM_CMD_OPEN_WRITE;
  904. int_format = q6usm_ext2int_format(format);
  905. if (int_format == INVALID_FORMAT) {
  906. pr_err("%s: wrong format[%d]", __func__, format);
  907. return -EINVAL;
  908. }
  909. open.format = int_format;
  910. rc = apr_send_pkt(usc->apr, (uint32_t *) &open);
  911. if (rc < 0) {
  912. pr_err("%s:open failed op[0x%x]rc[%d]\n",
  913. __func__, open.hdr.opcode, rc);
  914. goto fail_cmd;
  915. }
  916. rc = wait_event_timeout(usc->cmd_wait,
  917. (atomic_read(&usc->cmd_state) == 0),
  918. Q6USM_TIMEOUT_JIFFIES);
  919. if (!rc) {
  920. rc = -ETIME;
  921. pr_err("%s:timeout. waited for OPEN_WRITR rc[%d]\n",
  922. __func__, rc);
  923. goto fail_cmd;
  924. } else
  925. rc = 0;
  926. fail_cmd:
  927. return rc;
  928. }
  929. int q6usm_run(struct us_client *usc, uint32_t flags,
  930. uint32_t msw_ts, uint32_t lsw_ts)
  931. {
  932. struct usm_stream_cmd_run run;
  933. int rc = 0;
  934. if ((usc == NULL) || (usc->apr == NULL)) {
  935. pr_err("%s: APR handle NULL\n", __func__);
  936. return -EINVAL;
  937. }
  938. q6usm_add_hdr(usc, &run.hdr, sizeof(run), true);
  939. run.hdr.opcode = USM_SESSION_CMD_RUN;
  940. run.flags = flags;
  941. run.msw_ts = msw_ts;
  942. run.lsw_ts = lsw_ts;
  943. rc = apr_send_pkt(usc->apr, (uint32_t *) &run);
  944. if (rc < 0) {
  945. pr_err("%s: Commmand run failed[%d]\n", __func__, rc);
  946. goto fail_cmd;
  947. }
  948. rc = wait_event_timeout(usc->cmd_wait,
  949. (atomic_read(&usc->cmd_state) == 0),
  950. Q6USM_TIMEOUT_JIFFIES);
  951. if (!rc) {
  952. rc = -ETIME;
  953. pr_err("%s: timeout. waited for run success rc[%d]\n",
  954. __func__, rc);
  955. } else
  956. rc = 0;
  957. fail_cmd:
  958. return rc;
  959. }
  960. int q6usm_read(struct us_client *usc, uint32_t read_ind)
  961. {
  962. struct usm_stream_cmd_read read;
  963. struct us_port_data *port = NULL;
  964. int rc = 0;
  965. u32 read_counter = 0;
  966. u32 loop_ind = 0;
  967. u64 buf_addr = 0;
  968. if ((usc == NULL) || (usc->apr == NULL)) {
  969. pr_err("%s: APR handle NULL\n", __func__);
  970. return -EINVAL;
  971. }
  972. port = &usc->port[OUT];
  973. if (read_ind > port->buf_cnt) {
  974. pr_err("%s: wrong read_ind[%d]\n",
  975. __func__, read_ind);
  976. return -EINVAL;
  977. }
  978. if (read_ind == port->cpu_buf) {
  979. pr_err("%s: no free region\n", __func__);
  980. return 0;
  981. }
  982. if (read_ind > port->cpu_buf) { /* 1 range */
  983. read_counter = read_ind - port->cpu_buf;
  984. } else { /* 2 ranges */
  985. read_counter = (port->buf_cnt - port->cpu_buf) + read_ind;
  986. }
  987. q6usm_add_hdr(usc, &read.hdr, sizeof(read), false);
  988. read.hdr.opcode = USM_DATA_CMD_READ;
  989. read.buf_size = port->buf_size;
  990. buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
  991. read.buf_addr_lsw = lower_32_bits(buf_addr);
  992. read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
  993. read.mem_map_handle = *((uint32_t *)(port->ext));
  994. for (loop_ind = 0; loop_ind < read_counter; ++loop_ind) {
  995. u32 temp_cpu_buf = port->cpu_buf;
  996. buf_addr = (u64)(port->phys) +
  997. port->buf_size * (port->cpu_buf);
  998. read.buf_addr_lsw = lower_32_bits(buf_addr);
  999. read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
  1000. read.seq_id = port->cpu_buf;
  1001. read.hdr.token = port->cpu_buf;
  1002. read.counter = 1;
  1003. ++(port->cpu_buf);
  1004. if (port->cpu_buf == port->buf_cnt)
  1005. port->cpu_buf = 0;
  1006. rc = apr_send_pkt(usc->apr, (uint32_t *) &read);
  1007. if (rc < 0) {
  1008. port->cpu_buf = temp_cpu_buf;
  1009. pr_err("%s:read op[0x%x]rc[%d]\n",
  1010. __func__, read.hdr.opcode, rc);
  1011. break;
  1012. }
  1013. rc = 0;
  1014. } /* bufs loop */
  1015. return rc;
  1016. }
  1017. int q6usm_write(struct us_client *usc, uint32_t write_ind)
  1018. {
  1019. int rc = 0;
  1020. struct usm_stream_cmd_write cmd_write;
  1021. struct us_port_data *port = NULL;
  1022. u32 current_dsp_buf = 0;
  1023. u64 buf_addr = 0;
  1024. if ((usc == NULL) || (usc->apr == NULL)) {
  1025. pr_err("%s: APR handle NULL\n", __func__);
  1026. return -EINVAL;
  1027. }
  1028. port = &usc->port[IN];
  1029. current_dsp_buf = port->dsp_buf;
  1030. /* free region, caused by new dsp_buf report from DSP, */
  1031. /* can be only extended */
  1032. if (port->cpu_buf >= current_dsp_buf) {
  1033. /* 2 -part free region, including empty buffer */
  1034. if ((write_ind <= port->cpu_buf) &&
  1035. (write_ind > current_dsp_buf)) {
  1036. pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
  1037. __func__, write_ind,
  1038. current_dsp_buf, port->cpu_buf);
  1039. return -EINVAL;
  1040. }
  1041. } else {
  1042. /* 1 -part free region */
  1043. if ((write_ind <= port->cpu_buf) ||
  1044. (write_ind > current_dsp_buf)) {
  1045. pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n",
  1046. __func__, write_ind,
  1047. current_dsp_buf, port->cpu_buf);
  1048. return -EINVAL;
  1049. }
  1050. }
  1051. q6usm_add_hdr(usc, &cmd_write.hdr, sizeof(cmd_write), false);
  1052. cmd_write.hdr.opcode = USM_DATA_CMD_WRITE;
  1053. cmd_write.buf_size = port->buf_size;
  1054. buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf);
  1055. cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
  1056. cmd_write.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr);
  1057. cmd_write.mem_map_handle = *((uint32_t *)(port->ext));
  1058. cmd_write.res0 = 0;
  1059. cmd_write.res1 = 0;
  1060. cmd_write.res2 = 0;
  1061. while (port->cpu_buf != write_ind) {
  1062. u32 temp_cpu_buf = port->cpu_buf;
  1063. buf_addr = (u64)(port->phys) +
  1064. port->buf_size * (port->cpu_buf);
  1065. cmd_write.buf_addr_lsw = lower_32_bits(buf_addr);
  1066. cmd_write.buf_addr_msw =
  1067. msm_audio_populate_upper_32_bits(buf_addr);
  1068. cmd_write.seq_id = port->cpu_buf;
  1069. cmd_write.hdr.token = port->cpu_buf;
  1070. ++(port->cpu_buf);
  1071. if (port->cpu_buf == port->buf_cnt)
  1072. port->cpu_buf = 0;
  1073. rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_write);
  1074. if (rc < 0) {
  1075. port->cpu_buf = temp_cpu_buf;
  1076. pr_err("%s:write op[0x%x];rc[%d];cpu_buf[%d]\n",
  1077. __func__, cmd_write.hdr.opcode,
  1078. rc, port->cpu_buf);
  1079. break;
  1080. }
  1081. rc = 0;
  1082. }
  1083. return rc;
  1084. }
  1085. bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region)
  1086. {
  1087. struct us_port_data *port = NULL;
  1088. u32 cpu_buf = 0;
  1089. if ((usc == NULL) || !free_region) {
  1090. pr_err("%s: input data wrong\n", __func__);
  1091. return false;
  1092. }
  1093. port = &usc->port[IN];
  1094. cpu_buf = port->cpu_buf + 1;
  1095. if (cpu_buf == port->buf_cnt)
  1096. cpu_buf = 0;
  1097. *free_region = port->dsp_buf;
  1098. return cpu_buf == *free_region;
  1099. }
  1100. int q6usm_cmd(struct us_client *usc, int cmd)
  1101. {
  1102. struct apr_hdr hdr;
  1103. int rc = 0;
  1104. atomic_t *state;
  1105. if ((usc == NULL) || (usc->apr == NULL)) {
  1106. pr_err("%s: APR handle NULL\n", __func__);
  1107. return -EINVAL;
  1108. }
  1109. q6usm_add_hdr(usc, &hdr, sizeof(hdr), true);
  1110. switch (cmd) {
  1111. case CMD_CLOSE:
  1112. hdr.opcode = USM_STREAM_CMD_CLOSE;
  1113. state = &usc->cmd_state;
  1114. break;
  1115. default:
  1116. pr_err("%s:Invalid format[%d]\n", __func__, cmd);
  1117. goto fail_cmd;
  1118. }
  1119. rc = apr_send_pkt(usc->apr, (uint32_t *) &hdr);
  1120. if (rc < 0) {
  1121. pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode);
  1122. goto fail_cmd;
  1123. }
  1124. rc = wait_event_timeout(usc->cmd_wait, (atomic_read(state) == 0),
  1125. Q6USM_TIMEOUT_JIFFIES);
  1126. if (!rc) {
  1127. rc = -ETIME;
  1128. pr_err("%s:timeout. waited for response opcode[0x%x]\n",
  1129. __func__, hdr.opcode);
  1130. } else
  1131. rc = 0;
  1132. fail_cmd:
  1133. return rc;
  1134. }
  1135. int q6usm_set_us_detection(struct us_client *usc,
  1136. struct usm_session_cmd_detect_info *detect_info,
  1137. uint16_t detect_info_size)
  1138. {
  1139. int rc = 0;
  1140. if ((usc == NULL) ||
  1141. (detect_info_size == 0) ||
  1142. (detect_info == NULL)) {
  1143. pr_err("%s: wrong input: usc=0x%pK, inf_size=%d; info=0x%pK",
  1144. __func__,
  1145. usc,
  1146. detect_info_size,
  1147. detect_info);
  1148. return -EINVAL;
  1149. }
  1150. q6usm_add_hdr(usc, &detect_info->hdr, detect_info_size, true);
  1151. detect_info->hdr.opcode = USM_SESSION_CMD_SIGNAL_DETECT_MODE;
  1152. rc = apr_send_pkt(usc->apr, (uint32_t *)detect_info);
  1153. if (rc < 0) {
  1154. pr_err("%s:Comamnd signal detect failed\n", __func__);
  1155. return -EINVAL;
  1156. }
  1157. rc = wait_event_timeout(usc->cmd_wait,
  1158. (atomic_read(&usc->cmd_state) == 0),
  1159. Q6USM_TIMEOUT_JIFFIES);
  1160. if (!rc) {
  1161. rc = -ETIME;
  1162. pr_err("%s: CMD_SIGNAL_DETECT_MODE: timeout=%d\n",
  1163. __func__, Q6USM_TIMEOUT_JIFFIES);
  1164. } else
  1165. rc = 0;
  1166. return rc;
  1167. }
  1168. int q6usm_set_us_stream_param(int dir, struct us_client *usc,
  1169. uint32_t module_id, uint32_t param_id, uint32_t buf_size)
  1170. {
  1171. int rc = 0;
  1172. struct usm_stream_cmd_set_param cmd_set_param;
  1173. struct us_port_data *port = NULL;
  1174. if ((usc == NULL) || (usc->apr == NULL)) {
  1175. pr_err("%s: APR handle NULL\n", __func__);
  1176. return -EINVAL;
  1177. }
  1178. port = &usc->port[dir];
  1179. q6usm_add_hdr(usc, &cmd_set_param.hdr, sizeof(cmd_set_param), true);
  1180. cmd_set_param.hdr.opcode = USM_STREAM_CMD_SET_PARAM;
  1181. cmd_set_param.buf_size = buf_size;
  1182. cmd_set_param.buf_addr_msw =
  1183. msm_audio_populate_upper_32_bits(port->param_phys);
  1184. cmd_set_param.buf_addr_lsw = lower_32_bits(port->param_phys);
  1185. cmd_set_param.mem_map_handle =
  1186. *((uint32_t *)(port->param_buf_mem_handle));
  1187. cmd_set_param.module_id = module_id;
  1188. cmd_set_param.param_id = param_id;
  1189. cmd_set_param.hdr.token = 0;
  1190. rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_set_param);
  1191. if (rc < 0) {
  1192. pr_err("%s:write op[0x%x];rc[%d]\n",
  1193. __func__, cmd_set_param.hdr.opcode, rc);
  1194. }
  1195. rc = wait_event_timeout(usc->cmd_wait,
  1196. (atomic_read(&usc->cmd_state) == 0),
  1197. Q6USM_TIMEOUT_JIFFIES);
  1198. if (!rc) {
  1199. rc = -ETIME;
  1200. pr_err("%s: CMD_SET_PARAM: timeout=%d\n",
  1201. __func__, Q6USM_TIMEOUT_JIFFIES);
  1202. } else
  1203. rc = 0;
  1204. return rc;
  1205. }
  1206. int q6usm_get_us_stream_param(int dir, struct us_client *usc,
  1207. uint32_t module_id, uint32_t param_id, uint32_t buf_size)
  1208. {
  1209. int rc = 0;
  1210. struct usm_stream_cmd_get_param cmd_get_param;
  1211. struct us_port_data *port = NULL;
  1212. if ((usc == NULL) || (usc->apr == NULL)) {
  1213. pr_err("%s: APR handle NULL\n", __func__);
  1214. return -EINVAL;
  1215. }
  1216. port = &usc->port[dir];
  1217. q6usm_add_hdr(usc, &cmd_get_param.hdr, sizeof(cmd_get_param), true);
  1218. cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
  1219. cmd_get_param.buf_size = buf_size;
  1220. cmd_get_param.buf_addr_msw =
  1221. msm_audio_populate_upper_32_bits(port->param_phys);
  1222. cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
  1223. cmd_get_param.mem_map_handle =
  1224. *((uint32_t *)(port->param_buf_mem_handle));
  1225. cmd_get_param.module_id = module_id;
  1226. cmd_get_param.param_id = param_id;
  1227. cmd_get_param.hdr.token = 0;
  1228. rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);
  1229. if (rc < 0) {
  1230. pr_err("%s:write op[0x%x];rc[%d]\n",
  1231. __func__, cmd_get_param.hdr.opcode, rc);
  1232. }
  1233. rc = wait_event_timeout(usc->cmd_wait,
  1234. (atomic_read(&usc->cmd_state) == 0),
  1235. Q6USM_TIMEOUT_JIFFIES);
  1236. if (!rc) {
  1237. rc = -ETIME;
  1238. pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
  1239. __func__, Q6USM_TIMEOUT_JIFFIES);
  1240. } else
  1241. rc = 0;
  1242. return rc;
  1243. }
  1244. int __init q6usm_init(void)
  1245. {
  1246. pr_debug("%s\n", __func__);
  1247. init_waitqueue_head(&this_mmap.cmd_wait);
  1248. memset(session, 0, sizeof(session));
  1249. return 0;
  1250. }