q6core.c
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <dsp/q6core.h>
#include <dsp/audio_cal_utils.h>
#include <dsp/apr_audio-v2.h>
#include <ipc/apr.h>

#include "adsp_err.h"

#define TIMEOUT_MS 1000
/*
 * AVS bring-up in the modem is optimized for the new
 * Sub System Restart design, so a 100 millisecond timeout
 * is sufficient to make sure the Q6 will be ready.
 */
#define Q6_READY_TIMEOUT_MS 100

enum {
        META_CAL,
        CUST_TOP_CAL,
        CORE_MAX_CAL
};

enum ver_query_status {
        VER_QUERY_UNATTEMPTED,
        VER_QUERY_UNSUPPORTED,
        VER_QUERY_SUPPORTED
};

struct q6core_avcs_ver_info {
        enum ver_query_status status;
        struct avcs_fwk_ver_info *ver_info;
};

struct q6core_str {
        struct apr_svc *core_handle_q;
        wait_queue_head_t bus_bw_req_wait;
        wait_queue_head_t cmd_req_wait;
        wait_queue_head_t avcs_fwk_ver_req_wait;
        u32 bus_bw_resp_received;
        enum cmd_flags {
                FLAG_NONE,
                FLAG_CMDRSP_LICENSE_RESULT
        } cmd_resp_received_flag;
        u32 avcs_fwk_ver_resp_received;
        struct mutex cmd_lock;
        struct mutex ver_lock;
        union {
                struct avcs_cmdrsp_get_license_validation_result
                                                cmdrsp_license_result;
        } cmd_resp_payload;
        u32 param;
        struct cal_type_data *cal_data[CORE_MAX_CAL];
        uint32_t mem_map_cal_handle;
        int32_t adsp_status;
        struct q6core_avcs_ver_info q6core_avcs_ver_info;
};

static struct q6core_str q6core_lcl;

struct generic_get_data_ {
        int valid;
        int size_in_ints;
        int ints[];
};

static struct generic_get_data_ *generic_get_data;
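
/*
 * Cache the AVCS_CMDRSP_GET_FWK_VERSION payload in
 * q6core_lcl.q6core_avcs_ver_info.ver_info. Runs from the APR callback
 * context, hence the GFP_ATOMIC allocation.
 */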
static int parse_fwk_version_info(uint32_t *payload)
{
        size_t ver_size;
        int num_services;

        pr_debug("%s: Payload info num services %d\n",
                 __func__, payload[4]);
        /*
         * payload[4] is the number of services running on the DSP.
         * Based on this info, we copy the payload into the core
         * avcs version info structure.
         */
        num_services = payload[4];
        if (num_services > VSS_MAX_AVCS_NUM_SERVICES) {
                pr_err("%s: num_services: %d greater than max services: %d\n",
                       __func__, num_services, VSS_MAX_AVCS_NUM_SERVICES);
                return -EINVAL;
        }

        /*
         * Dynamically allocate memory for all
         * the services based on num_services
         */
        ver_size = sizeof(struct avcs_get_fwk_version) +
                   num_services * sizeof(struct avs_svc_api_info);

        q6core_lcl.q6core_avcs_ver_info.ver_info =
                kzalloc(ver_size, GFP_ATOMIC);
        if (q6core_lcl.q6core_avcs_ver_info.ver_info == NULL)
                return -ENOMEM;

        memcpy(q6core_lcl.q6core_avcs_ver_info.ver_info, (uint8_t *) payload,
               ver_size);

        return 0;
}
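
/*
 * APR callback for the ADSP CORE service. Decodes basic command responses
 * and CMDRSP payloads, stashes the results in q6core_lcl and wakes up the
 * corresponding waiters.
 */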
static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
{
        uint32_t *payload1;
        int ret = 0;

        if (data == NULL) {
                pr_err("%s: data argument is null\n", __func__);
                return -EINVAL;
        }

        pr_debug("%s: core msg: payload len = %u, apr resp opcode = 0x%x\n",
                 __func__,
                 data->payload_size, data->opcode);

        switch (data->opcode) {

        case APR_BASIC_RSP_RESULT:{
                if (data->payload_size == 0) {
                        pr_err("%s: APR_BASIC_RSP_RESULT No Payload ",
                               __func__);
                        return 0;
                }

                payload1 = data->payload;

                switch (payload1[0]) {
                case AVCS_CMD_SHARED_MEM_UNMAP_REGIONS:
                        pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS status[0x%x]\n",
                                 __func__, payload1[1]);
                        q6core_lcl.bus_bw_resp_received = 1;
                        wake_up(&q6core_lcl.bus_bw_req_wait);
                        break;
                case AVCS_CMD_SHARED_MEM_MAP_REGIONS:
                        pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_MAP_REGIONS status[0x%x]\n",
                                 __func__, payload1[1]);
                        q6core_lcl.bus_bw_resp_received = 1;
                        wake_up(&q6core_lcl.bus_bw_req_wait);
                        break;
                case AVCS_CMD_REGISTER_TOPOLOGIES:
                        pr_debug("%s: Cmd = AVCS_CMD_REGISTER_TOPOLOGIES status[0x%x]\n",
                                 __func__, payload1[1]);
                        /* -ADSP status to match Linux error standard */
                        q6core_lcl.adsp_status = -payload1[1];
                        q6core_lcl.bus_bw_resp_received = 1;
                        wake_up(&q6core_lcl.bus_bw_req_wait);
                        break;
                case AVCS_CMD_DEREGISTER_TOPOLOGIES:
                        pr_debug("%s: Cmd = AVCS_CMD_DEREGISTER_TOPOLOGIES status[0x%x]\n",
                                 __func__, payload1[1]);
                        q6core_lcl.bus_bw_resp_received = 1;
                        wake_up(&q6core_lcl.bus_bw_req_wait);
                        break;
                case AVCS_CMD_GET_FWK_VERSION:
                        pr_debug("%s: Cmd = AVCS_CMD_GET_FWK_VERSION status[%s]\n",
                                 __func__, adsp_err_get_err_str(payload1[1]));
                        /* ADSP status to match Linux error standard */
                        q6core_lcl.adsp_status = -payload1[1];
                        if (payload1[1] == ADSP_EUNSUPPORTED)
                                q6core_lcl.q6core_avcs_ver_info.status =
                                        VER_QUERY_UNSUPPORTED;
                        q6core_lcl.avcs_fwk_ver_resp_received = 1;
                        wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
                        break;
                default:
                        pr_err("%s: Invalid cmd rsp[0x%x][0x%x] opcode %d\n",
                               __func__,
                               payload1[0], payload1[1], data->opcode);
                        break;
                }
                break;
        }

        case RESET_EVENTS:{
                pr_debug("%s: Reset event received in Core service\n",
                         __func__);
                /*
                 * no reset for q6core_avcs_ver_info done as
                 * the data will not change after SSR
                 */
                apr_reset(q6core_lcl.core_handle_q);
                q6core_lcl.core_handle_q = NULL;
                break;
        }
        case AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS:
                payload1 = data->payload;
                pr_debug("%s: AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS handle %d\n",
                         __func__, payload1[0]);
                q6core_lcl.mem_map_cal_handle = payload1[0];
                q6core_lcl.bus_bw_resp_received = 1;
                wake_up(&q6core_lcl.bus_bw_req_wait);
                break;
        case AVCS_CMDRSP_ADSP_EVENT_GET_STATE:
                payload1 = data->payload;
                q6core_lcl.param = payload1[0];
                pr_debug("%s: Received ADSP get state response 0x%x\n",
                         __func__, q6core_lcl.param);
                /* ensure .param is updated prior to .bus_bw_resp_received */
                wmb();
                q6core_lcl.bus_bw_resp_received = 1;
                wake_up(&q6core_lcl.bus_bw_req_wait);
                break;
        case AVCS_CMDRSP_GET_LICENSE_VALIDATION_RESULT:
                payload1 = data->payload;
                pr_debug("%s: cmd = LICENSE_VALIDATION_RESULT, result = 0x%x\n",
                         __func__, payload1[0]);
                q6core_lcl.cmd_resp_payload.cmdrsp_license_result.result
                                                                = payload1[0];
                q6core_lcl.cmd_resp_received_flag = FLAG_CMDRSP_LICENSE_RESULT;
                wake_up(&q6core_lcl.cmd_req_wait);
                break;
        case AVCS_CMDRSP_GET_FWK_VERSION:
                pr_debug("%s: Received AVCS_CMDRSP_GET_FWK_VERSION\n",
                         __func__);
                payload1 = data->payload;
                ret = parse_fwk_version_info(payload1);
                if (ret < 0) {
                        q6core_lcl.adsp_status = ret;
                        pr_err("%s: Failed to parse payload:%d\n",
                               __func__, ret);
                } else {
                        q6core_lcl.q6core_avcs_ver_info.status =
                                VER_QUERY_SUPPORTED;
                }
                q6core_lcl.avcs_fwk_ver_resp_received = 1;
                wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
                break;
        default:
                pr_err("%s: Message id from adsp core svc: 0x%x\n",
                       __func__, data->opcode);
                if (generic_get_data) {
                        generic_get_data->valid = 1;
                        generic_get_data->size_in_ints =
                                data->payload_size/sizeof(int);
                        pr_debug("callback size = %i\n",
                                 data->payload_size);
                        memcpy(generic_get_data->ints, data->payload,
                               data->payload_size);
                        q6core_lcl.bus_bw_resp_received = 1;
                        wake_up(&q6core_lcl.bus_bw_req_wait);
                        break;
                }
                break;
        }

        return 0;
}
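
/* Register with the ADSP CORE APR service if we do not hold a handle yet. */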
void ocm_core_open(void)
{
        if (q6core_lcl.core_handle_q == NULL)
                q6core_lcl.core_handle_q = apr_register("ADSP", "CORE",
                                        aprv2_core_fn_q, 0xFFFFFFFF, NULL);
        pr_debug("%s: Open_q %pK\n", __func__, q6core_lcl.core_handle_q);
        if (q6core_lcl.core_handle_q == NULL)
                pr_err("%s: Unable to register CORE\n", __func__);
}
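
/*
 * Walk the cal blocks of the given cal type and return the first block whose
 * metainfo key matches the requested license key, or NULL if none matches.
 */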
struct cal_block_data *cal_utils_get_cal_block_by_key(
                struct cal_type_data *cal_type, uint32_t key)
{
        struct list_head *ptr, *next;
        struct cal_block_data *cal_block = NULL;
        struct audio_cal_info_metainfo *metainfo;

        list_for_each_safe(ptr, next,
                        &cal_type->cal_blocks) {
                cal_block = list_entry(ptr,
                        struct cal_block_data, list);
                metainfo = (struct audio_cal_info_metainfo *)
                        cal_block->cal_info;
                if (metainfo->nKey != key) {
                        pr_debug("%s: metainfo key mismatch!!! found:%x, needed:%x\n",
                                 __func__, metainfo->nKey, key);
                } else {
                        pr_debug("%s: metainfo key match found", __func__);
                        return cal_block;
                }
        }
        return NULL;
}
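
/*
 * Send AVCS_CMD_GET_FWK_VERSION to the ADSP and block until the version
 * response (or a basic error response) arrives or TIMEOUT_MS expires.
 */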
static int q6core_send_get_avcs_fwk_ver_cmd(void)
{
        struct apr_hdr avcs_ver_cmd;
        int ret;

        avcs_ver_cmd.hdr_field =
                APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
                              APR_PKT_VER);
        avcs_ver_cmd.pkt_size = sizeof(struct apr_hdr);
        avcs_ver_cmd.src_port = 0;
        avcs_ver_cmd.dest_port = 0;
        avcs_ver_cmd.token = 0;
        avcs_ver_cmd.opcode = AVCS_CMD_GET_FWK_VERSION;

        q6core_lcl.adsp_status = 0;
        q6core_lcl.avcs_fwk_ver_resp_received = 0;

        ret = apr_send_pkt(q6core_lcl.core_handle_q,
                           (uint32_t *) &avcs_ver_cmd);
        if (ret < 0) {
                pr_err("%s: failed to send apr packet, ret=%d\n", __func__,
                       ret);
                goto done;
        }

        ret = wait_event_timeout(q6core_lcl.avcs_fwk_ver_req_wait,
                                 (q6core_lcl.avcs_fwk_ver_resp_received == 1),
                                 msecs_to_jiffies(TIMEOUT_MS));
        if (!ret) {
                pr_err("%s: wait_event timeout for AVCS fwk version info\n",
                       __func__);
                ret = -ETIMEDOUT;
                goto done;
        }

        if (q6core_lcl.adsp_status < 0) {
                /*
                 * adsp_err_get_err_str expects a positive value but we store
                 * the DSP error as negative to match the Linux error standard.
                 * Pass in the negated value so adsp_err_get_err_str returns
                 * the correct string.
                 */
                pr_err("%s: DSP returned error[%s]\n", __func__,
                       adsp_err_get_err_str(-q6core_lcl.adsp_status));
                ret = adsp_err_get_lnx_err_code(q6core_lcl.adsp_status);
                goto done;
        }

        ret = 0;
done:
        return ret;
}
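
/**
 * q6core_get_service_version -
 *       copy the cached AVCS firmware version info for a service
 *
 * @service_id: AVCS service ID, or AVCS_SERVICE_ID_ALL for all services
 * @ver_info: caller-provided buffer that receives the version info
 * @size: size of @ver_info; must match q6core_get_fwk_version_size()
 *
 * Returns 0 on success or error on failure
 */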
int q6core_get_service_version(uint32_t service_id,
                               struct avcs_fwk_ver_info *ver_info,
                               size_t size)
{
        struct avcs_fwk_ver_info *cached_ver_info = NULL;
        int i;
        uint32_t num_services;
        size_t ver_size;
        int ret;

        if (ver_info == NULL) {
                pr_err("%s: ver_info is NULL\n", __func__);
                return -EINVAL;
        }

        ret = q6core_get_fwk_version_size(service_id);
        if (ret < 0) {
                pr_err("%s: Failed to get service size for service id %d with error %d\n",
                       __func__, service_id, ret);
                return ret;
        }

        ver_size = ret;
        if (ver_size != size) {
                pr_err("%s: Expected size %zu and provided size %zu do not match\n",
                       __func__, ver_size, size);
                return -EINVAL;
        }

        cached_ver_info = q6core_lcl.q6core_avcs_ver_info.ver_info;
        num_services = cached_ver_info->avcs_fwk_version.num_services;

        if (service_id == AVCS_SERVICE_ID_ALL) {
                memcpy(ver_info, cached_ver_info, ver_size);
                return 0;
        }

        ver_info->avcs_fwk_version = cached_ver_info->avcs_fwk_version;
        for (i = 0; i < num_services; i++) {
                if (cached_ver_info->services[i].service_id == service_id) {
                        ver_info->services[0] = cached_ver_info->services[i];
                        return 0;
                }
        }
        pr_err("%s: No service matching service ID %d\n", __func__, service_id);
        return -EINVAL;
}
EXPORT_SYMBOL(q6core_get_service_version);
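
/**
 * q6core_get_fwk_version_size -
 *       query (and cache) the AVCS framework version and report the
 *       buffer size needed for q6core_get_service_version()
 *
 * @service_id: AVCS service ID, or AVCS_SERVICE_ID_ALL for all services
 *
 * Returns the required size in bytes on success or error on failure
 */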
size_t q6core_get_fwk_version_size(uint32_t service_id)
{
        int ret = 0;
        uint32_t num_services;

        mutex_lock(&(q6core_lcl.ver_lock));
        pr_debug("%s: q6core_avcs_ver_info.status(%d)\n", __func__,
                 q6core_lcl.q6core_avcs_ver_info.status);

        switch (q6core_lcl.q6core_avcs_ver_info.status) {
        case VER_QUERY_SUPPORTED:
                pr_debug("%s: AVCS FWK version query already attempted\n",
                         __func__);
                break;
        case VER_QUERY_UNSUPPORTED:
                ret = -EOPNOTSUPP;
                break;
        case VER_QUERY_UNATTEMPTED:
                pr_debug("%s: Attempting AVCS FWK version query\n", __func__);
                if (q6core_is_adsp_ready()) {
                        ret = q6core_send_get_avcs_fwk_ver_cmd();
                } else {
                        pr_err("%s: ADSP is not ready to query version\n",
                               __func__);
                        ret = -ENODEV;
                }
                break;
        default:
                pr_err("%s: Invalid version query status %d\n", __func__,
                       q6core_lcl.q6core_avcs_ver_info.status);
                ret = -EINVAL;
                break;
        }
        mutex_unlock(&(q6core_lcl.ver_lock));

        if (ret)
                goto done;

        if (q6core_lcl.q6core_avcs_ver_info.ver_info != NULL) {
                num_services = q6core_lcl.q6core_avcs_ver_info.ver_info
                                        ->avcs_fwk_version.num_services;
        } else {
                pr_err("%s: ver_info is NULL\n", __func__);
                ret = -EINVAL;
                goto done;
        }

        ret = sizeof(struct avcs_get_fwk_version);
        if (service_id == AVCS_SERVICE_ID_ALL)
                ret += num_services * sizeof(struct avs_svc_api_info);
        else
                ret += sizeof(struct avs_svc_api_info);
done:
        return ret;
}
EXPORT_SYMBOL(q6core_get_fwk_version_size);

/**
 * core_set_license -
 *       command to set license for module
 *
 * @key: license key hash
 * @module_id: DSP Module ID
 *
 * Returns 0 on success or error on failure
 */
int32_t core_set_license(uint32_t key, uint32_t module_id)
{
        struct avcs_cmd_set_license *cmd_setl = NULL;
        struct cal_block_data *cal_block = NULL;
        int rc = 0, packet_size = 0;

        pr_debug("%s: key:0x%x, id:0x%x\n", __func__, key, module_id);

        mutex_lock(&(q6core_lcl.cmd_lock));
        if (q6core_lcl.cal_data[META_CAL] == NULL) {
                pr_err("%s: cal_data not initialized yet!!\n", __func__);
                rc = -EINVAL;
                goto cmd_unlock;
        }

        mutex_lock(&((q6core_lcl.cal_data[META_CAL])->lock));
        cal_block = cal_utils_get_cal_block_by_key(
                        q6core_lcl.cal_data[META_CAL], key);
        if (cal_block == NULL ||
            cal_block->cal_data.kvaddr == NULL ||
            cal_block->cal_data.size <= 0) {
                pr_err("%s: Invalid cal block to send", __func__);
                rc = -EINVAL;
                goto cal_data_unlock;
        }

        packet_size = sizeof(struct avcs_cmd_set_license) +
                      cal_block->cal_data.size;
        /* round up total packet_size to next 4 byte boundary */
        packet_size = ((packet_size + 0x3) >> 2) << 2;

        cmd_setl = kzalloc(packet_size, GFP_KERNEL);
        if (cmd_setl == NULL) {
                rc = -ENOMEM;
                goto cal_data_unlock;
        }

        ocm_core_open();
        if (q6core_lcl.core_handle_q == NULL) {
                pr_err("%s: apr registration for CORE failed\n", __func__);
                rc = -ENODEV;
                goto fail_cmd;
        }

        cmd_setl->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
                                APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
        cmd_setl->hdr.pkt_size = packet_size;
        cmd_setl->hdr.src_port = 0;
        cmd_setl->hdr.dest_port = 0;
        cmd_setl->hdr.token = 0;
        cmd_setl->hdr.opcode = AVCS_CMD_SET_LICENSE;
        cmd_setl->id = module_id;
        cmd_setl->overwrite = 1;
        cmd_setl->size = cal_block->cal_data.size;
        memcpy((uint8_t *)cmd_setl + sizeof(struct avcs_cmd_set_license),
               cal_block->cal_data.kvaddr,
               cal_block->cal_data.size);
        pr_info("%s: Set license opcode=0x%x, id =0x%x, size = %d\n",
                __func__, cmd_setl->hdr.opcode,
                cmd_setl->id, cmd_setl->size);
        rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)cmd_setl);
        if (rc < 0)
                pr_err("%s: SET_LICENSE failed op[0x%x]rc[%d]\n",
                       __func__, cmd_setl->hdr.opcode, rc);

fail_cmd:
        kfree(cmd_setl);
cal_data_unlock:
        mutex_unlock(&((q6core_lcl.cal_data[META_CAL])->lock));
cmd_unlock:
        mutex_unlock(&(q6core_lcl.cmd_lock));

        return rc;
}
EXPORT_SYMBOL(core_set_license);

/**
 * core_get_license_status -
 *       command to retrieve license status for module
 *
 * @module_id: DSP Module ID
 *
 * Returns 0 on success or error on failure
 */
int32_t core_get_license_status(uint32_t module_id)
{
        struct avcs_cmd_get_license_validation_result get_lvr_cmd;
        int ret = 0;

        pr_debug("%s: module_id 0x%x", __func__, module_id);

        mutex_lock(&(q6core_lcl.cmd_lock));
        ocm_core_open();
        if (q6core_lcl.core_handle_q == NULL) {
                pr_err("%s: apr registration for CORE failed\n", __func__);
                ret = -ENODEV;
                goto fail_cmd;
        }

        get_lvr_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
        get_lvr_cmd.hdr.pkt_size =
                sizeof(struct avcs_cmd_get_license_validation_result);
        get_lvr_cmd.hdr.src_port = 0;
        get_lvr_cmd.hdr.dest_port = 0;
        get_lvr_cmd.hdr.token = 0;
        get_lvr_cmd.hdr.opcode = AVCS_CMD_GET_LICENSE_VALIDATION_RESULT;
        get_lvr_cmd.id = module_id;

        ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &get_lvr_cmd);
        if (ret < 0) {
                pr_err("%s: license_validation request failed, err %d\n",
                       __func__, ret);
                ret = -EREMOTE;
                goto fail_cmd;
        }

        q6core_lcl.cmd_resp_received_flag &= ~(FLAG_CMDRSP_LICENSE_RESULT);
        mutex_unlock(&(q6core_lcl.cmd_lock));
        ret = wait_event_timeout(q6core_lcl.cmd_req_wait,
                                 (q6core_lcl.cmd_resp_received_flag ==
                                  FLAG_CMDRSP_LICENSE_RESULT),
                                 msecs_to_jiffies(TIMEOUT_MS));
        mutex_lock(&(q6core_lcl.cmd_lock));
        if (!ret) {
                pr_err("%s: wait_event timeout for CMDRSP_LICENSE_RESULT\n",
                       __func__);
                ret = -ETIME;
                goto fail_cmd;
        }
        q6core_lcl.cmd_resp_received_flag &= ~(FLAG_CMDRSP_LICENSE_RESULT);
        ret = q6core_lcl.cmd_resp_payload.cmdrsp_license_result.result;

fail_cmd:
        mutex_unlock(&(q6core_lcl.cmd_lock));
        pr_info("%s: cmdrsp_license_result.result = 0x%x for module 0x%x\n",
                __func__, ret, module_id);
        return ret;
}
EXPORT_SYMBOL(core_get_license_status);

/**
 * core_set_dolby_manufacturer_id -
 *       command to set dolby manufacturer id
 *
 * @manufacturer_id: Dolby manufacturer id
 *
 * Returns 0 on success or error on failure
 */
uint32_t core_set_dolby_manufacturer_id(int manufacturer_id)
{
        struct adsp_dolby_manufacturer_id payload;
        int rc = 0;

        pr_debug("%s: manufacturer_id :%d\n", __func__, manufacturer_id);

        mutex_lock(&(q6core_lcl.cmd_lock));
        ocm_core_open();
        if (q6core_lcl.core_handle_q) {
                payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
                                APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
                payload.hdr.pkt_size =
                        sizeof(struct adsp_dolby_manufacturer_id);
                payload.hdr.src_port = 0;
                payload.hdr.dest_port = 0;
                payload.hdr.token = 0;
                payload.hdr.opcode = ADSP_CMD_SET_DOLBY_MANUFACTURER_ID;
                payload.manufacturer_id = manufacturer_id;
                pr_debug("%s: Send Dolby security opcode=0x%x manufacturer ID = %d\n",
                         __func__,
                         payload.hdr.opcode, payload.manufacturer_id);
                rc = apr_send_pkt(q6core_lcl.core_handle_q,
                                  (uint32_t *)&payload);
                if (rc < 0)
                        pr_err("%s: SET_DOLBY_MANUFACTURER_ID failed op[0x%x]rc[%d]\n",
                               __func__, payload.hdr.opcode, rc);
        }
        mutex_unlock(&(q6core_lcl.cmd_lock));
        return rc;
}
EXPORT_SYMBOL(core_set_dolby_manufacturer_id);

/**
 * q6core_is_adsp_ready - check adsp ready status
 *
 * Returns true if adsp is ready otherwise returns false
 */
bool q6core_is_adsp_ready(void)
{
        int rc = 0;
        bool ret = false;
        struct apr_hdr hdr;

        pr_debug("%s: enter\n", __func__);
        memset(&hdr, 0, sizeof(hdr));
        hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                      APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
        hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, 0);
        hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;

        mutex_lock(&(q6core_lcl.cmd_lock));
        ocm_core_open();
        if (q6core_lcl.core_handle_q) {
                q6core_lcl.bus_bw_resp_received = 0;
                rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&hdr);
                if (rc < 0) {
                        pr_err("%s: Get ADSP state APR packet send event %d\n",
                               __func__, rc);
                        goto bail;
                }

                rc = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
                                        (q6core_lcl.bus_bw_resp_received == 1),
                                        msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
                if (rc > 0 && q6core_lcl.bus_bw_resp_received) {
                        /* ensure to read updated param by callback thread */
                        rmb();
                        ret = !!q6core_lcl.param;
                }
        }
bail:
        pr_debug("%s: leave, rc %d, adsp ready %d\n", __func__, rc, ret);
        mutex_unlock(&(q6core_lcl.cmd_lock));
        return ret;
}
EXPORT_SYMBOL(q6core_is_adsp_ready);
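
/*
 * Map the given physical buffers into the ADSP via
 * AVCS_CMD_SHARED_MEM_MAP_REGIONS and wait for the response; on success the
 * handle returned by the ADSP is stored in *map_handle.
 */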
static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
                        uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle)
{
        struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
        struct avs_shared_map_region_payload *mregions = NULL;
        void *mmap_region_cmd = NULL;
        void *payload = NULL;
        int ret = 0;
        int i = 0;
        int cmd_size = 0;

        cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
                        + sizeof(struct avs_shared_map_region_payload)
                        * bufcnt;

        mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
        if (mmap_region_cmd == NULL)
                return -ENOMEM;

        mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
        mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                                APR_HDR_LEN(APR_HDR_SIZE),
                                                APR_PKT_VER);
        mmap_regions->hdr.pkt_size = cmd_size;
        mmap_regions->hdr.src_port = 0;
        mmap_regions->hdr.dest_port = 0;
        mmap_regions->hdr.token = 0;
        mmap_regions->hdr.opcode = AVCS_CMD_SHARED_MEM_MAP_REGIONS;
        mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
        mmap_regions->num_regions = bufcnt & 0x00ff;
        mmap_regions->property_flag = 0x00;

        payload = ((u8 *) mmap_region_cmd +
                   sizeof(struct avs_cmd_shared_mem_map_regions));
        mregions = (struct avs_shared_map_region_payload *)payload;

        for (i = 0; i < bufcnt; i++) {
                mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
                mregions->shm_addr_msw =
                        msm_audio_populate_upper_32_bits(buf_add[i]);
                mregions->mem_size_bytes = bufsz[i];
                ++mregions;
        }

        pr_debug("%s: sending memory map, addr %pK, size %d, bufcnt = %d\n",
                 __func__, buf_add, bufsz[0], mmap_regions->num_regions);

        *map_handle = 0;
        q6core_lcl.bus_bw_resp_received = 0;
        ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
                           mmap_regions);
        if (ret < 0) {
                pr_err("%s: mmap regions failed %d\n",
                       __func__, ret);
                ret = -EINVAL;
                goto done;
        }

        ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
                                 (q6core_lcl.bus_bw_resp_received == 1),
                                 msecs_to_jiffies(TIMEOUT_MS));
        if (!ret) {
                pr_err("%s: timeout. waited for memory map\n", __func__);
                ret = -ETIME;
                goto done;
        }

        *map_handle = q6core_lcl.mem_map_cal_handle;
done:
        kfree(mmap_region_cmd);
        return ret;
}
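
/*
 * Unmap a previously mapped shared memory region by its map handle and wait
 * for the ADSP acknowledgement.
 */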
static int q6core_memory_unmap_regions(uint32_t mem_map_handle)
{
        struct avs_cmd_shared_mem_unmap_regions unmap_regions;
        int ret = 0;

        memset(&unmap_regions, 0, sizeof(unmap_regions));
        unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
        unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
        unmap_regions.hdr.src_svc = APR_SVC_ADSP_CORE;
        unmap_regions.hdr.src_domain = APR_DOMAIN_APPS;
        unmap_regions.hdr.src_port = 0;
        unmap_regions.hdr.dest_svc = APR_SVC_ADSP_CORE;
        unmap_regions.hdr.dest_domain = APR_DOMAIN_ADSP;
        unmap_regions.hdr.dest_port = 0;
        unmap_regions.hdr.token = 0;
        unmap_regions.hdr.opcode = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS;
        unmap_regions.mem_map_handle = mem_map_handle;

        q6core_lcl.bus_bw_resp_received = 0;

        pr_debug("%s: unmap regions map handle %d\n",
                 __func__, mem_map_handle);

        ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
                           &unmap_regions);
        if (ret < 0) {
                pr_err("%s: unmap regions failed %d\n",
                       __func__, ret);
                ret = -EINVAL;
                goto done;
        }

        ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
                                 (q6core_lcl.bus_bw_resp_received == 1),
                                 msecs_to_jiffies(TIMEOUT_MS));
        if (!ret) {
                pr_err("%s: timeout. waited for memory_unmap\n",
                       __func__);
                ret = -ETIME;
                goto done;
        }
done:
        return ret;
}
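
/*
 * Ask the ADSP to drop all previously registered custom topologies before a
 * new set is registered.
 */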
static int q6core_dereg_all_custom_topologies(void)
{
        int ret = 0;
        struct avcs_cmd_deregister_topologies dereg_top;

        memset(&dereg_top, 0, sizeof(dereg_top));
        dereg_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
        dereg_top.hdr.pkt_size = sizeof(dereg_top);
        dereg_top.hdr.src_svc = APR_SVC_ADSP_CORE;
        dereg_top.hdr.src_domain = APR_DOMAIN_APPS;
        dereg_top.hdr.src_port = 0;
        dereg_top.hdr.dest_svc = APR_SVC_ADSP_CORE;
        dereg_top.hdr.dest_domain = APR_DOMAIN_ADSP;
        dereg_top.hdr.dest_port = 0;
        dereg_top.hdr.token = 0;
        dereg_top.hdr.opcode = AVCS_CMD_DEREGISTER_TOPOLOGIES;
        dereg_top.payload_addr_lsw = 0;
        dereg_top.payload_addr_msw = 0;
        dereg_top.mem_map_handle = 0;
        dereg_top.payload_size = 0;
        dereg_top.mode = AVCS_MODE_DEREGISTER_ALL_CUSTOM_TOPOLOGIES;

        q6core_lcl.bus_bw_resp_received = 0;

        pr_debug("%s: Deregister topologies mode %d\n",
                 __func__, dereg_top.mode);

        ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &dereg_top);
        if (ret < 0) {
                pr_err("%s: Deregister topologies failed %d\n",
                       __func__, ret);
                goto done;
        }

        ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
                                 (q6core_lcl.bus_bw_resp_received == 1),
                                 msecs_to_jiffies(TIMEOUT_MS));
        if (!ret) {
                pr_err("%s: wait_event timeout for Deregister topologies\n",
                       __func__);
                goto done;
        }
done:
        return ret;
}
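
/*
 * Push the CUST_TOP_CAL calibration block to the ADSP: deregister any stale
 * topologies, map the cal memory, send AVCS_CMD_REGISTER_TOPOLOGIES and
 * unmap the region again once the ADSP has responded.
 */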
static int q6core_send_custom_topologies(void)
{
        int ret = 0;
        int ret2 = 0;
        struct cal_block_data *cal_block = NULL;
        struct avcs_cmd_register_topologies reg_top;

        if (!q6core_is_adsp_ready()) {
                pr_err("%s: ADSP is not ready!\n", __func__);
                return -ENODEV;
        }

        memset(&reg_top, 0, sizeof(reg_top));
        mutex_lock(&q6core_lcl.cal_data[CUST_TOP_CAL]->lock);
        mutex_lock(&q6core_lcl.cmd_lock);

        cal_block = cal_utils_get_only_cal_block(
                        q6core_lcl.cal_data[CUST_TOP_CAL]);
        if (cal_block == NULL) {
                pr_debug("%s: cal block is NULL!\n", __func__);
                goto unlock;
        }
        if (cal_block->cal_data.size <= 0) {
                pr_debug("%s: cal size is %zd not sending\n",
                         __func__, cal_block->cal_data.size);
                goto unlock;
        }

        q6core_dereg_all_custom_topologies();

        ret = q6core_map_memory_regions(&cal_block->cal_data.paddr, 0,
                        (uint32_t *)&cal_block->map_data.map_size, 1,
                        &cal_block->map_data.q6map_handle);
        /*
         * q6core_map_memory_regions() returns a negative errno on failure
         * and a positive wait_event_timeout() remainder on success, so
         * only treat negative values as an error.
         */
        if (ret < 0) {
                pr_err("%s: q6core_map_memory_regions failed\n", __func__);
                goto unlock;
        }
        reg_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
                                APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
        reg_top.hdr.pkt_size = sizeof(reg_top);
        reg_top.hdr.src_svc = APR_SVC_ADSP_CORE;
        reg_top.hdr.src_domain = APR_DOMAIN_APPS;
        reg_top.hdr.src_port = 0;
        reg_top.hdr.dest_svc = APR_SVC_ADSP_CORE;
        reg_top.hdr.dest_domain = APR_DOMAIN_ADSP;
        reg_top.hdr.dest_port = 0;
        reg_top.hdr.token = 0;
        reg_top.hdr.opcode = AVCS_CMD_REGISTER_TOPOLOGIES;
        reg_top.payload_addr_lsw =
                lower_32_bits(cal_block->cal_data.paddr);
        reg_top.payload_addr_msw =
                msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
        reg_top.mem_map_handle = cal_block->map_data.q6map_handle;
        reg_top.payload_size = cal_block->cal_data.size;

        q6core_lcl.adsp_status = 0;
        q6core_lcl.bus_bw_resp_received = 0;

        pr_debug("%s: Register topologies addr %pK, size %zd, map handle %d\n",
                 __func__, &cal_block->cal_data.paddr, cal_block->cal_data.size,
                 cal_block->map_data.q6map_handle);

        ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &reg_top);
        if (ret < 0) {
                pr_err("%s: Register topologies failed %d\n",
                       __func__, ret);
                goto unmap;
        }

        ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
                                 (q6core_lcl.bus_bw_resp_received == 1),
                                 msecs_to_jiffies(TIMEOUT_MS));
        if (!ret) {
                pr_err("%s: wait_event timeout for Register topologies\n",
                       __func__);
                goto unmap;
        }

        if (q6core_lcl.adsp_status < 0)
                ret = q6core_lcl.adsp_status;
unmap:
        ret2 = q6core_memory_unmap_regions(cal_block->map_data.q6map_handle);
        /* as above, q6core_memory_unmap_regions() fails with a negative errno */
        if (ret2 < 0) {
                pr_err("%s: q6core_memory_unmap_regions failed for map handle %d\n",
                       __func__, cal_block->map_data.q6map_handle);
                ret = ret2;
                goto unlock;
        }
unlock:
        mutex_unlock(&q6core_lcl.cmd_lock);
        mutex_unlock(&q6core_lcl.cal_data[CUST_TOP_CAL]->lock);

        return ret;
}

static int get_cal_type_index(int32_t cal_type)
{
        int ret = -EINVAL;

        switch (cal_type) {
        case AUDIO_CORE_METAINFO_CAL_TYPE:
                ret = META_CAL;
                break;
        case CORE_CUSTOM_TOPOLOGIES_CAL_TYPE:
                ret = CUST_TOP_CAL;
                break;
        default:
                pr_err("%s: invalid cal type %d!\n", __func__, cal_type);
        }
        return ret;
}
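
/*
 * cal_utils callbacks for the CORE cal types: allocate, free and set
 * calibration data pushed from userspace. Setting CUST_TOP_CAL additionally
 * triggers q6core_send_custom_topologies().
 */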
static int q6core_alloc_cal(int32_t cal_type,
                            size_t data_size, void *data)
{
        int ret = 0;
        int cal_index;

        cal_index = get_cal_type_index(cal_type);
        if (cal_index < 0) {
                pr_err("%s: could not get cal index %d!\n",
                       __func__, cal_index);
                ret = -EINVAL;
                goto done;
        }

        ret = cal_utils_alloc_cal(data_size, data,
                                  q6core_lcl.cal_data[cal_index], 0, NULL);
        if (ret < 0) {
                pr_err("%s: cal_utils_alloc_block failed, ret = %d, cal type = %d!\n",
                       __func__, ret, cal_type);
                goto done;
        }
done:
        return ret;
}

static int q6core_dealloc_cal(int32_t cal_type,
                              size_t data_size, void *data)
{
        int ret = 0;
        int cal_index;

        cal_index = get_cal_type_index(cal_type);
        if (cal_index < 0) {
                pr_err("%s: could not get cal index %d!\n",
                       __func__, cal_index);
                ret = -EINVAL;
                goto done;
        }

        ret = cal_utils_dealloc_cal(data_size, data,
                                    q6core_lcl.cal_data[cal_index]);
        if (ret < 0) {
                pr_err("%s: cal_utils_dealloc_block failed, ret = %d, cal type = %d!\n",
                       __func__, ret, cal_type);
                goto done;
        }
done:
        return ret;
}

static int q6core_set_cal(int32_t cal_type,
                          size_t data_size, void *data)
{
        int ret = 0;
        int cal_index;

        cal_index = get_cal_type_index(cal_type);
        if (cal_index < 0) {
                pr_err("%s: could not get cal index %d!\n",
                       __func__, cal_index);
                ret = -EINVAL;
                goto done;
        }

        ret = cal_utils_set_cal(data_size, data,
                                q6core_lcl.cal_data[cal_index], 0, NULL);
        if (ret < 0) {
                pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
                       __func__, ret, cal_type);
                goto done;
        }

        if (cal_index == CUST_TOP_CAL)
                ret = q6core_send_custom_topologies();
done:
        return ret;
}

static void q6core_delete_cal_data(void)
{
        pr_debug("%s:\n", __func__);
        cal_utils_destroy_cal_types(CORE_MAX_CAL, q6core_lcl.cal_data);
}
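
/* Register the META_CAL and CUST_TOP_CAL cal types with cal_utils. */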
static int q6core_init_cal_data(void)
{
        int ret = 0;
        struct cal_type_info cal_type_info[] = {
                {{AUDIO_CORE_METAINFO_CAL_TYPE,
                 {q6core_alloc_cal, q6core_dealloc_cal, NULL,
                  q6core_set_cal, NULL, NULL} },
                 {NULL, NULL, cal_utils_match_buf_num} },

                {{CORE_CUSTOM_TOPOLOGIES_CAL_TYPE,
                 {q6core_alloc_cal, q6core_dealloc_cal, NULL,
                  q6core_set_cal, NULL, NULL} },
                 {NULL, NULL, cal_utils_match_buf_num} }
        };

        pr_debug("%s:\n", __func__);

        ret = cal_utils_create_cal_types(CORE_MAX_CAL,
                                         q6core_lcl.cal_data, cal_type_info);
        if (ret < 0) {
                pr_err("%s: could not create cal type!\n",
                       __func__);
                goto err;
        }

        return ret;
err:
        q6core_delete_cal_data();
        return ret;
}

int __init core_init(void)
{
        memset(&q6core_lcl, 0, sizeof(struct q6core_str));
        init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
        init_waitqueue_head(&q6core_lcl.cmd_req_wait);
        init_waitqueue_head(&q6core_lcl.avcs_fwk_ver_req_wait);
        q6core_lcl.cmd_resp_received_flag = FLAG_NONE;
        mutex_init(&q6core_lcl.cmd_lock);
        mutex_init(&q6core_lcl.ver_lock);

        q6core_init_cal_data();

        return 0;
}

void core_exit(void)
{
        mutex_destroy(&q6core_lcl.cmd_lock);
        mutex_destroy(&q6core_lcl.ver_lock);
        q6core_delete_cal_data();
}

MODULE_DESCRIPTION("ADSP core driver");
MODULE_LICENSE("GPL v2");