q6core.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/delay.h>
#include <dsp/q6core.h>
#include <dsp/audio_cal_utils.h>
#include <dsp/apr_audio-v2.h>
#include <soc/snd_event.h>
#include <ipc/apr.h>
#include "adsp_err.h"

#define TIMEOUT_MS 1000
/*
 * AVS bring up in the modem is optimized for the new
 * Sub System Restart design and 100 milliseconds timeout
 * is sufficient to make sure the Q6 will be ready.
 */
#define Q6_READY_TIMEOUT_MS 100

#define ADSP_STATE_READY_TIMEOUT_MS 3000

#define APR_ENOTREADY 10
#define MEMPOOL_ID_MASK 0xFF
#define MDF_MAP_TOKEN 0xF000

enum {
	META_CAL,
	CUST_TOP_CAL,
	CORE_MAX_CAL
};

enum ver_query_status {
	VER_QUERY_UNATTEMPTED,
	VER_QUERY_UNSUPPORTED,
	VER_QUERY_SUPPORTED
};

struct q6core_avcs_ver_info {
	enum ver_query_status status;
	struct avcs_fwk_ver_info *ver_info;
};

struct q6core_str {
	struct apr_svc *core_handle_q;
	wait_queue_head_t bus_bw_req_wait;
	wait_queue_head_t mdf_map_resp_wait;
	wait_queue_head_t cmd_req_wait;
	wait_queue_head_t avcs_fwk_ver_req_wait;
	wait_queue_head_t lpass_npa_rsc_wait;
	u32 lpass_npa_rsc_rsp_rcvd;
	u32 bus_bw_resp_received;
	u32 mdf_map_resp_received;
	enum cmd_flags {
		FLAG_NONE,
		FLAG_CMDRSP_LICENSE_RESULT
	} cmd_resp_received_flag;
	u32 avcs_fwk_ver_resp_received;
	struct mutex cmd_lock;
	struct mutex ver_lock;
	union {
		struct avcs_cmdrsp_get_license_validation_result
						cmdrsp_license_result;
	} cmd_resp_payload;
	u32 param;
	struct cal_type_data *cal_data[CORE_MAX_CAL];
	uint32_t mem_map_cal_handle;
	uint32_t mdf_mem_map_cal_handle;
	uint32_t npa_client_handle;
	int32_t adsp_status;
	int32_t avs_state;
	struct q6core_avcs_ver_info q6core_avcs_ver_info;
};

static struct q6core_str q6core_lcl;

struct generic_get_data_ {
	int valid;
	int size_in_ints;
	int ints[];
};
static struct generic_get_data_ *generic_get_data;

static DEFINE_MUTEX(kset_lock);
static struct kset *audio_uevent_kset;

static int q6core_init_uevent_kset(void)
{
	int ret = 0;

	mutex_lock(&kset_lock);
	if (audio_uevent_kset)
		goto done;

	/* Create a kset under /sys/kernel/ */
	audio_uevent_kset = kset_create_and_add("q6audio", NULL, kernel_kobj);
	if (!audio_uevent_kset) {
		pr_err("%s: error creating uevent kernel set", __func__);
		ret = -EINVAL;
	}
done:
	mutex_unlock(&kset_lock);
	return ret;
}

static void q6core_destroy_uevent_kset(void)
{
	if (audio_uevent_kset) {
		kset_unregister(audio_uevent_kset);
		audio_uevent_kset = NULL;
	}
}

/**
 * q6core_init_uevent_data - initialize kernel object required to send uevents.
 *
 * @uevent_data: uevent data (dynamically allocated memory).
 * @name: name of the kernel object.
 *
 * Returns 0 on success or error otherwise.
 */
int q6core_init_uevent_data(struct audio_uevent_data *uevent_data, char *name)
{
	int ret = -EINVAL;

	if (!uevent_data || !name)
		return ret;

	ret = q6core_init_uevent_kset();
	if (ret)
		return ret;

	/* Set kset for kobject before initializing the kobject */
	uevent_data->kobj.kset = audio_uevent_kset;

	/* Initialize kobject and add it to kernel */
	ret = kobject_init_and_add(&uevent_data->kobj, &uevent_data->ktype,
					NULL, "%s", name);
	if (ret) {
		pr_err("%s: error initializing uevent kernel object: %d",
			__func__, ret);
		kobject_put(&uevent_data->kobj);
		return ret;
	}

	/* Send kobject add event to the system */
	kobject_uevent(&uevent_data->kobj, KOBJ_ADD);

	return ret;
}
EXPORT_SYMBOL(q6core_init_uevent_data);

/**
 * q6core_destroy_uevent_data - destroy kernel object.
 *
 * @uevent_data: uevent data.
 */
void q6core_destroy_uevent_data(struct audio_uevent_data *uevent_data)
{
	if (uevent_data)
		kobject_put(&uevent_data->kobj);
}
EXPORT_SYMBOL(q6core_destroy_uevent_data);

/**
 * q6core_send_uevent - send uevent to userspace.
 *
 * @uevent_data: uevent data.
 * @event: event to send.
 *
 * Returns 0 on success or error otherwise.
 */
int q6core_send_uevent(struct audio_uevent_data *uevent_data, char *event)
{
	char *env[] = { event, NULL };

	if (!event || !uevent_data)
		return -EINVAL;

	return kobject_uevent_env(&uevent_data->kobj, KOBJ_CHANGE, env);
}
EXPORT_SYMBOL(q6core_send_uevent);
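/*
 * Usage sketch (not part of this driver, kept out of the build with #if 0):
 * a minimal example of the uevent helper lifecycle above. The names
 * my_audio_uevent_data, my_probe and my_remove are hypothetical; the flow
 * assumes struct audio_uevent_data and its ktype come from dsp/q6core.h,
 * with the final kfree done by the caller-supplied ktype release callback.
 */
#if 0
static struct audio_uevent_data *my_audio_uevent_data;

static int my_probe(void)
{
	int ret;

	my_audio_uevent_data = kzalloc(sizeof(*my_audio_uevent_data),
				       GFP_KERNEL);
	if (!my_audio_uevent_data)
		return -ENOMEM;

	/* Creates /sys/kernel/q6audio/my_node and emits KOBJ_ADD */
	ret = q6core_init_uevent_data(my_audio_uevent_data, "my_node");
	if (ret)
		return ret;

	/* Notify userspace listeners of a state change */
	return q6core_send_uevent(my_audio_uevent_data, "STATE=ONLINE");
}

static void my_remove(void)
{
	/* kobject_put inside; memory is released via the ktype release */
	q6core_destroy_uevent_data(my_audio_uevent_data);
}
#endif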
static int parse_fwk_version_info(uint32_t *payload, uint16_t payload_size)
{
	size_t ver_size;
	int num_services;

	/*
	 * payload[4] is the number of services running on DSP.
	 * Based on this info, we copy the payload into the core
	 * avcs version info structure. Validate the payload size
	 * before dereferencing it.
	 */
	if (payload_size < 5 * sizeof(uint32_t)) {
		pr_err("%s: payload has invalid size %d\n",
			__func__, payload_size);
		return -EINVAL;
	}
	num_services = payload[4];

	pr_debug("%s: Payload info num services %d\n",
		 __func__, num_services);

	if (num_services > VSS_MAX_AVCS_NUM_SERVICES) {
		pr_err("%s: num_services: %d greater than max services: %d\n",
			__func__, num_services, VSS_MAX_AVCS_NUM_SERVICES);
		return -EINVAL;
	}

	/*
	 * Dynamically allocate memory for all
	 * the services based on num_services
	 */
	ver_size = sizeof(struct avcs_get_fwk_version) +
			num_services * sizeof(struct avs_svc_api_info);

	if (payload_size < ver_size) {
		pr_err("%s: payload has invalid size %d, expected size %zu\n",
			__func__, payload_size, ver_size);
		return -EINVAL;
	}

	q6core_lcl.q6core_avcs_ver_info.ver_info =
		kzalloc(ver_size, GFP_ATOMIC);
	if (q6core_lcl.q6core_avcs_ver_info.ver_info == NULL)
		return -ENOMEM;

	memcpy(q6core_lcl.q6core_avcs_ver_info.ver_info, (uint8_t *) payload,
		ver_size);
	return 0;
}
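/*
 * Layout sketch of the AVCS_CMDRSP_GET_FWK_VERSION payload that
 * parse_fwk_version_info() validates above, inferred from the offsets used
 * in this file (the field names are illustrative, not the exact
 * definitions from apr_audio-v2.h):
 *
 *	payload[0..3]  fixed avcs_get_fwk_version header words
 *	payload[4]     num_services (<= VSS_MAX_AVCS_NUM_SERVICES)
 *	followed by    struct avs_svc_api_info services[num_services]
 *
 * Hence the two-step size check: first 5 * sizeof(uint32_t) bytes so that
 * payload[4] may be read, then the full computed ver_size before memcpy.
 */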
static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
{
	uint32_t *payload1;
	int ret = 0;

	if (data == NULL) {
		pr_err("%s: data argument is null\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: core msg: payload len = %u, apr resp opcode = 0x%x\n",
		__func__,
		data->payload_size, data->opcode);

	switch (data->opcode) {

	case APR_BASIC_RSP_RESULT:{

		if (data->payload_size == 0) {
			pr_err("%s: APR_BASIC_RSP_RESULT No Payload ",
					__func__);
			return 0;
		}

		payload1 = data->payload;

		if (data->payload_size < 2 * sizeof(uint32_t)) {
			pr_err("%s: payload has invalid size %d\n",
				__func__, data->payload_size);
			return -EINVAL;
		}

		switch (payload1[0]) {

		case AVCS_CMD_SHARED_MEM_UNMAP_REGIONS:
			pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS status[0x%x]\n",
				__func__, payload1[1]);
			/* -ADSP status to match Linux error standard */
			q6core_lcl.adsp_status = -payload1[1];
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		case AVCS_CMD_SHARED_MEM_MAP_REGIONS:
			pr_debug("%s: Cmd = AVCS_CMD_SHARED_MEM_MAP_REGIONS status[0x%x]\n",
				__func__, payload1[1]);
			/* -ADSP status to match Linux error standard */
			q6core_lcl.adsp_status = -payload1[1];
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		case AVCS_CMD_MAP_MDF_SHARED_MEMORY:
			pr_debug("%s: Cmd = AVCS_CMD_MAP_MDF_SHARED_MEMORY status[0x%x]\n",
				__func__, payload1[1]);
			/* -ADSP status to match Linux error standard */
			q6core_lcl.adsp_status = -payload1[1];
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		case AVCS_CMD_REGISTER_TOPOLOGIES:
			pr_debug("%s: Cmd = AVCS_CMD_REGISTER_TOPOLOGIES status[0x%x]\n",
				__func__, payload1[1]);
			/* -ADSP status to match Linux error standard */
			q6core_lcl.adsp_status = -payload1[1];
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		case AVCS_CMD_DEREGISTER_TOPOLOGIES:
			pr_debug("%s: Cmd = AVCS_CMD_DEREGISTER_TOPOLOGIES status[0x%x]\n",
				__func__, payload1[1]);
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		case AVCS_CMD_GET_FWK_VERSION:
			pr_debug("%s: Cmd = AVCS_CMD_GET_FWK_VERSION status[%s]\n",
				__func__, adsp_err_get_err_str(payload1[1]));
			/* -ADSP status to match Linux error standard */
			q6core_lcl.adsp_status = -payload1[1];
			if (payload1[1] == ADSP_EUNSUPPORTED)
				q6core_lcl.q6core_avcs_ver_info.status =
					VER_QUERY_UNSUPPORTED;
			q6core_lcl.avcs_fwk_ver_resp_received = 1;
			wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
			break;
		case AVCS_CMD_LOAD_TOPO_MODULES:
		case AVCS_CMD_UNLOAD_TOPO_MODULES:
			pr_debug("%s: Cmd = %s status[%s]\n",
				__func__,
				(payload1[0] == AVCS_CMD_LOAD_TOPO_MODULES) ?
				"AVCS_CMD_LOAD_TOPO_MODULES" :
				"AVCS_CMD_UNLOAD_TOPO_MODULES",
				adsp_err_get_err_str(payload1[1]));
			break;
		case AVCS_CMD_DESTROY_LPASS_NPA_CLIENT:
		case AVCS_CMD_REQUEST_LPASS_NPA_RESOURCES:
			pr_debug("%s: Cmd = AVCS_CMD_DESTROY_LPASS_NPA_CLIENT/AVCS_CMD_REQUEST_LPASS_NPA_RESOURCES status[%s]\n",
				__func__, adsp_err_get_err_str(payload1[1]));
			/* -ADSP status to match Linux error standard */
			q6core_lcl.adsp_status = -payload1[1];
			q6core_lcl.lpass_npa_rsc_rsp_rcvd = 1;
			wake_up(&q6core_lcl.lpass_npa_rsc_wait);
			break;
		default:
			pr_err("%s: Invalid cmd rsp[0x%x][0x%x] opcode 0x%x\n",
					__func__,
					payload1[0], payload1[1], data->opcode);
			break;
		}
		break;
	}

	case RESET_EVENTS:{
		pr_debug("%s: Reset event received in Core service\n",
			__func__);
		/*
		 * no reset for q6core_avcs_ver_info done as
		 * the data will not change after SSR
		 */
		apr_reset(q6core_lcl.core_handle_q);
		q6core_lcl.core_handle_q = NULL;
		break;
	}
	case AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS:
		if (data->payload_size < sizeof(uint32_t)) {
			pr_err("%s: payload has invalid size %d\n",
				__func__, data->payload_size);
			return -EINVAL;
		}
		payload1 = data->payload;
		pr_debug("%s: AVCS_CMDRSP_SHARED_MEM_MAP_REGIONS handle %d\n",
			__func__, payload1[0]);
		if (data->token == MDF_MAP_TOKEN) {
			q6core_lcl.mdf_mem_map_cal_handle = payload1[0];
			q6core_lcl.mdf_map_resp_received = 1;
			wake_up(&q6core_lcl.mdf_map_resp_wait);
		} else {
			q6core_lcl.mem_map_cal_handle = payload1[0];
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
		}
		break;
	case AVCS_CMDRSP_CREATE_LPASS_NPA_CLIENT:
		if (data->payload_size < 2 * sizeof(uint32_t)) {
			pr_err("%s: payload has invalid size %d\n",
				__func__, data->payload_size);
			return -EINVAL;
		}
		payload1 = data->payload;
		pr_debug("%s: AVCS_CMDRSP_CREATE_LPASS_NPA_CLIENT handle %d\n",
			__func__, payload1[1]);
		q6core_lcl.adsp_status = payload1[0];
		q6core_lcl.npa_client_handle = payload1[1];
		q6core_lcl.lpass_npa_rsc_rsp_rcvd = 1;
		wake_up(&q6core_lcl.lpass_npa_rsc_wait);
		break;
	case AVCS_CMDRSP_ADSP_EVENT_GET_STATE:
		if (data->payload_size < sizeof(uint32_t)) {
			pr_err("%s: payload has invalid size %d\n",
				__func__, data->payload_size);
			return -EINVAL;
		}
		payload1 = data->payload;
		q6core_lcl.param = payload1[0];
		pr_debug("%s: Received ADSP get state response 0x%x\n",
			__func__, q6core_lcl.param);
		/* ensure .param is updated prior to .bus_bw_resp_received */
		wmb();
		q6core_lcl.bus_bw_resp_received = 1;
		wake_up(&q6core_lcl.bus_bw_req_wait);
		break;
	case AVCS_CMDRSP_GET_LICENSE_VALIDATION_RESULT:
		if (data->payload_size < sizeof(uint32_t)) {
			pr_err("%s: payload has invalid size %d\n",
				__func__, data->payload_size);
			return -EINVAL;
		}
		payload1 = data->payload;
		pr_debug("%s: cmd = LICENSE_VALIDATION_RESULT, result = 0x%x\n",
				__func__, payload1[0]);
		q6core_lcl.cmd_resp_payload.cmdrsp_license_result.result
								= payload1[0];
		q6core_lcl.cmd_resp_received_flag = FLAG_CMDRSP_LICENSE_RESULT;
		wake_up(&q6core_lcl.cmd_req_wait);
		break;
	case AVCS_CMDRSP_GET_FWK_VERSION:
		pr_debug("%s: Received AVCS_CMDRSP_GET_FWK_VERSION\n",
			 __func__);
		payload1 = data->payload;
		ret = parse_fwk_version_info(payload1, data->payload_size);
		if (ret < 0) {
			q6core_lcl.adsp_status = ret;
			pr_err("%s: Failed to parse payload:%d\n",
				__func__, ret);
		} else {
			q6core_lcl.q6core_avcs_ver_info.status =
				VER_QUERY_SUPPORTED;
		}
		q6core_lcl.avcs_fwk_ver_resp_received = 1;
		wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
		break;
	default:
		pr_err("%s: Message id from adsp core svc: 0x%x\n",
			__func__, data->opcode);
		if (generic_get_data) {
			generic_get_data->valid = 1;
			generic_get_data->size_in_ints =
				data->payload_size/sizeof(int);
			pr_debug("callback size = %i\n",
				 data->payload_size);
			memcpy(generic_get_data->ints, data->payload,
				data->payload_size);
			q6core_lcl.bus_bw_resp_received = 1;
			wake_up(&q6core_lcl.bus_bw_req_wait);
			break;
		}
		break;
	}

	return 0;
}

void ocm_core_open(void)
{
	if (q6core_lcl.core_handle_q == NULL)
		q6core_lcl.core_handle_q = apr_register("ADSP", "CORE",
					aprv2_core_fn_q, 0xFFFFFFFF, NULL);
	pr_debug("%s: Open_q %pK\n", __func__, q6core_lcl.core_handle_q);
	if (q6core_lcl.core_handle_q == NULL)
		pr_err_ratelimited("%s: Unable to register CORE\n", __func__);
}

struct cal_block_data *cal_utils_get_cal_block_by_key(
		struct cal_type_data *cal_type, uint32_t key)
{
	struct list_head *ptr, *next;
	struct cal_block_data *cal_block = NULL;
	struct audio_cal_info_metainfo *metainfo;

	list_for_each_safe(ptr, next,
		&cal_type->cal_blocks) {

		cal_block = list_entry(ptr,
			struct cal_block_data, list);
		metainfo = (struct audio_cal_info_metainfo *)
			cal_block->cal_info;
		if (metainfo->nKey != key) {
			pr_debug("%s: metainfo key mismatch!!! found:%x, needed:%x\n",
				__func__, metainfo->nKey, key);
		} else {
			pr_debug("%s: metainfo key match found\n", __func__);
			return cal_block;
		}
	}
	return NULL;
}
static int q6core_send_get_avcs_fwk_ver_cmd(void)
{
	struct apr_hdr avcs_ver_cmd;
	int ret;

	avcs_ver_cmd.hdr_field =
		APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
			      APR_PKT_VER);
	avcs_ver_cmd.pkt_size = sizeof(struct apr_hdr);
	avcs_ver_cmd.src_port = 0;
	avcs_ver_cmd.dest_port = 0;
	avcs_ver_cmd.token = 0;
	avcs_ver_cmd.opcode = AVCS_CMD_GET_FWK_VERSION;

	q6core_lcl.adsp_status = 0;
	q6core_lcl.avcs_fwk_ver_resp_received = 0;

	ret = apr_send_pkt(q6core_lcl.core_handle_q,
			   (uint32_t *) &avcs_ver_cmd);
	if (ret < 0) {
		pr_err("%s: failed to send apr packet, ret=%d\n", __func__,
		       ret);
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.avcs_fwk_ver_req_wait,
				 (q6core_lcl.avcs_fwk_ver_resp_received == 1),
				 msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout for AVCS fwk version info\n",
		       __func__);
		ret = -ETIMEDOUT;
		goto done;
	}

	if (q6core_lcl.adsp_status < 0) {
		/*
		 * adsp_err_get_err_str expects a positive value but we store
		 * the DSP error as negative to match the Linux error standard.
		 * Pass in the negated value so adsp_err_get_err_str returns
		 * the correct string.
		 */
		pr_err("%s: DSP returned error[%s]\n", __func__,
		       adsp_err_get_err_str(-q6core_lcl.adsp_status));
		ret = adsp_err_get_lnx_err_code(q6core_lcl.adsp_status);
		goto done;
	}

	ret = 0;
done:
	return ret;
}

int q6core_get_service_version(uint32_t service_id,
			       struct avcs_fwk_ver_info *ver_info,
			       size_t size)
{
	struct avcs_fwk_ver_info *cached_ver_info = NULL;
	int i;
	uint32_t num_services;
	size_t ver_size;
	int ret;

	if (ver_info == NULL) {
		pr_err("%s: ver_info is NULL\n", __func__);
		return -EINVAL;
	}

	ret = q6core_get_fwk_version_size(service_id);
	if (ret < 0) {
		pr_err("%s: Failed to get service size for service id %d with error %d\n",
		       __func__, service_id, ret);
		return ret;
	}
	ver_size = ret;

	if (ver_size != size) {
		pr_err("%s: Expected size %zu and provided size %zu do not match\n",
		       __func__, ver_size, size);
		return -EINVAL;
	}

	cached_ver_info = q6core_lcl.q6core_avcs_ver_info.ver_info;
	num_services = cached_ver_info->avcs_fwk_version.num_services;

	if (service_id == AVCS_SERVICE_ID_ALL) {
		memcpy(ver_info, cached_ver_info, ver_size);
		return 0;
	}

	ver_info->avcs_fwk_version = cached_ver_info->avcs_fwk_version;
	for (i = 0; i < num_services; i++) {
		if (cached_ver_info->services[i].service_id == service_id) {
			ver_info->services[0] = cached_ver_info->services[i];
			return 0;
		}
	}

	pr_err("%s: No service matching service ID %d\n", __func__, service_id);
	return -EINVAL;
}
EXPORT_SYMBOL(q6core_get_service_version);
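/*
 * Usage sketch (hypothetical caller, excluded from the build): query the
 * required buffer size for all services, then fetch the cached version
 * table with a buffer of exactly that size, as q6core_get_service_version()
 * requires. Error handling is trimmed to the essentials.
 */
#if 0
static void my_dump_avcs_versions(void)
{
	struct avcs_fwk_ver_info *ver_info;
	size_t size;
	int i, ret;

	ret = q6core_get_fwk_version_size(AVCS_SERVICE_ID_ALL);
	if (ret < 0)
		return;
	size = ret;

	ver_info = kzalloc(size, GFP_KERNEL);
	if (!ver_info)
		return;

	/* size must match exactly what the size query above returned */
	ret = q6core_get_service_version(AVCS_SERVICE_ID_ALL, ver_info, size);
	if (!ret)
		for (i = 0; i < ver_info->avcs_fwk_version.num_services; i++)
			pr_info("svc 0x%x api 0x%x\n",
				ver_info->services[i].service_id,
				ver_info->services[i].api_version);

	kfree(ver_info);
}
#endif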
static int q6core_get_avcs_fwk_version(void)
{
	int ret = 0;

	mutex_lock(&(q6core_lcl.ver_lock));
	pr_debug("%s: q6core_avcs_ver_info.status(%d)\n", __func__,
		 q6core_lcl.q6core_avcs_ver_info.status);

	switch (q6core_lcl.q6core_avcs_ver_info.status) {
	case VER_QUERY_SUPPORTED:
		pr_debug("%s: AVCS FWK version query already attempted\n",
			 __func__);
		break;
	case VER_QUERY_UNSUPPORTED:
		ret = -EOPNOTSUPP;
		break;
	case VER_QUERY_UNATTEMPTED:
		pr_debug("%s: Attempting AVCS FWK version query\n", __func__);
		if (q6core_is_adsp_ready()) {
			ret = q6core_send_get_avcs_fwk_ver_cmd();
		} else {
			pr_err("%s: ADSP is not ready to query version\n",
			       __func__);
			ret = -ENODEV;
		}
		break;
	default:
		pr_err("%s: Invalid version query status %d\n", __func__,
		       q6core_lcl.q6core_avcs_ver_info.status);
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&(q6core_lcl.ver_lock));
	return ret;
}

size_t q6core_get_fwk_version_size(uint32_t service_id)
{
	int ret = 0;
	uint32_t num_services;

	ret = q6core_get_avcs_fwk_version();
	if (ret)
		goto done;

	if (q6core_lcl.q6core_avcs_ver_info.ver_info != NULL) {
		num_services = q6core_lcl.q6core_avcs_ver_info.ver_info
					->avcs_fwk_version.num_services;
	} else {
		pr_err("%s: ver_info is NULL\n", __func__);
		ret = -EINVAL;
		goto done;
	}

	ret = sizeof(struct avcs_get_fwk_version);
	if (service_id == AVCS_SERVICE_ID_ALL)
		ret += num_services * sizeof(struct avs_svc_api_info);
	else
		ret += sizeof(struct avs_svc_api_info);
done:
	return ret;
}
EXPORT_SYMBOL(q6core_get_fwk_version_size);

/**
 * q6core_get_avcs_api_version_per_service -
 *       to get api version of a particular service
 *
 * @service_id: id of the service
 *
 * Returns valid version on success or error (negative value) on failure
 */
int q6core_get_avcs_api_version_per_service(uint32_t service_id)
{
	struct avcs_fwk_ver_info *cached_ver_info = NULL;
	int i;
	uint32_t num_services;
	int ret = 0;

	if (service_id == AVCS_SERVICE_ID_ALL)
		return -EINVAL;

	ret = q6core_get_avcs_fwk_version();
	if (ret < 0) {
		pr_err("%s: failure in getting AVCS version\n", __func__);
		return ret;
	}

	cached_ver_info = q6core_lcl.q6core_avcs_ver_info.ver_info;
	num_services = cached_ver_info->avcs_fwk_version.num_services;

	for (i = 0; i < num_services; i++) {
		if (cached_ver_info->services[i].service_id == service_id)
			return cached_ver_info->services[i].api_version;
	}

	pr_err("%s: No service matching service ID %d\n", __func__, service_id);
	return -EINVAL;
}
EXPORT_SYMBOL(q6core_get_avcs_api_version_per_service);
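/*
 * Usage sketch (hypothetical, excluded from the build): gate a feature on
 * a minimum AVCS service API version. The service id macro and the version
 * threshold below are illustrative placeholders, not values defined in
 * this file.
 */
#if 0
static bool my_feature_supported(void)
{
	int api_version;

	api_version = q6core_get_avcs_api_version_per_service(
						MY_AVCS_SERVICE_ID);
	if (api_version < 0)
		return false;	/* query failed or service missing */

	return api_version >= 2;
}
#endif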
/**
 * core_set_license -
 *       command to set license for module
 *
 * @key: license key hash
 * @module_id: DSP Module ID
 *
 * Returns 0 on success or error on failure
 */
int32_t core_set_license(uint32_t key, uint32_t module_id)
{
	struct avcs_cmd_set_license *cmd_setl = NULL;
	struct cal_block_data *cal_block = NULL;
	int rc = 0, packet_size = 0;

	pr_debug("%s: key:0x%x, id:0x%x\n", __func__, key, module_id);

	mutex_lock(&(q6core_lcl.cmd_lock));
	if (q6core_lcl.cal_data[META_CAL] == NULL) {
		pr_err("%s: cal_data not initialized yet!!\n", __func__);
		rc = -EINVAL;
		goto cmd_unlock;
	}

	mutex_lock(&((q6core_lcl.cal_data[META_CAL])->lock));
	cal_block = cal_utils_get_cal_block_by_key(
				q6core_lcl.cal_data[META_CAL], key);
	if (cal_block == NULL ||
		cal_block->cal_data.kvaddr == NULL ||
		cal_block->cal_data.size <= 0) {
		pr_err("%s: Invalid cal block to send", __func__);
		rc = -EINVAL;
		goto cal_data_unlock;
	}

	packet_size = sizeof(struct avcs_cmd_set_license) +
			cal_block->cal_data.size;
	/* round up total packet_size to next 4 byte boundary */
	packet_size = ((packet_size + 0x3) >> 2) << 2;

	cmd_setl = kzalloc(packet_size, GFP_KERNEL);
	if (cmd_setl == NULL) {
		rc = -ENOMEM;
		goto cal_data_unlock;
	}

	ocm_core_open();
	if (q6core_lcl.core_handle_q == NULL) {
		pr_err("%s: apr registration for CORE failed\n", __func__);
		rc = -ENODEV;
		goto fail_cmd;
	}

	cmd_setl->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	cmd_setl->hdr.pkt_size = packet_size;
	cmd_setl->hdr.src_port = 0;
	cmd_setl->hdr.dest_port = 0;
	cmd_setl->hdr.token = 0;
	cmd_setl->hdr.opcode = AVCS_CMD_SET_LICENSE;
	cmd_setl->id = module_id;
	cmd_setl->overwrite = 1;
	cmd_setl->size = cal_block->cal_data.size;
	memcpy((uint8_t *)cmd_setl + sizeof(struct avcs_cmd_set_license),
		cal_block->cal_data.kvaddr,
		cal_block->cal_data.size);
	pr_info("%s: Set license opcode=0x%x, id =0x%x, size = %d\n",
			__func__, cmd_setl->hdr.opcode,
			cmd_setl->id, cmd_setl->size);
	rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)cmd_setl);
	if (rc < 0)
		pr_err("%s: SET_LICENSE failed op[0x%x]rc[%d]\n",
			__func__, cmd_setl->hdr.opcode, rc);

fail_cmd:
	kfree(cmd_setl);
cal_data_unlock:
	mutex_unlock(&((q6core_lcl.cal_data[META_CAL])->lock));
cmd_unlock:
	mutex_unlock(&(q6core_lcl.cmd_lock));

	return rc;
}
EXPORT_SYMBOL(core_set_license);

/**
 * core_get_license_status -
 *       command to retrieve license status for module
 *
 * @module_id: DSP Module ID
 *
 * Returns 0 on success or error on failure
 */
int32_t core_get_license_status(uint32_t module_id)
{
	struct avcs_cmd_get_license_validation_result get_lvr_cmd;
	int ret = 0;

	pr_debug("%s: module_id 0x%x", __func__, module_id);

	mutex_lock(&(q6core_lcl.cmd_lock));
	ocm_core_open();
	if (q6core_lcl.core_handle_q == NULL) {
		pr_err("%s: apr registration for CORE failed\n", __func__);
		ret = -ENODEV;
		goto fail_cmd;
	}

	get_lvr_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	get_lvr_cmd.hdr.pkt_size =
		sizeof(struct avcs_cmd_get_license_validation_result);
	get_lvr_cmd.hdr.src_port = 0;
	get_lvr_cmd.hdr.dest_port = 0;
	get_lvr_cmd.hdr.token = 0;
	get_lvr_cmd.hdr.opcode = AVCS_CMD_GET_LICENSE_VALIDATION_RESULT;
	get_lvr_cmd.id = module_id;

	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &get_lvr_cmd);
	if (ret < 0) {
		pr_err("%s: license_validation request failed, err %d\n",
			__func__, ret);
		ret = -EREMOTE;
		goto fail_cmd;
	}

	q6core_lcl.cmd_resp_received_flag &= ~(FLAG_CMDRSP_LICENSE_RESULT);
	mutex_unlock(&(q6core_lcl.cmd_lock));
	ret = wait_event_timeout(q6core_lcl.cmd_req_wait,
			(q6core_lcl.cmd_resp_received_flag ==
				FLAG_CMDRSP_LICENSE_RESULT),
			msecs_to_jiffies(TIMEOUT_MS));
	mutex_lock(&(q6core_lcl.cmd_lock));
	if (!ret) {
		pr_err("%s: wait_event timeout for CMDRSP_LICENSE_RESULT\n",
				__func__);
		ret = -ETIME;
		goto fail_cmd;
	}
	q6core_lcl.cmd_resp_received_flag &= ~(FLAG_CMDRSP_LICENSE_RESULT);
	ret = q6core_lcl.cmd_resp_payload.cmdrsp_license_result.result;

fail_cmd:
	mutex_unlock(&(q6core_lcl.cmd_lock));
	pr_info("%s: cmdrsp_license_result.result = 0x%x for module 0x%x\n",
			__func__, ret, module_id);
	return ret;
}
EXPORT_SYMBOL(core_get_license_status);
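/*
 * Usage sketch (hypothetical, excluded from the build): push a license
 * blob that was previously registered as META_CAL calibration, then read
 * back the DSP-reported validation result. MY_MODULE_ID and
 * my_license_key are placeholders.
 */
#if 0
static int my_license_module(uint32_t my_license_key)
{
	int32_t rc;

	/* a META_CAL cal block holding the license payload must exist */
	rc = core_set_license(my_license_key, MY_MODULE_ID);
	if (rc < 0)
		return rc;

	/* returns the DSP's validation status for the module */
	return core_get_license_status(MY_MODULE_ID);
}
#endif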
/**
 * core_set_dolby_manufacturer_id -
 *       command to set dolby manufacturer id
 *
 * @manufacturer_id: Dolby manufacturer id
 *
 * Returns 0 on success or error on failure
 */
uint32_t core_set_dolby_manufacturer_id(int manufacturer_id)
{
	struct adsp_dolby_manufacturer_id payload;
	int rc = 0;

	pr_debug("%s: manufacturer_id :%d\n", __func__, manufacturer_id);

	mutex_lock(&(q6core_lcl.cmd_lock));
	ocm_core_open();
	if (q6core_lcl.core_handle_q) {
		payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		payload.hdr.pkt_size =
			sizeof(struct adsp_dolby_manufacturer_id);
		payload.hdr.src_port = 0;
		payload.hdr.dest_port = 0;
		payload.hdr.token = 0;
		payload.hdr.opcode = ADSP_CMD_SET_DOLBY_MANUFACTURER_ID;
		payload.manufacturer_id = manufacturer_id;
		pr_debug("%s: Send Dolby security opcode=0x%x manufacturer ID = %d\n",
			__func__,
			payload.hdr.opcode, payload.manufacturer_id);
		rc = apr_send_pkt(q6core_lcl.core_handle_q,
						(uint32_t *)&payload);
		if (rc < 0)
			pr_err("%s: SET_DOLBY_MANUFACTURER_ID failed op[0x%x]rc[%d]\n",
				__func__, payload.hdr.opcode, rc);
	}
	mutex_unlock(&(q6core_lcl.cmd_lock));
	return rc;
}
EXPORT_SYMBOL(core_set_dolby_manufacturer_id);

int32_t q6core_load_unload_topo_modules(uint32_t topo_id,
			bool preload_type)
{
	struct avcs_cmd_load_unload_topo_modules load_unload_topo_modules;
	int ret = 0;

	mutex_lock(&(q6core_lcl.cmd_lock));
	ocm_core_open();
	if (q6core_lcl.core_handle_q == NULL) {
		pr_err("%s: apr registration for CORE failed\n", __func__);
		ret = -ENODEV;
		goto done;
	}

	memset(&load_unload_topo_modules, 0, sizeof(load_unload_topo_modules));
	load_unload_topo_modules.hdr.hdr_field =
			APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	load_unload_topo_modules.hdr.pkt_size =
			sizeof(struct avcs_cmd_load_unload_topo_modules);
	load_unload_topo_modules.hdr.src_port = 0;
	load_unload_topo_modules.hdr.dest_port = 0;
	load_unload_topo_modules.hdr.token = 0;

	if (preload_type == CORE_LOAD_TOPOLOGY)
		load_unload_topo_modules.hdr.opcode =
				AVCS_CMD_LOAD_TOPO_MODULES;
	else
		load_unload_topo_modules.hdr.opcode =
				AVCS_CMD_UNLOAD_TOPO_MODULES;

	load_unload_topo_modules.topology_id = topo_id;
	ret = apr_send_pkt(q6core_lcl.core_handle_q,
		(uint32_t *) &load_unload_topo_modules);
	if (ret < 0) {
		pr_err("%s: Load/unload topo modules failed for topology = %d ret = %d\n",
			__func__, topo_id, ret);
		ret = -EINVAL;
	}
done:
	mutex_unlock(&(q6core_lcl.cmd_lock));

	return ret;
}
EXPORT_SYMBOL(q6core_load_unload_topo_modules);
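/*
 * Usage sketch (hypothetical, excluded from the build): preload the
 * modules of a topology before a use case and unload them afterwards.
 * MY_TOPOLOGY_ID is a placeholder; CORE_LOAD_TOPOLOGY and
 * CORE_UNLOAD_TOPOLOGY are assumed to come from dsp/q6core.h, as used
 * above.
 */
#if 0
static void my_topo_cycle(void)
{
	if (q6core_load_unload_topo_modules(MY_TOPOLOGY_ID,
					    CORE_LOAD_TOPOLOGY))
		return;

	/* ... run the use case that needs the topology ... */

	q6core_load_unload_topo_modules(MY_TOPOLOGY_ID,
					CORE_UNLOAD_TOPOLOGY);
}
#endif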
/**
 * q6core_is_adsp_ready - check adsp ready status
 *
 * Returns true if adsp is ready otherwise returns false
 */
bool q6core_is_adsp_ready(void)
{
	int rc = 0;
	bool ret = false;
	struct apr_hdr hdr;

	pr_debug("%s: enter\n", __func__);
	memset(&hdr, 0, sizeof(hdr));
	hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				      APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, 0);
	hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE;

	mutex_lock(&(q6core_lcl.cmd_lock));
	ocm_core_open();
	if (q6core_lcl.core_handle_q) {
		q6core_lcl.bus_bw_resp_received = 0;
		rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&hdr);
		if (rc < 0) {
			pr_err_ratelimited("%s: Get ADSP state APR packet send event %d\n",
					   __func__, rc);
			goto bail;
		}

		rc = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
					(q6core_lcl.bus_bw_resp_received == 1),
					msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
		if (rc > 0 && q6core_lcl.bus_bw_resp_received) {
			/* ensure to read updated param by callback thread */
			rmb();
			ret = !!q6core_lcl.param;
		}
	}
bail:
	pr_debug("%s: leave, rc %d, adsp ready %d\n", __func__, rc, ret);
	mutex_unlock(&(q6core_lcl.cmd_lock));
	return ret;
}
EXPORT_SYMBOL(q6core_is_adsp_ready);
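/*
 * Usage sketch (hypothetical, excluded from the build): poll
 * q6core_is_adsp_ready() until the ADSP comes up or an overall deadline
 * expires. Each call already waits up to Q6_READY_TIMEOUT_MS internally;
 * ADSP_STATE_READY_TIMEOUT_MS defined at the top of this file is used here
 * to bound the total wait.
 */
#if 0
static bool my_wait_for_adsp(void)
{
	unsigned long timeout = jiffies +
			msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);

	do {
		if (q6core_is_adsp_ready())
			return true;
		/* back off briefly between state queries */
		msleep(50);
	} while (time_after(timeout, jiffies));

	return false;
}
#endif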
int q6core_create_lpass_npa_client(uint32_t node_id, char *client_name,
				   uint32_t *client_handle)
{
	struct avcs_cmd_create_lpass_npa_client_t create_lpass_npa_client;
	struct avcs_cmd_create_lpass_npa_client_t *cmd_ptr =
						&create_lpass_npa_client;
	int ret = 0;

	if (!client_name) {
		pr_err("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&(q6core_lcl.cmd_lock));

	memset(cmd_ptr, 0, sizeof(create_lpass_npa_client));

	cmd_ptr->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
					       APR_HDR_LEN(APR_HDR_SIZE),
					       APR_PKT_VER);
	cmd_ptr->hdr.pkt_size = sizeof(create_lpass_npa_client);
	cmd_ptr->hdr.src_port = 0;
	cmd_ptr->hdr.dest_port = 0;
	cmd_ptr->hdr.token = 0;
	cmd_ptr->hdr.opcode = AVCS_CMD_CREATE_LPASS_NPA_CLIENT;
	cmd_ptr->node_id = AVCS_SLEEP_ISLAND_CORE_DRIVER_NODE_ID;
	strlcpy(cmd_ptr->client_name, client_name,
			sizeof(cmd_ptr->client_name));

	pr_debug("%s: create lpass npa client opcode[0x%x] node id[0x%x]\n",
		__func__, cmd_ptr->hdr.opcode, cmd_ptr->node_id);

	*client_handle = 0;
	q6core_lcl.adsp_status = 0;
	q6core_lcl.lpass_npa_rsc_rsp_rcvd = 0;
	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) cmd_ptr);
	if (ret < 0) {
		pr_err("%s: create lpass npa client failed %d\n",
			__func__, ret);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.lpass_npa_rsc_wait,
				(q6core_lcl.lpass_npa_rsc_rsp_rcvd == 1),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: timeout. waited for create lpass npa rsc client\n",
		       __func__);
		ret = -ETIMEDOUT;
		goto done;
	} else {
		/* set ret to 0 as no timeout happened */
		ret = 0;
	}

	if (q6core_lcl.adsp_status < 0) {
		pr_err("%s: DSP returned error %d\n",
			__func__, q6core_lcl.adsp_status);
		ret = q6core_lcl.adsp_status;
		goto done;
	}

	*client_handle = q6core_lcl.npa_client_handle;
	pr_debug("%s: q6core_lcl.npa_client_handle %d\n", __func__,
		 q6core_lcl.npa_client_handle);
done:
	mutex_unlock(&q6core_lcl.cmd_lock);
	return ret;
}
EXPORT_SYMBOL(q6core_create_lpass_npa_client);

int q6core_destroy_lpass_npa_client(uint32_t client_handle)
{
	struct avcs_cmd_destroy_lpass_npa_client_t destroy_lpass_npa_client;
	struct avcs_cmd_destroy_lpass_npa_client_t *cmd_ptr =
						&destroy_lpass_npa_client;
	int ret = 0;

	mutex_lock(&(q6core_lcl.cmd_lock));

	memset(cmd_ptr, 0, sizeof(destroy_lpass_npa_client));

	cmd_ptr->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
					       APR_HDR_LEN(APR_HDR_SIZE),
					       APR_PKT_VER);
	cmd_ptr->hdr.pkt_size = sizeof(destroy_lpass_npa_client);
	cmd_ptr->hdr.src_port = 0;
	cmd_ptr->hdr.dest_port = 0;
	cmd_ptr->hdr.token = 0;
	cmd_ptr->hdr.opcode = AVCS_CMD_DESTROY_LPASS_NPA_CLIENT;
	cmd_ptr->client_handle = client_handle;

	pr_debug("%s: dstry lpass npa client opcode[0x%x] client hdl[0x%x]\n",
		__func__, cmd_ptr->hdr.opcode, cmd_ptr->client_handle);

	q6core_lcl.adsp_status = 0;
	q6core_lcl.lpass_npa_rsc_rsp_rcvd = 0;
	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) cmd_ptr);
	if (ret < 0) {
		pr_err("%s: destroy lpass npa client failed %d\n",
			__func__, ret);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.lpass_npa_rsc_wait,
				(q6core_lcl.lpass_npa_rsc_rsp_rcvd == 1),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: timeout. waited for destroy lpass npa rsc client\n",
		       __func__);
		ret = -ETIMEDOUT;
		goto done;
	} else {
		/* set ret to 0 as no timeout happened */
		ret = 0;
	}

	if (q6core_lcl.adsp_status < 0) {
		pr_err("%s: DSP returned error %d\n",
			__func__, q6core_lcl.adsp_status);
		ret = q6core_lcl.adsp_status;
	}
done:
	mutex_unlock(&q6core_lcl.cmd_lock);
	return ret;
}
EXPORT_SYMBOL(q6core_destroy_lpass_npa_client);

int q6core_request_island_transition(uint32_t client_handle,
				     uint32_t island_allow_mode)
{
	struct avcs_sleep_node_island_transition_config_t island_tsn_cfg;
	struct avcs_sleep_node_island_transition_config_t *cmd_ptr =
						&island_tsn_cfg;
	int ret = 0;

	mutex_lock(&(q6core_lcl.cmd_lock));

	memset(cmd_ptr, 0, sizeof(island_tsn_cfg));

	cmd_ptr->req_lpass_npa_rsc.hdr.hdr_field =
				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
					      APR_HDR_LEN(APR_HDR_SIZE),
					      APR_PKT_VER);
	cmd_ptr->req_lpass_npa_rsc.hdr.pkt_size = sizeof(island_tsn_cfg);
	cmd_ptr->req_lpass_npa_rsc.hdr.src_port = 0;
	cmd_ptr->req_lpass_npa_rsc.hdr.dest_port = 0;
	cmd_ptr->req_lpass_npa_rsc.hdr.token = 0;
	cmd_ptr->req_lpass_npa_rsc.hdr.opcode =
				AVCS_CMD_REQUEST_LPASS_NPA_RESOURCES;
	cmd_ptr->req_lpass_npa_rsc.client_handle = client_handle;
	cmd_ptr->req_lpass_npa_rsc.resource_id =
				AVCS_SLEEP_NODE_ISLAND_TRANSITION_RESOURCE_ID;
	cmd_ptr->island_allow_mode = island_allow_mode;

	pr_debug("%s: req islnd tnsn opcode[0x%x] island_allow_mode[0x%x]\n",
		__func__, cmd_ptr->req_lpass_npa_rsc.hdr.opcode,
		cmd_ptr->island_allow_mode);

	q6core_lcl.adsp_status = 0;
	q6core_lcl.lpass_npa_rsc_rsp_rcvd = 0;
	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) cmd_ptr);
	if (ret < 0) {
		pr_err("%s: island tnsn cmd send failed %d\n",
			__func__, ret);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.lpass_npa_rsc_wait,
				(q6core_lcl.lpass_npa_rsc_rsp_rcvd == 1),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: timeout. waited for island lpass npa rsc req\n",
		       __func__);
		ret = -ETIMEDOUT;
		goto done;
	} else {
		/* set ret to 0 as no timeout happened */
		ret = 0;
	}

	if (q6core_lcl.adsp_status < 0) {
		pr_err("%s: DSP returned error %d\n",
			__func__, q6core_lcl.adsp_status);
		ret = q6core_lcl.adsp_status;
	}
done:
	mutex_unlock(&q6core_lcl.cmd_lock);
	return ret;
}
EXPORT_SYMBOL(q6core_request_island_transition);
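/*
 * Usage sketch (hypothetical, excluded from the build): full LPASS NPA
 * client lifecycle around an island transition vote. "my_client" is a
 * placeholder name; note that the create call above currently fixes the
 * node id to AVCS_SLEEP_ISLAND_CORE_DRIVER_NODE_ID regardless of the
 * argument, and assumes the APR core handle is already registered.
 */
#if 0
static int my_island_vote(uint32_t island_allow_mode)
{
	uint32_t handle = 0;
	int ret;

	ret = q6core_create_lpass_npa_client(
			AVCS_SLEEP_ISLAND_CORE_DRIVER_NODE_ID,
			"my_client", &handle);
	if (ret)
		return ret;

	ret = q6core_request_island_transition(handle, island_allow_mode);

	q6core_destroy_lpass_npa_client(handle);
	return ret;
}
#endif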
int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id,
			uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle)
{
	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
	struct avs_shared_map_region_payload *mregions = NULL;
	void *mmap_region_cmd = NULL;
	void *payload = NULL;
	int ret = 0;
	int i = 0;
	int cmd_size = 0;

	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
			+ sizeof(struct avs_shared_map_region_payload)
			* bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (mmap_region_cmd == NULL)
		return -ENOMEM;

	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						    APR_HDR_LEN(APR_HDR_SIZE),
						    APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = AVCS_CMD_SHARED_MEM_MAP_REGIONS;
	mmap_regions->mem_pool_id = mempool_id & 0x00ff;
	mmap_regions->num_regions = bufcnt & 0x00ff;
	mmap_regions->property_flag = 0x00;

	payload = ((u8 *) mmap_region_cmd +
		sizeof(struct avs_cmd_shared_mem_map_regions));
	mregions = (struct avs_shared_map_region_payload *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
		mregions->shm_addr_msw =
				msm_audio_populate_upper_32_bits(buf_add[i]);
		mregions->mem_size_bytes = bufsz[i];
		++mregions;
	}

	pr_debug("%s: sending memory map, addr %pK, size %d, bufcnt = %d\n",
		__func__, buf_add, bufsz[0], mmap_regions->num_regions);

	*map_handle = 0;
	q6core_lcl.adsp_status = 0;
	q6core_lcl.bus_bw_resp_received = 0;
	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
		mmap_regions);
	if (ret < 0) {
		pr_err("%s: mmap regions failed %d\n",
			__func__, ret);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
				(q6core_lcl.bus_bw_resp_received == 1),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: timeout. waited for memory map\n", __func__);
		ret = -ETIME;
		goto done;
	} else {
		/* set ret to 0 as no timeout happened */
		ret = 0;
	}

	if (q6core_lcl.adsp_status < 0) {
		pr_err("%s: DSP returned error %d\n",
			__func__, q6core_lcl.adsp_status);
		ret = q6core_lcl.adsp_status;
		goto done;
	}

	*map_handle = q6core_lcl.mem_map_cal_handle;
done:
	kfree(mmap_region_cmd);
	return ret;
}
/**
 * q6core_map_mdf_memory_regions - for sending MDF shared memory map information
 *                                 to ADSP.
 *
 * @buf_add: array of buffers.
 * @mempool_id: memory pool ID
 * @bufsz: size of the buffer
 * @bufcnt: buffers count
 * @map_handle: map handle received from ADSP
 */
int q6core_map_mdf_memory_regions(uint64_t *buf_add, uint32_t mempool_id,
			uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle)
{
	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
	struct avs_shared_map_region_payload *mregions = NULL;
	void *mmap_region_cmd = NULL;
	void *payload = NULL;
	int ret = 0;
	int i = 0;
	int cmd_size = 0;

	mutex_lock(&q6core_lcl.cmd_lock);

	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
			+ sizeof(struct avs_shared_map_region_payload)
			* bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (mmap_region_cmd == NULL) {
		/* drop cmd_lock on the error path instead of leaking it */
		mutex_unlock(&q6core_lcl.cmd_lock);
		return -ENOMEM;
	}

	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						    APR_HDR_LEN(APR_HDR_SIZE),
						    APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = MDF_MAP_TOKEN;
	mmap_regions->hdr.opcode = AVCS_CMD_SHARED_MEM_MAP_REGIONS;
	mmap_regions->mem_pool_id = mempool_id & MEMPOOL_ID_MASK;
	mmap_regions->num_regions = bufcnt & 0x00ff;
	mmap_regions->property_flag = 0x00;

	payload = ((u8 *) mmap_region_cmd +
		sizeof(struct avs_cmd_shared_mem_map_regions));
	mregions = (struct avs_shared_map_region_payload *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
		mregions->shm_addr_msw = upper_32_bits(buf_add[i]);
		mregions->mem_size_bytes = bufsz[i];
		++mregions;
	}

	pr_debug("%s: sending MDF memory map, addr %pK, size %d, bufcnt = %d\n",
		__func__, buf_add, bufsz[0], mmap_regions->num_regions);

	*map_handle = 0;
	q6core_lcl.adsp_status = 0;
	q6core_lcl.mdf_map_resp_received = 0;
	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
		mmap_regions);
	if (ret < 0) {
		pr_err("%s: mmap regions failed %d\n",
			__func__, ret);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.mdf_map_resp_wait,
				(q6core_lcl.mdf_map_resp_received == 1),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: timeout. waited for memory map\n", __func__);
		ret = -ETIMEDOUT;
		goto done;
	} else {
		/* set ret to 0 as no timeout happened */
		ret = 0;
	}

	if (q6core_lcl.adsp_status < 0) {
		pr_err("%s: DSP returned error %d\n",
			__func__, q6core_lcl.adsp_status);
		ret = q6core_lcl.adsp_status;
		goto done;
	}

	*map_handle = q6core_lcl.mdf_mem_map_cal_handle;
done:
	kfree(mmap_region_cmd);
	mutex_unlock(&q6core_lcl.cmd_lock);
	return ret;
}
EXPORT_SYMBOL(q6core_map_mdf_memory_regions);
int q6core_memory_unmap_regions(uint32_t mem_map_handle)
{
	struct avs_cmd_shared_mem_unmap_regions unmap_regions;
	int ret = 0;

	memset(&unmap_regions, 0, sizeof(unmap_regions));
	unmap_regions.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	unmap_regions.hdr.pkt_size = sizeof(unmap_regions);
	unmap_regions.hdr.src_svc = APR_SVC_ADSP_CORE;
	unmap_regions.hdr.src_domain = APR_DOMAIN_APPS;
	unmap_regions.hdr.src_port = 0;
	unmap_regions.hdr.dest_svc = APR_SVC_ADSP_CORE;
	unmap_regions.hdr.dest_domain = APR_DOMAIN_ADSP;
	unmap_regions.hdr.dest_port = 0;
	unmap_regions.hdr.token = 0;
	unmap_regions.hdr.opcode = AVCS_CMD_SHARED_MEM_UNMAP_REGIONS;
	unmap_regions.mem_map_handle = mem_map_handle;

	q6core_lcl.adsp_status = 0;
	q6core_lcl.bus_bw_resp_received = 0;

	pr_debug("%s: unmap regions map handle %d\n",
		__func__, mem_map_handle);

	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
		&unmap_regions);
	if (ret < 0) {
		pr_err("%s: unmap regions failed %d\n",
			__func__, ret);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
				(q6core_lcl.bus_bw_resp_received == 1),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: timeout. waited for memory_unmap\n",
		       __func__);
		ret = -ETIME;
		goto done;
	} else {
		/* set ret to 0 as no timeout happened */
		ret = 0;
	}

	if (q6core_lcl.adsp_status < 0) {
		pr_err("%s: DSP returned error %d\n",
			__func__, q6core_lcl.adsp_status);
		ret = q6core_lcl.adsp_status;
		goto done;
	}
done:
	return ret;
}
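/*
 * Usage sketch (hypothetical, excluded from the build): map one physical
 * region into the ADSP, use it, then unmap with the returned handle. The
 * buffer and size are placeholders; the pattern mirrors the
 * custom-topology path later in this file, which uses the same
 * ADSP_MEMORY_MAP_SHMEM8_4K_POOL pool id.
 */
#if 0
static int my_map_unmap(phys_addr_t paddr, uint32_t size)
{
	uint32_t map_handle = 0;
	int ret;

	ret = q6core_map_memory_regions(&paddr,
					ADSP_MEMORY_MAP_SHMEM8_4K_POOL,
					&size, 1, &map_handle);
	if (ret)
		return ret;

	/* ... hand map_handle to commands that reference the region ... */

	return q6core_memory_unmap_regions(map_handle);
}
#endif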
int q6core_map_mdf_shared_memory(uint32_t map_handle, uint64_t *buf_add,
			uint32_t proc_id, uint32_t *bufsz, uint32_t bufcnt)
{
	struct avs_cmd_map_mdf_shared_memory *mmap_regions = NULL;
	struct avs_shared_map_region_payload *mregions = NULL;
	void *mmap_region_cmd = NULL;
	void *payload = NULL;
	int ret = 0;
	int i = 0;
	int cmd_size = 0;

	cmd_size = sizeof(struct avs_cmd_map_mdf_shared_memory)
			+ sizeof(struct avs_shared_map_region_payload)
			* bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (mmap_region_cmd == NULL)
		return -ENOMEM;

	mmap_regions = (struct avs_cmd_map_mdf_shared_memory *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						    APR_HDR_LEN(APR_HDR_SIZE),
						    APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = AVCS_CMD_MAP_MDF_SHARED_MEMORY;
	mmap_regions->mem_map_handle = map_handle;
	mmap_regions->proc_id = proc_id & 0x00ff;
	mmap_regions->num_regions = bufcnt & 0x00ff;

	payload = ((u8 *) mmap_region_cmd +
		sizeof(struct avs_cmd_map_mdf_shared_memory));
	mregions = (struct avs_shared_map_region_payload *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->shm_addr_lsw = lower_32_bits(buf_add[i]);
		mregions->shm_addr_msw = upper_32_bits(buf_add[i]);
		mregions->mem_size_bytes = bufsz[i];
		++mregions;
	}

	pr_debug("%s: sending mdf memory map, addr %pa, size %d, bufcnt = %d\n",
		__func__, buf_add, bufsz[0], mmap_regions->num_regions);

	q6core_lcl.adsp_status = 0;
	q6core_lcl.bus_bw_resp_received = 0;

	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)
		mmap_regions);
	if (ret < 0) {
		pr_err("%s: mdf memory map failed %d\n",
			__func__, ret);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
				(q6core_lcl.bus_bw_resp_received == 1),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: timeout. waited for mdf memory map\n",
			__func__);
		ret = -ETIME;
		goto done;
	} else {
		/* set ret to 0 as no timeout happened */
		ret = 0;
	}

	/*
	 * When the remote DSP is not ready, the ADSP will validate and store
	 * the memory information and return APR_ENOTREADY to HLOS. The ADSP
	 * will map the memory with remote DSP when it is ready. HLOS should
	 * not treat APR_ENOTREADY as an error, and a zero status is success.
	 */
	if (q6core_lcl.adsp_status < 0 &&
	    q6core_lcl.adsp_status != -APR_ENOTREADY) {
		pr_err("%s: DSP returned error %d\n",
			__func__, q6core_lcl.adsp_status);
		ret = q6core_lcl.adsp_status;
		goto done;
	}
done:
	kfree(mmap_region_cmd);
	return ret;
}
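/*
 * Usage sketch (hypothetical, excluded from the build): the two-step MDF
 * flow implied above. First map the regions into the ADSP with the MDF
 * token, then ask the ADSP to share the same handle with a remote DSP
 * identified by proc_id. The address, size and proc id are placeholders,
 * and the SHMEM8_4K pool id is reused here only for illustration; the
 * appropriate MDF pool id depends on the platform.
 */
#if 0
static int my_mdf_share(uint64_t paddr, uint32_t size, uint32_t proc_id)
{
	uint32_t map_handle = 0;
	int ret;

	ret = q6core_map_mdf_memory_regions(&paddr,
					ADSP_MEMORY_MAP_SHMEM8_4K_POOL,
					&size, 1, &map_handle);
	if (ret)
		return ret;

	/* APR_ENOTREADY from the DSP is treated as success inside */
	return q6core_map_mdf_shared_memory(map_handle, &paddr,
					proc_id, &size, 1);
}
#endif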
static int q6core_dereg_all_custom_topologies(void)
{
	int ret = 0;
	struct avcs_cmd_deregister_topologies dereg_top;

	memset(&dereg_top, 0, sizeof(dereg_top));
	dereg_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	dereg_top.hdr.pkt_size = sizeof(dereg_top);
	dereg_top.hdr.src_svc = APR_SVC_ADSP_CORE;
	dereg_top.hdr.src_domain = APR_DOMAIN_APPS;
	dereg_top.hdr.src_port = 0;
	dereg_top.hdr.dest_svc = APR_SVC_ADSP_CORE;
	dereg_top.hdr.dest_domain = APR_DOMAIN_ADSP;
	dereg_top.hdr.dest_port = 0;
	dereg_top.hdr.token = 0;
	dereg_top.hdr.opcode = AVCS_CMD_DEREGISTER_TOPOLOGIES;
	dereg_top.payload_addr_lsw = 0;
	dereg_top.payload_addr_msw = 0;
	dereg_top.mem_map_handle = 0;
	dereg_top.payload_size = 0;
	dereg_top.mode = AVCS_MODE_DEREGISTER_ALL_CUSTOM_TOPOLOGIES;

	q6core_lcl.bus_bw_resp_received = 0;

	pr_debug("%s: Deregister topologies mode %d\n",
		 __func__, dereg_top.mode);

	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &dereg_top);
	if (ret < 0) {
		pr_err("%s: Deregister topologies failed %d\n",
		       __func__, ret);
		goto done;
	}

	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
				 (q6core_lcl.bus_bw_resp_received == 1),
				 msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout for Deregister topologies\n",
		       __func__);
		ret = -ETIMEDOUT;
		goto done;
	}
	/* wait_event_timeout() returns remaining jiffies on success */
	ret = 0;

done:
	return ret;
}
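
/*
 * Push the custom-topology calibration blob to the ADSP: deregister any
 * stale topologies, map the calibration memory into the ADSP, send
 * AVCS_CMD_REGISTER_TOPOLOGIES pointing at the mapped payload, then
 * unmap the region once the command has been acknowledged.
 */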
static int q6core_send_custom_topologies(void)
{
	int ret = 0;
	int ret2 = 0;
	struct cal_block_data *cal_block = NULL;
	struct avcs_cmd_register_topologies reg_top;

	if (!q6core_is_adsp_ready()) {
		pr_err("%s: ADSP is not ready!\n", __func__);
		return -ENODEV;
	}

	memset(&reg_top, 0, sizeof(reg_top));

	mutex_lock(&q6core_lcl.cal_data[CUST_TOP_CAL]->lock);
	mutex_lock(&q6core_lcl.cmd_lock);

	cal_block = cal_utils_get_only_cal_block(
			q6core_lcl.cal_data[CUST_TOP_CAL]);
	if (cal_block == NULL) {
		pr_debug("%s: cal block is NULL!\n", __func__);
		goto unlock;
	}
	if (cal_block->cal_data.size <= 0) {
		pr_debug("%s: cal size is %zd not sending\n",
			 __func__, cal_block->cal_data.size);
		goto unlock;
	}

	q6core_dereg_all_custom_topologies();

	ret = q6core_map_memory_regions(&cal_block->cal_data.paddr,
		ADSP_MEMORY_MAP_SHMEM8_4K_POOL,
		(uint32_t *)&cal_block->map_data.map_size, 1,
		&cal_block->map_data.q6map_handle);
	if (ret) {
		pr_err("%s: q6core_map_memory_regions failed\n", __func__);
		goto unlock;
	}

	reg_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	reg_top.hdr.pkt_size = sizeof(reg_top);
	reg_top.hdr.src_svc = APR_SVC_ADSP_CORE;
	reg_top.hdr.src_domain = APR_DOMAIN_APPS;
	reg_top.hdr.src_port = 0;
	reg_top.hdr.dest_svc = APR_SVC_ADSP_CORE;
	reg_top.hdr.dest_domain = APR_DOMAIN_ADSP;
	reg_top.hdr.dest_port = 0;
	reg_top.hdr.token = 0;
	reg_top.hdr.opcode = AVCS_CMD_REGISTER_TOPOLOGIES;
	reg_top.payload_addr_lsw = lower_32_bits(cal_block->cal_data.paddr);
	reg_top.payload_addr_msw =
		msm_audio_populate_upper_32_bits(cal_block->cal_data.paddr);
	reg_top.mem_map_handle = cal_block->map_data.q6map_handle;
	reg_top.payload_size = cal_block->cal_data.size;

	q6core_lcl.adsp_status = 0;
	q6core_lcl.bus_bw_resp_received = 0;

	pr_debug("%s: Register topologies addr %pK, size %zd, map handle %d\n",
		 __func__, &cal_block->cal_data.paddr, cal_block->cal_data.size,
		 cal_block->map_data.q6map_handle);

	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &reg_top);
	if (ret < 0) {
		pr_err("%s: Register topologies failed %d\n",
		       __func__, ret);
		goto unmap;
	}

	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
				 (q6core_lcl.bus_bw_resp_received == 1),
				 msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout for Register topologies\n",
		       __func__);
		ret = -ETIMEDOUT;
		goto unmap;
	}
	/* wait_event_timeout() returns remaining jiffies on success */
	ret = 0;

	if (q6core_lcl.adsp_status < 0)
		ret = q6core_lcl.adsp_status;

unmap:
	ret2 = q6core_memory_unmap_regions(cal_block->map_data.q6map_handle);
	if (ret2) {
		pr_err("%s: q6core_memory_unmap_regions failed for map handle %d\n",
		       __func__, cal_block->map_data.q6map_handle);
		ret = ret2;
		goto unlock;
	}

unlock:
	mutex_unlock(&q6core_lcl.cmd_lock);
	mutex_unlock(&q6core_lcl.cal_data[CUST_TOP_CAL]->lock);

	return ret;
}
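
/* Translate a cal-utils cal type ID into this driver's cal_data[] index. */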
static int get_cal_type_index(int32_t cal_type)
{
	int ret = -EINVAL;

	switch (cal_type) {
	case AUDIO_CORE_METAINFO_CAL_TYPE:
		ret = META_CAL;
		break;
	case CORE_CUSTOM_TOPOLOGIES_CAL_TYPE:
		ret = CUST_TOP_CAL;
		break;
	default:
		pr_err("%s: invalid cal type %d!\n", __func__, cal_type);
	}

	return ret;
}
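
/* cal-utils callback: allocate a calibration block of the given type. */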
static int q6core_alloc_cal(int32_t cal_type,
			    size_t data_size, void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = get_cal_type_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: could not get cal index %d!\n",
		       __func__, cal_index);
		ret = -EINVAL;
		goto done;
	}

	ret = cal_utils_alloc_cal(data_size, data,
				  q6core_lcl.cal_data[cal_index], 0, NULL);
	if (ret < 0) {
		pr_err("%s: cal_utils_alloc_cal failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
		goto done;
	}
done:
	return ret;
}
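
/* cal-utils callback: free a previously allocated calibration block. */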
static int q6core_dealloc_cal(int32_t cal_type,
			      size_t data_size, void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = get_cal_type_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: could not get cal index %d!\n",
		       __func__, cal_index);
		ret = -EINVAL;
		goto done;
	}

	ret = cal_utils_dealloc_cal(data_size, data,
				    q6core_lcl.cal_data[cal_index]);
	if (ret < 0) {
		pr_err("%s: cal_utils_dealloc_cal failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
		goto done;
	}
done:
	return ret;
}
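
/*
 * cal-utils callback: store incoming calibration data and, for the custom
 * topology type, immediately forward it to the ADSP.
 */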
static int q6core_set_cal(int32_t cal_type,
			  size_t data_size, void *data)
{
	int ret = 0;
	int cal_index;

	cal_index = get_cal_type_index(cal_type);
	if (cal_index < 0) {
		pr_err("%s: could not get cal index %d!\n",
		       __func__, cal_index);
		ret = -EINVAL;
		goto done;
	}

	ret = cal_utils_set_cal(data_size, data,
				q6core_lcl.cal_data[cal_index], 0, NULL);
	if (ret < 0) {
		pr_err("%s: cal_utils_set_cal failed, ret = %d, cal type = %d!\n",
		       __func__, ret, cal_type);
		goto done;
	}

	if (cal_index == CUST_TOP_CAL)
		ret = q6core_send_custom_topologies();
done:
	return ret;
}
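
/* Tear down every calibration type created by q6core_init_cal_data(). */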
static void q6core_delete_cal_data(void)
{
	pr_debug("%s:\n", __func__);

	cal_utils_destroy_cal_types(CORE_MAX_CAL, q6core_lcl.cal_data);
}
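
/*
 * Register the metainfo and custom-topology calibration types with
 * cal-utils, wiring in the alloc/dealloc/set callbacks above.
 */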
static int q6core_init_cal_data(void)
{
	int ret = 0;
	struct cal_type_info cal_type_info[] = {
		{{AUDIO_CORE_METAINFO_CAL_TYPE,
		{q6core_alloc_cal, q6core_dealloc_cal, NULL,
		q6core_set_cal, NULL, NULL} },
		{NULL, NULL, cal_utils_match_buf_num} },

		{{CORE_CUSTOM_TOPOLOGIES_CAL_TYPE,
		{q6core_alloc_cal, q6core_dealloc_cal, NULL,
		q6core_set_cal, NULL, NULL} },
		{NULL, NULL, cal_utils_match_buf_num} }
	};

	pr_debug("%s:\n", __func__);

	ret = cal_utils_create_cal_types(CORE_MAX_CAL,
		q6core_lcl.cal_data, cal_type_info);
	if (ret < 0) {
		pr_err("%s: could not create cal type!\n", __func__);
		goto err;
	}

	return ret;
err:
	q6core_delete_cal_data();
	return ret;
}
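
/*
 * Poll q6core_is_adsp_ready() until AVS reports ready or
 * ADSP_STATE_READY_TIMEOUT_MS elapses; the final state is returned via
 * *avs_state and -ETIMEDOUT indicates the ADSP never became ready.
 */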
static int q6core_is_avs_up(int32_t *avs_state)
{
	unsigned long timeout;
	int32_t adsp_ready = 0;
	int ret = 0;

	timeout = jiffies +
		msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);

	/* sleep for 100ms before querying AVS up */
	msleep(100);
	do {
		adsp_ready = q6core_is_adsp_ready();
		pr_debug("%s: ADSP Audio is %s\n", __func__,
			 adsp_ready ? "ready" : "not ready");
		if (adsp_ready)
			break;

		/*
		 * ADSP will be coming up after boot up and AVS might
		 * not be fully up when the control reaches here.
		 * So, wait for 50msec before checking ADSP state again.
		 */
		msleep(50);
	} while (time_after(timeout, jiffies));

	*avs_state = adsp_ready;
	pr_debug("%s: ADSP Audio is %s\n", __func__,
		 adsp_ready ? "ready" : "not ready");

	if (!adsp_ready) {
		pr_err_ratelimited("%s: Timeout. ADSP Audio is not ready\n",
				   __func__);
		ret = -ETIMEDOUT;
	}

	return ret;
}
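
/*
 * SND event callbacks used across subsystem restart (SSR): enable
 * re-queries AVS readiness and caches it, disable clears the cached state.
 */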
static int q6core_ssr_enable(struct device *dev, void *data)
{
	int32_t avs_state = 0;
	int ret = 0;

	if (!dev) {
		pr_err("%s: dev is NULL\n", __func__);
		return -EINVAL;
	}

	if (!q6core_lcl.avs_state) {
		ret = q6core_is_avs_up(&avs_state);
		if (ret < 0)
			goto err;
		q6core_lcl.avs_state = avs_state;
	}

err:
	return ret;
}

static void q6core_ssr_disable(struct device *dev, void *data)
{
	/* Reset AVS state to 0 */
	q6core_lcl.avs_state = 0;
}

static const struct snd_event_ops q6core_ssr_ops = {
	.enable = q6core_ssr_enable,
	.disable = q6core_ssr_disable,
};
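
/*
 * Probe: wait for AVS to come up, populate child devices from the device
 * tree, and register with the SND event framework for SSR notifications.
 * A failed SND-event registration is logged but treated as non-fatal.
 */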
static int q6core_probe(struct platform_device *pdev)
{
	int32_t avs_state = 0;
	int rc = 0;

	rc = q6core_is_avs_up(&avs_state);
	if (rc < 0)
		goto err;
	q6core_lcl.avs_state = avs_state;

	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
			__func__, rc);
		rc = -EINVAL;
		goto err;
	}
	dev_dbg(&pdev->dev, "%s: added child node\n", __func__);

	rc = snd_event_client_register(&pdev->dev, &q6core_ssr_ops, NULL);
	if (!rc) {
		snd_event_notify(&pdev->dev, SND_EVENT_UP);
	} else {
		dev_err(&pdev->dev,
			"%s: Registration with SND event fwk failed rc = %d\n",
			__func__, rc);
		rc = 0;
	}

err:
	return rc;
}
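
/* Remove: deregister from the SND event framework and drop child devices. */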
static int q6core_remove(struct platform_device *pdev)
{
	snd_event_client_deregister(&pdev->dev);
	of_platform_depopulate(&pdev->dev);
	return 0;
}

static const struct of_device_id q6core_of_match[] = {
	{ .compatible = "qcom,q6core-audio", },
	{},
};

static struct platform_driver q6core_driver = {
	.probe = q6core_probe,
	.remove = q6core_remove,
	.driver = {
		.name = "q6core_audio",
		.owner = THIS_MODULE,
		.of_match_table = q6core_of_match,
		.suppress_bind_attrs = true,
	}
};
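
/*
 * Module-level init: set up local state and register the platform driver.
 * There is no module_init() in this file, so core_init()/core_exit() are
 * presumably invoked from the audio stack's shared module entry points.
 */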
int __init core_init(void)
{
	memset(&q6core_lcl, 0, sizeof(struct q6core_str));
	init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
	init_waitqueue_head(&q6core_lcl.cmd_req_wait);
	init_waitqueue_head(&q6core_lcl.avcs_fwk_ver_req_wait);
	init_waitqueue_head(&q6core_lcl.mdf_map_resp_wait);
	init_waitqueue_head(&q6core_lcl.lpass_npa_rsc_wait);
	q6core_lcl.cmd_resp_received_flag = FLAG_NONE;
	mutex_init(&q6core_lcl.cmd_lock);
	mutex_init(&q6core_lcl.ver_lock);

	q6core_init_cal_data();
	q6core_init_uevent_kset();

	return platform_driver_register(&q6core_driver);
}
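
/*
 * Mirror of core_init(): destroy the mutexes and calibration data, remove
 * the uevent kset and unregister the platform driver.
 */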
void core_exit(void)
{
	mutex_destroy(&q6core_lcl.cmd_lock);
	mutex_destroy(&q6core_lcl.ver_lock);
	q6core_delete_cal_data();
	q6core_destroy_uevent_kset();
	platform_driver_unregister(&q6core_driver);
}

MODULE_DESCRIPTION("ADSP core driver");
MODULE_LICENSE("GPL v2");