bmi.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582
  1. /*
  2. * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #include "i_bmi.h"
  20. #include "cds_api.h"
  21. /* APIs visible to the driver */
  22. QDF_STATUS bmi_init(struct ol_context *ol_ctx)
  23. {
  24. struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
  25. struct hif_opaque_softc *scn = ol_ctx->scn;
  26. qdf_device_t qdf_dev = ol_ctx->qdf_dev;
  27. if (!scn) {
  28. BMI_ERR("Invalid scn Context");
  29. bmi_assert(0);
  30. return QDF_STATUS_NOT_INITIALIZED;
  31. }
  32. if (!qdf_dev->dev) {
  33. BMI_ERR("%s: Invalid Device Pointer", __func__);
  34. return QDF_STATUS_NOT_INITIALIZED;
  35. }
  36. info->bmi_done = false;
  37. if (!info->bmi_cmd_buff) {
  38. info->bmi_cmd_buff =
  39. qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
  40. MAX_BMI_CMDBUF_SZ,
  41. &info->bmi_cmd_da);
  42. if (!info->bmi_cmd_buff) {
  43. BMI_ERR("No Memory for BMI Command");
  44. return QDF_STATUS_E_NOMEM;
  45. }
  46. }
  47. if (!info->bmi_rsp_buff) {
  48. info->bmi_rsp_buff =
  49. qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
  50. MAX_BMI_CMDBUF_SZ,
  51. &info->bmi_rsp_da);
  52. if (!info->bmi_rsp_buff) {
  53. BMI_ERR("No Memory for BMI Response");
  54. goto end;
  55. }
  56. }
  57. return QDF_STATUS_SUCCESS;
  58. end:
  59. qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, MAX_BMI_CMDBUF_SZ,
  60. info->bmi_cmd_buff, info->bmi_cmd_da, 0);
  61. info->bmi_cmd_buff = NULL;
  62. return QDF_STATUS_E_NOMEM;
  63. }
  64. void bmi_cleanup(struct ol_context *ol_ctx)
  65. {
  66. struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
  67. qdf_device_t qdf_dev;
  68. if (!info || !ol_ctx) {
  69. BMI_WARN("%s: no bmi to cleanup", __func__);
  70. return;
  71. }
  72. qdf_dev = ol_ctx->qdf_dev;
  73. if (!qdf_dev || !qdf_dev->dev) {
  74. BMI_ERR("%s: Invalid Device Pointer", __func__);
  75. return;
  76. }
  77. if (info->bmi_cmd_buff) {
  78. qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
  79. MAX_BMI_CMDBUF_SZ,
  80. info->bmi_cmd_buff, info->bmi_cmd_da, 0);
  81. info->bmi_cmd_buff = NULL;
  82. info->bmi_cmd_da = 0;
  83. }
  84. if (info->bmi_rsp_buff) {
  85. qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
  86. MAX_BMI_CMDBUF_SZ,
  87. info->bmi_rsp_buff, info->bmi_rsp_da, 0);
  88. info->bmi_rsp_buff = NULL;
  89. info->bmi_rsp_da = 0;
  90. }
  91. }
  92. /**
  93. * bmi_done() - finish the bmi operation
  94. * @ol_ctx: the bmi context
  95. *
  96. * does some sanity checking.
  97. * exchanges one last message with firmware.
  98. * frees some buffers.
  99. *
  100. * Return: QDF_STATUS_SUCCESS if bmi isn't needed.
  101. * QDF_STATUS_SUCCESS if bmi finishes.
  102. * otherwise returns failure.
  103. */
  104. QDF_STATUS bmi_done(struct ol_context *ol_ctx)
  105. {
  106. QDF_STATUS status = QDF_STATUS_SUCCESS;
  107. if (NO_BMI)
  108. return QDF_STATUS_SUCCESS;
  109. if (!ol_ctx) {
  110. BMI_ERR("%s: null context", __func__);
  111. return QDF_STATUS_E_NOMEM;
  112. }
  113. hif_claim_device(ol_ctx->scn);
  114. if (!hif_needs_bmi(ol_ctx->scn))
  115. return QDF_STATUS_SUCCESS;
  116. status = bmi_done_local(ol_ctx);
  117. if (status != QDF_STATUS_SUCCESS)
  118. BMI_ERR("BMI_DONE Failed status:%d", status);
  119. return status;
  120. }
/**
 * bmi_target_ready() - forward the target-ready notification to the OL layer
 * @scn: opaque HIF handle
 * @cfg_ctx: configuration context passed through unchanged
 *
 * Thin pass-through to ol_target_ready(); exists so HIF can call into
 * the BMI layer without knowing about OL internals.
 */
void bmi_target_ready(struct hif_opaque_softc *scn, void *cfg_ctx)
{
	ol_target_ready(scn, cfg_ctx);
}
  125. static QDF_STATUS
  126. bmi_get_target_info_message_based(struct bmi_target_info *targ_info,
  127. struct ol_context *ol_ctx)
  128. {
  129. int status = 0;
  130. struct hif_opaque_softc *scn = ol_ctx->scn;
  131. struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
  132. uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
  133. uint8_t *bmi_rsp_buff = info->bmi_rsp_buff;
  134. uint32_t cid, length;
  135. qdf_dma_addr_t cmd = info->bmi_cmd_da;
  136. qdf_dma_addr_t rsp = info->bmi_rsp_da;
  137. if (!bmi_cmd_buff || !bmi_rsp_buff) {
  138. BMI_ERR("%s:BMI CMD/RSP Buffer is NULL", __func__);
  139. return QDF_STATUS_NOT_INITIALIZED;
  140. }
  141. cid = BMI_GET_TARGET_INFO;
  142. qdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));
  143. length = sizeof(struct bmi_target_info);
  144. status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, sizeof(cid),
  145. (uint8_t *)bmi_rsp_buff, &length,
  146. BMI_EXCHANGE_TIMEOUT_MS);
  147. if (status) {
  148. BMI_ERR("Failed to target info: status:%d", status);
  149. return QDF_STATUS_E_FAILURE;
  150. }
  151. qdf_mem_copy(targ_info, bmi_rsp_buff, length);
  152. return QDF_STATUS_SUCCESS;
  153. }
  154. QDF_STATUS
  155. bmi_get_target_info(struct bmi_target_info *targ_info,
  156. struct ol_context *ol_ctx)
  157. {
  158. struct hif_opaque_softc *scn = ol_ctx->scn;
  159. struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
  160. QDF_STATUS status;
  161. if (info->bmi_done) {
  162. BMI_ERR("BMI Phase is Already Done");
  163. return QDF_STATUS_E_PERM;
  164. }
  165. switch (hif_get_bus_type(scn)) {
  166. case QDF_BUS_TYPE_PCI:
  167. case QDF_BUS_TYPE_SNOC:
  168. case QDF_BUS_TYPE_USB:
  169. status = bmi_get_target_info_message_based(targ_info, ol_ctx);
  170. break;
  171. #ifdef HIF_SDIO
  172. case QDF_BUS_TYPE_SDIO:
  173. status = hif_reg_based_get_target_info(scn, targ_info);
  174. break;
  175. #endif
  176. default:
  177. status = QDF_STATUS_E_FAILURE;
  178. break;
  179. }
  180. return status;
  181. }
  182. QDF_STATUS bmi_download_firmware(struct ol_context *ol_ctx)
  183. {
  184. struct hif_opaque_softc *scn;
  185. if (!ol_ctx) {
  186. if (NO_BMI) {
  187. /* ol_ctx is not allocated in NO_BMI case */
  188. return QDF_STATUS_SUCCESS;
  189. }
  190. BMI_ERR("ol_ctx is NULL");
  191. bmi_assert(0);
  192. return QDF_STATUS_NOT_INITIALIZED;
  193. }
  194. scn = ol_ctx->scn;
  195. if (!scn) {
  196. BMI_ERR("Invalid scn context");
  197. bmi_assert(0);
  198. return QDF_STATUS_NOT_INITIALIZED;
  199. }
  200. if (!hif_needs_bmi(scn))
  201. return QDF_STATUS_SUCCESS;
  202. else
  203. hif_register_bmi_callbacks(scn);
  204. return bmi_firmware_download(ol_ctx);
  205. }
  206. QDF_STATUS bmi_read_soc_register(uint32_t address, uint32_t *param,
  207. struct ol_context *ol_ctx)
  208. {
  209. struct hif_opaque_softc *scn = ol_ctx->scn;
  210. uint32_t cid;
  211. int status;
  212. uint32_t offset, param_len;
  213. struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
  214. uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
  215. uint8_t *bmi_rsp_buff = info->bmi_rsp_buff;
  216. qdf_dma_addr_t cmd = info->bmi_cmd_da;
  217. qdf_dma_addr_t rsp = info->bmi_rsp_da;
  218. bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
  219. qdf_mem_zero(bmi_cmd_buff, sizeof(cid) + sizeof(address));
  220. qdf_mem_zero(bmi_rsp_buff, sizeof(cid) + sizeof(address));
  221. if (info->bmi_done) {
  222. BMI_DBG("Command disallowed");
  223. return QDF_STATUS_E_PERM;
  224. }
  225. BMI_DBG("BMI Read SOC Register:device: 0x%pK, address: 0x%x",
  226. scn, address);
  227. cid = BMI_READ_SOC_REGISTER;
  228. offset = 0;
  229. qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
  230. offset += sizeof(cid);
  231. qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
  232. offset += sizeof(address);
  233. param_len = sizeof(*param);
  234. status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
  235. bmi_rsp_buff, &param_len, BMI_EXCHANGE_TIMEOUT_MS);
  236. if (status) {
  237. BMI_DBG("Unable to read from the device; status:%d", status);
  238. return QDF_STATUS_E_FAILURE;
  239. }
  240. qdf_mem_copy(param, bmi_rsp_buff, sizeof(*param));
  241. BMI_DBG("BMI Read SOC Register: Exit value: %d", *param);
  242. return QDF_STATUS_SUCCESS;
  243. }
  244. QDF_STATUS bmi_write_soc_register(uint32_t address, uint32_t param,
  245. struct ol_context *ol_ctx)
  246. {
  247. struct hif_opaque_softc *scn = ol_ctx->scn;
  248. uint32_t cid;
  249. int status;
  250. uint32_t offset;
  251. struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
  252. uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
  253. uint32_t size = sizeof(cid) + sizeof(address) + sizeof(param);
  254. qdf_dma_addr_t cmd = info->bmi_cmd_da;
  255. qdf_dma_addr_t rsp = info->bmi_rsp_da;
  256. bmi_assert(BMI_COMMAND_FITS(size));
  257. qdf_mem_zero(bmi_cmd_buff, size);
  258. if (info->bmi_done) {
  259. BMI_DBG("Command disallowed");
  260. return QDF_STATUS_E_FAILURE;
  261. }
  262. BMI_DBG("SOC Register Write:device:0x%pK, addr:0x%x, param:%d",
  263. scn, address, param);
  264. cid = BMI_WRITE_SOC_REGISTER;
  265. offset = 0;
  266. qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
  267. offset += sizeof(cid);
  268. qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
  269. offset += sizeof(address);
  270. qdf_mem_copy(&(bmi_cmd_buff[offset]), &param, sizeof(param));
  271. offset += sizeof(param);
  272. status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
  273. NULL, NULL, 0);
  274. if (status) {
  275. BMI_ERR("Unable to write to the device: status:%d", status);
  276. return QDF_STATUS_E_FAILURE;
  277. }
  278. BMI_DBG("BMI Read SOC Register: Exit");
  279. return QDF_STATUS_SUCCESS;
  280. }
  281. static QDF_STATUS
  282. bmilz_data(uint8_t *buffer, uint32_t length, struct ol_context *ol_ctx)
  283. {
  284. uint32_t cid;
  285. int status;
  286. uint32_t offset;
  287. uint32_t remaining, txlen;
  288. const uint32_t header = sizeof(cid) + sizeof(length);
  289. struct hif_opaque_softc *scn = ol_ctx->scn;
  290. struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
  291. uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
  292. qdf_dma_addr_t cmd = info->bmi_cmd_da;
  293. qdf_dma_addr_t rsp = info->bmi_rsp_da;
  294. bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
  295. qdf_mem_zero(bmi_cmd_buff, BMI_DATASZ_MAX + header);
  296. if (info->bmi_done) {
  297. BMI_ERR("Command disallowed");
  298. return QDF_STATUS_E_PERM;
  299. }
  300. BMI_DBG("BMI Send LZ Data: device: 0x%pK, length: %d",
  301. scn, length);
  302. cid = BMI_LZ_DATA;
  303. remaining = length;
  304. while (remaining) {
  305. txlen = (remaining < (BMI_DATASZ_MAX - header)) ?
  306. remaining : (BMI_DATASZ_MAX - header);
  307. offset = 0;
  308. qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
  309. offset += sizeof(cid);
  310. qdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
  311. offset += sizeof(txlen);
  312. qdf_mem_copy(&(bmi_cmd_buff[offset]),
  313. &buffer[length - remaining], txlen);
  314. offset += txlen;
  315. status = hif_exchange_bmi_msg(scn, cmd, rsp,
  316. bmi_cmd_buff, offset,
  317. NULL, NULL, 0);
  318. if (status) {
  319. BMI_ERR("Failed to write to the device: status:%d",
  320. status);
  321. return QDF_STATUS_E_FAILURE;
  322. }
  323. remaining -= txlen;
  324. }
  325. BMI_DBG("BMI LZ Data: Exit");
  326. return QDF_STATUS_SUCCESS;
  327. }
  328. QDF_STATUS bmi_sign_stream_start(uint32_t address, uint8_t *buffer,
  329. uint32_t length, struct ol_context *ol_ctx)
  330. {
  331. uint32_t cid;
  332. int status;
  333. uint32_t offset;
  334. const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
  335. uint8_t aligned_buf[BMI_DATASZ_MAX + 4];
  336. uint8_t *src;
  337. struct hif_opaque_softc *scn = ol_ctx->scn;
  338. struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
  339. uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
  340. uint32_t remaining, txlen;
  341. qdf_dma_addr_t cmd = info->bmi_cmd_da;
  342. qdf_dma_addr_t rsp = info->bmi_rsp_da;
  343. bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
  344. qdf_mem_zero(bmi_cmd_buff, BMI_DATASZ_MAX + header);
  345. if (info->bmi_done) {
  346. BMI_ERR("Command disallowed");
  347. return QDF_STATUS_E_PERM;
  348. }
  349. BMI_ERR("Sign Stream start:device:0x%pK, addr:0x%x, length:%d",
  350. scn, address, length);
  351. cid = BMI_SIGN_STREAM_START;
  352. remaining = length;
  353. while (remaining) {
  354. src = &buffer[length - remaining];
  355. if (remaining < (BMI_DATASZ_MAX - header)) {
  356. if (remaining & 0x3) {
  357. memcpy(aligned_buf, src, remaining);
  358. remaining = remaining + (4 - (remaining & 0x3));
  359. src = aligned_buf;
  360. }
  361. txlen = remaining;
  362. } else {
  363. txlen = (BMI_DATASZ_MAX - header);
  364. }
  365. offset = 0;
  366. qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
  367. offset += sizeof(cid);
  368. qdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
  369. sizeof(address));
  370. offset += sizeof(offset);
  371. qdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
  372. offset += sizeof(txlen);
  373. qdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
  374. offset += txlen;
  375. status = hif_exchange_bmi_msg(scn, cmd, rsp,
  376. bmi_cmd_buff, offset, NULL,
  377. NULL, BMI_EXCHANGE_TIMEOUT_MS);
  378. if (status) {
  379. BMI_ERR("Unable to write to the device: status:%d",
  380. status);
  381. return QDF_STATUS_E_FAILURE;
  382. }
  383. remaining -= txlen;
  384. }
  385. BMI_DBG("BMI SIGN Stream Start: Exit");
  386. return QDF_STATUS_SUCCESS;
  387. }
  388. static QDF_STATUS
  389. bmilz_stream_start(uint32_t address, struct ol_context *ol_ctx)
  390. {
  391. uint32_t cid;
  392. int status;
  393. uint32_t offset;
  394. struct hif_opaque_softc *scn = ol_ctx->scn;
  395. struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
  396. uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
  397. qdf_dma_addr_t cmd = info->bmi_cmd_da;
  398. qdf_dma_addr_t rsp = info->bmi_rsp_da;
  399. bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
  400. qdf_mem_zero(bmi_cmd_buff, sizeof(cid) + sizeof(address));
  401. if (info->bmi_done) {
  402. BMI_DBG("Command disallowed");
  403. return QDF_STATUS_E_PERM;
  404. }
  405. BMI_DBG("BMI LZ Stream Start: (device: 0x%pK, address: 0x%x)",
  406. scn, address);
  407. cid = BMI_LZ_STREAM_START;
  408. offset = 0;
  409. qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
  410. offset += sizeof(cid);
  411. qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
  412. offset += sizeof(address);
  413. status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
  414. NULL, NULL, 0);
  415. if (status) {
  416. BMI_ERR("Unable to Start LZ Stream to the device status:%d",
  417. status);
  418. return QDF_STATUS_E_FAILURE;
  419. }
  420. BMI_DBG("BMI LZ Stream: Exit");
  421. return QDF_STATUS_SUCCESS;
  422. }
  423. QDF_STATUS
  424. bmi_fast_download(uint32_t address, uint8_t *buffer,
  425. uint32_t length, struct ol_context *ol_ctx)
  426. {
  427. QDF_STATUS status = QDF_STATUS_E_FAILURE;
  428. uint32_t last_word = 0;
  429. uint32_t last_word_offset = length & ~0x3;
  430. uint32_t unaligned_bytes = length & 0x3;
  431. status = bmilz_stream_start(address, ol_ctx);
  432. if (status != QDF_STATUS_SUCCESS)
  433. goto end;
  434. /* copy the last word into a zero padded buffer */
  435. if (unaligned_bytes)
  436. qdf_mem_copy(&last_word, &buffer[last_word_offset],
  437. unaligned_bytes);
  438. status = bmilz_data(buffer, last_word_offset, ol_ctx);
  439. if (status != QDF_STATUS_SUCCESS)
  440. goto end;
  441. if (unaligned_bytes)
  442. status = bmilz_data((uint8_t *) &last_word, 4, ol_ctx);
  443. if (status != QDF_STATUS_SUCCESS)
  444. /*
  445. * Close compressed stream and open a new (fake) one.
  446. * This serves mainly to flush Target caches.
  447. */
  448. status = bmilz_stream_start(0x00, ol_ctx);
  449. end:
  450. return status;
  451. }
  452. /**
  453. * ol_cds_init() - API to initialize global CDS OL Context
  454. * @qdf_dev: QDF Device
  455. * @hif_ctx: HIF Context
  456. *
  457. * Return: Success/Failure
  458. */
  459. QDF_STATUS ol_cds_init(qdf_device_t qdf_dev, void *hif_ctx)
  460. {
  461. struct ol_context *ol_info;
  462. QDF_STATUS status = QDF_STATUS_SUCCESS;
  463. if (NO_BMI)
  464. return QDF_STATUS_SUCCESS; /* no BMI for Q6 bring up */
  465. status = cds_alloc_context(QDF_MODULE_ID_BMI,
  466. (void **)&ol_info, sizeof(*ol_info));
  467. if (status != QDF_STATUS_SUCCESS) {
  468. BMI_ERR("%s: CDS Allocation failed for ol_bmi context",
  469. __func__);
  470. return status;
  471. }
  472. ol_info->qdf_dev = qdf_dev;
  473. ol_info->scn = hif_ctx;
  474. ol_info->tgt_def.targetdef = hif_get_targetdef(hif_ctx);
  475. qdf_create_work(qdf_dev, &ol_info->ramdump_work,
  476. ramdump_work_handler, ol_info);
  477. qdf_create_work(qdf_dev, &ol_info->fw_indication_work,
  478. fw_indication_work_handler, ol_info);
  479. qdf_wake_lock_create(&ol_info->fw_dl_wakelock,
  480. "fw_download_wakelock");
  481. return status;
  482. }
  483. /**
  484. * ol_cds_free() - API to free the global CDS OL Context
  485. *
  486. * Return: void
  487. */
  488. void ol_cds_free(void)
  489. {
  490. struct ol_context *ol_info = cds_get_context(QDF_MODULE_ID_BMI);
  491. if (NO_BMI)
  492. return;
  493. qdf_wake_lock_destroy(&ol_info->fw_dl_wakelock);
  494. cds_free_context(QDF_MODULE_ID_BMI, ol_info);
  495. }