/*
 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "i_bmi.h"
#include "cds_api.h"

/* APIs visible to the driver */
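
/**
 * bmi_init() - allocate the BMI command and response buffers
 * @ol_ctx: the bmi context
 *
 * Validates the HIF handle and QDF device, clears the bmi_done flag and
 * allocates the DMA-coherent command/response buffers if they do not
 * already exist.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_NOT_INITIALIZED or
 * QDF_STATUS_E_NOMEM on failure.
 */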
QDF_STATUS bmi_init(struct ol_context *ol_ctx)
{
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        struct hif_opaque_softc *scn = ol_ctx->scn;
        qdf_device_t qdf_dev = ol_ctx->qdf_dev;

        if (!scn) {
                BMI_ERR("Invalid scn Context");
                bmi_assert(0);
                return QDF_STATUS_NOT_INITIALIZED;
        }

        if (!qdf_dev->dev) {
                BMI_ERR("%s: Invalid Device Pointer", __func__);
                return QDF_STATUS_NOT_INITIALIZED;
        }

        info->bmi_done = false;

        if (!info->bmi_cmd_buff) {
                info->bmi_cmd_buff =
                        qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
                                                 MAX_BMI_CMDBUF_SZ,
                                                 &info->bmi_cmd_da);
                if (!info->bmi_cmd_buff) {
                        BMI_ERR("No Memory for BMI Command");
                        return QDF_STATUS_E_NOMEM;
                }
        }

        if (!info->bmi_rsp_buff) {
                info->bmi_rsp_buff =
                        qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
                                                 MAX_BMI_CMDBUF_SZ,
                                                 &info->bmi_rsp_da);
                if (!info->bmi_rsp_buff) {
                        BMI_ERR("No Memory for BMI Response");
                        goto end;
                }
        }

        return QDF_STATUS_SUCCESS;

end:
        qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, MAX_BMI_CMDBUF_SZ,
                                info->bmi_cmd_buff, info->bmi_cmd_da, 0);
        info->bmi_cmd_buff = NULL;
        return QDF_STATUS_E_NOMEM;
}
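
/**
 * bmi_cleanup() - free the BMI command and response buffers
 * @ol_ctx: the bmi context
 *
 * Releases the DMA-coherent buffers allocated by bmi_init() and resets
 * the stored buffer pointers and DMA addresses.
 *
 * Return: void
 */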
void bmi_cleanup(struct ol_context *ol_ctx)
{
        struct bmi_info *info;
        qdf_device_t qdf_dev;

        /* validate ol_ctx before dereferencing it for the BMI context */
        if (!ol_ctx) {
                BMI_WARN("%s: no bmi to cleanup", __func__);
                return;
        }

        info = GET_BMI_CONTEXT(ol_ctx);
        if (!info) {
                BMI_WARN("%s: no bmi to cleanup", __func__);
                return;
        }

        qdf_dev = ol_ctx->qdf_dev;
        if (!qdf_dev || !qdf_dev->dev) {
                BMI_ERR("%s: Invalid Device Pointer", __func__);
                return;
        }

        if (info->bmi_cmd_buff) {
                qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
                                        MAX_BMI_CMDBUF_SZ,
                                        info->bmi_cmd_buff,
                                        info->bmi_cmd_da, 0);
                info->bmi_cmd_buff = NULL;
                info->bmi_cmd_da = 0;
        }

        if (info->bmi_rsp_buff) {
                qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
                                        MAX_BMI_CMDBUF_SZ,
                                        info->bmi_rsp_buff,
                                        info->bmi_rsp_da, 0);
                info->bmi_rsp_buff = NULL;
                info->bmi_rsp_da = 0;
        }
}

/**
 * bmi_done() - finish the bmi operation
 * @ol_ctx: the bmi context
 *
 * Performs some sanity checking, exchanges one last message with the
 * firmware and frees the BMI buffers.
 *
 * Return: QDF_STATUS_SUCCESS if bmi isn't needed,
 * QDF_STATUS_SUCCESS if bmi finishes,
 * otherwise a failure status.
 */
QDF_STATUS bmi_done(struct ol_context *ol_ctx)
{
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (NO_BMI)
                return QDF_STATUS_SUCCESS;

        if (!ol_ctx) {
                BMI_ERR("%s: null context", __func__);
                return QDF_STATUS_E_NOMEM;
        }

        hif_claim_device(ol_ctx->scn);

        if (!hif_needs_bmi(ol_ctx->scn))
                return QDF_STATUS_SUCCESS;

        status = bmi_done_local(ol_ctx);
        if (status != QDF_STATUS_SUCCESS)
                BMI_ERR("BMI_DONE Failed status:%d", status);

        return status;
}
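
/**
 * bmi_target_ready() - callback invoked when the target is ready
 * @scn: the HIF context
 * @cfg_ctx: opaque configuration context passed through to ol_target_ready()
 *
 * Return: void
 */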
void bmi_target_ready(struct hif_opaque_softc *scn, void *cfg_ctx)
{
        ol_target_ready(scn, cfg_ctx);
}
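
/**
 * bmi_get_target_info_message_based() - query target info over the BMI
 *                                       message path
 * @targ_info: target info structure to fill in
 * @ol_ctx: the bmi context
 *
 * Sends a BMI_GET_TARGET_INFO command and copies the response into
 * @targ_info. Used on buses that exchange BMI messages (PCI/SNOC/USB).
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */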
static QDF_STATUS
bmi_get_target_info_message_based(struct bmi_target_info *targ_info,
                                  struct ol_context *ol_ctx)
{
        int status = 0;
        struct hif_opaque_softc *scn = ol_ctx->scn;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        uint8_t *bmi_rsp_buff = info->bmi_rsp_buff;
        uint32_t cid, length;
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        if (!bmi_cmd_buff || !bmi_rsp_buff) {
                BMI_ERR("%s:BMI CMD/RSP Buffer is NULL", __func__);
                return QDF_STATUS_NOT_INITIALIZED;
        }

        cid = BMI_GET_TARGET_INFO;
        qdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));
        length = sizeof(struct bmi_target_info);

        status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, sizeof(cid),
                                      (uint8_t *)bmi_rsp_buff, &length,
                                      BMI_EXCHANGE_TIMEOUT_MS);
        if (status) {
                BMI_ERR("Failed to get target info: status:%d", status);
                return QDF_STATUS_E_FAILURE;
        }

        qdf_mem_copy(targ_info, bmi_rsp_buff, length);

        return QDF_STATUS_SUCCESS;
}
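
/**
 * bmi_get_target_info() - query target type and version information
 * @targ_info: target info structure to fill in
 * @ol_ctx: the bmi context
 *
 * Dispatches to the message-based or register-based query depending on
 * the underlying bus type.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */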
QDF_STATUS
bmi_get_target_info(struct bmi_target_info *targ_info,
                    struct ol_context *ol_ctx)
{
        struct hif_opaque_softc *scn = ol_ctx->scn;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        QDF_STATUS status;

        if (info->bmi_done) {
                BMI_ERR("BMI Phase is Already Done");
                return QDF_STATUS_E_PERM;
        }

        switch (hif_get_bus_type(scn)) {
        case QDF_BUS_TYPE_PCI:
        case QDF_BUS_TYPE_SNOC:
        case QDF_BUS_TYPE_USB:
                status = bmi_get_target_info_message_based(targ_info, ol_ctx);
                break;
#ifdef HIF_SDIO
        case QDF_BUS_TYPE_SDIO:
                status = hif_reg_based_get_target_info(scn, targ_info);
                break;
#endif
        default:
                status = QDF_STATUS_E_FAILURE;
                break;
        }

        return status;
}
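
/**
 * bmi_download_firmware() - download the firmware to the target
 * @ol_ctx: the bmi context
 *
 * Registers the BMI callbacks with HIF and starts the firmware download
 * when the target actually requires BMI; a no-op otherwise.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */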
QDF_STATUS bmi_download_firmware(struct ol_context *ol_ctx)
{
        struct hif_opaque_softc *scn;

        if (!ol_ctx) {
                if (NO_BMI) {
                        /* ol_ctx is not allocated in NO_BMI case */
                        return QDF_STATUS_SUCCESS;
                }

                BMI_ERR("ol_ctx is NULL");
                bmi_assert(0);
                return QDF_STATUS_NOT_INITIALIZED;
        }

        scn = ol_ctx->scn;
        if (!scn) {
                BMI_ERR("Invalid scn context");
                bmi_assert(0);
                return QDF_STATUS_NOT_INITIALIZED;
        }

        if (!hif_needs_bmi(scn))
                return QDF_STATUS_SUCCESS;

        hif_register_bmi_callbacks(scn);

        return bmi_firmware_download(ol_ctx);
}
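
/**
 * bmi_read_soc_register() - read a target SoC register over BMI
 * @address: register address to read
 * @param: location to store the value read
 * @ol_ctx: the bmi context
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */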
QDF_STATUS bmi_read_soc_register(uint32_t address, uint32_t *param,
                                 struct ol_context *ol_ctx)
{
        struct hif_opaque_softc *scn = ol_ctx->scn;
        uint32_t cid;
        int status;
        uint32_t offset, param_len;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        uint8_t *bmi_rsp_buff = info->bmi_rsp_buff;
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
        qdf_mem_zero(bmi_cmd_buff, sizeof(cid) + sizeof(address));
        qdf_mem_zero(bmi_rsp_buff, sizeof(cid) + sizeof(address));

        if (info->bmi_done) {
                BMI_DBG("Command disallowed");
                return QDF_STATUS_E_PERM;
        }

        BMI_DBG("BMI Read SOC Register:device: 0x%pK, address: 0x%x",
                scn, address);

        cid = BMI_READ_SOC_REGISTER;

        offset = 0;
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
        offset += sizeof(address);
        param_len = sizeof(*param);

        status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
                                      bmi_rsp_buff, &param_len,
                                      BMI_EXCHANGE_TIMEOUT_MS);
        if (status) {
                BMI_DBG("Unable to read from the device; status:%d", status);
                return QDF_STATUS_E_FAILURE;
        }

        qdf_mem_copy(param, bmi_rsp_buff, sizeof(*param));

        BMI_DBG("BMI Read SOC Register: Exit value: %d", *param);
        return QDF_STATUS_SUCCESS;
}
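
/**
 * bmi_write_soc_register() - write a target SoC register over BMI
 * @address: register address to write
 * @param: value to write
 * @ol_ctx: the bmi context
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */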
QDF_STATUS bmi_write_soc_register(uint32_t address, uint32_t param,
                                  struct ol_context *ol_ctx)
{
        struct hif_opaque_softc *scn = ol_ctx->scn;
        uint32_t cid;
        int status;
        uint32_t offset;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        uint32_t size = sizeof(cid) + sizeof(address) + sizeof(param);
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        bmi_assert(BMI_COMMAND_FITS(size));
        qdf_mem_zero(bmi_cmd_buff, size);

        if (info->bmi_done) {
                BMI_DBG("Command disallowed");
                return QDF_STATUS_E_FAILURE;
        }

        BMI_DBG("SOC Register Write:device:0x%pK, addr:0x%x, param:%d",
                scn, address, param);

        cid = BMI_WRITE_SOC_REGISTER;

        offset = 0;
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
        offset += sizeof(address);
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &param, sizeof(param));
        offset += sizeof(param);

        status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
                                      NULL, NULL, 0);
        if (status) {
                BMI_ERR("Unable to write to the device: status:%d", status);
                return QDF_STATUS_E_FAILURE;
        }

        BMI_DBG("BMI Write SOC Register: Exit");
        return QDF_STATUS_SUCCESS;
}
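
/**
 * bmilz_data() - send data to an open LZ stream
 * @buffer: data to send
 * @length: length of @buffer in bytes
 * @ol_ctx: the bmi context
 *
 * Splits the data into BMI_LZ_DATA commands no larger than the BMI
 * command buffer and sends them one at a time.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */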
static QDF_STATUS
bmilz_data(uint8_t *buffer, uint32_t length, struct ol_context *ol_ctx)
{
        uint32_t cid;
        int status;
        uint32_t offset;
        uint32_t remaining, txlen;
        const uint32_t header = sizeof(cid) + sizeof(length);
        struct hif_opaque_softc *scn = ol_ctx->scn;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
        qdf_mem_zero(bmi_cmd_buff, BMI_DATASZ_MAX + header);

        if (info->bmi_done) {
                BMI_ERR("Command disallowed");
                return QDF_STATUS_E_PERM;
        }

        BMI_DBG("BMI Send LZ Data: device: 0x%pK, length: %d",
                scn, length);

        cid = BMI_LZ_DATA;

        remaining = length;
        while (remaining) {
                txlen = (remaining < (BMI_DATASZ_MAX - header)) ?
                        remaining : (BMI_DATASZ_MAX - header);

                offset = 0;
                qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
                offset += sizeof(cid);
                qdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
                offset += sizeof(txlen);
                qdf_mem_copy(&(bmi_cmd_buff[offset]),
                             &buffer[length - remaining], txlen);
                offset += txlen;

                status = hif_exchange_bmi_msg(scn, cmd, rsp,
                                              bmi_cmd_buff, offset,
                                              NULL, NULL, 0);
                if (status) {
                        BMI_ERR("Failed to write to the device: status:%d",
                                status);
                        return QDF_STATUS_E_FAILURE;
                }

                remaining -= txlen;
        }

        BMI_DBG("BMI LZ Data: Exit");
        return QDF_STATUS_SUCCESS;
}
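
/**
 * bmi_sign_stream_start() - start a signed stream download
 * @address: target address the stream is associated with
 * @buffer: signed stream data
 * @length: length of @buffer in bytes
 * @ol_ctx: the bmi context
 *
 * Sends the data in BMI_SIGN_STREAM_START chunks, padding the last
 * chunk to a 4-byte boundary if needed.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */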
QDF_STATUS bmi_sign_stream_start(uint32_t address, uint8_t *buffer,
                                 uint32_t length, struct ol_context *ol_ctx)
{
        uint32_t cid;
        int status;
        uint32_t offset;
        const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
        uint8_t aligned_buf[BMI_DATASZ_MAX + 4];
        uint8_t *src;
        struct hif_opaque_softc *scn = ol_ctx->scn;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        uint32_t remaining, txlen;
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
        qdf_mem_zero(bmi_cmd_buff, BMI_DATASZ_MAX + header);

        if (info->bmi_done) {
                BMI_ERR("Command disallowed");
                return QDF_STATUS_E_PERM;
        }

        BMI_ERR("Sign Stream start:device:0x%pK, addr:0x%x, length:%d",
                scn, address, length);

        cid = BMI_SIGN_STREAM_START;

        remaining = length;
        while (remaining) {
                src = &buffer[length - remaining];

                if (remaining < (BMI_DATASZ_MAX - header)) {
                        if (remaining & 0x3) {
                                /* pad the last chunk to a 4-byte boundary */
                                memcpy(aligned_buf, src, remaining);
                                remaining = remaining + (4 - (remaining & 0x3));
                                src = aligned_buf;
                        }
                        txlen = remaining;
                } else {
                        txlen = (BMI_DATASZ_MAX - header);
                }

                offset = 0;
                qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
                offset += sizeof(cid);
                qdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
                             sizeof(address));
                offset += sizeof(address);
                qdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
                offset += sizeof(txlen);
                qdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
                offset += txlen;

                status = hif_exchange_bmi_msg(scn, cmd, rsp,
                                              bmi_cmd_buff, offset, NULL,
                                              NULL, BMI_EXCHANGE_TIMEOUT_MS);
                if (status) {
                        BMI_ERR("Unable to write to the device: status:%d",
                                status);
                        return QDF_STATUS_E_FAILURE;
                }

                remaining -= txlen;
        }

        BMI_DBG("BMI SIGN Stream Start: Exit");
        return QDF_STATUS_SUCCESS;
}
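
/**
 * bmilz_stream_start() - send a BMI_LZ_STREAM_START command
 * @address: target address at which the LZ stream is opened
 * @ol_ctx: the bmi context
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */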
static QDF_STATUS
bmilz_stream_start(uint32_t address, struct ol_context *ol_ctx)
{
        uint32_t cid;
        int status;
        uint32_t offset;
        struct hif_opaque_softc *scn = ol_ctx->scn;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
        qdf_mem_zero(bmi_cmd_buff, sizeof(cid) + sizeof(address));

        if (info->bmi_done) {
                BMI_DBG("Command disallowed");
                return QDF_STATUS_E_PERM;
        }

        BMI_DBG("BMI LZ Stream Start: (device: 0x%pK, address: 0x%x)",
                scn, address);

        cid = BMI_LZ_STREAM_START;

        offset = 0;
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
        offset += sizeof(address);

        status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
                                      NULL, NULL, 0);
        if (status) {
                BMI_ERR("Unable to Start LZ Stream to the device status:%d",
                        status);
                return QDF_STATUS_E_FAILURE;
        }

        BMI_DBG("BMI LZ Stream: Exit");
        return QDF_STATUS_SUCCESS;
}
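
/**
 * bmi_fast_download() - download a compressed image over an LZ stream
 * @address: target address to download to
 * @buffer: compressed image
 * @length: length of @buffer in bytes
 * @ol_ctx: the bmi context
 *
 * Opens an LZ stream at @address, sends the word-aligned portion of the
 * image, pads and sends any trailing unaligned bytes, then opens a fake
 * stream at address 0 to flush the target caches.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */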
QDF_STATUS
bmi_fast_download(uint32_t address, uint8_t *buffer,
                  uint32_t length, struct ol_context *ol_ctx)
{
        QDF_STATUS status = QDF_STATUS_E_FAILURE;
        uint32_t last_word = 0;
        uint32_t last_word_offset = length & ~0x3;
        uint32_t unaligned_bytes = length & 0x3;

        status = bmilz_stream_start(address, ol_ctx);
        if (status != QDF_STATUS_SUCCESS)
                goto end;

        /* copy the last word into a zero padded buffer */
        if (unaligned_bytes)
                qdf_mem_copy(&last_word, &buffer[last_word_offset],
                             unaligned_bytes);

        status = bmilz_data(buffer, last_word_offset, ol_ctx);
        if (status != QDF_STATUS_SUCCESS)
                goto end;

        if (unaligned_bytes)
                status = bmilz_data((uint8_t *)&last_word, 4, ol_ctx);

        if (status != QDF_STATUS_SUCCESS)
                goto end;

        /*
         * Close compressed stream and open a new (fake) one.
         * This serves mainly to flush Target caches.
         */
        status = bmilz_stream_start(0x00, ol_ctx);
end:
        return status;
}

/**
 * ol_cds_init() - API to initialize global CDS OL Context
 * @qdf_dev: QDF Device
 * @hif_ctx: HIF Context
 *
 * Return: Success/Failure
 */
QDF_STATUS ol_cds_init(qdf_device_t qdf_dev, void *hif_ctx)
{
        struct ol_context *ol_info;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (NO_BMI)
                return QDF_STATUS_SUCCESS; /* no BMI for Q6 bring up */

        status = cds_alloc_context(QDF_MODULE_ID_BMI,
                                   (void **)&ol_info, sizeof(*ol_info));
        if (status != QDF_STATUS_SUCCESS) {
                BMI_ERR("%s: CDS Allocation failed for ol_bmi context",
                        __func__);
                return status;
        }

        ol_info->qdf_dev = qdf_dev;
        ol_info->scn = hif_ctx;
        ol_info->tgt_def.targetdef = hif_get_targetdef(hif_ctx);

        qdf_create_work(qdf_dev, &ol_info->ramdump_work,
                        ramdump_work_handler, ol_info);
        qdf_create_work(qdf_dev, &ol_info->fw_indication_work,
                        fw_indication_work_handler, ol_info);

        return status;
}

/**
 * ol_cds_free() - API to free the global CDS OL Context
 *
 * Return: void
 */
void ol_cds_free(void)
{
        struct ol_context *ol_info = cds_get_context(QDF_MODULE_ID_BMI);

        if (NO_BMI)
                return;

        cds_free_context(QDF_MODULE_ID_BMI, ol_info);
}