bmi.c
/*
 * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include "i_bmi.h"
#include "cds_api.h"

/* APIs visible to the driver */
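
/**
 * bmi_init() - allocate the BMI command and response buffers
 * @ol_ctx: the BMI context
 *
 * Allocates the DMA-coherent buffers used to exchange BMI messages with
 * the target. The buffers are released again by bmi_cleanup().
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_NOT_INITIALIZED or QDF_STATUS_E_NOMEM on failure.
 */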
QDF_STATUS bmi_init(struct ol_context *ol_ctx)
{
	struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
	struct hif_opaque_softc *scn = ol_ctx->scn;
	qdf_device_t qdf_dev = ol_ctx->qdf_dev;

	if (!scn) {
		BMI_ERR("Invalid scn Context");
		bmi_assert(0);
		return QDF_STATUS_NOT_INITIALIZED;
	}

	if (!qdf_dev->dev) {
		BMI_ERR("%s: Invalid Device Pointer", __func__);
		return QDF_STATUS_NOT_INITIALIZED;
	}

	info->bmi_done = false;

	if (!info->bmi_cmd_buff) {
		info->bmi_cmd_buff =
			qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
						 MAX_BMI_CMDBUF_SZ,
						 &info->bmi_cmd_da);
		if (!info->bmi_cmd_buff) {
			BMI_ERR("No Memory for BMI Command");
			return QDF_STATUS_E_NOMEM;
		}
	}

	if (!info->bmi_rsp_buff) {
		info->bmi_rsp_buff =
			qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
						 MAX_BMI_CMDBUF_SZ,
						 &info->bmi_rsp_da);
		if (!info->bmi_rsp_buff) {
			BMI_ERR("No Memory for BMI Response");
			goto end;
		}
	}

	return QDF_STATUS_SUCCESS;

end:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, MAX_BMI_CMDBUF_SZ,
				info->bmi_cmd_buff, info->bmi_cmd_da, 0);
	info->bmi_cmd_buff = NULL;

	return QDF_STATUS_E_NOMEM;
}
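
/**
 * bmi_cleanup() - free the BMI command and response buffers
 * @ol_ctx: the BMI context
 *
 * Releases the DMA-coherent buffers allocated by bmi_init(). Safe to
 * call even if no buffers were allocated.
 *
 * Return: void
 */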
void bmi_cleanup(struct ol_context *ol_ctx)
{
	struct bmi_info *info;
	qdf_device_t qdf_dev;

	if (!ol_ctx) {
		BMI_WARN("%s: no bmi to cleanup", __func__);
		return;
	}

	info = GET_BMI_CONTEXT(ol_ctx);
	if (!info) {
		BMI_WARN("%s: no bmi to cleanup", __func__);
		return;
	}

	qdf_dev = ol_ctx->qdf_dev;
	if (!qdf_dev || !qdf_dev->dev) {
		BMI_ERR("%s: Invalid Device Pointer", __func__);
		return;
	}

	if (info->bmi_cmd_buff) {
		qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
					MAX_BMI_CMDBUF_SZ,
					info->bmi_cmd_buff, info->bmi_cmd_da, 0);
		info->bmi_cmd_buff = NULL;
		info->bmi_cmd_da = 0;
	}

	if (info->bmi_rsp_buff) {
		qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
					MAX_BMI_CMDBUF_SZ,
					info->bmi_rsp_buff, info->bmi_rsp_da, 0);
		info->bmi_rsp_buff = NULL;
		info->bmi_rsp_da = 0;
	}
}
/**
 * bmi_done() - finish the BMI operation
 * @ol_ctx: the BMI context
 *
 * Performs some sanity checks, exchanges one last message with the
 * firmware, and frees the BMI buffers.
 *
 * Return: QDF_STATUS_SUCCESS if BMI is not needed or finishes cleanly,
 *	   otherwise a failure status.
 */
QDF_STATUS bmi_done(struct ol_context *ol_ctx)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (NO_BMI)
		return QDF_STATUS_SUCCESS;

	if (!ol_ctx) {
		BMI_ERR("%s: null context", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	hif_claim_device(ol_ctx->scn);

	if (!hif_needs_bmi(ol_ctx->scn))
		return QDF_STATUS_SUCCESS;

	status = bmi_done_local(ol_ctx);
	if (status != QDF_STATUS_SUCCESS)
		BMI_ERR("BMI_DONE Failed status:%d", status);

	return status;
}
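
/**
 * bmi_target_ready() - notification that the target is ready
 * @scn: the HIF context
 * @cfg_ctx: the configuration context
 *
 * Forwards the notification to ol_target_ready().
 *
 * Return: void
 */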
void bmi_target_ready(struct hif_opaque_softc *scn, void *cfg_ctx)
{
	ol_target_ready(scn, cfg_ctx);
}
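
/**
 * bmi_get_target_info_message_based() - fetch target info via BMI messages
 * @targ_info: buffer that receives the target information
 * @ol_ctx: the BMI context
 *
 * Sends a BMI_GET_TARGET_INFO command and copies the response into
 * @targ_info. Used on buses that exchange BMI messages (PCI, SNOC, USB).
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */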
static QDF_STATUS
bmi_get_target_info_message_based(struct bmi_target_info *targ_info,
				  struct ol_context *ol_ctx)
{
	int status = 0;
	struct hif_opaque_softc *scn = ol_ctx->scn;
	struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
	uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
	uint8_t *bmi_rsp_buff = info->bmi_rsp_buff;
	uint32_t cid, length;
	qdf_dma_addr_t cmd = info->bmi_cmd_da;
	qdf_dma_addr_t rsp = info->bmi_rsp_da;

	if (!bmi_cmd_buff || !bmi_rsp_buff) {
		BMI_ERR("%s: BMI CMD/RSP Buffer is NULL", __func__);
		return QDF_STATUS_NOT_INITIALIZED;
	}

	cid = BMI_GET_TARGET_INFO;

	qdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));
	length = sizeof(struct bmi_target_info);

	status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, sizeof(cid),
				      (uint8_t *)bmi_rsp_buff, &length,
				      BMI_EXCHANGE_TIMEOUT_MS);
	if (status) {
		BMI_ERR("Failed to get target info: status:%d", status);
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_copy(targ_info, bmi_rsp_buff, length);

	return QDF_STATUS_SUCCESS;
}
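
/**
 * bmi_get_target_info() - fetch target information from the device
 * @targ_info: buffer that receives the target information
 * @ol_ctx: the BMI context
 *
 * Dispatches to a message-based or register-based query depending on
 * the underlying bus type.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */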
QDF_STATUS
bmi_get_target_info(struct bmi_target_info *targ_info,
		    struct ol_context *ol_ctx)
{
	struct hif_opaque_softc *scn = ol_ctx->scn;
	struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
	QDF_STATUS status;

	if (info->bmi_done) {
		BMI_ERR("BMI Phase is Already Done");
		return QDF_STATUS_E_PERM;
	}

	switch (hif_get_bus_type(scn)) {
	case QDF_BUS_TYPE_PCI:
	case QDF_BUS_TYPE_SNOC:
	case QDF_BUS_TYPE_USB:
		status = bmi_get_target_info_message_based(targ_info, ol_ctx);
		break;
#ifdef HIF_SDIO
	case QDF_BUS_TYPE_SDIO:
		status = hif_reg_based_get_target_info(scn, targ_info);
		break;
#endif
	default:
		status = QDF_STATUS_E_FAILURE;
		break;
	}

	return status;
}
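
/**
 * bmi_download_firmware() - download firmware to the target
 * @ol_ctx: the BMI context
 *
 * A no-op when BMI is not needed; otherwise hands off to
 * bmi_firmware_download().
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */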
QDF_STATUS bmi_download_firmware(struct ol_context *ol_ctx)
{
	struct hif_opaque_softc *scn;

	if (!ol_ctx) {
		if (NO_BMI) {
			/* ol_ctx is not allocated in NO_BMI case */
			return QDF_STATUS_SUCCESS;
		}

		BMI_ERR("ol_ctx is NULL");
		bmi_assert(0);
		return QDF_STATUS_NOT_INITIALIZED;
	}

	scn = ol_ctx->scn;
	if (!scn) {
		BMI_ERR("Invalid scn context");
		bmi_assert(0);
		return QDF_STATUS_NOT_INITIALIZED;
	}

	if (!hif_needs_bmi(scn))
		return QDF_STATUS_SUCCESS;

	return bmi_firmware_download(ol_ctx);
}
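
/**
 * bmi_read_soc_register() - read a target SoC register over BMI
 * @address: register address to read
 * @param: location that receives the register value
 * @ol_ctx: the BMI context
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */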
QDF_STATUS bmi_read_soc_register(uint32_t address, uint32_t *param,
				 struct ol_context *ol_ctx)
{
	struct hif_opaque_softc *scn = ol_ctx->scn;
	uint32_t cid;
	int status;
	uint32_t offset, param_len;
	struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
	uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
	uint8_t *bmi_rsp_buff = info->bmi_rsp_buff;
	qdf_dma_addr_t cmd = info->bmi_cmd_da;
	qdf_dma_addr_t rsp = info->bmi_rsp_da;

	bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
	qdf_mem_set(bmi_cmd_buff, 0, sizeof(cid) + sizeof(address));
	qdf_mem_set(bmi_rsp_buff, 0, sizeof(cid) + sizeof(address));

	if (info->bmi_done) {
		BMI_DBG("Command disallowed");
		return QDF_STATUS_E_PERM;
	}

	BMI_DBG("BMI Read SOC Register:device: 0x%pK, address: 0x%x",
		scn, address);

	cid = BMI_READ_SOC_REGISTER;

	offset = 0;
	qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
	offset += sizeof(address);
	param_len = sizeof(*param);

	status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
				      bmi_rsp_buff, &param_len,
				      BMI_EXCHANGE_TIMEOUT_MS);
	if (status) {
		BMI_DBG("Unable to read from the device; status:%d", status);
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_copy(param, bmi_rsp_buff, sizeof(*param));

	BMI_DBG("BMI Read SOC Register: Exit value: %d", *param);
	return QDF_STATUS_SUCCESS;
}
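
/**
 * bmi_write_soc_register() - write a target SoC register over BMI
 * @address: register address to write
 * @param: value to write
 * @ol_ctx: the BMI context
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */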
QDF_STATUS bmi_write_soc_register(uint32_t address, uint32_t param,
				  struct ol_context *ol_ctx)
{
	struct hif_opaque_softc *scn = ol_ctx->scn;
	uint32_t cid;
	int status;
	uint32_t offset;
	struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
	uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
	uint32_t size = sizeof(cid) + sizeof(address) + sizeof(param);
	qdf_dma_addr_t cmd = info->bmi_cmd_da;
	qdf_dma_addr_t rsp = info->bmi_rsp_da;

	bmi_assert(BMI_COMMAND_FITS(size));
	qdf_mem_set(bmi_cmd_buff, 0, size);

	if (info->bmi_done) {
		BMI_DBG("Command disallowed");
		return QDF_STATUS_E_FAILURE;
	}

	BMI_DBG("SOC Register Write:device:0x%pK, addr:0x%x, param:%d",
		scn, address, param);

	cid = BMI_WRITE_SOC_REGISTER;

	offset = 0;
	qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
	offset += sizeof(address);
	qdf_mem_copy(&(bmi_cmd_buff[offset]), &param, sizeof(param));
	offset += sizeof(param);

	status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
				      NULL, NULL, 0);
	if (status) {
		BMI_ERR("Unable to write to the device: status:%d", status);
		return QDF_STATUS_E_FAILURE;
	}

	BMI_DBG("BMI Write SOC Register: Exit");
	return QDF_STATUS_SUCCESS;
}
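
/**
 * bmilz_data() - send LZ-compressed data to the target
 * @buffer: compressed data to send
 * @length: length of @buffer in bytes
 * @ol_ctx: the BMI context
 *
 * Splits the buffer into chunks that fit in a BMI command and sends
 * each chunk as a BMI_LZ_DATA message.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */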
static QDF_STATUS
bmilz_data(uint8_t *buffer, uint32_t length, struct ol_context *ol_ctx)
{
	uint32_t cid;
	int status;
	uint32_t offset;
	uint32_t remaining, txlen;
	const uint32_t header = sizeof(cid) + sizeof(length);
	struct hif_opaque_softc *scn = ol_ctx->scn;
	struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
	uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
	qdf_dma_addr_t cmd = info->bmi_cmd_da;
	qdf_dma_addr_t rsp = info->bmi_rsp_da;

	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
	qdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);

	if (info->bmi_done) {
		BMI_ERR("Command disallowed");
		return QDF_STATUS_E_PERM;
	}

	BMI_DBG("BMI Send LZ Data: device: 0x%pK, length: %d",
		scn, length);

	cid = BMI_LZ_DATA;

	remaining = length;
	while (remaining) {
		txlen = (remaining < (BMI_DATASZ_MAX - header)) ?
			remaining : (BMI_DATASZ_MAX - header);
		offset = 0;
		qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		qdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
		offset += sizeof(txlen);
		qdf_mem_copy(&(bmi_cmd_buff[offset]),
			     &buffer[length - remaining], txlen);
		offset += txlen;

		status = hif_exchange_bmi_msg(scn, cmd, rsp,
					      bmi_cmd_buff, offset,
					      NULL, NULL, 0);
		if (status) {
			BMI_ERR("Failed to write to the device: status:%d",
				status);
			return QDF_STATUS_E_FAILURE;
		}
		remaining -= txlen;
	}

	BMI_DBG("BMI LZ Data: Exit");

	return QDF_STATUS_SUCCESS;
}
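
/**
 * bmi_sign_stream_start() - send a signed stream to the target
 * @address: target address associated with the stream
 * @buffer: signed stream data
 * @length: length of @buffer in bytes
 * @ol_ctx: the BMI context
 *
 * Sends the data in BMI_SIGN_STREAM_START commands, padding the final
 * chunk to a 4-byte boundary.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */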
QDF_STATUS bmi_sign_stream_start(uint32_t address, uint8_t *buffer,
				 uint32_t length, struct ol_context *ol_ctx)
{
	uint32_t cid;
	int status;
	uint32_t offset;
	const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
	uint8_t aligned_buf[BMI_DATASZ_MAX + 4];
	uint8_t *src;
	struct hif_opaque_softc *scn = ol_ctx->scn;
	struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
	uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
	uint32_t remaining, txlen;
	qdf_dma_addr_t cmd = info->bmi_cmd_da;
	qdf_dma_addr_t rsp = info->bmi_rsp_da;

	bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
	qdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);

	if (info->bmi_done) {
		BMI_ERR("Command disallowed");
		return QDF_STATUS_E_PERM;
	}

	BMI_ERR("Sign Stream start:device:0x%pK, addr:0x%x, length:%d",
		scn, address, length);

	cid = BMI_SIGN_STREAM_START;
	remaining = length;
	while (remaining) {
		src = &buffer[length - remaining];
		if (remaining < (BMI_DATASZ_MAX - header)) {
			if (remaining & 0x3) {
				remaining = remaining + (4 - (remaining & 0x3));
				memcpy(aligned_buf, src, remaining);
				src = aligned_buf;
			}
			txlen = remaining;
		} else {
			txlen = (BMI_DATASZ_MAX - header);
		}

		offset = 0;
		qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		qdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
			     sizeof(address));
		offset += sizeof(address);
		qdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
		offset += sizeof(txlen);
		qdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
		offset += txlen;

		status = hif_exchange_bmi_msg(scn, cmd, rsp,
					      bmi_cmd_buff, offset, NULL,
					      NULL, BMI_EXCHANGE_TIMEOUT_MS);
		if (status) {
			BMI_ERR("Unable to write to the device: status:%d",
				status);
			return QDF_STATUS_E_FAILURE;
		}
		remaining -= txlen;
	}

	BMI_DBG("BMI SIGN Stream Start: Exit");

	return QDF_STATUS_SUCCESS;
}
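
/**
 * bmilz_stream_start() - open an LZ stream at a target address
 * @address: target address for the stream
 * @ol_ctx: the BMI context
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */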
static QDF_STATUS
bmilz_stream_start(uint32_t address, struct ol_context *ol_ctx)
{
	uint32_t cid;
	int status;
	uint32_t offset;
	struct hif_opaque_softc *scn = ol_ctx->scn;
	struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
	uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
	qdf_dma_addr_t cmd = info->bmi_cmd_da;
	qdf_dma_addr_t rsp = info->bmi_rsp_da;

	bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
	qdf_mem_set(bmi_cmd_buff, 0, sizeof(cid) + sizeof(address));

	if (info->bmi_done) {
		BMI_DBG("Command disallowed");
		return QDF_STATUS_E_PERM;
	}

	BMI_DBG("BMI LZ Stream Start: (device: 0x%pK, address: 0x%x)",
		scn, address);

	cid = BMI_LZ_STREAM_START;
	offset = 0;
	qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
	offset += sizeof(address);

	status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
				      NULL, NULL, 0);
	if (status) {
		BMI_ERR("Unable to Start LZ Stream to the device status:%d",
			status);
		return QDF_STATUS_E_FAILURE;
	}

	BMI_DBG("BMI LZ Stream: Exit");
	return QDF_STATUS_SUCCESS;
}
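
/**
 * bmi_fast_download() - download a compressed image to the target
 * @address: target address to download to
 * @buffer: compressed image
 * @length: length of @buffer in bytes
 * @ol_ctx: the BMI context
 *
 * Opens an LZ stream at @address, sends the word-aligned portion of the
 * image, pads and sends any trailing bytes, then restarts the stream at
 * address 0 to flush target caches.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise.
 */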
QDF_STATUS
bmi_fast_download(uint32_t address, uint8_t *buffer,
		  uint32_t length, struct ol_context *ol_ctx)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	uint32_t last_word = 0;
	uint32_t last_word_offset = length & ~0x3;
	uint32_t unaligned_bytes = length & 0x3;

	status = bmilz_stream_start(address, ol_ctx);
	if (status != QDF_STATUS_SUCCESS)
		goto end;

	/* copy the last word into a zero padded buffer */
	if (unaligned_bytes)
		qdf_mem_copy(&last_word, &buffer[last_word_offset],
			     unaligned_bytes);

	status = bmilz_data(buffer, last_word_offset, ol_ctx);
	if (status != QDF_STATUS_SUCCESS)
		goto end;

	if (unaligned_bytes)
		status = bmilz_data((uint8_t *)&last_word, 4, ol_ctx);

	if (status != QDF_STATUS_SUCCESS)
		goto end;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	status = bmilz_stream_start(0x00, ol_ctx);
end:
	return status;
}
/**
 * ol_cds_init() - API to initialize global CDS OL Context
 * @qdf_dev: QDF Device
 * @hif_ctx: HIF Context
 *
 * Return: Success/Failure
 */
QDF_STATUS ol_cds_init(qdf_device_t qdf_dev, void *hif_ctx)
{
	struct ol_context *ol_info;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (NO_BMI)
		return QDF_STATUS_SUCCESS; /* no BMI for Q6 bring up */

	status = cds_alloc_context(QDF_MODULE_ID_BMI,
				   (void **)&ol_info, sizeof(*ol_info));
	if (status != QDF_STATUS_SUCCESS) {
		BMI_ERR("%s: CDS Allocation failed for ol_bmi context",
			__func__);
		return status;
	}

	ol_info->qdf_dev = qdf_dev;
	ol_info->scn = hif_ctx;
	ol_info->tgt_def.targetdef = hif_get_targetdef(hif_ctx);

	qdf_create_work(qdf_dev, &ol_info->ramdump_work,
			ramdump_work_handler, ol_info);
	qdf_create_work(qdf_dev, &ol_info->fw_indication_work,
			fw_indication_work_handler, ol_info);

	return status;
}
/**
 * ol_cds_free() - API to free the global CDS OL Context
 *
 * Return: void
 */
void ol_cds_free(void)
{
	struct ol_context *ol_info = cds_get_context(QDF_MODULE_ID_BMI);

	if (NO_BMI)
		return;

	cds_free_context(QDF_MODULE_ID_BMI, ol_info);
}