/*
 * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
#include "i_bmi.h"
#include "cds_api.h"

/* APIs visible to the driver */
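/**
 * bmi_init() - initialize the BMI layer for a device
 * @ol_ctx: the bmi context
 *
 * Allocates the DMA-coherent command and response buffers used for
 * BMI message exchanges and clears the bmi_done flag.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code on failure.
 */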
QDF_STATUS bmi_init(struct ol_context *ol_ctx)
{
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        struct hif_opaque_softc *scn = ol_ctx->scn;
        qdf_device_t qdf_dev = ol_ctx->qdf_dev;

        if (!scn) {
                BMI_ERR("Invalid scn Context");
                bmi_assert(0);
                return QDF_STATUS_NOT_INITIALIZED;
        }

        if (!qdf_dev->dev) {
                BMI_ERR("%s: Invalid Device Pointer", __func__);
                return QDF_STATUS_NOT_INITIALIZED;
        }

        info->bmi_done = false;

        if (!info->bmi_cmd_buff) {
                info->bmi_cmd_buff =
                        qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
                                                 MAX_BMI_CMDBUF_SZ,
                                                 &info->bmi_cmd_da);
                if (!info->bmi_cmd_buff) {
                        BMI_ERR("No Memory for BMI Command");
                        return QDF_STATUS_E_NOMEM;
                }
        }

        if (!info->bmi_rsp_buff) {
                info->bmi_rsp_buff =
                        qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
                                                 MAX_BMI_CMDBUF_SZ,
                                                 &info->bmi_rsp_da);
                if (!info->bmi_rsp_buff) {
                        BMI_ERR("No Memory for BMI Response");
                        goto end;
                }
        }

        return QDF_STATUS_SUCCESS;

end:
        qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, MAX_BMI_CMDBUF_SZ,
                                info->bmi_cmd_buff, info->bmi_cmd_da, 0);
        info->bmi_cmd_buff = NULL;
        return QDF_STATUS_E_NOMEM;
}

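/**
 * bmi_cleanup() - free the BMI command and response buffers
 * @ol_ctx: the bmi context
 *
 * Return: None
 */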
void bmi_cleanup(struct ol_context *ol_ctx)
{
        struct bmi_info *info;
        qdf_device_t qdf_dev;

        if (!ol_ctx) {
                BMI_WARN("%s: no bmi to cleanup", __func__);
                return;
        }

        info = GET_BMI_CONTEXT(ol_ctx);
        if (!info) {
                BMI_WARN("%s: no bmi to cleanup", __func__);
                return;
        }

        qdf_dev = ol_ctx->qdf_dev;
        if (!qdf_dev || !qdf_dev->dev) {
                BMI_ERR("%s: Invalid Device Pointer", __func__);
                return;
        }

        if (info->bmi_cmd_buff) {
                qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
                                        MAX_BMI_CMDBUF_SZ,
                                        info->bmi_cmd_buff,
                                        info->bmi_cmd_da, 0);
                info->bmi_cmd_buff = NULL;
                info->bmi_cmd_da = 0;
        }

        if (info->bmi_rsp_buff) {
                qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
                                        MAX_BMI_CMDBUF_SZ,
                                        info->bmi_rsp_buff,
                                        info->bmi_rsp_da, 0);
                info->bmi_rsp_buff = NULL;
                info->bmi_rsp_da = 0;
        }
}

/**
 * bmi_done() - finish the bmi operation
 * @ol_ctx: the bmi context
 *
 * Does some sanity checking, exchanges one last message with the
 * firmware, and frees the BMI buffers.
 *
 * Return: QDF_STATUS_SUCCESS if BMI is not needed or completes
 *         successfully, otherwise a failure status.
 */
QDF_STATUS bmi_done(struct ol_context *ol_ctx)
{
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (NO_BMI)
                return QDF_STATUS_SUCCESS;

        if (!ol_ctx) {
                BMI_ERR("%s: null context", __func__);
                return QDF_STATUS_E_NOMEM;
        }
        hif_claim_device(ol_ctx->scn);

        if (!hif_needs_bmi(ol_ctx->scn))
                return QDF_STATUS_SUCCESS;

        status = bmi_done_local(ol_ctx);
        if (status != QDF_STATUS_SUCCESS)
                BMI_ERR("BMI_DONE Failed status:%d", status);

        return status;
}

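/**
 * bmi_target_ready() - notify the config layer that the target is ready
 * @scn: the HIF context
 * @cfg_ctx: configuration context passed through to ol_target_ready()
 *
 * Return: None
 */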
void bmi_target_ready(struct hif_opaque_softc *scn, void *cfg_ctx)
{
        ol_target_ready(scn, cfg_ctx);
}

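/**
 * bmi_get_target_info_message_based() - fetch target info over the BMI
 *	message exchange path
 * @targ_info: buffer to receive the target info
 * @ol_ctx: the bmi context
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code on failure.
 */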
static QDF_STATUS
bmi_get_target_info_message_based(struct bmi_target_info *targ_info,
                                  struct ol_context *ol_ctx)
{
        int status = 0;
        struct hif_opaque_softc *scn = ol_ctx->scn;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        uint8_t *bmi_rsp_buff = info->bmi_rsp_buff;
        uint32_t cid, length;
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        if (!bmi_cmd_buff || !bmi_rsp_buff) {
                BMI_ERR("%s: BMI CMD/RSP Buffer is NULL", __func__);
                return QDF_STATUS_NOT_INITIALIZED;
        }

        cid = BMI_GET_TARGET_INFO;
        qdf_mem_copy(bmi_cmd_buff, &cid, sizeof(cid));
        length = sizeof(struct bmi_target_info);

        status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, sizeof(cid),
                                      (uint8_t *)bmi_rsp_buff, &length,
                                      BMI_EXCHANGE_TIMEOUT_MS);
        if (status) {
                BMI_ERR("Failed to get target info: status:%d", status);
                return QDF_STATUS_E_FAILURE;
        }

        qdf_mem_copy(targ_info, bmi_rsp_buff, length);
        return QDF_STATUS_SUCCESS;
}

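/**
 * bmi_get_target_info() - fetch target info over the bus-appropriate path
 * @targ_info: buffer to receive the target info
 * @ol_ctx: the bmi context
 *
 * Uses the message-based exchange for PCI/SNOC/USB and the register-based
 * path for SDIO.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code on failure.
 */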
QDF_STATUS
bmi_get_target_info(struct bmi_target_info *targ_info,
                    struct ol_context *ol_ctx)
{
        struct hif_opaque_softc *scn = ol_ctx->scn;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        QDF_STATUS status;

        if (info->bmi_done) {
                BMI_ERR("BMI Phase is Already Done");
                return QDF_STATUS_E_PERM;
        }

        switch (hif_get_bus_type(scn)) {
        case QDF_BUS_TYPE_PCI:
        case QDF_BUS_TYPE_SNOC:
        case QDF_BUS_TYPE_USB:
                status = bmi_get_target_info_message_based(targ_info, ol_ctx);
                break;
#ifdef HIF_SDIO
        case QDF_BUS_TYPE_SDIO:
                status = hif_reg_based_get_target_info(scn, targ_info);
                break;
#endif
        default:
                status = QDF_STATUS_E_FAILURE;
                break;
        }
        return status;
}

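/**
 * bmi_download_firmware() - download firmware to the target over BMI
 * @ol_ctx: the bmi context
 *
 * Return: QDF_STATUS_SUCCESS on success or when BMI is not needed,
 *         an error code otherwise.
 */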
QDF_STATUS bmi_download_firmware(struct ol_context *ol_ctx)
{
        struct hif_opaque_softc *scn;

        if (!ol_ctx) {
                if (NO_BMI) {
                        /* ol_ctx is not allocated in NO_BMI case */
                        return QDF_STATUS_SUCCESS;
                } else {
                        BMI_ERR("ol_ctx is NULL");
                        bmi_assert(0);
                        return QDF_STATUS_NOT_INITIALIZED;
                }
        }

        scn = ol_ctx->scn;
        if (!scn) {
                BMI_ERR("Invalid scn context");
                bmi_assert(0);
                return QDF_STATUS_NOT_INITIALIZED;
        }

        if (!hif_needs_bmi(scn))
                return QDF_STATUS_SUCCESS;

        return bmi_firmware_download(ol_ctx);
}

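/**
 * bmi_read_soc_register() - read a 32-bit SoC register via BMI
 * @address: register address to read
 * @param: buffer to receive the register value
 * @ol_ctx: the bmi context
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code on failure.
 */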
QDF_STATUS bmi_read_soc_register(uint32_t address, uint32_t *param,
                                 struct ol_context *ol_ctx)
{
        struct hif_opaque_softc *scn = ol_ctx->scn;
        uint32_t cid;
        int status;
        uint32_t offset, param_len;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        uint8_t *bmi_rsp_buff = info->bmi_rsp_buff;
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
        qdf_mem_set(bmi_cmd_buff, 0, sizeof(cid) + sizeof(address));
        qdf_mem_set(bmi_rsp_buff, 0, sizeof(cid) + sizeof(address));

        if (info->bmi_done) {
                BMI_DBG("Command disallowed");
                return QDF_STATUS_E_PERM;
        }

        BMI_DBG("BMI Read SOC Register:device: 0x%p, address: 0x%x",
                scn, address);

        cid = BMI_READ_SOC_REGISTER;
        offset = 0;
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
        offset += sizeof(address);
        param_len = sizeof(*param);

        status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
                                      bmi_rsp_buff, &param_len,
                                      BMI_EXCHANGE_TIMEOUT_MS);
        if (status) {
                BMI_DBG("Unable to read from the device; status:%d", status);
                return QDF_STATUS_E_FAILURE;
        }

        qdf_mem_copy(param, bmi_rsp_buff, sizeof(*param));

        BMI_DBG("BMI Read SOC Register: Exit value: %d", *param);
        return QDF_STATUS_SUCCESS;
}

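/**
 * bmi_write_soc_register() - write a 32-bit SoC register via BMI
 * @address: register address to write
 * @param: value to write
 * @ol_ctx: the bmi context
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code on failure.
 */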
QDF_STATUS bmi_write_soc_register(uint32_t address, uint32_t param,
                                  struct ol_context *ol_ctx)
{
        struct hif_opaque_softc *scn = ol_ctx->scn;
        uint32_t cid;
        int status;
        uint32_t offset;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        uint32_t size = sizeof(cid) + sizeof(address) + sizeof(param);
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        bmi_assert(BMI_COMMAND_FITS(size));
        qdf_mem_set(bmi_cmd_buff, 0, size);

        if (info->bmi_done) {
                BMI_DBG("Command disallowed");
                return QDF_STATUS_E_FAILURE;
        }

        BMI_DBG("SOC Register Write:device:0x%p, addr:0x%x, param:%d",
                scn, address, param);

        cid = BMI_WRITE_SOC_REGISTER;
        offset = 0;
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
        offset += sizeof(address);
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &param, sizeof(param));
        offset += sizeof(param);

        status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
                                      NULL, NULL, 0);
        if (status) {
                BMI_ERR("Unable to write to the device: status:%d", status);
                return QDF_STATUS_E_FAILURE;
        }

        BMI_DBG("BMI Write SOC Register: Exit");
        return QDF_STATUS_SUCCESS;
}

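/**
 * bmilz_data() - send a buffer of LZ-compressed data to the target
 * @buffer: data to send
 * @length: number of bytes to send
 * @ol_ctx: the bmi context
 *
 * The data is sent in chunks of at most BMI_DATASZ_MAX minus the
 * command header size.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code on failure.
 */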
static QDF_STATUS
bmilz_data(uint8_t *buffer, uint32_t length, struct ol_context *ol_ctx)
{
        uint32_t cid;
        int status;
        uint32_t offset;
        uint32_t remaining, txlen;
        const uint32_t header = sizeof(cid) + sizeof(length);
        struct hif_opaque_softc *scn = ol_ctx->scn;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
        qdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);

        if (info->bmi_done) {
                BMI_ERR("Command disallowed");
                return QDF_STATUS_E_PERM;
        }

        BMI_DBG("BMI Send LZ Data: device: 0x%p, length: %d",
                scn, length);

        cid = BMI_LZ_DATA;

        remaining = length;
        while (remaining) {
                txlen = (remaining < (BMI_DATASZ_MAX - header)) ?
                        remaining : (BMI_DATASZ_MAX - header);
                offset = 0;
                qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
                offset += sizeof(cid);
                qdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
                offset += sizeof(txlen);
                qdf_mem_copy(&(bmi_cmd_buff[offset]),
                             &buffer[length - remaining], txlen);
                offset += txlen;
                status = hif_exchange_bmi_msg(scn, cmd, rsp,
                                              bmi_cmd_buff, offset,
                                              NULL, NULL, 0);
                if (status) {
                        BMI_ERR("Failed to write to the device: status:%d",
                                status);
                        return QDF_STATUS_E_FAILURE;
                }
                remaining -= txlen;
        }

        BMI_DBG("BMI LZ Data: Exit");
        return QDF_STATUS_SUCCESS;
}

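/**
 * bmi_sign_stream_start() - send a signed stream to the target
 * @address: target address for the signed stream
 * @buffer: signed stream data
 * @length: number of bytes to send
 * @ol_ctx: the bmi context
 *
 * Unaligned trailing bytes are copied into a padded bounce buffer so
 * that every exchange stays 4-byte aligned.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code on failure.
 */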
QDF_STATUS bmi_sign_stream_start(uint32_t address, uint8_t *buffer,
                                 uint32_t length, struct ol_context *ol_ctx)
{
        uint32_t cid;
        int status;
        uint32_t offset;
        const uint32_t header = sizeof(cid) + sizeof(address) + sizeof(length);
        uint8_t aligned_buf[BMI_DATASZ_MAX + 4];
        uint8_t *src;
        struct hif_opaque_softc *scn = ol_ctx->scn;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        uint32_t remaining, txlen;
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        bmi_assert(BMI_COMMAND_FITS(BMI_DATASZ_MAX + header));
        qdf_mem_set(bmi_cmd_buff, 0, BMI_DATASZ_MAX + header);

        if (info->bmi_done) {
                BMI_ERR("Command disallowed");
                return QDF_STATUS_E_PERM;
        }

        BMI_ERR("Sign Stream start:device:0x%p, addr:0x%x, length:%d",
                scn, address, length);

        cid = BMI_SIGN_STREAM_START;

        remaining = length;
        while (remaining) {
                src = &buffer[length - remaining];
                if (remaining < (BMI_DATASZ_MAX - header)) {
                        if (remaining & 0x3) {
                                /* round up to the next 4-byte boundary */
                                remaining = remaining + (4 - (remaining & 0x3));
                                memcpy(aligned_buf, src, remaining);
                                src = aligned_buf;
                        }
                        txlen = remaining;
                } else {
                        txlen = (BMI_DATASZ_MAX - header);
                }
                offset = 0;
                qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
                offset += sizeof(cid);
                qdf_mem_copy(&(bmi_cmd_buff[offset]), &address,
                             sizeof(address));
                offset += sizeof(address);
                qdf_mem_copy(&(bmi_cmd_buff[offset]), &txlen, sizeof(txlen));
                offset += sizeof(txlen);
                qdf_mem_copy(&(bmi_cmd_buff[offset]), src, txlen);
                offset += txlen;
                status = hif_exchange_bmi_msg(scn, cmd, rsp,
                                              bmi_cmd_buff, offset, NULL,
                                              NULL, BMI_EXCHANGE_TIMEOUT_MS);
                if (status) {
                        BMI_ERR("Unable to write to the device: status:%d",
                                status);
                        return QDF_STATUS_E_FAILURE;
                }
                remaining -= txlen;
        }

        BMI_DBG("BMI SIGN Stream Start: Exit");
        return QDF_STATUS_SUCCESS;
}

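/**
 * bmilz_stream_start() - open an LZ stream at a target address
 * @address: target address at which the stream starts
 * @ol_ctx: the bmi context
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code on failure.
 */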
static QDF_STATUS
bmilz_stream_start(uint32_t address, struct ol_context *ol_ctx)
{
        uint32_t cid;
        int status;
        uint32_t offset;
        struct hif_opaque_softc *scn = ol_ctx->scn;
        struct bmi_info *info = GET_BMI_CONTEXT(ol_ctx);
        uint8_t *bmi_cmd_buff = info->bmi_cmd_buff;
        qdf_dma_addr_t cmd = info->bmi_cmd_da;
        qdf_dma_addr_t rsp = info->bmi_rsp_da;

        bmi_assert(BMI_COMMAND_FITS(sizeof(cid) + sizeof(address)));
        qdf_mem_set(bmi_cmd_buff, 0, sizeof(cid) + sizeof(address));

        if (info->bmi_done) {
                BMI_DBG("Command disallowed");
                return QDF_STATUS_E_PERM;
        }

        BMI_DBG("BMI LZ Stream Start: (device: 0x%p, address: 0x%x)",
                scn, address);

        cid = BMI_LZ_STREAM_START;
        offset = 0;
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        qdf_mem_copy(&(bmi_cmd_buff[offset]), &address, sizeof(address));
        offset += sizeof(address);

        status = hif_exchange_bmi_msg(scn, cmd, rsp, bmi_cmd_buff, offset,
                                      NULL, NULL, 0);
        if (status) {
                BMI_ERR("Unable to Start LZ Stream to the device status:%d",
                        status);
                return QDF_STATUS_E_FAILURE;
        }

        BMI_DBG("BMI LZ Stream: Exit");
        return QDF_STATUS_SUCCESS;
}

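/**
 * bmi_fast_download() - download a compressed image via the LZ stream
 * @address: target address to download to
 * @buffer: compressed image data
 * @length: number of bytes to download
 * @ol_ctx: the bmi context
 *
 * Sends the 4-byte-aligned portion of the image, then the zero-padded
 * last word, and finally restarts the stream to flush target caches.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error code on failure.
 */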
QDF_STATUS
bmi_fast_download(uint32_t address, uint8_t *buffer,
                  uint32_t length, struct ol_context *ol_ctx)
{
        QDF_STATUS status = QDF_STATUS_E_FAILURE;
        uint32_t last_word = 0;
        uint32_t last_word_offset = length & ~0x3;
        uint32_t unaligned_bytes = length & 0x3;

        status = bmilz_stream_start(address, ol_ctx);
        if (status != QDF_STATUS_SUCCESS)
                goto end;

        /* copy the last word into a zero padded buffer */
        if (unaligned_bytes)
                qdf_mem_copy(&last_word, &buffer[last_word_offset],
                             unaligned_bytes);

        status = bmilz_data(buffer, last_word_offset, ol_ctx);
        if (status != QDF_STATUS_SUCCESS)
                goto end;

        if (unaligned_bytes)
                status = bmilz_data((uint8_t *) &last_word, 4, ol_ctx);

        if (status != QDF_STATUS_SUCCESS)
                goto end;

        /*
         * Close compressed stream and open a new (fake) one.
         * This serves mainly to flush Target caches.
         */
        status = bmilz_stream_start(0x00, ol_ctx);
end:
        return status;
}

/**
 * ol_cds_init() - API to initialize global CDS OL Context
 * @qdf_dev: QDF Device
 * @hif_ctx: HIF Context
 *
 * Return: Success/Failure
 */
QDF_STATUS ol_cds_init(qdf_device_t qdf_dev, void *hif_ctx)
{
        struct ol_context *ol_info;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (NO_BMI)
                return QDF_STATUS_SUCCESS; /* no BMI for Q6 bring up */

        status = cds_alloc_context(cds_get_global_context(), QDF_MODULE_ID_BMI,
                                   (void **)&ol_info, sizeof(*ol_info));
        if (status != QDF_STATUS_SUCCESS) {
                BMI_ERR("%s: CDS Allocation failed for ol_bmi context",
                        __func__);
                return status;
        }

        ol_info->qdf_dev = qdf_dev;
        ol_info->scn = hif_ctx;
        ol_info->tgt_def.targetdef = hif_get_targetdef(hif_ctx);

        qdf_create_work(qdf_dev, &ol_info->ramdump_work,
                        ramdump_work_handler, ol_info);
        qdf_create_work(qdf_dev, &ol_info->fw_indication_work,
                        fw_indication_work_handler, ol_info);

        return status;
}

/**
 * ol_cds_free() - API to free the global CDS OL Context
 *
 * Return: void
 */
void ol_cds_free(void)
{
        struct ol_context *ol_info = cds_get_context(QDF_MODULE_ID_BMI);

        if (NO_BMI)
                return;

        cds_free_context(cds_get_global_context(), QDF_MODULE_ID_BMI, ol_info);
}