ipa_uc.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include "ipa_i.h"
  6. #include <linux/delay.h>
/* Size in bytes of the uC shared-memory command/response window */
#define IPA_RAM_UC_SMEM_SIZE 128
/* Expected CPU<->HW interface version */
#define IPA_HW_INTERFACE_VERSION 0x2000
/* Delay (us) granted to uC to save state on ERR_FATAL */
#define IPA_PKT_FLUSH_TO_US 100
/* Polling interval (us) and retry bound for polling-mode commands */
#define IPA_UC_POLL_SLEEP_USEC 100
#define IPA_UC_POLL_MAX_RETRY 10000
/* OFFLOAD_STATS_ALLOC response: protocol id lives in bits 31..24,
 * stats area offset in bits 23..0 of responseParams_1
 */
#define IPA_UC_DBG_STATS_GET_PROT_ID(x) (0xff & ((x) >> 24))
#define IPA_UC_DBG_STATS_GET_OFFSET(x) (0x00ffffff & (x))
/**
 * Mailbox register to Interrupt HWP for CPU cmd
 * Usage of IPA_UC_MAILBOX_m_n doorbell instead of IPA_IRQ_EE_UC_0
 * due to HW limitation.
 *
 */
#define IPA_CPU_2_HW_CMD_MBOX_m 0
#define IPA_CPU_2_HW_CMD_MBOX_n 23
/**
 * enum ipa3_cpu_2_hw_commands - Values that represent the commands from the CPU
 * IPA_CPU_2_HW_CMD_NO_OP : No operation is required.
 * IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which defines the behavior
 * of HW.
 * IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW.
 * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information.
 * IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal
 * handling.
 * IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to goto Clock Gated state.
 * IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to goto Clock Ungated state.
 * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB.
 * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug.
 * IPA_CPU_2_HW_CMD_REG_WRITE : CPU instructs HW to write a value to an
 * RG10 register (see struct IpaHwRegWriteCmdData_t).
 * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness.
 * IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO: Command to store remote IPA Info
 */
enum ipa3_cpu_2_hw_commands {
	IPA_CPU_2_HW_CMD_NO_OP =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
	IPA_CPU_2_HW_CMD_UPDATE_FLAGS =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
	IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
	IPA_CPU_2_HW_CMD_DEBUG_GET_INFO =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
	IPA_CPU_2_HW_CMD_ERR_FATAL =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
	IPA_CPU_2_HW_CMD_CLK_GATE =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
	IPA_CPU_2_HW_CMD_CLK_UNGATE =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
	IPA_CPU_2_HW_CMD_MEMCPY =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
	IPA_CPU_2_HW_CMD_RESET_PIPE =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8),
	IPA_CPU_2_HW_CMD_REG_WRITE =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9),
	IPA_CPU_2_HW_CMD_GSI_CH_EMPTY =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10),
	IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 11),
};
/**
 * enum ipa3_hw_2_cpu_responses - Values that represent common HW responses
 * to CPU commands.
 * @IPA_HW_2_CPU_RESPONSE_NO_OP : No operation response
 * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once
 * boot sequence is completed and HW is ready to serve commands from CPU
 * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands
 * @IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO : Response to
 * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO command
 */
enum ipa3_hw_2_cpu_responses {
	IPA_HW_2_CPU_RESPONSE_NO_OP =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
	IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
	IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO =
		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
};
/**
 * struct IpaHwMemCopyData_t - Structure holding the parameters
 * for IPA_CPU_2_HW_CMD_MEMCPY command.
 * @destination_addr: physical destination address for the QMB copy
 * @source_addr: physical source address for the QMB copy
 * @dest_buffer_size: size in bytes of the destination buffer
 * @source_buffer_size: size in bytes of the source buffer
 *
 * The parameters are passed as immediate params in the shared memory
 */
struct IpaHwMemCopyData_t {
	u32 destination_addr;
	u32 source_addr;
	u32 dest_buffer_size;
	u32 source_buffer_size;
};
/**
 * struct IpaHwRegWriteCmdData_t - holds the parameters for
 * IPA_CPU_2_HW_CMD_REG_WRITE command. Parameters are
 * sent as 64b immediate parameters.
 * @RegisterAddress: RG10 register address where the value needs to be written
 * @RegisterValue: 32-Bit value to be written into the register
 */
struct IpaHwRegWriteCmdData_t {
	u32 RegisterAddress;
	u32 RegisterValue;
};
/**
 * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters
 * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response.
 * @originalCmdOp : The original command opcode
 * @status : 0 for success indication, otherwise failure
 * @responseData : 16b responseData
 *
 * Parameters are sent as 32b immediate parameters.
 */
union IpaHwCpuCmdCompletedResponseData_t {
	struct IpaHwCpuCmdCompletedResponseParams_t {
		u32 originalCmdOp:8;
		u32 status:8;
		u32 responseData:16;
	} __packed params;
	u32 raw32b;	/* raw view used when copying to/from SRAM */
} __packed;
/**
 * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for
 * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command
 * @newFlags: SW flags defined the behavior of HW.
 * This field is expected to be used as bitmask for enum ipa3_hw_flags
 */
union IpaHwUpdateFlagsCmdData_t {
	struct IpaHwUpdateFlagsCmdParams_t {
		u32 newFlags;
	} params;
	u32 raw32b;	/* raw view used when copying to/from SRAM */
};
/**
 * union IpaHwChkChEmptyCmdData_t - Structure holding the parameters for
 * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY command. Parameters are sent as 32b
 * immediate parameters.
 * @ee_n : EE owner of the channel
 * @vir_ch_id : GSI virtual channel ID of the channel to checked of emptiness
 * @reserved_02_04 : Reserved
 */
union IpaHwChkChEmptyCmdData_t {
	struct IpaHwChkChEmptyCmdParams_t {
		u8 ee_n;
		u8 vir_ch_id;
		u16 reserved_02_04;
	} __packed params;
	u32 raw32b;	/* raw view used when copying to/from SRAM */
} __packed;
  152. /**
  153. * Structure holding the parameters for IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO
  154. * command.
  155. * @remoteIPAAddr: 5G IPA address : uC proxies Q6 doorbell to this address
  156. * @mboxN: mbox on which Q6 will interrupt uC
  157. */
  158. struct IpaHwDbAddrInfo_t {
  159. u32 remoteIPAAddr;
  160. uint32_t mboxN;
  161. } __packed;
/* Serializes ready-callback registration against the INIT_COMPLETED path */
static DEFINE_MUTEX(uc_loaded_nb_lock);
/* Chain notified (under uc_loaded_nb_lock) once the uC reports it is loaded */
static BLOCKING_NOTIFIER_HEAD(uc_loaded_notifier);
/* Per-feature handler table filled in via ipa3_uc_register_handlers() */
struct ipa3_uc_hdlrs ipa3_uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } };
  165. const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
  166. {
  167. const char *str;
  168. switch (err_type) {
  169. case IPA_HW_ERROR_NONE:
  170. str = "IPA_HW_ERROR_NONE";
  171. break;
  172. case IPA_HW_INVALID_DOORBELL_ERROR:
  173. str = "IPA_HW_INVALID_DOORBELL_ERROR";
  174. break;
  175. case IPA_HW_DMA_ERROR:
  176. str = "IPA_HW_DMA_ERROR";
  177. break;
  178. case IPA_HW_FATAL_SYSTEM_ERROR:
  179. str = "IPA_HW_FATAL_SYSTEM_ERROR";
  180. break;
  181. case IPA_HW_INVALID_OPCODE:
  182. str = "IPA_HW_INVALID_OPCODE";
  183. break;
  184. case IPA_HW_INVALID_PARAMS:
  185. str = "IPA_HW_INVALID_PARAMS";
  186. break;
  187. case IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE:
  188. str = "IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE";
  189. break;
  190. case IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE:
  191. str = "IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE";
  192. break;
  193. case IPA_HW_GSI_CH_NOT_EMPTY_FAILURE:
  194. str = "IPA_HW_GSI_CH_NOT_EMPTY_FAILURE";
  195. break;
  196. default:
  197. str = "INVALID ipa_hw_errors type";
  198. }
  199. return str;
  200. }
  201. static void ipa3_uc_save_dbg_stats(u32 size)
  202. {
  203. u8 protocol_id;
  204. u32 addr_offset;
  205. void __iomem *mmio;
  206. protocol_id = IPA_UC_DBG_STATS_GET_PROT_ID(
  207. ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams_1);
  208. addr_offset = IPA_UC_DBG_STATS_GET_OFFSET(
  209. ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams_1);
  210. mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
  211. addr_offset, sizeof(struct IpaHwRingStats_t) *
  212. MAX_CH_STATS_SUPPORTED);
  213. if (mmio == NULL) {
  214. IPAERR("unexpected NULL mmio\n");
  215. return;
  216. }
  217. switch (protocol_id) {
  218. case IPA_HW_PROTOCOL_AQC:
  219. break;
  220. case IPA_HW_PROTOCOL_11ad:
  221. break;
  222. case IPA_HW_PROTOCOL_WDI:
  223. ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_size = size;
  224. ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
  225. ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
  226. break;
  227. case IPA_HW_PROTOCOL_WDI3:
  228. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_size = size;
  229. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
  230. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
  231. break;
  232. case IPA_HW_PROTOCOL_ETH:
  233. break;
  234. case IPA_HW_PROTOCOL_MHIP:
  235. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_size = size;
  236. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
  237. ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
  238. break;
  239. case IPA_HW_PROTOCOL_USB:
  240. ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_size = size;
  241. ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_ofst = addr_offset;
  242. ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio = mmio;
  243. break;
  244. default:
  245. IPAERR("unknown protocols %d\n", protocol_id);
  246. }
  247. }
/*
 * ipa3_log_evt_hdlr() - handle the IPA_HW_2_CPU_EVENT_LOG_INFO event.
 *
 * On the first LOG_INFO event: latch the event-log top offset published by
 * the uC in shared SRAM, bounds-check it against the SRAM window, map it and
 * hand the mapping to every registered per-feature log-info handler.
 * On subsequent events: only verify the offset has not moved.
 */
static void ipa3_log_evt_hdlr(void)
{
	int i;

	if (!ipa3_ctx->uc_ctx.uc_event_top_ofst) {
		/* First event: capture the offset the uC published */
		ipa3_ctx->uc_ctx.uc_event_top_ofst =
			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams;
		/* Reject offsets whose log struct would extend past SRAM */
		if (ipa3_ctx->uc_ctx.uc_event_top_ofst +
			sizeof(struct IpaHwEventLogInfoData_t) >=
			ipa3_ctx->ctrl->ipa_reg_base_ofst +
			ipahal_get_reg_n_ofst(
				IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) +
			ipa3_ctx->smem_sz) {
			IPAERR("uc_top 0x%x outside SRAM\n",
				ipa3_ctx->uc_ctx.uc_event_top_ofst);
			goto bad_uc_top_ofst;
		}
		ipa3_ctx->uc_ctx.uc_event_top_mmio = ioremap(
			ipa3_ctx->ipa_wrapper_base +
			ipa3_ctx->uc_ctx.uc_event_top_ofst,
			sizeof(struct IpaHwEventLogInfoData_t));
		if (!ipa3_ctx->uc_ctx.uc_event_top_mmio) {
			IPAERR("fail to ioremap uc top\n");
			goto bad_uc_top_ofst;
		}
		/* Let every feature inspect the freshly mapped log info */
		for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
			if (ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr)
				ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr
					(ipa3_ctx->uc_ctx.uc_event_top_mmio);
		}
	} else {
		/* Offset is expected to stay constant across events */
		if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams !=
			ipa3_ctx->uc_ctx.uc_event_top_ofst) {
			IPAERR("uc top ofst changed new=%u cur=%u\n",
				ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams,
				ipa3_ctx->uc_ctx.uc_event_top_ofst);
		}
	}
	return;

bad_uc_top_ofst:
	/* Clear so the next event retries the capture from scratch */
	ipa3_ctx->uc_ctx.uc_event_top_ofst = 0;
}
  289. /**
  290. * ipa3_uc_state_check() - Check the status of the uC interface
  291. *
  292. * Return value: 0 if the uC is loaded, interface is initialized
  293. * and there was no recent failure in one of the commands.
  294. * A negative value is returned otherwise.
  295. */
  296. int ipa3_uc_state_check(void)
  297. {
  298. if (!ipa3_ctx->uc_ctx.uc_inited) {
  299. IPAERR("uC interface not initialized\n");
  300. return -EFAULT;
  301. }
  302. if (!ipa3_ctx->uc_ctx.uc_loaded) {
  303. IPAERR("uC is not loaded\n");
  304. return -EFAULT;
  305. }
  306. if (ipa3_ctx->uc_ctx.uc_failed) {
  307. IPAERR("uC has failed its last command\n");
  308. return -EFAULT;
  309. }
  310. return 0;
  311. }
/**
 * ipa3_uc_loaded_check() - Check the uC has been loaded
 *
 * Reads the flag set by the response handler when the uC sends
 * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED.
 *
 * Return value: 1 if the uC is loaded, 0 otherwise
 */
int ipa3_uc_loaded_check(void)
{
	return ipa3_ctx->uc_ctx.uc_loaded;
}
EXPORT_SYMBOL(ipa3_uc_loaded_check);
/**
 * ipa3_uc_register_ready_cb() - register a uC ready callback notifier block
 * @nb: notifier
 *
 * Register a callback to be called when uC is ready to receive commands. uC is
 * considered to be ready when it sends %IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED.
 *
 * Return: 0 on successful registration, negative errno otherwise
 *
 * See blocking_notifier_chain_register() for possible errno values
 */
int ipa3_uc_register_ready_cb(struct notifier_block *nb)
{
	int rc;

	/* uc_loaded_nb_lock keeps this check-then-call atomic w.r.t. the
	 * INIT_COMPLETED path, so the callback fires exactly once whether
	 * registration happens before or after the uC loads.
	 */
	mutex_lock(&uc_loaded_nb_lock);
	rc = blocking_notifier_chain_register(&uc_loaded_notifier, nb);
	/* If uC already loaded, invoke the callback immediately */
	if (!rc && ipa3_ctx->uc_ctx.uc_loaded)
		(void) nb->notifier_call(nb, false, ipa3_ctx);
	mutex_unlock(&uc_loaded_nb_lock);

	return rc;
}
EXPORT_SYMBOL(ipa3_uc_register_ready_cb);
/**
 * ipa3_uc_unregister_ready_cb() - unregister a uC ready callback
 * @nb: notifier
 *
 * Unregister a uC loaded notifier block that was previously registered by
 * ipa3_uc_register_ready_cb().
 *
 * Return: 0 on successful unregistration, negative errno otherwise
 *
 * See blocking_notifier_chain_unregister() for possible errno values
 */
int ipa3_uc_unregister_ready_cb(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&uc_loaded_notifier, nb);
}
EXPORT_SYMBOL(ipa3_uc_unregister_ready_cb);
/*
 * ipa3_uc_event_handler() - IPA_UC_IRQ_0 handler; dispatches events the uC
 * posts in the shared SRAM mapping.
 *
 * Runs the feature-specific event handler (if registered) first, then
 * common handling for ERROR and LOG_INFO events. Holds an active-clients
 * vote for the duration so IPA clocks stay on while SRAM is read.
 */
static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
	void *private_data,
	void *interrupt_data)
{
	union IpaHwErrorEventData_t evt;
	u8 feature;

	WARN_ON(private_data != ipa3_ctx);
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	IPADBG("uC evt opcode=%u\n",
		ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
	/* High byte of eventOp selects the owning feature */
	feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
	if (feature >= IPA_HW_FEATURE_MAX) {
		IPAERR("Invalid feature %u for event %u\n",
			feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
		return;
	}
	/* Feature specific handling */
	if (ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr)
		ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr
			(ipa3_ctx->uc_ctx.uc_sram_mmio);
	/* General handling */
	if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
		IPA_HW_2_CPU_EVENT_ERROR) {
		/* Record the failure; subsequent commands will be refused
		 * by ipa3_uc_state_check()
		 */
		evt.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams;
		IPAERR("uC Error, evt errorType = %s\n",
			ipa_hw_error_str(evt.params.errorType));
		ipa3_ctx->uc_ctx.uc_failed = true;
		ipa3_ctx->uc_ctx.uc_error_type = evt.params.errorType;
		ipa3_ctx->uc_ctx.uc_error_timestamp =
			ipahal_read_reg(IPA_TAG_TIMER);
		/* Unexpected UC hardware state */
		ipa_assert();
	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
		IPA_HW_2_CPU_EVENT_LOG_INFO) {
		IPADBG("uC evt log info ofst=0x%x\n",
			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
		ipa3_log_evt_hdlr();
	} else {
		IPADBG("unsupported uC evt opcode=%u\n",
			ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
	}
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
/*
 * ipa3_uc_panic_notifier() - kernel panic hook; tells the uC to save state.
 *
 * Sends IPA_CPU_2_HW_CMD_ERR_FATAL to the uC without waiting for a response
 * (we are panicking), then busy-waits briefly so the uC can save state.
 * Always returns NOTIFY_DONE; a failing state check or clock vote simply
 * skips the notification.
 */
int ipa3_uc_panic_notifier(struct notifier_block *this,
	unsigned long event, void *ptr)
{
	int result = 0;
	struct ipa_active_client_logging_info log_info;

	IPADBG("this=%pK evt=%lu ptr=%pK\n", this, event, ptr);
	result = ipa3_uc_state_check();
	if (result)
		goto fail;
	IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
	/* non-blocking clock vote: must not sleep in panic context */
	if (ipa3_inc_client_enable_clks_no_block(&log_info))
		goto fail;
	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp =
		IPA_CPU_2_HW_CMD_ERR_FATAL;
	ipa3_ctx->uc_ctx.pending_cmd = ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp;
	/* ensure write to shared memory is done before triggering uc */
	wmb();
	ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
	/* give uc enough time to save state */
	udelay(IPA_PKT_FLUSH_TO_US);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG("err_fatal issued\n");
fail:
	return NOTIFY_DONE;
}
  429. static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
  430. void *private_data,
  431. void *interrupt_data)
  432. {
  433. union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
  434. u8 feature;
  435. int res;
  436. int i;
  437. WARN_ON(private_data != ipa3_ctx);
  438. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  439. IPADBG("uC rsp opcode=%u\n",
  440. ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
  441. feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
  442. if (feature >= IPA_HW_FEATURE_MAX) {
  443. IPAERR("Invalid feature %u for event %u\n",
  444. feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
  445. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  446. return;
  447. }
  448. /* Feature specific handling */
  449. if (ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr) {
  450. res = ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr(
  451. ipa3_ctx->uc_ctx.uc_sram_mmio,
  452. &ipa3_ctx->uc_ctx.uc_status);
  453. if (res == 0) {
  454. IPADBG("feature %d specific response handler\n",
  455. feature);
  456. complete_all(&ipa3_ctx->uc_ctx.uc_completion);
  457. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  458. return;
  459. }
  460. }
  461. /* General handling */
  462. if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
  463. IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) {
  464. if (ipa3_ctx->uc_ctx.uc_loaded) {
  465. IPADBG("uC resp op INIT_COMPLETED is unexpected\n");
  466. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  467. return;
  468. }
  469. mutex_lock(&uc_loaded_nb_lock);
  470. ipa3_ctx->uc_ctx.uc_loaded = true;
  471. (void) blocking_notifier_call_chain(&uc_loaded_notifier, true,
  472. ipa3_ctx);
  473. mutex_unlock(&uc_loaded_nb_lock);
  474. IPADBG("IPA uC loaded\n");
  475. /*
  476. * The proxy vote is held until uC is loaded to ensure that
  477. * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received.
  478. */
  479. ipa3_proxy_clk_unvote();
  480. /*
  481. * To enable ipa power collapse we need to enable rpmh and uc
  482. * handshake So that uc can do register retention. To enable
  483. * this handshake we need to send the below message to rpmh.
  484. */
  485. ipa_pc_qmp_enable();
  486. for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
  487. if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr)
  488. ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr();
  489. }
  490. } else if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
  491. IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
  492. uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams;
  493. IPADBG("uC cmd response opcode=%u status=%u\n",
  494. uc_rsp.params.originalCmdOp,
  495. uc_rsp.params.status);
  496. if (uc_rsp.params.originalCmdOp ==
  497. ipa3_ctx->uc_ctx.pending_cmd) {
  498. ipa3_ctx->uc_ctx.uc_status = uc_rsp.params.status;
  499. if (uc_rsp.params.originalCmdOp ==
  500. IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC)
  501. ipa3_uc_save_dbg_stats(
  502. uc_rsp.params.responseData);
  503. complete_all(&ipa3_ctx->uc_ctx.uc_completion);
  504. } else {
  505. IPAERR("Expected cmd=%u rcvd cmd=%u\n",
  506. ipa3_ctx->uc_ctx.pending_cmd,
  507. uc_rsp.params.originalCmdOp);
  508. }
  509. } else {
  510. IPAERR("Unsupported uC rsp opcode = %u\n",
  511. ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
  512. }
  513. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  514. }
/*
 * ipa3_uc_wigig_misc_int_handler() - IPA_UC_IRQ_2 handler; forwards the
 * wigig misc interrupt to the client-registered callback (if any) with the
 * client's private cookie.
 */
static void ipa3_uc_wigig_misc_int_handler(enum ipa_irq_type interrupt,
	void *private_data,
	void *interrupt_data)
{
	IPADBG("\n");
	WARN_ON(private_data != ipa3_ctx);
	if (ipa3_ctx->uc_wigig_ctx.misc_notify_cb)
		ipa3_ctx->uc_wigig_ctx.misc_notify_cb(
			ipa3_ctx->uc_wigig_ctx.priv);
	IPADBG("exit\n");
}
/*
 * ipa3_uc_send_cmd_64b_param() - send a command with a 64-bit immediate
 * parameter to the uC and wait for completion.
 * @cmd_lo / @cmd_hi: low/high 32 bits of the immediate parameter
 * @opcode: command opcode written to SRAM cmdOp
 * @expected_status: status value that signifies success
 * @polling_mode: busy-poll SRAM for the response instead of sleeping on
 * the completion (used when interrupts may not be serviceable)
 * @timeout_jiffies: wait bound for interrupt mode
 *
 * Serialized by uc_ctx.uc_lock. GSI stop/not-empty failure statuses are
 * retried (with the full command resent); timeout or poll exhaustion is
 * treated as fatal via ipa_assert().
 *
 * Return: 0 on success, -EBADF if the uC is unusable, -EFAULT on
 * unexpected status or retry exhaustion.
 */
static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode,
	u32 expected_status, bool polling_mode, unsigned long timeout_jiffies)
{
	int index;
	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
	int retries = 0;
	u32 uc_error_type;

send_cmd_lock:
	mutex_lock(&ipa3_ctx->uc_ctx.uc_lock);

	if (ipa3_uc_state_check()) {
		IPADBG("uC send command aborted\n");
		mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
		return -EBADF;
	}
send_cmd:
	init_completion(&ipa3_ctx->uc_ctx.uc_completion);

	/* publish command + params in SRAM, clear previous response */
	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd_lo;
	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams_hi = cmd_hi;
	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode;
	ipa3_ctx->uc_ctx.pending_cmd = opcode;
	ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp = 0;
	ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams = 0;

	ipa3_ctx->uc_ctx.uc_status = 0;

	/* ensure write to shared memory is done before triggering uc */
	wmb();

	ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);

	if (polling_mode) {
		struct IpaHwSharedMemCommonMapping_t *uc_sram_ptr =
			ipa3_ctx->uc_ctx.uc_sram_mmio;
		for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) {
			if (uc_sram_ptr->responseOp ==
			    IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
				uc_rsp.raw32b = uc_sram_ptr->responseParams;
				/* only accept a response for our command */
				if (uc_rsp.params.originalCmdOp ==
					ipa3_ctx->uc_ctx.pending_cmd) {
					ipa3_ctx->uc_ctx.uc_status =
						uc_rsp.params.status;
					break;
				}
			}
			usleep_range(IPA_UC_POLL_SLEEP_USEC,
					IPA_UC_POLL_SLEEP_USEC);
		}

		if (index == IPA_UC_POLL_MAX_RETRY) {
			IPAERR("uC max polling retries reached\n");
			if (ipa3_ctx->uc_ctx.uc_failed) {
				uc_error_type = ipa3_ctx->uc_ctx.uc_error_type;
				IPAERR("uC reported on Error, errorType = %s\n",
					ipa_hw_error_str(uc_error_type));
			}
			mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
			/* Unexpected UC hardware state */
			ipa_assert();
		}
	} else {
		if (wait_for_completion_timeout(&ipa3_ctx->uc_ctx.uc_completion,
			timeout_jiffies) == 0) {
			IPAERR("uC timed out\n");
			if (ipa3_ctx->uc_ctx.uc_failed) {
				uc_error_type = ipa3_ctx->uc_ctx.uc_error_type;
				IPAERR("uC reported on Error, errorType = %s\n",
					ipa_hw_error_str(uc_error_type));
			}
			mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
			/* Unexpected UC hardware state */
			ipa_assert();
		}
	}

	if (ipa3_ctx->uc_ctx.uc_status != expected_status) {
		/* GSI-stop failures: retry the whole command after letting
		 * the pipes drain
		 */
		if (ipa3_ctx->uc_ctx.uc_status ==
			IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE ||
			ipa3_ctx->uc_ctx.uc_status ==
			IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE ||
			ipa3_ctx->uc_ctx.uc_status ==
			IPA_HW_CONS_STOP_FAILURE ||
			ipa3_ctx->uc_ctx.uc_status ==
			IPA_HW_PROD_STOP_FAILURE) {
			retries++;
			if (retries == IPA_GSI_CHANNEL_STOP_MAX_RETRY) {
				IPAERR("Failed after %d tries\n", retries);
				mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
				/* Unexpected UC hardware state */
				ipa_assert();
				/* NOTE(review): if ipa_assert() returns in a
				 * non-panicking build, the unlock below runs
				 * on an already-unlocked mutex — confirm
				 * ipa_assert() never returns here.
				 */
			}
			mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
			if (ipa3_ctx->uc_ctx.uc_status ==
				IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE)
				ipa3_inject_dma_task_for_gsi();
			/* sleep for short period to flush IPA */
			usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
				IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
			goto send_cmd_lock;
		}

		/* channel-not-empty: retry without dropping the lock */
		if (ipa3_ctx->uc_ctx.uc_status ==
			IPA_HW_GSI_CH_NOT_EMPTY_FAILURE) {
			retries++;
			if (retries >= IPA_GSI_CHANNEL_EMPTY_MAX_RETRY) {
				IPAERR("Failed after %d tries\n", retries);
				mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
				return -EFAULT;
			}
			usleep_range(
				IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC,
				IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC);
			goto send_cmd;
		}

		IPAERR("Recevied status %u, Expected status %u\n",
			ipa3_ctx->uc_ctx.uc_status, expected_status);
		mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
		return -EFAULT;
	}

	mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);

	IPADBG("uC cmd %u send succeeded\n", opcode);

	return 0;
}
/**
 * ipa3_uc_interface_init() - Initialize the interface with the uC
 *
 * Maps the uC shared-memory window and registers the three uC interrupt
 * handlers (event, response, wigig misc). Idempotent: returns 0 if already
 * initialized. On failure, tears down in reverse order via goto cleanup.
 *
 * Return value: 0 on success, negative value otherwise
 */
int ipa3_uc_interface_init(void)
{
	int result;
	unsigned long phys_addr;

	if (ipa3_ctx->uc_ctx.uc_inited) {
		IPADBG("uC interface already initialized\n");
		return 0;
	}

	mutex_init(&ipa3_ctx->uc_ctx.uc_lock);
	spin_lock_init(&ipa3_ctx->uc_ctx.uc_spinlock);

	/* physical address of the SW-area SRAM used as uC mailbox */
	phys_addr = ipa3_ctx->ipa_wrapper_base +
		ipa3_ctx->ctrl->ipa_reg_base_ofst +
		ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0);
	ipa3_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr,
		IPA_RAM_UC_SMEM_SIZE);
	if (!ipa3_ctx->uc_ctx.uc_sram_mmio) {
		IPAERR("Fail to ioremap IPA uC SRAM\n");
		result = -ENOMEM;
		goto remap_fail;
	}

	result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0,
		ipa3_uc_event_handler, true,
		ipa3_ctx);
	if (result) {
		IPAERR("Fail to register for UC_IRQ0 event interrupt\n");
		result = -EFAULT;
		goto irq_fail0;
	}

	result = ipa3_add_interrupt_handler(IPA_UC_IRQ_1,
		ipa3_uc_response_hdlr, true,
		ipa3_ctx);
	if (result) {
		IPAERR("fail to register for UC_IRQ1 rsp interrupt\n");
		result = -EFAULT;
		goto irq_fail1;
	}

	result = ipa3_add_interrupt_handler(IPA_UC_IRQ_2,
		ipa3_uc_wigig_misc_int_handler, true,
		ipa3_ctx);
	if (result) {
		IPAERR("fail to register for UC_IRQ2 wigig misc interrupt\n");
		result = -EFAULT;
		goto irq_fail2;
	}

	ipa3_ctx->uc_ctx.uc_inited = true;

	IPADBG("IPA uC interface is initialized\n");
	return 0;

	/* unwind in reverse order of acquisition */
irq_fail2:
	ipa3_remove_interrupt_handler(IPA_UC_IRQ_1);
irq_fail1:
	ipa3_remove_interrupt_handler(IPA_UC_IRQ_0);
irq_fail0:
	iounmap(ipa3_ctx->uc_ctx.uc_sram_mmio);
remap_fail:
	return result;
}
/**
 * ipa3_uc_send_cmd() - Send a command to the uC
 *
 * Note1: This function sends command with 32bit parameter and do not
 * use the higher 32bit of the command parameter (set to zero).
 *
 * Note2: In case the operation times out (No response from the uC) or
 * polling maximal amount of retries has reached, the logic
 * considers it as an invalid state of the uC/IPA, and
 * issues a kernel panic.
 *
 * Returns: 0 on success.
 * -EINVAL in case of invalid input.
 * -EBADF in case uC interface is not initialized /
 * or the uC has failed previously.
 * -EFAULT in case the received status doesn't match
 * the expected.
 */
int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
	bool polling_mode, unsigned long timeout_jiffies)
{
	/* thin wrapper: high 32 bits of the immediate parameter are zero */
	return ipa3_uc_send_cmd_64b_param(cmd, 0, opcode,
		expected_status, polling_mode, timeout_jiffies);
}
  726. /**
  727. * ipa3_uc_register_handlers() - Registers event, response and log event
  728. * handlers for a specific feature.Please note
  729. * that currently only one handler can be
  730. * registered per feature.
  731. *
  732. * Return value: None
  733. */
  734. void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
  735. struct ipa3_uc_hdlrs *hdlrs)
  736. {
  737. if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
  738. IPAERR("Feature %u is invalid, not registering hdlrs\n",
  739. feature);
  740. return;
  741. }
  742. mutex_lock(&ipa3_ctx->uc_ctx.uc_lock);
  743. ipa3_uc_hdlrs[feature] = *hdlrs;
  744. mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);
  745. IPADBG("uC handlers registered for feature %u\n", feature);
  746. }
  747. int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client)
  748. {
  749. const struct ipa_gsi_ep_config *gsi_ep_info;
  750. union IpaHwChkChEmptyCmdData_t cmd;
  751. int ret;
  752. gsi_ep_info = ipa3_get_gsi_ep_info(ipa_client);
  753. if (!gsi_ep_info) {
  754. IPAERR("Failed getting GSI EP info for client=%d\n",
  755. ipa_client);
  756. return 0;
  757. }
  758. if (ipa3_uc_state_check()) {
  759. IPADBG("uC cannot be used to validate ch emptiness clnt=%d\n"
  760. , ipa_client);
  761. return 0;
  762. }
  763. cmd.params.ee_n = gsi_ep_info->ee;
  764. cmd.params.vir_ch_id = gsi_ep_info->ipa_gsi_chan_num;
  765. IPADBG("uC emptiness check for IPA GSI Channel %d\n",
  766. gsi_ep_info->ipa_gsi_chan_num);
  767. ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_GSI_CH_EMPTY, 0,
  768. false, 10*HZ);
  769. return ret;
  770. }
  771. /**
  772. * ipa3_uc_notify_clk_state() - notify to uC of clock enable / disable
  773. * @enabled: true if clock are enabled
  774. *
  775. * The function uses the uC interface in order to notify uC before IPA clocks
  776. * are disabled to make sure uC is not in the middle of operation.
  777. * Also after clocks are enabled ned to notify uC to start processing.
  778. *
  779. * Returns: 0 on success, negative on failure
  780. */
  781. int ipa3_uc_notify_clk_state(bool enabled)
  782. {
  783. u32 opcode;
  784. if (ipa3_ctx->ipa_hw_type > IPA_HW_v4_0) {
  785. IPADBG_LOW("not supported past IPA v4.0\n");
  786. return 0;
  787. }
  788. /*
  789. * If the uC interface has not been initialized yet,
  790. * don't notify the uC on the enable/disable
  791. */
  792. if (ipa3_uc_state_check()) {
  793. IPADBG("uC interface will not notify the UC on clock state\n");
  794. return 0;
  795. }
  796. IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE");
  797. opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE :
  798. IPA_CPU_2_HW_CMD_CLK_GATE;
  799. return ipa3_uc_send_cmd(0, opcode, 0, true, 0);
  800. }
  801. /**
  802. * ipa3_uc_update_hw_flags() - send uC the HW flags to be used
  803. * @flags: This field is expected to be used as bitmask for enum ipa3_hw_flags
  804. *
  805. * Returns: 0 on success, negative on failure
  806. */
  807. int ipa3_uc_update_hw_flags(u32 flags)
  808. {
  809. union IpaHwUpdateFlagsCmdData_t cmd;
  810. memset(&cmd, 0, sizeof(cmd));
  811. cmd.params.newFlags = flags;
  812. return ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0,
  813. false, HZ);
  814. }
  815. /**
  816. * ipa3_uc_memcpy() - Perform a memcpy action using IPA uC
  817. * @dest: physical address to store the copied data.
  818. * @src: physical address of the source data to copy.
  819. * @len: number of bytes to copy.
  820. *
  821. * Returns: 0 on success, negative on failure
  822. */
  823. int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
  824. {
  825. int res;
  826. struct ipa_mem_buffer mem;
  827. struct IpaHwMemCopyData_t *cmd;
  828. IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len);
  829. mem.size = sizeof(cmd);
  830. mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
  831. GFP_KERNEL);
  832. if (!mem.base) {
  833. IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
  834. return -ENOMEM;
  835. }
  836. cmd = (struct IpaHwMemCopyData_t *)mem.base;
  837. memset(cmd, 0, sizeof(*cmd));
  838. cmd->destination_addr = dest;
  839. cmd->dest_buffer_size = len;
  840. cmd->source_addr = src;
  841. cmd->source_buffer_size = len;
  842. res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0,
  843. true, 10 * HZ);
  844. if (res) {
  845. IPAERR("ipa3_uc_send_cmd failed %d\n", res);
  846. goto free_coherent;
  847. }
  848. res = 0;
  849. free_coherent:
  850. dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
  851. return res;
  852. }
  853. int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n)
  854. {
  855. int res;
  856. struct ipa_mem_buffer cmd;
  857. struct IpaHwDbAddrInfo_t *uc_info;
  858. cmd.size = sizeof(*uc_info);
  859. cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
  860. &cmd.phys_base, GFP_KERNEL);
  861. if (cmd.base == NULL)
  862. return -ENOMEM;
  863. uc_info = (struct IpaHwDbAddrInfo_t *) cmd.base;
  864. uc_info->remoteIPAAddr = remote_addr;
  865. uc_info->mboxN = mbox_n;
  866. res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
  867. IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO, 0,
  868. false, 10 * HZ);
  869. if (res) {
  870. IPAERR("fail to map 0x%x to mbox %d\n",
  871. uc_info->remoteIPAAddr,
  872. uc_info->mboxN);
  873. goto free_coherent;
  874. }
  875. res = 0;
  876. free_coherent:
  877. dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
  878. return res;
  879. }
  880. int ipa3_uc_debug_stats_alloc(
  881. struct IpaHwOffloadStatsAllocCmdData_t cmdinfo)
  882. {
  883. int result;
  884. struct ipa_mem_buffer cmd;
  885. enum ipa_cpu_2_hw_offload_commands command;
  886. struct IpaHwOffloadStatsAllocCmdData_t *cmd_data;
  887. cmd.size = sizeof(*cmd_data);
  888. cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
  889. &cmd.phys_base, GFP_KERNEL);
  890. if (cmd.base == NULL) {
  891. result = -ENOMEM;
  892. return result;
  893. }
  894. cmd_data = (struct IpaHwOffloadStatsAllocCmdData_t *)cmd.base;
  895. memcpy(cmd_data, &cmdinfo,
  896. sizeof(struct IpaHwOffloadStatsAllocCmdData_t));
  897. command = IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC;
  898. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  899. result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
  900. command,
  901. IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
  902. false, 10 * HZ);
  903. if (result) {
  904. IPAERR("fail to alloc offload stats\n");
  905. goto cleanup;
  906. }
  907. result = 0;
  908. cleanup:
  909. dma_free_coherent(ipa3_ctx->uc_pdev,
  910. cmd.size,
  911. cmd.base, cmd.phys_base);
  912. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  913. IPADBG("exit\n");
  914. return result;
  915. }
  916. int ipa3_uc_debug_stats_dealloc(uint32_t protocol)
  917. {
  918. int result;
  919. struct ipa_mem_buffer cmd;
  920. enum ipa_cpu_2_hw_offload_commands command;
  921. struct IpaHwOffloadStatsDeAllocCmdData_t *cmd_data;
  922. cmd.size = sizeof(*cmd_data);
  923. cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
  924. &cmd.phys_base, GFP_KERNEL);
  925. if (cmd.base == NULL) {
  926. result = -ENOMEM;
  927. return result;
  928. }
  929. cmd_data = (struct IpaHwOffloadStatsDeAllocCmdData_t *)
  930. cmd.base;
  931. cmd_data->protocol = protocol;
  932. command = IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC;
  933. IPA_ACTIVE_CLIENTS_INC_SIMPLE();
  934. result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
  935. command,
  936. IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
  937. false, 10 * HZ);
  938. if (result) {
  939. IPAERR("fail to dealloc offload stats\n");
  940. goto cleanup;
  941. }
  942. switch (protocol) {
  943. case IPA_HW_PROTOCOL_AQC:
  944. break;
  945. case IPA_HW_PROTOCOL_11ad:
  946. break;
  947. case IPA_HW_PROTOCOL_WDI:
  948. iounmap(ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio);
  949. ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
  950. break;
  951. case IPA_HW_PROTOCOL_WDI3:
  952. iounmap(ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio);
  953. ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
  954. break;
  955. case IPA_HW_PROTOCOL_ETH:
  956. break;
  957. default:
  958. IPAERR("unknown protocols %d\n", protocol);
  959. }
  960. result = 0;
  961. cleanup:
  962. dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size,
  963. cmd.base, cmd.phys_base);
  964. IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
  965. IPADBG("exit\n");
  966. return result;
  967. }