bfa_ioc_ct.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  4. * Copyright (c) 2014- QLogic Corporation.
  5. * All rights reserved
  6. * www.qlogic.com
  7. *
  8. * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
  9. */
  10. #include "bfad_drv.h"
  11. #include "bfa_ioc.h"
  12. #include "bfi_reg.h"
  13. #include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CT);

/*
 * The ioc_fail_sync register packs two per-PCI-fn bitfields into one word:
 * bits 31..16 hold the "sync required" bits, bits 15..0 the "sync acked"
 * bits.  Each PCI function owns the bit at position bfa_ioc_pcifn() in
 * each half.
 */
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH	16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);

/* Per-ASIC hw interface vectors; populated by bfa_ioc_set_ct*_hwif() */
static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;
/*
 * Return true if firmware of current driver matches the running firmware.
 *
 * Takes the shared usage semaphore, inspects the firmware use count and
 * (when non-zero) compares the flash firmware header against the one the
 * running firmware reports.  On a match the use count is incremented;
 * on mismatch the semaphore is dropped and BFA_FALSE is returned so the
 * caller can back off.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	/* serialize access to the shared use-count register */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		/* first user: claim the firmware and clear stale sync state */
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		/*
		 * read the sem reg before writing 1 to release it; see
		 * bfa_ioc_ct_ownership_reset() for why the read matters
		 */
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * Use count cannot be non-zero and chip in uninitialized state.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		/* mismatch: release the semaphore without taking a ref */
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}
  91. static void
  92. bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
  93. {
  94. u32 usecnt;
  95. /*
  96. * decrement usage count
  97. */
  98. bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  99. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  100. WARN_ON(usecnt <= 0);
  101. usecnt--;
  102. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  103. bfa_trc(ioc, usecnt);
  104. readl(ioc->ioc_regs.ioc_usage_sem_reg);
  105. writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
  106. }
/*
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_is_cna(ioc)) {
		/* CNA: halt firmware on this function and its partner */
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		/* FC mode: raise all bits in the error-set register instead */
		writel(~0U, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}
/*
 * Host to LPU mailbox message addresses
 * (indexed by PCI function number)
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

/* CT2 combines mailbox/page/cmd-stat registers per port (indexed by port) */
static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
	ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};
/*
 * Resolve all catapult (CT) register addresses for this IOC instance:
 * mailbox registers by PCI function, heartbeat/fwstate/halt registers
 * by port, plus the shared PLL, semaphore and SMEM windows.
 */
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int	pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	/* mailbox registers are selected by PCI function */
	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	/* heartbeat/state/halt registers are selected by port;
	 * "alt" entries point at the partner port's registers */
	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/*
 * Resolve all catapult-2 (CT2) register addresses for this IOC instance.
 * Unlike CT, all mailbox and cmd/status registers are selected by port
 * (not PCI function), and CT2-specific PLL/semaphore offsets are used.
 */
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int	port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	/* "alt" entries point at the partner port's registers */
	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/*
 * Initialize IOC to port mapping.
 */

/* each PCI function owns an 8-bit field in the personality register */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
/*
 * CT2 port mapping: the port id comes straight from the per-function
 * personality register (no per-fn shifting as on CT).
 */
static void
bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem	*rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	/* extract this function's interrupt-mode field */
	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	/* clear the field, then merge in the new mode */
	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}
  315. static bfa_boolean_t
  316. bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
  317. {
  318. u32 r32;
  319. r32 = readl(ioc->ioc_regs.lpu_read_stat);
  320. if (r32) {
  321. writel(1, ioc->ioc_regs.lpu_read_stat);
  322. return BFA_TRUE;
  323. }
  324. return BFA_FALSE;
  325. }
/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
	/* zero the use count and fail-sync word under the usage semaphore */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);

	writel(0, ioc->ioc_regs.ioc_fail_sync);
	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
/*
 * Decide at driver-load time whether this function may (re)initialize
 * the ASIC.  Returns BFA_TRUE when initialization can proceed.
 */
static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time.  If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		/* wipe sync state, reclaim the fw, force both IOCs UNINIT */
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
  365. /*
  366. * Synchronized IOC failure processing routines
  367. */
  368. static void
  369. bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
  370. {
  371. uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  372. uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
  373. writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
  374. }
  375. static void
  376. bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
  377. {
  378. uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  379. uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
  380. bfa_ioc_ct_sync_pos(ioc);
  381. writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
  382. }
  383. static void
  384. bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
  385. {
  386. uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  387. writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
  388. ioc->ioc_regs.ioc_fail_sync);
  389. }
/*
 * Returns BFA_TRUE when every PCI fn that required failure sync has
 * acknowledged it (so this fn may reinitialize the ASIC), BFA_FALSE
 * while acks are still outstanding.
 */
static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	/* no acks outstanding at all - nothing to wait for */
	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		/* all participants acked: clear acks, mark both IOCs failed */
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
		       ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}
/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 * Fills in the callbacks common to CT and CT2; asic-specific entries
 * (pll_init, reg_init, map_port, ...) are set by the callers.
 */
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
{
	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
	hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate;
	hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate;
	hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate;
	hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
}
/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 * Installs the catapult (CT) hardware interface on the IOC.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);

	/* CT-specific entry points */
	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	ioc->ioc_hwif = &hwif_ct;
}
/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 * Installs the catapult-2 (CT2) hardware interface on the IOC.
 */
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

	/* CT2-specific entry points; CT2 has no host-settable ISR mode */
	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
	hwif_ct2.ioc_isr_mode_set = NULL;
	ioc->ioc_hwif = &hwif_ct2;
}
/*
 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		/* vector count already programmed; just set the mbox-err
		 * vector index from the existing offset field */
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	/* program NUMVT and per-function vector offset, then the
	 * mbox-err vector index for this PCI function */
	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
/*
 * Catapult PLL initialization: program operating mode, reset and enable
 * the SCLK/LCLK PLLs, then run the memory BIST.  The exact write/delay
 * sequence below is hardware-mandated; do not reorder.
 */
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32	pll_sclk, pll_fclk, r32;
	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	/* select FC or FCoE operating mode and EMS clock source */
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
			__APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}

	/* start both IOCs from UNINIT, mask and clear host interrupts */
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	/* program PLLs with logic held in soft reset, then enable */
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	/* flush writes, let PLLs settle, then release soft reset */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	/* FCoE needs the per-port 1T memory blocks reset */
	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* kick off the eDRAM built-in self test; result is read but the
	 * status value itself is not acted upon here */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
/*
 * CT2 s_clk PLL bring-up.  Register write sequence is hardware-mandated.
 */
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init dont clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value (0x1061731b is the hardware-specified divider
	 * configuration; only the lock/refclk/div2 bits are preserved)
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}
/*
 * CT2 l_clk PLL bring-up.  Register write sequence is hardware-mandated.
 */
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 * (read-modify-write with no bit changes; presumably a required
	 * register touch - do not remove without hw confirmation)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16); 0x20c1731b is the
	 * hardware-specified divider configuration
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}
/*
 * CT2 memory init: release local memory reset and pulse the eDRAM
 * built-in self test.
 */
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32	r32;

	/* take local memory out of reset */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	/* start, then stop, the eDRAM BIST */
	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}
/*
 * Hold both CT2 port MACs (and their AHB interfaces) in reset.
 */
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(0));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(1));
}
/*
 * Enable flash access by driving GPIO bit 0 low and making it an output.
 */
static void
bfa_ioc_ct2_enable_flash(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_GPIO_OUT_REG));
	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
	r32 = readl((rb + PSS_GPIO_OE_REG));
	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
}
/* NFC (flash controller) poll limits (iterations of ~1ms udelay) */
#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_PAUSE_MAX_DELAY 4000
/* minimum NFC firmware version that supports the clk-reset path */
#define CT2_NFC_VER_VALID	0x147
#define CT2_NFC_STATE_RUNNING	0x20000001
#define BFA_IOC_PLL_POLL	1000000
  667. static bfa_boolean_t
  668. bfa_ioc_ct2_nfc_halted(void __iomem *rb)
  669. {
  670. u32 r32;
  671. r32 = readl(rb + CT2_NFC_CSR_SET_REG);
  672. if (r32 & __NFC_CONTROLLER_HALTED)
  673. return BFA_TRUE;
  674. return BFA_FALSE;
  675. }
/*
 * Request an NFC controller halt and poll (up to CT2_NFC_MAX_DELAY ms)
 * until it takes effect; warns if the halt never completes.
 */
static void
bfa_ioc_ct2_nfc_halt(void __iomem *rb)
{
	int	i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			break;
		udelay(1000);
	}
	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
}
/*
 * Clear the NFC halt request and poll (up to CT2_NFC_MAX_DELAY ms)
 * until the controller leaves the halted state; warns on timeout.
 */
static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	u32	r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	WARN_ON(1);
}
/*
 * Reinitialize both CT2 PLLs and then release their logic soft resets.
 */
static void
bfa_ioc_ct2_clk_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_SCLK_CTL_REG));

	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_LCLK_CTL_REG));
}
/*
 * Ask the NFC firmware to reset and restart the SCLK/LCLK PLLs: hold
 * the LPUs in reset, request the reset, then poll for the in-progress
 * flag to raise and clear again.  Warns if any phase times out.
 */
static void
bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
{
	u32 r32, i;

	r32 = readl((rb + PSS_CTL_REG));
	r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
	writel(r32, (rb + PSS_CTL_REG));

	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);

	/* wait for NFC to begin the PLL reset ... */
	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	/* ... and then to finish it */
	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	/* the request bit should have been consumed by NFC */
	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
	WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
}
/*
 * Resume the NFC if halted and poll (up to CT2_NFC_PAUSE_MAX_DELAY ms)
 * until it reports the RUNNING state; warns on timeout.
 */
static void
bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
{
	u32 r32;
	int i;

	if (bfa_ioc_ct2_nfc_halted(rb))
		bfa_ioc_ct2_nfc_resume(rb);
	for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_STS_REG);
		if (r32 == CT2_NFC_STATE_RUNNING)
			return;
		udelay(1000);
	}

	r32 = readl(rb + CT2_NFC_STS_REG);
	WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING));
}
/*
 * CT2 PLL initialization.  Chooses a bring-up path based on the WGN
 * status and NFC firmware version, applies the ATC chip workaround,
 * clears stale mailbox interrupts, runs memory init and forces both
 * IOC state registers to UNINIT.  Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 wgn, r32, nfc_ver;

	wgn = readl(rb + CT2_WGN_STATUS);

	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		/*
		 * If flash is corrupted, enable flash explicitly
		 * (done twice around the MAC reset on purpose)
		 */
		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);

		bfa_ioc_ct2_mac_reset(rb);

		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);

	} else {
		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

		if ((nfc_ver >= CT2_NFC_VER_VALID) &&
		    (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
			/* new NFC firmware: let it drive the PLL reset */
			bfa_ioc_ct2_wait_till_nfc_running(rb);

			bfa_ioc_ct2_nfc_clk_reset(rb);
		} else {
			/* old/absent NFC: halt it and reset PLLs directly */
			bfa_ioc_ct2_nfc_halt(rb);

			bfa_ioc_ct2_clk_reset(rb);
			bfa_ioc_ct2_mac_reset(rb);
			bfa_ioc_ct2_clk_reset(rb);

		}
	}
	/*
	 * The very first PCIe DMA Read done by LPU fails with a fatal error,
	 * when Address Translation Cache (ATC) has been enabled by system BIOS.
	 *
	 * Workaround:
	 * Disable Invalidated Tag Match Enable capability by setting the bit 26
	 * of CHIP_MISC_PRG to 0, by default it is set to 1.
	 */
	r32 = readl(rb + CT2_CHIP_MISC_PRG);
	writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}
  819. static void
  820. bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
  821. enum bfi_ioc_state fwstate)
  822. {
  823. writel(fwstate, ioc->ioc_regs.ioc_fwstate);
  824. }
  825. static enum bfi_ioc_state
  826. bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
  827. {
  828. return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
  829. }
  830. static void
  831. bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
  832. enum bfi_ioc_state fwstate)
  833. {
  834. writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
  835. }
  836. static enum bfi_ioc_state
  837. bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
  838. {
  839. return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate);
  840. }