bfa_ioc_cb.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  4. * Copyright (c) 2014- QLogic Corporation.
  5. * All rights reserved
  6. * www.qlogic.com
  7. *
  8. * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
  9. */
  10. #include "bfad_drv.h"
  11. #include "bfa_ioc.h"
  12. #include "bfi_reg.h"
  13. #include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CB);

/*
 * Position of this IOC's "join" bit inside the fwstate register.
 * NOTE(review): __ioc is unused -- the expansion is the same constant
 * (1 << BFA_IOC_CB_JOIN_SH) for every caller; the parameter appears to be
 * kept only for interface symmetry with other ASIC variants -- confirm.
 */
#define bfa_ioc_cb_join_pos(__ioc) ((u32) (1 << BFA_IOC_CB_JOIN_SH))

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_set_cur_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_set_alt_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);

/* ASIC-specific hw interface vector, populated by bfa_ioc_set_cb_hwif() */
static struct bfa_ioc_hwif_s hwif_cb;
  38. /*
  39. * Called from bfa_ioc_attach() to map asic specific calls.
  40. */
  41. void
  42. bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
  43. {
  44. hwif_cb.ioc_pll_init = bfa_ioc_cb_pll_init;
  45. hwif_cb.ioc_firmware_lock = bfa_ioc_cb_firmware_lock;
  46. hwif_cb.ioc_firmware_unlock = bfa_ioc_cb_firmware_unlock;
  47. hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
  48. hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
  49. hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
  50. hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
  51. hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
  52. hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
  53. hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
  54. hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
  55. hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
  56. hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
  57. hwif_cb.ioc_set_fwstate = bfa_ioc_cb_set_cur_ioc_fwstate;
  58. hwif_cb.ioc_get_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate;
  59. hwif_cb.ioc_set_alt_fwstate = bfa_ioc_cb_set_alt_ioc_fwstate;
  60. hwif_cb.ioc_get_alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate;
  61. ioc->ioc_hwif = &hwif_cb;
  62. }
  63. /*
  64. * Return true if firmware of current driver matches the running firmware.
  65. */
  66. static bfa_boolean_t
  67. bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
  68. {
  69. enum bfi_ioc_state alt_fwstate, cur_fwstate;
  70. struct bfi_ioc_image_hdr_s fwhdr;
  71. cur_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc);
  72. bfa_trc(ioc, cur_fwstate);
  73. alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc);
  74. bfa_trc(ioc, alt_fwstate);
  75. /*
  76. * Uninit implies this is the only driver as of now.
  77. */
  78. if (cur_fwstate == BFI_IOC_UNINIT)
  79. return BFA_TRUE;
  80. /*
  81. * Check if another driver with a different firmware is active
  82. */
  83. bfa_ioc_fwver_get(ioc, &fwhdr);
  84. if (!bfa_ioc_fwver_cmp(ioc, &fwhdr) &&
  85. alt_fwstate != BFI_IOC_DISABLED) {
  86. bfa_trc(ioc, alt_fwstate);
  87. return BFA_FALSE;
  88. }
  89. return BFA_TRUE;
  90. }
/*
 * Counterpart of bfa_ioc_cb_firmware_lock(); intentionally a no-op on
 * this ASIC -- the lock path takes no resource that needs releasing.
 */
static void
bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
{
}
  95. /*
  96. * Notify other functions on HB failure.
  97. */
  98. static void
  99. bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
  100. {
  101. writel(~0U, ioc->ioc_regs.err_set);
  102. readl(ioc->ioc_regs.err_set);
  103. }
  104. /*
  105. * Host to LPU mailbox message addresses
  106. */
  107. static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
  108. { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
  109. { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
  110. };
  111. /*
  112. * Host <-> LPU mailbox command/status registers
  113. */
  114. static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
  115. { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
  116. { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
  117. };
  118. static void
  119. bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
  120. {
  121. void __iomem *rb;
  122. int pcifn = bfa_ioc_pcifn(ioc);
  123. rb = bfa_ioc_bar0(ioc);
  124. ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
  125. ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
  126. ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
  127. if (ioc->port_id == 0) {
  128. ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
  129. ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
  130. ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
  131. } else {
  132. ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
  133. ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
  134. ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
  135. }
  136. /*
  137. * Host <-> LPU mailbox command/status registers
  138. */
  139. ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
  140. ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu;
  141. /*
  142. * PSS control registers
  143. */
  144. ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
  145. ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
  146. ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
  147. ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
  148. /*
  149. * IOC semaphore registers and serialization
  150. */
  151. ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
  152. ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
  153. /*
  154. * sram memory access
  155. */
  156. ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
  157. ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
  158. /*
  159. * err set reg : for notification of hb failure
  160. */
  161. ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
  162. }
/*
 * Initialize IOC to port mapping.
 */
static void
bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
{
	/*
	 * For crossbow, port id is same as pci function.
	 */
	ioc->port_id = bfa_ioc_pcifn(ioc);

	bfa_trc(ioc, ioc->port_id);
}
  175. /*
  176. * Set interrupt mode for a function: INTX or MSIX
  177. */
  178. static void
  179. bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
  180. {
  181. }
  182. /*
  183. * Synchronized IOC failure processing routines
  184. */
  185. static bfa_boolean_t
  186. bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
  187. {
  188. u32 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
  189. /**
  190. * Driver load time. If the join bit is set,
  191. * it is due to an unclean exit by the driver for this
  192. * PCI fn in the previous incarnation. Whoever comes here first
  193. * should clean it up, no matter which PCI fn.
  194. */
  195. if (ioc_fwstate & BFA_IOC_CB_JOIN_MASK) {
  196. writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
  197. writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
  198. return BFA_TRUE;
  199. }
  200. return bfa_ioc_cb_sync_complete(ioc);
  201. }
  202. /*
  203. * Cleanup hw semaphore and usecnt registers
  204. */
  205. static void
  206. bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
  207. {
  208. /*
  209. * Read the hw sem reg to make sure that it is locked
  210. * before we clear it. If it is not locked, writing 1
  211. * will lock it instead of clearing it.
  212. */
  213. readl(ioc->ioc_regs.ioc_sem_reg);
  214. writel(1, ioc->ioc_regs.ioc_sem_reg);
  215. }
  216. /*
  217. * Synchronized IOC failure processing routines
  218. */
  219. static void
  220. bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
  221. {
  222. u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
  223. u32 join_pos = bfa_ioc_cb_join_pos(ioc);
  224. writel((r32 | join_pos), ioc->ioc_regs.ioc_fwstate);
  225. }
  226. static void
  227. bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
  228. {
  229. u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
  230. u32 join_pos = bfa_ioc_cb_join_pos(ioc);
  231. writel((r32 & ~join_pos), ioc->ioc_regs.ioc_fwstate);
  232. }
  233. static void
  234. bfa_ioc_cb_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
  235. enum bfi_ioc_state fwstate)
  236. {
  237. u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
  238. writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)),
  239. ioc->ioc_regs.ioc_fwstate);
  240. }
  241. static enum bfi_ioc_state
  242. bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
  243. {
  244. return (enum bfi_ioc_state)(readl(ioc->ioc_regs.ioc_fwstate) &
  245. BFA_IOC_CB_FWSTATE_MASK);
  246. }
  247. static void
  248. bfa_ioc_cb_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
  249. enum bfi_ioc_state fwstate)
  250. {
  251. u32 r32 = readl(ioc->ioc_regs.alt_ioc_fwstate);
  252. writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)),
  253. ioc->ioc_regs.alt_ioc_fwstate);
  254. }
  255. static enum bfi_ioc_state
  256. bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
  257. {
  258. return (enum bfi_ioc_state)(readl(ioc->ioc_regs.alt_ioc_fwstate) &
  259. BFA_IOC_CB_FWSTATE_MASK);
  260. }
/*
 * Acknowledge a synchronized failure by marking this IOC's fwstate
 * as failed (join bits preserved by the setter).
 */
static void
bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
{
	bfa_ioc_cb_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
}
  266. static bfa_boolean_t
  267. bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
  268. {
  269. u32 fwstate, alt_fwstate;
  270. fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc);
  271. /*
  272. * At this point, this IOC is hoding the hw sem in the
  273. * start path (fwcheck) OR in the disable/enable path
  274. * OR to check if the other IOC has acknowledged failure.
  275. *
  276. * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL
  277. * or in MEMTEST states. In a normal scenario, this IOC
  278. * can not be in OP state when this function is called.
  279. *
  280. * However, this IOC could still be in OP state when
  281. * the OS driver is starting up, if the OptROM code has
  282. * left it in that state.
  283. *
  284. * If we had marked this IOC's fwstate as BFI_IOC_FAIL
  285. * in the failure case and now, if the fwstate is not
  286. * BFI_IOC_FAIL it implies that the other PCI fn have
  287. * reinitialized the ASIC or this IOC got disabled, so
  288. * return TRUE.
  289. */
  290. if (fwstate == BFI_IOC_UNINIT ||
  291. fwstate == BFI_IOC_INITING ||
  292. fwstate == BFI_IOC_DISABLED ||
  293. fwstate == BFI_IOC_MEMTEST ||
  294. fwstate == BFI_IOC_OP)
  295. return BFA_TRUE;
  296. else {
  297. alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc);
  298. if (alt_fwstate == BFI_IOC_FAIL ||
  299. alt_fwstate == BFI_IOC_DISABLED ||
  300. alt_fwstate == BFI_IOC_UNINIT ||
  301. alt_fwstate == BFI_IOC_INITING ||
  302. alt_fwstate == BFI_IOC_MEMTEST)
  303. return BFA_TRUE;
  304. else
  305. return BFA_FALSE;
  306. }
  307. }
/*
 * Bring up the application PLLs (SCLK and LCLK) for the Crossbow ASIC.
 * The exact write ordering and delays below form a hardware reset
 * sequence and must not be reordered.  Always returns BFA_STATUS_OK.
 * NOTE(review): fcmode is unused on this ASIC -- presumably kept for
 * interface parity with other pll_init implementations; confirm.
 */
bfa_status_t
bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
{
	u32 pll_sclk, pll_fclk, join_bits;

	/* Target configuration values for the slow (SCLK) and fast (LCLK)
	 * PLL control registers.
	 */
	pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN |
		__APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(3U);
	pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(3U);

	/* Reset both IOC fwstates to UNINIT, preserving their join bits. */
	join_bits = readl(rb + BFA_IOC0_STATE_REG) &
			BFA_IOC_CB_JOIN_MASK;
	writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG));
	join_bits = readl(rb + BFA_IOC1_STATE_REG) &
			BFA_IOC_CB_JOIN_MASK;
	writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG));

	/* Mask all host-function interrupts and clear pending status.
	 * NOTE(review): the INT_MSK registers are written both before and
	 * after clearing status -- looks deliberate (mask, clear, re-assert
	 * mask), but confirm against the ASIC programming guide.
	 */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	/* Put both PLLs into logic soft reset, then additionally bypass. */
	writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
	writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_SCLK_CTL_REG);
	writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
	writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_LCLK_CTL_REG);
	udelay(2);
	/* Drop bypass, then program the target config with reset held. */
	writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
	writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
			rb + APP_PLL_LCLK_CTL_REG);

	/* Wait for the PLLs to lock before releasing soft reset. */
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG));
	writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG));

	return BFA_STATUS_OK;
}
  351. }