// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "mcdi_functions.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
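
/* Ask the MC to free every VI (virtual interface) belonging to this
 * function.
 */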
int efx_mcdi_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}
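
/**
 * efx_mcdi_alloc_vis - allocate virtual interfaces (VIs) from the MC
 * @efx: NIC to allocate VIs on
 * @min_vis: minimum number of VIs that must be allocated
 * @max_vis: maximum number of VIs wanted
 * @vi_base: if non-NULL, written with the base VI number granted
 * @allocated_vis: if non-NULL, written with the number of VIs granted
 *
 * The MC may grant anywhere between @min_vis and @max_vis VIs.  A caller
 * that can use a variable number of VIs might do (names illustrative):
 *
 *	rc = efx_mcdi_alloc_vis(efx, min_vis, max_vis, &vi_base, &n_vis);
 *
 * Return: 0 on success, or a negative error code.
 */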
int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
		       unsigned int max_vis, unsigned int *vi_base,
		       unsigned int *allocated_vis)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	if (vi_base)
		*vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	if (allocated_vis)
		*allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}
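
/* Allocate the DMA buffer that backs a channel's event queue: one
 * efx_qword_t per event ring entry.
 */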
int efx_mcdi_ev_probe(struct efx_channel *channel)
{
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
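
/* Push a channel's event queue to the NIC with MC_CMD_INIT_EVQ.
 * @v1_cut_thru selects event cut-through in the v1 request format;
 * @v2 selects the INIT_EVQ_V2 request format, which lets the firmware
 * choose the queue type and report the flags actually used.
 */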
int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
						   EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc, i;

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
	/* INIT_EVQ expects index in vector table, not absolute */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	if (v2) {
		/* Use the new generic approach to specifying event queue
		 * configuration, requesting lower latency or higher throughput.
		 * The options that actually get used appear in the output.
		 */
		MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
				      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
				      INIT_EVQ_V2_IN_FLAG_TYPE,
				      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
	} else {
		MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
				      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
				      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
				      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
				      INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
	}

	dma_addr = channel->eventq.buf.dma_addr;
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);

	if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
		netif_dbg(efx, drv, efx->net_dev,
			  "Channel %d using event queue flags %08x\n",
			  channel->channel,
			  MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));

	return rc;
}
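
/* Free the DMA buffer backing a channel's event queue. */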
void efx_mcdi_ev_remove(struct efx_channel *channel)
{
	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}
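
/* Tear down a channel's event queue with MC_CMD_FINI_EVQ.  -EALREADY
 * (the queue is already gone, e.g. after an MC reboot) is ignored;
 * any other error is logged.
 */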
void efx_mcdi_ev_fini(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = channel->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
			       outbuf, outlen, rc);
}
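
/* Push a TX queue to the NIC with MC_CMD_INIT_TXQ.  Checksum-offload
 * and timestamping flags are derived from the queue type, and if the
 * firmware is short of TSOv2 contexts the command is retried with
 * TSOv2 disabled.
 */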
int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
	bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	dma_addr_t dma_addr;
	size_t inlen;
	int rc, i;

	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	do {
		bool tso_v2 = tx_queue->tso_version == 2;

		/* TSOv2 implies IP header checksum offload for TSO frames,
		 * so we can safely disable IP header checksum offload for
		 * everything else.  If we don't have TSOv2, then we have to
		 * enable IP header checksum offload, which is strictly
		 * incorrect but better than breaking TSO.
		 */
		MCDI_POPULATE_DWORD_6(inbuf, INIT_TXQ_IN_FLAGS,
				/* This flag was removed from mcdi_pcol.h for
				 * the non-_EXT version of INIT_TXQ.  However,
				 * firmware still honours it.
				 */
				INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
				INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !(csum_offload && tso_v2),
				INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
				INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, tx_queue->timestamping,
				INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN, inner_csum && !tso_v2,
				INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN, inner_csum);

		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
					NULL, 0, NULL);
		if (rc == -ENOSPC && tso_v2) {
			/* Retry without TSOv2 if we're short on contexts. */
			tx_queue->tso_version = 0;
			netif_warn(efx, probe, efx->net_dev,
				   "TSOv2 context not available to segment in "
				   "hardware. TCP performance may be reduced.\n");
		} else if (rc) {
			efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
					       MC_CMD_INIT_TXQ_EXT_IN_LEN,
					       NULL, 0, rc);
			goto fail;
		}
	} while (rc);

	return 0;

fail:
	return rc;
}
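
/* Free the DMA buffer backing a TX queue's descriptor ring. */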
void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}
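
/* Tear down a TX queue with MC_CMD_FINI_TXQ; -EALREADY is ignored,
 * other errors are logged.
 */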
void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
			       outbuf, outlen, rc);
}
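
/* Allocate the DMA buffer that backs an RX queue's descriptor ring. */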
int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
{
	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
				    (rx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
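
/* Push an RX queue to the NIC with MC_CMD_INIT_RXQ, enabling the RX
 * prefix and timestamping.  Only EF100 takes an explicit receive buffer
 * size; for earlier NICs the field is sent as zero.
 */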
void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
	struct efx_nic *efx = rx_queue->efx;
	unsigned int buffer_size;
	dma_addr_t dma_addr;
	int rc;
	int i;

	BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	if (efx->type->revision == EFX_REV_EF100)
		buffer_size = efx->rx_page_buf_step;
	else
		buffer_size = 0;

	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));
	MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
			      INIT_RXQ_IN_FLAG_PREFIX, 1,
			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size);

	dma_addr = rx_queue->rxd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
			    efx_rx_queue_index(rx_queue));
}
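
/* Free the DMA buffer backing an RX queue's descriptor ring. */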
void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}
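
/* Tear down an RX queue with MC_CMD_FINI_RXQ; -EALREADY is ignored,
 * other errors are logged.
 */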
void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
	MCDI_DECLARE_BUF_ERR(outbuf);
	struct efx_nic *efx = rx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
			       outbuf, outlen, rc);
}
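
/* Tear down every TX and RX queue, then wait up to EFX_MAX_FLUSH_TIME
 * for the hardware to finish flushing them.  Returns -ETIMEDOUT if any
 * queue is still active when the wait expires.
 */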
int efx_fini_dmaq(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int pending;

	/* If the MC has just rebooted, the TX/RX queues will have already been
	 * torn down, but efx->active_queues needs to be set to zero.
	 */
	if (efx->must_realloc_vis) {
		atomic_set(&efx->active_queues, 0);
		return 0;
	}

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_mcdi_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_mcdi_tx_fini(tx_queue);
		}
		wait_event_timeout(efx->flush_wq,
				   atomic_read(&efx->active_queues) == 0,
				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
		pending = atomic_read(&efx->active_queues);
		if (pending) {
			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
				  pending);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
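
/* Convert the VI window mode reported by MC_CMD_GET_CAPABILITIES into a
 * VI stride (the spacing of per-VI register windows) in bytes.
 */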
int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
{
	switch (vi_window_mode) {
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
		efx->vi_stride = 8192;
		break;
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
		efx->vi_stride = 16384;
		break;
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
		efx->vi_stride = 65536;
		break;
	default:
		netif_err(efx, probe, efx->net_dev,
			  "Unrecognised VI window mode %d\n",
			  vi_window_mode);
		return -EIO;
	}
	netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
		  efx->vi_stride);
	return 0;
}
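
/* Query the MC for the PCIe physical function (PF) index of this
 * interface.
 */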
int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	*pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}