ef100_nic.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /****************************************************************************
  3. * Driver for Solarflare network controllers and boards
  4. * Copyright 2018 Solarflare Communications Inc.
  5. * Copyright 2019-2022 Xilinx Inc.
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms of the GNU General Public License version 2 as published
  9. * by the Free Software Foundation, incorporated herein by reference.
  10. */
  11. #include "ef100_nic.h"
  12. #include "efx_common.h"
  13. #include "efx_channels.h"
  14. #include "io.h"
  15. #include "selftest.h"
  16. #include "ef100_regs.h"
  17. #include "mcdi.h"
  18. #include "mcdi_pcol.h"
  19. #include "mcdi_port_common.h"
  20. #include "mcdi_functions.h"
  21. #include "mcdi_filters.h"
  22. #include "ef100_rx.h"
  23. #include "ef100_tx.h"
  24. #include "ef100_sriov.h"
  25. #include "ef100_netdev.h"
  26. #include "tc.h"
  27. #include "mae.h"
  28. #include "rx_common.h"
/* Maximum number of virtual interfaces (VIs) used by the driver */
#define EF100_MAX_VIS 4096
/* EF100 uses a single shared MCDI buffer (bufid 0) */
#define EF100_NUM_MCDI_BUFFERS 1
/* MCDI buffer: 8-byte MCDI header plus the maximum SDU length */
#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
/* ethtool reset flags that map to a full port reset */
#define EF100_RESET_PORT ((ETH_RESET_MAC | ETH_RESET_PHY) << ETH_RESET_SHARED_SHIFT)
  33. /* MCDI
  34. */
  35. static u8 *ef100_mcdi_buf(struct efx_nic *efx, u8 bufid, dma_addr_t *dma_addr)
  36. {
  37. struct ef100_nic_data *nic_data = efx->nic_data;
  38. if (dma_addr)
  39. *dma_addr = nic_data->mcdi_buf.dma_addr +
  40. bufid * ALIGN(MCDI_BUF_LEN, 256);
  41. return nic_data->mcdi_buf.addr + bufid * ALIGN(MCDI_BUF_LEN, 256);
  42. }
/* Read the MC warm boot count from ER_GZ_MC_SFT_STATUS.
 * Returns the count (>= 0), -ENETDOWN if the register reads all-ones
 * (hardware has gone away; the device is marked disabled), or -EIO if
 * the validity magic is absent.
 */
static int ef100_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, efx_reg(efx, ER_GZ_MC_SFT_STATUS));
	if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) == 0xffffffff) {
		netif_err(efx, hw, efx->net_dev, "Hardware unavailable\n");
		efx->state = STATE_DISABLED;
		return -ENETDOWN;
	} else {
		/* Upper word must hold the 0xb007 ("boot") magic for the
		 * lower word (the warm boot count) to be valid.
		 */
		return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		       EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
	}
}
/* Copy an MCDI request (header followed by SDU) into MCDI buffer 0 and
 * write its DMA address to the doorbell registers to start the command.
 */
static void ef100_mcdi_request(struct efx_nic *efx,
			       const efx_dword_t *hdr, size_t hdr_len,
			       const efx_dword_t *sdu, size_t sdu_len)
{
	dma_addr_t dma_addr;
	u8 *pdu = ef100_mcdi_buf(efx, 0, &dma_addr);

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
	/* Ensure the request body is visible in memory before ringing the
	 * doorbell below.
	 */
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware. However the dwords are swapped by firmware. The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
	_efx_writed(efx, cpu_to_le32((u64)dma_addr >> 32), efx_reg(efx, ER_GZ_MC_DB_LWRD));
	_efx_writed(efx, cpu_to_le32((u32)dma_addr), efx_reg(efx, ER_GZ_MC_DB_HWRD));
}
/* Return true if firmware has posted a response in MCDI buffer 0,
 * indicated by the RESPONSE bit in the MCDI header dword.
 */
static bool ef100_mcdi_poll_response(struct efx_nic *efx)
{
	const efx_dword_t hdr =
		*(const efx_dword_t *)(ef100_mcdi_buf(efx, 0, NULL));

	/* Order the header read against subsequent reads of the response
	 * body by the caller.
	 */
	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}
  81. static void ef100_mcdi_read_response(struct efx_nic *efx,
  82. efx_dword_t *outbuf, size_t offset,
  83. size_t outlen)
  84. {
  85. const u8 *pdu = ef100_mcdi_buf(efx, 0, NULL);
  86. memcpy(outbuf, pdu + offset, outlen);
  87. }
/* Detect an MC reboot by watching for a change in the warm boot count.
 * Returns 0 if no (new) reboot is seen, -EIO exactly once per reboot.
 */
static int ef100_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = ef100_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting. However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	/* Record the new count so the same reboot is not reported twice */
	nic_data->warm_boot_count = rc;
	return -EIO;
}
/* Nothing EF100-specific needs doing when an MC reboot is detected;
 * this hook is intentionally empty.
 */
static void ef100_mcdi_reboot_detected(struct efx_nic *efx)
{
}
  109. /* MCDI calls
  110. */
/* Fetch the port's base MAC address from firmware via
 * MC_CMD_GET_MAC_ADDRESSES.  Copies it into @mac_address on success;
 * returns a negative error code on MCDI failure or a short response.
 */
static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}
/* Query datapath firmware capabilities (MC_CMD_GET_CAPABILITIES) and
 * cache the three flags words in nic_data.  Also derives the VI window
 * stride and, if TSO_V3 is supported, enables the corresponding TSO/GSO
 * netdev features.  Returns 0 on success or a negative error code.
 */
int efx_ef100_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
	struct ef100_nic_data *nic_data = efx->nic_data;
	u8 vi_window_mode;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	/* V4 is the minimum response we can work with (flags words and
	 * MAC stats count below require it).
	 */
	if (outlen < MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps = MCDI_DWORD(outbuf,
					     GET_CAPABILITIES_OUT_FLAGS1);
	nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
					      GET_CAPABILITIES_V2_OUT_FLAGS2);
	/* FLAGS3 only exists from V7 onwards; treat it as empty on older
	 * firmware rather than reading past the response.
	 */
	if (outlen < MC_CMD_GET_CAPABILITIES_V7_OUT_LEN)
		nic_data->datapath_caps3 = 0;
	else
		nic_data->datapath_caps3 = MCDI_DWORD(outbuf,
						      GET_CAPABILITIES_V7_OUT_FLAGS3);

	vi_window_mode = MCDI_BYTE(outbuf,
				   GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
	rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
	if (rc)
		return rc;

	if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3)) {
		struct net_device *net_dev = efx->net_dev;
		netdev_features_t tso = NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_PARTIAL |
					NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
					NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM;

		net_dev->features |= tso;
		net_dev->hw_features |= tso;
		net_dev->hw_enc_features |= tso;
		/* EF100 HW can only offload outer checksums if they are UDP,
		 * so for GRE_CSUM we have to use GSO_PARTIAL.
		 */
		net_dev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
	}
	efx->num_mac_stats = MCDI_WORD(outbuf,
				       GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
	netif_dbg(efx, probe, efx->net_dev,
		  "firmware reports num_mac_stats = %u\n",
		  efx->num_mac_stats);
	return 0;
}
  178. /* Event handling
  179. */
/* Allocate the DMA buffer backing a channel's event queue.
 * Returns 0 on success or a negative error code.
 */
static int ef100_ev_probe(struct efx_channel *channel)
{
	/* Allocate an extra descriptor for the QMDA status completion entry */
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 2) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
/* Initialise a channel's event queue: reset its software phase bit and
 * create the EVQ via MCDI.  Returns 0 or a negative error code.
 */
static int ef100_ev_init(struct efx_channel *channel)
{
	struct ef100_nic_data *nic_data = channel->efx->nic_data;

	/* initial phase is 0 */
	clear_bit(channel->channel, nic_data->evq_phases);

	return efx_mcdi_ev_init(channel, false, false);
}
/* Acknowledge processed events by writing the EVQ id and current read
 * index (masked into the ring) to the EVQ_INT_PRIME register, re-arming
 * the event queue interrupt.
 */
static void ef100_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t evq_prime;

	EFX_POPULATE_DWORD_2(evq_prime,
			     ERF_GZ_EVQ_ID, channel->channel,
			     ERF_GZ_IDX, channel->eventq_read_ptr &
					 channel->eventq_mask);

	efx_writed(channel->efx, &evq_prime,
		   efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME));
}
/* Cap on TX completions handled per NAPI poll before yielding */
#define EFX_NAPI_MAX_TX 512

/* NAPI event-queue processing for one channel.  Consumes events whose
 * phase bit matches the expected phase (the phase flips each time the
 * ring wraps), dispatching by event type, until @quota RX events have
 * been processed, EFX_NAPI_MAX_TX TX completions have been handled, or
 * an event with the stale phase is found.  Returns the number of
 * events counted against @quota.
 */
static int ef100_ev_process(struct efx_channel *channel, int quota)
{
	struct efx_nic *efx = channel->efx;
	struct ef100_nic_data *nic_data;
	bool evq_phase, old_evq_phase;
	unsigned int read_ptr;
	efx_qword_t *p_event;
	int spent_tx = 0;
	int spent = 0;
	bool ev_phase;
	int ev_type;

	if (unlikely(!channel->enabled))
		return 0;

	nic_data = efx->nic_data;
	evq_phase = test_bit(channel->channel, nic_data->evq_phases);
	old_evq_phase = evq_phase;
	read_ptr = channel->eventq_read_ptr;
	/* The phase bit must sit at the same position in RX and TX events
	 * for the single phase test below to be valid for both.
	 */
	BUILD_BUG_ON(ESF_GZ_EV_RXPKTS_PHASE_LBN != ESF_GZ_EV_TXCMPL_PHASE_LBN);

	while (spent < quota) {
		p_event = efx_event(channel, read_ptr);

		ev_phase = !!EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_PHASE);
		/* A mismatched phase means this slot has not been written
		 * since the last wrap: no more events to process.
		 */
		if (ev_phase != evq_phase)
			break;

		netif_vdbg(efx, drv, efx->net_dev,
			   "processing event on %d " EFX_QWORD_FMT "\n",
			   channel->channel, EFX_QWORD_VAL(*p_event));

		ev_type = EFX_QWORD_FIELD(*p_event, ESF_GZ_E_TYPE);

		switch (ev_type) {
		case ESE_GZ_EF100_EV_RX_PKTS:
			efx_ef100_ev_rx(channel, p_event);
			++spent;
			break;
		case ESE_GZ_EF100_EV_MCDI:
			efx_mcdi_process_event(channel, p_event);
			break;
		case ESE_GZ_EF100_EV_TX_COMPLETION:
			spent_tx += ef100_ev_tx(channel, p_event);
			/* Too much TX work: force the loop to terminate by
			 * exhausting the quota.
			 */
			if (spent_tx >= EFX_NAPI_MAX_TX)
				spent = quota;
			break;
		case ESE_GZ_EF100_EV_DRIVER:
			netif_info(efx, drv, efx->net_dev,
				   "Driver initiated event " EFX_QWORD_FMT "\n",
				   EFX_QWORD_VAL(*p_event));
			break;
		default:
			netif_info(efx, drv, efx->net_dev,
				   "Unhandled event " EFX_QWORD_FMT "\n",
				   EFX_QWORD_VAL(*p_event));
		}

		++read_ptr;
		/* Flip the expected phase each time the ring wraps */
		if ((read_ptr & channel->eventq_mask) == 0)
			evq_phase = !evq_phase;
	}

	channel->eventq_read_ptr = read_ptr;
	if (evq_phase != old_evq_phase)
		change_bit(channel->channel, nic_data->evq_phases);

	return spent;
}
/* MSI interrupt handler: records the CPU for interrupt self-test on the
 * designated IRQ level and schedules NAPI for the signalling channel.
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(READ_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}
/* Probe the PHY via MCDI, populate link/FEC/flow-control settings, and
 * push the initial configuration to the PHY (push failure is non-fatal).
 * Returns 0 on success or a negative error code.
 * NOTE(review): on efx_mcdi_get_phy_cfg() failure efx->phy_data is left
 * allocated — presumably freed on the remove path; confirm against caller.
 */
int ef100_phy_probe(struct efx_nic *efx)
{
	struct efx_mcdi_phy_data *phy_data;
	int rc;

	/* Probe for the PHY */
	efx->phy_data = kzalloc(sizeof(struct efx_mcdi_phy_data), GFP_KERNEL);
	if (!efx->phy_data)
		return -ENOMEM;
	rc = efx_mcdi_get_phy_cfg(efx, efx->phy_data);
	if (rc)
		return rc;

	/* Populate driver and ethtool settings */
	phy_data = efx->phy_data;
	mcdi_to_ethtool_linkset(phy_data->media, phy_data->supported_cap,
				efx->link_advertising);
	efx->fec_config = mcdi_fec_caps_to_ethtool(phy_data->supported_cap,
						   false);

	/* Default to Autonegotiated flow control if the PHY supports it */
	efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
		efx->wanted_fc |= EFX_FC_AUTO;
	efx_link_set_wanted_fc(efx, efx->wanted_fc);

	/* Push settings to the PHY. Failure is not fatal, the user can try to
	 * fix it using ethtool.
	 */
	rc = efx_mcdi_port_reconfigure(efx);
	if (rc && rc != -EPERM)
		netif_warn(efx, drv, efx->net_dev,
			   "could not initialise PHY settings\n");

	return 0;
}
/* Probe the MCDI filter table for this NIC.  The boolean argument is
 * forwarded verbatim to efx_mcdi_filter_table_probe(); see that function
 * for its meaning.
 */
int ef100_filter_table_probe(struct efx_nic *efx)
{
	return efx_mcdi_filter_table_probe(efx, true);
}
/* Bring the filter table up: add the unspecified-VID and VLAN-0 filter
 * groups under the filter semaphore, then (with SR-IOV) insert
 * representor filters.  On failure the already-added groups are torn
 * down in reverse order.  Returns 0 or a negative error code.
 */
static int ef100_filter_table_up(struct efx_nic *efx)
{
	int rc;

	down_write(&efx->filter_sem);
	rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		goto fail_unspec;
	rc = efx_mcdi_filter_add_vlan(efx, 0);
	if (rc)
		goto fail_vlan0;
	/* Drop the lock: we've finished altering table existence, and
	 * filter insertion will need to take the lock for read.
	 */
	up_write(&efx->filter_sem);
#ifdef CONFIG_SFC_SRIOV
	rc = efx_tc_insert_rep_filters(efx);
	/* Rep filter failure is nonfatal */
	if (rc)
		netif_warn(efx, drv, efx->net_dev,
			   "Failed to insert representor filters, rc %d\n",
			   rc);
#endif
	return 0;

fail_vlan0:
	efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
fail_unspec:
	efx_mcdi_filter_table_down(efx);
	up_write(&efx->filter_sem);
	return rc;
}
/* Tear the filter table down: remove representor filters (SR-IOV), then
 * delete both VLAN filter groups and the table itself under the filter
 * semaphore — the reverse of ef100_filter_table_up().
 */
static void ef100_filter_table_down(struct efx_nic *efx)
{
#ifdef CONFIG_SFC_SRIOV
	efx_tc_remove_rep_filters(efx);
#endif
	down_write(&efx->filter_sem);
	efx_mcdi_filter_del_vlan(efx, 0);
	efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
	efx_mcdi_filter_table_down(efx);
	up_write(&efx->filter_sem);
}
  356. /* Other
  357. */
/* Reconfigure the MAC: sync the RX mode filters, then either set just
 * the MTU (when @mtu_only and the firmware supports SET_MAC_ENHANCED)
 * or push the full MAC configuration.  Caller must hold efx->mac_lock.
 */
static int ef100_reconfigure_mac(struct efx_nic *efx, bool mtu_only)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	efx_mcdi_filter_sync_rx_mode(efx);

	if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED))
		return efx_mcdi_set_mtu(efx);
	return efx_mcdi_set_mac(efx);
}
  366. static enum reset_type ef100_map_reset_reason(enum reset_type reason)
  367. {
  368. if (reason == RESET_TYPE_TX_WATCHDOG)
  369. return reason;
  370. return RESET_TYPE_DISABLE;
  371. }
/* Translate ethtool reset @flags into an EF100 reset type, clearing the
 * flag bits that will be honoured.  Port resets map to RESET_TYPE_ALL,
 * management resets to RESET_TYPE_DISABLE; anything else is -EINVAL.
 */
static int ef100_map_reset_flags(u32 *flags)
{
	/* Only perform a RESET_TYPE_ALL because we don't support MC_REBOOTs */
	if ((*flags & EF100_RESET_PORT)) {
		*flags &= ~EF100_RESET_PORT;
		return RESET_TYPE_ALL;
	}
	if (*flags & ETH_RESET_MGMT) {
		*flags &= ~ETH_RESET_MGMT;
		return RESET_TYPE_DISABLE;
	}

	return -EINVAL;
}
/* Perform a reset of the requested type.  The device is always closed
 * first; for a TX watchdog reset it is simply reopened, for
 * RESET_TYPE_ALL an MCDI reset is issued before reopening, and for any
 * other type the device is left closed (returning 1 to indicate so).
 */
static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc;

	dev_close(efx->net_dev);

	if (reset_type == RESET_TYPE_TX_WATCHDOG) {
		netif_device_attach(efx->net_dev);
		__clear_bit(reset_type, &efx->reset_pending);
		rc = dev_open(efx->net_dev, NULL);
	} else if (reset_type == RESET_TYPE_ALL) {
		rc = efx_mcdi_reset(efx, reset_type);
		if (rc)
			return rc;

		netif_device_attach(efx->net_dev);

		rc = dev_open(efx->net_dev, NULL);
	} else {
		rc = 1;	/* Leave the device closed */
	}
	return rc;
}
  404. static void ef100_common_stat_mask(unsigned long *mask)
  405. {
  406. __set_bit(EF100_STAT_port_rx_packets, mask);
  407. __set_bit(EF100_STAT_port_tx_packets, mask);
  408. __set_bit(EF100_STAT_port_rx_bytes, mask);
  409. __set_bit(EF100_STAT_port_tx_bytes, mask);
  410. __set_bit(EF100_STAT_port_rx_multicast, mask);
  411. __set_bit(EF100_STAT_port_rx_bad, mask);
  412. __set_bit(EF100_STAT_port_rx_align_error, mask);
  413. __set_bit(EF100_STAT_port_rx_overflow, mask);
  414. }
/* Set the stats-mask bits for the statistics exposed through ethtool,
 * covering TX/RX size histograms, pause frames, cast types, error
 * counters and the generic software drop counters.
 */
static void ef100_ethtool_stat_mask(unsigned long *mask)
{
	__set_bit(EF100_STAT_port_tx_pause, mask);
	__set_bit(EF100_STAT_port_tx_unicast, mask);
	__set_bit(EF100_STAT_port_tx_multicast, mask);
	__set_bit(EF100_STAT_port_tx_broadcast, mask);
	__set_bit(EF100_STAT_port_tx_lt64, mask);
	__set_bit(EF100_STAT_port_tx_64, mask);
	__set_bit(EF100_STAT_port_tx_65_to_127, mask);
	__set_bit(EF100_STAT_port_tx_128_to_255, mask);
	__set_bit(EF100_STAT_port_tx_256_to_511, mask);
	__set_bit(EF100_STAT_port_tx_512_to_1023, mask);
	__set_bit(EF100_STAT_port_tx_1024_to_15xx, mask);
	__set_bit(EF100_STAT_port_tx_15xx_to_jumbo, mask);
	__set_bit(EF100_STAT_port_rx_good, mask);
	__set_bit(EF100_STAT_port_rx_pause, mask);
	__set_bit(EF100_STAT_port_rx_unicast, mask);
	__set_bit(EF100_STAT_port_rx_broadcast, mask);
	__set_bit(EF100_STAT_port_rx_lt64, mask);
	__set_bit(EF100_STAT_port_rx_64, mask);
	__set_bit(EF100_STAT_port_rx_65_to_127, mask);
	__set_bit(EF100_STAT_port_rx_128_to_255, mask);
	__set_bit(EF100_STAT_port_rx_256_to_511, mask);
	__set_bit(EF100_STAT_port_rx_512_to_1023, mask);
	__set_bit(EF100_STAT_port_rx_1024_to_15xx, mask);
	__set_bit(EF100_STAT_port_rx_15xx_to_jumbo, mask);
	__set_bit(EF100_STAT_port_rx_gtjumbo, mask);
	__set_bit(EF100_STAT_port_rx_bad_gtjumbo, mask);
	__set_bit(EF100_STAT_port_rx_length_error, mask);
	__set_bit(EF100_STAT_port_rx_nodesc_drops, mask);
	__set_bit(GENERIC_STAT_rx_nodesc_trunc, mask);
	__set_bit(GENERIC_STAT_rx_noskb_drops, mask);
}
/* Describe one 64-bit DMA MAC statistic: ethtool name plus its byte
 * offset (8 * the MC_CMD_MAC_ index) within the firmware stats buffer.
 */
#define EF100_DMA_STAT(ext_name, mcdi_name)			\
	[EF100_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }

/* Mapping from EF100_STAT_* indices to firmware MAC statistics, plus
 * the generic software-maintained drop counters at the end.
 */
static const struct efx_hw_stat_desc ef100_stat_desc[EF100_STAT_COUNT] = {
	EF100_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF100_DMA_STAT(port_tx_packets, TX_PKTS),
	EF100_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF100_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF100_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF100_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF100_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF100_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF100_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF100_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF100_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF100_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF100_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF100_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF100_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF100_DMA_STAT(port_rx_packets, RX_PKTS),
	EF100_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF100_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF100_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF100_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF100_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF100_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF100_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF100_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF100_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF100_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF100_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF100_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF100_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF100_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF100_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF100_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF100_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF100_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF100_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF100_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	EFX_GENERIC_SW_STAT(rx_nodesc_trunc),
	EFX_GENERIC_SW_STAT(rx_noskb_drops),
};
/* Write the ethtool stat names into @names and return how many stats
 * are described, using the ethtool stats mask.
 */
static size_t ef100_describe_stats(struct efx_nic *efx, u8 *names)
{
	DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};

	ef100_ethtool_stat_mask(mask);
	return efx_nic_describe_stats(ef100_stat_desc, EF100_STAT_COUNT,
				      mask, names);
}
/* Copy the cached stats out to the consumers: the masked named stats
 * into @full_stats (if non-NULL) and the aggregate counters into
 * @core_stats (if non-NULL).  Returns the number of named stats copied.
 */
static size_t ef100_update_stats_common(struct efx_nic *efx, u64 *full_stats,
					struct rtnl_link_stats64 *core_stats)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
	size_t stats_count = 0, index;
	u64 *stats = nic_data->stats;

	ef100_ethtool_stat_mask(mask);

	if (full_stats) {
		for_each_set_bit(index, mask, EF100_STAT_COUNT) {
			if (ef100_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (!core_stats)
		return stats_count;

	/* Map cached hardware/software counters onto the rtnl_link_stats64
	 * fields exposed to the networking core.
	 */
	core_stats->rx_packets = stats[EF100_STAT_port_rx_packets];
	core_stats->tx_packets = stats[EF100_STAT_port_tx_packets];
	core_stats->rx_bytes = stats[EF100_STAT_port_rx_bytes];
	core_stats->tx_bytes = stats[EF100_STAT_port_tx_bytes];
	core_stats->rx_dropped = stats[EF100_STAT_port_rx_nodesc_drops] +
				 stats[GENERIC_STAT_rx_nodesc_trunc] +
				 stats[GENERIC_STAT_rx_noskb_drops];
	core_stats->multicast = stats[EF100_STAT_port_rx_multicast];
	core_stats->rx_length_errors =
			stats[EF100_STAT_port_rx_gtjumbo] +
			stats[EF100_STAT_port_rx_length_error];
	core_stats->rx_crc_errors = stats[EF100_STAT_port_rx_bad];
	core_stats->rx_frame_errors =
			stats[EF100_STAT_port_rx_align_error];
	core_stats->rx_fifo_errors = stats[EF100_STAT_port_rx_overflow];
	core_stats->rx_errors = (core_stats->rx_length_errors +
				 core_stats->rx_crc_errors +
				 core_stats->rx_frame_errors);

	return stats_count;
}
/* Refresh the cached MAC statistics from the firmware DMA buffer, then
 * hand off to ef100_update_stats_common() to fill the callers' buffers.
 * Returns the number of named stats copied, or 0 if the temporary
 * buffer could not be allocated.
 * NOTE(review): GFP_ATOMIC suggests this may run in non-sleeping
 * context — confirm against callers.
 */
static size_t ef100_update_stats(struct efx_nic *efx,
				 u64 *full_stats,
				 struct rtnl_link_stats64 *core_stats)
{
	__le64 *mc_stats = kmalloc(array_size(efx->num_mac_stats, sizeof(__le64)), GFP_ATOMIC);
	struct ef100_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
	u64 *stats = nic_data->stats;

	/* Update with the union of the common and ethtool masks */
	ef100_common_stat_mask(mask);
	ef100_ethtool_stat_mask(mask);

	if (!mc_stats)
		return 0;

	efx_nic_copy_stats(efx, mc_stats);
	efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask,
			     stats, mc_stats, false);

	kfree(mc_stats);

	return ef100_update_stats_common(efx, full_stats, core_stats);
}
/* Report the physical port ID (the stored port MAC address) to the
 * networking core; -EOPNOTSUPP if no valid address is recorded.
 */
static int efx_ef100_get_phys_port_id(struct efx_nic *efx,
				      struct netdev_phys_item_id *ppid)
{
	struct ef100_nic_data *nic_data = efx->nic_data;

	if (!is_valid_ether_addr(nic_data->port_id))
		return -EOPNOTSUPP;

	ppid->id_len = ETH_ALEN;
	memcpy(ppid->id, nic_data->port_id, ppid->id_len);

	return 0;
}
/* Ask firmware to raise a test interrupt on the driver's IRQ level,
 * used by the interrupt self-test.  Returns 0 or a negative error code.
 */
static int efx_ef100_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	return efx_mcdi_rpc_quiet(efx, MC_CMD_TRIGGER_INTERRUPT,
				  inbuf, sizeof(inbuf), NULL, 0, NULL);
}
/* Marker value carried in the driver-generated test event */
#define EFX_EF100_TEST 1

/* Inject a driver test event onto @channel's event queue via
 * MC_CMD_DRIVER_EVENT, for the event self-test.  -ENETDOWN is tolerated
 * silently; any other failure is logged with a WARN.
 */
static void efx_ef100_ev_test_generate(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	struct efx_nic *efx = channel->efx;
	efx_qword_t event;
	int rc;

	EFX_POPULATE_QWORD_2(event,
			     ESF_GZ_E_TYPE, ESE_GZ_EF100_EV_DRIVER,
			     ESF_GZ_DRIVER_DATA, EFX_EF100_TEST);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc && (rc != -ENETDOWN))
		goto fail;

	return;

fail:
	WARN_ON(true);
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
/* Test capability bit @flag in the flags word selected by @offset (the
 * MCDI GET_CAPABILITIES output offset of FLAGS1/2/3).  Returns nonzero
 * if the capability is present; 0 if absent or @offset is unknown.
 */
static unsigned int ef100_check_caps(const struct efx_nic *efx,
				     u8 flag, u32 offset)
{
	const struct ef100_nic_data *nic_data = efx->nic_data;

	switch (offset) {
	case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST:
		return nic_data->datapath_caps & BIT_ULL(flag);
	case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST:
		return nic_data->datapath_caps2 & BIT_ULL(flag);
	case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST:
		return nic_data->datapath_caps3 & BIT_ULL(flag);
	default:
		return 0;
	}
}
/* Size of the RX page recycle ring, scaled up from the 10G baseline for
 * EF100's maximum link speed.
 */
static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
{
	/* Maximum link speed for Riverhead is 100G */
	return 10 * EFX_RECYCLE_RING_SIZE_10G;
}
#ifdef CONFIG_SFC_SRIOV
/* Look up the m-port ID of the physical network port via the MAE and
 * record it in nic_data (base_mport/have_mport).  Returns 0 or a
 * negative error code from the MCDI lookup.
 */
static int efx_ef100_get_base_mport(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	u32 selector, id;
	int rc;

	/* Construct mport selector for "physical network port" */
	efx_mae_mport_wire(efx, &selector);
	/* Look up actual mport ID */
	rc = efx_mae_lookup_mport(efx, selector, &id);
	if (rc)
		return rc;
	/* The ID should always fit in 16 bits, because that's how wide the
	 * corresponding fields in the RX prefix & TX override descriptor are
	 */
	if (id >> 16)
		netif_warn(efx, probe, efx->net_dev, "Bad base m-port id %#x\n",
			   id);
	nic_data->base_mport = id;
	nic_data->have_mport = true;
	return 0;
}
#endif
  640. static int compare_versions(const char *a, const char *b)
  641. {
  642. int a_major, a_minor, a_point, a_patch;
  643. int b_major, b_minor, b_point, b_patch;
  644. int a_matched, b_matched;
  645. a_matched = sscanf(a, "%d.%d.%d.%d", &a_major, &a_minor, &a_point, &a_patch);
  646. b_matched = sscanf(b, "%d.%d.%d.%d", &b_major, &b_minor, &b_point, &b_patch);
  647. if (a_matched == 4 && b_matched != 4)
  648. return +1;
  649. if (a_matched != 4 && b_matched == 4)
  650. return -1;
  651. if (a_matched != 4 && b_matched != 4)
  652. return 0;
  653. if (a_major != b_major)
  654. return a_major - b_major;
  655. if (a_minor != b_minor)
  656. return a_minor - b_minor;
  657. if (a_point != b_point)
  658. return a_point - b_point;
  659. return a_patch - b_patch;
  660. }
/* States of the byte-at-a-time TLV parser (see ef100_tlv_feed()) */
enum ef100_tlv_state_machine {
	EF100_TLV_TYPE,		/* expecting first (or only) type byte */
	EF100_TLV_TYPE_CONT,	/* expecting continuation type byte */
	EF100_TLV_LENGTH,	/* expecting the length byte */
	EF100_TLV_VALUE		/* expecting value bytes */
};
/* Incremental TLV parser state, fed one byte at a time */
struct ef100_tlv_state {
	enum ef100_tlv_state_machine state;	/* current parser state */
	u64 value;		/* value assembled so far (little-endian) */
	u32 value_offset;	/* number of value bytes consumed */
	u16 type;		/* TLV type, up to 15 bits */
	u8 len;			/* declared value length in bytes */
};
/* Feed one byte into the TLV state machine.  Types are 7 bits per byte
 * with the top bit marking a continuation; values are accumulated
 * little-endian into state->value.  Returns 0 to continue, -EOPNOTSUPP
 * for values wider than u64, or -EIO on an impossible state.
 */
static int ef100_tlv_feed(struct ef100_tlv_state *state, u8 byte)
{
	switch (state->state) {
	case EF100_TLV_TYPE:
		state->type = byte & 0x7f;
		state->state = (byte & 0x80) ? EF100_TLV_TYPE_CONT
					     : EF100_TLV_LENGTH;
		/* Clear ready to read in a new entry */
		state->value = 0;
		state->value_offset = 0;
		return 0;
	case EF100_TLV_TYPE_CONT:
		/* Second type byte supplies bits 7..14 of the type */
		state->type |= byte << 7;
		state->state = EF100_TLV_LENGTH;
		return 0;
	case EF100_TLV_LENGTH:
		state->len = byte;
		/* We only handle TLVs that fit in a u64 */
		if (state->len > sizeof(state->value))
			return -EOPNOTSUPP;
		/* len may be zero, implying a value of zero */
		state->state = state->len ? EF100_TLV_VALUE : EF100_TLV_TYPE;
		return 0;
	case EF100_TLV_VALUE:
		state->value |= ((u64)byte) << (state->value_offset * 8);
		state->value_offset++;
		if (state->value_offset >= state->len)
			state->state = EF100_TLV_TYPE;
		return 0;
	default: /* state machine error, can't happen */
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
/* Apply one completed design-parameter TLV entry (@reader) to the driver.
 * Known parameters either update limits stored in efx->nic_data, are
 * validated for compatibility (returning -EOPNOTSUPP when the hardware's
 * value cannot be honoured), or are deliberately ignored with a comment
 * explaining why.  Unrecognised parameters are ignored, as required by
 * the host interface.  Returns 0 or -EOPNOTSUPP.
 */
static int ef100_process_design_param(struct efx_nic *efx,
				      const struct ef100_tlv_state *reader)
{
	struct ef100_nic_data *nic_data = efx->nic_data;

	switch (reader->type) {
	case ESE_EF100_DP_GZ_PAD: /* padding, skip it */
		return 0;
	case ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS:
		/* Driver doesn't support timestamping yet, so we don't care */
		return 0;
	case ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS:
		/* Driver doesn't support unsolicited-event credits yet, so
		 * we don't care
		 */
		return 0;
	case ESE_EF100_DP_GZ_NMMU_GROUP_SIZE:
		/* Driver doesn't manage the NMMU (so we don't care) */
		return 0;
	case ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS:
		/* Driver uses CHECKSUM_COMPLETE, so we don't care about
		 * protocol checksum validation
		 */
		return 0;
	case ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN:
		/* Clamp to u16 range before storing the limit */
		nic_data->tso_max_hdr_len = min_t(u64, reader->value, 0xffff);
		return 0;
	case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS:
		/* We always put HDR_NUM_SEGS=1 in our TSO descriptors */
		if (!reader->value) {
			netif_err(efx, probe, efx->net_dev,
				  "TSO_MAX_HDR_NUM_SEGS < 1\n");
			return -EOPNOTSUPP;
		}
		return 0;
	case ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY:
	case ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY:
		/* Our TXQ and RXQ sizes are always power-of-two and thus divisible by
		 * EFX_MIN_DMAQ_SIZE, so we just need to check that
		 * EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY.
		 * This is very unlikely to fail.
		 */
		if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE ||
		    EFX_MIN_DMAQ_SIZE % (u32)reader->value) {
			netif_err(efx, probe, efx->net_dev,
				  "%s size granularity is %llu, can't guarantee safety\n",
				  reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
				  reader->value);
			return -EOPNOTSUPP;
		}
		return 0;
	case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
		/* Clamp to the legacy GSO maximum and tell the net core */
		nic_data->tso_max_payload_len = min_t(u64, reader->value,
						      GSO_LEGACY_MAX_SIZE);
		netif_set_tso_max_size(efx->net_dev,
				       nic_data->tso_max_payload_len);
		return 0;
	case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
		nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
		netif_set_tso_max_segs(efx->net_dev,
				       nic_data->tso_max_payload_num_segs);
		return 0;
	case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
		nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
		return 0;
	case ESE_EF100_DP_GZ_COMPAT:
		/* Any nonzero bit means a compat requirement we don't know */
		if (reader->value) {
			netif_err(efx, probe, efx->net_dev,
				  "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
				  reader->value);
			return -EOPNOTSUPP;
		}
		return 0;
	case ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN:
		/* Driver doesn't use mem2mem transfers */
		return 0;
	case ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS:
		/* Driver doesn't currently use EVQ_TIMER */
		return 0;
	case ESE_EF100_DP_GZ_NMMU_PAGE_SIZES:
		/* Driver doesn't manage the NMMU (so we don't care) */
		return 0;
	case ESE_EF100_DP_GZ_VI_STRIDES:
		/* We never try to set the VI stride, and we don't rely on
		 * being able to find VIs past VI 0 until after we've learned
		 * the current stride from MC_CMD_GET_CAPABILITIES.
		 * So the value of this shouldn't matter.
		 */
		if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT)
			netif_dbg(efx, probe, efx->net_dev,
				  "NIC has other than default VI_STRIDES (mask "
				  "%#llx), early probing might use wrong one\n",
				  reader->value);
		return 0;
	case ESE_EF100_DP_GZ_RX_MAX_RUNT:
		/* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't
		 * care whether it indicates runt or overlength for any given
		 * packet, so we don't care about this parameter.
		 */
		return 0;
	default:
		/* Host interface says "Drivers should ignore design parameters
		 * that they do not recognise."
		 */
		netif_dbg(efx, probe, efx->net_dev,
			  "Ignoring unrecognised design parameter %u\n",
			  reader->type);
		return 0;
	}
}
  817. static int ef100_check_design_params(struct efx_nic *efx)
  818. {
  819. struct ef100_tlv_state reader = {};
  820. u32 total_len, offset = 0;
  821. efx_dword_t reg;
  822. int rc = 0, i;
  823. u32 data;
  824. efx_readd(efx, &reg, ER_GZ_PARAMS_TLV_LEN);
  825. total_len = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
  826. pci_dbg(efx->pci_dev, "%u bytes of design parameters\n", total_len);
  827. while (offset < total_len) {
  828. efx_readd(efx, &reg, ER_GZ_PARAMS_TLV + offset);
  829. data = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
  830. for (i = 0; i < sizeof(data); i++) {
  831. rc = ef100_tlv_feed(&reader, data);
  832. /* Got a complete value? */
  833. if (!rc && reader.state == EF100_TLV_TYPE)
  834. rc = ef100_process_design_param(efx, &reader);
  835. if (rc)
  836. goto out;
  837. data >>= 8;
  838. offset++;
  839. }
  840. }
  841. /* Check we didn't end halfway through a TLV entry, which could either
  842. * mean that the TLV stream is truncated or just that it's corrupted
  843. * and our state machine is out of sync.
  844. */
  845. if (reader.state != EF100_TLV_TYPE) {
  846. if (reader.state == EF100_TLV_TYPE_CONT)
  847. netif_err(efx, probe, efx->net_dev,
  848. "truncated design parameter (incomplete type %u)\n",
  849. reader.type);
  850. else
  851. netif_err(efx, probe, efx->net_dev,
  852. "truncated design parameter %u\n",
  853. reader.type);
  854. rc = -EIO;
  855. }
  856. out:
  857. return rc;
  858. }
  859. /* NIC probe and remove
  860. */
/* Main NIC probe: allocate per-NIC state, validate design parameters,
 * allocate the MCDI buffer, wait for the MC to be up, then bring up MCDI,
 * reset the function, and sanity-check the firmware version and
 * capabilities.  Returns 0 or a negative error code.
 *
 * NOTE(review): the fail path frees nothing here (nic_data and mcdi_buf
 * stay allocated); presumably the caller invokes ef100_remove() on probe
 * failure to tear down — confirm against the probe call chain.
 */
static int ef100_probe_main(struct efx_nic *efx)
{
	unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
	struct ef100_nic_data *nic_data;
	char fw_version[32];
	u32 priv_mask = 0;
	int i, rc;

	if (WARN_ON(bar_size == 0))
		return -EIO;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;
	nic_data->efx = efx;
	efx->max_vis = EF100_MAX_VIS;

	/* Populate design-parameter defaults */
	nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
	nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
	nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
	nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;

	/* Read design parameters (may overwrite the defaults above) */
	rc = ef100_check_design_params(efx);
	if (rc) {
		pci_err(efx->pci_dev, "Unsupported design parameters\n");
		goto fail;
	}

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	/* MCDI buffers must be 256 byte aligned. */
	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, MCDI_BUF_LEN,
				  GFP_KERNEL);
	if (rc)
		goto fail;

	/* Get the MC's warm boot count. In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = ef100_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		/* Give up after 5 one-second retries */
		if (++i == 5)
			goto fail;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function. We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), efx_reg(efx, ER_GZ_MC_DB_HWRD));

	/* Post-IO section. */
	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail;
	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail;
	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail;

	rc = efx_get_pf_index(efx, &nic_data->pf_index);
	if (rc)
		goto fail;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail;
	efx->port_num = rc;

	efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version));
	pci_dbg(efx->pci_dev, "Firmware version %s\n", fw_version);

	rc = efx_mcdi_get_privilege_mask(efx, &priv_mask);
	if (rc) /* non-fatal, and priv_mask will still be 0 */
		pci_info(efx->pci_dev,
			 "Failed to get privilege mask from FW, rc %d\n", rc);
	nic_data->grp_mae = !!(priv_mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE);

	/* Reject firmware too old to use the current event format */
	if (compare_versions(fw_version, "1.1.0.1000") < 0) {
		pci_info(efx->pci_dev, "Firmware uses old event descriptors\n");
		rc = -EINVAL;
		goto fail;
	}

	/* Unsolicited-event credits are not supported by this driver */
	if (efx_has_cap(efx, UNSOL_EV_CREDIT_SUPPORTED)) {
		pci_info(efx->pci_dev, "Firmware uses unsolicited-event credits\n");
		rc = -EINVAL;
		goto fail;
	}

	return 0;
fail:
	return rc;
}
/* Netdev-level PF probe: read the permanent MAC address and assign it to
 * the net device, then — if this function has the MAE privilege and
 * CONFIG_SFC_SRIOV is enabled — set up TC offload state.  MAE/TC setup
 * failures are logged but deliberately non-fatal (see comments below).
 * Returns 0 or a negative error code.
 */
int ef100_probe_netdev_pf(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	struct net_device *net_dev = efx->net_dev;
	int rc;

	rc = ef100_get_mac_address(efx, net_dev->perm_addr);
	if (rc)
		goto fail;
	/* Assign MAC address */
	eth_hw_addr_set(net_dev, net_dev->perm_addr);
	memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);

	/* Without the MAE privilege there is nothing more to set up */
	if (!nic_data->grp_mae)
		return 0;

#ifdef CONFIG_SFC_SRIOV
	rc = efx_init_struct_tc(efx);
	if (rc)
		return rc;

	rc = efx_ef100_get_base_mport(efx);
	if (rc) {
		netif_warn(efx, probe, net_dev,
			   "Failed to probe base mport rc %d; representors will not function\n",
			   rc);
	}

	rc = efx_init_tc(efx);
	if (rc) {
		/* Either we don't have an MAE at all (i.e. legacy v-switching),
		 * or we do but we failed to probe it. In the latter case, we
		 * may not have set up default rules, in which case we won't be
		 * able to pass any traffic. However, we don't fail the probe,
		 * because the user might need to use the netdevice to apply
		 * configuration changes to fix whatever's wrong with the MAE.
		 */
		netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
			   rc);
	} else {
		net_dev->features |= NETIF_F_HW_TC;
		efx->fixed_features |= NETIF_F_HW_TC;
	}
#endif
	return 0;
fail:
	return rc;
}
/* VF probe entry point: virtual functions share the main probe path. */
int ef100_probe_vf(struct efx_nic *efx)
{
	return ef100_probe_main(efx);
}
/* Undo ef100_probe_main(): detach and shut down MCDI, free the MCDI DMA
 * buffer (if nic_data was allocated), then release the per-NIC state.
 * Tolerates a NULL efx->nic_data, so it is safe after a partial probe.
 */
void ef100_remove(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;

	efx_mcdi_detach(efx);
	efx_mcdi_fini(efx);
	if (nic_data)
		efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
	efx->nic_data = NULL;
}
  1010. /* NIC level access functions
  1011. */
/* Netdev offload feature flags shared by the PF and VF nic types below */
#define EF100_OFFLOAD_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_RXCSUM |	\
	NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
	NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
	NETIF_F_HW_VLAN_CTAG_TX)
/* Operations table for the EF100 physical function (PF) */
const struct efx_nic_type ef100_pf_nic_type = {
	.revision = EFX_REV_EF100,
	.is_vf = false,
	.probe = ef100_probe_main,
	.offload_features = EF100_OFFLOAD_FEATURES,
	/* MCDI (management controller) command interface */
	.mcdi_max_ver = 2,
	.mcdi_request = ef100_mcdi_request,
	.mcdi_poll_response = ef100_mcdi_poll_response,
	.mcdi_read_response = ef100_mcdi_read_response,
	.mcdi_poll_reboot = ef100_mcdi_poll_reboot,
	.mcdi_reboot_detected = ef100_mcdi_reboot_detected,
	/* Interrupts: MSI-X only; several ops are no-ops on EF100 */
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef100_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.push_irq_moderation = efx_channel_dummy_op_void,
	.min_interrupt_mode = EFX_INT_MODE_MSIX,
	/* Reset handling */
	.map_reset_reason = ef100_map_reset_reason,
	.map_reset_flags = ef100_map_reset_flags,
	.reset = ef100_reset,
	.check_caps = ef100_check_caps,
	/* Event queues */
	.ev_probe = ef100_ev_probe,
	.ev_init = ef100_ev_init,
	.ev_fini = efx_mcdi_ev_fini,
	.ev_remove = efx_mcdi_ev_remove,
	.irq_handle_msi = ef100_msi_interrupt,
	.ev_process = ef100_ev_process,
	.ev_read_ack = ef100_ev_read_ack,
	.ev_test_generate = efx_ef100_ev_test_generate,
	/* TX path */
	.tx_probe = ef100_tx_probe,
	.tx_init = ef100_tx_init,
	.tx_write = ef100_tx_write,
	.tx_enqueue = ef100_enqueue_skb,
	/* RX path */
	.rx_probe = efx_mcdi_rx_probe,
	.rx_init = efx_mcdi_rx_init,
	.rx_remove = efx_mcdi_rx_remove,
	.rx_write = ef100_rx_write,
	.rx_packet = __ef100_rx_packet,
	.rx_buf_hash_valid = ef100_rx_buf_hash_valid,
	.fini_dmaq = efx_fini_dmaq,
	/* RX filters */
	.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
	.filter_table_probe = ef100_filter_table_up,
	.filter_table_restore = efx_mcdi_filter_table_restore,
	.filter_table_remove = ef100_filter_table_down,
	.filter_insert = efx_mcdi_filter_insert,
	.filter_remove_safe = efx_mcdi_filter_remove_safe,
	.filter_get_safe = efx_mcdi_filter_get_safe,
	.filter_clear_rx = efx_mcdi_filter_clear_rx,
	.filter_count_rx_used = efx_mcdi_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
	/* RX prefix layout and RSS */
	.get_phys_port_id = efx_ef100_get_phys_port_id,
	.rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
	.rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
	.rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
	.rx_hash_key_size = 40,
	.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
	.rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
	.rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
	.rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
	.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
	.rx_recycle_ring_size = efx_ef100_recycle_ring_size,
	/* MAC, port and statistics */
	.reconfigure_mac = ef100_reconfigure_mac,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.test_nvram = efx_new_mcdi_nvram_test_all,
	.describe_stats = ef100_describe_stats,
	.start_stats = efx_mcdi_mac_start_stats,
	.update_stats = ef100_update_stats,
	.pull_stats = efx_mcdi_mac_pull_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = efx_ef100_sriov_configure,
#endif

	/* Per-type bar/size configuration not used on ef100. Location of
	 * registers is defined by extended capabilities.
	 */
	.mem_bar = NULL,
	.mem_map_size = NULL,
};
/* Operations table for the EF100 virtual function (VF).  Largely mirrors
 * ef100_pf_nic_type, minus PF-only ops (port reconfigure, RSS context
 * config, phys port id, SR-IOV configure).
 */
const struct efx_nic_type ef100_vf_nic_type = {
	.revision = EFX_REV_EF100,
	.is_vf = true,
	.probe = ef100_probe_vf,
	.offload_features = EF100_OFFLOAD_FEATURES,
	/* MCDI (management controller) command interface */
	.mcdi_max_ver = 2,
	.mcdi_request = ef100_mcdi_request,
	.mcdi_poll_response = ef100_mcdi_poll_response,
	.mcdi_read_response = ef100_mcdi_read_response,
	.mcdi_poll_reboot = ef100_mcdi_poll_reboot,
	.mcdi_reboot_detected = ef100_mcdi_reboot_detected,
	/* Interrupts: MSI-X only; several ops are no-ops on EF100 */
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef100_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.push_irq_moderation = efx_channel_dummy_op_void,
	.min_interrupt_mode = EFX_INT_MODE_MSIX,
	/* Reset handling */
	.map_reset_reason = ef100_map_reset_reason,
	.map_reset_flags = ef100_map_reset_flags,
	.reset = ef100_reset,
	.check_caps = ef100_check_caps,
	/* Event queues */
	.ev_probe = ef100_ev_probe,
	.ev_init = ef100_ev_init,
	.ev_fini = efx_mcdi_ev_fini,
	.ev_remove = efx_mcdi_ev_remove,
	.irq_handle_msi = ef100_msi_interrupt,
	.ev_process = ef100_ev_process,
	.ev_read_ack = ef100_ev_read_ack,
	.ev_test_generate = efx_ef100_ev_test_generate,
	/* TX path */
	.tx_probe = ef100_tx_probe,
	.tx_init = ef100_tx_init,
	.tx_write = ef100_tx_write,
	.tx_enqueue = ef100_enqueue_skb,
	/* RX path */
	.rx_probe = efx_mcdi_rx_probe,
	.rx_init = efx_mcdi_rx_init,
	.rx_remove = efx_mcdi_rx_remove,
	.rx_write = ef100_rx_write,
	.rx_packet = __ef100_rx_packet,
	.rx_buf_hash_valid = ef100_rx_buf_hash_valid,
	.fini_dmaq = efx_fini_dmaq,
	/* RX filters */
	.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
	.filter_table_probe = ef100_filter_table_up,
	.filter_table_restore = efx_mcdi_filter_table_restore,
	.filter_table_remove = ef100_filter_table_down,
	.filter_insert = efx_mcdi_filter_insert,
	.filter_remove_safe = efx_mcdi_filter_remove_safe,
	.filter_get_safe = efx_mcdi_filter_get_safe,
	.filter_clear_rx = efx_mcdi_filter_clear_rx,
	.filter_count_rx_used = efx_mcdi_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
	/* RX prefix layout and RSS */
	.rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
	.rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
	.rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
	.rx_hash_key_size = 40,
	.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
	.rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
	.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
	.rx_recycle_ring_size = efx_ef100_recycle_ring_size,
	/* MAC and statistics */
	.reconfigure_mac = ef100_reconfigure_mac,
	.test_nvram = efx_new_mcdi_nvram_test_all,
	.describe_stats = ef100_describe_stats,
	.start_stats = efx_mcdi_mac_start_stats,
	.update_stats = ef100_update_stats,
	.pull_stats = efx_mcdi_mac_pull_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,

	/* Per-type bar/size configuration not used on ef100. Location of
	 * registers is defined by extended capabilities.
	 */
	.mem_bar = NULL,
	.mem_map_size = NULL,
};