ethtool_common.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /****************************************************************************
  3. * Driver for Solarflare network controllers and boards
  4. * Copyright 2019 Solarflare Communications Inc.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published
  8. * by the Free Software Foundation, incorporated herein by reference.
  9. */
  10. #include <linux/module.h>
  11. #include <linux/netdevice.h>
  12. #include "net_driver.h"
  13. #include "mcdi.h"
  14. #include "nic.h"
  15. #include "selftest.h"
  16. #include "rx_common.h"
  17. #include "ethtool_common.h"
  18. #include "mcdi_port_common.h"
/* Describes one software (driver-maintained) ethtool statistic. */
struct efx_sw_stat_desc {
	const char *name;	/* ethtool string for this statistic */
	/* Which structure the counter field lives in */
	enum {
		EFX_ETHTOOL_STAT_SOURCE_nic,		/* struct efx_nic */
		EFX_ETHTOOL_STAT_SOURCE_channel,	/* struct efx_channel, summed over channels */
		EFX_ETHTOOL_STAT_SOURCE_tx_queue	/* struct efx_tx_queue, summed over TX queues */
	} source;
	unsigned int offset;	/* byte offset of the field within its source struct */
	u64 (*get_stat)(void *field); /* Reader function */
};
/* Initialiser for a struct efx_sw_stat_desc with type-checking.
 *
 * The conditional in .offset is deliberate: both arms evaluate to the
 * same offsetof(), so the value never changes, but the pointer
 * comparison in the condition forces the compiler to check that
 * 'field' within struct efx_<source_name> really has type field_type.
 */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
			 get_stat_function) {			\
	.name = #stat_name,					\
	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,	\
	.offset = ((((field_type *) 0) ==			\
		    &((struct efx_##source_name *)0)->field) ?	\
		    offsetof(struct efx_##source_name, field) :	\
		    offsetof(struct efx_##source_name, field)),	\
	.get_stat = get_stat_function,				\
}
  40. static u64 efx_get_uint_stat(void *field)
  41. {
  42. return *(unsigned int *)field;
  43. }
/* Read an atomic_t statistic field and widen it to u64. */
static u64 efx_get_atomic_stat(void *field)
{
	return atomic_read((atomic_t *) field);
}
/* Shorthand initialisers for the common statistic flavours */
#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field)		\
	EFX_ETHTOOL_STAT(field, nic, field,			\
			 atomic_t, efx_get_atomic_stat)
/* Channel counter named n_<field> in struct efx_channel */
#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field)			\
	EFX_ETHTOOL_STAT(field, channel, n_##field,		\
			 unsigned int, efx_get_uint_stat)
/* As above, but the struct field has no n_ prefix */
#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field)		\
	EFX_ETHTOOL_STAT(field, channel, field,			\
			 unsigned int, efx_get_uint_stat)
/* TX-queue counter, reported as tx_<field> */
#define EFX_ETHTOOL_UINT_TXQ_STAT(field)			\
	EFX_ETHTOOL_STAT(tx_##field, tx_queue, field,		\
			 unsigned int, efx_get_uint_stat)
/* Table of all software statistics reported via ethtool -S; the
 * reporting order here defines the order of both the strings and the
 * values (see efx_ethtool_get_strings()/efx_ethtool_get_stats()).
 */
static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
	EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
	EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mport_bad),
#ifdef CONFIG_RFS_ACCEL
	EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
#endif
};
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)

/* Names of the driver's private flags, indexed by bit number */
static const char efx_ethtool_priv_flags_strings[][ETH_GSTRING_LEN] = {
	"log-tc-errors",
};

#define EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS BIT(0)

#define EFX_ETHTOOL_PRIV_FLAGS_COUNT ARRAY_SIZE(efx_ethtool_priv_flags_strings)
  99. void efx_ethtool_get_drvinfo(struct net_device *net_dev,
  100. struct ethtool_drvinfo *info)
  101. {
  102. struct efx_nic *efx = efx_netdev_priv(net_dev);
  103. strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
  104. efx_mcdi_print_fwver(efx, info->fw_version,
  105. sizeof(info->fw_version));
  106. strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
  107. }
  108. u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
  109. {
  110. struct efx_nic *efx = efx_netdev_priv(net_dev);
  111. return efx->msg_enable;
  112. }
  113. void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
  114. {
  115. struct efx_nic *efx = efx_netdev_priv(net_dev);
  116. efx->msg_enable = msg_enable;
  117. }
/* ethtool -t: run the driver self-tests and report results.
 *
 * Brings the interface up if it was down (the tests need RX buffers
 * and interrupts), restores its previous state afterwards, and always
 * reports whatever results exist via efx_ethtool_fill_self_tests(),
 * even on early failure.
 */
void efx_ethtool_self_test(struct net_device *net_dev,
			   struct ethtool_test *test, u64 *data)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_self_tests *efx_tests;
	bool already_up;
	int rc = -ENOMEM;	/* default if the allocation below fails */

	efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
	if (!efx_tests)
		goto fail;

	/* Refuse to test a device that is resetting or being removed */
	if (!efx_net_active(efx->state)) {
		rc = -EBUSY;
		goto out;
	}

	netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

	/* We need rx buffers and interrupts. */
	already_up = (efx->net_dev->flags & IFF_UP);
	if (!already_up) {
		rc = dev_open(efx->net_dev, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed opening device.\n");
			goto out;
		}
	}

	rc = efx_selftest(efx, efx_tests, test->flags);

	/* Restore the interface's previous administrative state */
	if (!already_up)
		dev_close(efx->net_dev);

	netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
		   rc == 0 ? "passed" : "failed",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

out:
	/* Copy results (possibly all-zero) into the ethtool buffers */
	efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
	kfree(efx_tests);
fail:
	if (rc)
		test->flags |= ETH_TEST_FL_FAILED;
}
  157. void efx_ethtool_get_pauseparam(struct net_device *net_dev,
  158. struct ethtool_pauseparam *pause)
  159. {
  160. struct efx_nic *efx = efx_netdev_priv(net_dev);
  161. pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
  162. pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
  163. pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
  164. }
/* ethtool -A: apply requested flow-control settings.
 *
 * Holds the MAC lock for the whole operation since it reads and
 * updates wanted_fc/link_advertising and may reconfigure PHY and MAC.
 */
int efx_ethtool_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	u8 wanted_fc, old_fc;
	u32 old_adv;
	int rc = 0;

	mutex_lock(&efx->mac_lock);

	wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
		     (pause->tx_pause ? EFX_FC_TX : 0) |
		     (pause->autoneg ? EFX_FC_AUTO : 0));

	/* TX pause without RX pause is not a supported combination */
	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Flow control unsupported: tx ON rx OFF\n");
		rc = -EINVAL;
		goto out;
	}

	/* Pause autonegotiation requires link autonegotiation */
	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Autonegotiation is disabled\n");
		rc = -EINVAL;
		goto out;
	}

	/* Hook for Falcon bug 11482 workaround */
	if (efx->type->prepare_enable_fc_tx &&
	    (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
		efx->type->prepare_enable_fc_tx(efx);

	/* Only reconfigure the port if the advertised capabilities or
	 * the autoneg bit actually changed.
	 */
	old_adv = efx->link_advertising[0];
	old_fc = efx->wanted_fc;
	efx_link_set_wanted_fc(efx, wanted_fc);
	if (efx->link_advertising[0] != old_adv ||
	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
		rc = efx_mcdi_port_reconfigure(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "Unable to advertise requested flow "
				  "control setting\n");
			goto out;
		}
	}

	/* Reconfigure the MAC. The PHY *may* generate a link state change event
	 * if the user just changed the advertised capabilities, but there's no
	 * harm doing this twice */
	efx_mac_reconfigure(efx, false);

out:
	mutex_unlock(&efx->mac_lock);

	return rc;
}
  213. /**
  214. * efx_fill_test - fill in an individual self-test entry
  215. * @test_index: Index of the test
  216. * @strings: Ethtool strings, or %NULL
  217. * @data: Ethtool test results, or %NULL
  218. * @test: Pointer to test result (used only if data != %NULL)
  219. * @unit_format: Unit name format (e.g. "chan\%d")
  220. * @unit_id: Unit id (e.g. 0 for "chan0")
  221. * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
  222. * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
  223. *
  224. * Fill in an individual self-test entry.
  225. */
  226. static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
  227. int *test, const char *unit_format, int unit_id,
  228. const char *test_format, const char *test_id)
  229. {
  230. char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
  231. /* Fill data value, if applicable */
  232. if (data)
  233. data[test_index] = *test;
  234. /* Fill string, if applicable */
  235. if (strings) {
  236. if (strchr(unit_format, '%'))
  237. snprintf(unit_str, sizeof(unit_str),
  238. unit_format, unit_id);
  239. else
  240. strcpy(unit_str, unit_format);
  241. snprintf(test_str, sizeof(test_str), test_format, test_id);
  242. snprintf(strings + test_index * ETH_GSTRING_LEN,
  243. ETH_GSTRING_LEN,
  244. "%-6s %-24s", unit_str, test_str);
  245. }
  246. }
/* Unit/test name helpers; each expands to a format string plus its
 * argument, matching efx_fill_test()'s (format, id) parameter pairs.
 */
#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->label
#define EFX_LOOPBACK_NAME(_mode, _counter) \
	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
/**
 * efx_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx: Efx NIC
 * @lb_tests: Efx loopback self-test results structure
 * @mode: Loopback test mode
 * @test_index: Starting index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Fill in a block of loopback self-test entries. Return new test
 * index.
 */
static int efx_fill_loopback_test(struct efx_nic *efx,
				  struct efx_loopback_self_tests *lb_tests,
				  enum efx_loopback_mode mode,
				  unsigned int test_index,
				  u8 *strings, u64 *data)
{
	/* Loopback tests run on the first TX-capable channel only */
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;

	/* Per-TX-queue sent/done counters for this loopback mode */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_sent[tx_queue->label],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_done[tx_queue->label],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_done"));
	}
	/* Aggregate RX good/bad counters */
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_good,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_good"));
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_bad,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_bad"));

	return test_index;
}
/**
 * efx_ethtool_fill_self_tests - get self-test details
 * @efx: Efx NIC
 * @tests: Efx self-test results structure, or %NULL
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Get self-test number of strings, strings, and/or test results.
 * Return number of strings (== number of test results).
 *
 * The reason for merging these three functions is to make sure that
 * they can never be inconsistent.
 */
int efx_ethtool_fill_self_tests(struct efx_nic *efx,
				struct efx_self_tests *tests,
				u8 *strings, u64 *data)
{
	struct efx_channel *channel;
	unsigned int n = 0, i;
	enum efx_loopback_mode mode;

	/* Basic PHY/core tests */
	efx_fill_test(n++, strings, data, &tests->phy_alive,
		      "phy", 0, "alive", NULL);
	efx_fill_test(n++, strings, data, &tests->nvram,
		      "core", 0, "nvram", NULL);
	efx_fill_test(n++, strings, data, &tests->interrupt,
		      "core", 0, "interrupt", NULL);

	/* Event queues */
	efx_for_each_channel(channel, efx) {
		efx_fill_test(n++, strings, data,
			      &tests->eventq_dma[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.dma", NULL);
		efx_fill_test(n++, strings, data,
			      &tests->eventq_int[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.int", NULL);
	}

	efx_fill_test(n++, strings, data, &tests->memory,
		      "core", 0, "memory", NULL);
	efx_fill_test(n++, strings, data, &tests->registers,
		      "core", 0, "registers", NULL);

	/* Extended PHY tests, enumerated by the MC until it returns NULL */
	for (i = 0; true; ++i) {
		const char *name;

		EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
		name = efx_mcdi_phy_test_name(efx, i);
		if (name == NULL)
			break;

		efx_fill_test(n++, strings, data, &tests->phy_ext[i], "phy", 0, name, NULL);
	}

	/* Loopback tests, one block per supported loopback mode */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(efx->loopback_modes & (1 << mode)))
			continue;
		n = efx_fill_loopback_test(efx,
					   &tests->loopback[mode], mode, n,
					   strings, data);
	}

	return n;
}
/* Count, and optionally name, the per-queue packet counters:
 * one "tx-<n>.tx_packets" per channel with TX queues, one
 * "rx-<n>.rx_packets" per channel with an RX queue, and one
 * "tx-xdp-cpu-<n>.tx_packets" per XDP TX queue.  Pass strings == NULL
 * to just get the count.  Must stay in step with the value-filling
 * loops in efx_ethtool_get_stats().
 */
static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
{
	size_t n_stats = 0;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "tx-%u.tx_packets",
					 channel->tx_queue[0].queue /
					 EFX_MAX_TXQ_PER_CHANNEL);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "rx-%d.rx_packets", channel->channel);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
		unsigned short xdp;

		for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
			n_stats++;
			if (strings) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "tx-xdp-cpu-%hu.tx_packets", xdp);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	return n_stats;
}
  390. int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
  391. {
  392. struct efx_nic *efx = efx_netdev_priv(net_dev);
  393. switch (string_set) {
  394. case ETH_SS_STATS:
  395. return efx->type->describe_stats(efx, NULL) +
  396. EFX_ETHTOOL_SW_STAT_COUNT +
  397. efx_describe_per_queue_stats(efx, NULL) +
  398. efx_ptp_describe_stats(efx, NULL);
  399. case ETH_SS_TEST:
  400. return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
  401. case ETH_SS_PRIV_FLAGS:
  402. return EFX_ETHTOOL_PRIV_FLAGS_COUNT;
  403. default:
  404. return -EINVAL;
  405. }
  406. }
/* Fill in the names for a string set.  For ETH_SS_STATS the order
 * must match exactly the value order in efx_ethtool_get_stats():
 * NIC stats, then software stats, then per-queue stats, then PTP.
 */
void efx_ethtool_get_strings(struct net_device *net_dev,
			     u32 string_set, u8 *strings)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	int i;

	switch (string_set) {
	case ETH_SS_STATS:
		strings += (efx->type->describe_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
			strscpy(strings + i * ETH_GSTRING_LEN,
				efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
		strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
		strings += (efx_describe_per_queue_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		efx_ptp_describe_stats(efx, strings);
		break;
	case ETH_SS_TEST:
		efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < EFX_ETHTOOL_PRIV_FLAGS_COUNT; i++)
			strscpy(strings + i * ETH_GSTRING_LEN,
				efx_ethtool_priv_flags_strings[i],
				ETH_GSTRING_LEN);
		break;
	default:
		/* No other string sets */
		break;
	}
}
  438. u32 efx_ethtool_get_priv_flags(struct net_device *net_dev)
  439. {
  440. struct efx_nic *efx = efx_netdev_priv(net_dev);
  441. u32 ret_flags = 0;
  442. if (efx->log_tc_errs)
  443. ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS;
  444. return ret_flags;
  445. }
  446. int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags)
  447. {
  448. struct efx_nic *efx = efx_netdev_priv(net_dev);
  449. efx->log_tc_errs =
  450. !!(flags & EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS);
  451. return 0;
  452. }
/* ethtool -S: fill in all statistic values, in the same order that
 * efx_ethtool_get_strings() emitted the corresponding names:
 * NIC stats, then software stats, then per-queue stats, then PTP.
 */
void efx_ethtool_get_stats(struct net_device *net_dev,
			   struct ethtool_stats *stats,
			   u64 *data)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	const struct efx_sw_stat_desc *stat;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	spin_lock_bh(&efx->stats_lock);

	/* Get NIC statistics */
	data += efx->type->update_stats(efx, data, NULL);

	/* Get software statistics */
	for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
		stat = &efx_sw_stat_desc[i];
		switch (stat->source) {
		case EFX_ETHTOOL_STAT_SOURCE_nic:
			/* Single field read straight out of struct efx_nic */
			data[i] = stat->get_stat((void *)efx + stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_channel:
			/* Sum the field over all channels */
			data[i] = 0;
			efx_for_each_channel(channel, efx)
				data[i] += stat->get_stat((void *)channel +
							  stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
			/* Sum the field over all TX queues */
			data[i] = 0;
			efx_for_each_channel(channel, efx) {
				efx_for_each_channel_tx_queue(tx_queue, channel)
					data[i] +=
						stat->get_stat((void *)tx_queue
							       + stat->offset);
			}
			break;
		}
	}
	data += EFX_ETHTOOL_SW_STAT_COUNT;

	spin_unlock_bh(&efx->stats_lock);

	/* Per-queue packet counters, outside stats_lock; the order of
	 * these loops must match efx_describe_per_queue_stats().
	 */
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			*data = 0;
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				*data += tx_queue->tx_packets;
			}
			data++;
		}
	}
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			*data = 0;
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				*data += rx_queue->rx_packets;
			}
			data++;
		}
	}
	if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
		int xdp;

		for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
			data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
			data++;
		}
	}

	efx_ptp_update_stats(efx, data);
}
/* This must be called with rtnl_lock held. */
int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_link_state *link_state = &efx->link_state;

	/* PHY settings are read via MCDI; serialise with MAC changes */
	mutex_lock(&efx->mac_lock);
	efx_mcdi_phy_get_link_ksettings(efx, cmd);
	mutex_unlock(&efx->mac_lock);

	/* Both MACs support pause frames (bidirectional and respond-only) */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);

	/* NOTE(review): in internal loopback the cached link_state
	 * speed/duplex override what MCDI reported — presumably the PHY
	 * values are not meaningful in loopback; confirm against
	 * efx_mcdi_phy_get_link_ksettings().
	 */
	if (LOOPBACK_INTERNAL(efx)) {
		cmd->base.speed = link_state->speed;
		cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
	}

	return 0;
}
/* This must be called with rtnl_lock held. */
int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	int rc;

	/* GMAC does not support 1000Mbps HD */
	if ((cmd->base.speed == SPEED_1000) &&
	    (cmd->base.duplex != DUPLEX_FULL)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "rejecting unsupported 1000Mbps HD setting\n");
		return -EINVAL;
	}

	/* Settings are applied via MCDI; serialise with MAC changes */
	mutex_lock(&efx->mac_lock);
	rc = efx_mcdi_phy_set_link_ksettings(efx, cmd);
	mutex_unlock(&efx->mac_lock);

	return rc;
}
/* ethtool --show-fec: query FEC configuration via MCDI, under the
 * MAC lock.  Returns 0 or a negative errno from the MCDI call.
 */
int efx_ethtool_get_fecparam(struct net_device *net_dev,
			     struct ethtool_fecparam *fecparam)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx_mcdi_phy_get_fecparam(efx, fecparam);
	mutex_unlock(&efx->mac_lock);

	return rc;
}
/* ethtool --set-fec: apply FEC configuration via MCDI, under the
 * MAC lock.  Returns 0 or a negative errno from the MCDI call.
 */
int efx_ethtool_set_fecparam(struct net_device *net_dev,
			     struct ethtool_fecparam *fecparam)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx_mcdi_phy_set_fecparam(efx, fecparam);
	mutex_unlock(&efx->mac_lock);

	return rc;
}
/* MAC address mask including only I/G bit */
static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};

/* "Match exactly" masks for the ethtool flow-spec fields used below */
#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
#define IP_PROTO_FULL_MASK	0xFF
#define PORT_FULL_MASK		((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK	((__force __be16)~0)

/* Set all 128 bits of an IPv6 address mask (four __be32 words) */
static inline void ip6_fill_mask(__be32 *mask)
{
	mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
}
/* Translate a stored driver filter spec back into the ethtool
 * ethtool_rx_flow_spec representation for ETHTOOL_GRXCLSRULE.
 *
 * Each branch below handles one filter shape the driver can insert:
 * TCP/UDP over IPv4, TCP/UDP over IPv6, Ethernet-header matches,
 * and "user" IPv4/IPv6 matches without L4 ports.  The !(match_flags &
 * ~(...)) tests ensure the spec contains no match bits beyond what the
 * chosen ethtool flow type can express.  Returns 0 on success,
 * -EINVAL if the spec doesn't fit any representable shape, or the
 * error from looking up the filter.
 */
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule,
				      u32 *rss_context)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct efx_filter_spec spec;
	int rc;

	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
					rule->location, &spec);
	if (rc)
		return rc;

	/* Destination: a drop rule or a specific RX queue */
	if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		rule->ring_cookie = RX_CLS_FLOW_DISC;
	else
		rule->ring_cookie = spec.dmaq_id;

	if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
	    spec.ether_type == htons(ETH_P_IP) &&
	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
	    !(spec.match_flags &
	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
		/* TCP or UDP over IPv4 */
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V4_FLOW : UDP_V4_FLOW);
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			ip_entry->ip4dst = spec.loc_host[0];
			ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			ip_entry->ip4src = spec.rem_host[0];
			ip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
			ip_entry->pdst = spec.loc_port;
			ip_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
			ip_entry->psrc = spec.rem_port;
			ip_mask->psrc = PORT_FULL_MASK;
		}
	} else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
	    spec.ether_type == htons(ETH_P_IPV6) &&
	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
	    !(spec.match_flags &
	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
		/* TCP or UDP over IPv6 */
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V6_FLOW : UDP_V6_FLOW);
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			memcpy(ip6_entry->ip6dst, spec.loc_host,
			       sizeof(ip6_entry->ip6dst));
			ip6_fill_mask(ip6_mask->ip6dst);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			memcpy(ip6_entry->ip6src, spec.rem_host,
			       sizeof(ip6_entry->ip6src));
			ip6_fill_mask(ip6_mask->ip6src);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
			ip6_entry->pdst = spec.loc_port;
			ip6_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
			ip6_entry->psrc = spec.rem_port;
			ip6_mask->psrc = PORT_FULL_MASK;
		}
	} else if (!(spec.match_flags &
		     ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
		       EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
		       EFX_FILTER_MATCH_OUTER_VID))) {
		/* Ethernet-header match (MAC addresses and/or ethertype) */
		rule->flow_type = ETHER_FLOW;
		if (spec.match_flags &
		    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
			ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
			/* Full MAC match vs. I/G (multicast) bit only */
			if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
				eth_broadcast_addr(mac_mask->h_dest);
			else
				ether_addr_copy(mac_mask->h_dest,
						mac_addr_ig_mask);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
			ether_addr_copy(mac_entry->h_source, spec.rem_mac);
			eth_broadcast_addr(mac_mask->h_source);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
			mac_entry->h_proto = spec.ether_type;
			mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
		}
	} else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
		   spec.ether_type == htons(ETH_P_IP) &&
		   !(spec.match_flags &
		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		       EFX_FILTER_MATCH_IP_PROTO))) {
		/* IPv4 match without L4 ports: "user" flow spec */
		rule->flow_type = IPV4_USER_FLOW;
		uip_entry->ip_ver = ETH_RX_NFC_IP4;
		if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
			uip_mask->proto = IP_PROTO_FULL_MASK;
			uip_entry->proto = spec.ip_proto;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			uip_entry->ip4dst = spec.loc_host[0];
			uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			uip_entry->ip4src = spec.rem_host[0];
			uip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
	} else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
		   spec.ether_type == htons(ETH_P_IPV6) &&
		   !(spec.match_flags &
		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		       EFX_FILTER_MATCH_IP_PROTO))) {
		/* IPv6 match without L4 ports: "user" flow spec */
		rule->flow_type = IPV6_USER_FLOW;
		if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
			uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
			uip6_entry->l4_proto = spec.ip_proto;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			memcpy(uip6_entry->ip6dst, spec.loc_host,
			       sizeof(uip6_entry->ip6dst));
			ip6_fill_mask(uip6_mask->ip6dst);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			memcpy(uip6_entry->ip6src, spec.rem_host,
			       sizeof(uip6_entry->ip6src));
			ip6_fill_mask(uip6_mask->ip6src);
		}
	} else {
		/* The above should handle all filters that we insert */
		WARN_ON(1);
		return -EINVAL;
	}

	/* Optional VLAN and RSS-context extensions */
	if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
		rule->flow_type |= FLOW_EXT;
		rule->h_ext.vlan_tci = spec.outer_vid;
		rule->m_ext.vlan_tci = htons(0xfff);
	}

	if (spec.flags & EFX_FILTER_FLAG_RX_RSS) {
		rule->flow_type |= FLOW_RSS;
		*rss_context = spec.rss_context;
	}

	return rc;
}
/* Handle the ethtool ETHTOOL_GRX* family of queries: RX ring count,
 * per-flow-type RSS hash fields (optionally for a user RSS context),
 * and the count / contents / ID list of manually inserted RX
 * classification rules.  Returns 0 or a negative errno.
 */
int efx_ethtool_get_rxnfc(struct net_device *net_dev,
			  struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	u32 rss_context = 0;
	s32 rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = efx->n_rx_channels;
		return 0;

	case ETHTOOL_GRXFH: {
		struct efx_rss_context *ctx = &efx->rss_context;
		__u64 data;

		mutex_lock(&efx->rss_lock);
		/* FLOW_RSS with a non-zero rss_context selects a
		 * user-allocated RSS context instead of the default one.
		 */
		if (info->flow_type & FLOW_RSS && info->rss_context) {
			ctx = efx_find_rss_context_entry(efx, info->rss_context);
			if (!ctx) {
				rc = -ENOENT;
				goto out_unlock;
			}
		}

		data = 0;
		if (!efx_rss_active(ctx)) /* No RSS */
			goto out_setdata_unlock;

		switch (info->flow_type & ~FLOW_RSS) {
		case UDP_V4_FLOW:
		case UDP_V6_FLOW:
			/* 4-tuple hashing of UDP is a per-context option */
			if (ctx->rx_hash_udp_4tuple)
				data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
					RXH_IP_SRC | RXH_IP_DST);
			else
				data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V4_FLOW:
		case TCP_V6_FLOW:
			/* TCP is always hashed on the full 4-tuple */
			data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
				RXH_IP_SRC | RXH_IP_DST);
			break;
		case SCTP_V4_FLOW:
		case SCTP_V6_FLOW:
		case AH_ESP_V4_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV4_FLOW:
		case IPV6_FLOW:
			/* Other IP traffic is hashed on addresses only */
			data = RXH_IP_SRC | RXH_IP_DST;
			break;
		default:
			break;
		}
out_setdata_unlock:
		info->data = data;
out_unlock:
		mutex_unlock(&efx->rss_lock);
		return rc;
	}

	case ETHTOOL_GRXCLSRLCNT:
		info->data = efx_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		/* Driver chooses rule locations itself (see set_class_rule) */
		info->data |= RX_CLS_LOC_SPECIAL;
		info->rule_cnt =
			efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
		return 0;

	case ETHTOOL_GRXCLSRULE:
		if (efx_filter_get_rx_id_limit(efx) == 0)
			return -EOPNOTSUPP;
		rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context);
		if (rc < 0)
			return rc;
		/* Only report an RSS context when the rule uses RSS */
		if (info->fs.flow_type & FLOW_RSS)
			info->rss_context = rss_context;
		return 0;

	case ETHTOOL_GRXCLSRLALL:
		info->data = efx_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
					   rule_locs, info->rule_cnt);
		if (rc < 0)
			return rc;
		info->rule_cnt = rc;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
  830. static inline bool ip6_mask_is_full(__be32 mask[4])
  831. {
  832. return !~(mask[0] & mask[1] & mask[2] & mask[3]);
  833. }
  834. static inline bool ip6_mask_is_empty(__be32 mask[4])
  835. {
  836. return !(mask[0] | mask[1] | mask[2] | mask[3]);
  837. }
/* Convert an ethtool RX classification rule into an efx filter spec and
 * insert it, letting the driver choose the location.  On success the
 * chosen filter ID is written back to rule->location and 0 is returned.
 *
 * For every match field the hardware supports only all-or-nothing
 * matching: a zero mask leaves the field wild, an all-ones mask adds
 * the corresponding EFX_FILTER_MATCH_* flag, and any partial mask is
 * rejected with -EINVAL.
 */
static int efx_ethtool_set_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule,
				      u32 rss_context)
{
	/* Aliases into the ethtool match-value (h_u) and match-mask (m_u)
	 * unions; which pair is valid depends on rule->flow_type.
	 */
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
	u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS);
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	enum efx_filter_flags flags = 0;
	struct efx_filter_spec spec;
	int rc;

	/* Check that user wants us to choose the location */
	if (rule->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	/* Range-check ring_cookie */
	if (rule->ring_cookie >= efx->n_rx_channels &&
	    rule->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	/* Check for unsupported extensions */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
	     rule->m_ext.data[1]))
		return -EINVAL;

	if (efx->rx_scatter)
		flags |= EFX_FILTER_FLAG_RX_SCATTER;
	if (rule->flow_type & FLOW_RSS)
		flags |= EFX_FILTER_FLAG_RX_RSS;

	/* RX_CLS_FLOW_DISC maps to the hardware drop queue */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags,
			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
			   EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);

	if (rule->flow_type & FLOW_RSS)
		spec.rss_context = rss_context;

	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		/* IPv4 TCP/UDP always matches ethertype + IP protocol */
		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
				    EFX_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IP);
		spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP
							 : IPPROTO_UDP;
		if (ip_mask->ip4dst) {
			if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = ip_entry->ip4dst;
		}
		if (ip_mask->ip4src) {
			if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = ip_entry->ip4src;
		}
		if (ip_mask->pdst) {
			if (ip_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip_entry->pdst;
		}
		if (ip_mask->psrc) {
			if (ip_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip_entry->psrc;
		}
		/* TOS matching is not supported at all */
		if (ip_mask->tos)
			return -EINVAL;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
				    EFX_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IPV6);
		spec.ip_proto = flow_type == TCP_V6_FLOW ? IPPROTO_TCP
							 : IPPROTO_UDP;
		if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
			if (!ip6_mask_is_full(ip6_mask->ip6dst))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
		}
		if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
			if (!ip6_mask_is_full(ip6_mask->ip6src))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
		}
		if (ip6_mask->pdst) {
			if (ip6_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip6_entry->pdst;
		}
		if (ip6_mask->psrc) {
			if (ip6_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip6_entry->psrc;
		}
		/* Traffic-class matching is not supported at all */
		if (ip6_mask->tclass)
			return -EINVAL;
		break;

	case IPV4_USER_FLOW:
		/* No matching on L4 bytes or TOS; ip_ver must say IPv4 */
		if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
		    uip_entry->ip_ver != ETH_RX_NFC_IP4)
			return -EINVAL;
		spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
		spec.ether_type = htons(ETH_P_IP);
		if (uip_mask->ip4dst) {
			if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = uip_entry->ip4dst;
		}
		if (uip_mask->ip4src) {
			if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = uip_entry->ip4src;
		}
		if (uip_mask->proto) {
			if (uip_mask->proto != IP_PROTO_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
			spec.ip_proto = uip_entry->proto;
		}
		break;

	case IPV6_USER_FLOW:
		if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
			return -EINVAL;
		spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
		spec.ether_type = htons(ETH_P_IPV6);
		if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
			if (!ip6_mask_is_full(uip6_mask->ip6dst))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
		}
		if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
			if (!ip6_mask_is_full(uip6_mask->ip6src))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
		}
		if (uip6_mask->l4_proto) {
			if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
			spec.ip_proto = uip6_entry->l4_proto;
		}
		break;

	case ETHER_FLOW:
		if (!is_zero_ether_addr(mac_mask->h_dest)) {
			/* Destination MAC can match either exactly (full
			 * mask) or on the individual/group bit only.
			 */
			if (ether_addr_equal(mac_mask->h_dest,
					     mac_addr_ig_mask))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
			else if (is_broadcast_ether_addr(mac_mask->h_dest))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
			else
				return -EINVAL;
			ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
		}
		if (!is_zero_ether_addr(mac_mask->h_source)) {
			if (!is_broadcast_ether_addr(mac_mask->h_source))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
			ether_addr_copy(spec.rem_mac, mac_entry->h_source);
		}
		if (mac_mask->h_proto) {
			if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
			spec.ether_type = mac_entry->h_proto;
		}
		break;

	default:
		return -EINVAL;
	}

	/* Optional VLAN-ID match from the FLOW_EXT fields; only an exact
	 * 12-bit VID match is supported.
	 */
	if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
		if (rule->m_ext.vlan_tci != htons(0xfff))
			return -EINVAL;
		spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		spec.outer_vid = rule->h_ext.vlan_tci;
	}

	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	/* Report the location the driver chose back to the caller */
	rule->location = rc;
	return 0;
}
  1034. int efx_ethtool_set_rxnfc(struct net_device *net_dev,
  1035. struct ethtool_rxnfc *info)
  1036. {
  1037. struct efx_nic *efx = efx_netdev_priv(net_dev);
  1038. if (efx_filter_get_rx_id_limit(efx) == 0)
  1039. return -EOPNOTSUPP;
  1040. switch (info->cmd) {
  1041. case ETHTOOL_SRXCLSRLINS:
  1042. return efx_ethtool_set_class_rule(efx, &info->fs,
  1043. info->rss_context);
  1044. case ETHTOOL_SRXCLSRLDEL:
  1045. return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
  1046. info->fs.location);
  1047. default:
  1048. return -EOPNOTSUPP;
  1049. }
  1050. }
  1051. u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
  1052. {
  1053. struct efx_nic *efx = efx_netdev_priv(net_dev);
  1054. if (efx->n_rx_channels == 1)
  1055. return 0;
  1056. return ARRAY_SIZE(efx->rss_context.rx_indir_table);
  1057. }
  1058. u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
  1059. {
  1060. struct efx_nic *efx = efx_netdev_priv(net_dev);
  1061. return efx->type->rx_hash_key_size;
  1062. }
  1063. int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
  1064. u8 *hfunc)
  1065. {
  1066. struct efx_nic *efx = efx_netdev_priv(net_dev);
  1067. int rc;
  1068. rc = efx->type->rx_pull_rss_config(efx);
  1069. if (rc)
  1070. return rc;
  1071. if (hfunc)
  1072. *hfunc = ETH_RSS_HASH_TOP;
  1073. if (indir)
  1074. memcpy(indir, efx->rss_context.rx_indir_table,
  1075. sizeof(efx->rss_context.rx_indir_table));
  1076. if (key)
  1077. memcpy(key, efx->rss_context.rx_hash_key,
  1078. efx->type->rx_hash_key_size);
  1079. return 0;
  1080. }
  1081. int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
  1082. const u8 *key, const u8 hfunc)
  1083. {
  1084. struct efx_nic *efx = efx_netdev_priv(net_dev);
  1085. /* Hash function is Toeplitz, cannot be changed */
  1086. if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
  1087. return -EOPNOTSUPP;
  1088. if (!indir && !key)
  1089. return 0;
  1090. if (!key)
  1091. key = efx->rss_context.rx_hash_key;
  1092. if (!indir)
  1093. indir = efx->rss_context.rx_indir_table;
  1094. return efx->type->rx_push_rss_config(efx, true, indir, key);
  1095. }
/* Report the hash function, indirection table and key of the user RSS
 * context identified by @rss_context.  Returns -ENOENT if no such
 * context exists, -EOPNOTSUPP if the NIC type has no per-context RSS
 * support, or any error from refreshing the config from hardware.
 */
int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
				 u8 *key, u8 *hfunc, u32 rss_context)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_rss_context *ctx;
	int rc = 0;

	if (!efx->type->rx_pull_rss_context_config)
		return -EOPNOTSUPP;

	/* rss_lock protects the context list and the cached table/key */
	mutex_lock(&efx->rss_lock);
	ctx = efx_find_rss_context_entry(efx, rss_context);
	if (!ctx) {
		rc = -ENOENT;
		goto out_unlock;
	}
	/* Refresh the cached table/key from the hardware */
	rc = efx->type->rx_pull_rss_context_config(efx, ctx);
	if (rc)
		goto out_unlock;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (indir)
		memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
	if (key)
		memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
out_unlock:
	mutex_unlock(&efx->rss_lock);
	return rc;
}
/* Create, reconfigure or delete a user RSS context.
 * *rss_context == ETH_RXFH_CONTEXT_ALLOC requests allocation of a new
 * context (its user-visible ID is returned through *rss_context);
 * otherwise *rss_context names an existing context.  @delete destroys
 * the named context instead of reconfiguring it.
 */
int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
				 const u32 *indir, const u8 *key,
				 const u8 hfunc, u32 *rss_context,
				 bool delete)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_rss_context *ctx;
	bool allocated = false;
	int rc;

	if (!efx->type->rx_push_rss_context_config)
		return -EOPNOTSUPP;
	/* Hash function is Toeplitz, cannot be changed */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	mutex_lock(&efx->rss_lock);

	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
		if (delete) {
			/* alloc + delete == Nothing to do */
			rc = -EINVAL;
			goto out_unlock;
		}
		ctx = efx_alloc_rss_context_entry(efx);
		if (!ctx) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		/* No hardware context yet; the push below creates it */
		ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
		/* Initialise indir table and key to defaults */
		efx_set_default_rx_indir_table(efx, ctx);
		netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
		allocated = true;
	} else {
		ctx = efx_find_rss_context_entry(efx, *rss_context);
		if (!ctx) {
			rc = -ENOENT;
			goto out_unlock;
		}
	}

	if (delete) {
		/* delete this context: pushing a NULL config releases the
		 * hardware resources, then drop the software entry too
		 */
		rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
		if (!rc)
			efx_free_rss_context_entry(ctx);
		goto out_unlock;
	}

	/* Keep the current key/table for whichever was not supplied */
	if (!key)
		key = ctx->rx_hash_key;
	if (!indir)
		indir = ctx->rx_indir_table;

	rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
	/* If creation of a fresh context failed, don't leak its entry */
	if (rc && allocated)
		efx_free_rss_context_entry(ctx);
	else
		*rss_context = ctx->user_id;
out_unlock:
	mutex_unlock(&efx->rss_lock);
	return rc;
}
  1181. int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
  1182. {
  1183. struct efx_nic *efx = efx_netdev_priv(net_dev);
  1184. int rc;
  1185. rc = efx->type->map_reset_flags(flags);
  1186. if (rc < 0)
  1187. return rc;
  1188. return efx_reset(efx, rc);
  1189. }
  1190. int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
  1191. struct ethtool_eeprom *ee,
  1192. u8 *data)
  1193. {
  1194. struct efx_nic *efx = efx_netdev_priv(net_dev);
  1195. int ret;
  1196. mutex_lock(&efx->mac_lock);
  1197. ret = efx_mcdi_phy_get_module_eeprom(efx, ee, data);
  1198. mutex_unlock(&efx->mac_lock);
  1199. return ret;
  1200. }
  1201. int efx_ethtool_get_module_info(struct net_device *net_dev,
  1202. struct ethtool_modinfo *modinfo)
  1203. {
  1204. struct efx_nic *efx = efx_netdev_priv(net_dev);
  1205. int ret;
  1206. mutex_lock(&efx->mac_lock);
  1207. ret = efx_mcdi_phy_get_module_info(efx, modinfo);
  1208. mutex_unlock(&efx->mac_lock);
  1209. return ret;
  1210. }