
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

enum {NETDEV_STATS, IXGBEVF_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBEVF_STAT(_name, _stat) { \
	.stat_string = _name, \
	.type = IXGBEVF_STATS, \
	.sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \
	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}

#define IXGBEVF_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.type = NETDEV_STATS, \
	.sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
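
/* Each table entry records where a counter lives (an offset into either
 * struct ixgbevf_adapter or struct net_device_stats) and how wide it is,
 * so ixgbevf_get_ethtool_stats() can extract every counter with the same
 * pointer arithmetic instead of naming each field. Illustrative sketch of
 * the extraction (it mirrors the loop further down in this file):
 *
 *	char *p = (char *)adapter + ixgbevf_gstrings_stats[i].stat_offset;
 *	data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat == sizeof(u64)) ?
 *		  *(u64 *)p : *(u32 *)p;
 */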
static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
	IXGBEVF_NETDEV_STAT(rx_packets),
	IXGBEVF_NETDEV_STAT(tx_packets),
	IXGBEVF_NETDEV_STAT(rx_bytes),
	IXGBEVF_NETDEV_STAT(tx_bytes),
	IXGBEVF_STAT("tx_busy", tx_busy),
	IXGBEVF_STAT("tx_restart_queue", restart_queue),
	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
	IXGBEVF_NETDEV_STAT(multicast),
	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
	IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
	IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
	IXGBEVF_STAT("tx_ipsec", tx_ipsec),
	IXGBEVF_STAT("rx_ipsec", rx_ipsec),
};

#define IXGBEVF_QUEUE_STATS_LEN ( \
	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
	 (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
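
/* On top of the global table above, every Tx, XDP and Rx ring contributes
 * sizeof(struct ixgbevf_stats) / sizeof(u64) entries to the stats count --
 * one u64 per field of struct ixgbevf_stats (currently packets and bytes).
 */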
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"Link test (on/offline)"
};

#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBEVF_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)

static int ixgbevf_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = -1;

	if (adapter->link_up) {
		__u32 speed = SPEED_10000;

		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			speed = SPEED_100;
			break;
		}

		cmd->base.speed = speed;
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
	return IXGBE_REGS_LEN * sizeof(u32);
}
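
/* The dump filled in by ixgbevf_get_regs() below is what "ethtool -d <iface>"
 * prints: IXGBE_REGS_LEN (45) words covering the general, interrupt, Rx DMA
 * and Tx registers for the first two queues of the VF.
 */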
static void ixgbevf_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs,
			     void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u32 regs_len = ixgbevf_get_regs_len(netdev);
	u8 i;

	memset(p, 0, regs_len);

	/* generate a number suitable for ethtool's register version */
	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR
	 */
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

	/* Receive DMA */
	for (i = 0; i < 2; i++)
		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

	/* Receive */
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

	/* Transmit */
	for (i = 0; i < 2; i++)
		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}
static void ixgbevf_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	strscpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IXGBEVF_MAX_RXD;
	ring->tx_max_pending = IXGBEVF_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}
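
/* Ring resizing is driven by "ethtool -G <iface> rx <n> tx <n>". The
 * requested counts are clamped to [IXGBEVF_MIN_*D, IXGBEVF_MAX_*D] and
 * aligned to the descriptor multiple; if the interface is running, new
 * rings are allocated and cloned before the old ones are torn down, so a
 * failed allocation leaves the original rings untouched.
 */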
static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	u32 new_rx_count, new_tx_count;
	int i, j, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count))
		return 0;

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		tx_ring = vmalloc(array_size(sizeof(*tx_ring),
					     adapter->num_tx_queues +
					     adapter->num_xdp_queues));
		if (!tx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->xdp_ring[j];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	if (new_rx_count != adapter->rx_ring_count) {
		rx_ring = vmalloc(array_size(sizeof(*rx_ring),
					     adapter->num_rx_queues));
		if (!rx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = *adapter->rx_ring[i];

			/* Clear copied XDP RX-queue info */
			memset(&rx_ring[i].xdp_rxq, 0,
			       sizeof(rx_ring[i].xdp_rxq));

			rx_ring[i].count = new_rx_count;
			err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_rx_resources(&rx_ring[i]);
				}

				vfree(rx_ring);
				rx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	/* bring interface down to prepare for update */
	ixgbevf_down(adapter);

	/* Tx */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
			ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
			*adapter->xdp_ring[j] = tx_ring[i];
		}
		adapter->xdp_ring_count = new_tx_count;

		vfree(tx_ring);
		tx_ring = NULL;
	}

	/* Rx */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

		vfree(rx_ring);
		rx_ring = NULL;
	}

	/* restore interface using new values */
	ixgbevf_up(adapter);

clear_reset:
	/* free Tx resources if Rx error is encountered */
	if (tx_ring) {
		for (i = 0;
		     i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
			ixgbevf_free_tx_resources(&tx_ring[i]);
		vfree(tx_ring);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);

	return err;
}
static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
	switch (stringset) {
	case ETH_SS_TEST:
		return IXGBEVF_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBEVF_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBEVF_PRIV_FLAGS_STR_LEN;
	default:
		return -EINVAL;
	}
}
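
/* ixgbevf_get_ethtool_stats() and ixgbevf_get_strings() below must emit
 * values and names in the same order: the global table first, then a
 * packets/bytes pair per Tx, XDP and Rx queue. ethtool pairs the two
 * arrays up by index when rendering "ethtool -S <iface>" output.
 */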
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbevf_ring *ring;
	int i, j;
	char *p;

	ixgbevf_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
		switch (ixgbevf_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *)net_stats +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		case IXGBEVF_STATS:
			p = (char *)adapter +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate Tx queue data */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate XDP queue data */
	for (j = 0; j < adapter->num_xdp_queues; j++) {
		ring = adapter->xdp_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate Rx queue data */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
}
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_xdp_queues; i++) {
			sprintf(p, "xdp_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "xdp_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbevf_priv_flags_strings,
		       IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
		break;
	}
}
static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	*data = 0;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (!link_up)
		*data = 1;

	return *data;
}
/* ethtool register test data */
struct ixgbevf_reg_test {
	u16 reg;
	u8 array_len;
	u8 test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables. We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */
#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ .reg = 0 }
};
static const u32 register_test_patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	u32 pat, val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
		before = ixgbevf_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg,
				register_test_patterns[pat] & write);
		val = ixgbevf_read_reg(&adapter->hw, reg);
		if (val != (register_test_patterns[pat] & write & mask)) {
			hw_dbg(&adapter->hw,
			       "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			       reg, val,
			       register_test_patterns[pat] & write & mask);
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}
static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	u32 val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbevf_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbevf_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		       reg, (val & mask), write & mask);
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	const struct ixgbevf_reg_test *test;
	u32 i;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	test = reg_test_vf;

	/* Perform the register test, looping through the test table
	 * until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     test->reg + 4 + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return *data;
}
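
/* Entry point for "ethtool -t <iface> [online|offline]". data[0] carries
 * the register test result and data[1] the link test result, matching the
 * order of ixgbe_gstrings_test[]. Offline mode also runs the register test
 * and resets the device; online mode only checks link.
 */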
static void ixgbevf_diag_test(struct net_device *netdev,
			      struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBEVF_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */
		hw_dbg(&adapter->hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbevf_close(netdev);
		else
			ixgbevf_reset(adapter);

		hw_dbg(&adapter->hw, "register testing starting\n");
		if (ixgbevf_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbevf_reset(adapter);

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
		if (if_running)
			ixgbevf_open(netdev);
	} else {
		hw_dbg(&adapter->hw, "online testing starting\n");

		/* Online tests */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}
static int ixgbevf_nway_reset(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

static int ixgbevf_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
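
/* The *_itr_setting fields store the requested interval left-shifted by 2;
 * settings of 0 and 1 are reserved for special modes (a setting of 1 selects
 * the adaptive defaults IXGBE_20K_ITR/IXGBE_12K_ITR below), which is why
 * both the getter above and the setter below pass values <= 1 through
 * unshifted. For example, "ethtool -C <iface> rx-usecs 10" stores 40.
 */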
static int ixgbevf_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_q_vector *q_vector;
	int num_vectors, i;
	u16 tx_itr_param, rx_itr_param;

	/* don't accept Tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count &&
	    adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* Tx only */
			q_vector->itr = tx_itr_param;
		else
			/* Rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbevf_write_eitr(q_vector);
	}

	return 0;
}
static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			     u32 *rules __always_unused)
{
	struct ixgbevf_adapter *adapter = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		return 0;
	default:
		hw_dbg(&adapter->hw, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
		return IXGBEVF_X550_VFRETA_SIZE;

	return IXGBEVF_82599_RETA_SIZE;
}

static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBEVF_RSS_HASH_KEY_SIZE;
}

static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
		if (key)
			memcpy(key, adapter->rss_key,
			       ixgbevf_get_rxfh_key_size(netdev));

		if (indir) {
			int i;

			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
				indir[i] = adapter->rss_indir_tbl[i];
		}
	} else {
		/* If neither indirection table nor hash key was requested
		 * - just return a success avoiding taking any locks.
		 */
		if (!indir && !key)
			return 0;

		spin_lock_bh(&adapter->mbx_lock);
		if (indir)
			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
						      adapter->num_rx_queues);

		if (!err && key)
			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

		spin_unlock_bh(&adapter->mbx_lock);
	}

	return err;
}
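
/* The "legacy-rx" flag is toggled through ethtool's private-flags
 * interface, e.g.:
 *
 *	ethtool --show-priv-flags <iface>
 *	ethtool --set-priv-flags <iface> legacy-rx on
 *
 * Flipping it on a running interface triggers a reinit so the Rx queues
 * are rebuilt with the new receive path.
 */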
static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}

static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int flags = adapter->flags;

	flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
	if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
		flags |= IXGBEVF_FLAGS_LEGACY_RX;

	if (flags != adapter->flags) {
		adapter->flags = flags;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbevf_reinit_locked(adapter);
	}

	return 0;
}

static const struct ethtool_ops ixgbevf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo		= ixgbevf_get_drvinfo,
	.get_regs_len		= ixgbevf_get_regs_len,
	.get_regs		= ixgbevf_get_regs,
	.nway_reset		= ixgbevf_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= ixgbevf_get_ringparam,
	.set_ringparam		= ixgbevf_set_ringparam,
	.get_msglevel		= ixgbevf_get_msglevel,
	.set_msglevel		= ixgbevf_set_msglevel,
	.self_test		= ixgbevf_diag_test,
	.get_sset_count		= ixgbevf_get_sset_count,
	.get_strings		= ixgbevf_get_strings,
	.get_ethtool_stats	= ixgbevf_get_ethtool_stats,
	.get_coalesce		= ixgbevf_get_coalesce,
	.set_coalesce		= ixgbevf_set_coalesce,
	.get_rxnfc		= ixgbevf_get_rxnfc,
	.get_rxfh_indir_size	= ixgbevf_get_rxfh_indir_size,
	.get_rxfh_key_size	= ixgbevf_get_rxfh_key_size,
	.get_rxfh		= ixgbevf_get_rxfh,
	.get_link_ksettings	= ixgbevf_get_link_ksettings,
	.get_priv_flags		= ixgbevf_get_priv_flags,
	.set_priv_flags		= ixgbevf_set_priv_flags,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}