// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 * Copyright 2020 NXP
 */

#include <linux/net_tstamp.h>
#include <linux/nospec.h>

#include "dpni.h"	/* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"

/* To be kept in sync with DPNI statistics */
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
	"[hw] rx frames",
	"[hw] rx bytes",
	"[hw] rx mcast frames",
	"[hw] rx mcast bytes",
	"[hw] rx bcast frames",
	"[hw] rx bcast bytes",
	"[hw] tx frames",
	"[hw] tx bytes",
	"[hw] tx mcast frames",
	"[hw] tx mcast bytes",
	"[hw] tx bcast frames",
	"[hw] tx bcast bytes",
	"[hw] rx filtered frames",
	"[hw] rx discarded frames",
	"[hw] rx nobuffer discards",
	"[hw] tx discarded frames",
	"[hw] tx confirmed frames",
	"[hw] tx dequeued bytes",
	"[hw] tx dequeued frames",
	"[hw] tx rejected bytes",
	"[hw] tx rejected frames",
	"[hw] tx pending frames",
};

#define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)

static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
	/* per-cpu stats */
	"[drv] tx conf frames",
	"[drv] tx conf bytes",
	"[drv] tx sg frames",
	"[drv] tx sg bytes",
	"[drv] tx tso frames",
	"[drv] tx tso bytes",
	"[drv] rx sg frames",
	"[drv] rx sg bytes",
	"[drv] tx converted sg frames",
	"[drv] tx converted sg bytes",
	"[drv] enqueue portal busy",
	/* Channel stats */
	"[drv] dequeue portal busy",
	"[drv] channel pull errors",
	"[drv] cdan",
	"[drv] xdp drop",
	"[drv] xdp tx",
	"[drv] xdp tx errors",
	"[drv] xdp redirect",
	/* FQ stats */
	"[qbman] rx pending frames",
	"[qbman] rx pending bytes",
	"[qbman] tx conf pending frames",
	"[qbman] tx conf pending bytes",
	"[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)

static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

	strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}
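
/* Restart link autonegotiation through phylink; on DPNI-only interfaces
 * there is no attached PHY/MAC to renegotiate, so the op is not supported.
 */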
static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (dpaa2_eth_is_type_phy(priv))
		return phylink_ethtool_nway_reset(priv->mac->phylink);

	return -EOPNOTSUPP;
}
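
/* With no PHY attached, report the link parameters last retrieved from the
 * MC firmware: autoneg off, plus the rate and duplex from the DPNI link state.
 */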
static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
			     struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (dpaa2_eth_is_type_phy(priv))
		return phylink_ethtool_ksettings_get(priv->mac->phylink,
						     link_settings);

	link_settings->base.autoneg = AUTONEG_DISABLE;
	if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
		link_settings->base.duplex = DUPLEX_FULL;
	link_settings->base.speed = priv->link_state.rate;

	return 0;
}

static int
dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
			     const struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (!dpaa2_eth_is_type_phy(priv))
		return -EOPNOTSUPP;

	return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
}

static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
				     struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 link_options = priv->link_state.options;

	if (dpaa2_eth_is_type_phy(priv)) {
		phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
		return;
	}

	pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
	pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
	pause->autoneg = AUTONEG_DISABLE;
}
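
/* In the DPNI link options, DPNI_LINK_OPT_PAUSE follows the Rx pause setting
 * and DPNI_LINK_OPT_ASYM_PAUSE marks that the Tx setting differs from Rx, so
 * the Tx state is effectively encoded as PAUSE xor ASYM_PAUSE.
 */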
static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
				    struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_link_cfg cfg = {0};
	int err;

	if (!dpaa2_eth_has_pause_support(priv)) {
		netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
			    DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
		return -EOPNOTSUPP;
	}

	if (dpaa2_eth_is_type_phy(priv))
		return phylink_ethtool_set_pauseparam(priv->mac->phylink,
						      pause);
	if (pause->autoneg)
		return -EOPNOTSUPP;

	cfg.rate = priv->link_state.rate;
	cfg.options = priv->link_state.options;
	if (pause->rx_pause)
		cfg.options |= DPNI_LINK_OPT_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
	if (!!pause->rx_pause ^ !!pause->tx_pause)
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;

	if (cfg.options == priv->link_state.options)
		return 0;

	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err) {
		netdev_err(net_dev, "dpni_set_link_cfg() failed\n");
		return err;
	}

	priv->link_state.options = cfg.options;

	return 0;
}
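
/* The string table must stay in the same order as the values filled in by
 * dpaa2_eth_get_ethtool_stats(): DPNI hardware counters first, then driver
 * extras, then (if a MAC is attached) the MAC statistics.
 */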
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
				  u8 *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
			strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
			strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (dpaa2_eth_has_mac(priv))
			dpaa2_mac_get_strings(p);
		break;
	}
}

static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
	int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	switch (sset) {
	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
		if (dpaa2_eth_has_mac(priv))
			num_ss_stats += dpaa2_mac_get_sset_count();
		return num_ss_stats;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fill in hardware counters, as returned by MC */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
{
	int i = 0;
	int j, k, err;
	int num_cnt;
	union dpni_statistics dpni_stats;
	u32 fcnt, bcnt;
	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
	u32 buf_cnt;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_drv_stats *extras;
	struct dpaa2_eth_ch_stats *ch_stats;
	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
		sizeof(dpni_stats.page_0),
		sizeof(dpni_stats.page_1),
		sizeof(dpni_stats.page_2),
		sizeof(dpni_stats.page_3),
		sizeof(dpni_stats.page_4),
		sizeof(dpni_stats.page_5),
		sizeof(dpni_stats.page_6),
	};

	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

	/* Print standard counters, from DPNI statistics */
	for (j = 0; j <= 6; j++) {
		/* We're not interested in pages 4 & 5 for now */
		if (j == 4 || j == 5)
			continue;
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					  j, &dpni_stats);
		if (err == -EINVAL)
			/* Older firmware versions don't support all pages */
			memset(&dpni_stats, 0, sizeof(dpni_stats));
		else if (err)
			netdev_warn(net_dev, "dpni_get_statistics(%d) failed\n", j);

		num_cnt = dpni_stats_page_size[j] / sizeof(u64);
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}

	/* Print per-cpu extra stats */
	for_each_online_cpu(k) {
		extras = per_cpu_ptr(priv->percpu_extras, k);
		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
	}
	i += j;

	/* Per-channel stats */
	for (k = 0; k < priv->num_channels; k++) {
		ch_stats = &priv->channel[k]->stats;
		for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
	}
	i += j;

	for (j = 0; j < priv->num_fqs; j++) {
		/* Print FQ instantaneous counts */
		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
					      &fcnt, &bcnt);
		if (err) {
			netdev_warn(net_dev, "FQ query error %d", err);
			return;
		}

		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
			fcnt_tx_total += fcnt;
			bcnt_tx_total += bcnt;
		} else {
			fcnt_rx_total += fcnt;
			bcnt_rx_total += bcnt;
		}
	}

	*(data + i++) = fcnt_rx_total;
	*(data + i++) = bcnt_rx_total;
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;

	err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
	if (err) {
		netdev_warn(net_dev, "Buffer count query error %d\n", err);
		return;
	}
	*(data + i++) = buf_cnt;

	if (dpaa2_eth_has_mac(priv))
		dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}

static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;

	if (eth_mask->h_proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
		*(__be16 *)(key + off) = eth_value->h_proto;
		*(__be16 *)(mask + off) = eth_mask->h_proto;
		*fields |= DPAA2_ETH_DIST_ETHTYPE;
	}

	if (!is_zero_ether_addr(eth_mask->h_source)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
		ether_addr_copy(key + off, eth_value->h_source);
		ether_addr_copy(mask + off, eth_mask->h_source);
		*fields |= DPAA2_ETH_DIST_ETHSRC;
	}

	if (!is_zero_ether_addr(eth_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, eth_value->h_dest);
		ether_addr_copy(mask + off, eth_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}
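
/* For user-defined IPv4 rules, ethtool packs the first four L4 header bytes
 * into l4_4_bytes: the upper 16 bits hold the source port and the lower 16
 * bits the destination port, so split them back out below.
 */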
static int dpaa2_eth_prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
				   struct ethtool_usrip4_spec *uip_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;
	u32 tmp_value, tmp_mask;

	if (uip_mask->tos || uip_mask->ip_ver)
		return -EOPNOTSUPP;

	if (uip_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = uip_value->ip4src;
		*(__be32 *)(mask + off) = uip_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (uip_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = uip_value->ip4dst;
		*(__be32 *)(mask + off) = uip_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (uip_mask->proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
		*(u8 *)(key + off) = uip_value->proto;
		*(u8 *)(mask + off) = uip_mask->proto;
		*fields |= DPAA2_ETH_DIST_IPPROTO;
	}

	if (uip_mask->l4_4_bytes) {
		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = htons(tmp_value >> 16);
		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
		*fields |= DPAA2_ETH_DIST_L4SRC;

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	return 0;
}

static int dpaa2_eth_prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
				  struct ethtool_tcpip4_spec *l4_mask,
				  void *key, void *mask, u8 l4_proto, u64 *fields)
{
	int off;

	if (l4_mask->tos)
		return -EOPNOTSUPP;

	if (l4_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = l4_value->ip4src;
		*(__be32 *)(mask + off) = l4_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (l4_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = l4_value->ip4dst;
		*(__be32 *)(mask + off) = l4_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (l4_mask->psrc) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = l4_value->psrc;
		*(__be16 *)(mask + off) = l4_mask->psrc;
		*fields |= DPAA2_ETH_DIST_L4SRC;
	}

	if (l4_mask->pdst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = l4_value->pdst;
		*(__be16 *)(mask + off) = l4_mask->pdst;
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames with the specified L4 proto */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
	*(u8 *)(key + off) = l4_proto;
	*(u8 *)(mask + off) = 0xFF;
	*fields |= DPAA2_ETH_DIST_IPPROTO;

	return 0;
}

static int dpaa2_eth_prep_ext_rule(struct ethtool_flow_ext *ext_value,
				   struct ethtool_flow_ext *ext_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;

	if (ext_mask->vlan_etype)
		return -EOPNOTSUPP;

	if (ext_mask->vlan_tci) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
		*(__be16 *)(key + off) = ext_value->vlan_tci;
		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
		*fields |= DPAA2_ETH_DIST_VLAN;
	}

	return 0;
}

static int dpaa2_eth_prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
				       struct ethtool_flow_ext *ext_mask,
				       void *key, void *mask, u64 *fields)
{
	int off;

	if (!is_zero_ether_addr(ext_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, ext_value->h_dest);
		ether_addr_copy(mask + off, ext_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}
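
/* Translate one ethtool_rx_flow_spec into the key/mask pair expected by the
 * hardware classifier, dispatching on the basic flow type and then folding in
 * the FLOW_EXT (VLAN TCI) and FLOW_MAC_EXT (destination MAC) extensions.
 */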
static int dpaa2_eth_prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key,
				   void *mask, u64 *fields)
{
	int err;

	switch (fs->flow_type & 0xFF) {
	case ETHER_FLOW:
		err = dpaa2_eth_prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
					      key, mask, fields);
		break;
	case IP_USER_FLOW:
		err = dpaa2_eth_prep_uip_rule(&fs->h_u.usr_ip4_spec,
					      &fs->m_u.usr_ip4_spec, key, mask, fields);
		break;
	case TCP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
					     key, mask, IPPROTO_TCP, fields);
		break;
	case UDP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
					     key, mask, IPPROTO_UDP, fields);
		break;
	case SCTP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.sctp_ip4_spec,
					     &fs->m_u.sctp_ip4_spec, key, mask,
					     IPPROTO_SCTP, fields);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (err)
		return err;

	if (fs->flow_type & FLOW_EXT) {
		err = dpaa2_eth_prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
		if (err)
			return err;
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		err = dpaa2_eth_prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key,
						  mask, fields);
		if (err)
			return err;
	}

	return 0;
}
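
/* Add or remove one flow steering entry. The lookup key and its mask are laid
 * out back to back in a single DMA-mapped buffer, and the entry is applied to
 * every traffic class unless the DPNI shares one FS table across all of them.
 */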
static int dpaa2_eth_do_cls_rule(struct net_device *net_dev,
				 struct ethtool_rx_flow_spec *fs,
				 bool add)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpni_rule_cfg rule_cfg = { 0 };
	struct dpni_fs_action_cfg fs_act = { 0 };
	dma_addr_t key_iova;
	u64 fields = 0;
	void *key_buf;
	int i, err;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
		return -EINVAL;

	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

	/* allocate twice the key size, for the actual key and for mask */
	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;

	/* Fill the key and mask memory areas */
	err = dpaa2_eth_prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
	if (err)
		goto free_mem;

	if (!dpaa2_eth_fs_mask_enabled(priv)) {
		/* Masking allows us to configure a maximal key during init and
		 * use it for all flow steering rules. Without it, we include
		 * in the key only the fields actually used, so we need to
		 * extract the others from the final key buffer.
		 *
		 * Program the FS key if needed, or return error if previously
		 * set key can't be used for the current rule. User needs to
		 * delete existing rules in this case to allow for the new one.
		 */
		if (!priv->rx_cls_fields) {
			err = dpaa2_eth_set_cls(net_dev, fields);
			if (err)
				goto free_mem;

			priv->rx_cls_fields = fields;
		} else if (priv->rx_cls_fields != fields) {
			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
			err = -EOPNOTSUPP;
			goto free_mem;
		}

		dpaa2_eth_cls_trim_rule(key_buf, fields);
		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
	}

	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	rule_cfg.key_iova = key_iova;
	if (dpaa2_eth_fs_mask_enabled(priv))
		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

	if (add) {
		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
			fs_act.options |= DPNI_FS_OPT_DISCARD;
		else
			fs_act.flow_id = fs->ring_cookie;
	}
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		if (add)
			err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
						i, fs->location, &rule_cfg,
						&fs_act);
		else
			err = dpni_remove_fs_entry(priv->mc_io, 0,
						   priv->mc_token, i,
						   &rule_cfg);
		if (err || priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
	kfree(key_buf);

	return err;
}

static int dpaa2_eth_num_cls_rules(struct dpaa2_eth_priv *priv)
{
	int i, rules = 0;

	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
		if (priv->cls_rules[i].in_use)
			rules++;

	return rules;
}
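
/* Install @new_fs at @location, first removing any rule already present
 * there; a NULL @new_fs turns this into a plain delete.
 */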
static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
				     struct ethtool_rx_flow_spec *new_fs,
				     unsigned int location)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_cls_rule *rule;
	int err = -EINVAL;

	if (!priv->rx_cls_enabled)
		return -EOPNOTSUPP;

	if (location >= dpaa2_eth_fs_count(priv))
		return -EINVAL;

	rule = &priv->cls_rules[location];

	/* If a rule is present at the specified location, delete it. */
	if (rule->in_use) {
		err = dpaa2_eth_do_cls_rule(net_dev, &rule->fs, false);
		if (err)
			return err;

		rule->in_use = 0;

		if (!dpaa2_eth_fs_mask_enabled(priv) &&
		    !dpaa2_eth_num_cls_rules(priv))
			priv->rx_cls_fields = 0;
	}

	/* If no new entry to add, return here */
	if (!new_fs)
		return err;

	err = dpaa2_eth_do_cls_rule(net_dev, new_fs, true);
	if (err)
		return err;

	rule->in_use = 1;
	rule->fs = *new_fs;

	return 0;
}

static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int max_rules = dpaa2_eth_fs_count(priv);
	int i, j = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_GRXFH:
		/* we purposely ignore cmd->flow_type for now, because the
		 * classifier only supports a single set of fields for all
		 * protocols
		 */
		rxnfc->data = priv->rx_hash_fields;
		break;
	case ETHTOOL_GRXRINGS:
		rxnfc->data = dpaa2_eth_queue_count(priv);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
		rxnfc->data = max_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		if (rxnfc->fs.location >= max_rules)
			return -EINVAL;
		rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
							max_rules);
		if (!priv->cls_rules[rxnfc->fs.location].in_use)
			return -EINVAL;
		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < max_rules; i++) {
			if (!priv->cls_rules[i].in_use)
				continue;
			if (j == rxnfc->rule_cnt)
				return -EMSGSIZE;
			rule_locs[j++] = i;
		}
		rxnfc->rule_cnt = j;
		rxnfc->data = max_rules;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
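
/* Entry point for "ethtool -N" requests. For example (hypothetical interface
 * name), steering UDP destination port 4789 to Rx queue 2:
 *
 *   ethtool -N eth0 flow-type udp4 dst-port 4789 action 2
 *
 * reaches dpaa2_eth_update_cls_rule() via ETHTOOL_SRXCLSRLINS.
 */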
static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc)
{
	int err = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_SRXFH:
		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
			return -EOPNOTSUPP;
		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
		break;
	case ETHTOOL_SRXCLSRLINS:
		err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = dpaa2_eth_update_cls_rule(net_dev, NULL, rxnfc->fs.location);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}
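
/* PTP clock index reported to user space; the DPAA2 PTP (dprtc) driver sets
 * this when its clock registers, and -1 means no PHC is available.
 */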
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);

static int dpaa2_eth_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *info)
{
	if (!dpaa2_ptp)
		return ethtool_op_get_ts_info(dev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = dpaa2_phc_index;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON) |
			 (1 << HWTSTAMP_TX_ONESTEP_SYNC);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

static int dpaa2_eth_get_tunable(struct net_device *net_dev,
				 const struct ethtool_tunable *tuna,
				 void *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = priv->rx_copybreak;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dpaa2_eth_set_tunable(struct net_device *net_dev,
				 const struct ethtool_tunable *tuna,
				 const void *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		priv->rx_copybreak = *(u32 *)data;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
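
/* All channels share the same IRQ coalescing configuration, so reporting the
 * settings of channel 0 is representative for the whole interface.
 */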
static int dpaa2_eth_get_coalesce(struct net_device *dev,
				  struct ethtool_coalesce *ic,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_io *dpio = priv->channel[0]->dpio;

	dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs);
	ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio);

	return 0;
}
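
/* Apply the new Rx coalescing settings to every channel, rolling all
 * channels back to the previous values on failure. For example
 * (hypothetical interface name):
 *
 *   ethtool -C eth0 rx-usecs 64 adaptive-rx on
 */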
static int dpaa2_eth_set_coalesce(struct net_device *dev,
				  struct ethtool_coalesce *ic,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_io *dpio;
	int prev_adaptive;
	u32 prev_rx_usecs;
	int i, j, err;

	/* Keep track of the previous value, just in case we fail */
	dpio = priv->channel[0]->dpio;
	dpaa2_io_get_irq_coalescing(dpio, &prev_rx_usecs);
	prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio);

	/* Setup new value for rx coalescing */
	for (i = 0; i < priv->num_channels; i++) {
		dpio = priv->channel[i]->dpio;

		dpaa2_io_set_adaptive_coalescing(dpio,
						 ic->use_adaptive_rx_coalesce);
		err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs);
		if (err)
			goto restore_rx_usecs;
	}

	return 0;

restore_rx_usecs:
	for (j = 0; j < i; j++) {
		dpio = priv->channel[j]->dpio;

		dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs);
		dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive);
	}

	return err;
}

const struct ethtool_ops dpaa2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo = dpaa2_eth_get_drvinfo,
	.nway_reset = dpaa2_eth_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
	.set_link_ksettings = dpaa2_eth_set_link_ksettings,
	.get_pauseparam = dpaa2_eth_get_pauseparam,
	.set_pauseparam = dpaa2_eth_set_pauseparam,
	.get_sset_count = dpaa2_eth_get_sset_count,
	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
	.get_strings = dpaa2_eth_get_strings,
	.get_rxnfc = dpaa2_eth_get_rxnfc,
	.set_rxnfc = dpaa2_eth_set_rxnfc,
	.get_ts_info = dpaa2_eth_get_ts_info,
	.get_tunable = dpaa2_eth_get_tunable,
	.set_tunable = dpaa2_eth_set_tunable,
	.get_coalesce = dpaa2_eth_get_coalesce,
	.set_coalesce = dpaa2_eth_set_coalesce,
};