// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/net/ethernet/freescale/gianfar_ethtool.c
 *
 * Gianfar Ethernet Driver
 * Ethtool support for Gianfar Enet
 * Based on e1000 ethtool support
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <[email protected]>
 *
 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>
#include <linux/of_platform.h>
#include <linux/fsl/ptp_qoriq.h>

#include "gianfar.h"

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff

static const char stat_gstrings[][ETH_GSTRING_LEN] = {
	/* extra stats */
	"rx-allocation-errors",
	"rx-large-frame-errors",
	"rx-short-frame-errors",
	"rx-non-octet-errors",
	"rx-crc-errors",
	"rx-overrun-errors",
	"rx-busy-errors",
	"rx-babbling-errors",
	"rx-truncated-frames",
	"ethernet-bus-error",
	"tx-babbling-errors",
	"tx-underrun-errors",
	"tx-timeout-errors",
	/* rmon stats */
	"tx-rx-64-frames",
	"tx-rx-65-127-frames",
	"tx-rx-128-255-frames",
	"tx-rx-256-511-frames",
	"tx-rx-512-1023-frames",
	"tx-rx-1024-1518-frames",
	"tx-rx-1519-1522-good-vlan",
	"rx-bytes",
	"rx-packets",
	"rx-fcs-errors",
	"receive-multicast-packet",
	"receive-broadcast-packet",
	"rx-control-frame-packets",
	"rx-pause-frame-packets",
	"rx-unknown-op-code",
	"rx-alignment-error",
	"rx-frame-length-error",
	"rx-code-error",
	"rx-carrier-sense-error",
	"rx-undersize-packets",
	"rx-oversize-packets",
	"rx-fragmented-frames",
	"rx-jabber-frames",
	"rx-dropped-frames",
	"tx-byte-counter",
	"tx-packets",
	"tx-multicast-packets",
	"tx-broadcast-packets",
	"tx-pause-control-frames",
	"tx-deferral-packets",
	"tx-excessive-deferral-packets",
	"tx-single-collision-packets",
	"tx-multiple-collision-packets",
	"tx-late-collision-packets",
	"tx-excessive-collision-packets",
	"tx-total-collision",
	"reserved",
	"tx-dropped-frames",
	"tx-jabber-frames",
	"tx-fcs-errors",
	"tx-control-frames",
	"tx-oversize-frames",
	"tx-undersize-frames",
	"tx-fragmented-frames",
};

/* Fill in a buffer with the strings which correspond to the
 * stats */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
		memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
	else
		memcpy(buf, stat_gstrings,
		       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}

/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
			    u64 *buf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	atomic64_t *extra = (atomic64_t *)&priv->extra_stats;

	for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
		buf[i] = atomic64_read(&extra[i]);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;

		for (; i < GFAR_STATS_LEN; i++, rmon++)
			buf[i] = (u64) gfar_read(rmon);
	}
}
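
/* Usage sketch (illustration, not part of the driver): these counters
 * surface through the standard ethtool stats interface, e.g.
 *
 *   ethtool -S eth0
 *
 * which prints each string from stat_gstrings[] next to the value
 * filled in by gfar_fill_stats() above ("eth0" is a placeholder name).
 */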
static int gfar_sset_count(struct net_device *dev, int sset)
{
	struct gfar_private *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
			return GFAR_STATS_LEN;
		else
			return GFAR_EXTRA_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
			  struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
}

/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
	return sizeof(struct gfar);
}

/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *regbuf)
{
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
	u32 *buf = (u32 *) regbuf;

	for (i = 0; i < sizeof(struct gfar) / sizeof(u32); i++)
		buf[i] = gfar_read(&theregs[i]);
}

/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
				     unsigned int usecs)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if usecs > 0 */
	return DIV_ROUND_UP(usecs * 1000, count);
}

/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
				     unsigned int ticks)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	unsigned int count;

	/* The timer is different, depending on the interface speed */
	switch (phydev->speed) {
	case SPEED_1000:
		count = GFAR_GBIT_TIME;
		break;
	case SPEED_100:
		count = GFAR_100_TIME;
		break;
	case SPEED_10:
	default:
		count = GFAR_10_TIME;
		break;
	}

	/* Make sure we return a number greater than 0
	 * if ticks is > 0 */
	return (ticks * count) / 1000;
}
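
/* Worked example (illustration only; assumes the GFAR_*_TIME tick
 * lengths defined in gianfar.h, e.g. GFAR_GBIT_TIME == 512 at gigabit
 * speed). A tick is "count" nanoseconds long, so at 1 Gb/s:
 *
 *   gfar_usecs2ticks(priv, 32) == DIV_ROUND_UP(32 * 1000, 512) == 63
 *   gfar_ticks2usecs(priv, 63) == (63 * 512) / 1000            == 32
 *
 * i.e. the round trip preserves the requested 32 us.
 */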
/* Get the coalescing parameters, and put them in the cvals
 * structure. */
static int gfar_gcoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals,
			  struct kernel_ethtool_coalesce *kernel_coal,
			  struct netlink_ext_ack *extack)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar_priv_tx_q *tx_queue = NULL;
	unsigned long rxtime;
	unsigned long rxcount;
	unsigned long txtime;
	unsigned long txcount;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	rx_queue = priv->rx_queue[0];
	tx_queue = priv->tx_queue[0];

	rxtime = get_ictt_value(rx_queue->rxic);
	rxcount = get_icft_value(rx_queue->rxic);
	txtime = get_ictt_value(tx_queue->txic);
	txcount = get_icft_value(tx_queue->txic);
	cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
	cvals->rx_max_coalesced_frames = rxcount;

	cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
	cvals->tx_max_coalesced_frames = txcount;

	return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals,
			  struct kernel_ethtool_coalesce *kernel_coal,
			  struct netlink_ext_ack *extack)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i, err = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	/* Check the bounds of the values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	/* Check the bounds of the values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	/* Set up rx coalescing */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
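
/* Usage sketch (illustration, not part of the driver): given the
 * bounds checks above, a request such as
 *
 *   ethtool -C eth0 rx-usecs 32 rx-frames 16 tx-usecs 64 tx-frames 32
 *
 * is accepted, while rx-usecs above GFAR_MAX_COAL_USECS (0xffff) or
 * rx-frames above GFAR_MAX_COAL_FRAMES (0xff) is rejected with EINVAL
 * ("eth0" is a placeholder name).
 */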
/* Fills in rvals with the current ring parameters. Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
			    struct ethtool_ringparam *rvals,
			    struct kernel_ethtool_ringparam *kernel_rvals,
			    struct netlink_ext_ack *extack)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue[0];
	rx_queue = priv->rx_queue[0];

	rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
	rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

	/* Values changeable by the user. The valid values are
	 * in the range 1 to the "*_max_pending" counterpart above.
	 */
	rvals->rx_pending = rx_queue->rx_ring_size;
	rvals->rx_mini_pending = rx_queue->rx_ring_size;
	rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
	rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in motion.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals,
			   struct kernel_ethtool_ringparam *kernel_rvals,
			   struct netlink_ext_ack *extack)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	/* Change the sizes */
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP)
		err = startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
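
/* Usage sketch (illustration, not part of the driver): ring sizes must
 * be powers of two and no larger than GFAR_{RX,TX}_MAX_RING_SIZE, so
 *
 *   ethtool -G eth0 rx 512 tx 512
 *
 * succeeds, while "ethtool -G eth0 rx 500" fails with "Ring sizes must
 * be a power of 2" ("eth0" is a placeholder name).
 */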
static void gfar_gpauseparam(struct net_device *dev,
			     struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);

	epause->autoneg = !!priv->pause_aneg_en;
	epause->rx_pause = !!priv->rx_pause_en;
	epause->tx_pause = !!priv->tx_pause_en;
}

static int gfar_spauseparam(struct net_device *dev,
			    struct ethtool_pauseparam *epause)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (!phydev)
		return -ENODEV;

	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;

	priv->rx_pause_en = priv->tx_pause_en = 0;
	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
	if (epause->rx_pause) {
		priv->rx_pause_en = 1;

		if (epause->tx_pause)
			priv->tx_pause_en = 1;
	} else if (epause->tx_pause) {
		priv->tx_pause_en = 1;
	}

	if (epause->autoneg)
		priv->pause_aneg_en = 1;
	else
		priv->pause_aneg_en = 0;

	if (!epause->autoneg) {
		u32 tempval = gfar_read(&regs->maccfg1);

		tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);

		priv->tx_actual_en = 0;
		if (priv->tx_pause_en) {
			priv->tx_actual_en = 1;
			tempval |= MACCFG1_TX_FLOW;
		}

		if (priv->rx_pause_en)
			tempval |= MACCFG1_RX_FLOW;
		gfar_write(&regs->maccfg1, tempval);
	}

	return 0;
}

int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_RXCSUM)))
		return 0;

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	dev->features = features;

	if (dev->flags & IFF_UP) {
		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
static uint32_t gfar_get_msglevel(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->msg_enable = data;
}

#ifdef CONFIG_PM
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (priv->wol_supported & GFAR_WOL_MAGIC)
		wol->supported |= WAKE_MAGIC;

	if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
		wol->supported |= WAKE_UCAST;

	if (priv->wol_opts & GFAR_WOL_MAGIC)
		wol->wolopts |= WAKE_MAGIC;

	if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
		wol->wolopts |= WAKE_UCAST;
}

static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gfar_private *priv = netdev_priv(dev);
	u16 wol_opts = 0;
	int err;

	if (!priv->wol_supported && wol->wolopts)
		return -EINVAL;

	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		wol_opts |= GFAR_WOL_MAGIC;
	} else {
		if (wol->wolopts & WAKE_UCAST)
			wol_opts |= GFAR_WOL_FILER_UCAST;
	}

	wol_opts &= priv->wol_supported;
	priv->wol_opts = 0;

	err = device_set_wakeup_enable(priv->dev, wol_opts);
	if (err)
		return err;

	priv->wol_opts = wol_opts;

	return 0;
}
#endif
static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
{
	u32 fcr = 0x0, fpr = FPR_FILER_MASK;

	if (ethflow & RXH_L2DA) {
		fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;

		fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
		      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_VLAN) {
		fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_IP_SRC) {
		fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & (RXH_IP_DST)) {
		fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L3_PROTO) {
		fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_0_1) {
		fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

	if (ethflow & RXH_L4_B_2_3) {
		fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
		      RQFCR_AND | RQFCR_HASHTBL_0;
		priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
		priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
		gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}
}
static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
				       u64 class)
{
	unsigned int cmp_rqfpr;
	unsigned int *local_rqfpr;
	unsigned int *local_rqfcr;
	int i = 0x0, k = 0x0;
	int j = MAX_FILER_IDX, l = 0x0;
	int ret = 1;

	local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
				    GFP_KERNEL);
	if (!local_rqfpr || !local_rqfcr) {
		ret = 0;
		goto err;
	}

	switch (class) {
	case TCP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
		break;
	case UDP_V4_FLOW:
		cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
		break;
	case TCP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
		break;
	case UDP_V6_FLOW:
		cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
		break;
	default:
		netdev_err(priv->ndev,
			   "Right now this class is not supported\n");
		ret = 0;
		goto err;
	}

	for (i = 0; i < MAX_FILER_IDX + 1; i++) {
		local_rqfpr[j] = priv->ftp_rqfpr[i];
		local_rqfcr[j] = priv->ftp_rqfcr[i];
		j--;
		if ((priv->ftp_rqfcr[i] ==
		     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
		    (priv->ftp_rqfpr[i] == cmp_rqfpr))
			break;
	}

	if (i == MAX_FILER_IDX + 1) {
		netdev_err(priv->ndev,
			   "No parse rule found, can't create hash rules\n");
		ret = 0;
		goto err;
	}

	/* A match marks the start of a cluster rule; if such a cluster
	 * was already programmed, we need to overwrite those rules
	 */
	for (l = i + 1; l < MAX_FILER_IDX; l++) {
		if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
			priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
					     RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
			priv->ftp_rqfpr[l] = FPR_FILER_MASK;
			gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
					 priv->ftp_rqfpr[l]);
			break;
		}

		if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
		    (priv->ftp_rqfcr[l] & RQFCR_AND))
			continue;
		else {
			local_rqfpr[j] = priv->ftp_rqfpr[l];
			local_rqfcr[j] = priv->ftp_rqfcr[l];
			j--;
		}
	}

	priv->cur_filer_idx = l - 1;

	/* hash rules */
	ethflow_to_filer_rules(priv, ethflow);

	/* Write back the popped out rules again */
	for (k = j + 1; k < MAX_FILER_IDX; k++) {
		priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
		priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
		gfar_write_filer(priv, priv->cur_filer_idx,
				 local_rqfcr[k], local_rqfpr[k]);
		if (!priv->cur_filer_idx)
			break;
		priv->cur_filer_idx = priv->cur_filer_idx - 1;
	}

err:
	kfree(local_rqfcr);
	kfree(local_rqfpr);
	return ret;
}
static int gfar_set_hash_opts(struct gfar_private *priv,
			      struct ethtool_rxnfc *cmd)
{
	/* write the filer rules here */
	if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
		return -EINVAL;

	return 0;
}

static int gfar_check_filer_hardware(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 i;

	/* Check if we are in FIFO mode */
	i = gfar_read(&regs->ecntrl);
	i &= ECNTRL_FIFM;
	if (i == ECNTRL_FIFM) {
		netdev_notice(priv->ndev, "Interface in FIFO mode\n");
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
		if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}
	/* Or in standard mode */
	else {
		i = gfar_read(&regs->rctrl);
		i &= RCTRL_PRSDEP_MASK;
		if (i == RCTRL_PRSDEP_MASK) {
			netdev_info(priv->ndev,
				    "Receive Queue Filtering enabled\n");
		} else {
			netdev_warn(priv->ndev,
				    "Receive Queue Filtering disabled\n");
			return -EOPNOTSUPP;
		}
	}

	/* Sets the properties for arbitrary filer rule
	 * to the first 4 Layer 4 Bytes
	 */
	gfar_write(&regs->rbifx, 0xC0C1C2C3);
	return 0;
}
/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
	tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	tab->fe[tab->index].prop = mask;
	tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
				   RQFCR_AND;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
				       struct filer_table *tab)
{
	gfar_set_mask(mask, tab);
	tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
	tab->fe[tab->index].prop = value;
	tab->index++;
}

/* For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us value=0 and mask=~0 for a don't-care tuple,
 * and for a don't-care mask it gives us a 0.
 *
 * The don't-care check and the mask=0 adjustment are done for VLAN
 * and MAC fields at an upper level (due to missing information at this
 * level). Those entries can be discarded if they are value=0 and mask=0.
 *
 * Furthermore, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
			       struct filer_table *tab)
{
	switch (flag) {
	/* 3bit */
	case RQFCR_PID_PRI:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_PRI_MASK;
		break;
	/* 8bit */
	case RQFCR_PID_L4P:
	case RQFCR_PID_TOS:
		if (!~(mask | RQFCR_PID_L4P_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_L4P_MASK;
		break;
	/* 12bit */
	case RQFCR_PID_VID:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_VID_MASK;
		break;
	/* 16bit */
	case RQFCR_PID_DPT:
	case RQFCR_PID_SPT:
	case RQFCR_PID_ETY:
		if (!~(mask | RQFCR_PID_PORT_MASK))
			return;
		if (!mask)
			mask = ~0;
		else
			mask |= RQFCR_PID_PORT_MASK;
		break;
	/* 24bit */
	case RQFCR_PID_DAH:
	case RQFCR_PID_DAL:
	case RQFCR_PID_SAH:
	case RQFCR_PID_SAL:
		if (!(value | mask))
			return;
		mask |= RQFCR_PID_MAC_MASK;
		break;
	/* for all real 32bit masks */
	default:
		if (!~mask)
			return;
		if (!mask)
			mask = ~0;
		break;
	}
	gfar_set_general_attribute(value, mask, flag, tab);
}
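
/* Illustration (not part of the driver): for a 16-bit field such as a
 * TCP/UDP port, the property only occupies the low bits, so the switch
 * above folds RQFCR_PID_PORT_MASK into the caller's mask to one-pad
 * the unused bits, drops the attribute entirely when
 * (mask | RQFCR_PID_PORT_MASK) has every bit set, and widens mask==0
 * to ~0 before gfar_set_general_attribute() emits the mask+value pair.
 */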
/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
			      struct ethtool_tcpip4_spec *mask,
			      struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(be16_to_cpu(value->pdst),
			   be16_to_cpu(mask->pdst),
			   RQFCR_PID_DPT, tab);
	gfar_set_attribute(be16_to_cpu(value->psrc),
			   be16_to_cpu(mask->psrc),
			   RQFCR_PID_SPT, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
			     struct ethtool_usrip4_spec *mask,
			     struct filer_table *tab)
{
	gfar_set_attribute(be32_to_cpu(value->ip4src),
			   be32_to_cpu(mask->ip4src),
			   RQFCR_PID_SIA, tab);
	gfar_set_attribute(be32_to_cpu(value->ip4dst),
			   be32_to_cpu(mask->ip4dst),
			   RQFCR_PID_DIA, tab);
	gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
	gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
	gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
			   be32_to_cpu(mask->l4_4_bytes),
			   RQFCR_PID_ARB, tab);
}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
			   struct filer_table *tab)
{
	u32 upper_temp_mask = 0;
	u32 lower_temp_mask = 0;

	/* Source address */
	if (!is_broadcast_ether_addr(mask->h_source)) {
		if (is_zero_ether_addr(mask->h_source)) {
			upper_temp_mask = 0xFFFFFFFF;
			lower_temp_mask = 0xFFFFFFFF;
		} else {
			upper_temp_mask = mask->h_source[0] << 16 |
					  mask->h_source[1] << 8 |
					  mask->h_source[2];
			lower_temp_mask = mask->h_source[3] << 16 |
					  mask->h_source[4] << 8 |
					  mask->h_source[5];
		}
		/* Upper 24bit */
		gfar_set_attribute(value->h_source[0] << 16 |
				   value->h_source[1] << 8 |
				   value->h_source[2],
				   upper_temp_mask, RQFCR_PID_SAH, tab);
		/* And the same for the lower part */
		gfar_set_attribute(value->h_source[3] << 16 |
				   value->h_source[4] << 8 |
				   value->h_source[5],
				   lower_temp_mask, RQFCR_PID_SAL, tab);
	}
	/* Destination address */
	if (!is_broadcast_ether_addr(mask->h_dest)) {
		/* Special for destination is limited broadcast */
		if ((is_broadcast_ether_addr(value->h_dest) &&
		     is_zero_ether_addr(mask->h_dest))) {
			gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
		} else {
			if (is_zero_ether_addr(mask->h_dest)) {
				upper_temp_mask = 0xFFFFFFFF;
				lower_temp_mask = 0xFFFFFFFF;
			} else {
				upper_temp_mask = mask->h_dest[0] << 16 |
						  mask->h_dest[1] << 8 |
						  mask->h_dest[2];
				lower_temp_mask = mask->h_dest[3] << 16 |
						  mask->h_dest[4] << 8 |
						  mask->h_dest[5];
			}
			/* Upper 24bit */
			gfar_set_attribute(value->h_dest[0] << 16 |
					   value->h_dest[1] << 8 |
					   value->h_dest[2],
					   upper_temp_mask, RQFCR_PID_DAH, tab);
			/* And the same for the lower part */
			gfar_set_attribute(value->h_dest[3] << 16 |
					   value->h_dest[4] << 8 |
					   value->h_dest[5],
					   lower_temp_mask, RQFCR_PID_DAL, tab);
		}
	}

	gfar_set_attribute(be16_to_cpu(value->h_proto),
			   be16_to_cpu(mask->h_proto),
			   RQFCR_PID_ETY, tab);
}

static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
}

static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
{
	return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
}

static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}

static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
{
	return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
		VLAN_PRIO_SHIFT;
}
/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
				 struct filer_table *tab)
{
	u32 vlan = 0, vlan_mask = 0;
	u32 id = 0, id_mask = 0;
	u32 cfi = 0, cfi_mask = 0;
	u32 prio = 0, prio_mask = 0;
	u32 old_index = tab->index;

	/* Check if vlan is wanted */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
		if (!rule->m_ext.vlan_tci)
			rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);

		vlan = RQFPR_VLN;
		vlan_mask = RQFPR_VLN;

		/* Separate the fields */
		id = vlan_tci_vid(rule);
		id_mask = vlan_tci_vidm(rule);
		cfi = vlan_tci_cfi(rule);
		cfi_mask = vlan_tci_cfim(rule);
		prio = vlan_tci_prio(rule);
		prio_mask = vlan_tci_priom(rule);

		if (cfi_mask) {
			if (cfi)
				vlan |= RQFPR_CFI;
			vlan_mask |= RQFPR_CFI;
		}
	}

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
				    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
				  &rule->m_u.tcp_ip4_spec, tab);
		break;
	case UDP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
				    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
		gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
				  &rule->m_u.udp_ip4_spec, tab);
		break;
	case SCTP_V4_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		/* 132 is IPPROTO_SCTP */
		gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
		gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
				  (struct ethtool_tcpip4_spec *)&rule->m_u,
				  tab);
		break;
	case IP_USER_FLOW:
		gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
				    tab);
		gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
				 (struct ethtool_usrip4_spec *) &rule->m_u,
				 tab);
		break;
	case ETHER_FLOW:
		if (vlan)
			gfar_set_parse_bits(vlan, vlan_mask, tab);
		gfar_set_ether((struct ethhdr *) &rule->h_u,
			       (struct ethhdr *) &rule->m_u, tab);
		break;
	default:
		return -1;
	}

	/* Set the vlan attributes in the end */
	if (vlan) {
		gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
		gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
	}

	/* If there has been nothing written till now, it must be a default */
	if (tab->index == old_index) {
		gfar_set_mask(0xFFFFFFFF, tab);
		tab->fe[tab->index].ctrl = 0x20;
		tab->fe[tab->index].prop = 0x0;
		tab->index++;
	}

	/* Remove last AND */
	tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

	/* Specify which queue to use or to drop */
	if (rule->ring_cookie == RX_CLS_FLOW_DISC)
		tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
	else
		tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

	/* Only big enough entries can be clustered */
	if (tab->index > (old_index + 2)) {
		tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
		tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
	}

	/* In rare cases the cache can be full while there is
	 * free space in hw
	 */
	if (tab->index > MAX_FILER_CACHE_IDX - 1)
		return -EBUSY;

	return 0;
}
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
				  struct filer_table *tab)
{
	u32 i = 0;

	if (tab->index > MAX_FILER_IDX - 1)
		return -EBUSY;

	/* Fill regular entries */
	for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
		gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
	/* Fill the rest with fall-throughs */
	for (; i < MAX_FILER_IDX; i++)
		gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
	/* Last entry must be default accept
	 * because that's what people expect
	 */
	gfar_write_filer(priv, i, 0x20, 0x0);

	return 0;
}

static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
				 struct gfar_private *priv)
{
	if (flow->flow_type & FLOW_EXT) {
		if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
			netdev_warn(priv->ndev,
				    "User-specific data not supported!\n");
		if (~flow->m_ext.vlan_etype)
			netdev_warn(priv->ndev,
				    "VLAN-etype not supported!\n");
	}

	if (flow->flow_type == IP_USER_FLOW)
		if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
			netdev_warn(priv->ndev,
				    "IP-Version differing from IPv4 not supported!\n");

	return 0;
}

static int gfar_process_filer_changes(struct gfar_private *priv)
{
	struct ethtool_flow_spec_container *j;
	struct filer_table *tab;
	s32 ret = 0;

	/* So index is set to zero, too! */
	tab = kzalloc(sizeof(*tab), GFP_KERNEL);
	if (tab == NULL)
		return -ENOMEM;

	/* Now convert the existing filer data from flow_spec into
	 * filer tables binary format
	 */
	list_for_each_entry(j, &priv->rx_list.list, list) {
		ret = gfar_convert_to_filer(&j->fs, tab);
		if (ret == -EBUSY) {
			netdev_err(priv->ndev,
				   "Rule not added: No free space!\n");
			goto end;
		}
		if (ret == -1) {
			netdev_err(priv->ndev,
				   "Rule not added: Unsupported Flow-type!\n");
			goto end;
		}
	}

	/* Write everything to hardware */
	ret = gfar_write_filer_table(priv, tab);
	if (ret == -EBUSY) {
		netdev_err(priv->ndev, "Rule not added: No free space!\n");
		goto end;
	}

end:
	kfree(tab);
	return ret;
}
static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	u32 i = 0;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xFF;

	flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
	flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}

static int gfar_add_cls(struct gfar_private *priv,
			struct ethtool_rx_flow_spec *flow)
{
	struct ethtool_flow_spec_container *temp, *comp;
	int ret = 0;

	temp = kmalloc(sizeof(*temp), GFP_KERNEL);
	if (temp == NULL)
		return -ENOMEM;
	memcpy(&temp->fs, flow, sizeof(temp->fs));

	gfar_invert_masks(&temp->fs);
	ret = gfar_check_capability(&temp->fs, priv);
	if (ret)
		goto clean_mem;
	/* Link in the new element at the right @location */
	if (list_empty(&priv->rx_list.list)) {
		ret = gfar_check_filer_hardware(priv);
		if (ret != 0)
			goto clean_mem;
		list_add(&temp->list, &priv->rx_list.list);
		goto process;
	} else {
		list_for_each_entry(comp, &priv->rx_list.list, list) {
			if (comp->fs.location > flow->location) {
				list_add_tail(&temp->list, &comp->list);
				goto process;
			}
			if (comp->fs.location == flow->location) {
				netdev_err(priv->ndev,
					   "Rule not added: ID %d not free!\n",
					   flow->location);
				ret = -EBUSY;
				goto clean_mem;
			}
		}
		list_add_tail(&temp->list, &priv->rx_list.list);
	}

process:
	priv->rx_list.count++;
	ret = gfar_process_filer_changes(priv);
	if (ret)
		goto clean_list;
	return ret;

clean_list:
	priv->rx_list.count--;
	list_del(&temp->list);
clean_mem:
	kfree(temp);
	return ret;
}
static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	if (list_empty(&priv->rx_list.list))
		return ret;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == loc) {
			list_del(&comp->list);
			kfree(comp);
			priv->rx_list.count--;
			gfar_process_filer_changes(priv);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
	struct ethtool_flow_spec_container *comp;
	u32 ret = -EINVAL;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (comp->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
			gfar_invert_masks(&cmd->fs);
			ret = 0;
			break;
		}
	}

	return ret;
}

static int gfar_get_cls_all(struct gfar_private *priv,
			    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct ethtool_flow_spec_container *comp;
	u32 i = 0;

	list_for_each_entry(comp, &priv->rx_list.list, list) {
		if (i == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[i] = comp->fs.location;
		i++;
	}

	cmd->data = MAX_FILER_IDX;
	cmd->rule_cnt = i;

	return 0;
}

static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	if (test_bit(GFAR_RESETTING, &priv->state))
		return -EBUSY;

	mutex_lock(&priv->rx_queue_access);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = gfar_set_hash_opts(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
		     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
		    cmd->fs.location >= MAX_FILER_IDX) {
			ret = -EINVAL;
			break;
		}
		ret = gfar_add_cls(priv, &cmd->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gfar_del_cls(priv, cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->rx_queue_access);

	return ret;
}
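
/* Usage sketch (illustration, not part of the driver): classification
 * rules enter through the standard rxnfc interface, e.g.
 *
 *   ethtool -N eth0 flow-type udp4 dst-port 319 action 1 loc 5
 *
 * which reaches gfar_set_nfc() as ETHTOOL_SRXCLSRLINS with
 * ring_cookie 1 and location 5 ("eth0" and the values are
 * placeholders; per the checks above, action must name an existing rx
 * queue and loc must be below MAX_FILER_IDX).
 */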
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			u32 *rule_locs)
{
	struct gfar_private *priv = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->num_rx_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = priv->rx_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gfar_get_cls(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gfar_get_cls_all(priv, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int gfar_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct platform_device *ptp_dev;
	struct device_node *ptp_node;
	struct ptp_qoriq *ptp = NULL;

	info->phc_index = -1;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		return 0;
	}

	ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
	if (ptp_node) {
		ptp_dev = of_find_device_by_node(ptp_node);
		of_node_put(ptp_node);
		if (ptp_dev)
			ptp = platform_get_drvdata(ptp_dev);
	}

	if (ptp)
		info->phc_index = ptp->phc_index;

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}
const struct ethtool_ops gfar_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = gfar_gdrvinfo,
	.get_regs_len = gfar_reglen,
	.get_regs = gfar_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gfar_gcoalesce,
	.set_coalesce = gfar_scoalesce,
	.get_ringparam = gfar_gringparam,
	.set_ringparam = gfar_sringparam,
	.get_pauseparam = gfar_gpauseparam,
	.set_pauseparam = gfar_spauseparam,
	.get_strings = gfar_gstrings,
	.get_sset_count = gfar_sset_count,
	.get_ethtool_stats = gfar_fill_stats,
	.get_msglevel = gfar_get_msglevel,
	.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
	.get_wol = gfar_get_wol,
	.set_wol = gfar_set_wol,
#endif
	.set_rxnfc = gfar_set_nfc,
	.get_rxnfc = gfar_get_nfc,
	.get_ts_info = gfar_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};