// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "davinci_cpdma.h"

#define CPTS_N_ETX_TS	4

int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}
void cpsw_tx_handler(void *token, int len, int status)
{
	struct cpsw_meta_xdp *xmeta;
	struct xdp_frame *xdpf;
	struct net_device *ndev;
	struct netdev_queue *txq;
	struct sk_buff *skb;
	int ch;

	if (cpsw_is_xdpf_handle(token)) {
		xdpf = cpsw_handle_to_xdpf(token);
		xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
		ndev = xmeta->ndev;
		ch = xmeta->ch;
		xdp_return_frame(xdpf);
	} else {
		skb = token;
		ndev = skb->dev;
		ch = skb_get_queue_mapping(skb);
		cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
		dev_kfree_skb_any(skb);
	}

	/* Check whether the queue is stopped due to stalled tx dma; if so,
	 * wake the queue, since we now have free tx descriptors.
	 */
	txq = netdev_get_tx_queue(ndev, ch);
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
}
irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->rx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->misc_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
	cpts_misc_interrupt(cpsw->cpts);
	writel(0x10, &cpsw->wr_regs->misc_en);

	return IRQ_HANDLED;
}
int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx, cur_budget, ch;
	u32 ch_map;
	struct cpsw_vector *txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx, cur_budget, ch;
	u32 ch_map;
	struct cpsw_vector *rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}
void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	struct cpsw_common *cpsw = priv->cpsw;
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;

	/* Untag P0 packets if set for vlan */
	if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
		prio = (rx_vlan_encap_hdr >>
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

		vtag = (prio << VLAN_PRIO_SHIFT) | vid;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
	}

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}
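
/*
 * Editor's worked example (illustrative, not part of the driver): for a
 * frame received VLAN-tagged with VID 100 and priority 5, and with port-0
 * untagging disabled for that VLAN in the ALE, the word decoded above
 * produces vtag = (5 << VLAN_PRIO_SHIFT) | 100 = 0xa064, the tag is
 * offloaded into the skb via __vlan_hwaccel_put_tag(), and the in-band
 * 802.1Q header is then removed from the payload by the
 * memmove()/skb_pull() pair.
 */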
void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ch;

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	ndev->stats.tx_errors++;
	cpsw_intr_disable(cpsw);
	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_stop(cpsw->txv[ch].ch);
		cpdma_chan_start(cpsw->txv[ch].ch);
	}

	cpsw_intr_enable(cpsw);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}
void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link
		 * speed, split proportionally according to the next
		 * potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate;
		ch_budget = (NAPI_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = NAPI_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = NAPI_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}
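
/*
 * Editor's worked example (illustrative, assuming NAPI_POLL_WEIGHT is 64):
 * with three tx channels on a 1000 Mbps link where ch0 is rate limited to
 * 300000 kbps, ch1 to 100000 kbps and ch2 is unlimited, max_rate = 1000000
 * and consumed_rate = 400000. The unlimited channel gets
 * ch_budget = (64 - (400000 * 64) / 1000000) / 1 = 39, while ch0 gets
 * (300000 * 64) / 1000000 = 19 and ch1 gets (100000 * 64) / 1000000 = 6,
 * so the whole budget of 64 is consumed and nothing is left over to top up
 * the biggest-rate channel.
 */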
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
		     int ale_ageout, phys_addr_t desc_mem_phys,
		     int descs_pool_size)
{
	u32 slave_offset, sliver_offset, slave_size;
	struct cpsw_ale_params ale_params;
	struct cpsw_platform_data *data;
	struct cpdma_params dma_params;
	struct device *dev = cpsw->dev;
	struct device_node *cpts_node;
	void __iomem *cpts_regs;
	int ret = 0, i;

	data = &cpsw->data;
	cpsw->rx_ch_num = 1;
	cpsw->tx_ch_num = 1;

	cpsw->version = readl(&cpsw->regs->id_ver);

	memset(&dma_params, 0, sizeof(dma_params));
	memset(&ale_params, 0, sizeof(ale_params));

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
		cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
		cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
		dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
		dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
		ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
		slave_offset = CPSW1_SLAVE_OFFSET;
		slave_size = CPSW1_SLAVE_SIZE;
		sliver_offset = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
		cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
		cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
		dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
		dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
		ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
		slave_offset = CPSW2_SLAVE_OFFSET;
		slave_size = CPSW2_SLAVE_SIZE;
		sliver_offset = CPSW2_SLIVER_OFFSET;
		dma_params.desc_mem_phys = desc_mem_phys;
		break;
	default:
		dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
		return -ENODEV;
	}

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];
		void __iomem *regs = cpsw->regs;

		slave->slave_num = i;
		slave->data = &cpsw->data.slave_data[i];
		slave->regs = regs + slave_offset;
		slave->port_vlan = slave->data->dual_emac_res_vlan;
		slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
		if (IS_ERR(slave->mac_sl))
			return PTR_ERR(slave->mac_sl);

		slave_offset += slave_size;
		sliver_offset += SLIVER_SIZE;
	}

	ale_params.dev = dev;
	ale_params.ale_ageout = ale_ageout;
	ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
	ale_params.dev_id = "cpsw";
	ale_params.bus_freq = cpsw->bus_freq_mhz * 1000000;

	cpsw->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(cpsw->ale)) {
		dev_err(dev, "error initializing ale engine\n");
		return PTR_ERR(cpsw->ale);
	}

	dma_params.dev = dev;
	dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
	dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
	dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
	dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
	dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;

	dma_params.num_chan = data->channels;
	dma_params.has_soft_reset = true;
	dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size = data->bd_ram_size;
	dma_params.desc_align = 16;
	dma_params.has_ext_regs = true;
	dma_params.desc_hw_addr = dma_params.desc_mem_phys;
	dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
	dma_params.descs_pool_size = descs_pool_size;

	cpsw->dma = cpdma_ctlr_create(&dma_params);
	if (!cpsw->dma) {
		dev_err(dev, "error initializing dma\n");
		return -ENOMEM;
	}

	cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
	if (!cpts_node)
		cpts_node = cpsw->dev->of_node;

	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
				 CPTS_N_ETX_TS);
	if (IS_ERR(cpsw->cpts)) {
		ret = PTR_ERR(cpsw->cpts);
		cpdma_ctlr_destroy(cpsw->dma);
	}
	of_node_put(cpts_node);

	return ret;
}
#if IS_ENABLED(CONFIG_TI_CPTS)

static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	u32 ts_en, seq_id;

	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (priv->tx_ts_enabled)
		ts_en |= CPSW_V1_TS_TX_EN;

	if (priv->rx_ts_enabled)
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}

static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (cpsw->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = priv->rx_ts_enabled;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
#else
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_TI_CPTS */

int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
	struct phy_device *phy;

	if (!netif_running(dev))
		return -EINVAL;

	phy = cpsw->slaves[slave_no].phy;

	if (!phy_has_hwtstamp(phy)) {
		switch (cmd) {
		case SIOCSHWTSTAMP:
			return cpsw_hwtstamp_set(dev, req);
		case SIOCGHWTSTAMP:
			return cpsw_hwtstamp_get(dev, req);
		}
	}

	if (phy)
		return phy_mii_ioctl(phy, req, cmd);

	return -EOPNOTSUPP;
}
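
/*
 * Editor's illustration (hypothetical userspace snippet, not part of the
 * driver): the SIOCSHWTSTAMP path above is typically exercised like this,
 * with "eth0" standing in for a CPSW port netdev.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	int enable_ptp_timestamping(void)
 *	{
 *		struct hwtstamp_config cfg = {
 *			.tx_type = HWTSTAMP_TX_ON,
 *			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *		};
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&cfg;
 *		return ioctl(fd, SIOCSHWTSTAMP, &ifr); // driver may adjust cfg
 *	}
 */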
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if ((ch_rate < min_rate && ch_rate)) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(cpsw);
	return ret;
}
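
/*
 * Editor's note (illustrative, assumes the standard per-queue sysfs knob):
 * this ndo is normally reached by writing a rate in Mbps to
 * /sys/class/net/<iface>/queues/tx-<n>/tx_maxrate, e.g.
 *
 *	echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * which requests a 100 Mbps cap on tx queue 0 and triggers the re-split of
 * NAPI budgets in cpsw_split_res() above.
 */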
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}
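
/*
 * Editor's illustration, assuming CPSW_FIFO_SHAPERS_NUM is 3: with
 * num_tc = 3, tc2 (lowest priority) maps to FIFO 0, while the remaining
 * classes map to the shaper-capable FIFOs, tc0 -> FIFO 3 and tc1 -> FIFO 2.
 */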
bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* shaping has to stay enabled for the highest FIFOs linearly,
	 * and a FIFO's bandwidth must not exceed what the interface allows
	 */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	send_pct = slave_read(slave, SEND_PERCENT);
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

		shift = (i - 1) * 8;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (!val)
				val = 1;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}
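
/*
 * Editor's worked example (illustrative): with a 100 Mbps link
 * (shp_cfg_speed = 100) and an idleslope request of bw = 20000 kbps for
 * FIFO 3, val = DIV_ROUND_UP(20000, 100 * 10) = 20, i.e. 20% of the wire
 * rate is written into FIFO 3's byte of the SEND_PERCENT register, and the
 * reported bandwidth is DIV_ROUND_CLOSEST(20 * 100, 100) = 20 Mbps.
 */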
static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);

	val = slave_read(slave, tx_in_ctl_rg);
	if (cpsw_shp_is_off(priv)) {
		/* disable FIFOs rate limited queues */
		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

		/* set type of FIFO queues to normal priority mode */
		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

		/* set type of FIFO queues to be rate limited */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}

	/* toggle a FIFO rate limited queue */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);

	/* FIFO transmit shape enable */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}
/* Defaults:
 * class A - prio 3
 * class B - prio 2
 * shaping for class A should be set first
 */
static int cpsw_set_cbs(struct net_device *ndev,
			struct tc_cbs_qopt_offload *qopt)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw = 0;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

	/* enable channels in backward order, as the highest FIFOs must be
	 * rate limited first and for consistency with CPDMA rate limited
	 * channels, which are also used in backward order. FIFO0 cannot be
	 * rate limited.
	 */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}

	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;

	/* shapers can be set if link speed is known */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	if (slave->phy && slave->phy->link) {
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		return -1;
	}

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}

	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);
	return ret;
}
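
/*
 * Editor's illustration (hypothetical commands, not from the driver): the
 * CBS offload above is typically reached via tc, after an mqprio root qdisc
 * has mapped priorities to traffic classes, e.g.
 *
 *	tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \
 *		map 2 2 1 0 2 2 2 2 queues 1@0 1@1 1@2 hw 1
 *	tc qdisc replace dev eth0 parent 100:1 cbs idleslope 20000 \
 *		sendslope -980000 hicredit 30 locredit -1470 offload 1
 *
 * qopt->idleslope arrives in kbps, so idleslope 20000 asks for roughly
 * 20 Mbps for the class mapped to that queue.
 */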
static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	if (num_tc) {
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}
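
/*
 * Editor's worked example (illustrative, again assuming
 * CPSW_FIFO_SHAPERS_NUM is 3): with num_tc = 3 and
 * prio_tc_map = {2, 2, 1, 0, 2, 2, 2, 2} (the mqprio "map" from the example
 * above), cpsw_tc_to_fifo() yields FIFO 0 for tc2, FIFO 2 for tc1 and
 * FIFO 3 for tc0, so the nibble-per-priority register value becomes
 * tx_prio_map = 0x00003200: priorities 0, 1 and 4-7 use FIFO 0, priority 2
 * uses FIFO 2 and priority 3 uses FIFO 3.
 */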
static int cpsw_qos_setup_tc_block(struct net_device *ndev,
				   struct flow_block_offload *f);

int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
		      void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	case TC_SETUP_BLOCK:
		return cpsw_qos_setup_tc_block(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}

void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}
int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct page_pool *pool;
	struct page *page;
	int ch_buf_num;
	int ch, i, ret;
	dma_addr_t dma;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		pool = cpsw->page_pool[ch];
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			page = page_pool_dev_alloc_pages(pool);
			if (!page) {
				cpsw_err(priv, ifup, "allocate rx page err\n");
				return -ENOMEM;
			}

			xmeta = page_address(page) + CPSW_XMETA_OFFSET;
			xmeta->ndev = priv->ndev;
			xmeta->ch = ch;

			dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
			ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
							    page, dma,
							    cpsw->rx_packet_max,
							    0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit page to channel %d rx, error %d\n",
					 ch, ret);
				page_pool_recycle_direct(pool, page);
				return ret;
			}
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}
static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
					       int size)
{
	struct page_pool_params pp_params = {};
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP;
	pp_params.pool_size = size;
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = cpsw->dev;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		dev_err(cpsw->dev, "cannot create rx page pool\n");

	return pool;
}

static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
{
	struct page_pool *pool;
	int ret = 0, pool_size;

	pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
	pool = cpsw_create_page_pool(cpsw, pool_size);
	if (IS_ERR(pool))
		ret = PTR_ERR(pool);
	else
		cpsw->page_pool[ch] = pool;

	return ret;
}

static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int ret;

	pool = cpsw->page_pool[ch];
	rxq = &priv->xdp_rxq[ch];

	ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0);
	if (ret)
		return ret;

	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (ret)
		xdp_rxq_info_unreg(rxq);

	return ret;
}

static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];

	if (!xdp_rxq_info_is_reg(rxq))
		return;

	xdp_rxq_info_unreg(rxq);
}

void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
		}

		page_pool_destroy(cpsw->page_pool[ch]);
		cpsw->page_pool[ch] = NULL;
	}
}

int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ret = cpsw_create_rx_pool(cpsw, ch);
		if (ret)
			goto err_cleanup;

		/* sharing the same page pool between both ndevs is allowed
		 * because their rx handlers never run simultaneously
		 */
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
			if (ret)
				goto err_cleanup;
		}
	}

	return 0;

err_cleanup:
	cpsw_destroy_xdp_rxqs(cpsw);

	return ret;
}
static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;

	if (!priv->xdpi.prog && !prog)
		return 0;

	WRITE_ONCE(priv->xdp_prog, prog);

	xdp_attachment_setup(&priv->xdpi, bpf);

	return 0;
}

int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return cpsw_xdp_prog_setup(priv, bpf);

	default:
		return -EINVAL;
	}
}

int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
		      struct page *page, int port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct cpdma_chan *txch;
	dma_addr_t dma;
	int ret;

	xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
	xmeta->ndev = priv->ndev;
	xmeta->ch = 0;
	txch = cpsw->txv[0].ch;

	if (page) {
		dma = page_pool_get_dma_addr(page);
		dma += xdpf->headroom + sizeof(struct xdp_frame);
		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
					       dma, xdpf->len, port);
	} else {
		if (sizeof(*xmeta) > xdpf->headroom)
			return -EINVAL;

		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
					xdpf->data, xdpf->len, port);
	}

	if (ret)
		priv->ndev->stats.tx_dropped++;

	return ret;
}
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
		 struct page *page, int port, int *len)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *ndev = priv->ndev;
	int ret = CPSW_XDP_CONSUMED;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog)
		return CPSW_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);
	/* XDP prog might have changed packet data and boundaries */
	*len = xdp->data_end - xdp->data;

	switch (act) {
	case XDP_PASS:
		ret = CPSW_XDP_PASS;
		goto out;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
			xdp_return_frame_rx_napi(xdpf);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(ndev, xdp, prog))
			goto drop;

		/* Have to flush here, per packet, instead of doing it in bulk
		 * at the end of the napi handler. The RX devices on this
		 * particular hardware share a common queue, so the incoming
		 * device might change per packet.
		 */
		xdp_do_flush_map();
		break;
	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_DROP:
		ndev->stats.rx_bytes += *len;
		ndev->stats.rx_packets++;
		goto drop;
	}

	ndev->stats.rx_bytes += *len;
	ndev->stats.rx_packets++;
out:
	return ret;
drop:
	page_pool_recycle_direct(cpsw->page_pool[ch], page);
	return ret;
}
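
/*
 * Editor's illustration (hypothetical program, not part of the driver): the
 * smallest XDP program that can be attached through cpsw_ndo_bpf() above,
 * built with clang/libbpf and loaded with e.g.
 * "ip link set dev eth0 xdp obj xdp_pass.o sec xdp".
 *
 *	// xdp_pass.c
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("xdp")
 *	int xdp_pass_prog(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;	// hand every frame to the stack
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */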
static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
					  struct netlink_ext_ack *extack,
					  struct flow_cls_offload *cls,
					  u64 rate_pkt_ps)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct flow_match_eth_addrs match;
	u32 port_id;
	int ret;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
		return -EOPNOTSUPP;
	}

	flow_rule_match_eth_addrs(rule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on source MAC not supported");
		return -EOPNOTSUPP;
	}

	port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (is_broadcast_ether_addr(match.key->dst) &&
	    is_broadcast_ether_addr(match.mask->dst)) {
		ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
		if (ret)
			return ret;

		priv->ale_bc_ratelimit.cookie = cls->cookie;
		priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
		ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
		if (ret)
			return ret;

		priv->ale_mc_ratelimit.cookie = cls->cookie;
		priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
					       const struct flow_action_entry *act,
					       struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	const struct flow_action_entry *act;
	int i, ret;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
			if (ret)
				return ret;

			return cpsw_qos_clsflower_add_policer(priv, extack, cls,
							      act->police.rate_pkt_ps);
		default:
			NL_SET_ERR_MSG_MOD(extack, "Action not supported");
			return -EOPNOTSUPP;
		}
	}
	return -EOPNOTSUPP;
}

static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
{
	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
		priv->ale_bc_ratelimit.cookie = 0;
		priv->ale_bc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
	}

	if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
		priv->ale_mc_ratelimit.cookie = 0;
		priv->ale_mc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
	}

	return 0;
}

static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return cpsw_qos_configure_clsflower(priv, cls_flower);
	case FLOW_CLS_DESTROY:
		return cpsw_qos_delete_clsflower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct cpsw_priv *priv = cb_priv;
	int ret;

	if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
		return -EOPNOTSUPP;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->dev);
		return ret;
	}

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	pm_runtime_put(priv->dev);
	return ret;
}

static LIST_HEAD(cpsw_qos_block_cb_list);

static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
					  cpsw_qos_setup_tc_block_cb,
					  priv, priv, true);
}

void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
{
	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (priv->ale_bc_ratelimit.cookie)
		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
					 priv->ale_bc_ratelimit.rate_packet_ps);

	if (priv->ale_mc_ratelimit.cookie)
		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
					 priv->ale_mc_ratelimit.rate_packet_ps);
}
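
/*
 * Editor's illustration (hypothetical commands, not from the driver): the
 * clsflower policer offload above only accepts packet-rate policing keyed
 * on the broadcast or multicast destination MAC, e.g.
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress flower skip_sw \
 *		dst_mac ff:ff:ff:ff:ff:ff \
 *		action police pkts_rate 1000 pkts_burst 32 conform-exceed drop/pipe
 *
 * which programs the ALE to limit ingress broadcast traffic on the port to
 * roughly 1000 packets per second.
 */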