cpsw_new.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Texas Instruments Ethernet Switch Driver
  4. *
  5. * Copyright (C) 2019 Texas Instruments
  6. */
  7. #include <linux/io.h>
  8. #include <linux/clk.h>
  9. #include <linux/timer.h>
  10. #include <linux/module.h>
  11. #include <linux/irqreturn.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/if_ether.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/net_tstamp.h>
  16. #include <linux/phy.h>
  17. #include <linux/phy/phy.h>
  18. #include <linux/delay.h>
  19. #include <linux/pinctrl/consumer.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/gpio/consumer.h>
  22. #include <linux/of.h>
  23. #include <linux/of_mdio.h>
  24. #include <linux/of_net.h>
  25. #include <linux/of_device.h>
  26. #include <linux/if_vlan.h>
  27. #include <linux/kmemleak.h>
  28. #include <linux/sys_soc.h>
  29. #include <net/switchdev.h>
  30. #include <net/page_pool.h>
  31. #include <net/pkt_cls.h>
  32. #include <net/devlink.h>
  33. #include "cpsw.h"
  34. #include "cpsw_ale.h"
  35. #include "cpsw_priv.h"
  36. #include "cpsw_sl.h"
  37. #include "cpsw_switchdev.h"
  38. #include "cpts.h"
  39. #include "davinci_cpdma.h"
  40. #include <net/pkt_sched.h>
/* Module-wide tunables, initialized to the driver defaults from cpsw_priv.h. */
static int debug_level;		/* netif message level for cpsw_info() etc. */
static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;	/* ALE address ageout */
static int rx_packet_max = CPSW_MAX_PACKET_SIZE;	/* max RX frame size */
static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; /* CPDMA descriptor pool */

/* Private data attached to a devlink instance: back-pointer to shared state. */
struct cpsw_devlink {
	struct cpsw_common *cpsw;
};

/* Driver-specific devlink parameter IDs, allocated above the generic range. */
enum cpsw_devlink_param_id {
	CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	CPSW_DL_PARAM_SWITCH_MODE,
	CPSW_DL_PARAM_ALE_BYPASS,
};
/* struct cpsw_common is not needed, kept here for compatibility
 * reasons with the old driver
 */
  56. static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
  57. struct cpsw_priv *priv)
  58. {
  59. if (priv->emac_port == HOST_PORT_NUM)
  60. return -1;
  61. return priv->emac_port - 1;
  62. }
  63. static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
  64. {
  65. return !cpsw->data.dual_emac;
  66. }
/* Enable/disable promiscuous RX for a port in dual-EMAC mode.
 * Promiscuity is implemented with the global ALE "P0 unknown unicast
 * flood" control, so it is shared by both ports: it can only be turned
 * off once no slave interface has IFF_PROMISC set.  In switch mode this
 * path is not used.
 */
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	bool enable_uni = false;
	int i;

	if (cpsw_is_switch_en(cpsw))
		return;

	/* Enabling promiscuous mode for one interface will be
	 * common for both the interfaces as the interfaces share
	 * the same hardware resource.
	 */
	for (i = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].ndev &&
		    (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
			enable_uni = true;

	/* keep flooding on while any other port is still promiscuous */
	if (!enable && enable_uni) {
		enable = enable_uni;
		dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
	}

	if (enable) {
		/* Enable unknown unicast, reg/unreg mcast */
		cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
				     ALE_P0_UNI_FLOOD, 1);

		dev_dbg(cpsw->dev, "promiscuity enabled\n");
	} else {
		/* Disable unknown unicast */
		cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
				     ALE_P0_UNI_FLOOD, 0);
		dev_dbg(cpsw->dev, "promiscuity disabled\n");
	}
}
/**
 * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes
 * it if it's not deleted
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret, slave_no;

	slave_no = cpsw_slave_index(cpsw, priv);
	/* vid < 0 means "use this port's own port VLAN" */
	if (vid < 0)
		vid = cpsw->slaves[slave_no].port_vlan;

	mask = ALE_PORT_HOST;
	/* vid 0 entries are programmed without the VLAN qualifier */
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}
  123. static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
  124. {
  125. struct addr_sync_ctx *sync_ctx = ctx;
  126. struct netdev_hw_addr *ha;
  127. int found = 0, ret = 0;
  128. if (!vdev || !(vdev->flags & IFF_UP))
  129. return 0;
  130. /* vlan address is relevant if its sync_cnt != 0 */
  131. netdev_for_each_mc_addr(ha, vdev) {
  132. if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
  133. found = ha->sync_cnt;
  134. break;
  135. }
  136. }
  137. if (found)
  138. sync_ctx->consumed++;
  139. if (sync_ctx->flush) {
  140. if (!found)
  141. cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
  142. return 0;
  143. }
  144. if (found)
  145. ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
  146. return ret;
  147. }
  148. static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
  149. {
  150. struct addr_sync_ctx sync_ctx;
  151. int ret;
  152. sync_ctx.consumed = 0;
  153. sync_ctx.addr = addr;
  154. sync_ctx.ndev = ndev;
  155. sync_ctx.flush = 0;
  156. ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
  157. if (sync_ctx.consumed < num && !ret)
  158. ret = cpsw_set_mc(ndev, addr, -1, 1);
  159. return ret;
  160. }
  161. static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
  162. {
  163. struct addr_sync_ctx sync_ctx;
  164. sync_ctx.consumed = 0;
  165. sync_ctx.addr = addr;
  166. sync_ctx.ndev = ndev;
  167. sync_ctx.flush = 1;
  168. vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
  169. if (sync_ctx.consumed == num)
  170. cpsw_set_mc(ndev, addr, -1, 0);
  171. return 0;
  172. }
  173. static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
  174. {
  175. struct addr_sync_ctx *sync_ctx = ctx;
  176. struct netdev_hw_addr *ha;
  177. int found = 0;
  178. if (!vdev || !(vdev->flags & IFF_UP))
  179. return 0;
  180. /* vlan address is relevant if its sync_cnt != 0 */
  181. netdev_for_each_mc_addr(ha, vdev) {
  182. if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
  183. found = ha->sync_cnt;
  184. break;
  185. }
  186. }
  187. if (!found)
  188. return 0;
  189. sync_ctx->consumed++;
  190. cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
  191. return 0;
  192. }
  193. static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
  194. {
  195. struct addr_sync_ctx sync_ctx;
  196. sync_ctx.addr = addr;
  197. sync_ctx.ndev = ndev;
  198. sync_ctx.consumed = 0;
  199. vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
  200. if (sync_ctx.consumed < num)
  201. cpsw_set_mc(ndev, addr, -1, 0);
  202. return 0;
  203. }
/* .ndo_set_rx_mode: program promiscuity, allmulti and the multicast
 * filter for this slave port whenever the stack changes RX flags or
 * the multicast list.
 */
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
		return;
	}

	/* Disable promiscuous mode */
	cpsw_set_promiscious(ndev, false);

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale,
			      ndev->flags & IFF_ALLMULTI, priv->emac_port);

	/* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}
  223. static unsigned int cpsw_rxbuf_total_len(unsigned int len)
  224. {
  225. len += CPSW_HEADROOM_NA;
  226. len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  227. return SKB_DATA_ALIGN(len);
  228. }
/* CPDMA RX completion handler, called once per received buffer.
 *
 * @token:  the struct page that backed the DMA buffer
 * @len:    received length in bytes
 * @status: CPDMA status; negative on teardown/error, otherwise encodes
 *          the ingress port and the CPDMA_RX_VLAN_ENCAP flag
 *
 * Runs the attached XDP program if any, otherwise builds an skb around
 * the page and hands it to the stack.  In every path a page (the old
 * one on drop/requeue, a fresh one on success) is resubmitted to the
 * CPDMA channel so the RX ring stays full.
 */
static void cpsw_rx_handler(void *token, int len, int status)
{
	struct page *new_page, *page = token;
	void *pa = page_address(page);
	int headroom = CPSW_HEADROOM_NA;
	struct cpsw_meta_xdp *xmeta;
	struct cpsw_common *cpsw;
	struct net_device *ndev;
	int port, ch, pkt_size;
	struct cpsw_priv *priv;
	struct page_pool *pool;
	struct sk_buff *skb;
	struct xdp_buff xdp;
	int ret = 0;
	dma_addr_t dma;

	/* ndev/channel were stashed in the page when it was submitted */
	xmeta = pa + CPSW_XMETA_OFFSET;
	cpsw = ndev_to_cpsw(xmeta->ndev);
	ndev = xmeta->ndev;
	pkt_size = cpsw->rx_packet_max;
	ch = xmeta->ch;

	if (status >= 0) {
		/* retarget to the ndev of the actual ingress port */
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port)
			ndev = cpsw->slaves[--port].ndev;
	}

	priv = netdev_priv(ndev);
	pool = cpsw->page_pool[ch];

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->usage_count && status >= 0) {
			/* The packet received is for the interface which
			 * is already down and the other interface is up
			 * and running, instead of freeing which results
			 * in reducing of the number of rx descriptor in
			 * DMA engine, requeue page back to cpdma.
			 */
			new_page = page;
			goto requeue;
		}

		/* the interface is going down, pages are purged */
		page_pool_recycle_direct(pool, page);
		return;
	}

	new_page = page_pool_dev_alloc_pages(pool);
	if (unlikely(!new_page)) {
		/* no replacement buffer: drop this packet, reuse its page */
		new_page = page;
		ndev->stats.rx_dropped++;
		goto requeue;
	}

	if (priv->xdp_prog) {
		int size = len;

		xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
		if (status & CPDMA_RX_VLAN_ENCAP) {
			/* hide the VLAN encap header from the XDP prog */
			headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
			size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
		}

		xdp_prepare_buff(&xdp, pa, headroom, size, false);

		ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
		if (ret != CPSW_XDP_PASS)
			goto requeue;

		/* XDP may have moved the packet start */
		headroom = xdp.data - xdp.data_hard_start;

		/* XDP prog can modify vlan tag, so can't use encap header */
		status &= ~CPDMA_RX_VLAN_ENCAP;
	}

	/* pass skb to netstack if no XDP prog or returned XDP_PASS */
	skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
	if (!skb) {
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(pool, page);
		goto requeue;
	}

	skb->offload_fwd_mark = priv->offload_fwd_mark;
	skb_reserve(skb, headroom);
	skb_put(skb, len);
	skb->dev = ndev;
	if (status & CPDMA_RX_VLAN_ENCAP)
		cpsw_rx_vlan_encap(skb);
	if (priv->rx_ts_enabled)
		cpts_rx_timestamp(cpsw->cpts, skb);
	skb->protocol = eth_type_trans(skb, ndev);

	/* mark skb for recycling */
	skb_mark_for_recycle(skb);
	netif_receive_skb(skb);

	ndev->stats.rx_bytes += len;
	ndev->stats.rx_packets++;

requeue:
	/* resubmit a page to CPDMA to keep the RX descriptor ring full */
	xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
	xmeta->ndev = ndev;
	xmeta->ch = ch;

	dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
	ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
				       pkt_size, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOMEM);
		page_pool_recycle_direct(pool, new_page);
	}
}
/* Program the full ALE entry set for VLAN @vid on this port: the VLAN
 * membership itself, a unicast entry for the port MAC and a broadcast
 * multicast entry.  Partial work is rolled back on failure.
 * Returns 0 or a negative error from the ALE layer.
 */
static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
				   unsigned short vid)
{
	struct cpsw_common *cpsw = priv->cpsw;
	int unreg_mcast_mask = 0;
	int mcast_mask;
	u32 port_mask;
	int ret;

	port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;

	mcast_mask = ALE_PORT_HOST;
	/* flood unregistered multicast to the host only under allmulti */
	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = mcast_mask;

	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
				unreg_mcast_mask);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vid;

	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				 mcast_mask, ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	return ret;
}
/* .ndo_vlan_rx_add_vid: add a VLAN filter for this port.  Only used in
 * dual-EMAC mode; in switch mode VLANs are managed via switchdev, so
 * the call is a no-op.  Returns 0 or a negative errno.
 */
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret, i;

	if (cpsw_is_switch_en(cpsw)) {
		dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
		return 0;
	}

	/* the default VLAN is already programmed by host-port init */
	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	/* In dual EMAC, reserved VLAN id should not be used for
	 * creating VLAN interfaces as this can break the dual
	 * EMAC port separation
	 */
	for (i = 0; i < cpsw->data.slaves; i++) {
		if (cpsw->slaves[i].ndev &&
		    vid == cpsw->slaves[i].port_vlan) {
			ret = -EINVAL;
			goto err;
		}
	}

	dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
	ret = cpsw_add_vlan_ale_entry(priv, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}
  390. static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
  391. {
  392. struct cpsw_priv *priv = arg;
  393. if (!vdev || !vid)
  394. return 0;
  395. cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
  396. return 0;
  397. }
/* restore resources after port reset: re-apply VLAN filters and the
 * MQPRIO/CBS/tc-flower offload configuration for this port.
 */
static void cpsw_restore(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;

	/* restore vlan configurations */
	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);

	/* restore MQPRIO offload */
	cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);

	/* restore CBS offload */
	cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);

	/* restore tc flower classifier rules */
	cpsw_qos_clsflower_resume(priv);
}
/* Add a supervisory ALE entry for the STP multicast address
 * 01:80:c2:00:00:00 so BPDUs reach the host with learning blocked.
 */
static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
{
	static const char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};

	cpsw_ale_add_mcast(cpsw->ale, stpa,
			   ALE_PORT_HOST, ALE_SUPER, 0,
			   ALE_MCAST_BLOCK_LEARN_FWD);
}
/* Host-port (port 0) setup for switch mode: normal FIFO mode, default
 * VLAN across all ports, STP supervisory entry, unknown-unicast flood
 * to host enabled and address learning enabled.
 */
static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
{
	int vlan = cpsw->data.default_vlan;

	writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	/* default VLAN: member/unreg-mcast on all ports, reg-mcast flood
	 * limited to the two slave ports
	 */
	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  ALE_PORT_1 | ALE_PORT_2);

	cpsw_init_stp_ale_entry(cpsw);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
	dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
}
/* Host-port (port 0) setup for dual-EMAC mode: dual-MAC FIFO mode,
 * unknown-unicast flooding and learning disabled, default VLAN spans
 * all ports but floods no multicast.
 */
static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
{
	int vlan = cpsw->data.default_vlan;

	writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
	dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
	/* learning make no sense in dual_mac mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
}
/* One-time (first open) initialization of the switch core and host
 * port: soft reset, ALE start, VLAN-aware mode, priority maps,
 * statistics and FIFO flow control, then mode-specific port-0 setup.
 */
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 control_reg;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* switch to vlan aware mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
	writel(control_reg, &cpsw->regs->control);

	/* setup host port priority mapping */
	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
		       &cpsw->host_port_regs->cpdma_tx_pri_map);
	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation */
	writel_relaxed(0, &cpsw->regs->ptype);

	/* enable statistics collection on all ports */
	writel_relaxed(0x7, &cpsw->regs->stat_port_en);

	/* Enable internal fifo flow control */
	writel(0x7, &cpsw->regs->flow_control);

	if (cpsw_is_switch_en(cpsw))
		cpsw_init_host_port_switch(cpsw);
	else
		cpsw_init_host_port_dual_mac(cpsw);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
}
/* Per-port ALE defaults for dual-EMAC mode: a dedicated port VLAN that
 * isolates this port + host, broadcast towards the host only, a secure
 * unicast entry for the port MAC, unknown-VLAN drop and no learning.
 */
static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
						    struct cpsw_slave *slave)
{
	u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
	struct cpsw_common *cpsw = priv->cpsw;
	u32 reg;

	/* port VLAN register offset differs between IP versions */
	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;
	slave_write(slave, slave->port_vlan, reg);

	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
			   ALE_MCAST_FWD);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
	/* learning make no sense in dual_mac mode */
	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
			     ALE_PORT_NOLEARN, 1);
}
/* Per-port ALE defaults for switch mode: learning enabled, unknown
 * VLANs accepted, SA_UPDATE disabled (see comment below), broadcast
 * forwarding plus a host unicast entry on the port VLAN.
 */
static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
						 struct cpsw_slave *slave)
{
	u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
	struct cpsw_common *cpsw = priv->cpsw;
	u32 reg;

	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 0);
	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
			     ALE_PORT_NOLEARN, 0);
	/* disabling SA_UPDATE required to make stp work, without this setting
	 * Host MAC addresses will jump between ports.
	 * As per TRM MAC address can be defined as unicast supervisory (super)
	 * by setting both (ALE_BLOCKED | ALE_SECURE) which should prevent
	 * SA_UPDATE, but HW seems works incorrectly and setting ALE_SECURE
	 * causes STP packets to be dropped due to ingress filter
	 *	if (source address found) and (secure) and
	 *	   (receive port number != port_number))
	 *	then discard the packet
	 */
	cpsw_ale_control_set(cpsw->ale, priv->emac_port,
			     ALE_PORT_NO_SA_UPDATE, 1);

	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   port_mask, ALE_VLAN, slave->port_vlan,
			   ALE_MCAST_FWD_2);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);

	/* port VLAN register offset differs between IP versions */
	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;
	slave_write(slave, slave->port_vlan, reg);
}
/* phylib adjust_link callback: mirror the negotiated PHY state
 * (speed/duplex/pause) into the MAC SL control word and the ALE port
 * state, and start/stop the netdev TX queues accordingly.
 */
static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	struct phy_device *phy;
	u32 mac_control = 0;

	slave = &cpsw->slaves[priv->emac_port - 1];
	phy = slave->phy;

	if (!phy)
		return;

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;

		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= CPSW_SL_CTL_IFCTL_A;
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */

		if (priv->rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (priv->tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

		/* write the control word only when it actually changed */
		if (mac_control != slave->mac_control)
			cpsw_sl_ctl_set(slave->mac_sl, mac_control);

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, priv->emac_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		netif_tx_wake_all_queues(ndev);

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!");
	} else {
		netif_tx_stop_all_queues(ndev);

		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, priv->emac_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

		cpsw_sl_wait_for_idle(slave->mac_sl, 100);

		cpsw_sl_ctl_reset(slave->mac_sl);
	}

	/* log link changes only when the control word changed */
	if (mac_control != slave->mac_control)
		phy_print_status(phy);

	slave->mac_control = mac_control;

	if (phy->link && cpsw_need_resplit(cpsw))
		cpsw_split_res(cpsw);
}
/* Bring up one slave port: reset its MAC SL, program priority maps and
 * max RX length, install default ALE entries for the current mode, then
 * connect and start the PHY and set the GMII_SEL interface mode.
 */
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct phy_device *phy;

	cpsw_sl_reset(slave->mac_sl, 100);
	cpsw_sl_ctl_reset(slave->mac_sl);

	/* setup priority mapping */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
			  RX_PRIORITY_MAPPING);

	/* the TX priority map and MAX_BLKS registers moved between IP revs */
	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting fullduplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting fullduplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}

	/* setup max packet size, and mac address */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
			  cpsw->rx_packet_max);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	if (cpsw_is_switch_en(cpsw))
		cpsw_port_add_switch_def_ale_entries(priv, slave);
	else
		cpsw_port_add_dual_emac_def_ale_entries(priv, slave);

	if (!slave->data->phy_node)
		dev_err(priv->dev, "no phy found on slave %d\n",
			slave->slave_num);
	phy = of_phy_connect(priv->ndev, slave->data->phy_node,
			     &cpsw_adjust_link, 0, slave->data->phy_if);
	if (!phy) {
		dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
			slave->data->phy_node,
			slave->slave_num);
		return;
	}

	slave->phy = phy;

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
			 slave->data->phy_if);
}
/* .ndo_stop: stop and disconnect this port's PHY, unsync multicast
 * addresses, and — when this is the last open port — tear down the
 * shared NAPI/CPTS/CPDMA/ALE/XDP resources.  Always returns 0.
 */
static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;

	cpsw_info(priv, ifdown, "shutting down ndev\n");
	slave = &cpsw->slaves[priv->emac_port - 1];
	if (slave->phy)
		phy_stop(slave->phy);

	netif_tx_stop_all_queues(priv->ndev);

	if (slave->phy) {
		phy_disconnect(slave->phy);
		slave->phy = NULL;
	}

	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);

	/* last user of the shared resources shuts them down */
	if (cpsw->usage_count <= 1) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpts_unregister(cpsw->cpts);
		cpsw_intr_disable(cpsw);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_ale_stop(cpsw->ale);
		cpsw_destroy_xdp_rxqs(cpsw);
	}

	if (cpsw_need_resplit(cpsw))
		cpsw_split_res(cpsw);

	cpsw->usage_count--;
	pm_runtime_put_sync(cpsw->dev);
	return 0;
}
/* cpsw_ndo_open - ndo_open: bring one CPSW port up.
 *
 * The first opener initializes the resources shared by both ports (host
 * port, XDP rxqs, RX channel buffers, CPTS, NAPI, IRQs); every opener
 * configures its own slave port and restores per-port state. A
 * runtime-PM reference is held while the interface is up and released
 * in cpsw_ndo_stop().
 *
 * Returns 0 on success or a negative errno.
 */
static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	dev_info(priv->dev, "starting ndev. mode: %s\n",
		 cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of tx queues\n");
		goto pm_cleanup;
	}

	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of rx queues\n");
		goto pm_cleanup;
	}

	/* Initialize host and slave ports */
	if (!cpsw->usage_count)
		cpsw_init_host_port(priv);
	cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);

	/* initialize shared resources for every ndev */
	if (!cpsw->usage_count) {
		/* create rxqs for both infs in dual mac as they use same pool
		 * and must be destroyed together when no users.
		 */
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret < 0)
			goto err_cleanup;

		ret = cpsw_fill_rx_channels(priv);
		if (ret < 0)
			goto err_cleanup;

		/* CPTS registration failure is non-fatal; misc irq stays off. */
		if (cpsw->cpts) {
			if (cpts_register(cpsw->cpts))
				dev_err(priv->dev, "error registering cpts device\n");
			else
				writel(0x10, &cpsw->wr_regs->misc_en);
		}

		napi_enable(&cpsw->napi_rx);
		napi_enable(&cpsw->napi_tx);

		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}

		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	cpsw_restore(priv);

	/* Enable Interrupt pacing if configured */
	if (cpsw->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = cpsw->coal_intvl;
		cpsw_set_coalesce(ndev, &coal, NULL, NULL);
	}

	cpdma_ctlr_start(cpsw->dma);
	cpsw_intr_enable(cpsw);
	cpsw->usage_count++;

	return 0;

err_cleanup:
	/* NOTE(review): cpsw_ndo_stop() also calls pm_runtime_put_sync() and
	 * decrements usage_count; reaching it here (before usage_count was
	 * incremented) appears to release one PM reference too many before
	 * pm_cleanup puts again — verify the runtime-PM refcount balance.
	 */
	cpsw_ndo_stop(ndev);
pm_cleanup:
	pm_runtime_put_sync(cpsw->dev);
	return ret;
}
/* cpsw_ndo_start_xmit - ndo_start_xmit: queue one skb on the CPDMA TX
 * channel selected by the skb's queue mapping.
 *
 * Pads short frames up to tx_packet_min, optionally flags the skb for
 * CPTS hardware timestamping, and stops the TX queue when the channel
 * runs out of descriptors (re-waking it if descriptors were freed
 * concurrently).
 */
static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;
	struct netdev_queue *txq;
	struct cpdma_chan *txch;
	int ret, q_idx;

	/* skb_put_padto() frees the skb on failure, so just count the drop. */
	if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Fold out-of-range queue mappings back into the valid channel set. */
	q_idx = skb_get_queue_mapping(skb);
	if (q_idx >= cpsw->tx_ch_num)
		q_idx = q_idx % cpsw->tx_ch_num;

	txch = cpsw->txv[q_idx].ch;
	txq = netdev_get_tx_queue(ndev, q_idx);
	skb_tx_timestamp(skb);
	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
				priv->emac_port);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (cpdma_check_free_tx_desc(txch))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
fail:
	ndev->stats.tx_dropped++;
	netif_tx_stop_queue(txq);

	/* Barrier, so that stop_queue visible to other cpus */
	smp_mb__after_atomic();

	if (cpdma_check_free_tx_desc(txch))
		netif_tx_wake_queue(txq);

	return NETDEV_TX_BUSY;
}
/* cpsw_ndo_set_mac_address - ndo_set_mac_address: change a port's MAC.
 *
 * Swaps the host-port unicast ALE entry (scoped to this port's VLAN)
 * from the old address to the new one, then programs the new MAC into
 * priv, the netdev and the slave MAC registers.
 *
 * Returns 0, -EADDRNOTAVAIL for an invalid address, or a negative errno
 * from runtime PM.
 */
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = (struct sockaddr *)p;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret, slave_no;
	int flags = 0;
	u16 vid = 0;

	slave_no = cpsw_slave_index(cpsw, priv);
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	/* ALE unicast entries are per-VLAN; operate on this port's VLAN. */
	vid = cpsw->slaves[slave_no].port_vlan;
	flags = ALE_VLAN | ALE_SECURE;

	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
			   flags, vid);
	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
			   flags, vid);

	ether_addr_copy(priv->mac_addr, addr->sa_data);
	eth_hw_addr_set(ndev, priv->mac_addr);
	cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);

	pm_runtime_put(cpsw->dev);

	return 0;
}
/* cpsw_ndo_vlan_rx_kill_vid - ndo_vlan_rx_kill_vid: drop a VLAN filter.
 *
 * No-op in switch mode and for the default VLAN. Refuses (-EINVAL) to
 * remove a VLAN that is in use as some port's port VLAN. Otherwise
 * deletes the VLAN itself plus its host unicast/broadcast ALE entries
 * and flushes host multicast entries; individual deletion failures are
 * logged but do not fail the call.
 */
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
				     __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	int i;

	if (cpsw_is_switch_en(cpsw)) {
		dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
		return 0;
	}

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	/* reset the return code as pm_runtime_get_sync() can return
	 * non zero values as well.
	 */
	ret = 0;

	/* Port VLANs are reserved; they cannot be removed via the stack. */
	for (i = 0; i < cpsw->data.slaves; i++) {
		if (cpsw->slaves[i].ndev &&
		    vid == cpsw->slaves[i].port_vlan) {
			ret = -EINVAL;
			goto err;
		}
	}

	dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	if (ret)
		dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
	ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret)
		dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
			ret);
	ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
				 0, ALE_VLAN, vid);
	if (ret)
		dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
			ret);
	cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
	ret = 0;
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}
  857. static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
  858. size_t len)
  859. {
  860. struct cpsw_priv *priv = netdev_priv(ndev);
  861. int err;
  862. err = snprintf(name, len, "p%d", priv->emac_port);
  863. if (err >= len)
  864. return -EINVAL;
  865. return 0;
  866. }
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the RX and TX interrupt handlers by hand with the
 * device interrupts masked, so netpoll can drain queues without IRQs.
 */
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	cpsw_intr_disable(cpsw);
	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
#endif
  877. static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
  878. struct xdp_frame **frames, u32 flags)
  879. {
  880. struct cpsw_priv *priv = netdev_priv(ndev);
  881. struct xdp_frame *xdpf;
  882. int i, nxmit = 0;
  883. if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
  884. return -EINVAL;
  885. for (i = 0; i < n; i++) {
  886. xdpf = frames[i];
  887. if (xdpf->len < READ_ONCE(priv->tx_packet_min))
  888. break;
  889. if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
  890. break;
  891. nxmit++;
  892. }
  893. return nxmit;
  894. }
  895. static int cpsw_get_port_parent_id(struct net_device *ndev,
  896. struct netdev_phys_item_id *ppid)
  897. {
  898. struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
  899. ppid->id_len = sizeof(cpsw->base_mac);
  900. memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);
  901. return 0;
  902. }
/* net_device_ops shared by both CPSW port net_devices. */
static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
	.ndo_eth_ioctl		= cpsw_ndo_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc		= cpsw_ndo_setup_tc,
	.ndo_get_phys_port_name	= cpsw_ndo_get_phys_port_name,
	.ndo_bpf		= cpsw_ndo_bpf,
	.ndo_xdp_xmit		= cpsw_ndo_xdp_xmit,
	.ndo_get_port_parent_id	= cpsw_get_port_parent_id,
};
  924. static void cpsw_get_drvinfo(struct net_device *ndev,
  925. struct ethtool_drvinfo *info)
  926. {
  927. struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
  928. struct platform_device *pdev;
  929. pdev = to_platform_device(cpsw->dev);
  930. strscpy(info->driver, "cpsw-switch", sizeof(info->driver));
  931. strscpy(info->version, "2.0", sizeof(info->version));
  932. strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
  933. }
  934. static int cpsw_set_pauseparam(struct net_device *ndev,
  935. struct ethtool_pauseparam *pause)
  936. {
  937. struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
  938. struct cpsw_priv *priv = netdev_priv(ndev);
  939. int slave_no;
  940. slave_no = cpsw_slave_index(cpsw, priv);
  941. if (!cpsw->slaves[slave_no].phy)
  942. return -EINVAL;
  943. if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
  944. return -EINVAL;
  945. priv->rx_pause = pause->rx_pause ? true : false;
  946. priv->tx_pause = pause->tx_pause ? true : false;
  947. phy_set_asym_pause(cpsw->slaves[slave_no].phy,
  948. priv->rx_pause, priv->tx_pause);
  949. return 0;
  950. }
/* ethtool set_channels: delegate to the shared helper, supplying this
 * driver's RX completion handler for any newly created channels.
 */
static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
}
/* ethtool_ops shared by both CPSW port net_devices; only RX usecs
 * coalescing is supported (see .supported_coalesce_params).
 */
static const struct ethtool_ops cpsw_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_drvinfo		= cpsw_get_drvinfo,
	.get_msglevel		= cpsw_get_msglevel,
	.set_msglevel		= cpsw_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= cpsw_get_ts_info,
	.get_coalesce		= cpsw_get_coalesce,
	.set_coalesce		= cpsw_set_coalesce,
	.get_sset_count		= cpsw_get_sset_count,
	.get_strings		= cpsw_get_strings,
	.get_ethtool_stats	= cpsw_get_ethtool_stats,
	.get_pauseparam		= cpsw_get_pauseparam,
	.set_pauseparam		= cpsw_set_pauseparam,
	.get_wol		= cpsw_get_wol,
	.set_wol		= cpsw_set_wol,
	.get_regs_len		= cpsw_get_regs_len,
	.get_regs		= cpsw_get_regs,
	.begin			= cpsw_ethtool_op_begin,
	.complete		= cpsw_ethtool_op_complete,
	.get_channels		= cpsw_get_channels,
	.set_channels		= cpsw_set_channels,
	.get_link_ksettings	= cpsw_get_link_ksettings,
	.set_link_ksettings	= cpsw_set_link_ksettings,
	.get_eee		= cpsw_get_eee,
	.set_eee		= cpsw_set_eee,
	.nway_reset		= cpsw_nway_reset,
	.get_ringparam		= cpsw_get_ringparam,
	.set_ringparam		= cpsw_set_ringparam,
};
/* cpsw_probe_dt - parse the "ethernet-ports" device-tree node.
 *
 * Fills cpsw->data with fixed platform defaults and one
 * cpsw_slave_data per port: enable state, generic PHY (GMII_SEL),
 * PHY node (fixed-link or phy-handle), phy-mode, MAC address and the
 * reserved dual-EMAC VLAN.
 *
 * Returns 0 on success or a negative errno. On failure the node
 * references taken here are dropped; fixed-link registrations are
 * undone later by cpsw_remove_dt().
 */
static int cpsw_probe_dt(struct cpsw_common *cpsw)
{
	struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
	struct cpsw_platform_data *data = &cpsw->data;
	struct device *dev = cpsw->dev;
	int ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	tmp_node = of_get_child_by_name(node, "ethernet-ports");
	if (!tmp_node)
		return -ENOENT;

	/* This driver only supports the full two-port configuration. */
	data->slaves = of_get_child_count(tmp_node);
	if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
		of_node_put(tmp_node);
		return -ENOENT;
	}

	data->active_slave = 0;
	data->channels = CPSW_MAX_QUEUES;
	data->dual_emac = true;
	data->bd_ram_size = CPSW_BD_RAM_SIZE;
	data->mac_control = 0;

	data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data) {
		of_node_put(tmp_node);
		return -ENOMEM;
	}

	/* Populate all the child nodes here...
	 */
	ret = devm_of_platform_populate(dev);
	/* We do not want to force this, as in some cases may not have child */
	if (ret)
		dev_warn(dev, "Doesn't have any child node\n");

	for_each_child_of_node(tmp_node, port_np) {
		struct cpsw_slave_data *slave_data;
		u32 port_id;

		/* "reg" is the 1-based port number. */
		ret = of_property_read_u32(port_np, "reg", &port_id);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				port_np, ret);
			goto err_node_put;
		}

		if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
			dev_err(dev, "%pOF has invalid port_id %u\n",
				port_np, port_id);
			ret = -EINVAL;
			goto err_node_put;
		}

		slave_data = &data->slave_data[port_id - 1];

		/* status = "disabled" ports are skipped, not treated as error */
		slave_data->disabled = !of_device_is_available(port_np);
		if (slave_data->disabled)
			continue;

		slave_data->slave_node = port_np;
		/* Generic PHY used for the GMII_SEL mode configuration. */
		slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
		if (IS_ERR(slave_data->ifphy)) {
			ret = PTR_ERR(slave_data->ifphy);
			dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
				port_np, ret);
			goto err_node_put;
		}

		if (of_phy_is_fixed_link(port_np)) {
			ret = of_phy_register_fixed_link(port_np);
			if (ret) {
				dev_err_probe(dev, ret, "%pOF failed to register fixed-link phy\n",
					      port_np);
				goto err_node_put;
			}
			slave_data->phy_node = of_node_get(port_np);
		} else {
			slave_data->phy_node =
				of_parse_phandle(port_np, "phy-handle", 0);
		}

		if (!slave_data->phy_node) {
			dev_err(dev, "%pOF no phy found\n", port_np);
			ret = -ENODEV;
			goto err_node_put;
		}

		ret = of_get_phy_mode(port_np, &slave_data->phy_if);
		if (ret) {
			dev_err(dev, "%pOF read phy-mode err %d\n",
				port_np, ret);
			goto err_node_put;
		}

		/* DT MAC address first, then the SoC efuse as fallback. */
		ret = of_get_mac_address(port_np, slave_data->mac_addr);
		if (ret) {
			ret = ti_cm_get_macid(dev, port_id - 1,
					      slave_data->mac_addr);
			if (ret)
				goto err_node_put;
		}

		/* Missing PVID is non-fatal: fall back to the port number. */
		if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
					 &prop)) {
			dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n",
				port_np);
			slave_data->dual_emac_res_vlan = port_id;
			dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
				port_np, slave_data->dual_emac_res_vlan);
		} else {
			slave_data->dual_emac_res_vlan = prop;
		}
	}

	of_node_put(tmp_node);
	return 0;

err_node_put:
	of_node_put(port_np);
	of_node_put(tmp_node);
	return ret;
}
  1096. static void cpsw_remove_dt(struct cpsw_common *cpsw)
  1097. {
  1098. struct cpsw_platform_data *data = &cpsw->data;
  1099. int i = 0;
  1100. for (i = 0; i < cpsw->data.slaves; i++) {
  1101. struct cpsw_slave_data *slave_data = &data->slave_data[i];
  1102. struct device_node *port_np = slave_data->phy_node;
  1103. if (port_np) {
  1104. if (of_phy_is_fixed_link(port_np))
  1105. of_phy_deregister_fixed_link(port_np);
  1106. of_node_put(port_np);
  1107. }
  1108. }
  1109. }
  1110. static int cpsw_create_ports(struct cpsw_common *cpsw)
  1111. {
  1112. struct cpsw_platform_data *data = &cpsw->data;
  1113. struct net_device *ndev, *napi_ndev = NULL;
  1114. struct device *dev = cpsw->dev;
  1115. struct cpsw_priv *priv;
  1116. int ret = 0, i = 0;
  1117. for (i = 0; i < cpsw->data.slaves; i++) {
  1118. struct cpsw_slave_data *slave_data = &data->slave_data[i];
  1119. if (slave_data->disabled)
  1120. continue;
  1121. ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
  1122. CPSW_MAX_QUEUES,
  1123. CPSW_MAX_QUEUES);
  1124. if (!ndev) {
  1125. dev_err(dev, "error allocating net_device\n");
  1126. return -ENOMEM;
  1127. }
  1128. priv = netdev_priv(ndev);
  1129. priv->cpsw = cpsw;
  1130. priv->ndev = ndev;
  1131. priv->dev = dev;
  1132. priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
  1133. priv->emac_port = i + 1;
  1134. priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
  1135. if (is_valid_ether_addr(slave_data->mac_addr)) {
  1136. ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
  1137. dev_info(cpsw->dev, "Detected MACID = %pM\n",
  1138. priv->mac_addr);
  1139. } else {
  1140. eth_random_addr(slave_data->mac_addr);
  1141. dev_info(cpsw->dev, "Random MACID = %pM\n",
  1142. priv->mac_addr);
  1143. }
  1144. eth_hw_addr_set(ndev, slave_data->mac_addr);
  1145. ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
  1146. cpsw->slaves[i].ndev = ndev;
  1147. ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
  1148. NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
  1149. ndev->netdev_ops = &cpsw_netdev_ops;
  1150. ndev->ethtool_ops = &cpsw_ethtool_ops;
  1151. SET_NETDEV_DEV(ndev, dev);
  1152. if (!napi_ndev) {
  1153. /* CPSW Host port CPDMA interface is shared between
  1154. * ports and there is only one TX and one RX IRQs
  1155. * available for all possible TX and RX channels
  1156. * accordingly.
  1157. */
  1158. netif_napi_add(ndev, &cpsw->napi_rx,
  1159. cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
  1160. netif_napi_add_tx(ndev, &cpsw->napi_tx,
  1161. cpsw->quirk_irq ?
  1162. cpsw_tx_poll : cpsw_tx_mq_poll);
  1163. }
  1164. napi_ndev = ndev;
  1165. }
  1166. return ret;
  1167. }
  1168. static void cpsw_unregister_ports(struct cpsw_common *cpsw)
  1169. {
  1170. int i = 0;
  1171. for (i = 0; i < cpsw->data.slaves; i++) {
  1172. if (!cpsw->slaves[i].ndev)
  1173. continue;
  1174. unregister_netdev(cpsw->slaves[i].ndev);
  1175. }
  1176. }
  1177. static int cpsw_register_ports(struct cpsw_common *cpsw)
  1178. {
  1179. int ret = 0, i = 0;
  1180. for (i = 0; i < cpsw->data.slaves; i++) {
  1181. if (!cpsw->slaves[i].ndev)
  1182. continue;
  1183. /* register the network device */
  1184. ret = register_netdev(cpsw->slaves[i].ndev);
  1185. if (ret) {
  1186. dev_err(cpsw->dev,
  1187. "cpsw: err registering net device%d\n", i);
  1188. cpsw->slaves[i].ndev = NULL;
  1189. break;
  1190. }
  1191. }
  1192. if (ret)
  1193. cpsw_unregister_ports(cpsw);
  1194. return ret;
  1195. }
  1196. bool cpsw_port_dev_check(const struct net_device *ndev)
  1197. {
  1198. if (ndev->netdev_ops == &cpsw_netdev_ops) {
  1199. struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
  1200. return !cpsw->data.dual_emac;
  1201. }
  1202. return false;
  1203. }
  1204. static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
  1205. {
  1206. int set_val = 0;
  1207. int i;
  1208. if (!cpsw->ale_bypass &&
  1209. (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
  1210. set_val = 1;
  1211. dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);
  1212. for (i = 0; i < cpsw->data.slaves; i++) {
  1213. struct net_device *sl_ndev = cpsw->slaves[i].ndev;
  1214. struct cpsw_priv *priv = netdev_priv(sl_ndev);
  1215. priv->offload_fwd_mark = set_val;
  1216. }
  1217. }
/* cpsw_netdevice_port_link - handle a port being linked to a bridge.
 *
 * Only one hardware bridge is supported: the first port to join records
 * the bridge device; any later port must join the same bridge
 * (-EOPNOTSUPP otherwise). On success the port is offloaded to
 * switchdev, marked as a bridge member, and offload_fwd_mark is
 * refreshed.
 *
 * Returns NOTIFY_DONE (0) on success or a negative errno.
 */
static int cpsw_netdevice_port_link(struct net_device *ndev,
				    struct net_device *br_ndev,
				    struct netlink_ext_ack *extack)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int err;

	if (!cpsw->br_members) {
		cpsw->hw_bridge_dev = br_ndev;
	} else {
		/* This is adding the port to a second bridge, this is
		 * unsupported
		 */
		if (cpsw->hw_bridge_dev != br_ndev)
			return -EOPNOTSUPP;
	}

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		return err;

	cpsw->br_members |= BIT(priv->emac_port);

	cpsw_port_offload_fwd_mark_update(cpsw);

	return NOTIFY_DONE;
}
/* cpsw_netdevice_port_unlink - handle a port leaving the bridge.
 *
 * Drops the switchdev offload, clears the port's membership bit,
 * refreshes offload_fwd_mark, and forgets the bridge device once the
 * last member is gone.
 */
static void cpsw_netdevice_port_unlink(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);

	cpsw->br_members &= ~BIT(priv->emac_port);

	cpsw_port_offload_fwd_mark_update(cpsw);

	if (!cpsw->br_members)
		cpsw->hw_bridge_dev = NULL;
}
  1252. /* netdev notifier */
  1253. static int cpsw_netdevice_event(struct notifier_block *unused,
  1254. unsigned long event, void *ptr)
  1255. {
  1256. struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
  1257. struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
  1258. struct netdev_notifier_changeupper_info *info;
  1259. int ret = NOTIFY_DONE;
  1260. if (!cpsw_port_dev_check(ndev))
  1261. return NOTIFY_DONE;
  1262. switch (event) {
  1263. case NETDEV_CHANGEUPPER:
  1264. info = ptr;
  1265. if (netif_is_bridge_master(info->upper_dev)) {
  1266. if (info->linking)
  1267. ret = cpsw_netdevice_port_link(ndev,
  1268. info->upper_dev,
  1269. extack);
  1270. else
  1271. cpsw_netdevice_port_unlink(ndev);
  1272. }
  1273. break;
  1274. default:
  1275. return NOTIFY_DONE;
  1276. }
  1277. return notifier_from_errno(ret);
  1278. }
/* Notifier block watching bridge link/unlink events for CPSW ports. */
static struct notifier_block cpsw_netdevice_nb __read_mostly = {
	.notifier_call = cpsw_netdevice_event,
};
/* Register the netdevice notifier and the switchdev notifiers; on
 * switchdev registration failure the netdevice notifier is rolled back.
 * Returns 0 on success or a negative errno.
 */
static int cpsw_register_notifiers(struct cpsw_common *cpsw)
{
	int ret = 0;

	ret = register_netdevice_notifier(&cpsw_netdevice_nb);
	if (ret) {
		dev_err(cpsw->dev, "can't register netdevice notifier\n");
		return ret;
	}

	ret = cpsw_switchdev_register_notifiers(cpsw);
	if (ret)
		unregister_netdevice_notifier(&cpsw_netdevice_nb);

	return ret;
}
/* Reverse of cpsw_register_notifiers(). */
static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
{
	cpsw_switchdev_unregister_notifiers(cpsw);
	unregister_netdevice_notifier(&cpsw_netdevice_nb);
}
/* No devlink ops are needed; all functionality is exposed through the
 * driver-specific runtime parameters in cpsw_devlink_params.
 */
static const struct devlink_ops cpsw_devlink_ops = {
};
/* devlink "switch_mode" get: true = switch mode, false = dual-EMAC. */
static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
				   struct devlink_param_gset_ctx *ctx)
{
	struct cpsw_devlink *dl_priv = devlink_priv(dl);
	struct cpsw_common *cpsw = dl_priv->cpsw;

	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);

	if (id != CPSW_DL_PARAM_SWITCH_MODE)
		return -EOPNOTSUPP;

	ctx->val.vbool = !cpsw->data.dual_emac;

	return 0;
}
/* cpsw_dl_switch_mode_set - devlink "switch_mode" set: flip between
 * switch and dual-EMAC mode.
 *
 * Refused while any port is still a bridge member. If no interface is
 * running, only the mode flag and per-port VLANs are updated. Otherwise
 * the ALE is put into bypass, its table cleared, the host port
 * reprogrammed for the new mode, per-port VLANs / minimum packet sizes
 * / default ALE entries rewritten, and bypass lifted again — all under
 * rtnl_lock. Returns 0 or a negative errno.
 */
static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
				   struct devlink_param_gset_ctx *ctx)
{
	struct cpsw_devlink *dl_priv = devlink_priv(dl);
	struct cpsw_common *cpsw = dl_priv->cpsw;
	int vlan = cpsw->data.default_vlan;
	bool switch_en = ctx->val.vbool;
	bool if_running = false;
	int i;

	dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);

	if (id != CPSW_DL_PARAM_SWITCH_MODE)
		return -EOPNOTSUPP;

	/* Already in the requested mode? */
	if (switch_en == !cpsw->data.dual_emac)
		return 0;

	if (!switch_en && cpsw->br_members) {
		dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n");
		return -EINVAL;
	}

	rtnl_lock();

	/* Check whether any port is currently up. */
	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];
		struct net_device *sl_ndev = slave->ndev;

		if (!sl_ndev || !netif_running(sl_ndev))
			continue;

		if_running = true;
	}

	if (!if_running) {
		/* all ndevs are down */
		cpsw->data.dual_emac = !switch_en;
		for (i = 0; i < cpsw->data.slaves; i++) {
			struct cpsw_slave *slave = &cpsw->slaves[i];
			struct net_device *sl_ndev = slave->ndev;

			if (!sl_ndev)
				continue;

			if (switch_en)
				vlan = cpsw->data.default_vlan;
			else
				vlan = slave->data->dual_emac_res_vlan;
			slave->port_vlan = vlan;
		}
		goto exit;
	}

	if (switch_en) {
		dev_info(cpsw->dev, "Enable switch mode\n");

		/* enable bypass - no forwarding; all traffic goes to Host */
		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);

		/* clean up ALE table */
		cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
		cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);

		cpsw_init_host_port_switch(cpsw);

		for (i = 0; i < cpsw->data.slaves; i++) {
			struct cpsw_slave *slave = &cpsw->slaves[i];
			struct net_device *sl_ndev = slave->ndev;
			struct cpsw_priv *priv;

			if (!sl_ndev)
				continue;

			priv = netdev_priv(sl_ndev);
			slave->port_vlan = vlan;
			/* Switch mode frames carry a VLAN tag, so the pad
			 * minimum grows accordingly.
			 */
			WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
			if (netif_running(sl_ndev))
				cpsw_port_add_switch_def_ale_entries(priv,
								     slave);
		}

		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
		cpsw->data.dual_emac = false;
	} else {
		dev_info(cpsw->dev, "Disable switch mode\n");

		/* enable bypass - no forwarding; all traffic goes to Host */
		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);

		cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
		cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);

		cpsw_init_host_port_dual_mac(cpsw);

		for (i = 0; i < cpsw->data.slaves; i++) {
			struct cpsw_slave *slave = &cpsw->slaves[i];
			struct net_device *sl_ndev = slave->ndev;
			struct cpsw_priv *priv;

			if (!sl_ndev)
				continue;

			priv = netdev_priv(slave->ndev);
			slave->port_vlan = slave->data->dual_emac_res_vlan;
			WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
			cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
		}

		cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
		cpsw->data.dual_emac = true;
	}
exit:
	rtnl_unlock();

	return 0;
}
  1403. static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
  1404. struct devlink_param_gset_ctx *ctx)
  1405. {
  1406. struct cpsw_devlink *dl_priv = devlink_priv(dl);
  1407. struct cpsw_common *cpsw = dl_priv->cpsw;
  1408. dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
  1409. switch (id) {
  1410. case CPSW_DL_PARAM_ALE_BYPASS:
  1411. ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
  1412. break;
  1413. default:
  1414. return -EOPNOTSUPP;
  1415. }
  1416. return 0;
  1417. }
  1418. static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
  1419. struct devlink_param_gset_ctx *ctx)
  1420. {
  1421. struct cpsw_devlink *dl_priv = devlink_priv(dl);
  1422. struct cpsw_common *cpsw = dl_priv->cpsw;
  1423. int ret = -EOPNOTSUPP;
  1424. dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
  1425. switch (id) {
  1426. case CPSW_DL_PARAM_ALE_BYPASS:
  1427. ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
  1428. ctx->val.vbool);
  1429. if (!ret) {
  1430. cpsw->ale_bypass = ctx->val.vbool;
  1431. cpsw_port_offload_fwd_mark_update(cpsw);
  1432. }
  1433. break;
  1434. default:
  1435. return -EOPNOTSUPP;
  1436. }
  1437. return 0;
  1438. }
/* Driver-specific devlink runtime parameters: "switch_mode" toggles
 * switch vs dual-EMAC operation, "ale_bypass" forces all traffic to
 * the host port.
 */
static const struct devlink_param cpsw_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
			     "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
			     NULL),
	DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
			     "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
};
/* Allocate a devlink instance, register the driver parameters and
 * publish it. Params are registered before devlink_register(), so the
 * error path only needs devlink_free(). Returns 0 or a negative errno.
 */
static int cpsw_register_devlink(struct cpsw_common *cpsw)
{
	struct device *dev = cpsw->dev;
	struct cpsw_devlink *dl_priv;
	int ret = 0;

	cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv), dev);
	if (!cpsw->devlink)
		return -ENOMEM;

	dl_priv = devlink_priv(cpsw->devlink);
	dl_priv->cpsw = cpsw;

	ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
				      ARRAY_SIZE(cpsw_devlink_params));
	if (ret) {
		dev_err(dev, "DL params reg fail ret:%d\n", ret);
		goto dl_unreg;
	}

	devlink_register(cpsw->devlink);
	return ret;

dl_unreg:
	devlink_free(cpsw->devlink);
	return ret;
}
/* Reverse of cpsw_register_devlink(): unpublish, drop params, free. */
static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
{
	devlink_unregister(cpsw->devlink);
	devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
				  ARRAY_SIZE(cpsw_devlink_params));
	devlink_free(cpsw->devlink);
}
/* Device-tree compatibles handled by this switchdev-capable driver. */
static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw-switch"},
	{ .compatible = "ti,am335x-cpsw-switch"},
	{ .compatible = "ti,am4372-cpsw-switch"},
	{ .compatible = "ti,dra7-cpsw-switch"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
/* SoCs needing the IRQ quirk; a match in cpsw_probe() sets
 * cpsw->quirk_irq (selects the single-queue poll handlers).
 */
static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ /* sentinel */ }
};
  1491. static int cpsw_probe(struct platform_device *pdev)
  1492. {
  1493. const struct soc_device_attribute *soc;
  1494. struct device *dev = &pdev->dev;
  1495. struct cpsw_common *cpsw;
  1496. struct resource *ss_res;
  1497. struct gpio_descs *mode;
  1498. void __iomem *ss_regs;
  1499. int ret = 0, ch;
  1500. struct clk *clk;
  1501. int irq;
  1502. cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
  1503. if (!cpsw)
  1504. return -ENOMEM;
  1505. cpsw_slave_index = cpsw_slave_index_priv;
  1506. cpsw->dev = dev;
  1507. cpsw->slaves = devm_kcalloc(dev,
  1508. CPSW_SLAVE_PORTS_NUM,
  1509. sizeof(struct cpsw_slave),
  1510. GFP_KERNEL);
  1511. if (!cpsw->slaves)
  1512. return -ENOMEM;
  1513. mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
  1514. if (IS_ERR(mode)) {
  1515. ret = PTR_ERR(mode);
  1516. dev_err(dev, "gpio request failed, ret %d\n", ret);
  1517. return ret;
  1518. }
  1519. clk = devm_clk_get(dev, "fck");
  1520. if (IS_ERR(clk)) {
  1521. ret = PTR_ERR(clk);
  1522. dev_err(dev, "fck is not found %d\n", ret);
  1523. return ret;
  1524. }
  1525. cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
  1526. ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
  1527. if (IS_ERR(ss_regs)) {
  1528. ret = PTR_ERR(ss_regs);
  1529. return ret;
  1530. }
  1531. cpsw->regs = ss_regs;
  1532. irq = platform_get_irq_byname(pdev, "rx");
  1533. if (irq < 0)
  1534. return irq;
  1535. cpsw->irqs_table[0] = irq;
  1536. irq = platform_get_irq_byname(pdev, "tx");
  1537. if (irq < 0)
  1538. return irq;
  1539. cpsw->irqs_table[1] = irq;
  1540. irq = platform_get_irq_byname(pdev, "misc");
  1541. if (irq <= 0)
  1542. return irq;
  1543. cpsw->misc_irq = irq;
  1544. platform_set_drvdata(pdev, cpsw);
  1545. /* This may be required here for child devices. */
  1546. pm_runtime_enable(dev);
  1547. /* Need to enable clocks with runtime PM api to access module
  1548. * registers
  1549. */
  1550. ret = pm_runtime_resume_and_get(dev);
  1551. if (ret < 0) {
  1552. pm_runtime_disable(dev);
  1553. return ret;
  1554. }
  1555. ret = cpsw_probe_dt(cpsw);
  1556. if (ret)
  1557. goto clean_dt_ret;
  1558. soc = soc_device_match(cpsw_soc_devices);
  1559. if (soc)
  1560. cpsw->quirk_irq = true;
  1561. cpsw->rx_packet_max = rx_packet_max;
  1562. cpsw->descs_pool_size = descs_pool_size;
  1563. eth_random_addr(cpsw->base_mac);
  1564. ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
  1565. (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
  1566. descs_pool_size);
  1567. if (ret)
  1568. goto clean_dt_ret;
  1569. cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
  1570. ss_regs + CPSW1_WR_OFFSET :
  1571. ss_regs + CPSW2_WR_OFFSET;
  1572. ch = cpsw->quirk_irq ? 0 : 7;
  1573. cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
  1574. if (IS_ERR(cpsw->txv[0].ch)) {
  1575. dev_err(dev, "error initializing tx dma channel\n");
  1576. ret = PTR_ERR(cpsw->txv[0].ch);
  1577. goto clean_cpts;
  1578. }
  1579. cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
  1580. if (IS_ERR(cpsw->rxv[0].ch)) {
  1581. dev_err(dev, "error initializing rx dma channel\n");
  1582. ret = PTR_ERR(cpsw->rxv[0].ch);
  1583. goto clean_cpts;
  1584. }
  1585. cpsw_split_res(cpsw);
  1586. /* setup netdevs */
  1587. ret = cpsw_create_ports(cpsw);
  1588. if (ret)
  1589. goto clean_unregister_netdev;
  1590. /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
  1591. * MISC IRQs which are always kept disabled with this driver so
  1592. * we will not request them.
  1593. *
  1594. * If anyone wants to implement support for those, make sure to
  1595. * first request and append them to irqs_table array.
  1596. */
  1597. ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
  1598. 0, dev_name(dev), cpsw);
  1599. if (ret < 0) {
  1600. dev_err(dev, "error attaching irq (%d)\n", ret);
  1601. goto clean_unregister_netdev;
  1602. }
  1603. ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
  1604. 0, dev_name(dev), cpsw);
  1605. if (ret < 0) {
  1606. dev_err(dev, "error attaching irq (%d)\n", ret);
  1607. goto clean_unregister_netdev;
  1608. }
  1609. if (!cpsw->cpts)
  1610. goto skip_cpts;
  1611. ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
  1612. 0, dev_name(&pdev->dev), cpsw);
  1613. if (ret < 0) {
  1614. dev_err(dev, "error attaching misc irq (%d)\n", ret);
  1615. goto clean_unregister_netdev;
  1616. }
  1617. /* Enable misc CPTS evnt_pend IRQ */
  1618. cpts_set_irqpoll(cpsw->cpts, false);
  1619. skip_cpts:
  1620. ret = cpsw_register_notifiers(cpsw);
  1621. if (ret)
  1622. goto clean_unregister_netdev;
  1623. ret = cpsw_register_devlink(cpsw);
  1624. if (ret)
  1625. goto clean_unregister_notifiers;
  1626. ret = cpsw_register_ports(cpsw);
  1627. if (ret)
  1628. goto clean_unregister_notifiers;
  1629. dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
  1630. &ss_res->start, descs_pool_size,
  1631. cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
  1632. CPSW_MINOR_VERSION(cpsw->version),
  1633. CPSW_RTL_VERSION(cpsw->version));
  1634. pm_runtime_put(dev);
  1635. return 0;
  1636. clean_unregister_notifiers:
  1637. cpsw_unregister_notifiers(cpsw);
  1638. clean_unregister_netdev:
  1639. cpsw_unregister_ports(cpsw);
  1640. clean_cpts:
  1641. cpts_release(cpsw->cpts);
  1642. cpdma_ctlr_destroy(cpsw->dma);
  1643. clean_dt_ret:
  1644. cpsw_remove_dt(cpsw);
  1645. pm_runtime_put_sync(dev);
  1646. pm_runtime_disable(dev);
  1647. return ret;
  1648. }
/**
 * cpsw_remove() - unbind the CPSW switchdev platform device
 * @pdev: the platform device being removed
 *
 * Resumes the device (register access requires active clocks), then
 * tears down in reverse order of probe: notifiers, devlink, ports,
 * CPTS, CPDMA and DT-derived state, and finally drops the runtime-PM
 * reference and disables runtime PM.
 *
 * Return: 0 on success, or the negative errno from
 * pm_runtime_resume_and_get() — in which case nothing is torn down.
 */
static int cpsw_remove(struct platform_device *pdev)
{
	struct cpsw_common *cpsw = platform_get_drvdata(pdev);
	int ret;

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	cpsw_unregister_notifiers(cpsw);
	cpsw_unregister_devlink(cpsw);
	cpsw_unregister_ports(cpsw);

	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
	cpsw_remove_dt(cpsw);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
  1666. static int __maybe_unused cpsw_suspend(struct device *dev)
  1667. {
  1668. struct cpsw_common *cpsw = dev_get_drvdata(dev);
  1669. int i;
  1670. rtnl_lock();
  1671. for (i = 0; i < cpsw->data.slaves; i++) {
  1672. struct net_device *ndev = cpsw->slaves[i].ndev;
  1673. if (!(ndev && netif_running(ndev)))
  1674. continue;
  1675. cpsw_ndo_stop(ndev);
  1676. }
  1677. rtnl_unlock();
  1678. /* Select sleep pin state */
  1679. pinctrl_pm_select_sleep_state(dev);
  1680. return 0;
  1681. }
  1682. static int __maybe_unused cpsw_resume(struct device *dev)
  1683. {
  1684. struct cpsw_common *cpsw = dev_get_drvdata(dev);
  1685. int i;
  1686. /* Select default pin state */
  1687. pinctrl_pm_select_default_state(dev);
  1688. /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
  1689. rtnl_lock();
  1690. for (i = 0; i < cpsw->data.slaves; i++) {
  1691. struct net_device *ndev = cpsw->slaves[i].ndev;
  1692. if (!(ndev && netif_running(ndev)))
  1693. continue;
  1694. cpsw_ndo_open(ndev);
  1695. }
  1696. rtnl_unlock();
  1697. return 0;
  1698. }
/* System sleep ops only; runtime PM is managed explicitly in probe/remove. */
static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw-switch",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");