cpsw.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Texas Instruments Ethernet Switch Driver
  4. *
  5. * Copyright (C) 2012 Texas Instruments
  6. *
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/io.h>
  10. #include <linux/clk.h>
  11. #include <linux/timer.h>
  12. #include <linux/module.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/irqreturn.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/if_ether.h>
  17. #include <linux/etherdevice.h>
  18. #include <linux/netdevice.h>
  19. #include <linux/net_tstamp.h>
  20. #include <linux/phy.h>
  21. #include <linux/phy/phy.h>
  22. #include <linux/workqueue.h>
  23. #include <linux/delay.h>
  24. #include <linux/pm_runtime.h>
  25. #include <linux/gpio/consumer.h>
  26. #include <linux/of.h>
  27. #include <linux/of_mdio.h>
  28. #include <linux/of_net.h>
  29. #include <linux/of_device.h>
  30. #include <linux/if_vlan.h>
  31. #include <linux/kmemleak.h>
  32. #include <linux/sys_soc.h>
  33. #include <net/page_pool.h>
  34. #include <linux/bpf.h>
  35. #include <linux/bpf_trace.h>
  36. #include <linux/pinctrl/consumer.h>
  37. #include <net/pkt_cls.h>
  38. #include "cpsw.h"
  39. #include "cpsw_ale.h"
  40. #include "cpsw_priv.h"
  41. #include "cpsw_sl.h"
  42. #include "cpts.h"
  43. #include "davinci_cpdma.h"
  44. #include <net/pkt_sched.h>
/* Module parameters; tunable at insmod time (descs_pool_size is also
 * readable via sysfs, perm 0444).
 */
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

/* ALE address-table ageout interval, in seconds */
static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

/* Largest frame the RX path accepts */
static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

/* Size of the CPDMA CPPI descriptor pool */
static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
/* for_each_slave - invoke @func(slave, ##arg) on the relevant slave(s).
 *
 * In dual-EMAC mode only the slave bound to this netdev (priv->emac_port)
 * is visited; in switch mode every slave of the switch is visited.
 * Multi-statement macro wrapped in do/while(0) for safe use in if/else.
 */
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
					slave = cpsw->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)
  70. static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
  71. struct cpsw_priv *priv)
  72. {
  73. return cpsw->data.dual_emac ? priv->emac_port : cpsw->data.active_slave;
  74. }
  75. static int cpsw_get_slave_port(u32 slave_num)
  76. {
  77. return slave_num + 1;
  78. }
  79. static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
  80. __be16 proto, u16 vid);
/* cpsw_set_promiscious - enable/disable promiscuous operation in the ALE.
 * @ndev: net device requesting the change
 * @enable: desired promiscuous state
 *
 * Dual-EMAC mode toggles ALE bypass (shared by both interfaces, so it
 * stays enabled while either interface is promiscuous). Switch mode
 * instead disables learning, ages out the address table, flushes
 * multicast entries and floods all unicast to the host port.
 */
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface will be
		 * common for both the interface as the interface shares
		 * the same hardware resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			/* the other interface still needs bypass on */
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			/* poll (up to ~1s) for the ageout pass to complete */
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}
  146. /**
  147. * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes
  148. * if it's not deleted
  149. * @ndev: device to sync
  150. * @addr: address to be added or deleted
  151. * @vid: vlan id, if vid < 0 set/unset address for real device
  152. * @add: add address if the flag is set or remove otherwise
  153. */
  154. static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
  155. int vid, int add)
  156. {
  157. struct cpsw_priv *priv = netdev_priv(ndev);
  158. struct cpsw_common *cpsw = priv->cpsw;
  159. int mask, flags, ret;
  160. if (vid < 0) {
  161. if (cpsw->data.dual_emac)
  162. vid = cpsw->slaves[priv->emac_port].port_vlan;
  163. else
  164. vid = 0;
  165. }
  166. mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
  167. flags = vid ? ALE_VLAN : 0;
  168. if (add)
  169. ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
  170. else
  171. ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
  172. return ret;
  173. }
  174. static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
  175. {
  176. struct addr_sync_ctx *sync_ctx = ctx;
  177. struct netdev_hw_addr *ha;
  178. int found = 0, ret = 0;
  179. if (!vdev || !(vdev->flags & IFF_UP))
  180. return 0;
  181. /* vlan address is relevant if its sync_cnt != 0 */
  182. netdev_for_each_mc_addr(ha, vdev) {
  183. if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
  184. found = ha->sync_cnt;
  185. break;
  186. }
  187. }
  188. if (found)
  189. sync_ctx->consumed++;
  190. if (sync_ctx->flush) {
  191. if (!found)
  192. cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
  193. return 0;
  194. }
  195. if (found)
  196. ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
  197. return ret;
  198. }
  199. static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
  200. {
  201. struct addr_sync_ctx sync_ctx;
  202. int ret;
  203. sync_ctx.consumed = 0;
  204. sync_ctx.addr = addr;
  205. sync_ctx.ndev = ndev;
  206. sync_ctx.flush = 0;
  207. ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
  208. if (sync_ctx.consumed < num && !ret)
  209. ret = cpsw_set_mc(ndev, addr, -1, 1);
  210. return ret;
  211. }
  212. static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
  213. {
  214. struct addr_sync_ctx sync_ctx;
  215. sync_ctx.consumed = 0;
  216. sync_ctx.addr = addr;
  217. sync_ctx.ndev = ndev;
  218. sync_ctx.flush = 1;
  219. vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
  220. if (sync_ctx.consumed == num)
  221. cpsw_set_mc(ndev, addr, -1, 0);
  222. return 0;
  223. }
  224. static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
  225. {
  226. struct addr_sync_ctx *sync_ctx = ctx;
  227. struct netdev_hw_addr *ha;
  228. int found = 0;
  229. if (!vdev || !(vdev->flags & IFF_UP))
  230. return 0;
  231. /* vlan address is relevant if its sync_cnt != 0 */
  232. netdev_for_each_mc_addr(ha, vdev) {
  233. if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
  234. found = ha->sync_cnt;
  235. break;
  236. }
  237. }
  238. if (!found)
  239. return 0;
  240. sync_ctx->consumed++;
  241. cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
  242. return 0;
  243. }
  244. static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
  245. {
  246. struct addr_sync_ctx sync_ctx;
  247. sync_ctx.addr = addr;
  248. sync_ctx.ndev = ndev;
  249. sync_ctx.consumed = 0;
  250. vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
  251. if (sync_ctx.consumed < num)
  252. cpsw_set_mc(ndev, addr, -1, 0);
  253. return 0;
  254. }
  255. static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
  256. {
  257. struct cpsw_priv *priv = netdev_priv(ndev);
  258. struct cpsw_common *cpsw = priv->cpsw;
  259. int slave_port = -1;
  260. if (cpsw->data.dual_emac)
  261. slave_port = priv->emac_port + 1;
  262. if (ndev->flags & IFF_PROMISC) {
  263. /* Enable promiscuous mode */
  264. cpsw_set_promiscious(ndev, true);
  265. cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
  266. return;
  267. } else {
  268. /* Disable promiscuous mode */
  269. cpsw_set_promiscious(ndev, false);
  270. }
  271. /* Restore allmulti on vlans if necessary */
  272. cpsw_ale_set_allmulti(cpsw->ale,
  273. ndev->flags & IFF_ALLMULTI, slave_port);
  274. /* add/remove mcast address either for real netdev or for vlan */
  275. __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
  276. cpsw_del_mc_addr);
  277. }
  278. static unsigned int cpsw_rxbuf_total_len(unsigned int len)
  279. {
  280. len += CPSW_HEADROOM_NA;
  281. len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  282. return SKB_DATA_ALIGN(len);
  283. }
/* cpsw_rx_handler - CPDMA RX completion callback for one buffer.
 * @token: the struct page that backed the completed descriptor
 * @len: received frame length in bytes
 * @status: CPDMA status; negative on teardown/error, otherwise carries
 *          the source-port field and the CPDMA_RX_VLAN_ENCAP flag
 *
 * Runs the attached XDP program (if any), hands the frame to the stack
 * as an skb, and always requeues a buffer page back to the channel so
 * the RX descriptor count stays constant.
 */
static void cpsw_rx_handler(void *token, int len, int status)
{
	struct page *new_page, *page = token;
	void *pa = page_address(page);
	/* per-buffer metadata (owning ndev + channel) lives in the page */
	struct cpsw_meta_xdp *xmeta = pa + CPSW_XMETA_OFFSET;
	struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev);
	int pkt_size = cpsw->rx_packet_max;
	int ret = 0, port, ch = xmeta->ch;
	int headroom = CPSW_HEADROOM_NA;
	struct net_device *ndev = xmeta->ndev;
	struct cpsw_priv *priv;
	struct page_pool *pool;
	struct sk_buff *skb;
	struct xdp_buff xdp;
	dma_addr_t dma;

	/* dual-EMAC: deliver to the ndev of the slave port the frame
	 * actually arrived on (source-port field is 1-based).
	 */
	if (cpsw->data.dual_emac && status >= 0) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port)
			ndev = cpsw->slaves[--port].ndev;
	}

	priv = netdev_priv(ndev);
	pool = cpsw->page_pool[ch];

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet received is for the interface which
			 * is already down and the other interface is up
			 * and running, instead of freeing which results
			 * in reducing of the number of rx descriptor in
			 * DMA engine, requeue page back to cpdma.
			 */
			new_page = page;
			goto requeue;
		}

		/* the interface is going down, pages are purged */
		page_pool_recycle_direct(pool, page);
		return;
	}

	new_page = page_pool_dev_alloc_pages(pool);
	if (unlikely(!new_page)) {
		/* no replacement buffer: drop this frame, reuse its page */
		new_page = page;
		ndev->stats.rx_dropped++;
		goto requeue;
	}

	if (priv->xdp_prog) {
		int size = len;

		xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
		if (status & CPDMA_RX_VLAN_ENCAP) {
			/* hide the HW VLAN-encap header from the XDP prog */
			headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
			size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
		}

		xdp_prepare_buff(&xdp, pa, headroom, size, false);

		port = priv->emac_port + cpsw->data.dual_emac;
		ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
		if (ret != CPSW_XDP_PASS)
			goto requeue;

		/* XDP may have moved the packet start */
		headroom = xdp.data - xdp.data_hard_start;

		/* XDP prog can modify vlan tag, so can't use encap header */
		status &= ~CPDMA_RX_VLAN_ENCAP;
	}

	/* pass skb to netstack if no XDP prog or returned XDP_PASS */
	skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
	if (!skb) {
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(pool, page);
		goto requeue;
	}

	skb_reserve(skb, headroom);
	skb_put(skb, len);
	skb->dev = ndev;
	if (status & CPDMA_RX_VLAN_ENCAP)
		cpsw_rx_vlan_encap(skb);
	if (priv->rx_ts_enabled)
		cpts_rx_timestamp(cpsw->cpts, skb);
	skb->protocol = eth_type_trans(skb, ndev);

	/* mark skb for recycling */
	skb_mark_for_recycle(skb);
	netif_receive_skb(skb);

	ndev->stats.rx_bytes += len;
	ndev->stats.rx_packets++;

requeue:
	/* hand a buffer page back to the channel in place of the consumed one */
	xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
	xmeta->ndev = ndev;
	xmeta->ch = ch;

	dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
	ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
				       pkt_size, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOMEM);
		page_pool_recycle_direct(pool, new_page);
	}
}
/* _cpsw_adjust_link - apply one slave's current PHY state to its MAC.
 * @slave: slave whose PHY state changed
 * @priv: owning cpsw instance
 * @link: out-parameter; set to true when this slave's link is up
 *        (never cleared here — the caller aggregates over all slaves)
 *
 * Invoked via for_each_slave() from the phylib adjust_link callback.
 */
static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;
	u32 slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;

		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= CPSW_SL_CTL_IFCTL_A;
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */

		if (priv->rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (priv->tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

		/* only touch the sliver when something changed */
		if (mac_control != slave->mac_control)
			cpsw_sl_ctl_set(slave->mac_sl, mac_control);

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		*link = true;

		/* warn if a speed change invalidates configured CBS shapers */
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

		cpsw_sl_wait_for_idle(slave->mac_sl, 100);

		cpsw_sl_ctl_reset(slave->mac_sl);
	}

	/* log link transitions exactly once per change */
	if (mac_control != slave->mac_control)
		phy_print_status(phy);

	slave->mac_control = mac_control;
}
  426. static void cpsw_adjust_link(struct net_device *ndev)
  427. {
  428. struct cpsw_priv *priv = netdev_priv(ndev);
  429. struct cpsw_common *cpsw = priv->cpsw;
  430. bool link = false;
  431. for_each_slave(priv, _cpsw_adjust_link, priv, &link);
  432. if (link) {
  433. if (cpsw_need_resplit(cpsw))
  434. cpsw_split_res(cpsw);
  435. netif_carrier_on(ndev);
  436. if (netif_running(ndev))
  437. netif_tx_wake_all_queues(ndev);
  438. } else {
  439. netif_carrier_off(ndev);
  440. netif_tx_stop_all_queues(ndev);
  441. }
  442. }
/* Install the default ALE entries that isolate one slave in dual-EMAC
 * mode: a port VLAN whose members are only this slave and the host,
 * broadcast towards the host, the port's own unicast address (secure),
 * and drop of unknown-VLAN traffic on the slave port.
 */
static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	/* the port VLAN register moved between CPSW v1 and v2+ */
	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}
/* cpsw_slave_open - bring up one slave MAC port and attach its PHY.
 *
 * Resets the MAC sliver, programs priority maps and FIFO depths for the
 * detected CPSW version, sets RX max length and MAC address, installs
 * default ALE entries, connects the PHY (DT node preferred over legacy
 * phy_id) and configures the interface mux.
 */
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
	struct phy_device *phy;
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_sl_reset(slave->mac_sl, 100);
	cpsw_sl_ctl_reset(slave->mac_sl);

	/* setup priority mapping */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
			  RX_PRIORITY_MAPPING);

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting fullduplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting fullduplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}

	/* setup max packet size, and mac address */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
			  cpsw->rx_packet_max);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (cpsw->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	if (slave->data->phy_node) {
		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
		if (!phy) {
			/* port stays up without a PHY; link never asserts */
			dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
				slave->data->phy_node,
				slave->slave_num);
			return;
		}
	} else {
		phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, slave->data->phy_if);
		if (IS_ERR(phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(phy));
			return;
		}
	}

	slave->phy = phy;

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	if (!IS_ERR(slave->data->ifphy))
		phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
				 slave->data->phy_if);
	else
		cpsw_phy_sel(cpsw->dev, slave->phy->interface,
			     slave->slave_num);
}
/* cpsw_add_default_vlan - program the default port VLAN (switch mode).
 *
 * Writes data.default_vlan as the port VLAN of the host port and of
 * every slave, then installs the matching ALE VLAN entry with all ports
 * as members. Unregistered multicast floods to all ports only when the
 * netdev has IFF_ALLMULTI, otherwise only to the slave ports.
 */
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
	u32 reg;
	int i;
	int unreg_mcast_mask;

	/* the per-slave port VLAN register moved between CPSW v1 and v2+ */
	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	for (i = 0; i < cpsw->data.slaves; i++)
		slave_write(cpsw->slaves + i, vlan, reg);

	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
}
/* cpsw_init_host_port - reset the switch core and configure host port 0.
 *
 * Soft-resets the CPSW subsystem, restarts the ALE, enables VLAN
 * handling and RX VLAN encapsulation, selects the FIFO mode, programs
 * host-port priority maps and, in switch mode, installs the host
 * unicast and broadcast ALE entries.
 */
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 fifo_mode;
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* NOTE(review): the historical comment here said "switch to vlan
	 * unaware mode", yet CPSW_ALE_VLAN_AWARE is written and
	 * CPSW_VLAN_AWARE is set in the control register below — this
	 * appears to enable VLAN-aware operation; confirm against the
	 * CPSW_ALE_VLAN_AWARE definition in cpsw_priv.h.
	 */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
	writel(control_reg, &cpsw->regs->control);
	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
		       &cpsw->host_port_regs->cpdma_tx_pri_map);
	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	if (!cpsw->data.dual_emac) {
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
				   0, 0);
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
	}
}
  587. static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
  588. {
  589. u32 slave_port;
  590. slave_port = cpsw_get_slave_port(slave->slave_num);
  591. if (!slave->phy)
  592. return;
  593. phy_stop(slave->phy);
  594. phy_disconnect(slave->phy);
  595. slave->phy = NULL;
  596. cpsw_ale_control_set(cpsw->ale, slave_port,
  597. ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
  598. cpsw_sl_reset(slave->mac_sl, 100);
  599. cpsw_sl_ctl_reset(slave->mac_sl);
  600. }
  601. static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
  602. {
  603. struct cpsw_priv *priv = arg;
  604. if (!vdev)
  605. return 0;
  606. cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
  607. return 0;
  608. }
/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
	/* restore vlan configurations */
	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);

	/* restore MQPRIO offload */
	for_each_slave(priv, cpsw_mqprio_resume, priv);

	/* restore CBS offload */
	for_each_slave(priv, cpsw_cbs_resume, priv);
}
/* cpsw_ndo_open - ndo_open hook: bring the interface (and, for the
 * first opener, the shared switch resources) up.
 *
 * cpsw->usage_count tracks how many netdevs of this switch are open;
 * host-port init, NAPI, XDP rxqs, RX buffer fill and CPTS registration
 * happen only on the 0 -> 1 transition.
 *
 * Return: 0 on success, negative errno on failure (resources shared
 * with an already-open sibling are left intact on the error path).
 */
static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	u32 reg;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	netif_carrier_off(ndev);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of tx queues\n");
		goto err_cleanup;
	}

	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of rx queues\n");
		goto err_cleanup;
	}

	reg = cpsw->version;

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* Initialize host and slave ports */
	if (!cpsw->usage_count)
		cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* Add default VLAN */
	if (!cpsw->data.dual_emac)
		cpsw_add_default_vlan(priv);
	else
		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);

	/* initialize shared resources for every ndev */
	if (!cpsw->usage_count) {
		/* disable priority elevation */
		writel_relaxed(0, &cpsw->regs->ptype);

		/* enable statistics collection only on all ports */
		writel_relaxed(0x7, &cpsw->regs->stat_port_en);

		/* Enable internal fifo flow control */
		writel(0x7, &cpsw->regs->flow_control);

		napi_enable(&cpsw->napi_rx);
		napi_enable(&cpsw->napi_tx);

		/* re-enable IRQs that a previous close left masked */
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}

		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}

		/* create rxqs for both infs in dual mac as they use same pool
		 * and must be destroyed together when no users.
		 */
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret < 0)
			goto err_cleanup;

		ret = cpsw_fill_rx_channels(priv);
		if (ret < 0)
			goto err_cleanup;

		if (cpsw->cpts) {
			if (cpts_register(cpsw->cpts))
				dev_err(priv->dev, "error registering cpts device\n");
			else
				writel(0x10, &cpsw->wr_regs->misc_en);
		}
	}

	cpsw_restore(priv);

	/* Enable Interrupt pacing if configured */
	if (cpsw->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = cpsw->coal_intvl;
		cpsw_set_coalesce(ndev, &coal, NULL, NULL);
	}

	cpdma_ctlr_start(cpsw->dma);
	cpsw_intr_enable(cpsw);
	cpsw->usage_count++;

	return 0;

err_cleanup:
	/* only tear down shared resources if no sibling has them open */
	if (!cpsw->usage_count) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_destroy_xdp_rxqs(cpsw);
	}

	for_each_slave(priv, cpsw_slave_stop, cpsw);
	pm_runtime_put_sync(cpsw->dev);
	netif_carrier_off(priv->ndev);
	return ret;
}
/* ndo_stop: bring one slave interface down.  When this is the last open
 * user of the switch (usage_count <= 1), also quiesce the shared
 * resources (NAPI, CPTS, interrupts, CPDMA, ALE, XDP rxqs) that
 * cpsw_ndo_open() set up for the first user.
 */
static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	/* Drop every multicast address this device synced into the ALE. */
	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
	netif_tx_stop_all_queues(priv->ndev);
	netif_carrier_off(priv->ndev);

	/* Shared teardown only when the last user goes away. */
	if (cpsw->usage_count <= 1) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpts_unregister(cpsw->cpts);
		cpsw_intr_disable(cpsw);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_ale_stop(cpsw->ale);
		cpsw_destroy_xdp_rxqs(cpsw);
	}
	for_each_slave(priv, cpsw_slave_stop, cpsw);

	/* Re-balance the DMA channel budget now that a user is gone. */
	if (cpsw_need_resplit(cpsw))
		cpsw_split_res(cpsw);

	cpsw->usage_count--;
	pm_runtime_put_sync(cpsw->dev);
	return 0;
}
/* ndo_start_xmit: queue one skb on the CPDMA TX channel selected by the
 * skb's queue mapping.  Pads short frames to the switch minimum, marks
 * the skb for hardware timestamping when CPTS allows it, and throttles
 * the netdev queue when the channel runs out of descriptors.
 */
static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;
	struct netdev_queue *txq;
	struct cpdma_chan *txch;
	int ret, q_idx;

	/* NOTE(review): skb_put_padto() is expected to free the skb on
	 * failure, so only the drop counter is updated here — confirm.
	 */
	if (skb_put_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Fold out-of-range queue mappings back into the configured
	 * TX channel range.
	 */
	q_idx = skb_get_queue_mapping(skb);
	if (q_idx >= cpsw->tx_ch_num)
		q_idx = q_idx % cpsw->tx_ch_num;

	txch = cpsw->txv[q_idx].ch;
	txq = netdev_get_tx_queue(ndev, q_idx);
	skb_tx_timestamp(skb);
	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
				priv->emac_port + cpsw->data.dual_emac);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		/* Re-check after the barrier: the completion path may have
		 * freed descriptors between the check and the stop.
		 */
		if (cpdma_check_free_tx_desc(txch))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
fail:
	ndev->stats.tx_dropped++;
	netif_tx_stop_queue(txq);

	/* Barrier, so that stop_queue visible to other cpus */
	smp_mb__after_atomic();

	if (cpdma_check_free_tx_desc(txch))
		netif_tx_wake_queue(txq);

	return NETDEV_TX_BUSY;
}
  784. static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
  785. {
  786. struct cpsw_priv *priv = netdev_priv(ndev);
  787. struct sockaddr *addr = (struct sockaddr *)p;
  788. struct cpsw_common *cpsw = priv->cpsw;
  789. int flags = 0;
  790. u16 vid = 0;
  791. int ret;
  792. if (!is_valid_ether_addr(addr->sa_data))
  793. return -EADDRNOTAVAIL;
  794. ret = pm_runtime_resume_and_get(cpsw->dev);
  795. if (ret < 0)
  796. return ret;
  797. if (cpsw->data.dual_emac) {
  798. vid = cpsw->slaves[priv->emac_port].port_vlan;
  799. flags = ALE_VLAN;
  800. }
  801. cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
  802. flags, vid);
  803. cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
  804. flags, vid);
  805. memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
  806. eth_hw_addr_set(ndev, priv->mac_addr);
  807. for_each_slave(priv, cpsw_set_slave_mac, priv);
  808. pm_runtime_put(cpsw->dev);
  809. return 0;
  810. }
  811. static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
  812. unsigned short vid)
  813. {
  814. int ret;
  815. int unreg_mcast_mask = 0;
  816. int mcast_mask;
  817. u32 port_mask;
  818. struct cpsw_common *cpsw = priv->cpsw;
  819. if (cpsw->data.dual_emac) {
  820. port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
  821. mcast_mask = ALE_PORT_HOST;
  822. if (priv->ndev->flags & IFF_ALLMULTI)
  823. unreg_mcast_mask = mcast_mask;
  824. } else {
  825. port_mask = ALE_ALL_PORTS;
  826. mcast_mask = port_mask;
  827. if (priv->ndev->flags & IFF_ALLMULTI)
  828. unreg_mcast_mask = ALE_ALL_PORTS;
  829. else
  830. unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
  831. }
  832. ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
  833. unreg_mcast_mask);
  834. if (ret != 0)
  835. return ret;
  836. ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
  837. HOST_PORT_NUM, ALE_VLAN, vid);
  838. if (ret != 0)
  839. goto clean_vid;
  840. ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
  841. mcast_mask, ALE_VLAN, vid, 0);
  842. if (ret != 0)
  843. goto clean_vlan_ucast;
  844. return 0;
  845. clean_vlan_ucast:
  846. cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
  847. HOST_PORT_NUM, ALE_VLAN, vid);
  848. clean_vid:
  849. cpsw_ale_del_vlan(cpsw->ale, vid, 0);
  850. return ret;
  851. }
  852. static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
  853. __be16 proto, u16 vid)
  854. {
  855. struct cpsw_priv *priv = netdev_priv(ndev);
  856. struct cpsw_common *cpsw = priv->cpsw;
  857. int ret;
  858. if (vid == cpsw->data.default_vlan)
  859. return 0;
  860. ret = pm_runtime_resume_and_get(cpsw->dev);
  861. if (ret < 0)
  862. return ret;
  863. if (cpsw->data.dual_emac) {
  864. /* In dual EMAC, reserved VLAN id should not be used for
  865. * creating VLAN interfaces as this can break the dual
  866. * EMAC port separation
  867. */
  868. int i;
  869. for (i = 0; i < cpsw->data.slaves; i++) {
  870. if (vid == cpsw->slaves[i].port_vlan) {
  871. ret = -EINVAL;
  872. goto err;
  873. }
  874. }
  875. }
  876. dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
  877. ret = cpsw_add_vlan_ale_entry(priv, vid);
  878. err:
  879. pm_runtime_put(cpsw->dev);
  880. return ret;
  881. }
  882. static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
  883. __be16 proto, u16 vid)
  884. {
  885. struct cpsw_priv *priv = netdev_priv(ndev);
  886. struct cpsw_common *cpsw = priv->cpsw;
  887. int ret;
  888. if (vid == cpsw->data.default_vlan)
  889. return 0;
  890. ret = pm_runtime_resume_and_get(cpsw->dev);
  891. if (ret < 0)
  892. return ret;
  893. if (cpsw->data.dual_emac) {
  894. int i;
  895. for (i = 0; i < cpsw->data.slaves; i++) {
  896. if (vid == cpsw->slaves[i].port_vlan)
  897. goto err;
  898. }
  899. }
  900. dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
  901. ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
  902. ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
  903. HOST_PORT_NUM, ALE_VLAN, vid);
  904. ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
  905. 0, ALE_VLAN, vid);
  906. ret |= cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
  907. err:
  908. pm_runtime_put(cpsw->dev);
  909. return ret;
  910. }
  911. static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
  912. struct xdp_frame **frames, u32 flags)
  913. {
  914. struct cpsw_priv *priv = netdev_priv(ndev);
  915. struct cpsw_common *cpsw = priv->cpsw;
  916. struct xdp_frame *xdpf;
  917. int i, nxmit = 0, port;
  918. if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
  919. return -EINVAL;
  920. for (i = 0; i < n; i++) {
  921. xdpf = frames[i];
  922. if (xdpf->len < CPSW_MIN_PACKET_SIZE)
  923. break;
  924. port = priv->emac_port + cpsw->data.dual_emac;
  925. if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port))
  926. break;
  927. nxmit++;
  928. }
  929. return nxmit;
  930. }
  931. #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with switch interrupts masked, run the RX and TX
 * interrupt handlers directly so netconsole & co. can make progress
 * without IRQ delivery.
 */
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	cpsw_intr_disable(cpsw);
	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
  940. #endif
/* net_device_ops shared by the slave-0 netdev and, in dual-EMAC mode,
 * the slave-1 netdev created by cpsw_probe_dual_emac().
 */
static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
	.ndo_eth_ioctl		= cpsw_ndo_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc		= cpsw_ndo_setup_tc,
	.ndo_bpf		= cpsw_ndo_bpf,
	.ndo_xdp_xmit		= cpsw_ndo_xdp_xmit,
};
  960. static void cpsw_get_drvinfo(struct net_device *ndev,
  961. struct ethtool_drvinfo *info)
  962. {
  963. struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
  964. struct platform_device *pdev = to_platform_device(cpsw->dev);
  965. strscpy(info->driver, "cpsw", sizeof(info->driver));
  966. strscpy(info->version, "1.0", sizeof(info->version));
  967. strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
  968. }
  969. static int cpsw_set_pauseparam(struct net_device *ndev,
  970. struct ethtool_pauseparam *pause)
  971. {
  972. struct cpsw_priv *priv = netdev_priv(ndev);
  973. bool link;
  974. priv->rx_pause = pause->rx_pause ? true : false;
  975. priv->tx_pause = pause->tx_pause ? true : false;
  976. for_each_slave(priv, _cpsw_adjust_link, priv, &link);
  977. return 0;
  978. }
/* ethtool -L: delegate to the common channel-setup helper, supplying
 * this driver's RX completion handler for any newly created channels.
 */
static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
}
/* ethtool operations; interrupt coalescing supports RX usecs only.
 * Most handlers are the shared cpsw_* helpers from the common code.
 */
static const struct ethtool_ops cpsw_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_drvinfo	= cpsw_get_drvinfo,
	.get_msglevel	= cpsw_get_msglevel,
	.set_msglevel	= cpsw_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= cpsw_get_ts_info,
	.get_coalesce	= cpsw_get_coalesce,
	.set_coalesce	= cpsw_set_coalesce,
	.get_sset_count		= cpsw_get_sset_count,
	.get_strings		= cpsw_get_strings,
	.get_ethtool_stats	= cpsw_get_ethtool_stats,
	.get_pauseparam		= cpsw_get_pauseparam,
	.set_pauseparam		= cpsw_set_pauseparam,
	.get_wol	= cpsw_get_wol,
	.set_wol	= cpsw_set_wol,
	.get_regs_len	= cpsw_get_regs_len,
	.get_regs	= cpsw_get_regs,
	.begin		= cpsw_ethtool_op_begin,
	.complete	= cpsw_ethtool_op_complete,
	.get_channels	= cpsw_get_channels,
	.set_channels	= cpsw_set_channels,
	.get_link_ksettings	= cpsw_get_link_ksettings,
	.set_link_ksettings	= cpsw_set_link_ksettings,
	.get_eee	= cpsw_get_eee,
	.set_eee	= cpsw_set_eee,
	.nway_reset	= cpsw_nway_reset,
	.get_ringparam = cpsw_get_ringparam,
	.set_ringparam = cpsw_set_ringparam,
};
/* cpsw_probe_dt() - fill @data from the device-tree node of @pdev.
 *
 * Reads the switch-level properties (slaves, active_slave, cpdma_channels,
 * bd_ram_size, mac_control, dual_emac), then walks the "slave" child
 * nodes and resolves each slave's PHY (phy-handle, fixed-link, or the
 * legacy phy_id property), phy-mode, MAC address, and dual-EMAC reserved
 * VLAN.  Returns 0 on success or a negative errno; on the error paths
 * the reference held on the current child node is dropped.
 */
static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "slaves", &prop)) {
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	if (of_property_read_u32(node, "active_slave", &prop)) {
		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
		return -EINVAL;
	}
	data->active_slave = prop;

	data->slave_data = devm_kcalloc(&pdev->dev,
					data->slaves,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data)
		return -ENOMEM;

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
		return -EINVAL;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
		return -EINVAL;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
		return -EINVAL;
	}
	data->mac_control = prop;

	if (of_property_read_bool(node, "dual_emac"))
		data->dual_emac = true;

	/*
	 * Populate all the child nodes here...
	 */
	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as in some cases may not have child */
	if (ret)
		dev_warn(&pdev->dev, "Doesn't have any child node\n");

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = data->slave_data + i;
		int lenp;
		const __be32 *parp;

		/* This is no slave child node, continue */
		if (!of_node_name_eq(slave_node, "slave"))
			continue;

		slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
						    NULL);
		if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
		    IS_ERR(slave_data->ifphy)) {
			ret = PTR_ERR(slave_data->ifphy);
			dev_err(&pdev->dev,
				"%d: Error retrieving port phy: %d\n", i, ret);
			goto err_node_put;
		}

		slave_data->slave_node = slave_node;
		/* PHY resolution order: phy-handle, fixed-link, then the
		 * deprecated phy_id property.
		 */
		slave_data->phy_node = of_parse_phandle(slave_node,
							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
		if (slave_data->phy_node) {
			dev_dbg(&pdev->dev,
				"slave[%d] using phy-handle=\"%pOF\"\n",
				i, slave_data->phy_node);
		} else if (of_phy_is_fixed_link(slave_node)) {
			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			ret = of_phy_register_fixed_link(slave_node);
			if (ret) {
				dev_err_probe(&pdev->dev, ret, "failed to register fixed-link phy\n");
				goto err_node_put;
			}
			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			u32 phyid;
			struct device_node *mdio_node;
			struct platform_device *mdio;

			/* phy_id is <&mdio_phandle phy_addr>: two cells. */
			if (lenp != (sizeof(__be32) * 2)) {
				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
				goto no_phy_slave;
			}
			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
			phyid = be32_to_cpup(parp+1);
			mdio = of_find_device_by_node(mdio_node);
			of_node_put(mdio_node);
			if (!mdio) {
				dev_err(&pdev->dev, "Missing mdio platform device\n");
				ret = -EINVAL;
				goto err_node_put;
			}
			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
				 PHY_ID_FMT, mdio->name, phyid);
			put_device(&mdio->dev);
		} else {
			dev_err(&pdev->dev,
				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
				i);
			goto no_phy_slave;
		}
		ret = of_get_phy_mode(slave_node, &slave_data->phy_if);
		if (ret) {
			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
				i);
			goto err_node_put;
		}

no_phy_slave:
		/* DT MAC address first; fall back to the SoC efuse MACID. */
		ret = of_get_mac_address(slave_node, slave_data->mac_addr);
		if (ret) {
			ret = ti_cm_get_macid(&pdev->dev, i,
					      slave_data->mac_addr);
			if (ret)
				goto err_node_put;
		}
		if (data->dual_emac) {
			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
						 &prop)) {
				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
				/* Fall back to slave index + 1 as the
				 * reserved VLAN id.
				 */
				slave_data->dual_emac_res_vlan = i+1;
				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
					slave_data->dual_emac_res_vlan, i);
			} else {
				slave_data->dual_emac_res_vlan = prop;
			}
		}

		i++;
		if (i == data->slaves) {
			/* All configured slaves parsed; release the
			 * iterator's node reference and stop.
			 */
			ret = 0;
			goto err_node_put;
		}
	}

	return 0;

err_node_put:
	of_node_put(slave_node);
	return ret;
}
/* Undo cpsw_probe_dt(): deregister any fixed-link PHYs, drop the
 * phy_node references taken during parsing, and depopulate the child
 * platform devices created by of_platform_populate().
 */
static void cpsw_remove_dt(struct platform_device *pdev)
{
	struct cpsw_common *cpsw = platform_get_drvdata(pdev);
	struct cpsw_platform_data *data = &cpsw->data;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0;

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = &data->slave_data[i];

		if (!of_node_name_eq(slave_node, "slave"))
			continue;

		if (of_phy_is_fixed_link(slave_node))
			of_phy_deregister_fixed_link(slave_node);

		of_node_put(slave_data->phy_node);

		i++;
		if (i == data->slaves) {
			/* Breaking out early: drop the reference the
			 * iterator holds on the current node.
			 */
			of_node_put(slave_node);
			break;
		}
	}

	of_platform_depopulate(&pdev->dev);
}
  1181. static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
  1182. {
  1183. struct cpsw_common *cpsw = priv->cpsw;
  1184. struct cpsw_platform_data *data = &cpsw->data;
  1185. struct net_device *ndev;
  1186. struct cpsw_priv *priv_sl2;
  1187. int ret = 0;
  1188. ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
  1189. CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
  1190. if (!ndev) {
  1191. dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
  1192. return -ENOMEM;
  1193. }
  1194. priv_sl2 = netdev_priv(ndev);
  1195. priv_sl2->cpsw = cpsw;
  1196. priv_sl2->ndev = ndev;
  1197. priv_sl2->dev = &ndev->dev;
  1198. priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
  1199. if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
  1200. memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
  1201. ETH_ALEN);
  1202. dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
  1203. priv_sl2->mac_addr);
  1204. } else {
  1205. eth_random_addr(priv_sl2->mac_addr);
  1206. dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
  1207. priv_sl2->mac_addr);
  1208. }
  1209. eth_hw_addr_set(ndev, priv_sl2->mac_addr);
  1210. priv_sl2->emac_port = 1;
  1211. cpsw->slaves[1].ndev = ndev;
  1212. ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
  1213. ndev->netdev_ops = &cpsw_netdev_ops;
  1214. ndev->ethtool_ops = &cpsw_ethtool_ops;
  1215. /* register the network device */
  1216. SET_NETDEV_DEV(ndev, cpsw->dev);
  1217. ndev->dev.of_node = cpsw->slaves[1].data->slave_node;
  1218. ret = register_netdev(ndev);
  1219. if (ret)
  1220. dev_err(cpsw->dev, "cpsw: error registering net device\n");
  1221. return ret;
  1222. }
/* Device-tree compatible strings handled by this driver. */
static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw"},
	{ .compatible = "ti,am335x-cpsw"},
	{ .compatible = "ti,am4372-cpsw"},
	{ .compatible = "ti,dra7-cpsw"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
/* SoC revisions that need the IRQ quirk (probe sets cpsw->quirk_irq
 * when soc_device_match() hits this table).
 */
static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ /* sentinel */ }
};
/* cpsw_probe() - bind the CPSW switch: map register spaces, collect the
 * RX/TX/misc IRQs, parse the device tree, create the CPDMA channels, and
 * register the slave-0 net_device (plus the slave-1 device in dual-EMAC
 * mode).  Failure paths unwind via the goto labels at the bottom.
 */
static int cpsw_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;
	struct cpsw_platform_data *data;
	struct net_device *ndev;
	struct cpsw_priv *priv;
	void __iomem *ss_regs;
	struct resource *ss_res;
	struct gpio_descs *mode;
	const struct soc_device_attribute *soc;
	struct cpsw_common *cpsw;
	int ret = 0, ch;
	int irq;

	cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
	if (!cpsw)
		return -ENOMEM;

	platform_set_drvdata(pdev, cpsw);
	cpsw_slave_index = cpsw_slave_index_priv;

	cpsw->dev = dev;

	/* Optional board-level "mode" GPIOs, driven low; absence is fine. */
	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(mode)) {
		ret = PTR_ERR(mode);
		dev_err(dev, "gpio request failed, ret %d\n", ret);
		return ret;
	}

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(dev, "fck is not found %d\n", ret);
		return ret;
	}
	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;

	ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
	if (IS_ERR(ss_regs))
		return PTR_ERR(ss_regs);
	cpsw->regs = ss_regs;

	cpsw->wr_regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(cpsw->wr_regs))
		return PTR_ERR(cpsw->wr_regs);

	/* RX IRQ */
	irq = platform_get_irq(pdev, 1);
	if (irq < 0)
		return irq;
	cpsw->irqs_table[0] = irq;

	/* TX IRQ */
	irq = platform_get_irq(pdev, 2);
	if (irq < 0)
		return irq;
	cpsw->irqs_table[1] = irq;

	/* get misc irq*/
	irq = platform_get_irq(pdev, 3);
	if (irq <= 0)
		return irq;
	cpsw->misc_irq = irq;

	/*
	 * This may be required here for child devices.
	 */
	pm_runtime_enable(dev);

	/* Need to enable clocks with runtime PM api to access module
	 * registers
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto clean_runtime_disable_ret;

	ret = cpsw_probe_dt(&cpsw->data, pdev);
	if (ret)
		goto clean_dt_ret;

	soc = soc_device_match(cpsw_soc_devices);
	if (soc)
		cpsw->quirk_irq = true;

	data = &cpsw->data;
	cpsw->slaves = devm_kcalloc(dev,
				    data->slaves, sizeof(struct cpsw_slave),
				    GFP_KERNEL);
	if (!cpsw->slaves) {
		ret = -ENOMEM;
		goto clean_dt_ret;
	}

	cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
	cpsw->descs_pool_size = descs_pool_size;

	ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
			       ss_res->start + CPSW2_BD_OFFSET,
			       descs_pool_size);
	if (ret)
		goto clean_dt_ret;

	/* TX on channel 7 normally, channel 0 on quirk_irq SoCs —
	 * presumably 7 is the highest-priority channel; confirm.
	 */
	ch = cpsw->quirk_irq ? 0 : 7;
	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
	if (IS_ERR(cpsw->txv[0].ch)) {
		dev_err(dev, "error initializing tx dma channel\n");
		ret = PTR_ERR(cpsw->txv[0].ch);
		goto clean_cpts;
	}

	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
	if (IS_ERR(cpsw->rxv[0].ch)) {
		dev_err(dev, "error initializing rx dma channel\n");
		ret = PTR_ERR(cpsw->rxv[0].ch);
		goto clean_cpts;
	}
	cpsw_split_res(cpsw);

	/* setup netdev */
	ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(dev, "error allocating net_device\n");
		ret = -ENOMEM;
		goto clean_cpts;
	}

	priv = netdev_priv(ndev);
	priv->cpsw = cpsw;
	priv->ndev = ndev;
	priv->dev = dev;
	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
	priv->emac_port = 0;

	/* DT-provided MAC when valid, otherwise a random one. */
	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
		dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
	} else {
		eth_random_addr(priv->mac_addr);
		dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
	}

	eth_hw_addr_set(ndev, priv->mac_addr);

	cpsw->slaves[0].ndev = ndev;

	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;

	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;
	/* Quirky SoCs use the single-queue poll handlers. */
	netif_napi_add(ndev, &cpsw->napi_rx,
		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
	netif_napi_add_tx(ndev, &cpsw->napi_tx,
			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll);

	/* register the network device */
	SET_NETDEV_DEV(ndev, dev);
	ndev->dev.of_node = cpsw->slaves[0].data->slave_node;
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "error registering net device\n");
		/* NOTE(review): register_netdev()'s error code is
		 * replaced with -ENODEV here.
		 */
		ret = -ENODEV;
		goto clean_cpts;
	}

	if (cpsw->data.dual_emac) {
		ret = cpsw_probe_dual_emac(priv);
		if (ret) {
			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
			goto clean_unregister_netdev_ret;
		}
	}

	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
	 * MISC IRQs which are always kept disabled with this driver so
	 * we will not request them.
	 *
	 * If anyone wants to implement support for those, make sure to
	 * first request and append them to irqs_table array.
	 */
	ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
			       0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}

	ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
			       0, dev_name(&pdev->dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}

	if (!cpsw->cpts)
		goto skip_cpts;

	ret = devm_request_irq(&pdev->dev, cpsw->misc_irq, cpsw_misc_interrupt,
			       0, dev_name(&pdev->dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching misc irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}

	/* Enable misc CPTS evnt_pend IRQ */
	cpts_set_irqpoll(cpsw->cpts, false);

skip_cpts:
	cpsw_notice(priv, probe,
		    "initialized device (regs %pa, irq %d, pool size %d)\n",
		    &ss_res->start, cpsw->irqs_table[0], descs_pool_size);

	pm_runtime_put(&pdev->dev);

	return 0;

clean_unregister_netdev_ret:
	unregister_netdev(ndev);
clean_cpts:
	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
	pm_runtime_disable(&pdev->dev);
	return ret;
}
  1428. static int cpsw_remove(struct platform_device *pdev)
  1429. {
  1430. struct cpsw_common *cpsw = platform_get_drvdata(pdev);
  1431. int i, ret;
  1432. ret = pm_runtime_resume_and_get(&pdev->dev);
  1433. if (ret < 0)
  1434. return ret;
  1435. for (i = 0; i < cpsw->data.slaves; i++)
  1436. if (cpsw->slaves[i].ndev)
  1437. unregister_netdev(cpsw->slaves[i].ndev);
  1438. cpts_release(cpsw->cpts);
  1439. cpdma_ctlr_destroy(cpsw->dma);
  1440. cpsw_remove_dt(pdev);
  1441. pm_runtime_put_sync(&pdev->dev);
  1442. pm_runtime_disable(&pdev->dev);
  1443. return 0;
  1444. }
  1445. #ifdef CONFIG_PM_SLEEP
  1446. static int cpsw_suspend(struct device *dev)
  1447. {
  1448. struct cpsw_common *cpsw = dev_get_drvdata(dev);
  1449. int i;
  1450. rtnl_lock();
  1451. for (i = 0; i < cpsw->data.slaves; i++)
  1452. if (cpsw->slaves[i].ndev)
  1453. if (netif_running(cpsw->slaves[i].ndev))
  1454. cpsw_ndo_stop(cpsw->slaves[i].ndev);
  1455. rtnl_unlock();
  1456. /* Select sleep pin state */
  1457. pinctrl_pm_select_sleep_state(dev);
  1458. return 0;
  1459. }
  1460. static int cpsw_resume(struct device *dev)
  1461. {
  1462. struct cpsw_common *cpsw = dev_get_drvdata(dev);
  1463. int i;
  1464. /* Select default pin state */
  1465. pinctrl_pm_select_default_state(dev);
  1466. /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
  1467. rtnl_lock();
  1468. for (i = 0; i < cpsw->data.slaves; i++)
  1469. if (cpsw->slaves[i].ndev)
  1470. if (netif_running(cpsw->slaves[i].ndev))
  1471. cpsw_ndo_open(cpsw->slaves[i].ndev);
  1472. rtnl_unlock();
  1473. return 0;
  1474. }
  1475. #endif
/* System sleep hooks; expands to empty ops unless CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

/* Platform driver glue: matched either by name or by the OF table. */
static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <[email protected]>");
MODULE_AUTHOR("Mugunthan V N <[email protected]>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");