netsec.c

// SPDX-License-Identifier: GPL-2.0+

#include <linux/types.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <net/tcp.h>
#include <net/page_pool.h>
#include <net/ip6_checksum.h>

#define NETSEC_REG_SOFT_RST 0x104
#define NETSEC_REG_COM_INIT 0x120
#define NETSEC_REG_TOP_STATUS 0x200
#define NETSEC_IRQ_RX BIT(1)
#define NETSEC_IRQ_TX BIT(0)
#define NETSEC_REG_TOP_INTEN 0x204
#define NETSEC_REG_INTEN_SET 0x234
#define NETSEC_REG_INTEN_CLR 0x238
#define NETSEC_REG_NRM_TX_STATUS 0x400
#define NETSEC_REG_NRM_TX_INTEN 0x404
#define NETSEC_REG_NRM_TX_INTEN_SET 0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c
#define NRM_TX_ST_NTOWNR BIT(17)
#define NRM_TX_ST_TR_ERR BIT(16)
#define NRM_TX_ST_TXDONE BIT(15)
#define NRM_TX_ST_TMREXP BIT(14)
#define NETSEC_REG_NRM_RX_STATUS 0x440
#define NETSEC_REG_NRM_RX_INTEN 0x444
#define NETSEC_REG_NRM_RX_INTEN_SET 0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c
#define NRM_RX_ST_RC_ERR BIT(16)
#define NRM_RX_ST_PKTCNT BIT(15)
#define NRM_RX_ST_TMREXP BIT(14)
#define NETSEC_REG_PKT_CMD_BUF 0xd0
#define NETSEC_REG_CLK_EN 0x100
#define NETSEC_REG_PKT_CTRL 0x140
#define NETSEC_REG_DMA_TMR_CTRL 0x20c
#define NETSEC_REG_F_TAIKI_MC_VER 0x22c
#define NETSEC_REG_F_TAIKI_VER 0x230
#define NETSEC_REG_DMA_HM_CTRL 0x214
#define NETSEC_REG_DMA_MH_CTRL 0x220
#define NETSEC_REG_ADDR_DIS_CORE 0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c
#define NETSEC_REG_NRM_TX_PKTCNT 0x410
#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418
#define NETSEC_REG_NRM_TX_TMR 0x41c
#define NETSEC_REG_NRM_RX_PKTCNT 0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460
#define NETSEC_REG_NRM_RX_TMR 0x45c
#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448
#define NETSEC_REG_NRM_TX_CONFIG 0x430
#define NETSEC_REG_NRM_RX_CONFIG 0x470
#define MAC_REG_STATUS 0x1024
#define MAC_REG_DATA 0x11c0
#define MAC_REG_CMD 0x11c4
#define MAC_REG_FLOW_TH 0x11cc
#define MAC_REG_INTF_SEL 0x11d4
#define MAC_REG_DESC_INIT 0x11fc
#define MAC_REG_DESC_SOFT_RST 0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500
#define GMAC_REG_MCR 0x0000
#define GMAC_REG_MFFR 0x0004
#define GMAC_REG_GAR 0x0010
#define GMAC_REG_GDR 0x0014
#define GMAC_REG_FCR 0x0018
#define GMAC_REG_BMR 0x1000
#define GMAC_REG_RDLAR 0x100c
#define GMAC_REG_TDLAR 0x1010
#define GMAC_REG_OMR 0x1018
#define MHZ(n) ((n) * 1000 * 1000)
#define NETSEC_TX_SHIFT_OWN_FIELD 31
#define NETSEC_TX_SHIFT_LD_FIELD 30
#define NETSEC_TX_SHIFT_DRID_FIELD 24
#define NETSEC_TX_SHIFT_PT_FIELD 21
#define NETSEC_TX_SHIFT_TDRID_FIELD 16
#define NETSEC_TX_SHIFT_CC_FIELD 15
#define NETSEC_TX_SHIFT_FS_FIELD 9
#define NETSEC_TX_LAST 8
#define NETSEC_TX_SHIFT_CO 7
#define NETSEC_TX_SHIFT_SO 6
#define NETSEC_TX_SHIFT_TRS_FIELD 4
#define NETSEC_RX_PKT_OWN_FIELD 31
#define NETSEC_RX_PKT_LD_FIELD 30
#define NETSEC_RX_PKT_SDRID_FIELD 24
#define NETSEC_RX_PKT_FR_FIELD 23
#define NETSEC_RX_PKT_ER_FIELD 21
#define NETSEC_RX_PKT_ERR_FIELD 16
#define NETSEC_RX_PKT_TDRID_FIELD 12
#define NETSEC_RX_PKT_FS_FIELD 9
#define NETSEC_RX_PKT_LS_FIELD 8
#define NETSEC_RX_PKT_CO_FIELD 6
#define NETSEC_RX_PKT_ERR_MASK 3
#define NETSEC_MAX_TX_PKT_LEN 1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018
#define NETSEC_RING_GMAC 15
#define NETSEC_RING_MAX 2
#define NETSEC_TCP_SEG_LEN_MAX 1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960
#define NETSEC_RX_CKSUM_NOTAVAIL 0
#define NETSEC_RX_CKSUM_OK 1
#define NETSEC_RX_CKSUM_NG 2
#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4)
#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19)
#define NETSEC_INT_PKTCNT_MAX 2047
#define NETSEC_FLOW_START_TH_MAX 95
#define NETSEC_FLOW_STOP_TH_MAX 95
#define NETSEC_FLOW_PAUSE_TIME_MIN 5
#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f
#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0)
#define NETSEC_CLK_EN_REG_DOM_G BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D BIT(0)
#define NETSEC_COM_INIT_REG_DB BIT(2)
#define NETSEC_COM_INIT_REG_CLS BIT(1)
#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \
				 NETSEC_COM_INIT_REG_DB)
#define NETSEC_SOFT_RST_REG_RESET 0
#define NETSEC_SOFT_RST_REG_RUN BIT(31)
#define NETSEC_DMA_CTRL_REG_STOP 1
#define MH_CTRL__MODE_TRANS BIT(20)
#define NETSEC_GMAC_CMD_ST_READ 0
#define NETSEC_GMAC_CMD_ST_WRITE BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY BIT(31)
#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080
#define NETSEC_GMAC_BMR_REG_RESET 0x00020181
#define NETSEC_GMAC_BMR_REG_SWR 0x00000001
#define NETSEC_GMAC_OMR_REG_ST BIT(13)
#define NETSEC_GMAC_OMR_REG_SR BIT(1)
#define NETSEC_GMAC_MCR_REG_IBN BIT(30)
#define NETSEC_GMAC_MCR_REG_CST BIT(25)
#define NETSEC_GMAC_MCR_REG_JE BIT(20)
#define NETSEC_MCR_PS BIT(15)
#define NETSEC_GMAC_MCR_REG_FES BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c
#define NETSEC_FCR_RFE BIT(2)
#define NETSEC_FCR_TFE BIT(1)
#define NETSEC_GMAC_GAR_REG_GW BIT(1)
#define NETSEC_GMAC_GAR_REG_GB BIT(0)
#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6
#define GMAC_REG_SHIFT_CR_GAR 2
#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5
#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000
#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000
#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30)
#define NETSEC_REG_DESC_TMR_MODE 4
#define NETSEC_REG_DESC_ENDIAN 0
#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1
#define NETSEC_MAC_DESC_INIT_REG_INIT 1
#define NETSEC_EEPROM_MAC_ADDRESS 0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C
#define NETSEC_EEPROM_HM_ME_SIZE 0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18
#define NETSEC_EEPROM_MH_ME_SIZE 0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20
#define NETSEC_EEPROM_PKT_ME_SIZE 0x24
#define DESC_NUM 256
#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
			       NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
#define DESC_SZ sizeof(struct netsec_de)
#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
#define NETSEC_XDP_PASS 0
#define NETSEC_XDP_CONSUMED BIT(0)
#define NETSEC_XDP_TX BIT(1)
#define NETSEC_XDP_REDIR BIT(2)

enum ring_id {
	NETSEC_RING_TX = 0,
	NETSEC_RING_RX
};

enum buf_type {
	TYPE_NETSEC_SKB = 0,
	TYPE_NETSEC_XDP_TX,
	TYPE_NETSEC_XDP_NDO,
};

struct netsec_desc {
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	dma_addr_t dma_addr;
	void *addr;
	u16 len;
	u8 buf_type;
};

struct netsec_desc_ring {
	dma_addr_t desc_dma;
	struct netsec_desc *desc;
	void *vaddr;
	u16 head, tail;
	u16 xdp_xmit; /* netsec_xdp_xmit packets */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	spinlock_t lock; /* XDP tx queue locking */
};

struct netsec_priv {
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
	struct bpf_prog *xdp_prog;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
	struct net_device *ndev;
	struct device_node *phy_np;
	struct phy_device *phydev;
	struct mii_bus *mii_bus;
	void __iomem *ioaddr;
	void __iomem *eeprom_base;
	struct device *dev;
	struct clk *clk;
	u32 msg_enable;
	u32 freq;
	u32 phy_addr;
	bool rx_cksum_offload_flag;
};

struct netsec_de { /* Netsec Descriptor layout */
	u32 attr;
	u32 data_buf_addr_up;
	u32 data_buf_addr_lw;
	u32 buf_len_info;
};

struct netsec_tx_pkt_ctrl {
	u16 tcp_seg_len;
	bool tcp_seg_offload_flag;
	bool cksum_offload_flag;
};

struct netsec_rx_pkt_info {
	int rx_cksum_result;
	int err_code;
	bool err_flag;
};

static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
{
	writel(val, priv->ioaddr + reg_addr);
}

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
{
	return readl(priv->ioaddr + reg_addr);
}

/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC 1000
#define TIMEOUT_SECONDARY_MS_MAC 100

static u32 netsec_clk_type(u32 freq)
{
	if (freq < MHZ(35))
		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
	if (freq < MHZ(60))
		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
	if (freq < MHZ(100))
		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
	if (freq < MHZ(150))
		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
	if (freq < MHZ(250))
		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;

	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
}

static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;

	while (--timeout && netsec_read(priv, addr) & mask)
		cpu_relax();
	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	while (--timeout && netsec_read(priv, addr) & mask)
		usleep_range(1000, 2000);

	if (timeout)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}

static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
{
	netsec_write(priv, MAC_REG_DATA, value);
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
	return netsec_wait_while_busy(priv,
				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
}

static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
{
	int ret;

	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
	ret = netsec_wait_while_busy(priv,
				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
	if (ret)
		return ret;

	*read = netsec_read(priv, MAC_REG_DATA);

	return 0;
}

static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
				      u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;
	int ret, data;

	do {
		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	do {
		usleep_range(1000, 2000);
		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout && !ret)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}

static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;

	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

	if (phydev->speed != SPEED_1000)
		value |= NETSEC_MCR_PS;

	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		value |= NETSEC_GMAC_MCR_REG_FES;

	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		value |= NETSEC_GMAC_MCR_REG_IBN;

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;

	return 0;
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);

static int netsec_phy_write(struct mii_bus *bus,
			    int phy_addr, int reg, u16 val)
{
	int status;
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_GAR,
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					    NETSEC_GMAC_GAR_REG_GB);

	/* The Developerbox uses an RTL8211E PHY, which has a compatibility
	 * problem with F_GMAC4: the RTL8211E expects the MDC clock to keep
	 * toggling for several clock cycles with MDIO high before entering
	 * the IDLE state. To meet this requirement, the netsec driver issues
	 * a dummy read (e.g. of the PHYID1 register at offset 0x2) right
	 * after each write.
	 */
	netsec_phy_read(bus, phy_addr, MII_PHYSID1);

	return status;
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
{
	struct netsec_priv *priv = bus->priv;
	u32 data;
	int ret;

	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					 NETSEC_GMAC_GAR_REG_GB);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
	if (ret)
		return ret;

	return data;
}

/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
				  struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "netsec", sizeof(info->driver));
	strscpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));
}

static int netsec_et_get_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	*et_coalesce = priv->et_coalesce;

	return 0;
}

static int netsec_et_set_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
		     priv->et_coalesce.tx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
		     priv->et_coalesce.tx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
		     priv->et_coalesce.rx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
		     priv->et_coalesce.rx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);

	return 0;
}

static u32 netsec_et_get_msglevel(struct net_device *dev)
{
	struct netsec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
{
	struct netsec_priv *priv = netdev_priv(dev);

	priv->msg_enable = datum;
}

static const struct ethtool_ops netsec_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = netsec_et_get_drvinfo,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_coalesce = netsec_et_get_coalesce,
	.set_coalesce = netsec_et_set_coalesce,
	.get_msglevel = netsec_et_get_msglevel,
	.set_msglevel = netsec_et_set_msglevel,
};

/************* NETDEV_OPS FOLLOW *************/

static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
			     const struct netsec_desc *desc)
{
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
		   (1 << NETSEC_RX_PKT_FS_FIELD) |
		   (1 << NETSEC_RX_PKT_LS_FIELD);

	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;
	de->attr = attr;
	dma_wmb();

	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
}

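/* Reclaim TX descriptors the hardware has released: unmap buffers the
 * driver mapped, free the skb or return the XDP frame, hand each slot
 * back to the driver and report completed work via
 * netdev_completed_queue().
 */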
static bool netsec_clean_tx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct xdp_frame_bulk bq;
	struct netsec_de *entry;
	int tail = dring->tail;
	unsigned int bytes;
	int cnt = 0;

	spin_lock(&dring->lock);

	bytes = 0;
	xdp_frame_bulk_init(&bq);
	entry = dring->vaddr + DESC_SZ * tail;

	rcu_read_lock(); /* need for xdp_return_frame_bulk */

	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
	       cnt < DESC_NUM) {
		struct netsec_desc *desc;
		int eop;

		desc = &dring->desc[tail];
		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
		dma_rmb();

		/* if buf_type is either TYPE_NETSEC_SKB or
		 * TYPE_NETSEC_XDP_NDO we mapped it
		 */
		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);

		if (!eop)
			goto next;

		if (desc->buf_type == TYPE_NETSEC_SKB) {
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);
		} else {
			bytes += desc->xdpf->len;
			if (desc->buf_type == TYPE_NETSEC_XDP_TX)
				xdp_return_frame_rx_napi(desc->xdpf);
			else
				xdp_return_frame_bulk(desc->xdpf, &bq);
		}
next:
		/* clean up so netsec_uninit_pkt_dring() won't free the skb
		 * again
		 */
		*desc = (struct netsec_desc){};

		/* entry->attr is not going to be accessed by the NIC until
		 * netsec_set_tx_de() is called. No need for a dma_wmb() here
		 */
		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;

		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;

		tail = dring->tail;
		entry = dring->vaddr + DESC_SZ * tail;
		cnt++;
	}
	xdp_flush_frame_bulk(&bq);

	rcu_read_unlock();

	spin_unlock(&dring->lock);

	if (!cnt)
		return false;

	/* reading the register clears the irq */
	netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

	priv->ndev->stats.tx_packets += cnt;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, cnt, bytes);

	return true;
}

static void netsec_process_tx(struct netsec_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	bool cleaned;

	cleaned = netsec_clean_tx_dring(priv);

	if (cleaned && netif_queue_stopped(ndev)) {
		/* Make sure we update the value, anyone stopping the queue
		 * after this will read the proper consumer idx
		 */
		smp_wmb();
		netif_wake_queue(ndev);
	}
}

static void *netsec_alloc_rx_data(struct netsec_priv *priv,
				  dma_addr_t *dma_handle, u16 *desc_len)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct page *page;

	page = page_pool_dev_alloc_pages(dring->page_pool);
	if (!page)
		return NULL;

	/* We allocate the same buffer length for XDP and non-XDP cases.
	 * page_pool API will map the whole page, skip what's needed for
	 * network payloads and/or XDP
	 */
	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
	/* Make sure the incoming payload fits in the page for XDP and non-XDP
	 * cases and reserve enough space for headroom + skb_shared_info
	 */
	*desc_len = NETSEC_RX_BUF_SIZE;

	return page_address(page);
}

static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	u16 idx = from;

	while (num) {
		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
		idx++;
		if (idx >= DESC_NUM)
			idx = 0;
		num--;
	}
}

static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
{
	if (likely(pkts))
		netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);
}

static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
				   u16 pkts)
{
	if (xdp_res & NETSEC_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & NETSEC_XDP_TX)
		netsec_xdp_ring_tx_db(priv, pkts);
}

static void netsec_set_tx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring,
			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
			     const struct netsec_desc *desc, void *buf)
{
	int idx = dring->head;
	struct netsec_de *de;
	u32 attr;

	de = dring->vaddr + (DESC_SZ * idx);

	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
	       (1 << NETSEC_TX_LAST) |
	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
	de->attr = attr;

	dring->desc[idx] = *desc;
	if (desc->buf_type == TYPE_NETSEC_SKB)
		dring->desc[idx].skb = buf;
	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
		dring->desc[idx].xdpf = buf;

	/* move head ahead */
	dring->head = (dring->head + 1) % DESC_NUM;
}

/* The current driver only supports 1 Txq, this should run under spin_lock() */
static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
				struct xdp_frame *xdpf, bool is_ndo)
{
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct page *page = virt_to_page(xdpf->data);
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	dma_addr_t dma_handle;
	u16 filled;

	if (tx_ring->head >= tx_ring->tail)
		filled = tx_ring->head - tx_ring->tail;
	else
		filled = tx_ring->head + DESC_NUM - tx_ring->tail;

	if (DESC_NUM - filled <= 1)
		return NETSEC_XDP_CONSUMED;

	if (is_ndo) {
		/* this is for ndo_xdp_xmit, the buffer needs mapping before
		 * sending
		 */
		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, dma_handle))
			return NETSEC_XDP_CONSUMED;
		tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
	} else {
		/* This is the device Rx buffer from page_pool. No need to remap
		 * just sync and send it
		 */
		struct netsec_desc_ring *rx_ring =
			&priv->desc_ring[NETSEC_RING_RX];
		enum dma_data_direction dma_dir =
			page_pool_get_dma_dir(rx_ring->page_pool);

		dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
			sizeof(*xdpf);
		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
					   dma_dir);
		tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
	}

	tx_desc.dma_addr = dma_handle;
	tx_desc.addr = xdpf->data;
	tx_desc.len = xdpf->len;

	netdev_sent_queue(priv->ndev, xdpf->len);
	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);

	return NETSEC_XDP_TX;
}

static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
{
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	u32 ret;

	if (unlikely(!xdpf))
		return NETSEC_XDP_CONSUMED;

	spin_lock(&tx_ring->lock);
	ret = netsec_xdp_queue_one(priv, xdpf, false);
	spin_unlock(&tx_ring->lock);

	return ret;
}

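/* Run the attached XDP program on one received buffer and map its verdict
 * (XDP_PASS/TX/REDIRECT/DROP/ABORTED) onto the driver's NETSEC_XDP_* result
 * codes, recycling the page back to the page_pool whenever the frame does
 * not leave the driver.
 */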
static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
			  struct xdp_buff *xdp)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	unsigned int sync, len = xdp->data_end - xdp->data;
	u32 ret = NETSEC_XDP_PASS;
	struct page *page;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	/* Due to xdp_adjust_tail, the for_device DMA sync must cover the
	 * maximum length the CPU may have touched.
	 */
	sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		ret = NETSEC_XDP_PASS;
		break;
	case XDP_TX:
		ret = netsec_xdp_xmit_back(priv, xdp);
		if (ret != NETSEC_XDP_TX) {
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(dring->page_pool, page, sync, true);
		}
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(priv->ndev, xdp, prog);
		if (!err) {
			ret = NETSEC_XDP_REDIR;
		} else {
			ret = NETSEC_XDP_CONSUMED;
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(dring->page_pool, page, sync, true);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(priv->ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->ndev, prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		ret = NETSEC_XDP_CONSUMED;
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(dring->page_pool, page, sync, true);
		break;
	}

	return ret;
}

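/* NAPI RX loop: walk descriptors owned by the CPU, run XDP if a program is
 * attached, otherwise build an skb around the page_pool buffer, then refill
 * the slot with a freshly allocated buffer and advance the tail pointer.
 */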
static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct net_device *ndev = priv->ndev;
	struct netsec_rx_pkt_info rx_info;
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u16 xdp_xmit = 0;
	u32 xdp_act = 0;
	int done = 0;

	xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);

	xdp_prog = READ_ONCE(priv->xdp_prog);
	dma_dir = page_pool_get_dma_dir(dring->page_pool);

	while (done < budget) {
		u16 idx = dring->tail;
		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
		struct netsec_desc *desc = &dring->desc[idx];
		struct page *page = virt_to_page(desc->addr);
		u32 xdp_result = NETSEC_XDP_PASS;
		struct sk_buff *skb = NULL;
		u16 pkt_len, desc_len;
		dma_addr_t dma_handle;
		void *buf_addr;

		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
			/* reading the register clears the irq */
			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
			break;
		}

		/* This barrier is needed to keep us from reading
		 * any other fields out of the netsec_de until we have
		 * verified the descriptor has been written back
		 */
		dma_rmb();
		done++;

		pkt_len = de->buf_len_info >> 16;
		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
			NETSEC_RX_PKT_ERR_MASK;
		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
		if (rx_info.err_flag) {
			netif_err(priv, drv, priv->ndev,
				  "%s: rx fail err(%d)\n", __func__,
				  rx_info.err_code);
			ndev->stats.rx_dropped++;
			dring->tail = (dring->tail + 1) % DESC_NUM;
			/* reuse buffer page frag */
			netsec_rx_fill(priv, idx, 1);
			continue;
		}
		rx_info.rx_cksum_result =
			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;

		/* allocate a fresh buffer and map it to the hardware.
		 * This will eventually replace the old buffer in the hardware
		 */
		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
		if (unlikely(!buf_addr))
			break;

		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
					dma_dir);
		prefetch(desc->addr);

		xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
				 pkt_len, false);

		if (xdp_prog) {
			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
			if (xdp_result != NETSEC_XDP_PASS) {
				xdp_act |= xdp_result;
				if (xdp_result == NETSEC_XDP_TX)
					xdp_xmit++;
				goto next;
			}
		}
		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);

		if (unlikely(!skb)) {
			/* If skb fails recycle_direct will either unmap and
			 * free the page or refill the cache depending on the
			 * cache state. Since we paid the allocation cost if
			 * building an skb fails try to put the page into cache
			 */
			page_pool_put_page(dring->page_pool, page, pkt_len,
					   true);
			netif_err(priv, drv, priv->ndev,
				  "rx failed to build skb\n");
			break;
		}

		skb_mark_for_recycle(skb);

		skb_reserve(skb, xdp.data - xdp.data_hard_start);
		skb_put(skb, xdp.data_end - xdp.data);
		skb->protocol = eth_type_trans(skb, priv->ndev);

		if (priv->rx_cksum_offload_flag &&
		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

next:
		if (skb)
			napi_gro_receive(&priv->napi, skb);
		if (skb || xdp_result) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += xdp.data_end - xdp.data;
		}

		/* Update the descriptor with fresh buffers */
		desc->len = desc_len;
		desc->dma_addr = dma_handle;
		desc->addr = buf_addr;

		netsec_rx_fill(priv, idx, 1);
		dring->tail = (dring->tail + 1) % DESC_NUM;
	}
	netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);

	return done;
}

static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
	struct netsec_priv *priv;
	int done;

	priv = container_of(napi, struct netsec_priv, napi);

	netsec_process_tx(priv);
	done = netsec_process_rx(priv, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->reglock, flags);
		netsec_write(priv, NETSEC_REG_INTEN_SET,
			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
		spin_unlock_irqrestore(&priv->reglock, flags);
	}

	return done;
}

static int netsec_desc_used(struct netsec_desc_ring *dring)
{
	int used;

	if (dring->head >= dring->tail)
		used = dring->head - dring->tail;
	else
		used = dring->head + DESC_NUM - dring->tail;

	return used;
}

static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

	/* keep tail from touching the queue */
	if (DESC_NUM - used < 2) {
		netif_stop_queue(priv->ndev);

		/* Make sure we read the updated value in case
		 * descriptors got freed
		 */
		smp_rmb();

		used = netsec_desc_used(dring);
		if (DESC_NUM - used < 2)
			return NETDEV_TX_BUSY;

		netif_wake_queue(priv->ndev);
	}

	return 0;
}

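/* .ndo_start_xmit: set up checksum/TSO offload control bits, DMA-map the
 * linear skb data, post one TX descriptor and kick the hardware by writing
 * the packet count register.
 */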
static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	u16 tso_seg_len = 0;
	int filled;

	spin_lock_bh(&dring->lock);
	filled = netsec_desc_used(dring);
	if (netsec_check_stop_tx(priv, filled)) {
		spin_unlock_bh(&dring->lock);
		net_warn_ratelimited("%s %s Tx queue full\n",
				     dev_name(priv->dev), ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_ctrl.cksum_offload_flag = true;

	if (skb_is_gso(skb))
		tso_seg_len = skb_shinfo(skb)->gso_size;

	if (tso_seg_len > 0) {
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr, 0);
		} else {
			tcp_v6_gso_csum_prep(skb);
		}

		tx_ctrl.tcp_seg_offload_flag = true;
		tx_ctrl.tcp_seg_len = tso_seg_len;
	}

	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		spin_unlock_bh(&dring->lock);
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	tx_desc.addr = skb->data;
	tx_desc.len = skb_headlen(skb);
	tx_desc.buf_type = TYPE_NETSEC_SKB;

	skb_tx_timestamp(skb);
	netdev_sent_queue(priv->ndev, skb->len);

	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
	spin_unlock_bh(&dring->lock);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

	return NETDEV_TX_OK;
}

static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	struct netsec_desc *desc;
	u16 idx;

	if (!dring->vaddr || !dring->desc)
		return;

	for (idx = 0; idx < DESC_NUM; idx++) {
		desc = &dring->desc[idx];
		if (!desc->addr)
			continue;

		if (id == NETSEC_RING_RX) {
			struct page *page = virt_to_page(desc->addr);

			page_pool_put_full_page(dring->page_pool, page, false);
		} else if (id == NETSEC_RING_TX) {
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(desc->skb);
		}
	}

	/* Rx is currently using page_pool */
	if (id == NETSEC_RING_RX) {
		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
			xdp_rxq_info_unreg(&dring->xdp_rxq);
		page_pool_destroy(dring->page_pool);
	}

	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

	dring->head = 0;
	dring->tail = 0;

	if (id == NETSEC_RING_TX)
		netdev_reset_queue(priv->ndev);
}

static void netsec_free_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	if (dring->vaddr) {
		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
				  dring->vaddr, dring->desc_dma);
		dring->vaddr = NULL;
	}

	kfree(dring->desc);
	dring->desc = NULL;
}

static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
					  &dring->desc_dma, GFP_KERNEL);
	if (!dring->vaddr)
		goto err;

	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
	if (!dring->desc)
		goto err;

	return 0;
err:
	netsec_free_dring(priv, id);
	return -ENOMEM;
}

static void netsec_setup_tx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	int i;

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_de *de;

		de = dring->vaddr + (DESC_SZ * i);
		/* de->attr is not going to be accessed by the NIC
		 * until netsec_set_tx_de() is called.
		 * No need for a dma_wmb() here
		 */
		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
	}
}

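/* Create the page_pool backing the RX ring, register it with the XDP rxq
 * info, and pre-fill every descriptor slot with a mapped buffer. The DMA
 * direction is bidirectional when an XDP program is attached so that
 * XDP_TX can reuse the same pages.
 */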
static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
	struct page_pool_params pp_params = {
		.order = 0,
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = DESC_NUM,
		.nid = NUMA_NO_NODE,
		.dev = priv->dev,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = NETSEC_RXBUF_HEADROOM,
		.max_len = NETSEC_RX_BUF_SIZE,
	};
	int i, err;

	dring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(dring->page_pool)) {
		err = PTR_ERR(dring->page_pool);
		dring->page_pool = NULL;
		goto err_out;
	}

	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0, priv->napi.napi_id);
	if (err)
		goto err_out;

	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 dring->page_pool);
	if (err)
		goto err_out;

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_desc *desc = &dring->desc[i];
		dma_addr_t dma_handle;
		void *buf;
		u16 len;

		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
		if (!buf) {
			err = -ENOMEM;
			goto err_out;
		}
		desc->dma_addr = dma_handle;
		desc->addr = buf;
		desc->len = len;
	}

	netsec_rx_fill(priv, 0, DESC_NUM);

	return 0;

err_out:
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
	return err;
}

static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
					   u32 addr_h, u32 addr_l, u32 size)
{
	u64 base = (u64)addr_h << 32 | addr_l;
	void __iomem *ucode;
	u32 i;

	ucode = ioremap(base, size * sizeof(u32));
	if (!ucode)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		netsec_write(priv, reg, readl(ucode + i * 4));

	iounmap(ucode);
	return 0;
}

static int netsec_netdev_load_microcode(struct netsec_priv *priv)
{
	u32 addr_h, addr_l, size;
	int err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = 0;
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	return 0;
}

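/* Full controller reset: stop the DMA engines, soft-reset the core, program
 * the descriptor ring base addresses, optionally reload the microcode from
 * EEPROM, then switch the engine to normal mode with interrupts masked.
 */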
  1129. static int netsec_reset_hardware(struct netsec_priv *priv,
  1130. bool load_ucode)
  1131. {
  1132. u32 value;
  1133. int err;
  1134. /* stop DMA engines */
  1135. if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
  1136. netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
  1137. NETSEC_DMA_CTRL_REG_STOP);
  1138. netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
  1139. NETSEC_DMA_CTRL_REG_STOP);
  1140. while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
  1141. NETSEC_DMA_CTRL_REG_STOP)
  1142. cpu_relax();
  1143. while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
  1144. NETSEC_DMA_CTRL_REG_STOP)
  1145. cpu_relax();
  1146. }
  1147. netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
  1148. netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
  1149. netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
  1150. while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
  1151. cpu_relax();
  1152. /* set desc_start addr */
  1153. netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
  1154. upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
  1155. netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
  1156. lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
  1157. netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
  1158. upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
  1159. netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
  1160. lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
  1161. /* set normal tx dring ring config */
  1162. netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
  1163. 1 << NETSEC_REG_DESC_ENDIAN);
  1164. netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
  1165. 1 << NETSEC_REG_DESC_ENDIAN);
  1166. if (load_ucode) {
  1167. err = netsec_netdev_load_microcode(priv);
  1168. if (err) {
  1169. netif_err(priv, probe, priv->ndev,
  1170. "%s: failed to load microcode (%d)\n",
  1171. __func__, err);
  1172. return err;
  1173. }
  1174. }
  1175. /* start DMA engines */
  1176. netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
  1177. netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
  1178. usleep_range(1000, 2000);
  1179. if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
  1180. NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
  1181. netif_err(priv, probe, priv->ndev,
  1182. "microengine start failed\n");
  1183. return -ENXIO;
  1184. }
  1185. netsec_write(priv, NETSEC_REG_TOP_STATUS,
  1186. NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
  1187. value = NETSEC_PKT_CTRL_REG_MODE_NRM;
  1188. if (priv->ndev->mtu > ETH_DATA_LEN)
  1189. value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
  1190. /* change to normal mode */
  1191. netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
  1192. netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
  1193. while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
  1194. NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
  1195. cpu_relax();
  1196. /* clear any pending EMPTY/ERR irq status */
  1197. netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
  1198. /* Disable TX & RX intr */
  1199. netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
  1200. return 0;
  1201. }
  1202. static int netsec_start_gmac(struct netsec_priv *priv)
  1203. {
  1204. struct phy_device *phydev = priv->ndev->phydev;
  1205. u32 value = 0;
  1206. int ret;
  1207. if (phydev->speed != SPEED_1000)
  1208. value = (NETSEC_GMAC_MCR_REG_CST |
  1209. NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
  1210. if (netsec_mac_write(priv, GMAC_REG_MCR, value))
  1211. return -ETIMEDOUT;
  1212. if (netsec_mac_write(priv, GMAC_REG_BMR,
  1213. NETSEC_GMAC_BMR_REG_RESET))
  1214. return -ETIMEDOUT;
  1215. /* Wait soft reset */
  1216. usleep_range(1000, 5000);
  1217. ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
  1218. if (ret)
  1219. return ret;
  1220. if (value & NETSEC_GMAC_BMR_REG_SWR)
  1221. return -EAGAIN;
  1222. netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
  1223. if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
  1224. return -ETIMEDOUT;
  1225. netsec_write(priv, MAC_REG_DESC_INIT, 1);
  1226. if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
  1227. return -ETIMEDOUT;
  1228. if (netsec_mac_write(priv, GMAC_REG_BMR,
  1229. NETSEC_GMAC_BMR_REG_COMMON))
  1230. return -ETIMEDOUT;
  1231. if (netsec_mac_write(priv, GMAC_REG_RDLAR,
  1232. NETSEC_GMAC_RDLAR_REG_COMMON))
  1233. return -ETIMEDOUT;
  1234. if (netsec_mac_write(priv, GMAC_REG_TDLAR,
  1235. NETSEC_GMAC_TDLAR_REG_COMMON))
  1236. return -ETIMEDOUT;
  1237. if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
  1238. return -ETIMEDOUT;
  1239. ret = netsec_mac_update_to_phy_state(priv);
  1240. if (ret)
  1241. return ret;
  1242. ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
  1243. if (ret)
  1244. return ret;
  1245. value |= NETSEC_GMAC_OMR_REG_SR;
  1246. value |= NETSEC_GMAC_OMR_REG_ST;
  1247. netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
  1248. netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
  1249. netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce, NULL, NULL);
  1250. if (netsec_mac_write(priv, GMAC_REG_OMR, value))
  1251. return -ETIMEDOUT;
  1252. return 0;
  1253. }
  1254. static int netsec_stop_gmac(struct netsec_priv *priv)
  1255. {
  1256. u32 value;
  1257. int ret;
  1258. ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
  1259. if (ret)
  1260. return ret;
  1261. value &= ~NETSEC_GMAC_OMR_REG_SR;
  1262. value &= ~NETSEC_GMAC_OMR_REG_ST;
  1263. /* disable all interrupts */
  1264. netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
  1265. netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
  1266. return netsec_mac_write(priv, GMAC_REG_OMR, value);
  1267. }
  1268. static void netsec_phy_adjust_link(struct net_device *ndev)
  1269. {
  1270. struct netsec_priv *priv = netdev_priv(ndev);
  1271. if (ndev->phydev->link)
  1272. netsec_start_gmac(priv);
  1273. else
  1274. netsec_stop_gmac(priv);
  1275. phy_print_status(ndev->phydev);
  1276. }
static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
{
	struct netsec_priv *priv = dev_id;
	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
	unsigned long flags;

	/* Disable interrupts */
	if (status & NETSEC_IRQ_TX) {
		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
	}
	if (status & NETSEC_IRQ_RX) {
		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
	}

	spin_lock_irqsave(&priv->reglock, flags);
	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
	spin_unlock_irqrestore(&priv->reglock, flags);

	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

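/* ndo_open: take a runtime PM reference, set up the TX/RX descriptor
 * rings, request the (shared) interrupt, connect the PHY (via the DT
 * phy-handle or the directly registered phydev), then start the PHY,
 * GMAC, NAPI and the TX queue before unmasking TX/RX interrupts.
 */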
static int netsec_netdev_open(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;

	pm_runtime_get_sync(priv->dev);

	netsec_setup_tx_dring(priv);
	ret = netsec_setup_rx_dring(priv);
	if (ret) {
		netif_err(priv, probe, priv->ndev,
			  "%s: fail setup ring\n", __func__);
		goto err1;
	}

	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
			  IRQF_SHARED, "netsec", priv);
	if (ret) {
		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
		goto err2;
	}

	if (dev_of_node(priv->dev)) {
		if (!of_phy_connect(priv->ndev, priv->phy_np,
				    netsec_phy_adjust_link, 0,
				    priv->phy_interface)) {
			netif_err(priv, link, priv->ndev, "missing PHY\n");
			ret = -ENODEV;
			goto err3;
		}
	} else {
		ret = phy_connect_direct(priv->ndev, priv->phydev,
					 netsec_phy_adjust_link,
					 priv->phy_interface);
		if (ret) {
			netif_err(priv, link, priv->ndev,
				  "phy_connect_direct() failed (%d)\n", ret);
			goto err3;
		}
	}

	phy_start(ndev->phydev);

	netsec_start_gmac(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	/* Enable TX+RX intr. */
	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);

	return 0;
err3:
	free_irq(priv->ndev->irq, priv);
err2:
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
err1:
	pm_runtime_put_sync(priv->dev);
	return ret;
}

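/* ndo_stop: the reverse of open - stop the queue and NAPI, mask all
 * interrupts, stop the GMAC, free the IRQ and the ring buffers,
 * disconnect the PHY, reset the hardware and drop the runtime PM
 * reference.
 */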
static int netsec_netdev_stop(struct net_device *ndev)
{
	int ret;
	struct netsec_priv *priv = netdev_priv(ndev);

	netif_stop_queue(priv->ndev);
	dma_wmb();

	napi_disable(&priv->napi);

	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
	netsec_stop_gmac(priv);

	free_irq(priv->ndev->irq, priv);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);

	ret = netsec_reset_hardware(priv, false);

	pm_runtime_put_sync(priv->dev);

	return ret;
}

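/* ndo_init: allocate the TX/RX descriptor rings and reset the hardware
 * once at netdev registration time. The PHY is put into power-down
 * around the reset and its previous BMCR state is restored afterwards.
 */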
static int netsec_netdev_init(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;
	u16 data;

	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);

	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
	if (ret)
		return ret;

	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
	if (ret)
		goto err1;

	/* set phy power down */
	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
			 data | BMCR_PDOWN);

	ret = netsec_reset_hardware(priv, true);
	if (ret)
		goto err2;

	/* Restore phy power state */
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);

	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);

	return 0;
err2:
	netsec_free_dring(priv, NETSEC_RING_RX);
err1:
	netsec_free_dring(priv, NETSEC_RING_TX);
	return ret;
}

static void netsec_netdev_uninit(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);
}

static int netsec_netdev_set_features(struct net_device *ndev,
				      netdev_features_t features)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);

	return 0;
}

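/* ndo_xdp_xmit: queue up to n XDP frames on the TX ring under the ring
 * lock, counting them in xdp_xmit. The TX doorbell is only rung (and the
 * pending count reset) when the caller passes XDP_XMIT_FLUSH. Returns
 * the number of frames actually queued.
 */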
static int netsec_xdp_xmit(struct net_device *ndev, int n,
			   struct xdp_frame **frames, u32 flags)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	int i, nxmit = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	spin_lock(&tx_ring->lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = netsec_xdp_queue_one(priv, xdpf, true);
		if (err != NETSEC_XDP_TX)
			break;

		tx_ring->xdp_xmit++;
		nxmit++;
	}
	spin_unlock(&tx_ring->lock);

	if (unlikely(flags & XDP_XMIT_FLUSH)) {
		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
		tx_ring->xdp_xmit = 0;
	}

	return nxmit;
}

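/* Attach or detach an XDP program. If the interface is running it is
 * stopped and reopened around the swap so the rings are rebuilt, and the
 * reference on the previous program is dropped.
 */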
static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
			    struct netlink_ext_ack *extack)
{
	struct net_device *dev = priv->ndev;
	struct bpf_prog *old_prog;

	/* For now, support only standard MTU-sized frames */
	if (prog && dev->mtu > 1500) {
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
		return -EOPNOTSUPP;
	}

	if (netif_running(dev))
		netsec_netdev_stop(dev);

	/* Detach old prog, if any */
	old_prog = xchg(&priv->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (netif_running(dev))
		netsec_netdev_open(dev);

	return 0;
}

static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops netsec_netdev_ops = {
	.ndo_init		= netsec_netdev_init,
	.ndo_uninit		= netsec_netdev_uninit,
	.ndo_open		= netsec_netdev_open,
	.ndo_stop		= netsec_netdev_stop,
	.ndo_start_xmit		= netsec_netdev_start_xmit,
	.ndo_set_features	= netsec_netdev_set_features,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl,
	.ndo_xdp_xmit		= netsec_xdp_xmit,
	.ndo_bpf		= netsec_xdp,
};

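/* Device-tree probe: read 'phy-mode' and 'phy-handle', override the PHY
 * mode on the SynQuacer Developer Box where old firmware reports it
 * incorrectly, and take the PHY reference clock to learn its rate.
 */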
static int netsec_of_probe(struct platform_device *pdev,
			   struct netsec_priv *priv, u32 *phy_addr)
{
	int err;

	err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
	if (err) {
		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
		return err;
	}

	/*
	 * SynQuacer is physically configured with TX and RX delays, but the
	 * standard firmware claimed otherwise for a long time, so ignore
	 * what the firmware reports.
	 */
	if (of_machine_is_compatible("socionext,developer-box") &&
	    priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) {
		dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n");
		priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	}

	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_np) {
		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
		return -EINVAL;
	}

	*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);

	priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
	if (IS_ERR(priv->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
				     "phy_ref_clk not found\n");
	priv->freq = clk_get_rate(priv->clk);

	return 0;
}

static int netsec_acpi_probe(struct platform_device *pdev,
			     struct netsec_priv *priv, u32 *phy_addr)
{
	int ret;

	if (!IS_ENABLED(CONFIG_ACPI))
		return -ENODEV;

	/* ACPI systems are assumed to configure the PHY in firmware, so
	 * there is really no need to discover the PHY mode from the DSDT.
	 * Since firmware is known to exist in the field that configures the
	 * PHY correctly but passes the wrong mode string in the phy-mode
	 * device property, we have no choice but to ignore it.
	 */
	priv->phy_interface = PHY_INTERFACE_MODE_NA;

	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "missing required property 'phy-channel'\n");

	ret = device_property_read_u32(&pdev->dev,
				       "socionext,phy-clock-frequency",
				       &priv->freq);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "missing required property 'socionext,phy-clock-frequency'\n");
	return 0;
}

static void netsec_unregister_mdio(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->phydev;

	if (!dev_of_node(priv->dev) && phydev) {
		phy_device_remove(phydev);
		phy_device_free(phydev);
	}

	mdiobus_unregister(priv->mii_bus);
}

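/* Register the MDIO bus. On DT systems the bus is registered against the
 * "mdio" subnode (or the device node itself for older firmware); on ACPI
 * systems auto-probing is masked and the single PHY at phy_addr is looked
 * up and registered by hand.
 */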
static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
{
	struct mii_bus *bus;
	int ret;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "SNI NETSEC MDIO";
	bus->read = netsec_phy_read;
	bus->write = netsec_phy_write;
	bus->parent = priv->dev;
	priv->mii_bus = bus;

	if (dev_of_node(priv->dev)) {
		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);

		mdio_node = of_get_child_by_name(parent, "mdio");
		if (mdio_node) {
			parent = mdio_node;
		} else {
			/* older firmware doesn't populate the mdio subnode;
			 * warn and allow the firmware to be upgraded in due
			 * time.
			 */
			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
		}

		ret = of_mdiobus_register(bus, parent);
		of_node_put(mdio_node);

		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}
	} else {
		/* Mask out all PHYs from auto probing. */
		bus->phy_mask = ~0;
		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}

		priv->phydev = get_phy_device(bus, phy_addr, false);
		if (IS_ERR(priv->phydev)) {
			ret = PTR_ERR(priv->phydev);
			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
			priv->phydev = NULL;
			mdiobus_unregister(bus);
			return -ENODEV;
		}

		ret = phy_device_register(priv->phydev);
		if (ret) {
			phy_device_free(priv->phydev);
			mdiobus_unregister(bus);
			dev_err(priv->dev,
				"phy_device_register err(%d)\n", ret);
		}
	}

	return ret;
}

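/* Platform probe: map the MMIO and EEPROM regions, obtain the MAC address
 * (device property first, then the EEPROM copy, else a random one), run
 * the DT or ACPI specific probe, verify the F_TAIKI hardware version, and
 * register NAPI, the MDIO bus and the net_device. The runtime PM
 * reference taken here is dropped again before returning.
 */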
static int netsec_probe(struct platform_device *pdev)
{
	struct resource *mmio_res, *eeprom_res;
	struct netsec_priv *priv;
	u32 hw_ver, phy_addr = 0;
	struct net_device *ndev;
	int ret;
	int irq;

	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mmio_res) {
		dev_err(&pdev->dev, "No MMIO resource found.\n");
		return -ENODEV;
	}

	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!eeprom_res) {
		dev_info(&pdev->dev, "No EEPROM resource found.\n");
		return -ENODEV;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);

	spin_lock_init(&priv->reglock);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, priv);
	ndev->irq = irq;
	priv->dev = &pdev->dev;
	priv->ndev = ndev;

	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   NETIF_MSG_LINK | NETIF_MSG_PROBE;

	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    resource_size(mmio_res));
	if (!priv->ioaddr) {
		dev_err(&pdev->dev, "devm_ioremap() failed\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
					 resource_size(eeprom_res));
	if (!priv->eeprom_base) {
		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	ret = device_get_ethdev_address(&pdev->dev, ndev);
	if (ret && priv->eeprom_base) {
		void __iomem *macp = priv->eeprom_base +
					NETSEC_EEPROM_MAC_ADDRESS;
		u8 addr[ETH_ALEN];

		/* the EEPROM holds the MAC address byte-swapped within
		 * 32-bit words, hence the reordering below
		 */
		addr[0] = readb(macp + 3);
		addr[1] = readb(macp + 2);
		addr[2] = readb(macp + 1);
		addr[3] = readb(macp + 0);
		addr[4] = readb(macp + 7);
		addr[5] = readb(macp + 6);
		eth_hw_addr_set(ndev, addr);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);
	}

	if (dev_of_node(&pdev->dev))
		ret = netsec_of_probe(pdev, priv, &phy_addr);
	else
		ret = netsec_acpi_probe(pdev, priv, &phy_addr);
	if (ret)
		goto free_ndev;

	priv->phy_addr = phy_addr;

	if (!priv->freq) {
		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
		ret = -ENODEV;
		goto free_ndev;
	}

	/* default for throughput */
	priv->et_coalesce.rx_coalesce_usecs = 500;
	priv->et_coalesce.rx_max_coalesced_frames = 8;
	priv->et_coalesce.tx_coalesce_usecs = 500;
	priv->et_coalesce.tx_max_coalesced_frames = 8;

	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
				       &ndev->max_mtu);
	if (ret < 0)
		ndev->max_mtu = ETH_DATA_LEN;

	/* runtime PM is held only across probe; open/close take their own
	 * references
	 */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
	/* this driver only supports F_TAIKI style NETSEC */
	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
		ret = -ENODEV;
		goto pm_disable;
	}

	dev_info(&pdev->dev, "hardware revision %d.%d\n",
		 hw_ver >> 16, hw_ver & 0xffff);

	netif_napi_add(ndev, &priv->napi, netsec_napi_poll);

	ndev->netdev_ops = &netsec_netdev_ops;
	ndev->ethtool_ops = &netsec_ethtool_ops;

	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
			  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features = ndev->features;

	priv->rx_cksum_offload_flag = true;

	ret = netsec_register_mdio(priv, phy_addr);
	if (ret)
		goto unreg_napi;

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "Failed to set DMA mask\n");

	ret = register_netdev(ndev);
	if (ret) {
		netif_err(priv, probe, ndev, "register_netdev() failed\n");
		goto unreg_mii;
	}

	pm_runtime_put_sync(&pdev->dev);
	return 0;

unreg_mii:
	netsec_unregister_mdio(priv);
unreg_napi:
	netif_napi_del(&priv->napi);
pm_disable:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
free_ndev:
	free_netdev(ndev);
	dev_err(&pdev->dev, "init failed\n");
	return ret;
}

static int netsec_remove(struct platform_device *pdev)
{
	struct netsec_priv *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);

	netsec_unregister_mdio(priv);

	netif_napi_del(&priv->napi);

	pm_runtime_disable(&pdev->dev);
	free_netdev(priv->ndev);

	return 0;
}

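/* Runtime PM callbacks: on suspend the NETSEC clock-enable register is
 * cleared and the reference clock released; resume re-enables the clock
 * and the D/C/G clock domains.
 */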
#ifdef CONFIG_PM
static int netsec_runtime_suspend(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	netsec_write(priv, NETSEC_REG_CLK_EN, 0);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int netsec_runtime_resume(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	clk_prepare_enable(priv->clk);

	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
					      NETSEC_CLK_EN_REG_DOM_C |
					      NETSEC_CLK_EN_REG_DOM_G);
	return 0;
}
#endif

static const struct dev_pm_ops netsec_pm_ops = {
	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
};

static const struct of_device_id netsec_dt_ids[] = {
	{ .compatible = "socionext,synquacer-netsec" },
	{ }
};
MODULE_DEVICE_TABLE(of, netsec_dt_ids);

#ifdef CONFIG_ACPI
static const struct acpi_device_id netsec_acpi_ids[] = {
	{ "SCX0001" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
#endif

static struct platform_driver netsec_driver = {
	.probe	= netsec_probe,
	.remove	= netsec_remove,
	.driver = {
		.name = "netsec",
		.pm = &netsec_pm_ops,
		.of_match_table = netsec_dt_ids,
		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
	},
};
module_platform_driver(netsec_driver);

MODULE_AUTHOR("Jassi Brar <[email protected]>");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_DESCRIPTION("NETSEC Ethernet driver");
MODULE_LICENSE("GPL");