dwmac-qcom-ethqos.c 77 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760
  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (c) 2018-19, Linaro Limited
  3. // Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  4. #include <linux/module.h>
  5. #include <linux/of.h>
  6. #include <linux/of_device.h>
  7. #include <linux/platform_device.h>
  8. #include <linux/phy.h>
  9. #include <linux/regulator/consumer.h>
  10. #include <linux/of_gpio.h>
  11. #include <linux/io.h>
  12. #include <linux/iopoll.h>
  13. #include <linux/mii.h>
  14. #include <linux/of_mdio.h>
  15. #include <linux/slab.h>
  16. #include <linux/poll.h>
  17. #include <linux/debugfs.h>
  18. #include <linux/iommu.h>
  19. #include <linux/micrel_phy.h>
  20. #include <linux/rtnetlink.h>
  21. #include <linux/tcp.h>
  22. #include <linux/ip.h>
  23. #include <linux/ipv6.h>
  24. #include "stmmac.h"
  25. #include "stmmac_platform.h"
  26. #include "dwmac-qcom-ethqos.h"
  27. #include "stmmac_ptp.h"
  28. #include "dwmac-qcom-serdes.h"
  29. #define RGMII_IO_MACRO_DEBUG1 0x20
  30. #define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28
  31. /* RGMII_IO_MACRO_CONFIG fields */
  32. #define RGMII_CONFIG_FUNC_CLK_EN BIT(30)
  33. #define RGMII_CONFIG_POS_NEG_DATA_SEL BIT(23)
  34. #define RGMII_CONFIG_GPIO_CFG_RX_INT GENMASK(21, 20)
  35. #if IS_ENABLED(CONFIG_DWXGMAC_QCOM_VER4)
  36. #define RGMII_CONFIG_GPIO_CFG_TX_INT GENMASK(21, 19)
  37. #define RGMII_CONFIG_MAX_SPD_PRG_9 GENMASK(18, 10)
  38. #define RGMII_CONFIG_MAX_SPD_PRG_2 GENMASK(9, 6)
  39. #else
  40. #define RGMII_CONFIG_GPIO_CFG_TX_INT GENMASK(19, 17)
  41. #define RGMII_CONFIG_MAX_SPD_PRG_9 GENMASK(16, 8)
  42. #define RGMII_CONFIG_MAX_SPD_PRG_2 GENMASK(7, 6)
  43. #endif
  44. #define RGMII_CONFIG_INTF_SEL GENMASK(5, 4)
  45. #define RGMII_CONFIG_BYPASS_TX_ID_EN BIT(3)
  46. #define RGMII_CONFIG_LOOPBACK_EN BIT(2)
  47. #define RGMII_CONFIG_PROG_SWAP BIT(1)
  48. #define RGMII_CONFIG_DDR_MODE BIT(0)
  49. /*RGMII DLL CONFIG*/
  50. #define HSR_DLL_CONFIG 0x000B642C
  51. #define HSR_DLL_CONFIG_2 0xA001
  52. #define HSR_MACRO_CONFIG_2 0x01
  53. #define HSR_DLL_TEST_CTRL 0x1400000
  54. #define HSR_DDR_CONFIG 0x80040868
  55. #define HSR_SDCC_USR_CTRL 0x2C010800
  56. #define MACRO_CONFIG_2_MASK GENMASK(24, 17)
  57. #define DLL_CONFIG_2_MASK GENMASK(22, 0)
  58. #define HSR_SDCC_DLL_TEST_CTRL 0x1800000
  59. #define DDR_CONFIG_PRG_RCLK_DLY 115
  60. #define DLL_BYPASS BIT(30)
  61. /* SDCC_HC_REG_DLL_CONFIG fields */
  62. #define SDCC_DLL_CONFIG_DLL_RST BIT(30)
  63. #define SDCC_DLL_CONFIG_PDN BIT(29)
  64. #define SDCC_DLL_CONFIG_MCLK_FREQ GENMASK(26, 24)
  65. #define SDCC_DLL_CONFIG_CDR_SELEXT GENMASK(23, 20)
  66. #define SDCC_DLL_CONFIG_CDR_EXT_EN BIT(19)
  67. #define SDCC_DLL_CONFIG_CK_OUT_EN BIT(18)
  68. #define SDCC_DLL_CONFIG_CDR_EN BIT(17)
  69. #define SDCC_DLL_CONFIG_DLL_EN BIT(16)
  70. #define SDCC_DLL_MCLK_GATING_EN BIT(5)
  71. #define SDCC_DLL_CDR_FINE_PHASE GENMASK(3, 2)
  72. /* SDCC_HC_REG_DDR_CONFIG fields */
  73. #define SDCC_DDR_CONFIG_PRG_DLY_EN BIT(31)
  74. #define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY GENMASK(26, 21)
  75. #define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE GENMASK(29, 27)
  76. #define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN BIT(30)
  77. #define SDCC_DDR_CONFIG_PRG_RCLK_DLY GENMASK(8, 0)
  78. /* SDCC_HC_REG_DLL_CONFIG2 fields */
  79. #define SDCC_DLL_CONFIG2_DLL_CLOCK_DIS BIT(21)
  80. #define SDCC_DLL_CONFIG2_MCLK_FREQ_CALC GENMASK(17, 10)
  81. #define SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL GENMASK(3, 2)
  82. #define SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW BIT(1)
  83. #define SDCC_DLL_CONFIG2_DDR_CAL_EN BIT(0)
  84. /* SDC4_STATUS bits */
  85. #define SDC4_STATUS_DLL_LOCK BIT(7)
  86. /* RGMII_IO_MACRO_CONFIG2 fields */
  87. #if IS_ENABLED(CONFIG_DWXGMAC_QCOM_VER4)
  88. #define RGMII_CONFIG2_RSVD_CONFIG15 GENMASK(31, 24)
  89. #else
  90. #define RGMII_CONFIG2_RSVD_CONFIG15 GENMASK(31, 17)
  91. #endif
  92. #define RGMII_CONFIG2_MODE_EN_VIA_GMII BIT(21)
  93. #define RGMII_CONFIG2_MAX_SPD_PRG_3 GENMASK(20, 17)
  94. #define RGMII_CONFIG2_RGMII_CLK_SEL_CFG BIT(16)
  95. #define RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN BIT(13)
  96. #define RGMII_CONFIG2_CLK_DIVIDE_SEL BIT(12)
  97. #define RGMII_CONFIG2_RX_PROG_SWAP BIT(7)
  98. #define RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL BIT(6)
  99. #define RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN BIT(5)
  100. /* EMAC_WRAPPER_SGMII_PHY_CNTRL0 fields */
  101. #define SGMII_PHY_CNTRL0_2P5G_1G_CLK_SEL GENMASK(6, 5)
  102. /* EMAC_WRAPPER_SGMII_PHY_CNTRL1 fields */
  103. #define SGMII_PHY_CNTRL1_RGMII_SGMII_CLK_MUX_SEL BIT(0)
  104. #define SGMII_PHY_CNTRL1_USXGMII_GMII_MASTER_CLK_MUX_SEL BIT(4)
  105. #define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3)
  106. /* EMAC_WRAPPER_USXGMII_MUX_SEL fields */
  107. #define USXGMII_CLK_BLK_GMII_CLK_BLK_SEL BIT(1)
  108. #define USXGMII_CLK_BLK_CLK_EN BIT(0)
  109. /* RGMII_IO_MACRO_SCRATCH_2 fields */
  110. #define RGMII_SCRATCH2_MAX_SPD_PRG_4 GENMASK(5, 2)
  111. #define RGMII_SCRATCH2_MAX_SPD_PRG_5 GENMASK(9, 6)
  112. #define RGMII_SCRATCH2_MAX_SPD_PRG_6 GENMASK(13, 10)
  113. /*RGMIII_IO_MACRO_BYPASS fields */
  114. #define RGMII_BYPASS_EN BIT(0)
  115. #define EMAC_I0_EMAC_CORE_HW_VERSION_RGOFFADDR 0x00000070
  116. #define EMAC_HW_v2_3_2_RG 0x20030002
  117. #define MII_BUSY 0x00000001
  118. #define MII_WRITE 0x00000002
  119. /* GMAC4 defines */
  120. #define MII_GMAC4_GOC_SHIFT 2
  121. #define MII_GMAC4_WRITE BIT(MII_GMAC4_GOC_SHIFT)
  122. #define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
  123. #define MII_BUSY 0x00000001
  124. #define MII_WRITE 0x00000002
  125. #define DWC_ETH_QOS_PHY_INTR_STATUS 0x0013
  126. #define LINK_UP 1
  127. #define LINK_DOWN 0
  128. #define LINK_DOWN_STATE 0x800
  129. #define LINK_UP_STATE 0x400
  130. #define MICREL_PHY_ID PHY_ID_KSZ9031
  131. #define DWC_ETH_QOS_MICREL_PHY_INTCS 0x1b
  132. #define DWC_ETH_QOS_MICREL_PHY_CTL 0x1f
  133. #define DWC_ETH_QOS_MICREL_INTR_LEVEL 0x4000
  134. #define DWC_ETH_QOS_BASIC_STATUS 0x0001
  135. #define LINK_STATE_MASK 0x4
  136. #define AUTONEG_STATE_MASK 0x20
  137. #define MICREL_LINK_UP_INTR_STATUS BIT(0)
  138. #define GMAC_CONFIG_PS BIT(15)
  139. #define GMAC_CONFIG_FES BIT(14)
  140. #define GMAC_AN_CTRL_RAN BIT(9)
  141. #define GMAC_AN_CTRL_ANE BIT(12)
  142. #define DWMAC4_PCS_BASE 0x000000e0
  143. #define RGMII_CONFIG_10M_CLK_DVD GENMASK(18, 10)
  144. void *ipc_emac_log_ctxt;
  145. struct emac_emb_smmu_cb_ctx emac_emb_smmu_ctx = {0};
  146. struct plat_stmmacenet_data *plat_dat;
  147. struct qcom_ethqos *pethqos;
  148. #ifdef MODULE
  149. static char *eipv4;
  150. module_param(eipv4, charp, 0660);
  151. MODULE_PARM_DESC(eipv4, "ipv4 value from ethernet partition");
  152. static char *eipv6;
  153. module_param(eipv6, charp, 0660);
  154. MODULE_PARM_DESC(eipv6, "ipv6 value from ethernet partition");
  155. static char *ermac;
  156. module_param(ermac, charp, 0660);
  157. MODULE_PARM_DESC(ermac, "mac address from ethernet partition");
  158. #endif
  159. inline void *qcom_ethqos_get_priv(struct qcom_ethqos *ethqos)
  160. {
  161. struct platform_device *pdev = ethqos->pdev;
  162. struct net_device *dev = platform_get_drvdata(pdev);
  163. struct stmmac_priv *priv = netdev_priv(dev);
  164. return priv;
  165. }
  166. static unsigned char dev_addr[ETH_ALEN] = {
  167. 0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7};
  168. static struct ip_params pparams = {"", "", "", ""};
  169. static int set_early_ethernet_ipv4(char *ipv4_addr_in)
  170. {
  171. int ret = 1;
  172. pparams.is_valid_ipv4_addr = false;
  173. if (!ipv4_addr_in)
  174. return ret;
  175. strscpy(pparams.ipv4_addr_str,
  176. ipv4_addr_in, sizeof(pparams.ipv4_addr_str));
  177. ETHQOSDBG("Early ethernet IPv4 addr: %s\n", pparams.ipv4_addr_str);
  178. ret = in4_pton(pparams.ipv4_addr_str, -1,
  179. (u8 *)&pparams.ipv4_addr.s_addr, -1, NULL);
  180. if (ret != 1 || pparams.ipv4_addr.s_addr == 0) {
  181. ETHQOSERR("Invalid ipv4 address programmed: %s\n",
  182. ipv4_addr_in);
  183. return ret;
  184. }
  185. pparams.is_valid_ipv4_addr = true;
  186. return ret;
  187. }
  188. static int set_early_ethernet_ipv6(char *ipv6_addr_in)
  189. {
  190. int ret = 1;
  191. pparams.is_valid_ipv6_addr = false;
  192. if (!ipv6_addr_in)
  193. return ret;
  194. strscpy(pparams.ipv6_addr_str,
  195. ipv6_addr_in, sizeof(pparams.ipv6_addr_str));
  196. ETHQOSDBG("Early ethernet IPv6 addr: %s\n", pparams.ipv6_addr_str);
  197. ret = in6_pton(pparams.ipv6_addr_str, -1,
  198. (u8 *)&pparams.ipv6_addr.ifr6_addr.s6_addr32, -1, NULL);
  199. if (ret != 1 || !pparams.ipv6_addr.ifr6_addr.s6_addr32) {
  200. ETHQOSERR("Invalid ipv6 address programmed: %s\n",
  201. ipv6_addr_in);
  202. return ret;
  203. }
  204. pparams.is_valid_ipv6_addr = true;
  205. return ret;
  206. }
  207. static int set_early_ethernet_mac(char *mac_addr)
  208. {
  209. bool valid_mac = false;
  210. pparams.is_valid_mac_addr = false;
  211. if (!mac_addr)
  212. return 1;
  213. valid_mac = mac_pton(mac_addr, pparams.mac_addr);
  214. if (!valid_mac)
  215. goto fail;
  216. valid_mac = is_valid_ether_addr(pparams.mac_addr);
  217. if (!valid_mac)
  218. goto fail;
  219. pparams.is_valid_mac_addr = true;
  220. return 0;
  221. fail:
  222. ETHQOSERR("Invalid Mac address programmed: %s\n", mac_addr);
  223. return 1;
  224. }
  225. #ifndef MODULE
  226. static int __init set_early_ethernet_ipv4_static(char *ipv4_addr_in)
  227. {
  228. int ret = 1;
  229. ret = set_early_ethernet_ipv4(ipv4_addr_in);
  230. return ret;
  231. }
  232. __setup("eipv4=", set_early_ethernet_ipv4_static);
  233. static int __init set_early_ethernet_ipv6_static(char *ipv6_addr_in)
  234. {
  235. int ret = 1;
  236. ret = set_early_ethernet_ipv6(ipv6_addr_in);
  237. return ret;
  238. }
  239. __setup("eipv6=", set_early_ethernet_ipv6_static);
  240. static int __init set_early_ethernet_mac_static(char *mac_addr)
  241. {
  242. int ret = 1;
  243. ret = set_early_ethernet_mac(mac_addr);
  244. return ret;
  245. }
  246. __setup("ermac=", set_early_ethernet_mac_static);
  247. #endif
  248. static int qcom_ethqos_add_ipaddr(struct ip_params *ip_info,
  249. struct net_device *dev)
  250. {
  251. int res = 0;
  252. struct ifreq ir;
  253. struct sockaddr_in *sin = (void *)&ir.ifr_ifru.ifru_addr;
  254. struct net *net = dev_net(dev);
  255. if (!net || !net->genl_sock || !net->genl_sock->sk_socket) {
  256. ETHQOSINFO("Sock is null, unable to assign ipv4 address\n");
  257. return res;
  258. }
  259. /*For valid Ipv4 address*/
  260. memset(&ir, 0, sizeof(ir));
  261. memcpy(&sin->sin_addr.s_addr, &ip_info->ipv4_addr,
  262. sizeof(sin->sin_addr.s_addr));
  263. strscpy(ir.ifr_ifrn.ifrn_name,
  264. dev->name, sizeof(ir.ifr_ifrn.ifrn_name));
  265. sin->sin_family = AF_INET;
  266. sin->sin_port = 0;
  267. res = inet_ioctl(net->genl_sock->sk_socket,
  268. SIOCSIFADDR, (unsigned long)(void *)&ir);
  269. if (res) {
  270. ETHQOSERR("can't setup IPv4 address!: %d\r\n", res);
  271. } else {
  272. ETHQOSINFO("Assigned IPv4 address: %s\r\n",
  273. ip_info->ipv4_addr_str);
  274. ETHQOSINFO("M - Etherent Assigned IPv4 address\n");
  275. }
  276. return res;
  277. }
  278. static int qcom_ethqos_add_ipv6addr(struct ip_params *ip_info,
  279. struct net_device *dev)
  280. {
  281. int ret = -EFAULT;
  282. struct in6_ifreq ir6;
  283. char *prefix;
  284. struct net *net = dev_net(dev);
  285. /*For valid IPv6 address*/
  286. if (!net || !net->genl_sock || !net->genl_sock->sk_socket)
  287. ETHQOSERR("Sock is null, unable to assign ipv6 address\n");
  288. if (!net->ipv6.devconf_dflt) {
  289. ETHQOSERR("ipv6.devconf_dflt is null, schedule wq\n");
  290. schedule_delayed_work(&pethqos->ipv6_addr_assign_wq,
  291. msecs_to_jiffies(1000));
  292. return ret;
  293. }
  294. memset(&ir6, 0, sizeof(ir6));
  295. memcpy(&ir6, &ip_info->ipv6_addr, sizeof(struct in6_ifreq));
  296. ir6.ifr6_ifindex = dev->ifindex;
  297. prefix = strnchr(ip_info->ipv6_addr_str,
  298. strlen(ip_info->ipv6_addr_str), '/');
  299. if (!prefix) {
  300. ir6.ifr6_prefixlen = 0;
  301. } else {
  302. ret = kstrtoul(prefix + 1, 0, (unsigned long *)&ir6.ifr6_prefixlen);
  303. if (ir6.ifr6_prefixlen > 128)
  304. ir6.ifr6_prefixlen = 0;
  305. }
  306. ret = inet6_ioctl(net->genl_sock->sk_socket,
  307. SIOCSIFADDR, (unsigned long)(void *)&ir6);
  308. if (ret) {
  309. ETHQOSDBG("Can't setup IPv6 address!\r\n");
  310. } else {
  311. ETHQOSDBG("Assigned IPv6 address: %s\r\n",
  312. ip_info->ipv6_addr_str);
  313. ETHQOSINFO("M - Ethernet Assigned IPv6 address\n");
  314. }
  315. return ret;
  316. }
  317. static inline unsigned int dwmac_qcom_get_eth_type(unsigned char *buf)
  318. {
  319. return
  320. ((((u16)buf[QTAG_ETH_TYPE_OFFSET] << 8) |
  321. buf[QTAG_ETH_TYPE_OFFSET + 1]) == ETH_P_8021Q) ?
  322. (((u16)buf[QTAG_VLAN_ETH_TYPE_OFFSET] << 8) |
  323. buf[QTAG_VLAN_ETH_TYPE_OFFSET + 1]) :
  324. (((u16)buf[QTAG_ETH_TYPE_OFFSET] << 8) |
  325. buf[QTAG_ETH_TYPE_OFFSET + 1]);
  326. }
  327. static inline unsigned int dwmac_qcom_get_vlan_ucp(unsigned char *buf)
  328. {
  329. return
  330. (((u16)buf[QTAG_UCP_FIELD_OFFSET] << 8)
  331. | buf[QTAG_UCP_FIELD_OFFSET + 1]);
  332. }
  333. u16 dwmac_qcom_select_queue(struct net_device *dev,
  334. struct sk_buff *skb,
  335. struct net_device *sb_dev)
  336. {
  337. u16 txqueue_select = ALL_OTHER_TRAFFIC_TX_CHANNEL;
  338. unsigned int eth_type, priority;
  339. /* Retrieve ETH type */
  340. eth_type = dwmac_qcom_get_eth_type(skb->data);
  341. if (eth_type == ETH_P_TSN) {
  342. /* Read VLAN priority field from skb->data */
  343. priority = dwmac_qcom_get_vlan_ucp(skb->data);
  344. priority >>= VLAN_TAG_UCP_SHIFT;
  345. if (priority == CLASS_A_TRAFFIC_UCP)
  346. txqueue_select = CLASS_A_TRAFFIC_TX_CHANNEL;
  347. else if (priority == CLASS_B_TRAFFIC_UCP)
  348. txqueue_select = CLASS_B_TRAFFIC_TX_CHANNEL;
  349. else
  350. txqueue_select = ALL_OTHER_TX_TRAFFIC_IPA_DISABLED;
  351. } else {
  352. /* VLAN tagged IP packet or any other non vlan packets (PTP)*/
  353. txqueue_select = ALL_OTHER_TX_TRAFFIC_IPA_DISABLED;
  354. }
  355. ETHQOSDBG("tx_queue %d\n", txqueue_select);
  356. return txqueue_select;
  357. }
  358. void dwmac_qcom_program_avb_algorithm(struct stmmac_priv *priv,
  359. struct ifr_data_struct *req)
  360. {
  361. struct dwmac_qcom_avb_algorithm l_avb_struct, *u_avb_struct =
  362. (struct dwmac_qcom_avb_algorithm *)req->ptr;
  363. struct dwmac_qcom_avb_algorithm_params *avb_params;
  364. ETHQOSDBG("\n");
  365. if (copy_from_user(&l_avb_struct, (void __user *)u_avb_struct,
  366. sizeof(struct dwmac_qcom_avb_algorithm)))
  367. ETHQOSERR("Failed to fetch AVB Struct\n");
  368. if (priv->speed == SPEED_1000)
  369. avb_params = &l_avb_struct.speed1000params;
  370. else
  371. avb_params = &l_avb_struct.speed100params;
  372. /* Application uses 1 for CLASS A traffic and
  373. * 2 for CLASS B traffic
  374. * Configure right channel accordingly
  375. */
  376. if (l_avb_struct.qinx == 1)
  377. l_avb_struct.qinx = CLASS_A_TRAFFIC_TX_CHANNEL;
  378. else if (l_avb_struct.qinx == 2)
  379. l_avb_struct.qinx = CLASS_B_TRAFFIC_TX_CHANNEL;
  380. priv->plat->tx_queues_cfg[l_avb_struct.qinx].mode_to_use =
  381. MTL_QUEUE_AVB;
  382. priv->plat->tx_queues_cfg[l_avb_struct.qinx].send_slope =
  383. avb_params->send_slope,
  384. priv->plat->tx_queues_cfg[l_avb_struct.qinx].idle_slope =
  385. avb_params->idle_slope,
  386. priv->plat->tx_queues_cfg[l_avb_struct.qinx].high_credit =
  387. avb_params->hi_credit,
  388. priv->plat->tx_queues_cfg[l_avb_struct.qinx].low_credit =
  389. avb_params->low_credit,
  390. priv->hw->mac->config_cbs(priv->hw,
  391. priv->plat->tx_queues_cfg[l_avb_struct.qinx].send_slope,
  392. priv->plat->tx_queues_cfg[l_avb_struct.qinx].idle_slope,
  393. priv->plat->tx_queues_cfg[l_avb_struct.qinx].high_credit,
  394. priv->plat->tx_queues_cfg[l_avb_struct.qinx].low_credit,
  395. l_avb_struct.qinx);
  396. ETHQOSDBG("\n");
  397. }
  398. unsigned int dwmac_qcom_get_plat_tx_coal_frames(struct sk_buff *skb)
  399. {
  400. bool is_udp;
  401. unsigned int eth_type;
  402. eth_type = dwmac_qcom_get_eth_type(skb->data);
  403. #ifdef CONFIG_PTPSUPPORT_OBJ
  404. if (eth_type == ETH_P_1588)
  405. return PTP_INT_MOD;
  406. #endif
  407. if (eth_type == ETH_P_TSN)
  408. return AVB_INT_MOD;
  409. if (eth_type == ETH_P_IP || eth_type == ETH_P_IPV6) {
  410. #ifdef CONFIG_PTPSUPPORT_OBJ
  411. is_udp = (((eth_type == ETH_P_IP) &&
  412. (ip_hdr(skb)->protocol ==
  413. IPPROTO_UDP)) ||
  414. ((eth_type == ETH_P_IPV6) &&
  415. (ipv6_hdr(skb)->nexthdr ==
  416. IPPROTO_UDP)));
  417. if (is_udp && ((udp_hdr(skb)->dest ==
  418. htons(PTP_UDP_EV_PORT)) ||
  419. (udp_hdr(skb)->dest ==
  420. htons(PTP_UDP_GEN_PORT))))
  421. return PTP_INT_MOD;
  422. #endif
  423. return IP_PKT_INT_MOD;
  424. }
  425. return DEFAULT_INT_MOD;
  426. }
  427. int ethqos_handle_prv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  428. {
  429. struct stmmac_priv *pdata = netdev_priv(dev);
  430. struct ifr_data_struct req;
  431. int ret = 0;
  432. if (copy_from_user(&req, ifr->ifr_ifru.ifru_data,
  433. sizeof(struct ifr_data_struct)))
  434. return -EFAULT;
  435. switch (req.cmd) {
  436. case ETHQOS_CONFIG_PPSOUT_CMD:
  437. ret = ppsout_config(pdata, &req);
  438. break;
  439. case ETHQOS_AVB_ALGORITHM:
  440. dwmac_qcom_program_avb_algorithm(pdata, &req);
  441. break;
  442. default:
  443. break;
  444. }
  445. return ret;
  446. }
/* Read a 32-bit register at @offset within the RGMII IO-macro block. */
static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
{
	return readl(ethqos->rgmii_base + offset);
}
/* Write @value to the 32-bit register at @offset within the RGMII
 * IO-macro block.
 */
static void rgmii_writel(struct qcom_ethqos *ethqos,
			 int value, unsigned int offset)
{
	writel(value, ethqos->rgmii_base + offset);
}
  456. static void rgmii_updatel(struct qcom_ethqos *ethqos,
  457. int mask, int val, unsigned int offset)
  458. {
  459. unsigned int temp;
  460. temp = rgmii_readl(ethqos, offset);
  461. temp = (temp & ~(mask)) | val;
  462. rgmii_writel(ethqos, temp, offset);
  463. }
/* Dump the RGMII IO-macro / SDCC DLL register block at debug level.
 * @priv: a struct qcom_ethqos *, passed as void * to match the stmmac
 * platform dump-hook signature.
 */
static void rgmii_dump(void *priv)
{
	struct qcom_ethqos *ethqos = priv;

	dev_dbg(&ethqos->pdev->dev, "Rgmii register dump\n");
	dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG: %x\n",
		rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG));
	dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DLL_CONFIG: %x\n",
		rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG));
	dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DDR_CONFIG: %x\n",
		rgmii_readl(ethqos, SDCC_HC_REG_DDR_CONFIG));
	dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DLL_CONFIG2: %x\n",
		rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG2));
	dev_dbg(&ethqos->pdev->dev, "SDC4_STATUS: %x\n",
		rgmii_readl(ethqos, SDC4_STATUS));
	dev_dbg(&ethqos->pdev->dev, "SDCC_USR_CTL: %x\n",
		rgmii_readl(ethqos, SDCC_USR_CTL));
	dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG2: %x\n",
		rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG2));
	dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_DEBUG1: %x\n",
		rgmii_readl(ethqos, RGMII_IO_MACRO_DEBUG1));
	dev_dbg(&ethqos->pdev->dev, "EMAC_SYSTEM_LOW_POWER_DEBUG: %x\n",
		rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG));
}
  487. /* Clock rates */
  488. #define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL)
  489. #define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL)
  490. #define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL)
  491. static void
  492. ethqos_update_rgmii_clk(struct qcom_ethqos *ethqos, unsigned int speed)
  493. {
  494. switch (speed) {
  495. case SPEED_1000:
  496. ethqos->rgmii_clk_rate = RGMII_1000_NOM_CLK_FREQ;
  497. break;
  498. case SPEED_100:
  499. ethqos->rgmii_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ;
  500. break;
  501. case SPEED_10:
  502. ethqos->rgmii_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ;
  503. break;
  504. }
  505. clk_set_rate(ethqos->rgmii_clk, ethqos->rgmii_clk_rate);
  506. }
/* Enable the functional clock of the RGMII IO macro. */
static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
{
	rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
		      RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
}
/* Bring up the SDCC DLL used to generate the RGMII RX clock delay.
 *
 * The sequence (CDR enable, CK_OUT_EN toggle with polling, DDR
 * calibration) is order-sensitive hardware programming; the EMAC
 * v2.3.2-RG / v2.1.2 variants skip CDR and the gating/fine-phase and
 * CONFIG2 tweaks. Each CK_OUT_EN transition is polled for up to ~1s
 * (1000 x 1ms busy-wait). Always returns 0; poll timeouts are only
 * logged.
 */
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
{
	unsigned int val;
	int retry = 1000;

	/* Set CDR_EN (cleared instead on v2.3.2-RG / v2.1.2) */
	if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG ||
	    ethqos->emac_ver == EMAC_HW_v2_1_2)
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
			      0, SDCC_HC_REG_DLL_CONFIG);
	else
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
			      SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);

	/* Set CDR_EXT_EN */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
		      SDCC_DLL_CONFIG_CDR_EXT_EN, SDCC_HC_REG_DLL_CONFIG);

	/* Clear CK_OUT_EN */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
		      0, SDCC_HC_REG_DLL_CONFIG);

	/* Set DLL_EN */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
		      SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);

	if (ethqos->emac_ver != EMAC_HW_v2_3_2_RG &&
	    ethqos->emac_ver != EMAC_HW_v2_1_2) {
		rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
			      0, SDCC_HC_REG_DLL_CONFIG);
		rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
			      0, SDCC_HC_REG_DLL_CONFIG);
	}

	/* Wait for CK_OUT_EN clear */
	do {
		val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
		val &= SDCC_DLL_CONFIG_CK_OUT_EN;
		if (!val)
			break;
		mdelay(1);
		retry--;
	} while (retry > 0);
	if (!retry)
		dev_err(&ethqos->pdev->dev, "Clear CK_OUT_EN timedout\n");

	/* Set CK_OUT_EN */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
		      SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG);

	/* Wait for CK_OUT_EN set */
	retry = 1000;
	do {
		val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
		val &= SDCC_DLL_CONFIG_CK_OUT_EN;
		if (val)
			break;
		mdelay(1);
		retry--;
	} while (retry > 0);
	if (!retry)
		dev_err(&ethqos->pdev->dev, "Set CK_OUT_EN timedout\n");

	/* Set DDR_CAL_EN */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
		      SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);

	if (ethqos->emac_ver != EMAC_HW_v2_3_2_RG &&
	    ethqos->emac_ver != EMAC_HW_v2_1_2) {
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
			      0, SDCC_HC_REG_DLL_CONFIG2);
		/* MCLK_FREQ_CALC = 0x1A, shifted into GENMASK(17, 10) */
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
			      0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
			      BIT(2), SDCC_HC_REG_DLL_CONFIG2);
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
			      SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
			      SDCC_HC_REG_DLL_CONFIG2);
	}
	return 0;
}
/*
 * emac_rgmii_io_macro_config_1G() - RGMII IO macro programming for 1G link.
 *
 * Used by the v3 macro-init path.  Enables DDR mode, POS/NEG data select
 * and PROG_SWAP on both TX and RX sides, then programs the RX clock
 * delay (PRG_RCLK_DLY = 115) and disables loopback.
 */
void emac_rgmii_io_macro_config_1G(struct qcom_ethqos *ethqos)
{
	rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
		      RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
		      0, RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
		      RGMII_CONFIG_POS_NEG_DATA_SEL,
		      RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
		      RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
		      0, RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
		      RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
		      RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
		      0, RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
		      RGMII_CONFIG2_RX_PROG_SWAP,
		      RGMII_IO_MACRO_CONFIG2);

	/* Set PRG_RCLK_DLY to 115 */
	rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
		      115, SDCC_HC_REG_DDR_CONFIG);
	rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
		      SDCC_DDR_CONFIG_PRG_DLY_EN,
		      SDCC_HC_REG_DDR_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
		      0, RGMII_IO_MACRO_CONFIG);
}
/*
 * emac_rgmii_io_macro_config_100M() - RGMII IO macro programming for 100M.
 *
 * Used by the v3 macro-init path.  Keeps DDR mode, bypasses the TX
 * internal delay, selects the 100M speed program (MAX_SPD_PRG_2 = BIT(6))
 * and uses the extended RX clock delay controls instead of PRG_RCLK_DLY.
 */
void emac_rgmii_io_macro_config_100M(struct qcom_ethqos *ethqos)
{
	rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
		      RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
		      RGMII_CONFIG_BYPASS_TX_ID_EN,
		      RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
		      0, RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
		      0, RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
		      0, RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
		      RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
		      RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
		      BIT(6), RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
		      0, RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
		      RGMII_CONFIG2_RX_PROG_SWAP,
		      RGMII_IO_MACRO_CONFIG2);

	/* Write 0x5 to PRG_RCLK_DLY_CODE */
	rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
		      (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
	rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
		      SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
		      SDCC_HC_REG_DDR_CONFIG);
	rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
		      SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
		      SDCC_HC_REG_DDR_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
		      0, RGMII_IO_MACRO_CONFIG);
}
/*
 * emac_rgmii_io_macro_config_10M() - RGMII IO macro programming for 10M.
 *
 * Used by the v3 macro-init path.  Like the 100M setup but programs
 * MAX_SPD_PRG_9 (BIT(12) | GENMASK(9, 8)) and only writes the extended
 * RX delay code; loopback is not touched here.
 */
void emac_rgmii_io_macro_config_10M(struct qcom_ethqos *ethqos)
{
	rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
		      RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
		      RGMII_CONFIG_BYPASS_TX_ID_EN,
		      RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
		      0, RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
		      0, RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
		      0, RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos,
		      RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
		      RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
		      RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
		      BIT(12) | GENMASK(9, 8),
		      RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
		      0, RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
		      RGMII_CONFIG2_RX_PROG_SWAP,
		      RGMII_IO_MACRO_CONFIG2);

	/* Write 0x5 to PRG_RCLK_DLY_CODE */
	rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
		      (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
}
/*
 * ethqos_rgmii_macro_init() - per-speed RGMII IO macro setup (pre-v3 HW).
 *
 * Disables internal loopback, selects the RGMII interface, then programs
 * the IO macro per link speed.  Several EMAC hardware revisions require
 * different delay values and RX_PROG_SWAP / LOOPBACK_EN polarity; the
 * per-revision branches below encode those differences and must be kept
 * in this exact order.
 *
 * Returns 0 on success, -EINVAL for an unsupported ethqos->speed.
 */
static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
{
	/* Disable loopback mode */
	rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
		      0, RGMII_IO_MACRO_CONFIG2);

	/* Select RGMII, write 0 to interface select */
	rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
		      0, RGMII_IO_MACRO_CONFIG);

	switch (ethqos->speed) {
	case SPEED_1000:
		rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
			      RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
			      0, RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
			      RGMII_CONFIG_POS_NEG_DATA_SEL,
			      RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
			      RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
		/* v2_1_2 keeps DATA_DIVIDE_CLK_SEL at its POR value */
		if (ethqos->emac_ver != EMAC_HW_v2_1_2)
			rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
				      0, RGMII_IO_MACRO_CONFIG2);
		rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
			      RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
			      RGMII_IO_MACRO_CONFIG2);
		rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
			      0, RGMII_IO_MACRO_CONFIG2);
		rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
			      RGMII_CONFIG2_RX_PROG_SWAP,
			      RGMII_IO_MACRO_CONFIG2);
		/*
		 * Set PRG_RCLK_DLY to 57 for 1.8 ns delay; other HW
		 * revisions use tuned per-board values.
		 */
		if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG)
			rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
				      69, SDCC_HC_REG_DDR_CONFIG);
		else if (ethqos->emac_ver == EMAC_HW_v2_1_1)
			rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
				      130, SDCC_HC_REG_DDR_CONFIG);
		else if (ethqos->emac_ver == EMAC_HW_v2_1_2)
			rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
				      52, SDCC_HC_REG_DDR_CONFIG);
		else if (ethqos->emac_ver == EMAC_HW_v2_3_1)
			rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
				      104, SDCC_HC_REG_DDR_CONFIG);
		else
			rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
				      57, SDCC_HC_REG_DDR_CONFIG);
		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
			      SDCC_DDR_CONFIG_PRG_DLY_EN,
			      SDCC_HC_REG_DDR_CONFIG);
		/* LOOPBACK_EN polarity differs per HW revision */
		if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG ||
		    ethqos->emac_ver == EMAC_HW_v2_1_2)
			rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
				      0, RGMII_IO_MACRO_CONFIG);
		else
			rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
				      RGMII_CONFIG_LOOPBACK_EN,
				      RGMII_IO_MACRO_CONFIG);
		break;

	case SPEED_100:
		rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
			      RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
			      RGMII_CONFIG_BYPASS_TX_ID_EN,
			      RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
			      0, RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
			      0, RGMII_IO_MACRO_CONFIG);
		if (ethqos->emac_ver != EMAC_HW_v2_1_2)
			rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
				      0, RGMII_IO_MACRO_CONFIG2);
		rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
			      RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
			      RGMII_IO_MACRO_CONFIG2);
		rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
			      BIT(6), RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
			      0, RGMII_IO_MACRO_CONFIG2);
		/* RX_PROG_SWAP only on the revisions that need it */
		if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG ||
		    ethqos->emac_ver == EMAC_HW_v2_1_2 ||
		    ethqos->emac_ver == EMAC_HW_v2_1_1 ||
		    ethqos->emac_ver == EMAC_HW_v2_3_1)
			rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      RGMII_CONFIG2_RX_PROG_SWAP,
				      RGMII_IO_MACRO_CONFIG2);
		else
			rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      0, RGMII_IO_MACRO_CONFIG2);
		/* Write 0x5 to PRG_RCLK_DLY_CODE */
		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
			      (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
			      SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
			      SDCC_HC_REG_DDR_CONFIG);
		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
			      SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
			      SDCC_HC_REG_DDR_CONFIG);
		if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG ||
		    ethqos->emac_ver == EMAC_HW_v2_1_2)
			rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
				      0, RGMII_IO_MACRO_CONFIG);
		else
			rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
				      RGMII_CONFIG_LOOPBACK_EN,
				      RGMII_IO_MACRO_CONFIG);
		break;

	case SPEED_10:
		rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
			      RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
			      RGMII_CONFIG_BYPASS_TX_ID_EN,
			      RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
			      0, RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
			      0, RGMII_IO_MACRO_CONFIG);
		if (ethqos->emac_ver != EMAC_HW_v2_1_2)
			rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
				      0, RGMII_IO_MACRO_CONFIG2);
		/* TX clock phase shift only on the revisions that need it */
		if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG ||
		    ethqos->emac_ver == EMAC_HW_v2_1_2 ||
		    ethqos->emac_ver == EMAC_HW_v2_1_1)
			rgmii_updatel(ethqos,
				      RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
				      RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
				      RGMII_IO_MACRO_CONFIG2);
		else
			rgmii_updatel(ethqos,
				      RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
				      0, RGMII_IO_MACRO_CONFIG2);
		rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
			      BIT(12) | GENMASK(9, 8),
			      RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
			      0, RGMII_IO_MACRO_CONFIG2);
		if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG ||
		    ethqos->emac_ver == EMAC_HW_v2_1_2 ||
		    ethqos->emac_ver == EMAC_HW_v2_1_1)
			rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      RGMII_CONFIG2_RX_PROG_SWAP,
				      RGMII_IO_MACRO_CONFIG2);
		else
			rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      0, RGMII_IO_MACRO_CONFIG2);
		/* Write 0x5 to PRG_RCLK_DLY_CODE */
		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
			      (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
			      SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
			      SDCC_HC_REG_DDR_CONFIG);
		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
			      SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
			      SDCC_HC_REG_DDR_CONFIG);
		if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG ||
		    ethqos->emac_ver == EMAC_HW_v2_1_2)
			rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
				      0, RGMII_IO_MACRO_CONFIG);
		else
			rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
				      RGMII_CONFIG_LOOPBACK_EN,
				      RGMII_IO_MACRO_CONFIG);
		break;

	default:
		dev_err(&ethqos->pdev->dev,
			"Invalid speed %d\n", ethqos->speed);
		return -EINVAL;
	}

	return 0;
}
  846. static int ethqos_rgmii_macro_init_v3(struct qcom_ethqos *ethqos)
  847. {
  848. /* Disable loopback mode */
  849. rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
  850. 0, RGMII_IO_MACRO_CONFIG2);
  851. /* Select RGMII, write 0 to interface select */
  852. rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
  853. 0, RGMII_IO_MACRO_CONFIG);
  854. switch (ethqos->speed) {
  855. case SPEED_1000:
  856. emac_rgmii_io_macro_config_1G(ethqos);
  857. break;
  858. case SPEED_100:
  859. emac_rgmii_io_macro_config_100M(ethqos);
  860. break;
  861. case SPEED_10:
  862. emac_rgmii_io_macro_config_10M(ethqos);
  863. break;
  864. default:
  865. dev_err(&ethqos->pdev->dev,
  866. "Invalid speed %d\n", ethqos->speed);
  867. return -EINVAL;
  868. }
  869. return 0;
  870. }
/*
 * ethqos_configure_sgmii_v3_1() - SGMII MAC/PCS setup for EMAC v3.1.
 *
 * Adjusts the MAC port-select/speed bits (GMAC_CONFIG_PS/FES), the
 * RGMII clock mux and the PCS auto-negotiation controls per link speed:
 * 2.5G disables AN, 1G/100M/10M restart AN with ANE set.
 *
 * Returns 0 on success, -EINVAL for an unsupported ethqos->speed.
 */
int ethqos_configure_sgmii_v3_1(struct qcom_ethqos *ethqos)
{
	u32 value = 0;
	struct stmmac_priv *priv = qcom_ethqos_get_priv(ethqos);

	value = readl(priv->ioaddr + MAC_CTRL_REG);
	switch (ethqos->speed) {
	case SPEED_2500:
		/* Clear port-select for gigabit-class operation */
		value &= ~GMAC_CONFIG_PS;
		writel(value, priv->ioaddr + MAC_CTRL_REG);
		rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_IO_MACRO_CONFIG2);
		/* 2.5G: disable PCS auto-negotiation */
		value = readl(priv->ioaddr + DWMAC4_PCS_BASE);
		value &= ~GMAC_AN_CTRL_ANE;
		writel(value, priv->ioaddr + DWMAC4_PCS_BASE);
		break;
	case SPEED_1000:
		value &= ~GMAC_CONFIG_PS;
		writel(value, priv->ioaddr + MAC_CTRL_REG);
		rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_IO_MACRO_CONFIG2);
		/* Enable and restart PCS auto-negotiation */
		value = readl(priv->ioaddr + DWMAC4_PCS_BASE);
		value |= GMAC_AN_CTRL_RAN | GMAC_AN_CTRL_ANE;
		writel(value, priv->ioaddr + DWMAC4_PCS_BASE);
		break;
	case SPEED_100:
		/* Port select + fast ethernet speed */
		value |= GMAC_CONFIG_PS | GMAC_CONFIG_FES;
		writel(value, priv->ioaddr + MAC_CTRL_REG);
		value = readl(priv->ioaddr + DWMAC4_PCS_BASE);
		value |= GMAC_AN_CTRL_RAN | GMAC_AN_CTRL_ANE;
		writel(value, priv->ioaddr + DWMAC4_PCS_BASE);
		break;
	case SPEED_10:
		value |= GMAC_CONFIG_PS;
		value &= ~GMAC_CONFIG_FES;
		writel(value, priv->ioaddr + MAC_CTRL_REG);
		/* 10M clock divider programming in the IO macro */
		rgmii_updatel(ethqos, RGMII_CONFIG_10M_CLK_DVD, BIT(10) |
			      GENMASK(15, 14), RGMII_IO_MACRO_CONFIG);
		value = readl(priv->ioaddr + DWMAC4_PCS_BASE);
		value |= GMAC_AN_CTRL_RAN | GMAC_AN_CTRL_ANE;
		writel(value, priv->ioaddr + DWMAC4_PCS_BASE);
		break;
	default:
		dev_err(&ethqos->pdev->dev,
			"Invalid speed %d\n", ethqos->speed);
		return -EINVAL;
	}

	return 0;
}
  919. static int ethqos_configure_mac_v3_1(struct qcom_ethqos *ethqos)
  920. {
  921. struct stmmac_priv *priv = qcom_ethqos_get_priv(ethqos);
  922. int ret = 0;
  923. switch (priv->plat->interface) {
  924. case PHY_INTERFACE_MODE_SGMII:
  925. ret = ethqos_configure_sgmii_v3_1(ethqos);
  926. qcom_ethqos_serdes_update(ethqos, ethqos->speed, priv->plat->interface);
  927. break;
  928. }
  929. return ret;
  930. }
  931. static int ethqos_configure(struct qcom_ethqos *ethqos)
  932. {
  933. volatile unsigned int dll_lock;
  934. unsigned int i, retry = 1000;
  935. if (ethqos->emac_ver == EMAC_HW_v3_1_0)
  936. return ethqos_configure_mac_v3_1(ethqos);
  937. /* Reset to POR values and enable clk */
  938. for (i = 0; i < ethqos->num_por; i++)
  939. rgmii_writel(ethqos, ethqos->por[i].value,
  940. ethqos->por[i].offset);
  941. ethqos_set_func_clk_en(ethqos);
  942. /* Initialize the DLL first */
  943. /* Set DLL_RST */
  944. rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
  945. SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
  946. /* Set PDN */
  947. rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
  948. SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
  949. /* Clear DLL_RST */
  950. rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
  951. SDCC_HC_REG_DLL_CONFIG);
  952. /* Clear PDN */
  953. rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
  954. SDCC_HC_REG_DLL_CONFIG);
  955. if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) {
  956. /* Set DLL_EN */
  957. rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
  958. SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
  959. /* Set CK_OUT_EN */
  960. rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
  961. SDCC_DLL_CONFIG_CK_OUT_EN,
  962. SDCC_HC_REG_DLL_CONFIG);
  963. /* Set USR_CTL bit 26 with mask of 3 bits */
  964. rgmii_updatel(ethqos, GENMASK(26, 24), BIT(26), SDCC_USR_CTL);
  965. /* wait for DLL LOCK */
  966. do {
  967. mdelay(1);
  968. dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
  969. if (dll_lock & SDC4_STATUS_DLL_LOCK)
  970. break;
  971. retry--;
  972. } while (retry > 0);
  973. if (!retry)
  974. dev_err(&ethqos->pdev->dev,
  975. "Timeout while waiting for DLL lock\n");
  976. }
  977. if (ethqos->speed == SPEED_1000)
  978. ethqos_dll_configure(ethqos);
  979. ethqos_rgmii_macro_init(ethqos);
  980. return 0;
  981. }
  982. /* for EMAC_HW_VER >= 3 */
/*
 * ethqos_configure_mac_v3() - MAC/IO-macro configuration for EMAC v3 HW.
 *
 * Restores POR values, enables the functional clock and runs the HSR
 * DLL bring-up: reset/power-down toggle, then for 1000 Mbps the full
 * HSR register programming with lock polling; for 10/100 Mbps the DLL
 * is bypassed instead.  Finishes with the v3 RGMII macro init.
 *
 * Returns the result of ethqos_rgmii_macro_init_v3().
 */
static int ethqos_configure_mac_v3(struct qcom_ethqos *ethqos)
{
	unsigned int dll_lock;
	unsigned int i, retry = 1000;
	int ret = 0;

	/* Reset to POR values and enable clk */
	for (i = 0; i < ethqos->num_por; i++)
		rgmii_writel(ethqos, ethqos->por[i].value,
			     ethqos->por[i].offset);
	ethqos_set_func_clk_en(ethqos);

	/* Put DLL into Reset and Powerdown */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
		      SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
		      SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);

	/*Power on and set DLL, Set->RST & PDN to '0' */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
		      0, SDCC_HC_REG_DLL_CONFIG);
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
		      0, SDCC_HC_REG_DLL_CONFIG);

	/* for 10 or 100Mbps further configuration not required */
	if (ethqos->speed == SPEED_1000) {
		/* Disable DLL output clock */
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
			      0, SDCC_HC_REG_DLL_CONFIG);

		/* Configure SDCC_DLL_TEST_CTRL */
		rgmii_writel(ethqos, HSR_SDCC_DLL_TEST_CTRL, SDCC_TEST_CTL);

		/* Configure SDCC_USR_CTRL */
		rgmii_writel(ethqos, HSR_SDCC_USR_CTRL, SDCC_USR_CTL);

		/* Configure DDR_CONFIG */
		rgmii_writel(ethqos, HSR_DDR_CONFIG, SDCC_HC_REG_DDR_CONFIG);

		/* Configure PRG_RCLK_DLY */
		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
			      DDR_CONFIG_PRG_RCLK_DLY, SDCC_HC_REG_DDR_CONFIG);
		/*Enable PRG_RCLK_CLY */
		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
			      SDCC_DDR_CONFIG_PRG_DLY_EN, SDCC_HC_REG_DDR_CONFIG);

		/* Configure DLL_CONFIG */
		rgmii_writel(ethqos, HSR_DLL_CONFIG, SDCC_HC_REG_DLL_CONFIG);

		/*Set -> DLL_CONFIG_2 MCLK_FREQ_CALC*/
		rgmii_writel(ethqos, HSR_DLL_CONFIG_2, SDCC_HC_REG_DLL_CONFIG2);

		/*Power Down and Reset DLL*/
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
			      SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
			      SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);

		/*wait for 52us*/
		usleep_range(52, 55);

		/*Power on and set DLL, Set->RST & PDN to '0' */
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
			      0, SDCC_HC_REG_DLL_CONFIG);
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
			      0, SDCC_HC_REG_DLL_CONFIG);

		/*Wait for 8000 input clock cycles, 8000 cycles of 100 MHz = 80us*/
		usleep_range(80, 85);

		/* Enable DLL output clock */
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
			      SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG);

		/* Check for DLL lock */
		do {
			udelay(1);
			dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
			if (dll_lock & SDC4_STATUS_DLL_LOCK)
				break;
			retry--;
		} while (retry > 0);
		if (!retry)
			dev_err(&ethqos->pdev->dev,
				"Timeout while waiting for DLL lock\n");
	}

	/* DLL bypass mode for 10Mbps and 100Mbps
	 * 1. Write 1 to PDN bit of SDCC_HC_REG_DLL_CONFIG register.
	 * 2. Write 1 to bypass bit of SDCC_USR_CTL register
	 * 3. Default value of this register is 0x00010800
	 */
	if (ethqos->speed == SPEED_10 || ethqos->speed == SPEED_100) {
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
			      SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
		rgmii_updatel(ethqos, DLL_BYPASS,
			      DLL_BYPASS, SDCC_USR_CTL);
	}

	ret = ethqos_rgmii_macro_init_v3(ethqos);

	return ret;
}
  1068. static void qcom_serdes_loopback_v3_1(struct plat_stmmacenet_data *plat, bool on)
  1069. {
  1070. struct qcom_ethqos *ethqos = plat->bsp_priv;
  1071. if (on)
  1072. rgmii_updatel(ethqos, SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN,
  1073. SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN,
  1074. EMAC_WRAPPER_SGMII_PHY_CNTRL1_v3);
  1075. else
  1076. rgmii_updatel(ethqos, SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN, 0,
  1077. EMAC_WRAPPER_SGMII_PHY_CNTRL1_v3);
  1078. }
  1079. static int ethqos_serdes_power_up(struct net_device *ndev, void *priv)
  1080. {
  1081. struct qcom_ethqos *ethqos = priv;
  1082. struct net_device *dev = ndev;
  1083. struct stmmac_priv *s_priv = netdev_priv(dev);
  1084. ETHQOSINFO("%s : speed = %d interface = %d",
  1085. __func__,
  1086. ethqos->speed,
  1087. s_priv->plat->interface);
  1088. return qcom_ethqos_serdes_update(ethqos, ethqos->speed,
  1089. s_priv->plat->interface);
  1090. }
/*
 * ethqos_configure_rgmii_v4() - RGMII configuration for EMAC v4 HW.
 *
 * Same bring-up sequence as the default ethqos_configure() path:
 * restore POR values, enable the functional clock, toggle DLL
 * reset/power-down, and for gigabit speeds enable the DLL, poll for
 * lock, then run the DLL configuration and RGMII macro init.
 *
 * Returns 0 (DLL-lock timeout is logged, not fatal).
 */
static int ethqos_configure_rgmii_v4(struct qcom_ethqos *ethqos)
{
	unsigned int dll_lock;
	unsigned int i, retry = 1000;

	/* Reset to POR values and enable clk */
	for (i = 0; i < ethqos->num_por; i++)
		rgmii_writel(ethqos, ethqos->por[i].value,
			     ethqos->por[i].offset);
	ethqos_set_func_clk_en(ethqos);

	/* Initialize the DLL first */

	/* Set DLL_RST */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
		      SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);

	/* Set PDN */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
		      SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);

	/* Clear DLL_RST */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
		      SDCC_HC_REG_DLL_CONFIG);

	/* Clear PDN */
	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
		      SDCC_HC_REG_DLL_CONFIG);

	if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) {
		/* Set DLL_EN */
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
			      SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);

		/* Set CK_OUT_EN */
		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
			      SDCC_DLL_CONFIG_CK_OUT_EN,
			      SDCC_HC_REG_DLL_CONFIG);

		/* Set USR_CTL bit 26 with mask of 3 bits */
		rgmii_updatel(ethqos, GENMASK(26, 24), BIT(26), SDCC_USR_CTL);

		/* wait for DLL LOCK */
		do {
			mdelay(1);
			dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
			if (dll_lock & SDC4_STATUS_DLL_LOCK)
				break;
			retry--;
		} while (retry > 0);
		if (!retry)
			dev_err(&ethqos->pdev->dev,
				"Timeout while waiting for DLL lock\n");
	}

	if (ethqos->speed == SPEED_1000)
		ethqos_dll_configure(ethqos);

	ethqos_rgmii_macro_init(ethqos);

	return 0;
}
/*
 * ethqos_configure_sgmii_v4() - SGMII wrapper/IO-macro setup for EMAC v4.
 *
 * Enables the RGMII bypass, routes the clock muxes for SGMII operation,
 * and programs the fixed speed-program fields in the IO macro config
 * and scratch registers.  Speed-independent on this path.
 *
 * Returns 0.
 */
static int ethqos_configure_sgmii_v4(struct qcom_ethqos *ethqos)
{
	rgmii_updatel(ethqos, RGMII_BYPASS_EN, RGMII_BYPASS_EN,
		      RGMII_IO_MACRO_BYPASS);
	rgmii_updatel(ethqos, RGMII_CONFIG2_MODE_EN_VIA_GMII, 0,
		      RGMII_IO_MACRO_CONFIG2);
	/* Clock mux selection: SGMII path, GMII master clock */
	rgmii_updatel(ethqos, SGMII_PHY_CNTRL1_RGMII_SGMII_CLK_MUX_SEL, 0,
		      EMAC_WRAPPER_SGMII_PHY_CNTRL1);
	rgmii_updatel(ethqos, SGMII_PHY_CNTRL1_USXGMII_GMII_MASTER_CLK_MUX_SEL,
		      SGMII_PHY_CNTRL1_USXGMII_GMII_MASTER_CLK_MUX_SEL,
		      EMAC_WRAPPER_SGMII_PHY_CNTRL1);
	rgmii_updatel(ethqos, SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN, 0,
		      EMAC_WRAPPER_SGMII_PHY_CNTRL1);
	rgmii_updatel(ethqos, USXGMII_CLK_BLK_GMII_CLK_BLK_SEL, 0,
		      EMAC_WRAPPER_USXGMII_MUX_SEL);
	rgmii_updatel(ethqos, USXGMII_CLK_BLK_CLK_EN, 0,
		      EMAC_WRAPPER_USXGMII_MUX_SEL);
	rgmii_updatel(ethqos, SGMII_PHY_CNTRL0_2P5G_1G_CLK_SEL, 0,
		      EMAC_WRAPPER_SGMII_PHY_CNTRL0);
	/* Fixed speed-program fields for the SGMII path */
	rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2, (BIT(6) | BIT(9)),
		      RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
		      (BIT(10) | BIT(14) | BIT(15)),
		      RGMII_IO_MACRO_CONFIG);
	rgmii_updatel(ethqos, RGMII_CONFIG2_MAX_SPD_PRG_3,
		      (BIT(17) | BIT(20)),
		      RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos, RGMII_SCRATCH2_MAX_SPD_PRG_4, BIT(2),
		      RGMII_IO_MACRO_SCRATCH_2);
	rgmii_updatel(ethqos, RGMII_SCRATCH2_MAX_SPD_PRG_5, BIT(6) | BIT(7),
		      RGMII_IO_MACRO_SCRATCH_2);
	rgmii_updatel(ethqos, RGMII_SCRATCH2_MAX_SPD_PRG_6, 0,
		      RGMII_IO_MACRO_SCRATCH_2);
	rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
		      RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
		      RGMII_IO_MACRO_CONFIG2);

	return 0;
}
/*
 * ethqos_configure_usxgmii_v4() - USXGMII wrapper setup for EMAC v4.
 *
 * Establishes a common baseline (GMII-via-RGMII off, clock muxes
 * cleared, 2P5G/1G clock select set) and then overrides per-speed
 * clock/speed-program fields for 10G/5G/2.5G/1G/100M/10M.
 *
 * Returns 0 on success, -EINVAL for an unsupported ethqos->speed.
 */
static int ethqos_configure_usxgmii_v4(struct qcom_ethqos *ethqos)
{
	/* Common baseline before the per-speed overrides */
	rgmii_updatel(ethqos, RGMII_CONFIG2_MODE_EN_VIA_GMII, 0,
		      RGMII_IO_MACRO_CONFIG2);
	rgmii_updatel(ethqos, SGMII_PHY_CNTRL0_2P5G_1G_CLK_SEL, BIT(5),
		      EMAC_WRAPPER_SGMII_PHY_CNTRL0);
	rgmii_updatel(ethqos, SGMII_PHY_CNTRL1_RGMII_SGMII_CLK_MUX_SEL, 0,
		      EMAC_WRAPPER_SGMII_PHY_CNTRL1);
	rgmii_updatel(ethqos, SGMII_PHY_CNTRL1_USXGMII_GMII_MASTER_CLK_MUX_SEL,
		      SGMII_PHY_CNTRL1_USXGMII_GMII_MASTER_CLK_MUX_SEL,
		      EMAC_WRAPPER_SGMII_PHY_CNTRL1);
	rgmii_updatel(ethqos, SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN, 0,
		      EMAC_WRAPPER_SGMII_PHY_CNTRL1);
	rgmii_updatel(ethqos, USXGMII_CLK_BLK_GMII_CLK_BLK_SEL, 0,
		      EMAC_WRAPPER_USXGMII_MUX_SEL);
	rgmii_updatel(ethqos, USXGMII_CLK_BLK_CLK_EN, 0,
		      EMAC_WRAPPER_USXGMII_MUX_SEL);

	switch (ethqos->speed) {
	case SPEED_10000:
		rgmii_updatel(ethqos, USXGMII_CLK_BLK_GMII_CLK_BLK_SEL,
			      USXGMII_CLK_BLK_GMII_CLK_BLK_SEL,
			      EMAC_WRAPPER_USXGMII_MUX_SEL);
		break;
	case SPEED_5000:
		rgmii_updatel(ethqos, SGMII_PHY_CNTRL0_2P5G_1G_CLK_SEL, 0,
			      EMAC_WRAPPER_SGMII_PHY_CNTRL0);
		rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
			      (BIT(6) | BIT(7)),
			      RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG2_MAX_SPD_PRG_3,
			      (BIT(17) | BIT(18)),
			      RGMII_IO_MACRO_CONFIG2);
		break;
	case SPEED_2500:
		rgmii_updatel(ethqos, SGMII_PHY_CNTRL0_2P5G_1G_CLK_SEL, 0,
			      EMAC_WRAPPER_SGMII_PHY_CNTRL0);
		rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
			      (BIT(10) | BIT(11)),
			      RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_SCRATCH2_MAX_SPD_PRG_4,
			      (BIT(2) | BIT(3)),
			      RGMII_IO_MACRO_SCRATCH_2);
		rgmii_updatel(ethqos, RGMII_SCRATCH2_MAX_SPD_PRG_5, 0,
			      RGMII_IO_MACRO_SCRATCH_2);
		break;
	case SPEED_1000:
		rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_IO_MACRO_CONFIG2);
		break;
	case SPEED_100:
		rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_IO_MACRO_CONFIG2);
		rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2, BIT(9),
			      RGMII_IO_MACRO_CONFIG);
		rgmii_updatel(ethqos, RGMII_CONFIG2_MAX_SPD_PRG_3, BIT(20),
			      RGMII_IO_MACRO_CONFIG2);
		rgmii_updatel(ethqos, RGMII_SCRATCH2_MAX_SPD_PRG_6, BIT(1),
			      RGMII_IO_MACRO_SCRATCH_2);
		break;
	case SPEED_10:
		rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_IO_MACRO_CONFIG2);
		break;
	default:
		dev_err(&ethqos->pdev->dev,
			"Invalid speed %d\n", ethqos->speed);
		return -EINVAL;
	}

	return 0;
}
  1233. static int ethqos_configure_mac_v4(struct qcom_ethqos *ethqos)
  1234. {
  1235. struct stmmac_priv *priv = qcom_ethqos_get_priv(ethqos);
  1236. int ret = 0;
  1237. switch (priv->plat->interface) {
  1238. case PHY_INTERFACE_MODE_RGMII:
  1239. case PHY_INTERFACE_MODE_RGMII_ID:
  1240. case PHY_INTERFACE_MODE_RGMII_RXID:
  1241. case PHY_INTERFACE_MODE_RGMII_TXID:
  1242. ret = ethqos_configure_rgmii_v4(ethqos);
  1243. qcom_ethqos_serdes_update(ethqos, ethqos->speed, priv->plat->interface);
  1244. break;
  1245. case PHY_INTERFACE_MODE_SGMII:
  1246. ret = ethqos_configure_sgmii_v4(ethqos);
  1247. qcom_ethqos_serdes_update(ethqos, ethqos->speed, priv->plat->interface);
  1248. break;
  1249. case PHY_INTERFACE_MODE_USXGMII:
  1250. ret = ethqos_configure_usxgmii_v4(ethqos);
  1251. break;
  1252. }
  1253. return ret;
  1254. }
  1255. static void ethqos_fix_mac_speed(void *priv, unsigned int speed)
  1256. {
  1257. struct qcom_ethqos *ethqos = priv;
  1258. int ret = 0;
  1259. ethqos->speed = speed;
  1260. ethqos_update_rgmii_clk(ethqos, speed);
  1261. if (ethqos->emac_ver == EMAC_HW_v3_0_0_RG)
  1262. ret = ethqos_configure_mac_v3(ethqos);
  1263. else if (ethqos->emac_ver == EMAC_HW_v4_0_0)
  1264. ret = ethqos_configure_mac_v4(ethqos);
  1265. else
  1266. ret = ethqos_configure(ethqos);
  1267. if (ret != 0)
  1268. ETHQOSERR("HSR configuration has failed\n");
  1269. }
  1270. static int ethqos_clks_config(void *priv, bool enabled)
  1271. {
  1272. struct qcom_ethqos *ethqos = priv;
  1273. int ret = 0;
  1274. if (enabled) {
  1275. ret = clk_prepare_enable(ethqos->rgmii_clk);
  1276. if (ret) {
  1277. dev_err(&ethqos->pdev->dev, "rgmii_clk enable failed\n");
  1278. return ret;
  1279. }
  1280. /* Enable functional clock to prevent DMA reset to timeout due
  1281. * to lacking PHY clock after the hardware block has been power
  1282. * cycled. The actual configuration will be adjusted once
  1283. * ethqos_fix_mac_speed() is invoked.
  1284. */
  1285. ethqos_set_func_clk_en(ethqos);
  1286. } else {
  1287. clk_disable_unprepare(ethqos->rgmii_clk);
  1288. }
  1289. return ret;
  1290. }
/*
 * ethqos_mdio_read() - read a PHY register over the MAC's MDIO bus.
 * @priv:    stmmac context providing the MII register layout.
 * @phyaddr: PHY address on the bus.
 * @phyreg:  register number to read.
 *
 * Builds the MII address command (address, register, CSR clock divider,
 * GMAC4 read opcode when applicable), waits for the bus to be idle,
 * issues the command, waits for completion, then returns the data
 * register contents.
 *
 * Returns the (non-negative) register value, or -EBUSY if the bus does
 * not go idle within the poll timeout.
 */
static int ethqos_mdio_read(struct stmmac_priv *priv, int phyaddr, int phyreg)
{
	unsigned int mii_address = priv->hw->mii.addr;
	unsigned int mii_data = priv->hw->mii.data;
	u32 v;
	int data;
	u32 value = MII_BUSY;

	/* Compose the command: PHY address, register, CSR clock range */
	value |= (phyaddr << priv->hw->mii.addr_shift)
		& priv->hw->mii.addr_mask;
	value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
	value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
		& priv->hw->mii.clk_csr_mask;
	if (priv->plat->has_gmac4)
		value |= MII_GMAC4_READ;

	/* Wait until any in-flight MDIO transaction completes */
	if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
			       100, 10000))
		return -EBUSY;

	writel_relaxed(value, priv->ioaddr + mii_address);

	/* Wait for this read transaction to complete */
	if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
			       100, 10000))
		return -EBUSY;

	/* Read the data from the MII data register */
	data = (int)readl_relaxed(priv->ioaddr + mii_data);

	return data;
}
  1316. static int ethqos_phy_intr_config(struct qcom_ethqos *ethqos)
  1317. {
  1318. int ret = 0;
  1319. ethqos->phy_intr = platform_get_irq_byname(ethqos->pdev, "phy-intr");
  1320. if (ethqos->phy_intr < 0) {
  1321. dev_err(&ethqos->pdev->dev,
  1322. "PHY IRQ configuration information not found\n");
  1323. ret = 1;
  1324. }
  1325. return ret;
  1326. }
/* Deferred PHY interrupt handler: classify the interrupt cause and
 * notify phylib/phylink of link changes.
 *
 * Micrel PHYs get special handling: their status/interrupt registers
 * are read directly over MDIO and the driver's config_intr callback is
 * invoked to ack (clear) WoL interrupt status.  All other PHYs use the
 * generic DWC_ETH_QOS_PHY_INTR_STATUS register.
 */
static void ethqos_handle_phy_interrupt(struct qcom_ethqos *ethqos)
{
	int phy_intr_status = 0;
	struct platform_device *pdev = ethqos->pdev;
	struct net_device *dev = platform_get_drvdata(pdev);
	struct stmmac_priv *priv = netdev_priv(dev);
	int micrel_intr_status = 0;

	/* Match on the masked PHY ID to detect a Micrel PHY. */
	if (priv->phydev && (priv->phydev->phy_id &
	    priv->phydev->drv->phy_id_mask)
	    == MICREL_PHY_ID) {
		phy_intr_status = ethqos_mdio_read(priv,
						   priv->plat->phy_addr,
						   DWC_ETH_QOS_BASIC_STATUS);
		ETHQOSDBG("Basic Status Reg (%#x) = %#x\n",
			  DWC_ETH_QOS_BASIC_STATUS, phy_intr_status);
		micrel_intr_status = ethqos_mdio_read(priv,
						      priv->plat->phy_addr,
						      DWC_ETH_QOS_MICREL_PHY_INTCS);
		ETHQOSDBG("MICREL PHY Intr EN Reg (%#x) = %#x\n",
			  DWC_ETH_QOS_MICREL_PHY_INTCS, micrel_intr_status);
		/**
		 * Call ack interrupt to clear the WOL
		 * interrupt status fields
		 */
		if (priv->phydev->drv->config_intr)
			priv->phydev->drv->config_intr(priv->phydev);
		/* Interrupt received for link state change */
		if (phy_intr_status & LINK_STATE_MASK) {
			if (micrel_intr_status & MICREL_LINK_UP_INTR_STATUS)
				ETHQOSDBG("Intr for link UP state\n");
			phy_mac_interrupt(priv->phydev);
		} else if (!(phy_intr_status & LINK_STATE_MASK)) {
			ETHQOSDBG("Intr for link DOWN state\n");
			phy_mac_interrupt(priv->phydev);
		} else if (!(phy_intr_status & AUTONEG_STATE_MASK)) {
			/* NOTE(review): this branch is unreachable - the two
			 * branches above already cover both values of
			 * (phy_intr_status & LINK_STATE_MASK).  The autoneg
			 * check was presumably meant to nest under the
			 * link-down case; confirm intended behavior before
			 * restructuring.
			 */
			ETHQOSDBG("Intr for link down with auto-neg err\n");
		}
	} else {
		phy_intr_status =
			ethqos_mdio_read(priv, priv->plat->phy_addr,
					 DWC_ETH_QOS_PHY_INTR_STATUS);
		if (phy_intr_status & LINK_UP_STATE)
			phylink_mac_change(priv->phylink, LINK_UP);
		else if (phy_intr_status & LINK_DOWN_STATE)
			phylink_mac_change(priv->phylink, LINK_DOWN);
	}
}
  1374. static void ethqos_defer_phy_isr_work(struct work_struct *work)
  1375. {
  1376. struct qcom_ethqos *ethqos =
  1377. container_of(work, struct qcom_ethqos, emac_phy_work);
  1378. if (ethqos->clks_suspended)
  1379. wait_for_completion(&ethqos->clk_enable_done);
  1380. ethqos_handle_phy_interrupt(ethqos);
  1381. }
  1382. static irqreturn_t ethqos_phy_isr(int irq, void *dev_data)
  1383. {
  1384. struct qcom_ethqos *ethqos = (struct qcom_ethqos *)dev_data;
  1385. pm_wakeup_event(&ethqos->pdev->dev, PM_WAKEUP_MS);
  1386. queue_work(system_wq, &ethqos->emac_phy_work);
  1387. return IRQ_HANDLED;
  1388. }
  1389. static void ethqos_phy_irq_enable(void *priv_n)
  1390. {
  1391. struct stmmac_priv *priv = priv_n;
  1392. struct qcom_ethqos *ethqos = priv->plat->bsp_priv;
  1393. if (ethqos->phy_intr) {
  1394. ETHQOSINFO("enabling irq = %d\n", priv->phy_irq_enabled);
  1395. enable_irq(ethqos->phy_intr);
  1396. priv->phy_irq_enabled = true;
  1397. }
  1398. }
  1399. static void ethqos_phy_irq_disable(void *priv_n)
  1400. {
  1401. struct stmmac_priv *priv = priv_n;
  1402. struct qcom_ethqos *ethqos = priv->plat->bsp_priv;
  1403. if (ethqos->phy_intr) {
  1404. ETHQOSINFO("disabling irq = %d\n", priv->phy_irq_enabled);
  1405. disable_irq(ethqos->phy_intr);
  1406. priv->phy_irq_enabled = false;
  1407. }
  1408. }
  1409. static int ethqos_phy_intr_enable(struct qcom_ethqos *ethqos)
  1410. {
  1411. int ret = 0;
  1412. struct stmmac_priv *priv = qcom_ethqos_get_priv(ethqos);
  1413. INIT_WORK(&ethqos->emac_phy_work, ethqos_defer_phy_isr_work);
  1414. init_completion(&ethqos->clk_enable_done);
  1415. ret = request_irq(ethqos->phy_intr, ethqos_phy_isr,
  1416. IRQF_SHARED, "stmmac", ethqos);
  1417. if (ret) {
  1418. ETHQOSERR("Unable to register PHY IRQ %d\n",
  1419. ethqos->phy_intr);
  1420. return ret;
  1421. }
  1422. priv->plat->phy_intr_en_extn_stm = true;
  1423. priv->phy_irq_enabled = true;
  1424. return ret;
  1425. }
/* Device-tree match table: the main EthQoS MAC node and the embedded
 * SMMU context-bank child node are both probed by this driver.
 */
static const struct of_device_id qcom_ethqos_match[] = {
	{ .compatible = "qcom,stmmac-ethqos", },
	{ .compatible = "qcom,emac-smmu-embedded", },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
  1432. static void emac_emb_smmu_exit(void)
  1433. {
  1434. emac_emb_smmu_ctx.valid = false;
  1435. emac_emb_smmu_ctx.pdev_master = NULL;
  1436. emac_emb_smmu_ctx.smmu_pdev = NULL;
  1437. emac_emb_smmu_ctx.iommu_domain = NULL;
  1438. }
  1439. static int emac_emb_smmu_cb_probe(struct platform_device *pdev,
  1440. struct plat_stmmacenet_data *plat_dat)
  1441. {
  1442. int result = 0;
  1443. u32 iova_ap_mapping[2];
  1444. struct device *dev = &pdev->dev;
  1445. ETHQOSDBG("EMAC EMB SMMU CB probe: smmu pdev=%p\n", pdev);
  1446. result = of_property_read_u32_array(dev->of_node,
  1447. "qcom,iommu-dma-addr-pool",
  1448. iova_ap_mapping,
  1449. ARRAY_SIZE(iova_ap_mapping));
  1450. if (result) {
  1451. ETHQOSERR("Failed to read EMB start/size iova addresses\n");
  1452. return result;
  1453. }
  1454. emac_emb_smmu_ctx.smmu_pdev = pdev;
  1455. if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
  1456. dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
  1457. ETHQOSERR("DMA set 32bit mask failed\n");
  1458. return -EOPNOTSUPP;
  1459. }
  1460. emac_emb_smmu_ctx.valid = true;
  1461. emac_emb_smmu_ctx.iommu_domain =
  1462. iommu_get_domain_for_dev(&emac_emb_smmu_ctx.smmu_pdev->dev);
  1463. ETHQOSINFO("Successfully attached to IOMMU\n");
  1464. plat_dat->stmmac_emb_smmu_ctx = emac_emb_smmu_ctx;
  1465. if (emac_emb_smmu_ctx.pdev_master)
  1466. goto smmu_probe_done;
  1467. smmu_probe_done:
  1468. emac_emb_smmu_ctx.ret = result;
  1469. return result;
  1470. }
  1471. static void ethqos_pps_irq_config(struct qcom_ethqos *ethqos)
  1472. {
  1473. ethqos->pps_class_a_irq =
  1474. platform_get_irq_byname(ethqos->pdev, "ptp_pps_irq_0");
  1475. if (ethqos->pps_class_a_irq < 0) {
  1476. if (ethqos->pps_class_a_irq != -EPROBE_DEFER)
  1477. ETHQOSERR("class_a_irq config info not found\n");
  1478. }
  1479. ethqos->pps_class_b_irq =
  1480. platform_get_irq_byname(ethqos->pdev, "ptp_pps_irq_1");
  1481. if (ethqos->pps_class_b_irq < 0) {
  1482. if (ethqos->pps_class_b_irq != -EPROBE_DEFER)
  1483. ETHQOSERR("class_b_irq config info not found\n");
  1484. }
  1485. }
/* Gate all MAC/PHY-side clocks for suspend.
 *
 * clks_suspended is raised (and the completion re-armed) BEFORE the
 * clocks go down so that ethqos_defer_phy_isr_work() blocks on
 * clk_enable_done instead of doing MDIO with clocks off.  The RGMII
 * rate is also parked at 0 via ethqos_update_rgmii_clk().
 */
static void qcom_ethqos_phy_suspend_clks(struct qcom_ethqos *ethqos)
{
	struct stmmac_priv *priv = qcom_ethqos_get_priv(ethqos);

	ETHQOSDBG("Enter\n");

	if (priv->plat->phy_intr_en_extn_stm)
		reinit_completion(&ethqos->clk_enable_done);

	ethqos->clks_suspended = 1;

	ethqos_update_rgmii_clk(ethqos, 0);

	/* Each clock may legitimately be absent (ERR_PTR) on some
	 * platforms; only gate the ones that exist.
	 */
	if (!IS_ERR(priv->plat->stmmac_clk))
		clk_disable_unprepare(priv->plat->stmmac_clk);
	if (!IS_ERR(priv->plat->pclk))
		clk_disable_unprepare(priv->plat->pclk);
	if (!IS_ERR(priv->plat->clk_ptp_ref))
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	if (!IS_ERR(ethqos->rgmii_clk))
		clk_disable_unprepare(ethqos->rgmii_clk);
	if (!IS_ERR(ethqos->phyaux_clk))
		clk_disable_unprepare(ethqos->phyaux_clk);
	if (!IS_ERR(ethqos->sgmiref_clk))
		clk_disable_unprepare(ethqos->sgmiref_clk);

	ETHQOSDBG("Exit\n");
}
  1508. static inline bool qcom_ethqos_is_phy_link_up(struct qcom_ethqos *ethqos)
  1509. {
  1510. /* PHY driver initializes phydev->link=1.
  1511. * So, phydev->link is 1 even on bootup with no PHY connected.
  1512. * phydev->link is valid only after adjust_link is called once.
  1513. */
  1514. struct stmmac_priv *priv = qcom_ethqos_get_priv(ethqos);
  1515. return (priv->dev->phydev && priv->dev->phydev->link);
  1516. }
/* Re-enable all MAC/PHY-side clocks after suspend, mirroring
 * qcom_ethqos_phy_suspend_clks().
 *
 * The RGMII rate is restored to the last negotiated speed when the PHY
 * link is up, otherwise parked at SPEED_10.  clks_suspended is cleared
 * and clk_enable_done completed LAST, so the deferred PHY ISR work only
 * proceeds once every clock is running again.
 */
static void qcom_ethqos_phy_resume_clks(struct qcom_ethqos *ethqos)
{
	struct stmmac_priv *priv = qcom_ethqos_get_priv(ethqos);

	ETHQOSDBG("Enter\n");

	if (!IS_ERR(priv->plat->stmmac_clk))
		clk_prepare_enable(priv->plat->stmmac_clk);
	if (!IS_ERR(priv->plat->pclk))
		clk_prepare_enable(priv->plat->pclk);
	if (!IS_ERR(priv->plat->clk_ptp_ref))
		clk_prepare_enable(priv->plat->clk_ptp_ref);
	if (!IS_ERR(ethqos->rgmii_clk))
		clk_prepare_enable(ethqos->rgmii_clk);
	if (!IS_ERR(ethqos->sgmiref_clk))
		clk_prepare_enable(ethqos->sgmiref_clk);
	if (!IS_ERR(ethqos->phyaux_clk))
		clk_prepare_enable(ethqos->phyaux_clk);

	if (qcom_ethqos_is_phy_link_up(ethqos))
		ethqos_update_rgmii_clk(ethqos, ethqos->speed);
	else
		ethqos_update_rgmii_clk(ethqos, SPEED_10);

	ethqos->clks_suspended = 0;

	if (priv->plat->phy_intr_en_extn_stm)
		complete_all(&ethqos->clk_enable_done);

	ETHQOSDBG("Exit\n");
}
  1542. static ssize_t read_phy_reg_dump(struct file *file, char __user *user_buf,
  1543. size_t count, loff_t *ppos)
  1544. {
  1545. struct qcom_ethqos *ethqos = file->private_data;
  1546. unsigned int len = 0, buf_len = 2000;
  1547. char *buf;
  1548. ssize_t ret_cnt;
  1549. int phydata = 0;
  1550. int i = 0;
  1551. struct platform_device *pdev = ethqos->pdev;
  1552. struct net_device *dev = platform_get_drvdata(pdev);
  1553. struct stmmac_priv *priv = netdev_priv(dev);
  1554. if (!ethqos || !dev->phydev) {
  1555. ETHQOSERR("NULL Pointer\n");
  1556. return -EINVAL;
  1557. }
  1558. buf = kzalloc(buf_len, GFP_KERNEL);
  1559. if (!buf)
  1560. return -ENOMEM;
  1561. len += scnprintf(buf + len, buf_len - len,
  1562. "\n************* PHY Reg dump *************\n");
  1563. for (i = 0; i < 32; i++) {
  1564. phydata = ethqos_mdio_read(priv, priv->plat->phy_addr, i);
  1565. len += scnprintf(buf + len, buf_len - len,
  1566. "MII Register (%#x) = %#x\n",
  1567. i, phydata);
  1568. }
  1569. if (len > buf_len) {
  1570. ETHQOSERR("(len > buf_len) buffer not sufficient\n");
  1571. len = buf_len;
  1572. }
  1573. ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
  1574. kfree(buf);
  1575. return ret_cnt;
  1576. }
  1577. static ssize_t read_rgmii_reg_dump(struct file *file,
  1578. char __user *user_buf, size_t count,
  1579. loff_t *ppos)
  1580. {
  1581. struct qcom_ethqos *ethqos = file->private_data;
  1582. unsigned int len = 0, buf_len = 2000;
  1583. char *buf;
  1584. ssize_t ret_cnt;
  1585. int rgmii_data = 0;
  1586. struct platform_device *pdev = ethqos->pdev;
  1587. struct net_device *dev = platform_get_drvdata(pdev);
  1588. if (!ethqos || !dev->phydev) {
  1589. ETHQOSERR("NULL Pointer\n");
  1590. return -EINVAL;
  1591. }
  1592. buf = kzalloc(buf_len, GFP_KERNEL);
  1593. if (!buf)
  1594. return -ENOMEM;
  1595. len += scnprintf(buf + len, buf_len - len,
  1596. "\n************* RGMII Reg dump *************\n");
  1597. rgmii_data = rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG);
  1598. len += scnprintf(buf + len, buf_len - len,
  1599. "RGMII_IO_MACRO_CONFIG Register = %#x\n",
  1600. rgmii_data);
  1601. rgmii_data = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
  1602. len += scnprintf(buf + len, buf_len - len,
  1603. "SDCC_HC_REG_DLL_CONFIG Register = %#x\n",
  1604. rgmii_data);
  1605. rgmii_data = rgmii_readl(ethqos, SDCC_HC_REG_DDR_CONFIG);
  1606. len += scnprintf(buf + len, buf_len - len,
  1607. "SDCC_HC_REG_DDR_CONFIG Register = %#x\n",
  1608. rgmii_data);
  1609. rgmii_data = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG2);
  1610. len += scnprintf(buf + len, buf_len - len,
  1611. "SDCC_HC_REG_DLL_CONFIG2 Register = %#x\n",
  1612. rgmii_data);
  1613. rgmii_data = rgmii_readl(ethqos, SDC4_STATUS);
  1614. len += scnprintf(buf + len, buf_len - len,
  1615. "SDC4_STATUS Register = %#x\n",
  1616. rgmii_data);
  1617. rgmii_data = rgmii_readl(ethqos, SDCC_USR_CTL);
  1618. len += scnprintf(buf + len, buf_len - len,
  1619. "SDCC_USR_CTL Register = %#x\n",
  1620. rgmii_data);
  1621. rgmii_data = rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG2);
  1622. len += scnprintf(buf + len, buf_len - len,
  1623. "RGMII_IO_MACRO_CONFIG2 Register = %#x\n",
  1624. rgmii_data);
  1625. rgmii_data = rgmii_readl(ethqos, RGMII_IO_MACRO_DEBUG1);
  1626. len += scnprintf(buf + len, buf_len - len,
  1627. "RGMII_IO_MACRO_DEBUG1 Register = %#x\n",
  1628. rgmii_data);
  1629. rgmii_data = rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG);
  1630. len += scnprintf(buf + len, buf_len - len,
  1631. "EMAC_SYSTEM_LOW_POWER_DEBUG Register = %#x\n",
  1632. rgmii_data);
  1633. if (len > buf_len) {
  1634. ETHQOSERR("(len > buf_len) buffer not sufficient\n");
  1635. len = buf_len;
  1636. }
  1637. ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
  1638. kfree(buf);
  1639. return ret_cnt;
  1640. }
/* debugfs fops for the read-only "phy_reg_dump" file. */
static const struct file_operations fops_phy_reg_dump = {
	.read = read_phy_reg_dump,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
/* debugfs fops for the read-only "rgmii_reg_dump" file. */
static const struct file_operations fops_rgmii_reg_dump = {
	.read = read_rgmii_reg_dump,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
  1653. static int ethqos_create_debugfs(struct qcom_ethqos *ethqos)
  1654. {
  1655. static struct dentry *phy_reg_dump;
  1656. static struct dentry *rgmii_reg_dump;
  1657. if (!ethqos) {
  1658. ETHQOSERR("Null Param %s\n", __func__);
  1659. return -ENOMEM;
  1660. }
  1661. ethqos->debugfs_dir = debugfs_create_dir("eth", NULL);
  1662. if (!ethqos->debugfs_dir || IS_ERR(ethqos->debugfs_dir)) {
  1663. ETHQOSERR("Can't create debugfs dir\n");
  1664. return -ENOMEM;
  1665. }
  1666. phy_reg_dump = debugfs_create_file("phy_reg_dump", 0400,
  1667. ethqos->debugfs_dir, ethqos,
  1668. &fops_phy_reg_dump);
  1669. if (!phy_reg_dump || IS_ERR(phy_reg_dump)) {
  1670. ETHQOSERR("Can't create phy_dump %p\n", phy_reg_dump);
  1671. goto fail;
  1672. }
  1673. rgmii_reg_dump = debugfs_create_file("rgmii_reg_dump", 0400,
  1674. ethqos->debugfs_dir, ethqos,
  1675. &fops_rgmii_reg_dump);
  1676. if (!rgmii_reg_dump || IS_ERR(rgmii_reg_dump)) {
  1677. ETHQOSERR("Can't create rgmii_dump %p\n", rgmii_reg_dump);
  1678. goto fail;
  1679. }
  1680. return 0;
  1681. fail:
  1682. debugfs_remove_recursive(ethqos->debugfs_dir);
  1683. return -ENOMEM;
  1684. }
  1685. static void qcom_ethqos_bringup_iface(struct work_struct *work)
  1686. {
  1687. struct platform_device *pdev = NULL;
  1688. struct net_device *ndev = NULL;
  1689. struct qcom_ethqos *ethqos =
  1690. container_of(work, struct qcom_ethqos, early_eth);
  1691. ETHQOSINFO("entry\n");
  1692. if (!ethqos)
  1693. return;
  1694. pdev = ethqos->pdev;
  1695. if (!pdev)
  1696. return;
  1697. ndev = platform_get_drvdata(pdev);
  1698. if (!ndev || netif_running(ndev))
  1699. return;
  1700. rtnl_lock();
  1701. if (dev_change_flags(ndev, ndev->flags | IFF_UP, NULL) < 0)
  1702. ETHQOSINFO("ERROR\n");
  1703. rtnl_unlock();
  1704. ETHQOSINFO("exit\n");
  1705. }
/* Delayed-work poller: try to assign the early-ethernet IPv4 address;
 * once qcom_ethqos_add_ipaddr() succeeds, stop the polling work.
 *
 * NOTE(review): calling cancel_delayed_work_sync()/flush_delayed_work()
 * on this work from within its own work function is suspicious — the
 * sync variants wait for the running instance to finish.  Also, on
 * failure the function simply returns without rescheduling itself;
 * confirm where the retry is queued from.
 */
static void ethqos_is_ipv4_NW_stack_ready(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct qcom_ethqos *ethqos;
	struct platform_device *pdev = NULL;
	struct net_device *ndev = NULL;
	int ret;

	ETHQOSINFO("\n");
	dwork = container_of(work, struct delayed_work, work);
	ethqos = container_of(dwork, struct qcom_ethqos, ipv4_addr_assign_wq);

	if (!ethqos)
		return;

	pdev = ethqos->pdev;

	if (!pdev)
		return;

	ndev = platform_get_drvdata(pdev);

	ret = qcom_ethqos_add_ipaddr(&pparams, ndev);
	if (ret)
		return;

	cancel_delayed_work_sync(&ethqos->ipv4_addr_assign_wq);
	flush_delayed_work(&ethqos->ipv4_addr_assign_wq);
}
/* Delayed-work poller: try to assign the early-ethernet IPv6 address;
 * once qcom_ethqos_add_ipv6addr() succeeds, stop the polling work.
 *
 * NOTE(review): same concerns as the IPv4 variant — self
 * cancel_delayed_work_sync() from within the work function, and no
 * visible rescheduling on failure.
 */
static void ethqos_is_ipv6_NW_stack_ready(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct qcom_ethqos *ethqos;
	struct platform_device *pdev = NULL;
	struct net_device *ndev = NULL;
	int ret;

	ETHQOSINFO("\n");
	dwork = container_of(work, struct delayed_work, work);
	ethqos = container_of(dwork, struct qcom_ethqos, ipv6_addr_assign_wq);

	if (!ethqos)
		return;

	pdev = ethqos->pdev;

	if (!pdev)
		return;

	ndev = platform_get_drvdata(pdev);

	ret = qcom_ethqos_add_ipv6addr(&pparams, ndev);
	if (ret)
		return;

	cancel_delayed_work_sync(&ethqos->ipv6_addr_assign_wq);
	flush_delayed_work(&ethqos->ipv6_addr_assign_wq);
}
/* Apply the early-ethernet boot parameters (pparams): schedule the
 * IPv4/IPv6 address-assignment pollers and program the MAC address.
 * Always returns 0.
 *
 * NOTE(review): OR-ing DUPLEX_FULL and SPEED_100 (ethtool constants)
 * into mdio_bus_data->phy_mask looks wrong — phy_mask is a bitmask of
 * PHY addresses to skip, not a speed/duplex field.  Confirm intent.
 */
static int ethqos_set_early_eth_param(struct stmmac_priv *priv,
				      struct qcom_ethqos *ethqos)
{
	if (priv->plat && priv->plat->mdio_bus_data)
		priv->plat->mdio_bus_data->phy_mask =
			priv->plat->mdio_bus_data->phy_mask | DUPLEX_FULL | SPEED_100;

	if (pparams.is_valid_ipv4_addr) {
		INIT_DELAYED_WORK(&ethqos->ipv4_addr_assign_wq,
				  ethqos_is_ipv4_NW_stack_ready);
		/* Run immediately; reschedules handled by the worker. */
		schedule_delayed_work(&ethqos->ipv4_addr_assign_wq,
				      0);
	}

	if (pparams.is_valid_ipv6_addr) {
		INIT_DELAYED_WORK(&ethqos->ipv6_addr_assign_wq,
				  ethqos_is_ipv6_NW_stack_ready);
		/* IPv6 stack needs more time; first poll after 1 s. */
		schedule_delayed_work(&ethqos->ipv6_addr_assign_wq,
				      msecs_to_jiffies(1000));
	}

	if (pparams.is_valid_mac_addr) {
		ether_addr_copy(dev_addr, pparams.mac_addr);
		eth_hw_addr_set(priv->dev, dev_addr);
	}

	return 0;
}
/* Query the PHY's Wake-on-LAN capabilities and, when supported, enable
 * every supported WoL option in the PHY, mark the device as
 * wakeup-capable, and arm the PHY interrupt as a wake source.
 * Cached results land in ethqos->phy_wol_supported/phy_wol_wolopts.
 */
static void qcom_ethqos_request_phy_wol(void *plat_n)
{
	struct plat_stmmacenet_data *plat = plat_n;
	struct qcom_ethqos *ethqos;
	struct platform_device *pdev;
	struct net_device *ndev;

	if (!plat)
		return;

	ethqos = plat->bsp_priv;
	pdev = ethqos->pdev;
	ndev = platform_get_drvdata(pdev);

	ethqos->phy_wol_supported = 0;
	ethqos->phy_wol_wolopts = 0;

	/* Check if phydev is valid*/
	/* Check and enable Wake-on-LAN functionality in PHY*/
	if (ndev->phydev) {
		struct ethtool_wolinfo wol = {.cmd = ETHTOOL_GWOL};

		wol.supported = 0;
		wol.wolopts = 0;

		ETHQOSINFO("phydev addr: 0x%pK\n", ndev->phydev);
		phy_ethtool_get_wol(ndev->phydev, &wol);
		ethqos->phy_wol_supported = wol.supported;
		ETHQOSINFO("Get WoL[0x%x] in %s\n", wol.supported,
			   ndev->phydev->drv->name);

		/* Try to enable supported Wake-on-LAN features in PHY*/
		if (wol.supported) {
			device_set_wakeup_capable(&ethqos->pdev->dev, 1);

			/* Request all supported options at once. */
			wol.cmd = ETHTOOL_SWOL;
			wol.wolopts = wol.supported;

			if (!phy_ethtool_set_wol(ndev->phydev, &wol)) {
				ethqos->phy_wol_wolopts = wol.wolopts;

				/* PHY interrupt doubles as wake source. */
				enable_irq_wake(ethqos->phy_intr);

				device_set_wakeup_enable(&ethqos->pdev->dev, 1);

				ETHQOSINFO("Enabled WoL[0x%x] in %s\n",
					   wol.wolopts,
					   ndev->phydev->drv->name);
			} else {
				ETHQOSINFO("Disabled WoL[0x%x] in %s\n",
					   wol.wolopts,
					   ndev->phydev->drv->name);
			}
		} else {
			ETHQOSINFO("WoL Not Supported\n");
		}
	}
}
/* Main platform probe for the Qualcomm EthQoS MAC.
 *
 * Order of operations: dispatch SMMU child probes, set up IPC logging,
 * parse early-ethernet module params, map the RGMII IO macro, bring up
 * clocks, fill in the stmmac platform callbacks, optionally populate
 * the embedded-SMMU child, read the EMAC core version, then hand off to
 * stmmac_dvr_probe() and finish with PHY-interrupt/PPS/debugfs setup.
 */
static int qcom_ethqos_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct stmmac_resources stmmac_res;
	struct qcom_ethqos *ethqos = NULL;
	struct net_device *ndev;
	struct stmmac_priv *priv;
	int ret;

	/* The SMMU context-bank child node re-enters this probe; divert
	 * it to its dedicated handler.
	 */
	if (of_device_is_compatible(pdev->dev.of_node,
				    "qcom,emac-smmu-embedded"))
		return emac_emb_smmu_cb_probe(pdev, plat_dat);

	ipc_emac_log_ctxt = ipc_log_context_create(IPCLOG_STATE_PAGES,
						   "emac", 0);
	if (!ipc_emac_log_ctxt)
		ETHQOSERR("Error creating logging context for emac\n");
	else
		ETHQOSDBG("IPC logging has been enabled for emac\n");

	ETHQOSINFO("M - Ethernet probe start\n");

#ifdef MODULE
	/* Early-ethernet settings arrive as module parameters when built
	 * as a module.
	 */
	if (eipv4)
		ret = set_early_ethernet_ipv4(eipv4);

	if (eipv6)
		ret = set_early_ethernet_ipv6(eipv6);

	if (ermac)
		ret = set_early_ethernet_mac(ermac);
#endif

	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;

	ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL);
	if (!ethqos) {
		ret = -ENOMEM;
		goto err_mem;
	}

	ethqos->pdev = pdev;

	ethqos_init_regulators(ethqos);

	if (ethqos_init_gpio(ethqos))
		ETHQOSDBG("ethqos_init_gpio failed");

	plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
	if (IS_ERR(plat_dat)) {
		dev_err(&pdev->dev, "dt configuration failed\n");
		return PTR_ERR(plat_dat);
	}

	ethqos->rgmii_base = devm_platform_ioremap_resource_byname(pdev, "rgmii");
	if (IS_ERR(ethqos->rgmii_base)) {
		ret = PTR_ERR(ethqos->rgmii_base);
		goto err_mem;
	}

	ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
	if (IS_ERR(ethqos->rgmii_clk)) {
		ret = PTR_ERR(ethqos->rgmii_clk);
		goto err_mem;
	}

	/* Per-SoC register programming values from the match data. */
	ethqos->por = of_device_get_match_data(&pdev->dev);

	ret = ethqos_clks_config(ethqos, true);
	if (ret)
		goto err_mem;

	if (plat_dat->interface == PHY_INTERFACE_MODE_SGMII ||
	    plat_dat->interface == PHY_INTERFACE_MODE_USXGMII)
		qcom_ethqos_serdes_configure_dt(ethqos);

	/*Initialize Early ethernet to false*/
	ethqos->early_eth_enabled = false;

	/*Check for valid mac, ip address to enable Early eth*/
	if (pparams.is_valid_mac_addr &&
	    (pparams.is_valid_ipv4_addr || pparams.is_valid_ipv6_addr)) {
		/* For 1000BASE-T mode, auto-negotiation is required and
		 * always used to establish a link.
		 * Configure phy and MAC in 100Mbps mode with autoneg
		 * disable as link up takes more time with autoneg
		 * enabled.
		 */
		ethqos->early_eth_enabled = true;
		ETHQOSINFO("Early ethernet is enabled\n");
	}

	/* Start conservatively at 10 Mb/s until link negotiation. */
	ethqos->speed = SPEED_10;
	ethqos_update_rgmii_clk(ethqos, SPEED_10);
	ethqos_set_func_clk_en(ethqos);

	/* Wire the Qualcomm-specific callbacks into the stmmac core. */
	plat_dat->bsp_priv = ethqos;
	plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
	plat_dat->serdes_loopback_v3_1 = qcom_serdes_loopback_v3_1;
	plat_dat->dump_debug_regs = rgmii_dump;
	plat_dat->tx_select_queue = dwmac_qcom_select_queue;
	plat_dat->has_gmac4 = 1;
	plat_dat->early_eth = ethqos->early_eth_enabled;

	if (plat_dat->interface == PHY_INTERFACE_MODE_SGMII ||
	    plat_dat->interface == PHY_INTERFACE_MODE_USXGMII)
		plat_dat->serdes_powerup = ethqos_serdes_power_up;

	/* Set mdio phy addr probe capability to c22 .
	 * If c22_c45 is set then multiple phy is getting detected.
	 */
	if (of_property_read_bool(np, "eth-c22-mdio-probe"))
		plat_dat->has_c22_mdio_probe_capability = 1;
	else
		plat_dat->has_c22_mdio_probe_capability = 0;

	plat_dat->pmt = 1;
	plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
	plat_dat->handle_prv_ioctl = ethqos_handle_prv_ioctl;
	plat_dat->request_phy_wol = qcom_ethqos_request_phy_wol;
	plat_dat->init_pps = ethqos_init_pps;
	plat_dat->phy_irq_enable = ethqos_phy_irq_enable;
	plat_dat->phy_irq_disable = ethqos_phy_irq_disable;

	if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
		plat_dat->rx_clk_runs_in_lpi = 1;

	if (of_property_read_bool(pdev->dev.of_node, "qcom,arm-smmu")) {
		emac_emb_smmu_ctx.pdev_master = pdev;
		/* Populating the SMMU child triggers
		 * emac_emb_smmu_cb_probe() via this same driver.
		 */
		ret = of_platform_populate(pdev->dev.of_node,
					   qcom_ethqos_match, NULL, &pdev->dev);
		if (ret)
			ETHQOSERR("Failed to populate EMAC platform\n");
		if (emac_emb_smmu_ctx.ret) {
			ETHQOSERR("smmu probe failed\n");
			of_platform_depopulate(&pdev->dev);
			ret = emac_emb_smmu_ctx.ret;
			emac_emb_smmu_ctx.ret = 0;
		}
	}

	if (of_property_read_bool(pdev->dev.of_node,
				  "emac-core-version")) {
		/* Read emac core version value from dtsi */
		ret = of_property_read_u32(pdev->dev.of_node,
					   "emac-core-version",
					   &ethqos->emac_ver);
		if (ret) {
			ETHQOSDBG(":resource emac-hw-ver! not in dtsi\n");
			ethqos->emac_ver = EMAC_HW_NONE;
			WARN_ON(1);
		}
	} else {
		/* No DT override: read the version from hardware. */
		ethqos->emac_ver =
			rgmii_readl(ethqos, EMAC_I0_EMAC_CORE_HW_VERSION_RGOFFADDR);
	}
	ETHQOSDBG(": emac_core_version = %d\n", ethqos->emac_ver);

	if (of_property_read_bool(pdev->dev.of_node,
				  "gdsc-off-on-suspend")) {
		ethqos->gdsc_off_on_suspend = true;
	} else {
		ethqos->gdsc_off_on_suspend = false;
	}
	ETHQOSDBG("gdsc-off-on-suspend = %d\n",
		  ethqos->gdsc_off_on_suspend);

	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
	if (ret)
		goto err_clk;

	pethqos = ethqos;

	/* NOTE(review): pcs_v3 is set AFTER stmmac_dvr_probe() has
	 * already consumed plat_dat - confirm the core reads it late
	 * enough for this to take effect.
	 */
	if (of_property_read_bool(np, "pcs-v3")) {
		plat_dat->pcs_v3 = true;
	} else {
		plat_dat->pcs_v3 = false;
		ETHQOSDBG(":pcs-v3 not in dtsi\n");
	}

	if (!ethqos_phy_intr_config(ethqos)) {
		if (ethqos_phy_intr_enable(ethqos))
			ETHQOSERR("ethqos_phy_intr_enable failed");
	} else {
		ETHQOSERR("Phy interrupt configuration failed");
	}

	if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG) {
		ethqos_pps_irq_config(ethqos);
		create_pps_interrupt_device_node(&ethqos->avb_class_a_dev_t,
						 &ethqos->avb_class_a_cdev,
						 &ethqos->avb_class_a_class,
						 AVB_CLASS_A_POLL_DEV_NODE);

		create_pps_interrupt_device_node(&ethqos->avb_class_b_dev_t,
						 &ethqos->avb_class_b_cdev,
						 &ethqos->avb_class_b_class,
						 AVB_CLASS_B_POLL_DEV_NODE);
	}

	/* NOTE(review): duplicate assignment - pethqos was already set
	 * right after stmmac_dvr_probe() above.
	 */
	pethqos = ethqos;
	rgmii_dump(ethqos);

	ethqos_create_debugfs(ethqos);

	ndev = dev_get_drvdata(&ethqos->pdev->dev);
	priv = netdev_priv(ndev);

	if (ethqos->early_eth_enabled) {
		/* Initialize work*/
		INIT_WORK(&ethqos->early_eth,
			  qcom_ethqos_bringup_iface);
		/* Queue the work*/
		queue_work(system_wq, &ethqos->early_eth);
		/*Set early eth parameters*/
		ethqos_set_early_eth_param(priv, ethqos);
	}

	ETHQOSINFO("M - Ethernet probe end\n");
	return ret;

err_clk:
	ethqos_clks_config(ethqos, false);

err_mem:
	stmmac_remove_config_dt(pdev, plat_dat);

	return ret;
}
/* Platform remove: tear down the stmmac core, release the PHY IRQ and
 * its deferred work (only when the extended PHY-interrupt path was
 * enabled), then unwind SMMU context, regulators and clocks.
 */
static int qcom_ethqos_remove(struct platform_device *pdev)
{
	struct qcom_ethqos *ethqos;
	int ret;
	struct stmmac_priv *priv;

	ethqos = get_stmmac_bsp_priv(&pdev->dev);
	if (!ethqos)
		return -ENODEV;

	priv = qcom_ethqos_get_priv(ethqos);

	ret = stmmac_pltfr_remove(pdev);

	if (priv->plat->phy_intr_en_extn_stm)
		free_irq(ethqos->phy_intr, ethqos);

	priv->phy_irq_enabled = false;

	if (priv->plat->phy_intr_en_extn_stm)
		cancel_work_sync(&ethqos->emac_phy_work);

	emac_emb_smmu_exit();
	ethqos_disable_regulators(ethqos);

	ethqos_clks_config(ethqos, false);

	return ret;
}
/* System suspend: no-op for the SMMU child device; otherwise suspend
 * the stmmac core, gate the PHY-side clocks, and (when configured)
 * drop the EMAC GDSC power rail.
 */
static int qcom_ethqos_suspend(struct device *dev)
{
	struct qcom_ethqos *ethqos;
	struct net_device *ndev = NULL;
	int ret;

	if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded")) {
		ETHQOSDBG("smmu return\n");
		return 0;
	}

	ethqos = get_stmmac_bsp_priv(dev);
	if (!ethqos)
		return -ENODEV;

	ndev = dev_get_drvdata(dev);

	if (!ndev)
		return -EINVAL;

	ret = stmmac_suspend(dev);
	qcom_ethqos_phy_suspend_clks(ethqos);

	if (ethqos->gdsc_off_on_suspend) {
		if (ethqos->gdsc_emac) {
			regulator_disable(ethqos->gdsc_emac);
			ETHQOSDBG("Disabled <%s>\n", EMAC_GDSC_EMAC_NAME);
		}
	}
	ETHQOSDBG(" ret = %d\n", ret);
	return ret;
}
/* System resume: mirror of qcom_ethqos_suspend() — restore the GDSC
 * rail first (if it was dropped), re-enable the PHY-side clocks, then
 * resume the stmmac core.
 */
static int qcom_ethqos_resume(struct device *dev)
{
	struct net_device *ndev = NULL;
	struct qcom_ethqos *ethqos;
	int ret;

	ETHQOSDBG("Resume Enter\n");
	if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
		return 0;

	ethqos = get_stmmac_bsp_priv(dev);

	if (!ethqos)
		return -ENODEV;

	if (ethqos->gdsc_off_on_suspend) {
		ret = regulator_enable(ethqos->gdsc_emac);
		if (ret)
			ETHQOSERR("Can not enable <%s>\n", EMAC_GDSC_EMAC_NAME);
		ETHQOSDBG("Enabled <%s>\n", EMAC_GDSC_EMAC_NAME);
	}

	ndev = dev_get_drvdata(dev);

	if (!ndev) {
		ETHQOSERR(" Resume not possible\n");
		return -EINVAL;
	}

	qcom_ethqos_phy_resume_clks(ethqos);

	/* The functional clock must be re-enabled after a power-cycle of
	 * the GDSC before the MAC is touched.
	 */
	if (ethqos->gdsc_off_on_suspend)
		ethqos_set_func_clk_en(ethqos);

	ret = stmmac_resume(dev);

	ETHQOSDBG("<--Resume Exit\n");
	return ret;
}
/* Acquire and enable the stmmac core clock, the APB pclk and the RGMII
 * clock (used on the hibernation-restore path).  Uses a goto cleanup
 * ladder: a later failure unwinds the clocks enabled before it.
 *
 * Note: a missing stmmac_clk is tolerated (pointer is NULLed and the
 * probe continues); pclk and rgmii failures abort with the errno.
 */
static int qcom_ethqos_enable_clks(struct qcom_ethqos *ethqos, struct device *dev)
{
	struct stmmac_priv *priv = qcom_ethqos_get_priv(ethqos);
	int ret = 0;

	/* clock setup */
	priv->plat->stmmac_clk = devm_clk_get(dev,
					      STMMAC_RESOURCE_NAME);
	if (IS_ERR(priv->plat->stmmac_clk)) {
		dev_warn(dev, "stmmac_clk clock failed\n");
		ret = PTR_ERR(priv->plat->stmmac_clk);
		priv->plat->stmmac_clk = NULL;
	} else {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			ETHQOSINFO("stmmac_clk clk failed\n");
	}

	priv->plat->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(priv->plat->pclk)) {
		dev_warn(dev, "pclk clock failed\n");
		ret = PTR_ERR(priv->plat->pclk);
		priv->plat->pclk = NULL;
		goto error_pclk_get;
	} else {
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			ETHQOSINFO("pclk clk failed\n");
			goto error_pclk_get;
		}
	}

	ethqos->rgmii_clk = devm_clk_get(dev, "rgmii");
	if (IS_ERR(ethqos->rgmii_clk)) {
		dev_warn(dev, "rgmii clock failed\n");
		ret = PTR_ERR(ethqos->rgmii_clk);
		goto error_rgmii_get;
	} else {
		ret = clk_prepare_enable(ethqos->rgmii_clk);
		if (ret) {
			ETHQOSINFO("rgmmi clk failed\n");
			goto error_rgmii_get;
		}
	}
	return 0;

	/* clk_disable_unprepare(NULL) is a safe no-op, so the ladder is
	 * safe even when an earlier clock was never obtained.
	 */
error_rgmii_get:
	clk_disable_unprepare(priv->plat->pclk);
error_pclk_get:
	clk_disable_unprepare(priv->plat->stmmac_clk);
	return ret;
}
  2132. static void qcom_ethqos_disable_clks(struct qcom_ethqos *ethqos, struct device *dev)
  2133. {
  2134. struct stmmac_priv *priv = qcom_ethqos_get_priv(ethqos);
  2135. ETHQOSINFO("Enter\n");
  2136. if (priv->plat->stmmac_clk)
  2137. clk_disable_unprepare(priv->plat->stmmac_clk);
  2138. if (priv->plat->pclk)
  2139. clk_disable_unprepare(priv->plat->pclk);
  2140. if (ethqos->rgmii_clk)
  2141. clk_disable_unprepare(ethqos->rgmii_clk);
  2142. ETHQOSINFO("Exit\n");
  2143. }
/* Hibernation restore handler: re-initialize regulators, GPIOs and
 * clocks, restore the RGMII rate, optionally re-init PTP, soft-reset
 * the MAC, and re-open the interface if it was down.
 */
static int qcom_ethqos_hib_restore(struct device *dev)
{
	struct qcom_ethqos *ethqos;
	struct stmmac_priv *priv;
	struct net_device *ndev = NULL;
	int ret = 0;

	if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
		return 0;

	ETHQOSINFO(" start\n");

	ethqos = get_stmmac_bsp_priv(dev);
	if (!ethqos)
		return -ENODEV;

	ndev = dev_get_drvdata(dev);

	if (!ndev)
		return -EINVAL;

	priv = netdev_priv(ndev);

	ret = ethqos_init_regulators(ethqos);
	if (ret)
		return ret;

	ret = ethqos_init_gpio(ethqos);
	if (ret)
		return ret;

	ret = qcom_ethqos_enable_clks(ethqos, dev);
	if (ret)
		return ret;

	/* Restore the RGMII rate for the last negotiated speed. */
	ethqos_update_rgmii_clk(ethqos, ethqos->speed);

	ethqos_set_func_clk_en(ethqos);

#ifdef DWC_ETH_QOS_CONFIG_PTP
	if (priv->plat->clk_ptp_ref) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
	}
	ret = stmmac_init_ptp(priv);
	if (ret == -EOPNOTSUPP) {
		netdev_warn(priv->dev, "PTP not supported by HW\n");
	} else if (ret) {
		netdev_warn(priv->dev, "PTP init failed\n");
	} else {
		clk_set_rate(priv->plat->clk_ptp_ref,
			     priv->plat->clk_ptp_rate);
	}

	ret = priv->plat->init_pps(priv);
#endif /* end of DWC_ETH_QOS_CONFIG_PTP */

	/* issue software reset to device */
	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset\n");
		return ret;
	}

	/* The interface was closed in the freeze handler; reopen it. */
	if (!netif_running(ndev)) {
		rtnl_lock();
		dev_open(ndev, NULL);
		rtnl_unlock();

		ETHQOSINFO("calling open\n");
	}

	ETHQOSINFO("end\n");

	return ret;
}
  2203. static int qcom_ethqos_hib_freeze(struct device *dev)
  2204. {
  2205. struct qcom_ethqos *ethqos;
  2206. struct stmmac_priv *priv;
  2207. int ret = 0;
  2208. struct net_device *ndev = NULL;
  2209. if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
  2210. return 0;
  2211. ethqos = get_stmmac_bsp_priv(dev);
  2212. if (!ethqos)
  2213. return -ENODEV;
  2214. ndev = dev_get_drvdata(dev);
  2215. if (!ndev)
  2216. return -EINVAL;
  2217. priv = netdev_priv(ndev);
  2218. ETHQOSINFO("start\n");
  2219. if (netif_running(ndev)) {
  2220. rtnl_lock();
  2221. dev_close(ndev);
  2222. rtnl_unlock();
  2223. ETHQOSINFO("calling netdev off\n");
  2224. }
  2225. #ifdef DWC_ETH_QOS_CONFIG_PTP
  2226. stmmac_release_ptp(priv);
  2227. #endif /* end of DWC_ETH_QOS_CONFIG_PTP */
  2228. qcom_ethqos_disable_clks(ethqos, dev);
  2229. ethqos_disable_regulators(ethqos);
  2230. ethqos_free_gpios(ethqos);
  2231. ETHQOSINFO("end\n");
  2232. return ret;
  2233. }
/* PM callbacks: freeze/restore/thaw cover hibernation, suspend/resume cover
 * ordinary system sleep.  thaw reuses the restore path since both must
 * re-enable the same resources.
 */
static const struct dev_pm_ops qcom_ethqos_pm_ops = {
	.freeze = qcom_ethqos_hib_freeze,
	.restore = qcom_ethqos_hib_restore,
	.thaw = qcom_ethqos_hib_restore,
	.suspend = qcom_ethqos_suspend,
	.resume = qcom_ethqos_resume,
};
/* Platform driver glue: probe/remove entry points, PM ops above, and the
 * DT compatible table used for matching.
 */
static struct platform_driver qcom_ethqos_driver = {
	.probe = qcom_ethqos_probe,
	.remove = qcom_ethqos_remove,
	.driver = {
		.name = DRV_NAME,
		.pm = &qcom_ethqos_pm_ops,
		.of_match_table = of_match_ptr(qcom_ethqos_match),
	},
};
  2250. static int __init qcom_ethqos_init_module(void)
  2251. {
  2252. int ret = 0;
  2253. ETHQOSDBG("\n");
  2254. ret = platform_driver_register(&qcom_ethqos_driver);
  2255. if (ret < 0) {
  2256. ETHQOSINFO("qcom-ethqos: Driver registration failed");
  2257. return ret;
  2258. }
  2259. ETHQOSDBG("\n");
  2260. return ret;
  2261. }
/* Module exit: unregister the platform driver, undoing
 * qcom_ethqos_init_module().
 */
static void __exit qcom_ethqos_exit_module(void)
{
	ETHQOSDBG("\n");
	platform_driver_unregister(&qcom_ethqos_driver);
	ETHQOSDBG("\n");
}
/* Module entry point: invoked at load time to register the driver. */
module_init(qcom_ethqos_init_module)

/* Module exit point: invoked at unload time to unregister the driver. */
module_exit(qcom_ethqos_exit_module)

MODULE_DESCRIPTION("Qualcomm ETHQOS driver");
MODULE_LICENSE("GPL v2");