qca8k-8xxx.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2009 Felix Fietkau <[email protected]>
  4. * Copyright (C) 2011-2012 Gabor Juhos <[email protected]>
  5. * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
  6. * Copyright (c) 2016 John Crispin <[email protected]>
  7. */
  8. #include <linux/module.h>
  9. #include <linux/phy.h>
  10. #include <linux/netdevice.h>
  11. #include <linux/bitfield.h>
  12. #include <linux/regmap.h>
  13. #include <net/dsa.h>
  14. #include <linux/of_net.h>
  15. #include <linux/of_mdio.h>
  16. #include <linux/of_platform.h>
  17. #include <linux/mdio.h>
  18. #include <linux/phylink.h>
  19. #include <linux/gpio/consumer.h>
  20. #include <linux/etherdevice.h>
  21. #include <linux/dsa/tag_qca.h>
  22. #include "qca8k.h"
  23. static void
  24. qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
  25. {
  26. regaddr >>= 1;
  27. *r1 = regaddr & 0x1e;
  28. regaddr >>= 5;
  29. *r2 = regaddr & 0x7;
  30. regaddr >>= 3;
  31. *page = regaddr & 0x3ff;
  32. }
  33. static int
  34. qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
  35. {
  36. int ret;
  37. ret = bus->read(bus, phy_id, regnum);
  38. if (ret >= 0) {
  39. *val = ret;
  40. ret = bus->read(bus, phy_id, regnum + 1);
  41. *val |= ret << 16;
  42. }
  43. if (ret < 0) {
  44. dev_err_ratelimited(&bus->dev,
  45. "failed to read qca8k 32bit register\n");
  46. *val = 0;
  47. return ret;
  48. }
  49. return 0;
  50. }
  51. static void
  52. qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
  53. {
  54. u16 lo, hi;
  55. int ret;
  56. lo = val & 0xffff;
  57. hi = (u16)(val >> 16);
  58. ret = bus->write(bus, phy_id, regnum, lo);
  59. if (ret >= 0)
  60. ret = bus->write(bus, phy_id, regnum + 1, hi);
  61. if (ret < 0)
  62. dev_err_ratelimited(&bus->dev,
  63. "failed to write qca8k 32bit register\n");
  64. }
  65. static int
  66. qca8k_set_page(struct qca8k_priv *priv, u16 page)
  67. {
  68. u16 *cached_page = &priv->mdio_cache.page;
  69. struct mii_bus *bus = priv->bus;
  70. int ret;
  71. if (page == *cached_page)
  72. return 0;
  73. ret = bus->write(bus, 0x18, 0, page);
  74. if (ret < 0) {
  75. dev_err_ratelimited(&bus->dev,
  76. "failed to set qca8k page\n");
  77. return ret;
  78. }
  79. *cached_page = page;
  80. usleep_range(1000, 2000);
  81. return 0;
  82. }
/* Handler for the ack packet the switch sends back for a mgmt read/write
 * request. Parses the qca mgmt header, copies read data into
 * mgmt_eth_data->data and wakes the waiting requester via rw_done.
 * mgmt_eth_data->ack is set only when the sequence number matches.
 */
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	struct qca8k_priv *priv = ds->priv;
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	u32 command;
	u8 len, cmd;
	int i;

	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
	mgmt_eth_data = &priv->mgmt_eth_data;

	command = get_unaligned_le32(&mgmt_ethhdr->command);
	cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);

	len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
	/* Special case for len of 15 as this is the max value for len and needs to
	 * be increased before converting it from word to dword.
	 */
	if (len == 15)
		len++;

	/* We can ignore odd value, we always round up them in the alloc function. */
	len *= sizeof(u16);

	/* Make sure the seq match the requested packet */
	if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
		mgmt_eth_data->ack = true;

	if (cmd == MDIO_READ) {
		u32 *val = mgmt_eth_data->data;

		/* First 4 bytes of read data travel in the mgmt header itself */
		*val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);

		/* Get the rest of the 12 byte of data.
		 * The read/write function will extract the requested data.
		 */
		if (len > QCA_HDR_MGMT_DATA1_LEN) {
			__le32 *data2 = (__le32 *)skb->data;
			int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
					     len - QCA_HDR_MGMT_DATA1_LEN);

			val++;

			for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
				*val = get_unaligned_le32(data2);
				val++;
				data2++;
			}
		}
	}

	/* Wake the requester unconditionally; 'ack' above tells it whether
	 * the sequence number actually matched.
	 */
	complete(&mgmt_eth_data->rw_done);
}
/* Build a mgmt read/write packet for switch register @reg.
 * @val points at the data to embed for MDIO_WRITE (ignored for reads);
 * @len is the requested payload size in bytes (up to 32).
 * Returns a freshly allocated skb, or NULL on allocation failure.
 */
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
					       int priority, unsigned int len)
{
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	unsigned int real_len;
	struct sk_buff *skb;
	__le32 *data2;
	u32 command;
	u16 hdr;
	int i;

	skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
	if (!skb)
		return NULL;

	/* Hdr mgmt length value is in step of word size.
	 * As an example to process 4 byte of data the correct length to set is 2.
	 * To process 8 byte 4, 12 byte 6, 16 byte 8...
	 *
	 * Odd values will always return the next size on the ack packet.
	 * (length of 3 (6 byte) will always return 8 bytes of data)
	 *
	 * This means that a value of 15 (0xf) actually means reading/writing 32 bytes
	 * of data.
	 *
	 * To correctly calculate the length we divide the requested len by word and
	 * round up.
	 * On the ack function we can skip the odd check as we already handle the
	 * case here.
	 */
	real_len = DIV_ROUND_UP(len, sizeof(u16));

	/* We check if the result len is odd and we round up another time to
	 * the next size. (length of 3 will be increased to 4 as switch will always
	 * return 8 bytes)
	 */
	if (real_len % sizeof(u16) != 0)
		real_len++;

	/* Max reg value is 0xf(15) but switch will always return the next size (32 byte) */
	if (real_len == 16)
		real_len--;

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);

	mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);

	/* Assemble the qca tag: version, priority, from-CPU flag, dest port
	 * bitmap and the read/write-register packet type.
	 */
	hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
	hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
	hdr |= QCA_HDR_XMIT_FROM_CPU;
	hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
	hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);

	command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
	command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
	command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
	command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
			      QCA_HDR_MGMT_CHECK_CODE_VAL);

	put_unaligned_le32(command, &mgmt_ethhdr->command);

	/* First 4 bytes of write data ride in the mgmt header itself */
	if (cmd == MDIO_WRITE)
		put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);

	mgmt_ethhdr->hdr = htons(hdr);

	data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
	if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
		int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
				     len - QCA_HDR_MGMT_DATA1_LEN);

		val++;

		for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
			put_unaligned_le32(*val, data2);
			data2++;
			val++;
		}
	}

	return skb;
}
  194. static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
  195. {
  196. struct qca_mgmt_ethhdr *mgmt_ethhdr;
  197. u32 seq;
  198. seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
  199. mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
  200. put_unaligned_le32(seq, &mgmt_ethhdr->seq);
  201. }
/* Read @len bytes starting at switch register @reg via the Ethernet mgmt
 * channel. Result is stored in *val (and following words for len > 4).
 * Returns 0 on success, -ENOMEM on alloc failure, -EINVAL when no mgmt
 * master is attached or the ack seq mismatched, -ETIMEDOUT on timeout.
 */
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check mgmt_master if is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	/* skb ownership passes to the network stack here */
	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	/* NOTE(review): data is copied out even on timeout/seq mismatch;
	 * callers must check the return code before trusting *val.
	 */
	*val = mgmt_eth_data->data[0];
	if (len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
/* Write @len bytes starting at switch register @reg via the Ethernet mgmt
 * channel, taking the words from *val onward.
 * Returns 0 on success, -ENOMEM on alloc failure, -EINVAL when no mgmt
 * master is attached or the ack seq mismatched, -ETIMEDOUT on timeout.
 */
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check mgmt_master if is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	/* skb ownership passes to the network stack here */
	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
  273. static int
  274. qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
  275. {
  276. u32 val = 0;
  277. int ret;
  278. ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
  279. if (ret)
  280. return ret;
  281. val &= ~mask;
  282. val |= write_val;
  283. return qca8k_write_eth(priv, reg, &val, sizeof(val));
  284. }
  285. static int
  286. qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
  287. {
  288. struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
  289. struct mii_bus *bus = priv->bus;
  290. u16 r1, r2, page;
  291. int ret;
  292. if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
  293. return 0;
  294. qca8k_split_addr(reg, &r1, &r2, &page);
  295. mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
  296. ret = qca8k_set_page(priv, page);
  297. if (ret < 0)
  298. goto exit;
  299. ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
  300. exit:
  301. mutex_unlock(&bus->mdio_lock);
  302. return ret;
  303. }
  304. static int
  305. qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
  306. {
  307. struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
  308. struct mii_bus *bus = priv->bus;
  309. u16 r1, r2, page;
  310. int ret;
  311. if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
  312. return 0;
  313. qca8k_split_addr(reg, &r1, &r2, &page);
  314. mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
  315. ret = qca8k_set_page(priv, page);
  316. if (ret < 0)
  317. goto exit;
  318. qca8k_mii_write32(bus, 0x10 | r2, r1, val);
  319. exit:
  320. mutex_unlock(&bus->mdio_lock);
  321. return ret;
  322. }
/* regmap .reg_update_bits callback: read-modify-write of @reg.
 * Prefers the Ethernet mgmt channel and falls back to the paged MDIO
 * access path when that fails.
 */
static int
qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
	if (ret < 0)
		goto exit;

	/* Clear the masked bits, then OR in the new value */
	val &= ~mask;
	val |= write_val;
	qca8k_mii_write32(bus, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);

	return ret;
}
/* regmap over the switch's paged register space. Serialization is done
 * inside the read/write callbacks themselves, hence disable_locking.
 */
static struct regmap_config qca8k_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac, /* end MIB - Port6 range */
	.reg_read = qca8k_regmap_read,
	.reg_write = qca8k_regmap_write,
	.reg_update_bits = qca8k_regmap_update_bits,
	.rd_table = &qca8k_readable_table,
	.disable_locking = true, /* Locking is handled by qca8k read/write */
	.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
};
  360. static int
  361. qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
  362. struct sk_buff *read_skb, u32 *val)
  363. {
  364. struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
  365. bool ack;
  366. int ret;
  367. if (!skb)
  368. return -ENOMEM;
  369. reinit_completion(&mgmt_eth_data->rw_done);
  370. /* Increment seq_num and set it in the copy pkt */
  371. mgmt_eth_data->seq++;
  372. qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
  373. mgmt_eth_data->ack = false;
  374. dev_queue_xmit(skb);
  375. ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
  376. QCA8K_ETHERNET_TIMEOUT);
  377. ack = mgmt_eth_data->ack;
  378. if (ret <= 0)
  379. return -ETIMEDOUT;
  380. if (!ack)
  381. return -EINVAL;
  382. *val = mgmt_eth_data->data[0];
  383. return 0;
  384. }
/* Access an internal PHY register through the switch's MDIO master,
 * driven over the Ethernet mgmt channel. @read selects read vs write of
 * PHY @phy register @regnum; @data is the value for writes.
 * Returns the (masked) read value or 0, negative errno on failure.
 */
static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
		      int regnum, u16 data)
{
	struct sk_buff *write_skb, *clear_skb, *read_skb;
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	u32 write_val, clear_val = 0, val;
	struct net_device *mgmt_master;
	int ret, ret1;
	bool ack;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	mgmt_eth_data = &priv->mgmt_eth_data;

	/* Compose the MDIO master control word: BUSY kicks the transaction */
	write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
		    QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
		    QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	if (read) {
		write_val |= QCA8K_MDIO_MASTER_READ;
	} else {
		write_val |= QCA8K_MDIO_MASTER_WRITE;
		write_val |= QCA8K_MDIO_MASTER_DATA(data);
	}

	/* Prealloc all the needed skb before the lock */
	write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
	if (!write_skb)
		return -ENOMEM;

	clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!clear_skb) {
		ret = -ENOMEM;
		goto err_clear_skb;
	}

	read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					   QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!read_skb) {
		ret = -ENOMEM;
		goto err_read_skb;
	}

	/* It seems that accessing the switch's internal PHYs via management
	 * packets still uses the MDIO bus within the switch internally, and
	 * these accesses can conflict with external MDIO accesses to other
	 * devices on the MDIO bus.
	 * We therefore need to lock the MDIO bus onto which the switch is
	 * connected.
	 */
	mutex_lock(&priv->bus->mdio_lock);

	/* Actually start the request:
	 * 1. Send mdio master packet
	 * 2. Busy Wait for mdio master command
	 * 3. Get the data if we are reading
	 * 4. Reset the mdio master (even with error)
	 */
	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if mgmt_master is operational */
	mgmt_master = priv->mgmt_master;
	if (!mgmt_master) {
		mutex_unlock(&mgmt_eth_data->mutex);
		mutex_unlock(&priv->bus->mdio_lock);
		ret = -EINVAL;
		goto err_mgmt_master;
	}

	read_skb->dev = mgmt_master;
	clear_skb->dev = mgmt_master;
	write_skb->dev = mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the write pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(write_skb);

	/* NOTE(review): QCA8K_ETHERNET_TIMEOUT is passed as raw jiffies here
	 * (three times below too) while qca8k_read_eth/qca8k_write_eth wrap
	 * it in msecs_to_jiffies() — confirm the intended unit.
	 */
	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  QCA8K_ETHERNET_TIMEOUT);

	ack = mgmt_eth_data->ack;

	if (ret <= 0) {
		ret = -ETIMEDOUT;
		kfree_skb(read_skb);
		goto exit;
	}

	if (!ack) {
		ret = -EINVAL;
		kfree_skb(read_skb);
		goto exit;
	}

	/* Poll the MDIO master BUSY flag via repeated copies of read_skb */
	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				mgmt_eth_data, read_skb, &val);

	if (ret < 0 && ret1 < 0) {
		ret = ret1;
		goto exit;
	}

	if (read) {
		reinit_completion(&mgmt_eth_data->rw_done);

		/* Increment seq_num and set it in the read pkt */
		mgmt_eth_data->seq++;
		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
		mgmt_eth_data->ack = false;

		/* read_skb ownership passes to the stack here */
		dev_queue_xmit(read_skb);

		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
						  QCA8K_ETHERNET_TIMEOUT);

		ack = mgmt_eth_data->ack;

		if (ret <= 0) {
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!ack) {
			ret = -EINVAL;
			goto exit;
		}

		ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
	} else {
		/* Write path never queues read_skb, so free it here */
		kfree_skb(read_skb);
	}
exit:
	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the clear pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	/* Step 4: always clear the MDIO master ctrl reg, even on error */
	dev_queue_xmit(clear_skb);

	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
				    QCA8K_ETHERNET_TIMEOUT);

	mutex_unlock(&mgmt_eth_data->mutex);
	mutex_unlock(&priv->bus->mdio_lock);

	return ret;

	/* Error handling before lock */
err_mgmt_master:
	kfree_skb(read_skb);
err_read_skb:
	kfree_skb(clear_skb);
err_clear_skb:
	kfree_skb(write_skb);

	return ret;
}
  520. static u32
  521. qca8k_port_to_phy(int port)
  522. {
  523. /* From Andrew Lunn:
  524. * Port 0 has no internal phy.
  525. * Port 1 has an internal PHY at MDIO address 0.
  526. * Port 2 has an internal PHY at MDIO address 1.
  527. * ...
  528. * Port 5 has an internal PHY at MDIO address 4.
  529. * Port 6 has no internal PHY.
  530. */
  531. return port - 1;
  532. }
/* Poll switch register @reg until the bits in @mask clear or
 * QCA8K_BUSY_WAIT_TIMEOUT ms elapse. Returns 0 on success, -ETIMEDOUT on
 * timeout, or the underlying MII read error if the access itself failed.
 */
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
	u16 r1, r2, page;
	u32 val;
	int ret, ret1;

	qca8k_split_addr(reg, &r1, &r2, &page);

	ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				bus, 0x10 | r2, r1, &val);

	/* Check if qca8k_read has failed for a different reason
	 * before returning -ETIMEDOUT
	 */
	if (ret < 0 && ret1 < 0)
		return ret1;

	return ret;
}
/* Write @data to register @regnum of PHY @phy through the switch's MDIO
 * master, using the legacy paged MDIO access path.
 * Returns 0 on success or a negative errno.
 */
static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	/* BUSY kicks off the transaction; the switch clears it when done */
	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
	      QCA8K_MDIO_MASTER_DATA(data);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	qca8k_mii_write32(bus, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write32(bus, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	return ret;
}
/* Read register @regnum of PHY @phy through the switch's MDIO master,
 * using the legacy paged MDIO access path.
 * Returns the 16bit read value on success or a negative errno.
 */
static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	/* BUSY kicks off the transaction; the switch clears it when done */
	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	qca8k_mii_write32(bus, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);
	if (ret)
		goto exit;

	/* The read result lands in the same ctrl register's DATA field */
	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write32(bus, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	if (ret >= 0)
		ret = val & QCA8K_MDIO_MASTER_DATA_MASK;

	return ret;
}
  608. static int
  609. qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
  610. {
  611. struct qca8k_priv *priv = slave_bus->priv;
  612. int ret;
  613. /* Use mdio Ethernet when available, fallback to legacy one on error */
  614. ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
  615. if (!ret)
  616. return 0;
  617. return qca8k_mdio_write(priv, phy, regnum, data);
  618. }
  619. static int
  620. qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
  621. {
  622. struct qca8k_priv *priv = slave_bus->priv;
  623. int ret;
  624. /* Use mdio Ethernet when available, fallback to legacy one on error */
  625. ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
  626. if (ret >= 0)
  627. return ret;
  628. ret = qca8k_mdio_read(priv, phy, regnum);
  629. if (ret < 0)
  630. return 0xffff;
  631. return ret;
  632. }
  633. static int
  634. qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
  635. {
  636. port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
  637. return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
  638. }
  639. static int
  640. qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
  641. {
  642. port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
  643. return qca8k_internal_mdio_read(slave_bus, port, regnum);
  644. }
  645. static int
  646. qca8k_mdio_register(struct qca8k_priv *priv)
  647. {
  648. struct dsa_switch *ds = priv->ds;
  649. struct device_node *mdio;
  650. struct mii_bus *bus;
  651. bus = devm_mdiobus_alloc(ds->dev);
  652. if (!bus)
  653. return -ENOMEM;
  654. bus->priv = (void *)priv;
  655. snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
  656. ds->dst->index, ds->index);
  657. bus->parent = ds->dev;
  658. bus->phy_mask = ~ds->phys_mii_mask;
  659. ds->slave_mii_bus = bus;
  660. /* Check if the devicetree declare the port:phy mapping */
  661. mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
  662. if (of_device_is_available(mdio)) {
  663. bus->name = "qca8k slave mii";
  664. bus->read = qca8k_internal_mdio_read;
  665. bus->write = qca8k_internal_mdio_write;
  666. return devm_of_mdiobus_register(priv->dev, bus, mdio);
  667. }
  668. /* If a mapping can't be found the legacy mapping is used,
  669. * using the qca8k_port_to_phy function
  670. */
  671. bus->name = "qca8k-legacy slave mii";
  672. bus->read = qca8k_legacy_mdio_read;
  673. bus->write = qca8k_legacy_mdio_write;
  674. return devm_mdiobus_register(priv->dev, bus);
  675. }
/* Decide between internal and external MDIO bus configuration by walking
 * the devicetree port nodes, then either disable the switch's MDIO
 * master (external PHYs) or register the internal slave MDIO bus.
 * The two configurations are mutually exclusive in hardware.
 * Returns 0 on success or a negative errno.
 */
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
	struct device_node *ports, *port;
	phy_interface_t mode;
	int err;

	ports = of_get_child_by_name(priv->dev->of_node, "ports");
	if (!ports)
		ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");

	if (!ports)
		return -EINVAL;

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Drop both node refs before bailing out */
			of_node_put(port);
			of_node_put(ports);
			return err;
		}

		if (!dsa_is_user_port(priv->ds, reg))
			continue;

		of_get_phy_mode(port, &mode);

		/* A phy-handle with a non-internal mode means an external PHY */
		if (of_property_read_bool(port, "phy-handle") &&
		    mode != PHY_INTERFACE_MODE_INTERNAL)
			external_mdio_mask |= BIT(reg);
		else
			internal_mdio_mask |= BIT(reg);
	}

	of_node_put(ports);
	if (!external_mdio_mask && !internal_mdio_mask) {
		dev_err(priv->dev, "no PHYs are defined.\n");
		return -EINVAL;
	}

	/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
	 * the MDIO_MASTER register also _disconnects_ the external MDC
	 * passthrough to the internal PHYs. It's not possible to use both
	 * configurations at the same time!
	 *
	 * Because this came up during the review process:
	 * If the external mdio-bus driver is capable magically disabling
	 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
	 * accessors for the time being, it would be possible to pull this
	 * off.
	 */
	if (!!external_mdio_mask && !!internal_mdio_mask) {
		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
		return -EINVAL;
	}

	if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in cases
		 * a dt-overlay and driver reload changed the configuration
		 */
		return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
					 QCA8K_MDIO_MASTER_EN);
	}

	return qca8k_mdio_register(priv);
}
  733. static int
  734. qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
  735. {
  736. u32 mask = 0;
  737. int ret = 0;
  738. /* SoC specific settings for ipq8064.
  739. * If more device require this consider adding
  740. * a dedicated binding.
  741. */
  742. if (of_machine_is_compatible("qcom,ipq8064"))
  743. mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
  744. /* SoC specific settings for ipq8065 */
  745. if (of_machine_is_compatible("qcom,ipq8065"))
  746. mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
  747. if (mask) {
  748. ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
  749. QCA8K_MAC_PWR_RGMII0_1_8V |
  750. QCA8K_MAC_PWR_RGMII1_1_8V,
  751. mask);
  752. }
  753. return ret;
  754. }
  755. static int qca8k_find_cpu_port(struct dsa_switch *ds)
  756. {
  757. struct qca8k_priv *priv = ds->priv;
  758. /* Find the connected cpu port. Valid port are 0 or 6 */
  759. if (dsa_is_cpu_port(ds, 0))
  760. return 0;
  761. dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
  762. if (dsa_is_cpu_port(ds, 6))
  763. return 6;
  764. return -EINVAL;
  765. }
/* Program the PWS (power-on strapping) register from DT properties.
 * Returns 0 on success or a negative errno.
 */
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
	const struct qca8k_match_data *data = priv->info;
	struct device_node *node = priv->dev->of_node;
	u32 val = 0;
	int ret;

	/* QCA8327 require to set to the correct mode.
	 * His bigger brother QCA8328 have the 172 pin layout.
	 * Should be applied by default but we set this just to make sure.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		/* Set the correct package of 148 pin for QCA8327 */
		if (data->reduced_package)
			val |= QCA8327_PWS_PACKAGE148_EN;
		ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
				val);
		if (ret)
			return ret;
	}

	if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
		val |= QCA8K_PWS_POWER_ON_SEL;

	/* Open-drain LEDs are only honoured when the power-on strapping
	 * override is also requested.
	 */
	if (of_property_read_bool(node, "qca,led-open-drain")) {
		if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
			dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
			return -EINVAL;
		}

		val |= QCA8K_PWS_LED_OPEN_EN_CSR;
	}

	return qca8k_rmw(priv, QCA8K_REG_PWS,
			QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
			val);
}
/* Parse the DT configuration of the two possible CPU ports (0 and 6):
 * RGMII tx/rx internal delays and SGMII clock-edge/PLL tweaks. The
 * results are cached in priv->ports_config for later use in mac_config.
 *
 * Always returns 0; ports that are unavailable or have no/unsupported
 * phy-mode are simply skipped.
 */
static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
	int port, cpu_port_index = -1, ret;
	struct device_node *port_dn;
	phy_interface_t mode;
	struct dsa_port *dp;
	u32 delay;

	/* We have 2 CPU port. Check them */
	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip every other port */
		if (port != 0 && port != 6)
			continue;

		dp = dsa_to_port(priv->ds, port);
		port_dn = dp->dn;
		/* Counted even when the node is skipped below, so index 0
		 * always maps to port 0 and index 1 to port 6.
		 */
		cpu_port_index++;

		if (!of_device_is_available(port_dn))
			continue;

		ret = of_get_phy_mode(port_dn, &mode);
		if (ret)
			continue;

		switch (mode) {
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_SGMII:
			delay = 0;

			/* DT value wins; otherwise rgmii-id/txid implies the
			 * default 1 ns tx delay.
			 */
			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_TXID)
				delay = 1;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;

			delay = 0;

			/* Same logic for the rx direction: DT value, else
			 * rgmii-id/rxid implies the default 2 ns rx delay.
			 */
			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_RXID)
				delay = 2;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;

			/* Skip sgmii parsing for rgmii* mode */
			if (mode == PHY_INTERFACE_MODE_RGMII ||
			    mode == PHY_INTERFACE_MODE_RGMII_ID ||
			    mode == PHY_INTERFACE_MODE_RGMII_TXID ||
			    mode == PHY_INTERFACE_MODE_RGMII_RXID)
				break;

			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
				priv->ports_config.sgmii_tx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
				priv->ports_config.sgmii_rx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
				priv->ports_config.sgmii_enable_pll = true;

				/* The PLL request is refused on qca8327 and
				 * only warned about on early qca8337 silicon.
				 */
				if (priv->switch_id == QCA8K_ID_QCA8327) {
					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
					priv->ports_config.sgmii_enable_pll = false;
				}

				if (priv->switch_revision < 2)
					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
			}

			break;
		default:
			continue;
		}
	}

	return 0;
}
  876. static void
  877. qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
  878. u32 reg)
  879. {
  880. u32 delay, val = 0;
  881. int ret;
  882. /* Delay can be declared in 3 different way.
  883. * Mode to rgmii and internal-delay standard binding defined
  884. * rgmii-id or rgmii-tx/rx phy mode set.
  885. * The parse logic set a delay different than 0 only when one
  886. * of the 3 different way is used. In all other case delay is
  887. * not enabled. With ID or TX/RXID delay is enabled and set
  888. * to the default and recommended value.
  889. */
  890. if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
  891. delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
  892. val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
  893. QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
  894. }
  895. if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
  896. delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
  897. val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
  898. QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
  899. }
  900. /* Set RGMII delay based on the selected values */
  901. ret = qca8k_rmw(priv, reg,
  902. QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
  903. QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
  904. QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
  905. QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
  906. val);
  907. if (ret)
  908. dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
  909. cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
  910. }
  911. static struct phylink_pcs *
  912. qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
  913. phy_interface_t interface)
  914. {
  915. struct qca8k_priv *priv = ds->priv;
  916. struct phylink_pcs *pcs = NULL;
  917. switch (interface) {
  918. case PHY_INTERFACE_MODE_SGMII:
  919. case PHY_INTERFACE_MODE_1000BASEX:
  920. switch (port) {
  921. case 0:
  922. pcs = &priv->pcs_port_0.pcs;
  923. break;
  924. case 6:
  925. pcs = &priv->pcs_port_6.pcs;
  926. break;
  927. }
  928. break;
  929. default:
  930. break;
  931. }
  932. return pcs;
  933. }
/* Configure the pad-control register of a CPU port (0 or 6) for the
 * interface mode phylink selected: RGMII (with optional internal delays)
 * or SGMII/1000base-X. Internal-PHY ports (1-5) need no MAC-side setup.
 */
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
			 const struct phylink_link_state *state)
{
	struct qca8k_priv *priv = ds->priv;
	int cpu_port_index;
	u32 reg;

	switch (port) {
	case 0: /* 1st CPU port */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII)
			return;

		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
		/* Internal PHY, nothing to do */
		return;
	case 6: /* 2nd CPU port / external PHY */
		/* Port 6 additionally accepts 1000base-X */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII &&
		    state->interface != PHY_INTERFACE_MODE_1000BASEX)
			return;

		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;
	default:
		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
		return;
	}

	/* Only port 6 supports in-band negotiation */
	if (port != 6 && phylink_autoneg_inband(mode)) {
		dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
			__func__);
		return;
	}

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);

		/* Configure rgmii delay */
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);

		/* QCA8337 requires to set rgmii rx delay for all ports.
		 * This is enabled through PORT5_PAD_CTRL for all ports,
		 * rather than individual port registers.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337)
			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		/* Enable SGMII on the port */
		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
		break;
	default:
		dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
			phy_modes(state->interface), port);
		return;
	}
}
  1006. static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
  1007. struct phylink_config *config)
  1008. {
  1009. switch (port) {
  1010. case 0: /* 1st CPU port */
  1011. phy_interface_set_rgmii(config->supported_interfaces);
  1012. __set_bit(PHY_INTERFACE_MODE_SGMII,
  1013. config->supported_interfaces);
  1014. break;
  1015. case 1:
  1016. case 2:
  1017. case 3:
  1018. case 4:
  1019. case 5:
  1020. /* Internal PHY */
  1021. __set_bit(PHY_INTERFACE_MODE_GMII,
  1022. config->supported_interfaces);
  1023. __set_bit(PHY_INTERFACE_MODE_INTERNAL,
  1024. config->supported_interfaces);
  1025. break;
  1026. case 6: /* 2nd CPU port / external PHY */
  1027. phy_interface_set_rgmii(config->supported_interfaces);
  1028. __set_bit(PHY_INTERFACE_MODE_SGMII,
  1029. config->supported_interfaces);
  1030. __set_bit(PHY_INTERFACE_MODE_1000BASEX,
  1031. config->supported_interfaces);
  1032. break;
  1033. }
  1034. config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
  1035. MAC_10 | MAC_100 | MAC_1000FD;
  1036. config->legacy_pre_march2020 = false;
  1037. }
  1038. static void
  1039. qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
  1040. phy_interface_t interface)
  1041. {
  1042. struct qca8k_priv *priv = ds->priv;
  1043. qca8k_port_set_status(priv, port, 0);
  1044. }
  1045. static void
  1046. qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
  1047. phy_interface_t interface, struct phy_device *phydev,
  1048. int speed, int duplex, bool tx_pause, bool rx_pause)
  1049. {
  1050. struct qca8k_priv *priv = ds->priv;
  1051. u32 reg;
  1052. if (phylink_autoneg_inband(mode)) {
  1053. reg = QCA8K_PORT_STATUS_LINK_AUTO;
  1054. } else {
  1055. switch (speed) {
  1056. case SPEED_10:
  1057. reg = QCA8K_PORT_STATUS_SPEED_10;
  1058. break;
  1059. case SPEED_100:
  1060. reg = QCA8K_PORT_STATUS_SPEED_100;
  1061. break;
  1062. case SPEED_1000:
  1063. reg = QCA8K_PORT_STATUS_SPEED_1000;
  1064. break;
  1065. default:
  1066. reg = QCA8K_PORT_STATUS_LINK_AUTO;
  1067. break;
  1068. }
  1069. if (duplex == DUPLEX_FULL)
  1070. reg |= QCA8K_PORT_STATUS_DUPLEX;
  1071. if (rx_pause || dsa_is_cpu_port(ds, port))
  1072. reg |= QCA8K_PORT_STATUS_RXFLOW;
  1073. if (tx_pause || dsa_is_cpu_port(ds, port))
  1074. reg |= QCA8K_PORT_STATUS_TXFLOW;
  1075. }
  1076. reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
  1077. qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
  1078. }
/* Resolve the driver-private qca8k_pcs wrapper from its embedded
 * phylink_pcs member.
 */
static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct qca8k_pcs, pcs);
}
  1083. static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
  1084. struct phylink_link_state *state)
  1085. {
  1086. struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
  1087. int port = pcs_to_qca8k_pcs(pcs)->port;
  1088. u32 reg;
  1089. int ret;
  1090. ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
  1091. if (ret < 0) {
  1092. state->link = false;
  1093. return;
  1094. }
  1095. state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
  1096. state->an_complete = state->link;
  1097. state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
  1098. DUPLEX_HALF;
  1099. switch (reg & QCA8K_PORT_STATUS_SPEED) {
  1100. case QCA8K_PORT_STATUS_SPEED_10:
  1101. state->speed = SPEED_10;
  1102. break;
  1103. case QCA8K_PORT_STATUS_SPEED_100:
  1104. state->speed = SPEED_100;
  1105. break;
  1106. case QCA8K_PORT_STATUS_SPEED_1000:
  1107. state->speed = SPEED_1000;
  1108. break;
  1109. default:
  1110. state->speed = SPEED_UNKNOWN;
  1111. break;
  1112. }
  1113. if (reg & QCA8K_PORT_STATUS_RXFLOW)
  1114. state->pause |= MLO_PAUSE_RX;
  1115. if (reg & QCA8K_PORT_STATUS_TXFLOW)
  1116. state->pause |= MLO_PAUSE_TX;
  1117. }
/* Configure the SerDes/SGMII block of the PCS's port: autoneg enable,
 * operating mode (PHY towards a CPU MAC, MAC towards an SGMII PHY, or
 * 1000base-X), optional PLL, delays for SGMII and clock-edge tweaks.
 *
 * Returns 0 on success or a negative errno from a failed register read.
 */
static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			    phy_interface_t interface,
			    const unsigned long *advertising,
			    bool permit_pause_to_mac)
{
	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
	int cpu_port_index, ret, port;
	u32 reg, val;

	port = pcs_to_qca8k_pcs(pcs)->port;
	switch (port) {
	case 0:
		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;
	case 6:
		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	/* Enable/disable SerDes auto-negotiation as necessary */
	ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
	if (ret)
		return ret;
	if (phylink_autoneg_inband(mode))
		val &= ~QCA8K_PWS_SERDES_AEN_DIS;
	else
		val |= QCA8K_PWS_SERDES_AEN_DIS;
	qca8k_write(priv, QCA8K_REG_PWS, val);

	/* Configure the SGMII parameters */
	ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
	if (ret)
		return ret;

	val |= QCA8K_SGMII_EN_SD;

	if (priv->ports_config.sgmii_enable_pll)
		val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
		       QCA8K_SGMII_EN_TX;

	if (dsa_is_cpu_port(priv->ds, port)) {
		/* CPU port, we're talking to the CPU MAC, be a PHY */
		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
		val |= QCA8K_SGMII_MODE_CTRL_PHY;
	} else if (interface == PHY_INTERFACE_MODE_SGMII) {
		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
		val |= QCA8K_SGMII_MODE_CTRL_MAC;
	} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
		val |= QCA8K_SGMII_MODE_CTRL_BASEX;
	}

	qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);

	/* From original code is reported port instability as SGMII also
	 * require delay set. Apply advised values here or take them from DT.
	 */
	if (interface == PHY_INTERFACE_MODE_SGMII)
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);

	/* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
	 * falling edge is set writing in the PORT0 PAD reg
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327 ||
	    priv->switch_id == QCA8K_ID_QCA8337)
		reg = QCA8K_REG_PORT0_PAD_CTRL;

	val = 0;

	/* SGMII Clock phase configuration */
	if (priv->ports_config.sgmii_rx_clk_falling_edge)
		val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;

	if (priv->ports_config.sgmii_tx_clk_falling_edge)
		val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;

	/* NOTE(review): the result of this rmw is assigned to ret but the
	 * function unconditionally returns 0 below — confirm whether the
	 * clock-phase write failure should be propagated.
	 */
	if (val)
		ret = qca8k_rmw(priv, reg,
				QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
				QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
				val);

	return 0;
}
/* Intentionally empty: no autoneg-restart action is implemented for this
 * PCS; the hook only satisfies the phylink_pcs_ops contract.
 */
static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
}
/* phylink PCS operations for the SGMII/1000base-X capable ports */
static const struct phylink_pcs_ops qca8k_pcs_ops = {
	.pcs_get_state = qca8k_pcs_get_state,
	.pcs_config = qca8k_pcs_config,
	.pcs_an_restart = qca8k_pcs_an_restart,
};
  1201. static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
  1202. int port)
  1203. {
  1204. qpcs->pcs.ops = &qca8k_pcs_ops;
  1205. /* We don't have interrupts for link changes, so we need to poll */
  1206. qpcs->pcs.poll = true;
  1207. qpcs->priv = priv;
  1208. qpcs->port = port;
  1209. }
/* Tagger callback for MIB autocast packets: extract the counters of the
 * requested port from the packet into mib_eth_data->data, and complete
 * the waiter once a packet has been seen for every port.
 */
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	struct mib_ethhdr *mib_ethhdr;
	__le32 *data2;
	u8 port;
	int i;

	mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
	mib_eth_data = &priv->mib_eth_data;

	/* The switch autocast every port. Ignore other packet and
	 * parse only the requested one.
	 */
	port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
	if (port != mib_eth_data->req_port)
		goto exit;

	data2 = (__le32 *)skb->data;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];

		/* First 3 mib are present in the skb head */
		if (i < 3) {
			mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
			continue;
		}

		/* Some mib are 64 bit wide */
		if (mib->size == 2)
			mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
		else
			mib_eth_data->data[i] = get_unaligned_le32(data2);

		/* Advance by the counter's width in 32-bit words */
		data2 += mib->size;
	}

exit:
	/* Complete on receiving all the mib packet */
	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
		complete(&mib_eth_data->rw_done);
}
/* Fetch the MIB counters of a port over the Ethernet management path:
 * trigger a MIB autocast and wait for qca8k_mib_autocast_handler() to
 * fill "data".
 *
 * Returns a negative errno if the autocast request fails; otherwise the
 * wait_for_completion_timeout() result (0 on timeout, >0 remaining
 * jiffies on success). NOTE(review): callers are expected to treat >0 as
 * success — confirm against the qca8k_get_ethtool_stats caller.
 */
static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	mib_eth_data = &priv->mib_eth_data;

	/* Serialize concurrent MIB requests; the handler reads these fields */
	mutex_lock(&mib_eth_data->mutex);

	reinit_completion(&mib_eth_data->rw_done);

	mib_eth_data->req_port = dp->index;
	mib_eth_data->data = data;
	/* One autocast packet is expected per switch port */
	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);

	mutex_lock(&priv->reg_mutex);

	/* Send mib autocast request */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
				 QCA8K_MIB_BUSY);

	mutex_unlock(&priv->reg_mutex);

	if (ret)
		goto exit;

	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);

exit:
	mutex_unlock(&mib_eth_data->mutex);

	return ret;
}
  1274. static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
  1275. {
  1276. struct qca8k_priv *priv = ds->priv;
  1277. /* Communicate to the phy internal driver the switch revision.
  1278. * Based on the switch revision different values needs to be
  1279. * set to the dbg and mmd reg on the phy.
  1280. * The first 2 bit are used to communicate the switch revision
  1281. * to the phy driver.
  1282. */
  1283. if (port > 0 && port < 6)
  1284. return priv->switch_revision;
  1285. return 0;
  1286. }
/* This switch family always uses the QCA tagging protocol, regardless of
 * port or the tagger suggested by the DSA core.
 */
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
		       enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_QCA;
}
/* Track the operational state of the DSA master so the Ethernet-based
 * management/MIB paths know whether they may use it.
 */
static void
qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
		    bool operational)
{
	struct dsa_port *dp = master->dsa_ptr;
	struct qca8k_priv *priv = ds->priv;

	/* Ethernet MIB/MDIO is only supported for CPU port 0 */
	if (dp->index != 0)
		return;

	/* Hold both Ethernet-path locks so in-flight mgmt and MIB
	 * transactions see a stable mgmt_master pointer.
	 */
	mutex_lock(&priv->mgmt_eth_data.mutex);
	mutex_lock(&priv->mib_eth_data.mutex);

	priv->mgmt_master = operational ? (struct net_device *)master : NULL;

	mutex_unlock(&priv->mib_eth_data.mutex);
	mutex_unlock(&priv->mgmt_eth_data.mutex);
}
  1308. static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
  1309. enum dsa_tag_protocol proto)
  1310. {
  1311. struct qca_tagger_data *tagger_data;
  1312. switch (proto) {
  1313. case DSA_TAG_PROTO_QCA:
  1314. tagger_data = ds->tagger_data;
  1315. tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
  1316. tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
  1317. break;
  1318. default:
  1319. return -EOPNOTSUPP;
  1320. }
  1321. return 0;
  1322. }
/* One-time switch initialization for the DSA core: MDIO bus setup,
 * PWS/power registers, PCS instances, QCA header mode on CPU ports,
 * forwarding/VLAN defaults per port, and chip-specific buffer tuning.
 *
 * Returns 0 on success or a negative errno.
 */
static int
qca8k_setup(struct dsa_switch *ds)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	int cpu_port, ret, i;
	u32 mask;

	cpu_port = qca8k_find_cpu_port(ds);
	if (cpu_port < 0) {
		dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
		return cpu_port;
	}

	/* Parse CPU port config to be later used in phy_link mac_config */
	ret = qca8k_parse_port_config(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mdio_bus(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_of_pws_reg(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mac_pwr_sel(priv);
	if (ret)
		return ret;

	qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
	qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);

	/* Make sure MAC06 is disabled */
	ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
				QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
	if (ret) {
		dev_err(priv->dev, "failed disabling MAC06 exchange");
		return ret;
	}

	/* Enable CPU Port */
	ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
			      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
	if (ret) {
		dev_err(priv->dev, "failed enabling CPU port");
		return ret;
	}

	/* Enable MIB counters; a failure here is non-fatal */
	ret = qca8k_mib_init(priv);
	if (ret)
		dev_warn(priv->dev, "mib init failed");

	/* Initial setup of all ports */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* Disable forwarding by default on all ports */
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
				QCA8K_PORT_LOOKUP_MEMBER, 0);
		if (ret)
			return ret;

		/* Enable QCA header mode on all cpu ports */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
			if (ret) {
				dev_err(priv->dev, "failed enabling QCA header mode");
				return ret;
			}
		}

		/* Disable MAC by default on all user ports */
		if (dsa_is_user_port(ds, i))
			qca8k_port_set_status(priv, i, 0);
	}

	/* Forward all unknown frames to CPU port for Linux processing
	 * Notice that in multi-cpu config only one port should be set
	 * for igmp, unknown, multicast and broadcast packet
	 */
	ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
	if (ret)
		return ret;

	/* Setup connection between CPU port & user ports
	 * Configure specific switch configuration for ports
	 */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* CPU port gets connected to all user ports of the switch */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
			if (ret)
				return ret;
		}

		/* Individual user ports get connected to CPU port only */
		if (dsa_is_user_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER,
					BIT(cpu_port));
			if (ret)
				return ret;

			/* Enable ARP Auto-learning by default */
			ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
					      QCA8K_PORT_LOOKUP_LEARN);
			if (ret)
				return ret;

			/* For port based vlans to work we need to set the
			 * default egress vid
			 */
			ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
					QCA8K_EGREES_VLAN_PORT_MASK(i),
					QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;

			ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
					  QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
					  QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;
		}

		/* The port 5 of the qca8337 have some problem in flood condition. The
		 * original legacy driver had some specific buffer and priority settings
		 * for the different port suggested by the QCA switch team. Add this
		 * missing settings to improve switch stability under load condition.
		 * This problem is limited to qca8337 and other qca8k switch are not affected.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337) {
			switch (i) {
			/* The 2 CPU port and port 5 requires some different
			 * priority than any other ports.
			 */
			case 0:
			case 5:
			case 6:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
				break;
			default:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
			}
			qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);

			mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
			       QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
			       QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
			       QCA8K_PORT_HOL_CTRL1_WRED_EN;
			qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
				  QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
				  QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_WRED_EN,
				  mask);
		}
	}

	/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
		       QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
		qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
			  QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
			  QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
			  mask);
	}

	/* Setup our port MTUs to match power on defaults */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		dev_warn(priv->dev, "failed setting MTU settings");

	/* Flush the FDB table */
	qca8k_fdb_flush(priv);

	/* Set min and max ageing value supported */
	ds->ageing_time_min = 7000;
	ds->ageing_time_max = 458745000;

	/* Set max number of LAGs supported */
	ds->num_lag_ids = QCA8K_NUM_LAGS;

	return 0;
}
/* DSA switch operations: wires the generic DSA/phylink callbacks to the
 * qca8k implementations.
 */
static const struct dsa_switch_ops qca8k_switch_ops = {
	.get_tag_protocol = qca8k_get_tag_protocol,
	.setup = qca8k_setup,
	.get_strings = qca8k_get_strings,
	.get_ethtool_stats = qca8k_get_ethtool_stats,
	.get_sset_count = qca8k_get_sset_count,
	.set_ageing_time = qca8k_set_ageing_time,
	.get_mac_eee = qca8k_get_mac_eee,
	.set_mac_eee = qca8k_set_mac_eee,
	.port_enable = qca8k_port_enable,
	.port_disable = qca8k_port_disable,
	.port_change_mtu = qca8k_port_change_mtu,
	.port_max_mtu = qca8k_port_max_mtu,
	.port_stp_state_set = qca8k_port_stp_state_set,
	.port_bridge_join = qca8k_port_bridge_join,
	.port_bridge_leave = qca8k_port_bridge_leave,
	.port_fast_age = qca8k_port_fast_age,
	.port_fdb_add = qca8k_port_fdb_add,
	.port_fdb_del = qca8k_port_fdb_del,
	.port_fdb_dump = qca8k_port_fdb_dump,
	.port_mdb_add = qca8k_port_mdb_add,
	.port_mdb_del = qca8k_port_mdb_del,
	.port_mirror_add = qca8k_port_mirror_add,
	.port_mirror_del = qca8k_port_mirror_del,
	.port_vlan_filtering = qca8k_port_vlan_filtering,
	.port_vlan_add = qca8k_port_vlan_add,
	.port_vlan_del = qca8k_port_vlan_del,
	.phylink_get_caps = qca8k_phylink_get_caps,
	.phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
	.phylink_mac_config = qca8k_phylink_mac_config,
	.phylink_mac_link_down = qca8k_phylink_mac_link_down,
	.phylink_mac_link_up = qca8k_phylink_mac_link_up,
	.get_phy_flags = qca8k_get_phy_flags,
	.port_lag_join = qca8k_port_lag_join,
	.port_lag_leave = qca8k_port_lag_leave,
	.master_state_change = qca8k_master_change,
	.connect_tag_protocol = qca8k_connect_tag_protocol,
};
/* Probe callback for the qca8k MDIO device.
 *
 * Allocates the private state, optionally hard-resets the switch via the
 * "reset" GPIO, sets up the regmap, verifies the switch ID and finally
 * registers a DSA switch instance.  The statement order matters: the
 * reset and regmap must be in place before the ID register can be read.
 *
 * Returns 0 on success or a negative errno.
 */
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv;
	int ret;

	/* allocate the private data struct so that we can probe the switches
	 * ID register
	 */
	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bus = mdiodev->bus;
	priv->dev = &mdiodev->dev;
	/* Per-chip data (id, MIB count, ops) selected by OF compatible */
	priv->info = of_device_get_match_data(priv->dev);

	/* Optional hard-reset line; absence is not an error */
	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
						   GPIOD_ASIS);
	if (IS_ERR(priv->reset_gpio))
		return PTR_ERR(priv->reset_gpio);

	if (priv->reset_gpio) {
		gpiod_set_value_cansleep(priv->reset_gpio, 1);
		/* The active low duration must be greater than 10 ms
		 * and checkpatch.pl wants 20 ms.
		 */
		msleep(20);
		gpiod_set_value_cansleep(priv->reset_gpio, 0);
	}

	/* Start by setting up the register mapping */
	priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
					&qca8k_regmap_config);
	if (IS_ERR(priv->regmap)) {
		dev_err(priv->dev, "regmap initialization failed");
		return PTR_ERR(priv->regmap);
	}

	/* 0xffff is not a valid page; presumably this forces the first MDIO
	 * access to program the page register — confirm against the page
	 * cache check in the MDIO access helpers.
	 */
	priv->mdio_cache.page = 0xffff;

	/* Check the detected switch id */
	ret = qca8k_read_switch_id(priv);
	if (ret)
		return ret;

	priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	/* Synchronization state for the Ethernet-based mgmt and MIB paths */
	mutex_init(&priv->mgmt_eth_data.mutex);
	init_completion(&priv->mgmt_eth_data.rw_done);

	mutex_init(&priv->mib_eth_data.mutex);
	init_completion(&priv->mib_eth_data.rw_done);

	priv->ds->dev = &mdiodev->dev;
	priv->ds->num_ports = QCA8K_NUM_PORTS;
	priv->ds->priv = priv;
	priv->ds->ops = &qca8k_switch_ops;
	mutex_init(&priv->reg_mutex);
	dev_set_drvdata(&mdiodev->dev, priv);

	return dsa_register_switch(priv->ds);
}
  1591. static void
  1592. qca8k_sw_remove(struct mdio_device *mdiodev)
  1593. {
  1594. struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
  1595. int i;
  1596. if (!priv)
  1597. return;
  1598. for (i = 0; i < QCA8K_NUM_PORTS; i++)
  1599. qca8k_port_set_status(priv, i, 0);
  1600. dsa_unregister_switch(priv->ds);
  1601. }
  1602. static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
  1603. {
  1604. struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
  1605. if (!priv)
  1606. return;
  1607. dsa_switch_shutdown(priv->ds);
  1608. dev_set_drvdata(&mdiodev->dev, NULL);
  1609. }
  1610. #ifdef CONFIG_PM_SLEEP
  1611. static void
  1612. qca8k_set_pm(struct qca8k_priv *priv, int enable)
  1613. {
  1614. int port;
  1615. for (port = 0; port < QCA8K_NUM_PORTS; port++) {
  1616. /* Do not enable on resume if the port was
  1617. * disabled before.
  1618. */
  1619. if (!(priv->port_enabled_map & BIT(port)))
  1620. continue;
  1621. qca8k_port_set_status(priv, port, enable);
  1622. }
  1623. }
  1624. static int qca8k_suspend(struct device *dev)
  1625. {
  1626. struct qca8k_priv *priv = dev_get_drvdata(dev);
  1627. qca8k_set_pm(priv, 0);
  1628. return dsa_switch_suspend(priv->ds);
  1629. }
  1630. static int qca8k_resume(struct device *dev)
  1631. {
  1632. struct qca8k_priv *priv = dev_get_drvdata(dev);
  1633. qca8k_set_pm(priv, 1);
  1634. return dsa_switch_resume(priv->ds);
  1635. }
  1636. #endif /* CONFIG_PM_SLEEP */
/* System sleep PM ops; SIMPLE_DEV_PM_OPS compiles the suspend/resume
 * references out when CONFIG_PM_SLEEP is not set, matching the #ifdef
 * around the callbacks above.
 */
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
			 qca8k_suspend, qca8k_resume);
/* Register-access backends shared by all supported chips: MIB autocast
 * stats plus read/write over the in-band Ethernet management protocol.
 */
static const struct qca8k_info_ops qca8xxx_ops = {
	.autocast_mib = qca8k_get_ethtool_stats_eth,
	.read_eth = qca8k_read_eth,
	.write_eth = qca8k_write_eth,
};
/* Per-compatible match data: switch ID, MIB counter count and shared
 * register-access ops.  qca8327 and qca8328 report the same switch ID;
 * the 8327 entry is flagged as the reduced-package variant.
 */
static const struct qca8k_match_data qca8327 = {
	.id = QCA8K_ID_QCA8327,
	.reduced_package = true,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

static const struct qca8k_match_data qca8328 = {
	.id = QCA8K_ID_QCA8327,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

/* QCA8334 and QCA8337 share the QCA8337 switch ID and MIB layout */
static const struct qca8k_match_data qca833x = {
	.id = QCA8K_ID_QCA8337,
	.mib_count = QCA8K_QCA833X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};
  1660. static const struct of_device_id qca8k_of_match[] = {
  1661. { .compatible = "qca,qca8327", .data = &qca8327 },
  1662. { .compatible = "qca,qca8328", .data = &qca8328 },
  1663. { .compatible = "qca,qca8334", .data = &qca833x },
  1664. { .compatible = "qca,qca8337", .data = &qca833x },
  1665. { /* sentinel */ },
  1666. };
/* MDIO bus driver glue: lifecycle callbacks plus OF matching and PM ops */
static struct mdio_driver qca8kmdio_driver = {
	.probe  = qca8k_sw_probe,
	.remove = qca8k_sw_remove,
	.shutdown = qca8k_sw_shutdown,
	.mdiodrv.driver = {
		.name = "qca8k",
		.of_match_table = qca8k_of_match,
		.pm = &qca8k_pm_ops,
	},
};
/* Register the driver with the MDIO bus core at module init/exit */
mdio_module_driver(qca8kmdio_driver);

MODULE_AUTHOR("Mathieu Olivari, John Crispin <[email protected]>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
/* NOTE(review): presumably a legacy alias for platform-bus autoloading
 * kept for backward compatibility — confirm before removing.
 */
MODULE_ALIAS("platform:qca8k");