fsl_ucc_hdlc.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)

static int uhdlc_close(struct net_device *dev);

static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};

static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
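
/* One-time hardware setup for a UCC HDLC channel: bring up the UCC fast
 * controller, optionally configure loopback or HDLC-bus mode, allocate the
 * Rx/Tx buffer descriptor rings and their data buffers, and program the
 * parameter RAM (MURAM) with the ring bases, MRBLR, CRC preset/mask and
 * HDLC address/mask fields used by the QE microcode.
 */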
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	s32 riptr;
	s32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
		uf_info->cds = 1;
		uf_info->ctss = 1;
	} else {
		uf_info->cds = 0;
		uf_info->ctsp = 0;
		uf_info->ctss = 0;
	}

	/* This sets the HPM register in the CMXUCR register, which configures
	 * an open-drain connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (needs to be fixed) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
				UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (priv->ucc_pram_offset < 0) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
					qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (riptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (tiptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}
	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);
	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
		priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
		priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
	}
	dma_wmb();

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);
	return ret;
}
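
/* hdlc->xmit hook. Validates or prepends the protocol header depending on
 * dev->type, copies the frame into the DMA buffer attached to the current
 * Tx BD, marks the BD ready (T_R) and advances curtx_bd, stopping the queue
 * when the ring catches up with dirty_tx.
 */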
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd *bd;
	u16 bd_status;
	unsigned long flags;
	__be16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return -ENOMEM;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (__be16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (__be16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return -ENOMEM;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_ETHER:
		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	netdev_sent_queue(dev, skb->len);
	spin_lock_irqsave(&priv->lock, flags);

	dma_rmb();
	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = be16_to_cpu(bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	bd->length = cpu_to_be16(skb->len);
	bd->status = cpu_to_be16(bd_status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}

static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
{
	u32 cecr_subblock;

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	return 0;
}
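
/* Reclaim completed Tx BDs (called from the NAPI poll loop under priv->lock):
 * walk from dirty_tx while the ready bit is clear, account errors, free the
 * corresponding skbs and wake the queue; a restart command is issued if an
 * underrun or lost carrier was flagged.
 */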
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	unsigned int bytes_sent = 0;
	int howmany = 0;
	struct qe_bd *bd;	/* BD pointer */
	u16 bd_status;
	int tx_restart = 0;

	dma_rmb();
	bd = priv->dirty_tx;
	bd_status = be16_to_cpu(bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		if (bd_status & T_UN_S) { /* Underrun */
			dev->stats.tx_fifo_errors++;
			tx_restart = 1;
		}
		if (bd_status & T_CT_S) { /* Carrier lost */
			dev->stats.tx_carrier_errors++;
			tx_restart = 1;
		}

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		howmany++;
		bytes_sent += skb->len;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_consume_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = be16_to_cpu(bd->status);
	}
	priv->dirty_tx = bd;

	if (tx_restart)
		hdlc_tx_restart(priv);

	netdev_completed_queue(dev, howmany, bytes_sent);
	return 0;
}
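
/* Rx side of the NAPI poll: consume filled Rx BDs (empty bit clear) up to
 * rx_work_limit, copy each frame out of the DMA buffer into a fresh skb,
 * strip the header/CRC as required by dev->type, then recycle the BD by
 * setting the empty bit again. Returns the number of frames processed.
 */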
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	dma_rmb();
	bd = priv->currx_bd;
	bd_status = be16_to_cpu(bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & (RX_BD_ERRORS)) {
			dev->stats.rx_errors++;

			if (bd_status & R_CD_S)
				dev->stats.collisions++;
			if (bd_status & R_OV_S)
				dev->stats.rx_fifo_errors++;
			if (bd_status & R_CR_S)
				dev->stats.rx_crc_errors++;
			if (bd_status & R_AB_S)
				dev->stats.rx_over_errors++;
			if (bd_status & R_NO_S)
				dev->stats.rx_frame_errors++;
			if (bd_status & R_LG_S)
				dev->stats.rx_length_errors++;

			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = be16_to_cpu(bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
		case ARPHRD_ETHER:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		bd->status = cpu_to_be16((bd_status & R_W_S) | R_E_S | R_I_S);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = be16_to_cpu(bd->status);
	}
	dma_rmb();

	priv->currx_bd = bd;
	return howmany;
}
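
/* NAPI poll handler: clean the Tx ring, process up to @budget Rx frames and,
 * when the budget is not exhausted, complete NAPI and re-enable the Rx/Tx
 * event interrupts in UCCM that the IRQ handler masked off.
 */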
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits_be32(priv->uccf->p_uccm,
				(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}
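
/* Interrupt handler: acknowledge the pending UCCE events, schedule NAPI for
 * Rx/Tx events (masking them in UCCM until the poll routine re-enables them)
 * and count busy/Tx-error conditions directly in the device stats.
 */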
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	u32 ucce;
	u32 uccm;

	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_missed_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

static int uhdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	switch (ifs->type) {
	case IF_GET_IFACE:
		ifs->type = IF_IFACE_E1;
		if (ifs->size < size) {
			ifs->size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifs);
	}
}
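
/* ndo_open: request the UCC interrupt, issue QE_INIT_TX_RX, enable the fast
 * controller and (in TSA mode) the TDM port, then enable NAPI and the Tx
 * queue before handing off to the generic HDLC layer via hdlc_open().
 */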
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;
	int rc = 0;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netdev_reset_queue(dev);
		netif_start_queue(dev);

		rc = hdlc_open(dev);
		if (rc)
			uhdlc_close(dev);
	}

	return rc;
}
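
/* Undo everything uhdlc_init() set up: release the MURAM allocations, the
 * BD rings and data buffers, the skb pointer arrays and the UCC fast
 * context. Each block is checked before being freed, so it tolerates a
 * partially initialized state.
 */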
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}
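
/* ndo_stop: disable NAPI, gracefully stop transmission and close the Rx BDs
 * via QE commands, disable the TDM port and the fast controller, free the
 * IRQ and stop the queue before calling hdlc_close().
 */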
static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		qe_clrbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	netdev_reset_queue(dev);
	priv->hdlc_busy = 0;
	hdlc_close(dev);

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}
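
/* Suspend/resume support: suspend snapshots GUMR/GUEMR, the MURAM parameter
 * RAM and the SI/UCC clock mux routing (state that does not appear to
 * survive a deep sleep), and resume reprograms the registers, rebuilds
 * SIRAM and the BD rings, and re-enables the channel if it was busy.
 */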
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	uf_regs = priv->uf_regs;

	/* backup gumr guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(QE_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
		priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
		priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
	}
	dma_wmb();

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif
static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	netdev_err(ndev, "%s\n", __func__);
}

static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_siocwandev	= uhdlc_ioctl,
	.ndo_tx_timeout	= uhdlc_tx_timeout,
};
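
/* Look up a platform device by compatible string and ioremap its first MEM
 * resource; used to map the QE SI registers and SIRAM. The SIRAM is cleared
 * once, on the first mapping that requests init_flag.
 */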
static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	static int siram_init_flag;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, name);
	if (!np)
		return -EINVAL;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		pr_err("%pOFn: failed to lookup pdev\n", np);
		of_node_put(np);
		return -EINVAL;
	}

	of_node_put(np);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -EINVAL;
		goto error_put_device;
	}
	*ptr = ioremap(res->start, resource_size(res));
	if (!*ptr) {
		ret = -ENOMEM;
		goto error_put_device;
	}

	/* We've remapped the addresses, and we don't need the device any
	 * more, so we should release it.
	 */
	put_device(&pdev->dev);

	if (init_flag && siram_init_flag == 0) {
		memset_io(*ptr, 0, resource_size(res));
		siram_init_flag = 1;
	}

	return 0;

error_put_device:
	put_device(&pdev->dev);

	return ret;
}
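
/* Probe: parse cell-index, the Rx/Tx clock names and the optional
 * fsl,tdm-interface / fsl,ucc-internal-loopback / fsl,hdlc-bus / fsl,hmask
 * properties, map the SI/SIRAM regions when a TDM interface is used, run
 * uhdlc_init() and register the resulting hdlc device with a 32-packet
 * NAPI weight.
 */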
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, ": Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));
	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
				     (void __iomem **)&utdm->si_regs);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
				     (void __iomem **)&utdm->siram);
		if (ret)
			goto unmap_si_regs;
	}

	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto undo_uhdlc_init;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	dev->watchdog_timeo = 2 * HZ;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add_weight(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
	if (utdm)
		iounmap(utdm->siram);
unmap_si_regs:
	if (utdm)
		iounmap(utdm->si_regs);
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}

	kfree(priv);

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);