pxa168_eth.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PXA168 ethernet driver.
 * Most of the code is derived from the mv643xx ethernet driver.
 *
 * Copyright (C) 2010 Marvell International Ltd.
 *	Sachin Sanap <[email protected]>
 *	Zhangfei Gao <[email protected]>
 *	Philip Rakity <[email protected]>
 *	Mark Brown <[email protected]>
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pxa168_eth.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/workqueue.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>

#define DRIVER_NAME     "pxa168-eth"
#define DRIVER_VERSION  "0.3"
/*
 * Registers
 */
#define PHY_ADDRESS             0x0000
#define SMI                     0x0010
#define PORT_CONFIG             0x0400
#define PORT_CONFIG_EXT         0x0408
#define PORT_COMMAND            0x0410
#define PORT_STATUS             0x0418
#define HTPR                    0x0428
#define MAC_ADDR_LOW            0x0430
#define MAC_ADDR_HIGH           0x0438
#define SDMA_CONFIG             0x0440
#define SDMA_CMD                0x0448
#define INT_CAUSE               0x0450
#define INT_W_CLEAR             0x0454
#define INT_MASK                0x0458
#define ETH_F_RX_DESC_0         0x0480
#define ETH_C_RX_DESC_0         0x04A0
#define ETH_C_TX_DESC_1         0x04E4
/* smi register */
#define SMI_BUSY                (1 << 28)       /* 1 = SMI unit is busy */
#define SMI_R_VALID             (1 << 27)       /* 1 = read data is valid */
#define SMI_OP_W                (0 << 26)       /* Write operation */
#define SMI_OP_R                (1 << 26)       /* Read operation */

#define PHY_WAIT_ITERATIONS     10

#define PXA168_ETH_PHY_ADDR_DEFAULT     0
/* RX & TX descriptor command */
#define BUF_OWNED_BY_DMA        (1 << 31)

/* RX descriptor status */
#define RX_EN_INT               (1 << 23)
#define RX_FIRST_DESC           (1 << 17)
#define RX_LAST_DESC            (1 << 16)
#define RX_ERROR                (1 << 15)

/* TX descriptor command */
#define TX_EN_INT               (1 << 23)
#define TX_GEN_CRC              (1 << 22)
#define TX_ZERO_PADDING         (1 << 18)
#define TX_FIRST_DESC           (1 << 17)
#define TX_LAST_DESC            (1 << 16)
#define TX_ERROR                (1 << 15)

/* SDMA_CMD */
#define SDMA_CMD_AT             (1 << 31)
#define SDMA_CMD_TXDL           (1 << 24)
#define SDMA_CMD_TXDH           (1 << 23)
#define SDMA_CMD_AR             (1 << 15)
#define SDMA_CMD_ERD            (1 << 7)

/* Bit definitions of the Port Config Reg */
#define PCR_DUPLEX_FULL         (1 << 15)
#define PCR_HS                  (1 << 12)
#define PCR_EN                  (1 << 7)
#define PCR_PM                  (1 << 0)

/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM               (1 << 28)
#define PCXR_DSCP_EN            (1 << 21)
#define PCXR_RMII_EN            (1 << 20)
#define PCXR_AN_SPEED_DIS       (1 << 19)
#define PCXR_SPEED_100          (1 << 18)
#define PCXR_MFL_1518           (0 << 14)
#define PCXR_MFL_1536           (1 << 14)
#define PCXR_MFL_2048           (2 << 14)
#define PCXR_MFL_64K            (3 << 14)
#define PCXR_FLOWCTL_DIS        (1 << 12)
#define PCXR_FLP                (1 << 11)
#define PCXR_AN_FLOWCTL_DIS     (1 << 10)
#define PCXR_AN_DUPLEX_DIS      (1 << 9)
#define PCXR_PRIO_TX_OFF        3
#define PCXR_TX_HIGH_PRI        (7 << PCXR_PRIO_TX_OFF)

/* Bit definitions of the SDMA Config Reg */
#define SDCR_BSZ_OFF            12
#define SDCR_BSZ8               (3 << SDCR_BSZ_OFF)
#define SDCR_BSZ4               (2 << SDCR_BSZ_OFF)
#define SDCR_BSZ2               (1 << SDCR_BSZ_OFF)
#define SDCR_BSZ1               (0 << SDCR_BSZ_OFF)
#define SDCR_BLMR               (1 << 6)
#define SDCR_BLMT               (1 << 7)
#define SDCR_RIFB               (1 << 9)
#define SDCR_RC_OFF             2
#define SDCR_RC_MAX_RETRANS     (0xf << SDCR_RC_OFF)
/*
 * Bit definitions of the Interrupt Cause Reg
 * and Interrupt MASK Reg are the same
 */
#define ICR_RXBUF               (1 << 0)
#define ICR_TXBUF_H             (1 << 2)
#define ICR_TXBUF_L             (1 << 3)
#define ICR_TXEND_H             (1 << 6)
#define ICR_TXEND_L             (1 << 7)
#define ICR_RXERR               (1 << 8)
#define ICR_TXERR_H             (1 << 10)
#define ICR_TXERR_L             (1 << 11)
#define ICR_TX_UDR              (1 << 13)
#define ICR_MII_CH              (1 << 28)

#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\
                  ICR_TXERR_H | ICR_TXERR_L |\
                  ICR_TXEND_H | ICR_TXEND_L |\
                  ICR_RXBUF | ICR_RXERR | ICR_MII_CH)
#define ETH_HW_IP_ALIGN         2       /* hw aligns IP header */

#define NUM_RX_DESCS            64
#define NUM_TX_DESCS            64

#define HASH_ADD                0
#define HASH_DELETE             1
#define HASH_ADDR_TABLE_SIZE    0x4000  /* 16K (1/2K address - PCR_HS == 1) */
#define HOP_NUMBER              12

/* Bit definitions for Port status */
#define PORT_SPEED_100          (1 << 0)
#define FULL_DUPLEX             (1 << 1)
#define FLOW_CONTROL_DISABLED   (1 << 2)
#define LINK_UP                 (1 << 3)

/* Bit definitions for work to be done */
#define WORK_TX_DONE            (1 << 1)
/*
 * Misc definitions.
 */
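/*
 * Extra headroom needed to bring skb->data back onto a cache-line boundary
 * when NET_SKB_PAD is not itself a multiple of the cache-line size (see
 * pxa168_eth_recalc_skb_size()); evaluates to 0 when no re-alignment is
 * required.
 */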
#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

struct rx_desc {
        u32 cmd_sts;            /* Descriptor command status */
        u16 byte_cnt;           /* Descriptor buffer byte count */
        u16 buf_size;           /* Buffer size */
        u32 buf_ptr;            /* Descriptor buffer pointer */
        u32 next_desc_ptr;      /* Next descriptor pointer */
};

struct tx_desc {
        u32 cmd_sts;            /* Command/status field */
        u16 reserved;
        u16 byte_cnt;           /* buffer byte count */
        u32 buf_ptr;            /* pointer to buffer for this descriptor */
        u32 next_desc_ptr;      /* Pointer to next descriptor */
};
struct pxa168_eth_private {
        struct platform_device *pdev;
        int port_num;           /* User Ethernet port number */
        int phy_addr;
        int phy_speed;
        int phy_duplex;
        phy_interface_t phy_intf;

        int rx_resource_err;    /* Rx ring resource error flag */

        /* Next available and first returning Rx resource */
        int rx_curr_desc_q, rx_used_desc_q;

        /* Next available and first returning Tx resource */
        int tx_curr_desc_q, tx_used_desc_q;

        struct rx_desc *p_rx_desc_area;
        dma_addr_t rx_desc_dma;
        int rx_desc_area_size;
        struct sk_buff **rx_skb;

        struct tx_desc *p_tx_desc_area;
        dma_addr_t tx_desc_dma;
        int tx_desc_area_size;
        struct sk_buff **tx_skb;

        struct work_struct tx_timeout_task;

        struct net_device *dev;
        struct napi_struct napi;
        u8 work_todo;
        int skb_size;

        /* Size of Tx Ring per queue */
        int tx_ring_size;
        /* Number of tx descriptors in use */
        int tx_desc_count;
        /* Size of Rx Ring per queue */
        int rx_ring_size;
        /* Number of rx descriptors in use */
        int rx_desc_count;

        /*
         * Used in case the RX ring is empty, which can occur when the
         * system does not have resources (skb's).
         */
        struct timer_list timeout;
        struct mii_bus *smi_bus;

        /* clock */
        struct clk *clk;
        struct pxa168_eth_platform_data *pd;

        /*
         * Ethernet controller base address.
         */
        void __iomem *base;

        /* Pointer to the hardware address filter table */
        void *htpr;
        dma_addr_t htpr_dma;
};
struct addr_table_entry {
        __le32 lo;
        __le32 hi;
};

/* Bit fields of a Hash Table Entry */
enum hash_table_entry {
        HASH_ENTRY_VALID = 1,
        SKIP = 2,
        HASH_ENTRY_RECEIVE_DISCARD = 4,
        HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};

static int pxa168_init_hw(struct pxa168_eth_private *pep);
static int pxa168_init_phy(struct net_device *dev);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);
static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
        return readl_relaxed(pep->base + offset);
}

static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
        writel_relaxed(data, pep->base + offset);
}
static void abort_dma(struct pxa168_eth_private *pep)
{
        int delay;
        int max_retries = 40;

        do {
                wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
                udelay(100);

                delay = 10;
                while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
                       && delay-- > 0) {
                        udelay(10);
                }
        } while (max_retries-- > 0 && delay <= 0);

        if (max_retries <= 0)
                netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
}
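/*
 * rxq_refill - hand fresh receive buffers to the hardware.
 *
 * Allocates an skb for every free slot in the RX ring, maps it for DMA and
 * flips the descriptor to hardware ownership; the dma_wmb() before the
 * cmd_sts store makes the buffer pointer and size visible to the controller
 * before it can see BUF_OWNED_BY_DMA.  If the ring could not be populated
 * at all, a timer retries the refill later.
 */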
static void rxq_refill(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct sk_buff *skb;
        struct rx_desc *p_used_rx_desc;
        int used_rx_desc;

        while (pep->rx_desc_count < pep->rx_ring_size) {
                int size;

                skb = netdev_alloc_skb(dev, pep->skb_size);
                if (!skb)
                        break;
                if (SKB_DMA_REALIGN)
                        skb_reserve(skb, SKB_DMA_REALIGN);
                pep->rx_desc_count++;
                /* Get 'used' Rx descriptor */
                used_rx_desc = pep->rx_used_desc_q;
                p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
                size = skb_end_pointer(skb) - skb->data;
                p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
                                                         skb->data,
                                                         size,
                                                         DMA_FROM_DEVICE);
                p_used_rx_desc->buf_size = size;
                pep->rx_skb[used_rx_desc] = skb;

                /* Return the descriptor to DMA ownership */
                dma_wmb();
                p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
                dma_wmb();

                /* Move the used descriptor pointer to the next descriptor */
                pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

                /* Any Rx return cancels the Rx resource error status */
                pep->rx_resource_err = 0;

                skb_reserve(skb, ETH_HW_IP_ALIGN);
        }

        /*
         * If the RX ring could not be refilled with skbs, set a timer to
         * try allocating again at a later time.
         */
        if (pep->rx_desc_count == 0) {
                pep->timeout.expires = jiffies + (HZ / 10);
                add_timer(&pep->timeout);
        }
}
static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
        struct pxa168_eth_private *pep = from_timer(pep, t, timeout);

        napi_schedule(&pep->napi);
}
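/*
 * Reverse the bit order within each nibble of x: bit 0 swaps with bit 3,
 * bit 1 with bit 2, bit 4 with bit 7 and bit 5 with bit 6.  Used when
 * mangling a MAC address into the hash-function input format expected by
 * the hardware.
 */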
static inline u8 flip_8_bits(u8 x)
{
        return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
            | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
            | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
            | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
}

static void nibble_swap_every_byte(unsigned char *mac_addr)
{
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
                mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
                              ((mac_addr[i] & 0xf0) >> 4);
        }
}

static void inverse_every_nibble(unsigned char *mac_addr)
{
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                mac_addr[i] = flip_8_bits(mac_addr[i]);
}
/*
 * ----------------------------------------------------------------------------
 * This function will calculate the hash function of the address.
 * Inputs
 * mac_addr_orig - MAC address.
 * Outputs
 * return the calculated entry.
 */
static u32 hash_function(const unsigned char *mac_addr_orig)
{
        u32 hash_result;
        u32 addr0;
        u32 addr1;
        u32 addr2;
        u32 addr3;
        unsigned char mac_addr[ETH_ALEN];

        /* Make a copy of the MAC address since we are going to perform bit
         * operations on it
         */
        memcpy(mac_addr, mac_addr_orig, ETH_ALEN);

        nibble_swap_every_byte(mac_addr);
        inverse_every_nibble(mac_addr);

        addr0 = (mac_addr[5] >> 2) & 0x3f;
        addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
        addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
        addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);

        hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
        hash_result = hash_result & 0x07ff;
        return hash_result;
}
/*
 * ----------------------------------------------------------------------------
 * This function will add/del an entry to the address table.
 * Inputs
 * pep - ETHERNET port private data.
 * mac_addr - MAC address.
 * skip - if 1, skip this address. Used in case of deleting an entry which is
 *        part of a chain in the hash table. We can't just delete the entry
 *        since that will break the chain. We need to defragment the tables
 *        from time to time.
 * rd - 0 Discard packet upon match.
 *    - 1 Receive packet upon match.
 * Outputs
 * address table entry is added/deleted.
 * 0 if success.
 * -ENOSPC if table full
 */
static int add_del_hash_entry(struct pxa168_eth_private *pep,
                              const unsigned char *mac_addr,
                              u32 rd, u32 skip, int del)
{
        struct addr_table_entry *entry, *start;
        u32 new_high;
        u32 new_low;
        u32 i;

        new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
                | (((mac_addr[1] >> 0) & 0xf) << 11)
                | (((mac_addr[0] >> 4) & 0xf) << 7)
                | (((mac_addr[0] >> 0) & 0xf) << 3)
                | (((mac_addr[3] >> 4) & 0x1) << 31)
                | (((mac_addr[3] >> 0) & 0xf) << 27)
                | (((mac_addr[2] >> 4) & 0xf) << 23)
                | (((mac_addr[2] >> 0) & 0xf) << 19)
                | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
                | HASH_ENTRY_VALID;

        new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
                 | (((mac_addr[5] >> 0) & 0xf) << 11)
                 | (((mac_addr[4] >> 4) & 0xf) << 7)
                 | (((mac_addr[4] >> 0) & 0xf) << 3)
                 | (((mac_addr[3] >> 5) & 0x7) << 0);

        /*
         * Pick the appropriate table, start scanning for free/reusable
         * entries at the index obtained by hashing the specified MAC address
         */
        start = pep->htpr;
        entry = start + hash_function(mac_addr);
        for (i = 0; i < HOP_NUMBER; i++) {
                if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
                        break;
                } else {
                        /* if same address put in same position */
                        if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
                             (new_low & 0xfffffff8)) &&
                            (le32_to_cpu(entry->hi) == new_high)) {
                                break;
                        }
                }
                if (entry == start + 0x7ff)
                        entry = start;
                else
                        entry++;
        }

        if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
            (le32_to_cpu(entry->hi) != new_high) && del)
                return 0;

        if (i == HOP_NUMBER) {
                if (!del) {
                        netdev_info(pep->dev,
                                    "%s: table section is full, need to "
                                    "move to 16kB implementation?\n",
                                    __FILE__);
                        return -ENOSPC;
                } else
                        return 0;
        }

        /*
         * Update the selected entry
         */
        if (del) {
                entry->hi = 0;
                entry->lo = 0;
        } else {
                entry->hi = cpu_to_le32(new_high);
                entry->lo = cpu_to_le32(new_low);
        }

        return 0;
}
/*
 * ----------------------------------------------------------------------------
 * Create an address table entry from the MAC address info
 * found in the specified net_device struct
 *
 * Input : pointer to ethernet interface network device structure
 * Output : N/A
 */
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
                                          unsigned char *oaddr,
                                          const unsigned char *addr)
{
        /* Delete old entry */
        if (oaddr)
                add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);

        /* Add new entry */
        add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
}
static int init_hash_table(struct pxa168_eth_private *pep)
{
        /*
         * Hardware expects CPU to build a hash table based on a predefined
         * hash function and populate it based on hardware address. The
         * location of the hash table is identified by 32-bit pointer stored
         * in HTPR internal register. Two possible sizes exist for the hash
         * table: 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB
         * (16kB of DRAM required (4 x 4 kB banks)). We currently only
         * support 1/2kB.
         */
        /* TODO: Add support for the 8kB hash table and an alternative hash
         * function. The driver can dynamically switch to them if the 1/2kB
         * hash table is full.
         */
        if (!pep->htpr) {
                pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
                                               HASH_ADDR_TABLE_SIZE,
                                               &pep->htpr_dma, GFP_KERNEL);
                if (!pep->htpr)
                        return -ENOMEM;
        } else {
                memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
        }
        wrl(pep, HTPR, pep->htpr_dma);
        return 0;
}
static void pxa168_eth_set_rx_mode(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        u32 val;

        val = rdl(pep, PORT_CONFIG);
        if (dev->flags & IFF_PROMISC)
                val |= PCR_PM;
        else
                val &= ~PCR_PM;
        wrl(pep, PORT_CONFIG, val);

        /*
         * Remove the old list of MAC addresses and add dev->dev_addr
         * and the multicast addresses.
         */
        memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
        update_hash_table_mac_address(pep, NULL, dev->dev_addr);

        netdev_for_each_mc_addr(ha, dev)
                update_hash_table_mac_address(pep, NULL, ha->addr);
}
static void pxa168_eth_get_mac_address(struct net_device *dev,
                                       unsigned char *addr)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH);
        unsigned int mac_l = rdl(pep, MAC_ADDR_LOW);

        addr[0] = (mac_h >> 24) & 0xff;
        addr[1] = (mac_h >> 16) & 0xff;
        addr[2] = (mac_h >> 8) & 0xff;
        addr[3] = mac_h & 0xff;
        addr[4] = (mac_l >> 8) & 0xff;
        addr[5] = mac_l & 0xff;
}
static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
{
        struct sockaddr *sa = addr;
        struct pxa168_eth_private *pep = netdev_priv(dev);
        unsigned char oldMac[ETH_ALEN];
        u32 mac_h, mac_l;

        if (!is_valid_ether_addr(sa->sa_data))
                return -EADDRNOTAVAIL;
        memcpy(oldMac, dev->dev_addr, ETH_ALEN);
        eth_hw_addr_set(dev, sa->sa_data);

        mac_h = dev->dev_addr[0] << 24;
        mac_h |= dev->dev_addr[1] << 16;
        mac_h |= dev->dev_addr[2] << 8;
        mac_h |= dev->dev_addr[3];
        mac_l = dev->dev_addr[4] << 8;
        mac_l |= dev->dev_addr[5];
        wrl(pep, MAC_ADDR_HIGH, mac_h);
        wrl(pep, MAC_ADDR_LOW, mac_l);

        netif_addr_lock_bh(dev);
        update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
        netif_addr_unlock_bh(dev);
        return 0;
}
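/*
 * eth_port_start - bring the port up: point the controller at the current
 * TX/RX descriptors, clear and unmask interrupts, enable the port and kick
 * the RX DMA engine.
 */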
static void eth_port_start(struct net_device *dev)
{
        unsigned int val = 0;
        struct pxa168_eth_private *pep = netdev_priv(dev);
        int tx_curr_desc, rx_curr_desc;

        phy_start(dev->phydev);

        /* Assignment of Tx CTRP of given queue */
        tx_curr_desc = pep->tx_curr_desc_q;
        wrl(pep, ETH_C_TX_DESC_1,
            (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));

        /* Assignment of Rx CRDP of given queue */
        rx_curr_desc = pep->rx_curr_desc_q;
        wrl(pep, ETH_C_RX_DESC_0,
            (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

        wrl(pep, ETH_F_RX_DESC_0,
            (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

        /* Clear all interrupts */
        wrl(pep, INT_CAUSE, 0);

        /* Enable all interrupts for receive, transmit and error. */
        wrl(pep, INT_MASK, ALL_INTS);

        val = rdl(pep, PORT_CONFIG);
        val |= PCR_EN;
        wrl(pep, PORT_CONFIG, val);

        /* Start RX DMA engine */
        val = rdl(pep, SDMA_CMD);
        val |= SDMA_CMD_ERD;
        wrl(pep, SDMA_CMD, val);
}
static void eth_port_reset(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        unsigned int val = 0;

        /* Stop all interrupts for receive, transmit and error. */
        wrl(pep, INT_MASK, 0);

        /* Clear all interrupts */
        wrl(pep, INT_CAUSE, 0);

        /* Stop RX DMA */
        val = rdl(pep, SDMA_CMD);
        val &= ~SDMA_CMD_ERD;   /* abort dma command */

        /* Abort any transmit and receive operations and put DMA
         * in idle state.
         */
        abort_dma(pep);

        /* Disable port */
        val = rdl(pep, PORT_CONFIG);
        val &= ~PCR_EN;
        wrl(pep, PORT_CONFIG, val);

        phy_stop(dev->phydev);
}
/*
 * txq_reclaim - Free the tx desc data for completed descriptors
 * If force is non-zero, frees uncompleted descriptors as well
 */
static int txq_reclaim(struct net_device *dev, int force)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct tx_desc *desc;
        u32 cmd_sts;
        struct sk_buff *skb;
        int tx_index;
        dma_addr_t addr;
        int count;
        int released = 0;

        netif_tx_lock(dev);

        pep->work_todo &= ~WORK_TX_DONE;
        while (pep->tx_desc_count > 0) {
                tx_index = pep->tx_used_desc_q;
                desc = &pep->p_tx_desc_area[tx_index];
                cmd_sts = desc->cmd_sts;
                if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
                        if (released > 0) {
                                goto txq_reclaim_end;
                        } else {
                                released = -1;
                                goto txq_reclaim_end;
                        }
                }

                pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
                pep->tx_desc_count--;

                addr = desc->buf_ptr;
                count = desc->byte_cnt;
                skb = pep->tx_skb[tx_index];
                if (skb)
                        pep->tx_skb[tx_index] = NULL;

                if (cmd_sts & TX_ERROR) {
                        if (net_ratelimit())
                                netdev_err(dev, "Error in TX\n");
                        dev->stats.tx_errors++;
                }
                dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
                if (skb)
                        dev_kfree_skb_irq(skb);
                released++;
        }
txq_reclaim_end:
        netif_tx_unlock(dev);
        return released;
}
static void pxa168_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);

        netdev_info(dev, "TX timeout desc_count %d\n", pep->tx_desc_count);

        schedule_work(&pep->tx_timeout_task);
}

static void pxa168_eth_tx_timeout_task(struct work_struct *work)
{
        struct pxa168_eth_private *pep = container_of(work,
                                                      struct pxa168_eth_private,
                                                      tx_timeout_task);
        struct net_device *dev = pep->dev;

        pxa168_eth_stop(dev);
        pxa168_eth_open(dev);
}
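/*
 * rxq_process - NAPI receive loop: walk the RX ring until either the budget
 * is exhausted or a descriptor still owned by the DMA engine is reached,
 * unmap each completed buffer and pass it up the stack, then refill the ring.
 */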
static int rxq_process(struct net_device *dev, int budget)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        unsigned int received_packets = 0;
        struct sk_buff *skb;

        while (budget-- > 0) {
                int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
                struct rx_desc *rx_desc;
                unsigned int cmd_sts;

                /* Do not process Rx ring in case of Rx ring resource error */
                if (pep->rx_resource_err)
                        break;
                rx_curr_desc = pep->rx_curr_desc_q;
                rx_used_desc = pep->rx_used_desc_q;
                rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
                cmd_sts = rx_desc->cmd_sts;
                dma_rmb();
                if (cmd_sts & (BUF_OWNED_BY_DMA))
                        break;
                skb = pep->rx_skb[rx_curr_desc];
                pep->rx_skb[rx_curr_desc] = NULL;

                rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
                pep->rx_curr_desc_q = rx_next_curr_desc;

                /* Rx descriptors exhausted. */
                /* Set the Rx ring resource error flag */
                if (rx_next_curr_desc == rx_used_desc)
                        pep->rx_resource_err = 1;

                pep->rx_desc_count--;
                dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
                                 rx_desc->buf_size,
                                 DMA_FROM_DEVICE);
                received_packets++;

                /*
                 * Update statistics.
                 * Note byte count includes 4 byte CRC count
                 */
                stats->rx_packets++;
                stats->rx_bytes += rx_desc->byte_cnt;

                /*
                 * In case we received a packet without the first/last bits
                 * on, or with the error summary bit on, the packet needs to
                 * be dropped.
                 */
                if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
                     (RX_FIRST_DESC | RX_LAST_DESC))
                    || (cmd_sts & RX_ERROR)) {
                        stats->rx_dropped++;
                        if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
                            (RX_FIRST_DESC | RX_LAST_DESC)) {
                                if (net_ratelimit())
                                        netdev_err(dev,
                                                   "Rx pkt on multiple desc\n");
                        }
                        if (cmd_sts & RX_ERROR)
                                stats->rx_errors++;

                        dev_kfree_skb_irq(skb);
                } else {
                        /*
                         * The -4 is for the CRC in the trailer of the
                         * received packet
                         */
                        skb_put(skb, rx_desc->byte_cnt - 4);
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_receive_skb(skb);
                }
        }

        /* Fill RX ring with skb's */
        rxq_refill(dev);
        return received_packets;
}
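/*
 * Read the interrupt cause register and acknowledge exactly the bits we
 * observed by writing back their complement; returns non-zero when there
 * is RX or TX-done work for the NAPI poller.
 */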
static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
                                     struct net_device *dev)
{
        u32 icr;
        int ret = 0;

        icr = rdl(pep, INT_CAUSE);
        if (icr == 0)
                return IRQ_NONE;

        wrl(pep, INT_CAUSE, ~icr);
        if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
                pep->work_todo |= WORK_TX_DONE;
                ret = 1;
        }
        if (icr & ICR_RXBUF)
                ret = 1;

        return ret;
}
static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct pxa168_eth_private *pep = netdev_priv(dev);

        if (unlikely(!pxa168_eth_collect_events(pep, dev)))
                return IRQ_NONE;
        /* Disable interrupts */
        wrl(pep, INT_MASK, 0);
        napi_schedule(&pep->napi);
        return IRQ_HANDLED;
}
static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
{
        int skb_size;

        /*
         * Reserve 2+14 bytes for an ethernet header (the hardware
         * automatically prepends 2 bytes of dummy data to each
         * received packet), 16 bytes for up to four VLAN tags, and
         * 4 bytes for the trailing FCS -- 36 bytes total.
         */
        skb_size = pep->dev->mtu + 36;

        /*
         * Make sure that the skb size is a multiple of 8 bytes, as
         * the lower three bits of the receive descriptor's buffer
         * size field are ignored by the hardware.
         */
        pep->skb_size = (skb_size + 7) & ~7;

        /*
         * If NET_SKB_PAD is smaller than a cache line,
         * netdev_alloc_skb() will cause skb->data to be misaligned
         * to a cache line boundary. If this is the case, include
         * some extra space to allow re-aligning the data area.
         */
        pep->skb_size += SKB_DMA_REALIGN;
}
static int set_port_config_ext(struct pxa168_eth_private *pep)
{
        int skb_size;

        pxa168_eth_recalc_skb_size(pep);
        if (pep->skb_size <= 1518)
                skb_size = PCXR_MFL_1518;
        else if (pep->skb_size <= 1536)
                skb_size = PCXR_MFL_1536;
        else if (pep->skb_size <= 2048)
                skb_size = PCXR_MFL_2048;
        else
                skb_size = PCXR_MFL_64K;

        /* Extended Port Configuration */
        wrl(pep, PORT_CONFIG_EXT,
            PCXR_AN_SPEED_DIS |         /* Disable HW AN */
            PCXR_AN_DUPLEX_DIS |
            PCXR_AN_FLOWCTL_DIS |
            PCXR_2BSM |                 /* Two byte prefix aligns IP hdr */
            PCXR_DSCP_EN |              /* Enable DSCP in IP */
            skb_size | PCXR_FLP |       /* do not force link pass */
            PCXR_TX_HIGH_PRI);          /* Transmit - high priority queue */

        return 0;
}
static void pxa168_eth_adjust_link(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct phy_device *phy = dev->phydev;
        u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
        u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);

        cfg = cfg_o & ~PCR_DUPLEX_FULL;
        cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN);

        if (phy->interface == PHY_INTERFACE_MODE_RMII)
                cfgext |= PCXR_RMII_EN;
        if (phy->speed == SPEED_100)
                cfgext |= PCXR_SPEED_100;
        if (phy->duplex)
                cfg |= PCR_DUPLEX_FULL;
        if (!phy->pause)
                cfgext |= PCXR_FLOWCTL_DIS;

        /* Bail out if nothing has changed */
        if (cfg == cfg_o && cfgext == cfgext_o)
                return;

        wrl(pep, PORT_CONFIG, cfg);
        wrl(pep, PORT_CONFIG_EXT, cfgext);

        phy_print_status(phy);
}
static int pxa168_init_phy(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct ethtool_link_ksettings cmd;
        struct phy_device *phy = NULL;
        int err;

        if (dev->phydev)
                return 0;

        phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
        if (IS_ERR(phy))
                return PTR_ERR(phy);

        err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link,
                                 pep->phy_intf);
        if (err)
                return err;

        cmd.base.phy_address = pep->phy_addr;
        cmd.base.speed = pep->phy_speed;
        cmd.base.duplex = pep->phy_duplex;
        linkmode_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES);
        cmd.base.autoneg = AUTONEG_ENABLE;

        if (cmd.base.speed != 0)
                cmd.base.autoneg = AUTONEG_DISABLE;

        return phy_ethtool_set_link_ksettings(dev, &cmd);
}
static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
        int err = 0;

        /* Disable interrupts */
        wrl(pep, INT_MASK, 0);
        wrl(pep, INT_CAUSE, 0);
        /* Write to ICR to clear interrupts. */
        wrl(pep, INT_W_CLEAR, 0);
        /* Abort any transmit and receive operations and put DMA
         * in idle state.
         */
        abort_dma(pep);
        /* Initialize address hash table */
        err = init_hash_table(pep);
        if (err)
                return err;
        /* SDMA configuration */
        wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |       /* Burst size = 32 bytes */
            SDCR_RIFB |                         /* Rx interrupt on frame */
            SDCR_BLMT |                         /* Little endian transmit */
            SDCR_BLMR |                         /* Little endian receive */
            SDCR_RC_MAX_RETRANS);               /* Max retransmit count */
        /* Port Configuration */
        wrl(pep, PORT_CONFIG, PCR_HS);          /* Hash size is 1/2kb */
        set_port_config_ext(pep);

        return err;
}
static int rxq_init(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct rx_desc *p_rx_desc;
        int size = 0, i = 0;
        int rx_desc_num = pep->rx_ring_size;

        /* Allocate RX skb rings */
        pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL);
        if (!pep->rx_skb)
                return -ENOMEM;

        /* Allocate RX ring */
        pep->rx_desc_count = 0;
        size = pep->rx_ring_size * sizeof(struct rx_desc);
        pep->rx_desc_area_size = size;
        pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
                                                 &pep->rx_desc_dma,
                                                 GFP_KERNEL);
        if (!pep->p_rx_desc_area)
                goto out;

        /* initialize the next_desc_ptr links in the Rx descriptors ring */
        p_rx_desc = pep->p_rx_desc_area;
        for (i = 0; i < rx_desc_num; i++) {
                p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
                    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
        }

        /* Save Rx desc pointer to driver struct. */
        pep->rx_curr_desc_q = 0;
        pep->rx_used_desc_q = 0;
        pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
        return 0;
out:
        kfree(pep->rx_skb);
        return -ENOMEM;
}
static void rxq_deinit(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        int curr;

        /* Free preallocated skb's on RX rings */
        for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
                if (pep->rx_skb[curr]) {
                        dev_kfree_skb(pep->rx_skb[curr]);
                        pep->rx_desc_count--;
                }
        }
        if (pep->rx_desc_count)
                netdev_err(dev, "Error in freeing Rx Ring. %d skb's still in use\n",
                           pep->rx_desc_count);

        /* Free RX ring */
        if (pep->p_rx_desc_area)
                dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
                                  pep->p_rx_desc_area, pep->rx_desc_dma);
        kfree(pep->rx_skb);
}
static int txq_init(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct tx_desc *p_tx_desc;
        int size = 0, i = 0;
        int tx_desc_num = pep->tx_ring_size;

        pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL);
        if (!pep->tx_skb)
                return -ENOMEM;

        /* Allocate TX ring */
        pep->tx_desc_count = 0;
        size = pep->tx_ring_size * sizeof(struct tx_desc);
        pep->tx_desc_area_size = size;
        pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
                                                 &pep->tx_desc_dma,
                                                 GFP_KERNEL);
        if (!pep->p_tx_desc_area)
                goto out;

        /* Initialize the next_desc_ptr links in the Tx descriptors ring */
        p_tx_desc = pep->p_tx_desc_area;
        for (i = 0; i < tx_desc_num; i++) {
                p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
                    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
        }

        pep->tx_curr_desc_q = 0;
        pep->tx_used_desc_q = 0;
        pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
        return 0;
out:
        kfree(pep->tx_skb);
        return -ENOMEM;
}
static void txq_deinit(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);

        /* Free outstanding skb's on TX ring */
        txq_reclaim(dev, 1);
        BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
        /* Free TX ring */
        if (pep->p_tx_desc_area)
                dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
                                  pep->p_tx_desc_area, pep->tx_desc_dma);
        kfree(pep->tx_skb);
}
static int pxa168_eth_open(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        int err;

        err = pxa168_init_phy(dev);
        if (err)
                return err;

        err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
        if (err) {
                dev_err(&dev->dev, "can't assign irq\n");
                return -EAGAIN;
        }
        pep->rx_resource_err = 0;
        err = rxq_init(dev);
        if (err != 0)
                goto out_free_irq;
        err = txq_init(dev);
        if (err != 0)
                goto out_free_rx_skb;
        pep->rx_used_desc_q = 0;
        pep->rx_curr_desc_q = 0;

        /* Fill RX ring with skb's */
        rxq_refill(dev);
        pep->rx_used_desc_q = 0;
        pep->rx_curr_desc_q = 0;
        netif_carrier_off(dev);
        napi_enable(&pep->napi);
        eth_port_start(dev);
        return 0;
out_free_rx_skb:
        rxq_deinit(dev);
out_free_irq:
        free_irq(dev->irq, dev);
        return err;
}
static int pxa168_eth_stop(struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);

        eth_port_reset(dev);

        /* Disable interrupts */
        wrl(pep, INT_MASK, 0);
        wrl(pep, INT_CAUSE, 0);
        /* Write to ICR to clear interrupts. */
        wrl(pep, INT_W_CLEAR, 0);
        napi_disable(&pep->napi);
        del_timer_sync(&pep->timeout);
        netif_carrier_off(dev);
        free_irq(dev->irq, dev);
        rxq_deinit(dev);
        txq_deinit(dev);

        return 0;
}
static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);

        dev->mtu = mtu;
        set_port_config_ext(pep);

        if (!netif_running(dev))
                return 0;

        /*
         * Stop and then re-open the interface. This will allocate RX
         * skbs of the new MTU.
         * There is a possible danger that the open will not succeed,
         * due to memory being full.
         */
        pxa168_eth_stop(dev);
        if (pxa168_eth_open(dev)) {
                dev_err(&dev->dev,
                        "fatal error on re-opening device after MTU change\n");
        }

        return 0;
}
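/*
 * Claim the next free TX descriptor slot.  The caller (start_xmit) stops
 * the queue before the ring can fill completely, so the current pointer
 * must never catch up with the used pointer here; the BUG_ON guards that
 * invariant.
 */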
static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
{
        int tx_desc_curr;

        tx_desc_curr = pep->tx_curr_desc_q;
        pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;

        BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);

        pep->tx_desc_count++;

        return tx_desc_curr;
}
static int pxa168_rx_poll(struct napi_struct *napi, int budget)
{
        struct pxa168_eth_private *pep =
            container_of(napi, struct pxa168_eth_private, napi);
        struct net_device *dev = pep->dev;
        int work_done = 0;

        /*
         * We call txq_reclaim on every poll, since during NAPI processing
         * interrupts are disabled and we would otherwise miss the TX_DONE
         * event, which is not updated in the interrupt status register
         * while they are off.
         */
        txq_reclaim(dev, 0);
        if (netif_queue_stopped(dev)
            && pep->tx_ring_size - pep->tx_desc_count > 1) {
                netif_wake_queue(dev);
        }
        work_done = rxq_process(dev, budget);
        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                wrl(pep, INT_MASK, ALL_INTS);
        }

        return work_done;
}
static netdev_tx_t
pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct pxa168_eth_private *pep = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct tx_desc *desc;
        int tx_index;
        int length;

        tx_index = eth_alloc_tx_desc_index(pep);
        desc = &pep->p_tx_desc_area[tx_index];
        length = skb->len;
        pep->tx_skb[tx_index] = skb;
        desc->byte_cnt = length;
        desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
                                       DMA_TO_DEVICE);

        skb_tx_timestamp(skb);

        dma_wmb();
        desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
                        TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
        wmb();
        wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);

        stats->tx_bytes += length;
        stats->tx_packets++;
        netif_trans_update(dev);
        if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
                /* We handled the current skb, but now we are out of space. */
                netif_stop_queue(dev);
        }

        return NETDEV_TX_OK;
}
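/*
 * MDIO access to the PHY goes through the single SMI register: wait for
 * SMI_BUSY to clear, write the opcode/address (and data for writes), and
 * for reads poll until SMI_R_VALID indicates the result has been latched.
 */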
static int smi_wait_ready(struct pxa168_eth_private *pep)
{
        int i = 0;

        /* wait for the SMI register to become available */
        for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
                if (i == PHY_WAIT_ITERATIONS)
                        return -ETIMEDOUT;
                msleep(10);
        }

        return 0;
}
static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
{
        struct pxa168_eth_private *pep = bus->priv;
        int i = 0;
        int val;

        if (smi_wait_ready(pep)) {
                netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }
        wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
        /* now wait for the data to be valid */
        for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
                if (i == PHY_WAIT_ITERATIONS) {
                        netdev_warn(pep->dev,
                                    "pxa168_eth: SMI bus read not valid\n");
                        return -ENODEV;
                }
                msleep(10);
        }

        return val & 0xffff;
}
static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
                            u16 value)
{
        struct pxa168_eth_private *pep = bus->priv;

        if (smi_wait_ready(pep)) {
                netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
            SMI_OP_W | (value & 0xffff));

        if (smi_wait_ready(pep)) {
                netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void pxa168_eth_netpoll(struct net_device *dev)
{
        disable_irq(dev->irq);
        pxa168_eth_int_handler(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif
static void pxa168_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
        strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
        strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
        strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static const struct ethtool_ops pxa168_ethtool_ops = {
        .get_drvinfo        = pxa168_get_drvinfo,
        .nway_reset         = phy_ethtool_nway_reset,
        .get_link           = ethtool_op_get_link,
        .get_ts_info        = ethtool_op_get_ts_info,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops pxa168_eth_netdev_ops = {
        .ndo_open               = pxa168_eth_open,
        .ndo_stop               = pxa168_eth_stop,
        .ndo_start_xmit         = pxa168_eth_start_xmit,
        .ndo_set_rx_mode        = pxa168_eth_set_rx_mode,
        .ndo_set_mac_address    = pxa168_eth_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_eth_ioctl          = phy_do_ioctl,
        .ndo_change_mtu         = pxa168_eth_change_mtu,
        .ndo_tx_timeout         = pxa168_eth_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = pxa168_eth_netpoll,
#endif
};
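/*
 * Probe order: enable the clock, allocate the netdev, map the MMIO region,
 * fetch the IRQ and MAC address, pull ring/PHY parameters from platform
 * data or the device tree, register the SMI MDIO bus, initialize the
 * hardware and finally register the net device.
 */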
static int pxa168_eth_probe(struct platform_device *pdev)
{
        struct pxa168_eth_private *pep = NULL;
        struct net_device *dev = NULL;
        struct clk *clk;
        struct device_node *np;
        int err;

        printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");

        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
                return -ENODEV;
        }
        clk_prepare_enable(clk);

        dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
        if (!dev) {
                err = -ENOMEM;
                goto err_clk;
        }

        platform_set_drvdata(pdev, dev);
        pep = netdev_priv(dev);
        pep->dev = dev;
        pep->clk = clk;

        pep->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(pep->base)) {
                err = PTR_ERR(pep->base);
                goto err_netdev;
        }

        err = platform_get_irq(pdev, 0);
        if (err == -EPROBE_DEFER)
                goto err_netdev;
        BUG_ON(dev->irq < 0);
        dev->irq = err;
        dev->netdev_ops = &pxa168_eth_netdev_ops;
        dev->watchdog_timeo = 2 * HZ;
        dev->base_addr = 0;
        dev->ethtool_ops = &pxa168_ethtool_ops;

        /* MTU range: 68 - 9500 */
        dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = 9500;

        INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);

        err = of_get_ethdev_address(pdev->dev.of_node, dev);
        if (err) {
                u8 addr[ETH_ALEN];

                /* try reading the mac address, if set by the bootloader */
                pxa168_eth_get_mac_address(dev, addr);
                if (is_valid_ether_addr(addr)) {
                        eth_hw_addr_set(dev, addr);
                } else {
                        dev_info(&pdev->dev, "Using random mac address\n");
                        eth_hw_addr_random(dev);
                }
        }
        pep->rx_ring_size = NUM_RX_DESCS;
        pep->tx_ring_size = NUM_TX_DESCS;

        pep->pd = dev_get_platdata(&pdev->dev);
        if (pep->pd) {
                if (pep->pd->rx_queue_size)
                        pep->rx_ring_size = pep->pd->rx_queue_size;

                if (pep->pd->tx_queue_size)
                        pep->tx_ring_size = pep->pd->tx_queue_size;

                pep->port_num = pep->pd->port_number;
                pep->phy_addr = pep->pd->phy_addr;
                pep->phy_speed = pep->pd->speed;
                pep->phy_duplex = pep->pd->duplex;
                pep->phy_intf = pep->pd->intf;

                if (pep->pd->init)
                        pep->pd->init();
        } else if (pdev->dev.of_node) {
                of_property_read_u32(pdev->dev.of_node, "port-id",
                                     &pep->port_num);

                np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
                if (!np) {
                        dev_err(&pdev->dev, "missing phy-handle\n");
                        err = -EINVAL;
                        goto err_netdev;
                }
                of_property_read_u32(np, "reg", &pep->phy_addr);
                of_node_put(np);
                err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf);
                if (err && err != -ENODEV)
                        goto err_netdev;
        }

        /* Hardware supports only 3 ports */
        BUG_ON(pep->port_num > 2);
        netif_napi_add_weight(dev, &pep->napi, pxa168_rx_poll,
                              pep->rx_ring_size);

        memset(&pep->timeout, 0, sizeof(struct timer_list));
        timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);

        pep->smi_bus = mdiobus_alloc();
        if (!pep->smi_bus) {
                err = -ENOMEM;
                goto err_netdev;
        }
        pep->smi_bus->priv = pep;
        pep->smi_bus->name = "pxa168_eth smi";
        pep->smi_bus->read = pxa168_smi_read;
        pep->smi_bus->write = pxa168_smi_write;
        snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
                 pdev->name, pdev->id);
        pep->smi_bus->parent = &pdev->dev;
        pep->smi_bus->phy_mask = 0xffffffff;
        err = mdiobus_register(pep->smi_bus);
        if (err)
                goto err_free_mdio;

        pep->pdev = pdev;
        SET_NETDEV_DEV(dev, &pdev->dev);
        pxa168_init_hw(pep);
        err = register_netdev(dev);
        if (err)
                goto err_mdiobus;
        return 0;

err_mdiobus:
        mdiobus_unregister(pep->smi_bus);
err_free_mdio:
        mdiobus_free(pep->smi_bus);
err_netdev:
        free_netdev(dev);
err_clk:
        clk_disable_unprepare(clk);
        return err;
}
static int pxa168_eth_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct pxa168_eth_private *pep = netdev_priv(dev);

        cancel_work_sync(&pep->tx_timeout_task);
        if (pep->htpr) {
                dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
                                  pep->htpr, pep->htpr_dma);
                pep->htpr = NULL;
        }
        if (dev->phydev)
                phy_disconnect(dev->phydev);

        clk_disable_unprepare(pep->clk);
        mdiobus_unregister(pep->smi_bus);
        mdiobus_free(pep->smi_bus);
        unregister_netdev(dev);
        free_netdev(dev);
        return 0;
}
static void pxa168_eth_shutdown(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);

        eth_port_reset(dev);
}

#ifdef CONFIG_PM
static int pxa168_eth_resume(struct platform_device *pdev)
{
        return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
        return -ENOSYS;
}
#else
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif
static const struct of_device_id pxa168_eth_of_match[] = {
        { .compatible = "marvell,pxa168-eth" },
        { },
};
MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);

static struct platform_driver pxa168_eth_driver = {
        .probe          = pxa168_eth_probe,
        .remove         = pxa168_eth_remove,
        .shutdown       = pxa168_eth_shutdown,
        .resume         = pxa168_eth_resume,
        .suspend        = pxa168_eth_suspend,
        .driver         = {
                .name           = DRIVER_NAME,
                .of_match_table = of_match_ptr(pxa168_eth_of_match),
        },
};

module_platform_driver(pxa168_eth_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");