xilinx_sdfec.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx SDFEC
 *
 * Copyright (C) 2019 Xilinx, Inc.
 *
 * Description:
 * This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
 * IP. It exposes a char device which supports file operations
 * like open(), close() and ioctl().
 */
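/*
 * Illustrative userspace usage (a minimal sketch, not part of the
 * driver): assuming a /dev/xsdfec0 node and the ioctl definitions from
 * <uapi/misc/xilinx_sdfec.h>, a client typically does something like:
 *
 *	int fd = open("/dev/xsdfec0", O_RDWR);
 *	struct xsdfec_status status;
 *
 *	ioctl(fd, XSDFEC_SET_DEFAULT_CONFIG);
 *	ioctl(fd, XSDFEC_START_DEV);
 *	ioctl(fd, XSDFEC_GET_STATUS, &status);
 *	close(fd);
 *
 * Error handling is omitted for brevity.
 */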
#include <linux/miscdevice.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/highmem.h>

#include <uapi/misc/xilinx_sdfec.h>

#define DEV_NAME_LEN 12

static DEFINE_IDA(dev_nrs);

/* Xilinx SDFEC Register Map */
/* CODE_WRI_PROTECT Register */
#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)

/* ACTIVE Register */
#define XSDFEC_ACTIVE_ADDR (0x8)
#define XSDFEC_IS_ACTIVITY_SET (0x1)

/* AXIS_WIDTH Register */
#define XSDFEC_AXIS_WIDTH_ADDR (0xC)
#define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
#define XSDFEC_AXIS_DIN_WORDS_LSB (2)
#define XSDFEC_AXIS_DIN_WIDTH_LSB (0)

/* AXIS_ENABLE Register */
#define XSDFEC_AXIS_ENABLE_ADDR (0x10)
#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
#define XSDFEC_AXIS_ENABLE_MASK \
	(XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)

/* FEC_CODE Register */
#define XSDFEC_FEC_CODE_ADDR (0x14)

/* ORDER Register Map */
#define XSDFEC_ORDER_ADDR (0x18)

/* Interrupt Status Register */
#define XSDFEC_ISR_ADDR (0x1C)
/* Interrupt Status Register Bit Mask */
#define XSDFEC_ISR_MASK (0x3F)

/* Write Only - Interrupt Enable Register */
#define XSDFEC_IER_ADDR (0x20)
/* Write Only - Interrupt Disable Register */
#define XSDFEC_IDR_ADDR (0x24)
/* Read Only - Interrupt Mask Register */
#define XSDFEC_IMR_ADDR (0x28)

/* ECC Interrupt Status Register */
#define XSDFEC_ECC_ISR_ADDR (0x2C)
/* Single Bit Errors */
#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
/* PL Initialize Single Bit Errors */
#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
/* Multi Bit Errors */
#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
/* PL Initialize Multi Bit Errors */
#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
/* Multi Bit Error to Event Shift */
#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
/* PL Initialize Multi Bit Error to Event Shift */
#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
/* ECC Interrupt Status Bit Mask */
#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
/* ECC Interrupt Status PL Initialize Bit Mask */
#define XSDFEC_PL_INIT_ECC_ISR_MASK \
	(XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
/* ECC Interrupt Status All Bit Mask */
#define XSDFEC_ALL_ECC_ISR_MASK \
	(XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
/* ECC Interrupt Status Single Bit Errors Mask */
#define XSDFEC_ALL_ECC_ISR_SBE_MASK \
	(XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
/* ECC Interrupt Status Multi Bit Errors Mask */
#define XSDFEC_ALL_ECC_ISR_MBE_MASK \
	(XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)

/* Write Only - ECC Interrupt Enable Register */
#define XSDFEC_ECC_IER_ADDR (0x30)
/* Write Only - ECC Interrupt Disable Register */
#define XSDFEC_ECC_IDR_ADDR (0x34)
/* Read Only - ECC Interrupt Mask Register */
#define XSDFEC_ECC_IMR_ADDR (0x38)

/* BYPASS Register */
#define XSDFEC_BYPASS_ADDR (0x3C)

/* Turbo Code Register */
#define XSDFEC_TURBO_ADDR (0x100)
#define XSDFEC_TURBO_SCALE_MASK (0xFFF)
#define XSDFEC_TURBO_SCALE_BIT_POS (8)
#define XSDFEC_TURBO_SCALE_MAX (15)

/* REG0 Register */
#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
#define XSDFEC_REG0_N_MIN (4)
#define XSDFEC_REG0_N_MAX (32768)
#define XSDFEC_REG0_N_MUL_P (256)
#define XSDFEC_REG0_N_LSB (0)
#define XSDFEC_REG0_K_MIN (2)
#define XSDFEC_REG0_K_MAX (32766)
#define XSDFEC_REG0_K_MUL_P (256)
#define XSDFEC_REG0_K_LSB (16)

/* REG1 Register */
#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
#define XSDFEC_REG1_PSIZE_MIN (2)
#define XSDFEC_REG1_PSIZE_MAX (512)
#define XSDFEC_REG1_NO_PACKING_MASK (0x400)
#define XSDFEC_REG1_NO_PACKING_LSB (10)
#define XSDFEC_REG1_NM_MASK (0xFF800)
#define XSDFEC_REG1_NM_LSB (11)
#define XSDFEC_REG1_BYPASS_MASK (0x100000)

/* REG2 Register */
#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
#define XSDFEC_REG2_NLAYERS_MIN (1)
#define XSDFEC_REG2_NLAYERS_MAX (256)
#define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
#define XSDFEC_REG2_NMQC_LSB (9)
#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
#define XSDFEC_REG2_NORM_TYPE_LSB (20)
#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
#define XSDFEC_REG2_SPECIAL_QC_LSB (21)
#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)

/* REG3 Register */
#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
#define XSDFEC_REG3_LA_OFF_LSB (8)
#define XSDFEC_REG3_QC_OFF_LSB (16)

#define XSDFEC_LDPC_REG_JUMP (0x10)
#define XSDFEC_REG_WIDTH_JUMP (4)

/* The maximum number of pinned pages */
#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
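/*
 * Note: XSDFEC_QC_TABLE_DEPTH (from the UAPI header) is the deepest of
 * the three LDPC tables, so a single table write can span at most
 * MAX_NUM_PAGES user pages.
 */
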
/**
 * struct xsdfec_clks - For managing SD-FEC clocks
 * @core_clk: Main processing clock for core
 * @axi_clk: AXI4-Lite memory-mapped clock
 * @din_words_clk: DIN Words AXI4-Stream Slave clock
 * @din_clk: DIN AXI4-Stream Slave clock
 * @dout_clk: DOUT AXI4-Stream Slave clock
 * @dout_words_clk: DOUT Words AXI4-Stream Slave clock
 * @ctrl_clk: Control AXI4-Stream Slave clock
 * @status_clk: Status AXI4-Stream Slave clock
 */
struct xsdfec_clks {
	struct clk *core_clk;
	struct clk *axi_clk;
	struct clk *din_words_clk;
	struct clk *din_clk;
	struct clk *dout_clk;
	struct clk *dout_words_clk;
	struct clk *ctrl_clk;
	struct clk *status_clk;
};

/**
 * struct xsdfec_dev - Driver data for SDFEC
 * @miscdev: Misc device handle
 * @clks: Clocks managed by the SDFEC driver
 * @waitq: Driver wait queue
 * @config: Configuration of the SDFEC device
 * @dev_name: Device name
 * @flags: spinlock flags
 * @regs: device physical base address
 * @dev: pointer to device struct
 * @state: State of the SDFEC device
 * @error_data_lock: Error counter and states spinlock
 * @dev_id: Device ID
 * @isr_err_count: Count of ISR errors
 * @cecc_count: Count of Correctable ECC errors (SBE)
 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
 * @irq: IRQ number
 * @state_updated: indicates State updated by interrupt handler
 * @stats_updated: indicates Stats updated by interrupt handler
 * @intr_enabled: indicates IRQ enabled
 *
 * This structure contains the necessary state for the SDFEC driver to
 * operate.
 */
struct xsdfec_dev {
	struct miscdevice miscdev;
	struct xsdfec_clks clks;
	wait_queue_head_t waitq;
	struct xsdfec_config config;
	char dev_name[DEV_NAME_LEN];
	unsigned long flags;
	void __iomem *regs;
	struct device *dev;
	enum xsdfec_state state;
	/* Spinlock to protect state_updated and stats_updated */
	spinlock_t error_data_lock;
	int dev_id;
	u32 isr_err_count;
	u32 cecc_count;
	u32 uecc_count;
	int irq;
	bool state_updated;
	bool stats_updated;
	bool intr_enabled;
};

static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
				   u32 value)
{
	dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
	iowrite32(value, xsdfec->regs + addr);
}

static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
{
	u32 rval;

	rval = ioread32(xsdfec->regs + addr);
	dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
	return rval;
}

static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
					u32 reg_offset, u32 bit_num,
					char *config_value)
{
	u32 reg_val;
	u32 bit_mask = 1 << bit_num;

	reg_val = xsdfec_regread(xsdfec, reg_offset);
	*config_value = (reg_val & bit_mask) > 0;
}

static void update_config_from_hw(struct xsdfec_dev *xsdfec)
{
	u32 reg_value;
	bool sdfec_started;

	/* Update the Order */
	reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
	xsdfec->config.order = reg_value;

	update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
				    0, /* Bit Number, maybe change to mask */
				    &xsdfec->config.bypass);

	update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
				    0, /* Bit Number */
				    &xsdfec->config.code_wr_protect);

	reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
	xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
	xsdfec->config.irq.enable_ecc_isr =
		(reg_value & XSDFEC_ECC_ISR_MASK) > 0;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
	sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
	if (sdfec_started)
		xsdfec->state = XSDFEC_STARTED;
	else
		xsdfec->state = XSDFEC_STOPPED;
}

static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_status status;
	int err;

	memset(&status, 0, sizeof(status));
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	status.state = xsdfec->state;
	xsdfec->state_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
			   XSDFEC_IS_ACTIVITY_SET);

	err = copy_to_user(arg, &status, sizeof(status));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
{
	int err;

	err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
	if (err)
		err = -EFAULT;

	return err;
}

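/*
 * The IER and IDR registers are write-only; the read-only IMR reads back
 * 1 for each interrupt line that is still masked (disabled). A successful
 * enable therefore leaves the ISR bits clear in IMR, and a successful
 * disable leaves them all set, which is what the read-back checks below
 * verify.
 */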
static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable */
		xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
		if (mask_read & XSDFEC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling irq with IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
		if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC disabling irq with IDR failed");
			return -EIO;
		}
	}
	return 0;
}

static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling ECC irq with ECC IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_ECC_ISR_MASK) ||
		      ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_PL_INIT_ECC_ISR_MASK))) {
			dev_dbg(xsdfec->dev,
				"SDFEC disable ECC irq with ECC IDR failed");
			return -EIO;
		}
	}
	return 0;
}

static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_irq irq;
	int err;
	int isr_err;
	int ecc_err;

	err = copy_from_user(&irq, arg, sizeof(irq));
	if (err)
		return -EFAULT;

	/* Setup tlast related IRQ */
	isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
	if (!isr_err)
		xsdfec->config.irq.enable_isr = irq.enable_isr;

	/* Setup ECC related IRQ */
	ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
	if (!ecc_err)
		xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;

	if (isr_err < 0 || ecc_err < 0)
		err = -EIO;

	return err;
}

static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_turbo turbo;
	int err;
	u32 turbo_write;

	err = copy_from_user(&turbo, arg, sizeof(turbo));
	if (err)
		return -EFAULT;

	if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
		return -EINVAL;

	if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
		return -EINVAL;

	/* Check to see what device tree says about the FEC codes */
	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
		return -EIO;

	turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
		       << XSDFEC_TURBO_SCALE_BIT_POS) |
		      turbo.alg;
	xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
	return err;
}

static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
	u32 reg_value;
	struct xsdfec_turbo turbo_params;
	int err;

	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
		return -EIO;

	memset(&turbo_params, 0, sizeof(turbo_params));
	reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);

	turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
			     XSDFEC_TURBO_SCALE_BIT_POS;
	turbo_params.alg = reg_value & 0x1;

	err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
	if (err)
		err = -EFAULT;

	return err;
}

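/*
 * REG0 packs the codeword lengths for one LDPC code: N in bits [15:0]
 * and K in bits [31:16]. Both must be multiples of psize (the LDPC
 * sub-matrix size), and offset selects which per-code register bank is
 * written.
 */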
static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
			     u32 offset)
{
	u32 wdata;

	if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
	    (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
		dev_dbg(xsdfec->dev, "N value is not in range");
		return -EINVAL;
	}
	n <<= XSDFEC_REG0_N_LSB;

	if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
	    (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
		dev_dbg(xsdfec->dev, "K value is not in range");
		return -EINVAL;
	}
	k = k << XSDFEC_REG0_K_LSB;
	wdata = k | n;

	if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
			     u32 no_packing, u32 nm, u32 offset)
{
	u32 wdata;

	if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
		dev_dbg(xsdfec->dev, "Psize is not in range");
		return -EINVAL;
	}

	if (no_packing != 0 && no_packing != 1)
		dev_dbg(xsdfec->dev, "No-packing bit register invalid");
	no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
		      XSDFEC_REG1_NO_PACKING_MASK);

	if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
		dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
	nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;

	wdata = nm | no_packing | psize;
	if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
			     u32 norm_type, u32 special_qc, u32 no_final_parity,
			     u32 max_schedule, u32 offset)
{
	u32 wdata;

	if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
	    nlayers > XSDFEC_REG2_NLAYERS_MAX) {
		dev_dbg(xsdfec->dev, "Nlayers is not in range");
		return -EINVAL;
	}

	if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
		dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
	nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;

	if (norm_type > 1)
		dev_dbg(xsdfec->dev, "Norm type is invalid");
	norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
		     XSDFEC_REG2_NORM_TYPE_MASK);
	if (special_qc > 1)
		dev_dbg(xsdfec->dev, "Special QC is invalid");
	special_qc = ((special_qc << XSDFEC_REG2_SPECIAL_QC_LSB) &
		      XSDFEC_REG2_SPECIAL_QC_MASK);
	if (no_final_parity > 1)
		dev_dbg(xsdfec->dev, "No final parity check invalid");
	no_final_parity =
		((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
		 XSDFEC_REG2_NO_FINAL_PARITY_MASK);

	if (max_schedule &
	    ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
		dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
	max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
			XSDFEC_REG2_MAX_SCHEDULE_MASK);

	wdata = (max_schedule | no_final_parity | special_qc | norm_type |
		 nmqc | nlayers);

	if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
			     u16 qc_off, u32 offset)
{
	u32 wdata;

	wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
		 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
	if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

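/*
 * Copies one LDPC table from user space straight into device registers.
 * The user buffer is pinned with pin_user_pages_fast() and mapped one
 * page at a time with kmap_local_page(), so no intermediate kernel
 * buffer is needed; the inner loop stops at whichever comes first, the
 * end of the table data or the end of the current page.
 */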
static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
			      u32 *src_ptr, u32 len, const u32 base_addr,
			      const u32 depth)
{
	u32 reg = 0;
	int res, i, nr_pages;
	u32 n;
	u32 *addr = NULL;
	struct page *pages[MAX_NUM_PAGES];

	/*
	 * Writes that go beyond the length of
	 * Shared Scale(SC) table should fail
	 */
	if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
	    len > depth / XSDFEC_REG_WIDTH_JUMP ||
	    offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
		dev_dbg(xsdfec->dev, "Write exceeds SC table length");
		return -EINVAL;
	}

	n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
	if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
		n += 1;

	if (WARN_ON_ONCE(n > INT_MAX))
		return -EINVAL;

	nr_pages = n;

	res = pin_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
	if (res < nr_pages) {
		if (res > 0)
			unpin_user_pages(pages, res);
		return -EINVAL;
	}

	for (i = 0; i < nr_pages; i++) {
		addr = kmap_local_page(pages[i]);
		do {
			xsdfec_regwrite(xsdfec,
					base_addr + ((offset + reg) *
						     XSDFEC_REG_WIDTH_JUMP),
					addr[reg]);
			reg++;
		} while ((reg < len) &&
			 ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
		kunmap_local(addr);
		unpin_user_page(pages[i]);
	}
	return 0;
}

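/*
 * Programs a complete LDPC code: the four per-code parameter registers
 * (REG0..REG3) followed by the shared SC, LA and QC tables. The request
 * is rejected if the core is configured for Turbo decode, has already
 * been started, or has its code registers write-protected.
 */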
static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_ldpc_params *ldpc;
	int ret, n;

	ldpc = memdup_user(arg, sizeof(*ldpc));
	if (IS_ERR(ldpc))
		return PTR_ERR(ldpc);

	if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
		ret = -EIO;
		goto err_out;
	}

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED) {
		ret = -EIO;
		goto err_out;
	}

	if (xsdfec->config.code_wr_protect) {
		ret = -EIO;
		goto err_out;
	}

	/* Write Reg 0 */
	ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 1 */
	ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 2 */
	ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
				ldpc->norm_type, ldpc->special_qc,
				ldpc->no_final_parity, ldpc->max_schedule,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 3 */
	ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
				ldpc->qc_off, ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Shared Codes */
	n = ldpc->nlayers / 4;
	if (ldpc->nlayers % 4)
		n++;

	ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
				 XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
				 XSDFEC_SC_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
				 ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
				 XSDFEC_LA_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
				 ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
				 XSDFEC_QC_TABLE_DEPTH);

err_out:
	kfree(ldpc);
	return ret;
}

static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
{
	bool order_invalid;
	enum xsdfec_order order;
	int err;

	err = get_user(order, (enum xsdfec_order __user *)arg);
	if (err)
		return -EFAULT;

	order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
			(order != XSDFEC_OUT_OF_ORDER);
	if (order_invalid)
		return -EINVAL;

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED)
		return -EIO;

	xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);

	xsdfec->config.order = order;

	return 0;
}

static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
{
	bool bypass;
	int err;

	err = get_user(bypass, arg);
	if (err)
		return -EFAULT;

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED)
		return -EIO;

	if (bypass)
		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
	else
		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);

	xsdfec->config.bypass = bypass;

	return 0;
}

static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
{
	u32 reg_value;
	bool is_active;
	int err;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
	/* using a double ! operator instead of casting */
	is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
	err = put_user(is_active, arg);
	if (err)
		return -EFAULT;

	return err;
}

static u32
xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
{
	u32 axis_width_field = 0;

	switch (axis_width_cfg) {
	case XSDFEC_1x128b:
		axis_width_field = 0;
		break;
	case XSDFEC_2x128b:
		axis_width_field = 1;
		break;
	case XSDFEC_4x128b:
		axis_width_field = 2;
		break;
	}

	return axis_width_field;
}

static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
					       axis_word_inc_cfg)
{
	u32 axis_words_field = 0;

	if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
	    axis_word_inc_cfg == XSDFEC_IN_BLOCK)
		axis_words_field = 0;
	else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
		axis_words_field = 1;

	return axis_words_field;
}

static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
{
	u32 reg_value;
	u32 dout_words_field;
	u32 dout_width_field;
	u32 din_words_field;
	u32 din_width_field;
	struct xsdfec_config *config = &xsdfec->config;

	/* translate config info to register values */
	dout_words_field =
		xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
	dout_width_field =
		xsdfec_translate_axis_width_cfg_val(config->dout_width);
	din_words_field =
		xsdfec_translate_axis_words_cfg_val(config->din_word_include);
	din_width_field =
		xsdfec_translate_axis_width_cfg_val(config->din_width);

	reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
	reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
	reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
	reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;

	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);

	return 0;
}

static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
{
	return 0;
}

static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
{
	return 0;
}

static int xsdfec_start(struct xsdfec_dev *xsdfec)
{
	u32 regread;

	regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
	regread &= 0x1;
	if (regread != xsdfec->config.code) {
		dev_dbg(xsdfec->dev,
			"%s SDFEC HW code does not match driver code, reg %d, code %d",
			__func__, regread, xsdfec->config.code);
		return -EINVAL;
	}

	/* Set AXIS enable */
	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
			XSDFEC_AXIS_ENABLE_MASK);
	/* Done */
	xsdfec->state = XSDFEC_STARTED;
	return 0;
}

static int xsdfec_stop(struct xsdfec_dev *xsdfec)
{
	u32 regread;

	if (xsdfec->state != XSDFEC_STARTED)
		dev_dbg(xsdfec->dev, "Device not started correctly");
	/* Disable AXIS_ENABLE Input interfaces only */
	regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
	regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
	/* Stop */
	xsdfec->state = XSDFEC_STOPPED;
	return 0;
}

static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
{
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	xsdfec->isr_err_count = 0;
	xsdfec->uecc_count = 0;
	xsdfec->cecc_count = 0;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	return 0;
}

static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
{
	int err;
	struct xsdfec_stats user_stats;

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	user_stats.isr_err_count = xsdfec->isr_err_count;
	user_stats.cecc_count = xsdfec->cecc_count;
	user_stats.uecc_count = xsdfec->uecc_count;
	xsdfec->stats_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	err = copy_to_user(arg, &user_stats, sizeof(user_stats));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
{
	/* Ensure registers are aligned with core configuration */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
	xsdfec_cfg_axi_streams(xsdfec);
	update_config_from_hw(xsdfec);

	return 0;
}

static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
			     unsigned long data)
{
	struct xsdfec_dev *xsdfec;
	void __user *arg = (void __user *)data;
	int rval;

	xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);

	/* In failed state allow only reset and get status IOCTLs */
	if (xsdfec->state == XSDFEC_NEEDS_RESET &&
	    (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
	     cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
		return -EPERM;
	}

	switch (cmd) {
	case XSDFEC_START_DEV:
		rval = xsdfec_start(xsdfec);
		break;
	case XSDFEC_STOP_DEV:
		rval = xsdfec_stop(xsdfec);
		break;
	case XSDFEC_CLEAR_STATS:
		rval = xsdfec_clear_stats(xsdfec);
		break;
	case XSDFEC_GET_STATS:
		rval = xsdfec_get_stats(xsdfec, arg);
		break;
	case XSDFEC_GET_STATUS:
		rval = xsdfec_get_status(xsdfec, arg);
		break;
	case XSDFEC_GET_CONFIG:
		rval = xsdfec_get_config(xsdfec, arg);
		break;
	case XSDFEC_SET_DEFAULT_CONFIG:
		rval = xsdfec_set_default_config(xsdfec);
		break;
	case XSDFEC_SET_IRQ:
		rval = xsdfec_set_irq(xsdfec, arg);
		break;
	case XSDFEC_SET_TURBO:
		rval = xsdfec_set_turbo(xsdfec, arg);
		break;
	case XSDFEC_GET_TURBO:
		rval = xsdfec_get_turbo(xsdfec, arg);
		break;
	case XSDFEC_ADD_LDPC_CODE_PARAMS:
		rval = xsdfec_add_ldpc(xsdfec, arg);
		break;
	case XSDFEC_SET_ORDER:
		rval = xsdfec_set_order(xsdfec, arg);
		break;
	case XSDFEC_SET_BYPASS:
		rval = xsdfec_set_bypass(xsdfec, arg);
		break;
	case XSDFEC_IS_ACTIVE:
		rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
		break;
	default:
		rval = -ENOTTY;
		break;
	}
	return rval;
}

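/*
 * Poll reports EPOLLIN | EPOLLPRI when the device state has changed (for
 * example, it now needs a reset) and EPOLLIN | EPOLLRDNORM when the error
 * statistics have changed; both flags are set by the threaded IRQ handler
 * further down.
 */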
static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;
	struct xsdfec_dev *xsdfec;

	xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);

	poll_wait(file, &xsdfec->waitq, wait);

	/* XSDFEC ISR detected an error */
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	if (xsdfec->state_updated)
		mask |= EPOLLIN | EPOLLPRI;

	if (xsdfec->stats_updated)
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	return mask;
}

static const struct file_operations xsdfec_fops = {
	.owner = THIS_MODULE,
	.open = xsdfec_dev_open,
	.release = xsdfec_dev_release,
	.unlocked_ioctl = xsdfec_dev_ioctl,
	.poll = xsdfec_poll,
	.compat_ioctl = compat_ptr_ioctl,
};

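/*
 * Illustrative device-tree fragment matching the properties parsed
 * below (the node name, unit address and property values are
 * placeholders, not a validated binding example):
 *
 *	sd-fec@a0040000 {
 *		compatible = "xlnx,sd-fec-1.1";
 *		xlnx,sdfec-code = "ldpc";
 *		xlnx,sdfec-din-words = <0>;
 *		xlnx,sdfec-din-width = <2>;
 *		xlnx,sdfec-dout-words = <0>;
 *		xlnx,sdfec-dout-width = <1>;
 *	};
 */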
static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
{
	struct device *dev = xsdfec->dev;
	struct device_node *node = dev->of_node;
	int rval;
	const char *fec_code;
	u32 din_width;
	u32 din_word_include;
	u32 dout_width;
	u32 dout_word_include;

	rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
	if (rval < 0)
		return rval;

	if (!strcasecmp(fec_code, "ldpc"))
		xsdfec->config.code = XSDFEC_LDPC_CODE;
	else if (!strcasecmp(fec_code, "turbo"))
		xsdfec->config.code = XSDFEC_TURBO_CODE;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
				    &din_word_include);
	if (rval < 0)
		return rval;

	if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
		xsdfec->config.din_word_include = din_word_include;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
	if (rval < 0)
		return rval;

	switch (din_width) {
	/* Fall through and set for valid values */
	case XSDFEC_1x128b:
	case XSDFEC_2x128b:
	case XSDFEC_4x128b:
		xsdfec->config.din_width = din_width;
		break;
	default:
		return -EINVAL;
	}

	rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
				    &dout_word_include);
	if (rval < 0)
		return rval;

	if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
		xsdfec->config.dout_word_include = dout_word_include;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
	if (rval < 0)
		return rval;

	switch (dout_width) {
	/* Fall through and set for valid values */
	case XSDFEC_1x128b:
	case XSDFEC_2x128b:
	case XSDFEC_4x128b:
		xsdfec->config.dout_width = dout_width;
		break;
	default:
		return -EINVAL;
	}

	/* Write LDPC to CODE Register */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);

	xsdfec_cfg_axi_streams(xsdfec);

	return 0;
}

static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
{
	struct xsdfec_dev *xsdfec = dev_id;
	irqreturn_t ret = IRQ_HANDLED;
	u32 ecc_err;
	u32 isr_err;
	u32 uecc_count;
	u32 cecc_count;
	u32 isr_err_count;
	u32 aecc_count;
	u32 tmp;

	WARN_ON(xsdfec->irq != irq);

	/* Mask Interrupts */
	xsdfec_isr_enable(xsdfec, false);
	xsdfec_ecc_isr_enable(xsdfec, false);
	/* Read ISR */
	ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
	isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
	/* Clear the interrupts */
	xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
	xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);

	tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
	/* Count uncorrectable 2-bit errors */
	uecc_count = hweight32(tmp);
	/* Count all ECC errors */
	aecc_count = hweight32(ecc_err);
	/*
	 * Number of correctable 1-bit ECC errors: the arithmetic assumes
	 * each multi-bit error also raises its paired SBE bit, so the MBE
	 * count is subtracted twice from the total.
	 */
	cecc_count = aecc_count - 2 * uecc_count;
	/* Count ISR errors */
	isr_err_count = hweight32(isr_err);
	dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
		uecc_count, aecc_count, cecc_count, isr_err_count);
	dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
		xsdfec->cecc_count, xsdfec->isr_err_count);

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	/* Add new errors to the 2-bit error counter */
	if (uecc_count)
		xsdfec->uecc_count += uecc_count;
	/* Add new errors to the 1-bit error counter */
	if (cecc_count)
		xsdfec->cecc_count += cecc_count;
	/* Add new errors to the ISR error counter */
	if (isr_err_count)
		xsdfec->isr_err_count += isr_err_count;

	/* Update state/stats flag */
	if (uecc_count) {
		if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_NEEDS_RESET;
		else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_PL_RECONFIGURE;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	if (cecc_count)
		xsdfec->stats_updated = true;

	if (isr_err_count) {
		xsdfec->state = XSDFEC_NEEDS_RESET;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
		xsdfec->stats_updated);

	/* Wake up any pollers waiting on a state or stats change */
	if (xsdfec->state_updated || xsdfec->stats_updated)
		wake_up_interruptible(&xsdfec->waitq);
	else
		ret = IRQ_NONE;

	/* Unmask Interrupts */
	xsdfec_isr_enable(xsdfec, true);
	xsdfec_ecc_isr_enable(xsdfec, true);

	return ret;
}

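/*
 * Only core_clk and s_axi_aclk are mandatory; the per-interface stream
 * clocks are optional and treated as absent when devm_clk_get() returns
 * -ENOENT (clk_prepare_enable() accepts a NULL clock and returns 0).
 */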
static int xsdfec_clk_init(struct platform_device *pdev,
			   struct xsdfec_clks *clks)
{
	int err;

	clks->core_clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(clks->core_clk)) {
		dev_err(&pdev->dev, "failed to get core_clk");
		return PTR_ERR(clks->core_clk);
	}

	clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clks->axi_clk)) {
		dev_err(&pdev->dev, "failed to get axi_clk");
		return PTR_ERR(clks->axi_clk);
	}

	clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk");
	if (IS_ERR(clks->din_words_clk)) {
		if (PTR_ERR(clks->din_words_clk) != -ENOENT) {
			err = PTR_ERR(clks->din_words_clk);
			return err;
		}
		clks->din_words_clk = NULL;
	}

	clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk");
	if (IS_ERR(clks->din_clk)) {
		if (PTR_ERR(clks->din_clk) != -ENOENT) {
			err = PTR_ERR(clks->din_clk);
			return err;
		}
		clks->din_clk = NULL;
	}

	clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk");
	if (IS_ERR(clks->dout_clk)) {
		if (PTR_ERR(clks->dout_clk) != -ENOENT) {
			err = PTR_ERR(clks->dout_clk);
			return err;
		}
		clks->dout_clk = NULL;
	}

	clks->dout_words_clk =
		devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk");
	if (IS_ERR(clks->dout_words_clk)) {
		if (PTR_ERR(clks->dout_words_clk) != -ENOENT) {
			err = PTR_ERR(clks->dout_words_clk);
			return err;
		}
		clks->dout_words_clk = NULL;
	}

	clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk");
	if (IS_ERR(clks->ctrl_clk)) {
		if (PTR_ERR(clks->ctrl_clk) != -ENOENT) {
			err = PTR_ERR(clks->ctrl_clk);
			return err;
		}
		clks->ctrl_clk = NULL;
	}

	clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk");
	if (IS_ERR(clks->status_clk)) {
		if (PTR_ERR(clks->status_clk) != -ENOENT) {
			err = PTR_ERR(clks->status_clk);
			return err;
		}
		clks->status_clk = NULL;
	}

	err = clk_prepare_enable(clks->core_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable core_clk (%d)", err);
		return err;
	}

	err = clk_prepare_enable(clks->axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err);
		goto err_disable_core_clk;
	}

	err = clk_prepare_enable(clks->din_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable din_clk (%d)", err);
		goto err_disable_axi_clk;
	}

	err = clk_prepare_enable(clks->din_words_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err);
		goto err_disable_din_clk;
	}

	err = clk_prepare_enable(clks->dout_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err);
		goto err_disable_din_words_clk;
	}

	err = clk_prepare_enable(clks->dout_words_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)",
			err);
		goto err_disable_dout_clk;
	}

	err = clk_prepare_enable(clks->ctrl_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err);
		goto err_disable_dout_words_clk;
	}

	err = clk_prepare_enable(clks->status_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err);
		goto err_disable_ctrl_clk;
	}

	return err;

err_disable_ctrl_clk:
	clk_disable_unprepare(clks->ctrl_clk);
err_disable_dout_words_clk:
	clk_disable_unprepare(clks->dout_words_clk);
err_disable_dout_clk:
	clk_disable_unprepare(clks->dout_clk);
err_disable_din_words_clk:
	clk_disable_unprepare(clks->din_words_clk);
err_disable_din_clk:
	clk_disable_unprepare(clks->din_clk);
err_disable_axi_clk:
	clk_disable_unprepare(clks->axi_clk);
err_disable_core_clk:
	clk_disable_unprepare(clks->core_clk);

	return err;
}

static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
{
	clk_disable_unprepare(clks->status_clk);
	clk_disable_unprepare(clks->ctrl_clk);
	clk_disable_unprepare(clks->dout_words_clk);
	clk_disable_unprepare(clks->dout_clk);
	clk_disable_unprepare(clks->din_words_clk);
	clk_disable_unprepare(clks->din_clk);
	clk_disable_unprepare(clks->core_clk);
	clk_disable_unprepare(clks->axi_clk);
}

static int xsdfec_probe(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;
	struct device *dev;
	struct resource *res;
	int err;
	bool irq_enabled = true;

	xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
	if (!xsdfec)
		return -ENOMEM;

	xsdfec->dev = &pdev->dev;
	spin_lock_init(&xsdfec->error_data_lock);

	err = xsdfec_clk_init(pdev, &xsdfec->clks);
	if (err)
		return err;

	dev = xsdfec->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xsdfec->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(xsdfec->regs)) {
		err = PTR_ERR(xsdfec->regs);
		goto err_xsdfec_dev;
	}

	xsdfec->irq = platform_get_irq(pdev, 0);
	if (xsdfec->irq < 0) {
		dev_dbg(dev, "platform_get_irq failed");
		irq_enabled = false;
	}

	err = xsdfec_parse_of(xsdfec);
	if (err < 0)
		goto err_xsdfec_dev;

	update_config_from_hw(xsdfec);

	/* Save driver private data */
	platform_set_drvdata(pdev, xsdfec);

	if (irq_enabled) {
		init_waitqueue_head(&xsdfec->waitq);
		/* Register IRQ thread */
		err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
						xsdfec_irq_thread, IRQF_ONESHOT,
						"xilinx-sdfec16", xsdfec);
		if (err < 0) {
			dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
			goto err_xsdfec_dev;
		}
	}

	err = ida_alloc(&dev_nrs, GFP_KERNEL);
	if (err < 0)
		goto err_xsdfec_dev;
	xsdfec->dev_id = err;

	snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
	xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
	xsdfec->miscdev.name = xsdfec->dev_name;
	xsdfec->miscdev.fops = &xsdfec_fops;
	xsdfec->miscdev.parent = dev;
	err = misc_register(&xsdfec->miscdev);
	if (err) {
		dev_err(dev, "error:%d. Unable to register device", err);
		goto err_xsdfec_ida;
	}
	return 0;

err_xsdfec_ida:
	ida_free(&dev_nrs, xsdfec->dev_id);
err_xsdfec_dev:
	xsdfec_disable_all_clks(&xsdfec->clks);
	return err;
}

static int xsdfec_remove(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;

	xsdfec = platform_get_drvdata(pdev);
	misc_deregister(&xsdfec->miscdev);
	ida_free(&dev_nrs, xsdfec->dev_id);
	xsdfec_disable_all_clks(&xsdfec->clks);
	return 0;
}

static const struct of_device_id xsdfec_of_match[] = {
	{
		.compatible = "xlnx,sd-fec-1.1",
	},
	{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xsdfec_of_match);

static struct platform_driver xsdfec_driver = {
	.driver = {
		.name = "xilinx-sdfec",
		.of_match_table = xsdfec_of_match,
	},
	.probe = xsdfec_probe,
	.remove = xsdfec_remove,
};

module_platform_driver(xsdfec_driver);

MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
MODULE_LICENSE("GPL");