xilinx_can.c 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* Xilinx CAN device driver
  3. *
  4. * Copyright (C) 2012 - 2022 Xilinx, Inc.
  5. * Copyright (C) 2009 PetaLogix. All rights reserved.
  6. * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
  7. *
  8. * Description:
  9. * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
  10. */
  11. #include <linux/bitfield.h>
  12. #include <linux/clk.h>
  13. #include <linux/errno.h>
  14. #include <linux/ethtool.h>
  15. #include <linux/init.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/io.h>
  18. #include <linux/kernel.h>
  19. #include <linux/module.h>
  20. #include <linux/netdevice.h>
  21. #include <linux/of.h>
  22. #include <linux/of_device.h>
  23. #include <linux/platform_device.h>
  24. #include <linux/skbuff.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/string.h>
  27. #include <linux/types.h>
  28. #include <linux/can/dev.h>
  29. #include <linux/can/error.h>
  30. #include <linux/pm_runtime.h>
  31. #define DRIVER_NAME "xilinx_can"
  32. /* CAN registers set */
  33. enum xcan_reg {
  34. XCAN_SRR_OFFSET = 0x00, /* Software reset */
  35. XCAN_MSR_OFFSET = 0x04, /* Mode select */
  36. XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
  37. XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
  38. XCAN_ECR_OFFSET = 0x10, /* Error counter */
  39. XCAN_ESR_OFFSET = 0x14, /* Error status */
  40. XCAN_SR_OFFSET = 0x18, /* Status */
  41. XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
  42. XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
  43. XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
  44. /* not on CAN FD cores */
  45. XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */
  46. XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */
  47. XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */
  48. /* only on CAN FD cores */
  49. XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
  50. * Prescaler
  51. */
  52. XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
  53. XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
  54. XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
  55. XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
  56. XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
  57. XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
  58. XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
  59. XCAN_AFR_2_MASK_OFFSET = 0x0A00, /* Acceptance Filter MASK */
  60. XCAN_AFR_2_ID_OFFSET = 0x0A04, /* Acceptance Filter ID */
  61. };
  62. #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
  63. #define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
  64. #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
  65. #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
  66. #define XCANFD_FRAME_DW_OFFSET(frame_base) ((frame_base) + 0x08)
  67. #define XCAN_CANFD_FRAME_SIZE 0x48
  68. #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
  69. XCAN_CANFD_FRAME_SIZE * (n))
  70. #define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
  71. XCAN_CANFD_FRAME_SIZE * (n))
  72. #define XCAN_RXMSG_2_FRAME_OFFSET(n) (XCAN_RXMSG_2_BASE_OFFSET + \
  73. XCAN_CANFD_FRAME_SIZE * (n))
  74. /* the single TX mailbox used by this driver on CAN FD HW */
  75. #define XCAN_TX_MAILBOX_IDX 0
  76. /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
  77. #define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
  78. #define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
  79. #define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
  80. #define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
  81. #define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
  82. #define XCAN_BRPR_TDCO_MASK GENMASK(12, 8) /* TDCO */
  83. #define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8) /* TDCO for CANFD 2.0 */
  84. #define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
  85. #define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
  86. #define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
  87. #define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */
  88. #define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */
  89. #define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */
  90. #define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
  91. #define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
  92. #define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
  93. #define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */
  94. #define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
  95. #define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
  96. #define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
  97. #define XCAN_SR_TDCV_MASK GENMASK(22, 16) /* TDCV Value */
  98. #define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
  99. #define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
  100. #define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
  101. #define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
  102. #define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
  103. #define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
  104. #define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */
  105. #define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
  106. #define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
  107. #define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
  108. #define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */
  109. #define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */
  110. #define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */
  111. #define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */
  112. #define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */
  113. #define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
  114. #define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
  115. #define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
  116. #define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
  117. #define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
  118. #define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
  119. #define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
  120. #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
  121. #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
  122. #define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
  123. #define XCAN_2_FSR_FL_MASK 0x00007F00 /* RX Fill Level */
  124. #define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
  125. #define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
  126. #define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
  127. #define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
  128. #define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
  129. /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
  130. #define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
  131. #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
  132. #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
  133. #define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
  134. #define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */
  135. #define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */
  136. #define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
  137. #define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
  138. #define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */
  139. /* CAN frame length constants */
  140. #define XCAN_FRAME_MAX_DATA_LEN 8
  141. #define XCANFD_DW_BYTES 4
  142. #define XCAN_TIMEOUT (1 * HZ)
  143. /* TX-FIFO-empty interrupt available */
  144. #define XCAN_FLAG_TXFEMP 0x0001
  145. /* RX Match Not Finished interrupt available */
  146. #define XCAN_FLAG_RXMNF 0x0002
  147. /* Extended acceptance filters with control at 0xE0 */
  148. #define XCAN_FLAG_EXT_FILTERS 0x0004
  149. /* TX mailboxes instead of TX FIFO */
  150. #define XCAN_FLAG_TX_MAILBOXES 0x0008
  151. /* RX FIFO with each buffer in separate registers at 0x1100
  152. * instead of the regular FIFO at 0x50
  153. */
  154. #define XCAN_FLAG_RX_FIFO_MULTI 0x0010
  155. #define XCAN_FLAG_CANFD_2 0x0020
  156. enum xcan_ip_type {
  157. XAXI_CAN = 0,
  158. XZYNQ_CANPS,
  159. XAXI_CANFD,
  160. XAXI_CANFD_2_0,
  161. };
  162. struct xcan_devtype_data {
  163. enum xcan_ip_type cantype;
  164. unsigned int flags;
  165. const struct can_bittiming_const *bittiming_const;
  166. const char *bus_clk_name;
  167. unsigned int btr_ts2_shift;
  168. unsigned int btr_sjw_shift;
  169. };
  170. /**
  171. * struct xcan_priv - This definition define CAN driver instance
  172. * @can: CAN private data structure.
  173. * @tx_lock: Lock for synchronizing TX interrupt handling
  174. * @tx_head: Tx CAN packets ready to send on the queue
  175. * @tx_tail: Tx CAN packets successfully sended on the queue
  176. * @tx_max: Maximum number packets the driver can send
  177. * @napi: NAPI structure
  178. * @read_reg: For reading data from CAN registers
  179. * @write_reg: For writing data to CAN registers
  180. * @dev: Network device data structure
  181. * @reg_base: Ioremapped address to registers
  182. * @irq_flags: For request_irq()
  183. * @bus_clk: Pointer to struct clk
  184. * @can_clk: Pointer to struct clk
  185. * @devtype: Device type specific constants
  186. */
  187. struct xcan_priv {
  188. struct can_priv can;
  189. spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
  190. unsigned int tx_head;
  191. unsigned int tx_tail;
  192. unsigned int tx_max;
  193. struct napi_struct napi;
  194. u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
  195. void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
  196. u32 val);
  197. struct device *dev;
  198. void __iomem *reg_base;
  199. unsigned long irq_flags;
  200. struct clk *bus_clk;
  201. struct clk *can_clk;
  202. struct xcan_devtype_data devtype;
  203. };
  204. /* CAN Bittiming constants as per Xilinx CAN specs */
  205. static const struct can_bittiming_const xcan_bittiming_const = {
  206. .name = DRIVER_NAME,
  207. .tseg1_min = 1,
  208. .tseg1_max = 16,
  209. .tseg2_min = 1,
  210. .tseg2_max = 8,
  211. .sjw_max = 4,
  212. .brp_min = 1,
  213. .brp_max = 256,
  214. .brp_inc = 1,
  215. };
  216. /* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
  217. static const struct can_bittiming_const xcan_bittiming_const_canfd = {
  218. .name = DRIVER_NAME,
  219. .tseg1_min = 1,
  220. .tseg1_max = 64,
  221. .tseg2_min = 1,
  222. .tseg2_max = 16,
  223. .sjw_max = 16,
  224. .brp_min = 1,
  225. .brp_max = 256,
  226. .brp_inc = 1,
  227. };
  228. /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
  229. static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
  230. .name = DRIVER_NAME,
  231. .tseg1_min = 1,
  232. .tseg1_max = 16,
  233. .tseg2_min = 1,
  234. .tseg2_max = 8,
  235. .sjw_max = 8,
  236. .brp_min = 1,
  237. .brp_max = 256,
  238. .brp_inc = 1,
  239. };
  240. /* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
  241. static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
  242. .name = DRIVER_NAME,
  243. .tseg1_min = 1,
  244. .tseg1_max = 256,
  245. .tseg2_min = 1,
  246. .tseg2_max = 128,
  247. .sjw_max = 128,
  248. .brp_min = 1,
  249. .brp_max = 256,
  250. .brp_inc = 1,
  251. };
  252. /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
  253. static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
  254. .name = DRIVER_NAME,
  255. .tseg1_min = 1,
  256. .tseg1_max = 32,
  257. .tseg2_min = 1,
  258. .tseg2_max = 16,
  259. .sjw_max = 16,
  260. .brp_min = 1,
  261. .brp_max = 256,
  262. .brp_inc = 1,
  263. };
  264. /* Transmission Delay Compensation constants for CANFD 1.0 */
  265. static const struct can_tdc_const xcan_tdc_const_canfd = {
  266. .tdcv_min = 0,
  267. .tdcv_max = 0, /* Manual mode not supported. */
  268. .tdco_min = 0,
  269. .tdco_max = 32,
  270. .tdcf_min = 0, /* Filter window not supported */
  271. .tdcf_max = 0,
  272. };
  273. /* Transmission Delay Compensation constants for CANFD 2.0 */
  274. static const struct can_tdc_const xcan_tdc_const_canfd2 = {
  275. .tdcv_min = 0,
  276. .tdcv_max = 0, /* Manual mode not supported. */
  277. .tdco_min = 0,
  278. .tdco_max = 64,
  279. .tdcf_min = 0, /* Filter window not supported */
  280. .tdcf_max = 0,
  281. };
  282. /**
  283. * xcan_write_reg_le - Write a value to the device register little endian
  284. * @priv: Driver private data structure
  285. * @reg: Register offset
  286. * @val: Value to write at the Register offset
  287. *
  288. * Write data to the paricular CAN register
  289. */
  290. static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
  291. u32 val)
  292. {
  293. iowrite32(val, priv->reg_base + reg);
  294. }
  295. /**
  296. * xcan_read_reg_le - Read a value from the device register little endian
  297. * @priv: Driver private data structure
  298. * @reg: Register offset
  299. *
  300. * Read data from the particular CAN register
  301. * Return: value read from the CAN register
  302. */
  303. static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
  304. {
  305. return ioread32(priv->reg_base + reg);
  306. }
  307. /**
  308. * xcan_write_reg_be - Write a value to the device register big endian
  309. * @priv: Driver private data structure
  310. * @reg: Register offset
  311. * @val: Value to write at the Register offset
  312. *
  313. * Write data to the paricular CAN register
  314. */
  315. static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
  316. u32 val)
  317. {
  318. iowrite32be(val, priv->reg_base + reg);
  319. }
  320. /**
  321. * xcan_read_reg_be - Read a value from the device register big endian
  322. * @priv: Driver private data structure
  323. * @reg: Register offset
  324. *
  325. * Read data from the particular CAN register
  326. * Return: value read from the CAN register
  327. */
  328. static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
  329. {
  330. return ioread32be(priv->reg_base + reg);
  331. }
  332. /**
  333. * xcan_rx_int_mask - Get the mask for the receive interrupt
  334. * @priv: Driver private data structure
  335. *
  336. * Return: The receive interrupt mask used by the driver on this HW
  337. */
  338. static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
  339. {
  340. /* RXNEMP is better suited for our use case as it cannot be cleared
  341. * while the FIFO is non-empty, but CAN FD HW does not have it
  342. */
  343. if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
  344. return XCAN_IXR_RXOK_MASK;
  345. else
  346. return XCAN_IXR_RXNEMP_MASK;
  347. }
  348. /**
  349. * set_reset_mode - Resets the CAN device mode
  350. * @ndev: Pointer to net_device structure
  351. *
  352. * This is the driver reset mode routine.The driver
  353. * enters into configuration mode.
  354. *
  355. * Return: 0 on success and failure value on error
  356. */
  357. static int set_reset_mode(struct net_device *ndev)
  358. {
  359. struct xcan_priv *priv = netdev_priv(ndev);
  360. unsigned long timeout;
  361. priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
  362. timeout = jiffies + XCAN_TIMEOUT;
  363. while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
  364. if (time_after(jiffies, timeout)) {
  365. netdev_warn(ndev, "timed out for config mode\n");
  366. return -ETIMEDOUT;
  367. }
  368. usleep_range(500, 10000);
  369. }
  370. /* reset clears FIFOs */
  371. priv->tx_head = 0;
  372. priv->tx_tail = 0;
  373. return 0;
  374. }
  375. /**
  376. * xcan_set_bittiming - CAN set bit timing routine
  377. * @ndev: Pointer to net_device structure
  378. *
  379. * This is the driver set bittiming routine.
  380. * Return: 0 on success and failure value on error
  381. */
  382. static int xcan_set_bittiming(struct net_device *ndev)
  383. {
  384. struct xcan_priv *priv = netdev_priv(ndev);
  385. struct can_bittiming *bt = &priv->can.bittiming;
  386. struct can_bittiming *dbt = &priv->can.data_bittiming;
  387. u32 btr0, btr1;
  388. u32 is_config_mode;
  389. /* Check whether Xilinx CAN is in configuration mode.
  390. * It cannot set bit timing if Xilinx CAN is not in configuration mode.
  391. */
  392. is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
  393. XCAN_SR_CONFIG_MASK;
  394. if (!is_config_mode) {
  395. netdev_alert(ndev,
  396. "BUG! Cannot set bittiming - CAN is not in config mode\n");
  397. return -EPERM;
  398. }
  399. /* Setting Baud Rate prescaler value in BRPR Register */
  400. btr0 = (bt->brp - 1);
  401. /* Setting Time Segment 1 in BTR Register */
  402. btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
  403. /* Setting Time Segment 2 in BTR Register */
  404. btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
  405. /* Setting Synchronous jump width in BTR Register */
  406. btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
  407. priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
  408. priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
  409. if (priv->devtype.cantype == XAXI_CANFD ||
  410. priv->devtype.cantype == XAXI_CANFD_2_0) {
  411. /* Setting Baud Rate prescaler value in F_BRPR Register */
  412. btr0 = dbt->brp - 1;
  413. if (can_tdc_is_enabled(&priv->can)) {
  414. if (priv->devtype.cantype == XAXI_CANFD)
  415. btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
  416. XCAN_BRPR_TDC_ENABLE;
  417. else
  418. btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
  419. XCAN_BRPR_TDC_ENABLE;
  420. }
  421. /* Setting Time Segment 1 in BTR Register */
  422. btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
  423. /* Setting Time Segment 2 in BTR Register */
  424. btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
  425. /* Setting Synchronous jump width in BTR Register */
  426. btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
  427. priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
  428. priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
  429. }
  430. netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
  431. priv->read_reg(priv, XCAN_BRPR_OFFSET),
  432. priv->read_reg(priv, XCAN_BTR_OFFSET));
  433. return 0;
  434. }
  435. /**
  436. * xcan_chip_start - This the drivers start routine
  437. * @ndev: Pointer to net_device structure
  438. *
  439. * This is the drivers start routine.
  440. * Based on the State of the CAN device it puts
  441. * the CAN device into a proper mode.
  442. *
  443. * Return: 0 on success and failure value on error
  444. */
  445. static int xcan_chip_start(struct net_device *ndev)
  446. {
  447. struct xcan_priv *priv = netdev_priv(ndev);
  448. u32 reg_msr;
  449. int err;
  450. u32 ier;
  451. /* Check if it is in reset mode */
  452. err = set_reset_mode(ndev);
  453. if (err < 0)
  454. return err;
  455. err = xcan_set_bittiming(ndev);
  456. if (err < 0)
  457. return err;
  458. /* Enable interrupts
  459. *
  460. * We enable the ERROR interrupt even with
  461. * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
  462. * dedicated interrupt for a state change to
  463. * ERROR_WARNING/ERROR_PASSIVE.
  464. */
  465. ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
  466. XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
  467. XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
  468. XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
  469. if (priv->devtype.flags & XCAN_FLAG_RXMNF)
  470. ier |= XCAN_IXR_RXMNF_MASK;
  471. priv->write_reg(priv, XCAN_IER_OFFSET, ier);
  472. /* Check whether it is loopback mode or normal mode */
  473. if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
  474. reg_msr = XCAN_MSR_LBACK_MASK;
  475. else
  476. reg_msr = 0x0;
  477. /* enable the first extended filter, if any, as cores with extended
  478. * filtering default to non-receipt if all filters are disabled
  479. */
  480. if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
  481. priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
  482. priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
  483. priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
  484. netdev_dbg(ndev, "status:#x%08x\n",
  485. priv->read_reg(priv, XCAN_SR_OFFSET));
  486. priv->can.state = CAN_STATE_ERROR_ACTIVE;
  487. return 0;
  488. }
  489. /**
  490. * xcan_do_set_mode - This sets the mode of the driver
  491. * @ndev: Pointer to net_device structure
  492. * @mode: Tells the mode of the driver
  493. *
  494. * This check the drivers state and calls the corresponding modes to set.
  495. *
  496. * Return: 0 on success and failure value on error
  497. */
  498. static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
  499. {
  500. int ret;
  501. switch (mode) {
  502. case CAN_MODE_START:
  503. ret = xcan_chip_start(ndev);
  504. if (ret < 0) {
  505. netdev_err(ndev, "xcan_chip_start failed!\n");
  506. return ret;
  507. }
  508. netif_wake_queue(ndev);
  509. break;
  510. default:
  511. ret = -EOPNOTSUPP;
  512. break;
  513. }
  514. return ret;
  515. }
  516. /**
  517. * xcan_write_frame - Write a frame to HW
  518. * @ndev: Pointer to net_device structure
  519. * @skb: sk_buff pointer that contains data to be Txed
  520. * @frame_offset: Register offset to write the frame to
  521. */
  522. static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
  523. int frame_offset)
  524. {
  525. u32 id, dlc, data[2] = {0, 0};
  526. struct canfd_frame *cf = (struct canfd_frame *)skb->data;
  527. u32 ramoff, dwindex = 0, i;
  528. struct xcan_priv *priv = netdev_priv(ndev);
  529. /* Watch carefully on the bit sequence */
  530. if (cf->can_id & CAN_EFF_FLAG) {
  531. /* Extended CAN ID format */
  532. id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
  533. XCAN_IDR_ID2_MASK;
  534. id |= (((cf->can_id & CAN_EFF_MASK) >>
  535. (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
  536. XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
  537. /* The substibute remote TX request bit should be "1"
  538. * for extended frames as in the Xilinx CAN datasheet
  539. */
  540. id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
  541. if (cf->can_id & CAN_RTR_FLAG)
  542. /* Extended frames remote TX request */
  543. id |= XCAN_IDR_RTR_MASK;
  544. } else {
  545. /* Standard CAN ID format */
  546. id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
  547. XCAN_IDR_ID1_MASK;
  548. if (cf->can_id & CAN_RTR_FLAG)
  549. /* Standard frames remote TX request */
  550. id |= XCAN_IDR_SRR_MASK;
  551. }
  552. dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
  553. if (can_is_canfd_skb(skb)) {
  554. if (cf->flags & CANFD_BRS)
  555. dlc |= XCAN_DLCR_BRS_MASK;
  556. dlc |= XCAN_DLCR_EDL_MASK;
  557. }
  558. if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
  559. (priv->devtype.flags & XCAN_FLAG_TXFEMP))
  560. can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
  561. else
  562. can_put_echo_skb(skb, ndev, 0, 0);
  563. priv->tx_head++;
  564. priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
  565. /* If the CAN frame is RTR frame this write triggers transmission
  566. * (not on CAN FD)
  567. */
  568. priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
  569. if (priv->devtype.cantype == XAXI_CANFD ||
  570. priv->devtype.cantype == XAXI_CANFD_2_0) {
  571. for (i = 0; i < cf->len; i += 4) {
  572. ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
  573. (dwindex * XCANFD_DW_BYTES);
  574. priv->write_reg(priv, ramoff,
  575. be32_to_cpup((__be32 *)(cf->data + i)));
  576. dwindex++;
  577. }
  578. } else {
  579. if (cf->len > 0)
  580. data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
  581. if (cf->len > 4)
  582. data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
  583. if (!(cf->can_id & CAN_RTR_FLAG)) {
  584. priv->write_reg(priv,
  585. XCAN_FRAME_DW1_OFFSET(frame_offset),
  586. data[0]);
  587. /* If the CAN frame is Standard/Extended frame this
  588. * write triggers transmission (not on CAN FD)
  589. */
  590. priv->write_reg(priv,
  591. XCAN_FRAME_DW2_OFFSET(frame_offset),
  592. data[1]);
  593. }
  594. }
  595. }
/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * Queues one frame into the hardware TX FIFO. Must be callable from
 * the xmit path only; frame write and queue-stop decision are done
 * under tx_lock so they stay consistent with xcan_tx_interrupt().
 *
 * Return: 0 on success, -ENOSPC if FIFO is full.
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	/* NOTE(review): presumably advances priv->tx_head as a side
	 * effect — confirm against xcan_write_frame() definition.
	 */
	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb: sk_buff pointer that contains data to be Txed
 * @ndev: Pointer to net_device structure
 *
 * Writes the frame into the single TX mailbox, then sets the
 * corresponding Transmit Ready Request (TRR) bit to trigger the
 * transmission. Only one frame can be in flight, so the queue is
 * always stopped after queueing (restarted from the TX done ISR).
 *
 * Return: 0 on success, -ENOSPC if there is no space
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Mailbox still pending transmission => no room */
	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}
  645. /**
  646. * xcan_start_xmit - Starts the transmission
  647. * @skb: sk_buff pointer that contains data to be Txed
  648. * @ndev: Pointer to net_device structure
  649. *
  650. * This function is invoked from upper layers to initiate transmission.
  651. *
  652. * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
  653. */
  654. static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  655. {
  656. struct xcan_priv *priv = netdev_priv(ndev);
  657. int ret;
  658. if (can_dev_dropped_skb(ndev, skb))
  659. return NETDEV_TX_OK;
  660. if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
  661. ret = xcan_start_xmit_mailbox(skb, ndev);
  662. else
  663. ret = xcan_start_xmit_fifo(skb, ndev);
  664. if (ret < 0) {
  665. netdev_err(ndev, "BUG!, TX full when queue awake!\n");
  666. netif_stop_queue(ndev);
  667. return NETDEV_TX_BUSY;
  668. }
  669. return NETDEV_TX_OK;
  670. }
/**
 * xcan_rx - Is called from CAN isr to complete the received
 * frame processing
 * @ndev: Pointer to net_device structure
 * @frame_base: Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
	      XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->len = can_cc_dlc2len(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
			      XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
			     XCAN_IDR_ID1_SHIFT;
		/* In standard frames the SRR bit position carries RTR */
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->len > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->len > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	netif_receive_skb(skb);

	return 1;
}
/**
 * xcanfd_rx - Is called from CAN isr to complete the received
 * frame processing
 * @ndev: Pointer to net_device structure
 * @frame_base: Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcanfd_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;

	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));

	/* EDL set => CAN FD frame; allocate the matching skb type */
	if (dlc & XCAN_DLCR_EDL_MASK)
		skb = alloc_canfd_skb(ndev, &cf);
	else
		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Change Xilinx CANFD data length format to socketCAN data
	 * format
	 */
	if (dlc & XCAN_DLCR_EDL_MASK)
		cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					 XCAN_DLCR_DLC_SHIFT);
	else
		cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					 XCAN_DLCR_DLC_SHIFT);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
			      XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
			     XCAN_IDR_ID1_SHIFT;
		/* RTR is only meaningful for classic frames (no EDL) */
		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
						    XCAN_IDR_SRR_MASK))
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* Check the frame received is FD or not*/
	if (dlc & XCAN_DLCR_EDL_MASK) {
		/* FD frames: data words laid out contiguously from the
		 * FD data-word base, advanced by dwindex
		 */
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
				    (dwindex * XCANFD_DW_BYTES);
			data[0] = priv->read_reg(priv, dw_offset);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
			dwindex++;
		}
	} else {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
			data[0] = priv->read_reg(priv, dw_offset + i);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
		}
	}

	if (!(cf->can_id & CAN_RTR_FLAG))
		stats->rx_bytes += cf->len;
	stats->rx_packets++;

	netif_receive_skb(skb);

	return 1;
}
  807. /**
  808. * xcan_current_error_state - Get current error state from HW
  809. * @ndev: Pointer to net_device structure
  810. *
  811. * Checks the current CAN error state from the HW. Note that this
  812. * only checks for ERROR_PASSIVE and ERROR_WARNING.
  813. *
  814. * Return:
  815. * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
  816. * otherwise.
  817. */
  818. static enum can_state xcan_current_error_state(struct net_device *ndev)
  819. {
  820. struct xcan_priv *priv = netdev_priv(ndev);
  821. u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
  822. if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
  823. return CAN_STATE_ERROR_PASSIVE;
  824. else if (status & XCAN_SR_ERRWRN_MASK)
  825. return CAN_STATE_ERROR_WARNING;
  826. else
  827. return CAN_STATE_ERROR_ACTIVE;
  828. }
/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev: Pointer to net_device structure
 * @new_state: The new CAN state to be set
 * @cf: Error frame to be populated or NULL
 *
 * Set new CAN error state for the device, updating statistics and
 * populating the error frame if given.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
	/* attribute the state change to the side with the higher
	 * error counter; 0 means "no change" for that direction
	 */
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	/* non-ERROR states are handled elsewhere */
	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
		return;

	can_change_state(ndev, cf, tx_state, rx_state);

	if (cf) {
		/* report raw TX/RX error counters in the error frame */
		cf->can_id |= CAN_ERR_CNT;
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}
}
  858. /**
  859. * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
  860. * @ndev: Pointer to net_device structure
  861. *
  862. * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
  863. * the performed RX/TX has caused it to drop to a lesser state and set
  864. * the interface state accordingly.
  865. */
  866. static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
  867. {
  868. struct xcan_priv *priv = netdev_priv(ndev);
  869. enum can_state old_state = priv->can.state;
  870. enum can_state new_state;
  871. /* changing error state due to successful frame RX/TX can only
  872. * occur from these states
  873. */
  874. if (old_state != CAN_STATE_ERROR_WARNING &&
  875. old_state != CAN_STATE_ERROR_PASSIVE)
  876. return;
  877. new_state = xcan_current_error_state(ndev);
  878. if (new_state != old_state) {
  879. struct sk_buff *skb;
  880. struct can_frame *cf;
  881. skb = alloc_can_err_skb(ndev, &cf);
  882. xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
  883. if (skb)
  884. netif_rx(skb);
  885. }
  886. }
/**
 * xcan_err_interrupt - error frame Isr
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This is the CAN error interrupt and it will
 * check the type of error and forward the error
 * frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	/* accumulate error info locally; a real skb is only allocated
	 * at the end if anything was actually flagged
	 */
	struct can_frame cf = { };
	u32 err_status;

	/* read-and-clear the error status register */
	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		cf.can_id |= CAN_ERR_BUSOFF;
	} else {
		enum can_state new_state = xcan_current_error_state(ndev);

		if (new_state != priv->can.state)
			xcan_set_error_state(ndev, new_state, &cf);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		cf.can_id |= CAN_ERR_LOSTARB;
		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
	}

	/* Check for RX Match Not Finished interrupt */
	if (isr & XCAN_IXR_RXMNF_MASK) {
		stats->rx_dropped++;
		stats->rx_errors++;
		netdev_err(ndev, "RX match not finished, frame discarded\n");
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		/* detailed protocol-error info is only put in the error
		 * frame when berr reporting is enabled; counters are
		 * updated unconditionally
		 */
		bool berr_reporting = false;

		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
			berr_reporting = true;
			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		}

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_ACK;
				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}

		priv->can.can_stats.bus_error++;
	}

	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);

		if (skb) {
			/* OR into the freshly allocated frame so flags
			 * set by alloc_can_err_skb() are preserved
			 */
			skb_cf->can_id |= cf.can_id;
			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
			netif_rx(skb);
		}
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}
/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * Checks the sleep/wake-up interrupt bits in @isr
 * and puts the device into the appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv: Driver private data structure
 *
 * Return: Register offset of the next frame in RX FIFO,
 * or -ENOENT if the RX FIFO is empty.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr, mask;

		/* clear RXOK before the is-empty check so that any newly
		 * received frame will reassert it without a race
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* check if RX FIFO is empty */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			mask = XCAN_2_FSR_FL_MASK;
		else
			mask = XCAN_FSR_FL_MASK;

		if (!(fsr & mask))
			return -ENOENT;

		/* frame offset is derived from the hardware read index */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset =
			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
		else
			offset =
			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);

	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}
/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi: napi structure pointer
 * @quota: Max number of rx packets to be processed.
 *
 * This is the poll routine for rx part.
 * It will process at most @quota packets, then re-enable the RX
 * interrupts once the FIFO has been drained.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		/* RXOK in the RX interrupt mask indicates the CANFD-style
		 * frame layout; otherwise use the classic frame reader
		 */
		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
			work_done += xcanfd_rx(ndev, frame_offset);
		else
			work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done)
		xcan_update_error_state_after_rxtx(ndev);

	/* under quota => FIFO drained; complete NAPI and unmask RX irqs */
	if (work_done < quota) {
		if (napi_complete_done(napi, work_done)) {
			ier = priv->read_reg(priv, XCAN_IER_OFFSET);
			ier |= xcan_rx_int_mask(priv);
			priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		}
	}
	return work_done;
}
/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev: net_device pointer
 * @isr: Interrupt status register value
 *
 * Completes transmitted frames: figures out how many frames finished
 * (TXOK alone only guarantees at least one), releases the echo skbs,
 * updates stats and wakes the TX queue.
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 *     time. This avoids us clearing a TXOK of a second frame
		 *     but not noticing that the FIFO is now empty and thus
		 *     marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the associated frame
		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
		 *     clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) &&
		       !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	/* release echo skbs and account each completed frame */
	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max, NULL);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	xcan_update_error_state_after_rxtx(ndev);
}
/**
 * xcan_interrupt - CAN Isr
 * @irq: irq number
 * @dev_id: device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and Processing it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and Processing it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and Processing it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		/* ack the error bits before handling them */
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and Processing it */
	if (isr & rx_int_mask) {
		/* mask RX interrupts; xcan_rx_poll() re-enables them
		 * once the FIFO has been drained
		 */
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
  1214. /**
  1215. * xcan_chip_stop - Driver stop routine
  1216. * @ndev: Pointer to net_device structure
  1217. *
  1218. * This is the drivers stop routine. It will disable the
  1219. * interrupts and put the device into configuration mode.
  1220. */
  1221. static void xcan_chip_stop(struct net_device *ndev)
  1222. {
  1223. struct xcan_priv *priv = netdev_priv(ndev);
  1224. int ret;
  1225. /* Disable interrupts and leave the can in configuration mode */
  1226. ret = set_reset_mode(ndev);
  1227. if (ret < 0)
  1228. netdev_dbg(ndev, "set_reset_mode() Failed\n");
  1229. priv->can.state = CAN_STATE_STOPPED;
  1230. }
/**
 * xcan_open - Driver open routine
 * @ndev: Pointer to net_device structure
 *
 * This is the driver open routine.
 * Acquires runtime PM, the IRQ, resets the chip, then brings up the
 * candev core and the hardware. Resources are released in reverse
 * order on failure via the goto cleanup chain.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	/* pm_runtime_get_sync() bumps the usage count even on failure,
	 * so the err path's pm_runtime_put() is still required
	 */
	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}
/**
 * xcan_close - Driver close routine
 * @ndev: Pointer to net_device structure
 *
 * Tears down in reverse order of xcan_open(): stop the queue and NAPI
 * first so no new work arrives, stop the chip, then release the IRQ,
 * the candev core and the runtime PM reference.
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	pm_runtime_put(priv->dev);

	return 0;
}
  1297. /**
  1298. * xcan_get_berr_counter - error counter routine
  1299. * @ndev: Pointer to net_device structure
  1300. * @bec: Pointer to can_berr_counter structure
  1301. *
  1302. * This is the driver error counter routine.
  1303. * Return: 0 on success and failure value on error
  1304. */
  1305. static int xcan_get_berr_counter(const struct net_device *ndev,
  1306. struct can_berr_counter *bec)
  1307. {
  1308. struct xcan_priv *priv = netdev_priv(ndev);
  1309. int ret;
  1310. ret = pm_runtime_get_sync(priv->dev);
  1311. if (ret < 0) {
  1312. netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
  1313. __func__, ret);
  1314. pm_runtime_put(priv->dev);
  1315. return ret;
  1316. }
  1317. bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
  1318. bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
  1319. XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
  1320. pm_runtime_put(priv->dev);
  1321. return 0;
  1322. }
  1323. /**
  1324. * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
  1325. * @ndev: Pointer to net_device structure
  1326. * @tdcv: Pointer to TDCV value
  1327. *
  1328. * Return: 0 on success
  1329. */
  1330. static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
  1331. {
  1332. struct xcan_priv *priv = netdev_priv(ndev);
  1333. *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));
  1334. return 0;
  1335. }
/* Network device operations exposed to the networking core */
static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};
/* Minimal ethtool support: timestamping capability query only */
static const struct ethtool_ops xcan_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};
/**
 * xcan_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (netif_running(ndev)) {
		/* quiesce TX and detach before stopping the hardware */
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		xcan_chip_stop(ndev);
	}

	return pm_runtime_force_suspend(dev);
}
/**
 * xcan_resume - Resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	/* clocks must be back on before touching the hardware */
	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	if (netif_running(ndev)) {
		ret = xcan_chip_start(ndev);
		if (ret) {
			dev_err(dev, "xcan_chip_start failed on resume\n");
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode by gating both device clocks.
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}
/**
 * xcan_runtime_resume - Runtime resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend by re-enabling both device clocks;
 * the bus clock enabled first is rolled back if the CAN clock fails.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}
	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		/* undo the bus clock enable on partial failure */
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	return 0;
}
/* System sleep + runtime PM hooks for this driver */
static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
/* Zynq CANPS: classic CAN with TXFEMP support, clocked via "pclk" */
static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};
/* AXI CAN: classic CAN IP on the AXI bus */
static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};
/* AXI CAN FD 1.0: TX mailboxes and multi-frame RX FIFO */
static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};
/* AXI CAN FD 2.0: as CAN FD 1.0 plus the CANFD_2 register layout */
static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};
/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
  1480. /**
  1481. * xcan_probe - Platform registration call
  1482. * @pdev: Handle to the platform device structure
  1483. *
  1484. * This function does all the memory allocation and registration for the CAN
  1485. * device.
  1486. *
  1487. * Return: 0 on success and failure value on error
  1488. */
  1489. static int xcan_probe(struct platform_device *pdev)
  1490. {
  1491. struct net_device *ndev;
  1492. struct xcan_priv *priv;
  1493. const struct of_device_id *of_id;
  1494. const struct xcan_devtype_data *devtype = &xcan_axi_data;
  1495. void __iomem *addr;
  1496. int ret;
  1497. int rx_max, tx_max;
  1498. u32 hw_tx_max = 0, hw_rx_max = 0;
  1499. const char *hw_tx_max_property;
  1500. /* Get the virtual base address for the device */
  1501. addr = devm_platform_ioremap_resource(pdev, 0);
  1502. if (IS_ERR(addr)) {
  1503. ret = PTR_ERR(addr);
  1504. goto err;
  1505. }
  1506. of_id = of_match_device(xcan_of_match, &pdev->dev);
  1507. if (of_id && of_id->data)
  1508. devtype = of_id->data;
  1509. hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
  1510. "tx-mailbox-count" : "tx-fifo-depth";
  1511. ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
  1512. &hw_tx_max);
  1513. if (ret < 0) {
  1514. dev_err(&pdev->dev, "missing %s property\n",
  1515. hw_tx_max_property);
  1516. goto err;
  1517. }
  1518. ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
  1519. &hw_rx_max);
  1520. if (ret < 0) {
  1521. dev_err(&pdev->dev,
  1522. "missing rx-fifo-depth property (mailbox mode is not supported)\n");
  1523. goto err;
  1524. }
  1525. /* With TX FIFO:
  1526. *
  1527. * There is no way to directly figure out how many frames have been
  1528. * sent when the TXOK interrupt is processed. If TXFEMP
  1529. * is supported, we can have 2 frames in the FIFO and use TXFEMP
  1530. * to determine if 1 or 2 frames have been sent.
  1531. * Theoretically we should be able to use TXFWMEMP to determine up
  1532. * to 3 frames, but it seems that after putting a second frame in the
  1533. * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
  1534. * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
  1535. * sent), which is not a sensible state - possibly TXFWMEMP is not
  1536. * completely synchronized with the rest of the bits?
  1537. *
  1538. * With TX mailboxes:
  1539. *
  1540. * HW sends frames in CAN ID priority order. To preserve FIFO ordering
  1541. * we submit frames one at a time.
  1542. */
  1543. if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
  1544. (devtype->flags & XCAN_FLAG_TXFEMP))
  1545. tx_max = min(hw_tx_max, 2U);
  1546. else
  1547. tx_max = 1;
  1548. rx_max = hw_rx_max;
  1549. /* Create a CAN device instance */
  1550. ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
  1551. if (!ndev)
  1552. return -ENOMEM;
  1553. priv = netdev_priv(ndev);
  1554. priv->dev = &pdev->dev;
  1555. priv->can.bittiming_const = devtype->bittiming_const;
  1556. priv->can.do_set_mode = xcan_do_set_mode;
  1557. priv->can.do_get_berr_counter = xcan_get_berr_counter;
  1558. priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
  1559. CAN_CTRLMODE_BERR_REPORTING;
  1560. if (devtype->cantype == XAXI_CANFD) {
  1561. priv->can.data_bittiming_const =
  1562. &xcan_data_bittiming_const_canfd;
  1563. priv->can.tdc_const = &xcan_tdc_const_canfd;
  1564. }
  1565. if (devtype->cantype == XAXI_CANFD_2_0) {
  1566. priv->can.data_bittiming_const =
  1567. &xcan_data_bittiming_const_canfd2;
  1568. priv->can.tdc_const = &xcan_tdc_const_canfd2;
  1569. }
  1570. if (devtype->cantype == XAXI_CANFD ||
  1571. devtype->cantype == XAXI_CANFD_2_0) {
  1572. priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
  1573. CAN_CTRLMODE_TDC_AUTO;
  1574. priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
  1575. }
  1576. priv->reg_base = addr;
  1577. priv->tx_max = tx_max;
  1578. priv->devtype = *devtype;
  1579. spin_lock_init(&priv->tx_lock);
  1580. /* Get IRQ for the device */
  1581. ret = platform_get_irq(pdev, 0);
  1582. if (ret < 0)
  1583. goto err_free;
  1584. ndev->irq = ret;
  1585. ndev->flags |= IFF_ECHO; /* We support local echo */
  1586. platform_set_drvdata(pdev, ndev);
  1587. SET_NETDEV_DEV(ndev, &pdev->dev);
  1588. ndev->netdev_ops = &xcan_netdev_ops;
  1589. ndev->ethtool_ops = &xcan_ethtool_ops;
  1590. /* Getting the CAN can_clk info */
  1591. priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
  1592. if (IS_ERR(priv->can_clk)) {
  1593. ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
  1594. "device clock not found\n");
  1595. goto err_free;
  1596. }
  1597. priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
  1598. if (IS_ERR(priv->bus_clk)) {
  1599. ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
  1600. "bus clock not found\n");
  1601. goto err_free;
  1602. }
  1603. priv->write_reg = xcan_write_reg_le;
  1604. priv->read_reg = xcan_read_reg_le;
  1605. pm_runtime_enable(&pdev->dev);
  1606. ret = pm_runtime_get_sync(&pdev->dev);
  1607. if (ret < 0) {
  1608. netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
  1609. __func__, ret);
  1610. goto err_disableclks;
  1611. }
  1612. if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
  1613. priv->write_reg = xcan_write_reg_be;
  1614. priv->read_reg = xcan_read_reg_be;
  1615. }
  1616. priv->can.clock.freq = clk_get_rate(priv->can_clk);
  1617. netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);
  1618. ret = register_candev(ndev);
  1619. if (ret) {
  1620. dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
  1621. goto err_disableclks;
  1622. }
  1623. pm_runtime_put(&pdev->dev);
  1624. if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
  1625. priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
  1626. priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
  1627. }
  1628. netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
  1629. priv->reg_base, ndev->irq, priv->can.clock.freq,
  1630. hw_tx_max, priv->tx_max);
  1631. return 0;
  1632. err_disableclks:
  1633. pm_runtime_put(priv->dev);
  1634. pm_runtime_disable(&pdev->dev);
  1635. err_free:
  1636. free_candev(ndev);
  1637. err:
  1638. return ret;
  1639. }
  1640. /**
  1641. * xcan_remove - Unregister the device after releasing the resources
  1642. * @pdev: Handle to the platform device structure
  1643. *
  1644. * This function frees all the resources allocated to the device.
  1645. * Return: 0 always
  1646. */
  1647. static int xcan_remove(struct platform_device *pdev)
  1648. {
  1649. struct net_device *ndev = platform_get_drvdata(pdev);
  1650. unregister_candev(ndev);
  1651. pm_runtime_disable(&pdev->dev);
  1652. free_candev(ndev);
  1653. return 0;
  1654. }
/* Platform driver glue: probe/remove callbacks, dev PM ops, and the OF match
 * table that triggers binding. module_platform_driver() expands to the module
 * init/exit boilerplate that registers/unregisters this driver.
 */
static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");