// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Cadence Design Systems Inc.
 *
 * Author: Boris Brezillon <[email protected]>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/of_device.h>
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034

#define CONF_STATUS0 0x4
#define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
#define CONF_STATUS0_ECC_CHK BIT(28)
#define CONF_STATUS0_INTEG_CHK BIT(27)
#define CONF_STATUS0_CSR_DAP_CHK BIT(26)
#define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
#define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
#define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
#define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
#define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 7))
#define CONF_STATUS0_SUPPORTS_DDR BIT(5)
#define CONF_STATUS0_SEC_MASTER BIT(4)
#define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))

#define CONF_STATUS1 0x8
#define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
#define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
#define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
#define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
#define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
#define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
#define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))

#define REV_ID 0xc
#define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
#define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
#define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
#define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))

#define CTRL 0x10
#define CTRL_DEV_EN BIT(31)
#define CTRL_HALT_EN BIT(30)
#define CTRL_MCS BIT(29)
#define CTRL_MCS_EN BIT(28)
#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24))
#define CTRL_HJ_DISEC BIT(8)
#define CTRL_MST_ACK BIT(7)
#define CTRL_HJ_ACK BIT(6)
#define CTRL_HJ_INIT BIT(5)
#define CTRL_MST_INIT BIT(4)
#define CTRL_AHDR_OPT BIT(3)
#define CTRL_PURE_BUS_MODE 0
#define CTRL_MIXED_FAST_BUS_MODE 2
#define CTRL_MIXED_SLOW_BUS_MODE 3
#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
#define THD_DELAY_MAX 3

#define PRESCL_CTRL0 0x14
#define PRESCL_CTRL0_I2C(x) ((x) << 16)
#define PRESCL_CTRL0_I3C(x) (x)
#define PRESCL_CTRL0_MAX GENMASK(9, 0)

#define PRESCL_CTRL1 0x18
#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
#define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
#define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
#define PRESCL_CTRL1_OD_LOW(x) (x)

#define MST_IER 0x20
#define MST_IDR 0x24
#define MST_IMR 0x28
#define MST_ICR 0x2c
#define MST_ISR 0x30
#define MST_INT_HALTED BIT(18)
#define MST_INT_MR_DONE BIT(17)
#define MST_INT_IMM_COMP BIT(16)
#define MST_INT_TX_THR BIT(15)
#define MST_INT_TX_OVF BIT(14)
#define MST_INT_IBID_THR BIT(12)
#define MST_INT_IBID_UNF BIT(11)
#define MST_INT_IBIR_THR BIT(10)
#define MST_INT_IBIR_UNF BIT(9)
#define MST_INT_IBIR_OVF BIT(8)
#define MST_INT_RX_THR BIT(7)
#define MST_INT_RX_UNF BIT(6)
#define MST_INT_CMDD_EMP BIT(5)
#define MST_INT_CMDD_THR BIT(4)
#define MST_INT_CMDD_OVF BIT(3)
#define MST_INT_CMDR_THR BIT(2)
#define MST_INT_CMDR_UNF BIT(1)
#define MST_INT_CMDR_OVF BIT(0)

#define MST_STATUS0 0x34
#define MST_STATUS0_IDLE BIT(18)
#define MST_STATUS0_HALTED BIT(17)
#define MST_STATUS0_MASTER_MODE BIT(16)
#define MST_STATUS0_TX_FULL BIT(13)
#define MST_STATUS0_IBID_FULL BIT(12)
#define MST_STATUS0_IBIR_FULL BIT(11)
#define MST_STATUS0_RX_FULL BIT(10)
#define MST_STATUS0_CMDD_FULL BIT(9)
#define MST_STATUS0_CMDR_FULL BIT(8)
#define MST_STATUS0_TX_EMP BIT(5)
#define MST_STATUS0_IBID_EMP BIT(4)
#define MST_STATUS0_IBIR_EMP BIT(3)
#define MST_STATUS0_RX_EMP BIT(2)
#define MST_STATUS0_CMDD_EMP BIT(1)
#define MST_STATUS0_CMDR_EMP BIT(0)

#define CMDR 0x38
#define CMDR_NO_ERROR 0
#define CMDR_DDR_PREAMBLE_ERROR 1
#define CMDR_DDR_PARITY_ERROR 2
#define CMDR_DDR_RX_FIFO_OVF 3
#define CMDR_DDR_TX_FIFO_UNF 4
#define CMDR_M0_ERROR 5
#define CMDR_M1_ERROR 6
#define CMDR_M2_ERROR 7
#define CMDR_MST_ABORT 8
#define CMDR_NACK_RESP 9
#define CMDR_INVALID_DA 10
#define CMDR_DDR_DROPPED 11
#define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
#define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
#define CMDR_CMDID_HJACK_DISEC 0xfe
#define CMDR_CMDID_HJACK_ENTDAA 0xff
#define CMDR_CMDID(x) ((x) & GENMASK(7, 0))

#define IBIR 0x3c
#define IBIR_ACKED BIT(12)
#define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
#define IBIR_ERROR BIT(7)
#define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
#define IBIR_TYPE_IBI 0
#define IBIR_TYPE_HJ 1
#define IBIR_TYPE_MR 2
#define IBIR_TYPE(x) ((x) & GENMASK(1, 0))

#define SLV_IER 0x40
#define SLV_IDR 0x44
#define SLV_IMR 0x48
#define SLV_ICR 0x4c
#define SLV_ISR 0x50
#define SLV_INT_TM BIT(20)
#define SLV_INT_ERROR BIT(19)
#define SLV_INT_EVENT_UP BIT(18)
#define SLV_INT_HJ_DONE BIT(17)
#define SLV_INT_MR_DONE BIT(16)
#define SLV_INT_DA_UPD BIT(15)
#define SLV_INT_SDR_FAIL BIT(14)
#define SLV_INT_DDR_FAIL BIT(13)
#define SLV_INT_M_RD_ABORT BIT(12)
#define SLV_INT_DDR_RX_THR BIT(11)
#define SLV_INT_DDR_TX_THR BIT(10)
#define SLV_INT_SDR_RX_THR BIT(9)
#define SLV_INT_SDR_TX_THR BIT(8)
#define SLV_INT_DDR_RX_UNF BIT(7)
#define SLV_INT_DDR_TX_OVF BIT(6)
#define SLV_INT_SDR_RX_UNF BIT(5)
#define SLV_INT_SDR_TX_OVF BIT(4)
#define SLV_INT_DDR_RD_COMP BIT(3)
#define SLV_INT_DDR_WR_COMP BIT(2)
#define SLV_INT_SDR_RD_COMP BIT(1)
#define SLV_INT_SDR_WR_COMP BIT(0)

#define SLV_STATUS0 0x54
#define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
#define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))

#define SLV_STATUS1 0x58
#define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
#define SLV_STATUS1_VEN_TM BIT(19)
#define SLV_STATUS1_HJ_DIS BIT(18)
#define SLV_STATUS1_MR_DIS BIT(17)
#define SLV_STATUS1_PROT_ERR BIT(16)
#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA BIT(8)
#define SLV_STATUS1_DDR_RX_FULL BIT(7)
#define SLV_STATUS1_DDR_TX_FULL BIT(6)
#define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
#define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
#define SLV_STATUS1_SDR_RX_FULL BIT(3)
#define SLV_STATUS1_SDR_TX_FULL BIT(2)
#define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
#define SLV_STATUS1_SDR_TX_EMPTY BIT(0)

#define CMD0_FIFO 0x60
#define CMD0_FIFO_IS_DDR BIT(31)
#define CMD0_FIFO_IS_CCC BIT(30)
#define CMD0_FIFO_BCH BIT(29)
#define XMIT_BURST_STATIC_SUBADDR 0
#define XMIT_SINGLE_INC_SUBADDR 1
#define XMIT_SINGLE_STATIC_SUBADDR 2
#define XMIT_BURST_WITHOUT_SUBADDR 3
#define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
#define CMD0_FIFO_SBCA BIT(26)
#define CMD0_FIFO_RSBC BIT(25)
#define CMD0_FIFO_IS_10B BIT(24)
#define CMD0_FIFO_PL_LEN(l) ((l) << 12)
#define CMD0_FIFO_PL_LEN_MAX 4095
#define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
#define CMD0_FIFO_RNW BIT(0)

#define CMD1_FIFO 0x64
#define CMD1_FIFO_CMDID(id) ((id) << 24)
#define CMD1_FIFO_CSRADDR(a) (a)
#define CMD1_FIFO_CCC(id) (id)

#define TX_FIFO 0x68

#define IMD_CMD0 0x70
#define IMD_CMD0_PL_LEN(l) ((l) << 12)
#define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
#define IMD_CMD0_RNW BIT(0)

#define IMD_CMD1 0x74
#define IMD_CMD1_CCC(id) (id)

#define IMD_DATA 0x78
#define RX_FIFO 0x80
#define IBI_DATA_FIFO 0x84
#define SLV_DDR_TX_FIFO 0x88
#define SLV_DDR_RX_FIFO 0x8c

#define CMD_IBI_THR_CTRL 0x90
#define IBIR_THR(t) ((t) << 24)
#define CMDR_THR(t) ((t) << 16)
#define IBI_THR(t) ((t) << 8)
#define CMD_THR(t) (t)

#define TX_RX_THR_CTRL 0x94
#define RX_THR(t) ((t) << 16)
#define TX_THR(t) (t)

#define SLV_DDR_TX_RX_THR_CTRL 0x98
#define SLV_DDR_RX_THR(t) ((t) << 16)
#define SLV_DDR_TX_THR(t) (t)

#define FLUSH_CTRL 0x9c
#define FLUSH_IBI_RESP BIT(23)
#define FLUSH_CMD_RESP BIT(22)
#define FLUSH_SLV_DDR_RX_FIFO BIT(22)
#define FLUSH_SLV_DDR_TX_FIFO BIT(21)
#define FLUSH_IMM_FIFO BIT(20)
#define FLUSH_IBI_FIFO BIT(19)
#define FLUSH_RX_FIFO BIT(18)
#define FLUSH_TX_FIFO BIT(17)
#define FLUSH_CMD_FIFO BIT(16)

#define TTO_PRESCL_CTRL0 0xb0
#define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL0_DIVA(x) (x)

#define TTO_PRESCL_CTRL1 0xb4
#define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL1_DIVA(x) (x)

#define DEVS_CTRL 0xb8
#define DEVS_CTRL_DEV_CLR_SHIFT 16
#define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
#define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
#define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
#define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
#define MAX_DEVS 16

#define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
#define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
#define DEV_ID_RR0_HDR_CAP BIT(10)
#define DEV_ID_RR0_IS_I3C BIT(9)
#define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
#define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
				    (((a) & GENMASK(9, 7)) << 6))
#define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
				    (((x) >> 6) & GENMASK(9, 7)))

#define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
#define DEV_ID_RR1_PID_MSB(pid) (pid)

#define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
#define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
#define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
#define DEV_ID_RR2_DCR(dcr) (dcr)
#define DEV_ID_RR2_LVR(lvr) (lvr)

#define SIR_MAP(x) (0x180 + ((x) * 4))
#define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
#define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
#define DEV_ROLE_SLAVE 0
#define DEV_ROLE_MASTER 1
#define SIR_MAP_DEV_ROLE(role) ((role) << 14)
#define SIR_MAP_DEV_SLOW BIT(13)
#define SIR_MAP_DEV_PL(l) ((l) << 8)
#define SIR_MAP_PL_MAX GENMASK(4, 0)
#define SIR_MAP_DEV_DA(a) ((a) << 1)
#define SIR_MAP_DEV_ACK BIT(0)

#define GPIR_WORD(x) (0x200 + ((x) * 4))
#define GPI_REG(val, id) \
	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

#define GPOR_WORD(x) (0x220 + ((x) * 4))
#define GPO_REG(val, id) \
	(((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

#define ASF_INT_STATUS 0x300
#define ASF_INT_RAW_STATUS 0x304
#define ASF_INT_MASK 0x308
#define ASF_INT_TEST 0x30c
#define ASF_INT_FATAL_SELECT 0x310
#define ASF_INTEGRITY_ERR BIT(6)
#define ASF_PROTOCOL_ERR BIT(5)
#define ASF_TRANS_TIMEOUT_ERR BIT(4)
#define ASF_CSR_ERR BIT(3)
#define ASF_DAP_ERR BIT(2)
#define ASF_SRAM_UNCORR_ERR BIT(1)
#define ASF_SRAM_CORR_ERR BIT(0)

#define ASF_SRAM_CORR_FAULT_STATUS 0x320
#define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
#define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
#define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))

#define ASF_SRAM_FAULT_STATS 0x328
#define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
#define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))

#define ASF_TRANS_TOUT_CTRL 0x330
#define ASF_TRANS_TOUT_EN BIT(31)
#define ASF_TRANS_TOUT_VAL(x) (x)

#define ASF_TRANS_TOUT_FAULT_MASK 0x334
#define ASF_TRANS_TOUT_FAULT_STATUS 0x338
#define ASF_TRANS_TOUT_FAULT_APB BIT(3)
#define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
#define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)

#define ASF_PROTO_FAULT_MASK 0x340
#define ASF_PROTO_FAULT_STATUS 0x344
#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
#define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
#define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
#define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
#define ASF_PROTO_FAULT_M(x) BIT(x)
struct cdns_i3c_master_caps {
	u32 cmdfifodepth;
	u32 cmdrfifodepth;
	u32 txfifodepth;
	u32 rxfifodepth;
	u32 ibirfifodepth;
};

struct cdns_i3c_cmd {
	u32 cmd0;
	u32 cmd1;
	u32 tx_len;
	const void *tx_buf;
	u32 rx_len;
	void *rx_buf;
	u32 error;
};

struct cdns_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int ncmds;
	struct cdns_i3c_cmd cmds[];
};

struct cdns_i3c_data {
	u8 thd_delay_ns;
};

struct cdns_i3c_master {
	struct work_struct hj_work;
	struct i3c_master_controller base;
	u32 free_rr_slots;
	unsigned int maxdevs;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		spinlock_t lock;
	} ibi;
	struct {
		struct list_head list;
		struct cdns_i3c_xfer *cur;
		spinlock_t lock;
	} xferqueue;
	void __iomem *regs;
	struct clk *sysclk;
	struct clk *pclk;
	struct cdns_i3c_master_caps caps;
	unsigned long i3c_scl_lim;
	const struct cdns_i3c_data *devdata;
};
static inline struct cdns_i3c_master *
to_cdns_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct cdns_i3c_master, base);
}
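
/*
 * The TX/RX FIFOs are accessed 32 bits at a time. The helpers below copy
 * whole words with writesl()/readsl() and handle a trailing 1-3 byte
 * remainder through a temporary word, so buffers of any length can be
 * transferred.
 */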
static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
					  const u8 *bytes, int nbytes)
{
	writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = 0;

		memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
		writesl(master->regs + TX_FIFO, &tmp, 1);
	}
}

static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
					    u8 *bytes, int nbytes)
{
	readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp;

		readsl(master->regs + RX_FIFO, &tmp, 1);
		memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
	}
}
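
/*
 * Only single-destination CCC frames are supported by this driver; the
 * switch below lists the broadcast and direct CCCs that the command FIFO
 * path knows how to encode.
 */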
static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
					     const struct i3c_ccc_cmd *cmd)
{
	if (cmd->ndests > 1)
		return false;

	switch (cmd->id) {
	case I3C_CCC_ENEC(true):
	case I3C_CCC_ENEC(false):
	case I3C_CCC_DISEC(true):
	case I3C_CCC_DISEC(false):
	case I3C_CCC_ENTAS(0, true):
	case I3C_CCC_ENTAS(0, false):
	case I3C_CCC_RSTDAA(true):
	case I3C_CCC_RSTDAA(false):
	case I3C_CCC_ENTDAA:
	case I3C_CCC_SETMWL(true):
	case I3C_CCC_SETMWL(false):
	case I3C_CCC_SETMRL(true):
	case I3C_CCC_SETMRL(false):
	case I3C_CCC_DEFSLVS:
	case I3C_CCC_ENTHDR(0):
	case I3C_CCC_SETDASA:
	case I3C_CCC_SETNEWDA:
	case I3C_CCC_GETMWL:
	case I3C_CCC_GETMRL:
	case I3C_CCC_GETPID:
	case I3C_CCC_GETBCR:
	case I3C_CCC_GETDCR:
	case I3C_CCC_GETSTATUS:
	case I3C_CCC_GETACCMST:
	case I3C_CCC_GETMXDS:
	case I3C_CCC_GETHDRCAP:
		return true;
	default:
		break;
	}

	return false;
}

static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
{
	u32 status;

	writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);

	return readl_poll_timeout(master->regs + MST_STATUS0, status,
				  status & MST_STATUS0_IDLE, 10, 1000000);
}

static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
{
	writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
}

static struct cdns_i3c_xfer *
cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
{
	struct cdns_i3c_xfer *xfer;

	xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
	if (!xfer)
		return NULL;

	INIT_LIST_HEAD(&xfer->node);
	xfer->ncmds = ncmds;
	xfer->ret = -ETIMEDOUT;

	return xfer;
}

static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
{
	kfree(xfer);
}
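
/*
 * Called with xferqueue.lock held: push the payload of each command of the
 * current transfer to the TX FIFO, queue the commands in the CMD FIFOs,
 * start the transfer (CTRL_MCS) and unmask the "command descriptor FIFO
 * empty" interrupt that signals completion.
 */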
static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
{
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	unsigned int i;

	if (!xfer)
		return;

	writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];

		cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
					      cmd->tx_len);
	}

	for (i = 0; i < xfer->ncmds; i++) {
		struct cdns_i3c_cmd *cmd = &xfer->cmds[i];

		writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
		       master->regs + CMD1_FIFO);
		writel(cmd->cmd0, master->regs + CMD0_FIFO);
	}

	writel(readl(master->regs + CTRL) | CTRL_MCS,
	       master->regs + CTRL);
	writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
}
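
/*
 * Completion path, also called with xferqueue.lock held: drain the command
 * response FIFO, copy RX data back to the per-command buffers, translate the
 * per-command error codes into an errno for the whole transfer, then kick
 * the next queued transfer, if any.
 */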
static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
					    u32 isr)
{
	struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
	int i, ret = 0;
	u32 status0;

	if (!xfer)
		return;

	if (!(isr & MST_INT_CMDD_EMP))
		return;

	writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);

	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_CMDR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		struct cdns_i3c_cmd *cmd;
		u32 cmdr, rx_len, id;

		cmdr = readl(master->regs + CMDR);
		id = CMDR_CMDID(cmdr);
		if (id == CMDR_CMDID_HJACK_DISEC ||
		    id == CMDR_CMDID_HJACK_ENTDAA ||
		    WARN_ON(id >= xfer->ncmds))
			continue;

		cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
		rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
		cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
		cmd->error = CMDR_ERROR(cmdr);
	}

	for (i = 0; i < xfer->ncmds; i++) {
		switch (xfer->cmds[i].error) {
		case CMDR_NO_ERROR:
			break;

		case CMDR_DDR_PREAMBLE_ERROR:
		case CMDR_DDR_PARITY_ERROR:
		case CMDR_M0_ERROR:
		case CMDR_M1_ERROR:
		case CMDR_M2_ERROR:
		case CMDR_MST_ABORT:
		case CMDR_NACK_RESP:
		case CMDR_DDR_DROPPED:
			ret = -EIO;
			break;

		case CMDR_DDR_RX_FIFO_OVF:
		case CMDR_DDR_TX_FIFO_UNF:
			ret = -ENOSPC;
			break;

		case CMDR_INVALID_DA:
		default:
			ret = -EINVAL;
			break;
		}
	}

	xfer->ret = ret;
	complete(&xfer->comp);

	xfer = list_first_entry_or_null(&master->xferqueue.list,
					struct cdns_i3c_xfer, node);
	if (xfer)
		list_del_init(&xfer->node);

	master->xferqueue.cur = xfer;
	cdns_i3c_master_start_xfer_locked(master);
}
static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
				       struct cdns_i3c_xfer *xfer)
{
	unsigned long flags;

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		cdns_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
					 struct cdns_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur == xfer) {
		u32 status;

		writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
		       master->regs + CTRL);
		readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
					  status & MST_STATUS0_IDLE, 10,
					  1000000);
		master->xferqueue.cur = NULL;
		writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
		       FLUSH_CMD_RESP,
		       master->regs + FLUSH_CTRL);
		writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
		writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
		       master->regs + CTRL);
	} else {
		list_del_init(&xfer->node);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}

static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
{
	switch (cmd->error) {
	case CMDR_M0_ERROR:
		return I3C_ERROR_M0;

	case CMDR_M1_ERROR:
		return I3C_ERROR_M1;

	case CMDR_M2_ERROR:
	case CMDR_NACK_RESP:
		return I3C_ERROR_M2;

	default:
		break;
	}

	return I3C_ERROR_UNKNOWN;
}
static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
					struct i3c_ccc_cmd *cmd)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_xfer *xfer;
	struct cdns_i3c_cmd *ccmd;
	int ret;

	xfer = cdns_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	ccmd = xfer->cmds;
	ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
	ccmd->cmd0 = CMD0_FIFO_IS_CCC |
		     CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);

	if (cmd->id & I3C_CCC_DIRECT)
		ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);

	if (cmd->rnw) {
		ccmd->cmd0 |= CMD0_FIFO_RNW;
		ccmd->rx_buf = cmd->dests[0].payload.data;
		ccmd->rx_len = cmd->dests[0].payload.len;
	} else {
		ccmd->tx_buf = cmd->dests[0].payload.data;
		ccmd->tx_len = cmd->dests[0].payload.len;
	}

	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);

	ret = xfer->ret;
	cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
	cdns_i3c_master_free_xfer(xfer);

	return ret;
}

static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				      struct i3c_priv_xfer *xfers,
				      int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	int txslots = 0, rxslots = 0, i, ret;
	struct cdns_i3c_xfer *cdns_xfer;

	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
			return -ENOTSUPP;
	}

	if (!nxfers)
		return 0;

	if (nxfers > master->caps.cmdfifodepth ||
	    nxfers > master->caps.cmdrfifodepth)
		return -ENOTSUPP;

	/*
	 * First make sure that all transactions (block of transfers separated
	 * by a STOP marker) fit in the FIFOs.
	 */
	for (i = 0; i < nxfers; i++) {
		if (xfers[i].rnw)
			rxslots += DIV_ROUND_UP(xfers[i].len, 4);
		else
			txslots += DIV_ROUND_UP(xfers[i].len, 4);
	}

	if (rxslots > master->caps.rxfifodepth ||
	    txslots > master->caps.txfifodepth)
		return -ENOTSUPP;

	cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	if (!cdns_xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
		u32 pl_len = xfers[i].len;

		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);

		if (xfers[i].rnw) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].data.in;
			ccmd->rx_len = xfers[i].len;
			pl_len++;
		} else {
			ccmd->tx_buf = xfers[i].data.out;
			ccmd->tx_len = xfers[i].len;
		}

		ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);

		if (i < nxfers - 1)
			ccmd->cmd0 |= CMD0_FIFO_RSBC;

		if (!i)
			ccmd->cmd0 |= CMD0_FIFO_BCH;
	}

	cdns_i3c_master_queue_xfer(master, cdns_xfer);
	if (!wait_for_completion_timeout(&cdns_xfer->comp,
					 msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, cdns_xfer);

	ret = cdns_xfer->ret;

	for (i = 0; i < nxfers; i++)
		xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);

	cdns_i3c_master_free_xfer(cdns_xfer);

	return ret;
}
static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				     const struct i2c_msg *xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned int nrxwords = 0, ntxwords = 0;
	struct cdns_i3c_xfer *xfer;
	int i, ret = 0;

	if (nxfers > master->caps.cmdfifodepth)
		return -ENOTSUPP;

	for (i = 0; i < nxfers; i++) {
		if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
			return -ENOTSUPP;

		if (xfers[i].flags & I2C_M_RD)
			nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
		else
			ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
	}

	if (ntxwords > master->caps.txfifodepth ||
	    nrxwords > master->caps.rxfifodepth)
		return -ENOTSUPP;

	xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];

		ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
			CMD0_FIFO_PL_LEN(xfers[i].len) |
			CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);

		if (xfers[i].flags & I2C_M_TEN)
			ccmd->cmd0 |= CMD0_FIFO_IS_10B;

		if (xfers[i].flags & I2C_M_RD) {
			ccmd->cmd0 |= CMD0_FIFO_RNW;
			ccmd->rx_buf = xfers[i].buf;
			ccmd->rx_len = xfers[i].len;
		} else {
			ccmd->tx_buf = xfers[i].buf;
			ccmd->tx_len = xfers[i].len;
		}
	}

	cdns_i3c_master_queue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		cdns_i3c_master_unqueue_xfer(master, xfer);

	ret = xfer->ret;
	cdns_i3c_master_free_xfer(xfer);

	return ret;
}

struct cdns_i3c_i2c_dev_data {
	u16 id;
	s16 ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};
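
/*
 * DEV_ID_RR0 stores the device address split across two fields (bits [7:1]
 * for addr[6:0], bits [15:13] for addr[9:7]) plus a parity bit in bit 0
 * computed as the inverted XOR of addr[6:0]. prepare_rr0_dev_address()
 * builds that layout from a plain 7/10-bit address.
 */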
static u32 prepare_rr0_dev_address(u32 addr)
{
	u32 ret = (addr << 1) & 0xff;

	/* RR0[7:1] = addr[6:0] */
	ret |= (addr & GENMASK(6, 0)) << 1;

	/* RR0[15:13] = addr[9:7] */
	ret |= (addr & GENMASK(9, 7)) << 6;

	/* RR0[0] = ~XOR(addr[6:0]) */
	if (!(hweight8(addr & 0x7f) & 1))
		ret |= 1;

	return ret;
}

static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	u32 rr;

	rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
				     dev->info.dyn_addr :
				     dev->info.static_addr);
	writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
}
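
/*
 * Retaining-register (RR) slot lookup: slot 0 always describes the master
 * itself. When no dynamic address is given, hand out the first free slot
 * tracked in free_rr_slots; otherwise look for the active slot whose
 * retaining register already holds that dynamic address.
 */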
static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
				       u8 dyn_addr)
{
	unsigned long activedevs;
	u32 rr;
	int i;

	if (!dyn_addr) {
		if (!master->free_rr_slots)
			return -ENOSPC;

		return ffs(master->free_rr_slots) - 1;
	}

	activedevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	activedevs &= ~BIT(0);

	for_each_set_bit(i, &activedevs, master->maxdevs + 1) {
		rr = readl(master->regs + DEV_ID_RR0(i));
		if (!(rr & DEV_ID_RR0_IS_I3C) ||
		    DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
			continue;

		return i;
	}

	return -EINVAL;
}

static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					    u8 old_dyn_addr)
{
	cdns_i3c_master_upd_i3c_addr(dev);

	return 0;
}

static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	int slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
	if (slot < 0) {
		kfree(data);
		return slot;
	}

	data->ibi = -1;
	data->id = slot;
	i3c_dev_set_master_data(dev, data);
	master->free_rr_slots &= ~BIT(slot);

	if (!dev->info.dyn_addr) {
		cdns_i3c_master_upd_i3c_addr(dev);
		writel(readl(master->regs + DEVS_CTRL) |
		       DEVS_CTRL_DEV_ACTIVE(data->id),
		       master->regs + DEVS_CTRL);
	}

	return 0;
}

static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);

	i3c_dev_set_master_data(dev, NULL);
	master->free_rr_slots |= BIT(data->id);
	kfree(data);
}

static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data;
	int slot;

	slot = cdns_i3c_master_get_rr_slot(master, 0);
	if (slot < 0)
		return slot;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->id = slot;
	master->free_rr_slots &= ~BIT(slot);
	i2c_dev_set_master_data(dev, data);

	writel(prepare_rr0_dev_address(dev->addr),
	       master->regs + DEV_ID_RR0(data->id));
	writel(dev->lvr, master->regs + DEV_ID_RR2(data->id));
	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_ACTIVE(data->id),
	       master->regs + DEVS_CTRL);

	return 0;
}

static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);

	writel(readl(master->regs + DEVS_CTRL) |
	       DEVS_CTRL_DEV_CLR(data->id),
	       master->regs + DEVS_CTRL);
	master->free_rr_slots |= BIT(data->id);

	i2c_dev_set_master_data(dev, NULL);
	kfree(data);
}

static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);

	cdns_i3c_master_disable(master);
}
static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
					   unsigned int slot,
					   struct i3c_device_info *info)
{
	u32 rr;

	memset(info, 0, sizeof(*info));
	rr = readl(master->regs + DEV_ID_RR0(slot));
	info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
	rr = readl(master->regs + DEV_ID_RR2(slot));
	info->dcr = rr;
	info->bcr = rr >> 8;
	info->pid = rr >> 16;
	info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
}
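
/*
 * Walk all attached I3C devices, derive the lowest maximum SDR frequency
 * they advertise (GETMXDS max read/write data speed) and, if that limit
 * changed, stretch the push-pull SCL low period in PRESCL_CTRL1 so the
 * slowest device can still follow the bus.
 */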
static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
{
	struct i3c_master_controller *m = &master->base;
	unsigned long i3c_lim_period, pres_step, ncycles;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	unsigned long new_i3c_scl_lim = 0;
	struct i3c_dev_desc *dev;
	u32 prescl1, ctrl;

	i3c_bus_for_each_i3cdev(bus, dev) {
		unsigned long max_fscl;

		max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
			       I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
		switch (max_fscl) {
		case I3C_SDR1_FSCL_8MHZ:
			max_fscl = 8000000;
			break;
		case I3C_SDR2_FSCL_6MHZ:
			max_fscl = 6000000;
			break;
		case I3C_SDR3_FSCL_4MHZ:
			max_fscl = 4000000;
			break;
		case I3C_SDR4_FSCL_2MHZ:
			max_fscl = 2000000;
			break;
		case I3C_SDR0_FSCL_MAX:
		default:
			max_fscl = 0;
			break;
		}

		if (max_fscl &&
		    (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
			new_i3c_scl_lim = max_fscl;
	}

	/* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
	if (new_i3c_scl_lim == master->i3c_scl_lim)
		return;

	master->i3c_scl_lim = new_i3c_scl_lim;
	if (!new_i3c_scl_lim)
		return;

	pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);

	/* Configure PP_LOW to meet I3C slave limitations. */
	prescl1 = readl(master->regs + PRESCL_CTRL1) &
		  ~PRESCL_CTRL1_PP_LOW_MASK;
	ctrl = readl(master->regs + CTRL);

	i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
	ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
	if (ncycles < 4)
		ncycles = 0;
	else
		ncycles -= 4;

	prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);

	/* Disable I3C master before updating PRESCL_CTRL1. */
	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_disable(master);

	writel(prescl1, master->regs + PRESCL_CTRL1);

	if (ctrl & CTRL_DEV_EN)
		cdns_i3c_master_enable(master);
}
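
/*
 * Dynamic Address Assignment: pre-load every free retaining-register slot
 * with a candidate dynamic address, issue ENTDAA, then register the devices
 * that actually appeared and clear the slots that were left unused.
 */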
static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned long olddevs, newdevs;
	int ret, slot;
	u8 addrs[MAX_DEVS] = { };
	u8 last_addr = 0;

	olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	olddevs |= BIT(0);

	/* Prepare RR slots before launching DAA. */
	for_each_clear_bit(slot, &olddevs, master->maxdevs + 1) {
		ret = i3c_master_get_free_addr(m, last_addr + 1);
		if (ret < 0)
			return -ENOSPC;

		last_addr = ret;
		addrs[slot] = last_addr;
		writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
		       master->regs + DEV_ID_RR0(slot));
		writel(0, master->regs + DEV_ID_RR1(slot));
		writel(0, master->regs + DEV_ID_RR2(slot));
	}

	ret = i3c_master_entdaa_locked(&master->base);
	if (ret && ret != I3C_ERROR_M2)
		return ret;

	newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	newdevs &= ~olddevs;

	/*
	 * Register the devices that acquired a slot during DAA. We already
	 * have the addresses assigned to them in the addrs array.
	 */
	for_each_set_bit(slot, &newdevs, master->maxdevs + 1)
		i3c_master_add_i3c_dev_locked(m, addrs[slot]);

	/*
	 * Clear slots that ended up not being used. Can be caused by I3C
	 * device creation failure or when the I3C device was already known
	 * by the system but with a different address (in this case the device
	 * already has a slot and does not need a new one).
	 */
	writel(readl(master->regs + DEVS_CTRL) |
	       master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
	       master->regs + DEVS_CTRL);

	i3c_master_defslvs_locked(&master->base);

	cdns_i3c_master_upd_i3c_scl_lim(master);

	/* Unmask Hot-Join and Mastership request interrupts. */
	i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
			       I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);

	return 0;
}
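
/*
 * Convert the SoC-specific tHD_PP delay (in ns, from the match data) into
 * the encoded CTRL_THD_DELAY field: the register value is THD_DELAY_MAX
 * minus the number of sysclk cycles needed to cover the requested delay.
 */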
static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master)
{
	unsigned long sysclk_rate = clk_get_rate(master->sysclk);
	u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns,
				    (NSEC_PER_SEC / sysclk_rate));

	/* Every value greater than 3 is not valid. */
	if (thd_delay > THD_DELAY_MAX)
		thd_delay = THD_DELAY_MAX;

	/* CTLR_THD_DEL value is encoded. */
	return (THD_DELAY_MAX - thd_delay);
}
static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	unsigned long pres_step, sysclk_rate, max_i2cfreq;
	struct i3c_bus *bus = i3c_master_get_bus(m);
	u32 ctrl, prescl0, prescl1, pres, low;
	struct i3c_device_info info = { };
	int ret, ncycles;

	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		ctrl = CTRL_PURE_BUS_MODE;
		break;

	case I3C_BUS_MODE_MIXED_FAST:
		ctrl = CTRL_MIXED_FAST_BUS_MODE;
		break;

	case I3C_BUS_MODE_MIXED_SLOW:
		ctrl = CTRL_MIXED_SLOW_BUS_MODE;
		break;

	default:
		return -EINVAL;
	}

	sysclk_rate = clk_get_rate(master->sysclk);
	if (!sysclk_rate)
		return -EINVAL;

	pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
	if (pres > PRESCL_CTRL0_MAX)
		return -ERANGE;

	bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);

	prescl0 = PRESCL_CTRL0_I3C(pres);

	low = ((I3C_BUS_TLOW_OD_MIN_NS * sysclk_rate) / (pres + 1)) - 2;
	prescl1 = PRESCL_CTRL1_OD_LOW(low);

	max_i2cfreq = bus->scl_rate.i2c;

	pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
	if (pres > PRESCL_CTRL0_MAX)
		return -ERANGE;

	bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);

	prescl0 |= PRESCL_CTRL0_I2C(pres);
	writel(prescl0, master->regs + PRESCL_CTRL0);

	/* Calculate OD and PP low. */
	pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
	ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
	if (ncycles < 0)
		ncycles = 0;
	prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
	writel(prescl1, master->regs + PRESCL_CTRL1);

	/* Get an address for the master. */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;

	writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
	       master->regs + DEV_ID_RR0(0));

	cdns_i3c_master_dev_rr_to_info(master, 0, &info);
	if (info.bcr & I3C_BCR_HDR_CAP)
		info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		return ret;

	/*
	 * Enable Hot-Join, and, when a Hot-Join request happens, disable all
	 * events coming from this device.
	 *
	 * We will issue ENTDAA afterwards from the threaded IRQ handler.
	 */
	ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;

	/*
	 * Configure the data hold delay based on device-specific data.
	 *
	 * MIPI I3C Specification 1.0 defines a non-zero minimal tHD_PP timing
	 * on the master output. This setting makes it possible to meet that
	 * timing on the master's SoC outputs, regardless of PCB balancing.
	 */
	ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master));
	writel(ctrl, master->regs + CTRL);

	cdns_i3c_master_enable(master);

	return 0;
}
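
/*
 * IBI handling: look up the device that raised the IBI from its slave ID,
 * copy the IBI payload from the IBI data FIFO into a free slot of the
 * device's IBI pool and queue it to the I3C core. If no slot is available
 * or the IBI is flagged as an error, the payload is still drained from the
 * FIFO so it does not corrupt the next IBI.
 */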
static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
				       u32 ibir)
{
	struct cdns_i3c_i2c_dev_data *data;
	bool data_consumed = false;
	struct i3c_ibi_slot *slot;
	u32 id = IBIR_SLVID(ibir);
	struct i3c_dev_desc *dev;
	size_t nbytes;
	u8 *buf;

	/*
	 * FIXME: maybe we should report the FIFO OVF errors to the upper
	 * layer.
	 */
	if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
		goto out;

	dev = master->ibi.slots[id];
	spin_lock(&master->ibi.lock);

	data = i3c_dev_get_master_data(dev);
	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot)
		goto out_unlock;

	buf = slot->data;

	nbytes = IBIR_XFER_BYTES(ibir);
	readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
	if (nbytes & 3) {
		u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);

		memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
	}

	slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
			  dev->ibi->max_payload_len);
	i3c_master_queue_ibi(dev, slot);
	data_consumed = true;

out_unlock:
	spin_unlock(&master->ibi.lock);

out:
	/* Consume data from the FIFO if it hasn't been done already. */
	if (!data_consumed) {
		int i;

		for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
			readl(master->regs + IBI_DATA_FIFO);
	}
}
static void cnds_i3c_master_demux_ibis(struct cdns_i3c_master *master)
{
	u32 status0;

	writel(MST_INT_IBIR_THR, master->regs + MST_ICR);

	for (status0 = readl(master->regs + MST_STATUS0);
	     !(status0 & MST_STATUS0_IBIR_EMP);
	     status0 = readl(master->regs + MST_STATUS0)) {
		u32 ibir = readl(master->regs + IBIR);

		switch (IBIR_TYPE(ibir)) {
		case IBIR_TYPE_IBI:
			cdns_i3c_master_handle_ibi(master, ibir);
			break;

		case IBIR_TYPE_HJ:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			queue_work(master->base.wq, &master->hj_work);
			break;

		case IBIR_TYPE_MR:
			WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
			break;

		default:
			break;
		}
	}
}
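
/*
 * Interrupt handler: command-descriptor-empty events complete the current
 * transfer under the xferqueue lock, while IBI-response-threshold events
 * are demultiplexed into IBI, Hot-Join and Mastership-Request handling.
 */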
static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
{
	struct cdns_i3c_master *master = data;
	u32 status;

	status = readl(master->regs + MST_ISR);
	if (!(status & readl(master->regs + MST_IMR)))
		return IRQ_NONE;

	spin_lock(&master->xferqueue.lock);
	cdns_i3c_master_end_xfer_locked(master, status);
	spin_unlock(&master->xferqueue.lock);

	if (status & MST_INT_IBIR_THR)
		cnds_i3c_master_demux_ibis(master);

	return IRQ_HANDLED;
}

static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	u32 sirmap;
	int ret;

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
				      I3C_CCC_EVENT_SIR);
	if (ret)
		return ret;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sirmap |= SIR_MAP_DEV_CONF(data->ibi,
				   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	return ret;
}

static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	u32 sircfg, sirmap;
	int ret;

	spin_lock_irqsave(&master->ibi.lock, flags);
	sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
	sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
	sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
		 SIR_MAP_DEV_DA(dev->info.dyn_addr) |
		 SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
		 SIR_MAP_DEV_ACK;

	if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
		sircfg |= SIR_MAP_DEV_SLOW;

	sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
	writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
				     I3C_CCC_EVENT_SIR);
	if (ret) {
		spin_lock_irqsave(&master->ibi.lock, flags);
		sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
		sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
		sirmap |= SIR_MAP_DEV_CONF(data->ibi,
					   SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
		writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
		spin_unlock_irqrestore(&master->ibi.lock, flags);
	}

	return ret;
}

static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	unsigned int i;

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			data->ibi = i;
			master->ibi.slots[i] = dev;
			break;
		}
	}
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	if (i < master->ibi.num_slots)
		return 0;

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;

	return -ENOSPC;
}

static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct cdns_i3c_master *master = to_cdns_i3c_master(m);
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	data->ibi = -1;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
}

static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					     struct i3c_ibi_slot *slot)
{
	struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}
static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
	.bus_init = cdns_i3c_master_bus_init,
	.bus_cleanup = cdns_i3c_master_bus_cleanup,
	.do_daa = cdns_i3c_master_do_daa,
	.attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
	.reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
	.detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
	.attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
	.supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
	.priv_xfers = cdns_i3c_master_priv_xfers,
	.i2c_xfers = cdns_i3c_master_i2c_xfers,
	.enable_ibi = cdns_i3c_master_enable_ibi,
	.disable_ibi = cdns_i3c_master_disable_ibi,
	.request_ibi = cdns_i3c_master_request_ibi,
	.free_ibi = cdns_i3c_master_free_ibi,
	.recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
};

static void cdns_i3c_master_hj(struct work_struct *work)
{
	struct cdns_i3c_master *master = container_of(work,
						      struct cdns_i3c_master,
						      hj_work);

	i3c_master_do_daa(&master->base);
}

static struct cdns_i3c_data cdns_i3c_devdata = {
	.thd_delay_ns = 10,
};

static const struct of_device_id cdns_i3c_master_of_ids[] = {
	{ .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata },
	{ /* sentinel */ },
};
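
/*
 * Probe: map the registers, enable pclk/sysclk, check the block ID, read the
 * FIFO depths and number of retaining-register slots from CONF_STATUS0/1,
 * set up the IBI bookkeeping and interrupt thresholds, and finally register
 * the controller with the I3C core.
 */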
static int cdns_i3c_master_probe(struct platform_device *pdev)
{
	struct cdns_i3c_master *master;
	int ret, irq;
	u32 val;

	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->devdata = of_device_get_match_data(&pdev->dev);
	if (!master->devdata)
		return -EINVAL;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(master->pclk))
		return PTR_ERR(master->pclk);

	master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
	if (IS_ERR(master->sysclk))
		return PTR_ERR(master->sysclk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = clk_prepare_enable(master->pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(master->sysclk);
	if (ret)
		goto err_disable_pclk;

	if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
		ret = -EINVAL;
		goto err_disable_sysclk;
	}

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
	writel(0xffffffff, master->regs + MST_IDR);
	writel(0xffffffff, master->regs + SLV_IDR);
	ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
			       dev_name(&pdev->dev), master);
	if (ret)
		goto err_disable_sysclk;

	platform_set_drvdata(pdev, master);

	val = readl(master->regs + CONF_STATUS0);

	/* Device ID0 is reserved to describe this master. */
	master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
	master->free_rr_slots = GENMASK(master->maxdevs, 1);
	master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
	master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);

	val = readl(master->regs + CONF_STATUS1);
	master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
	master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
	master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_sysclk;
	}

	writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
	writel(MST_INT_IBIR_THR, master->regs + MST_IER);
	writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);

	ret = i3c_master_register(&master->base, &pdev->dev,
				  &cdns_i3c_master_ops, false);
	if (ret)
		goto err_disable_sysclk;

	return 0;

err_disable_sysclk:
	clk_disable_unprepare(master->sysclk);

err_disable_pclk:
	clk_disable_unprepare(master->pclk);

	return ret;
}

static int cdns_i3c_master_remove(struct platform_device *pdev)
{
	struct cdns_i3c_master *master = platform_get_drvdata(pdev);
	int ret;

	ret = i3c_master_unregister(&master->base);
	if (ret)
		return ret;

	clk_disable_unprepare(master->sysclk);
	clk_disable_unprepare(master->pclk);

	return 0;
}

static struct platform_driver cdns_i3c_master = {
	.probe = cdns_i3c_master_probe,
	.remove = cdns_i3c_master_remove,
	.driver = {
		.name = "cdns-i3c-master",
		.of_match_table = cdns_i3c_master_of_ids,
	},
};
module_platform_driver(cdns_i3c_master);

MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_DESCRIPTION("Cadence I3C master driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cdns-i3c-master");