imx-mailbox.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2018 Pengutronix, Oleksij Rempel <[email protected]>
  4. * Copyright 2022 NXP, Peng Fan <[email protected]>
  5. */
  6. #include <linux/clk.h>
  7. #include <linux/firmware/imx/ipc.h>
  8. #include <linux/firmware/imx/s4.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/io.h>
  11. #include <linux/iopoll.h>
  12. #include <linux/jiffies.h>
  13. #include <linux/kernel.h>
  14. #include <linux/mailbox_controller.h>
  15. #include <linux/module.h>
  16. #include <linux/of_device.h>
  17. #include <linux/pm_runtime.h>
  18. #include <linux/suspend.h>
  19. #include <linux/slab.h>
/* Generic MU: 4 TX + 4 RX + 4 TXDB + 4 RXDB + 1 RST channel */
#define IMX_MU_CHANS		17
/* SCU MU: TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS	6
/* S4 MU: TX0/RX0 */
#define IMX_MU_S4_CHANS		2
#define IMX_MU_CHAN_NAME_SIZE	20
/* Number of receive registers drained during generic init */
#define IMX_MU_NUM_RR		4
/* Bound for SECO busy-wait transfers, per word direction */
#define IMX_MU_SECO_TX_TOUT	(msecs_to_jiffies(3000))
#define IMX_MU_SECO_RX_TOUT	(msecs_to_jiffies(3000))

/*
 * Please do not change the order of TX & RX: the values are used
 * directly to index priv->irq[] and to derive channel numbers.
 */
enum imx_mu_chan_type {
	IMX_MU_TYPE_TX = 0,	/* Tx */
	IMX_MU_TYPE_RX = 1,	/* Rx */
	IMX_MU_TYPE_TXDB = 2,	/* Tx doorbell */
	IMX_MU_TYPE_RXDB = 3,	/* Rx doorbell */
	IMX_MU_TYPE_RST = 4,	/* Reset */
};
/*
 * Logical indices into imx_mu_dcfg::xCR[].  On MU v1 variants every
 * entry points at the same combined control register offset; MU v2
 * variants have a distinct register per function (see the dcfg tables).
 */
enum imx_mu_xcr {
	IMX_MU_CR,	/* control (reset bit lives here) */
	IMX_MU_GIER,	/* general purpose interrupt enable */
	IMX_MU_GCR,	/* general purpose control (doorbell requests) */
	IMX_MU_TCR,	/* transmit interrupt enable */
	IMX_MU_RCR,	/* receive interrupt enable */
	IMX_MU_xCR_MAX,
};
/*
 * Logical indices into imx_mu_dcfg::xSR[].  Same scheme as imx_mu_xcr:
 * one shared offset on MU v1, separate registers on MU v2.
 */
enum imx_mu_xsr {
	IMX_MU_SR,	/* general status (reset state bit) */
	IMX_MU_GSR,	/* general purpose interrupt pending */
	IMX_MU_TSR,	/* transmit register empty flags */
	IMX_MU_RSR,	/* receive register full flags */
	IMX_MU_xSR_MAX,
};
/* Largest SCU RPC message: header plus 30 payload words */
struct imx_sc_rpc_msg_max {
	struct imx_sc_rpc_msg hdr;
	u32 data[30];
};

/* Largest Sentinel/S4 RPC message: header plus 254 payload words */
struct imx_s4_rpc_msg_max {
	struct imx_s4_rpc_msg hdr;
	u32 data[254];
};
/* Per-channel state, attached to each mbox_chan as con_priv */
struct imx_mu_con_priv {
	unsigned int		idx;		/* register index within the type (0-3) */
	char			irq_desc[IMX_MU_CHAN_NAME_SIZE]; /* name used for request_irq() */
	enum imx_mu_chan_type	type;		/* TX/RX/TXDB/RXDB/RST */
	struct mbox_chan	*chan;		/* back-pointer to the framework channel */
	struct tasklet_struct	txdb_tasklet;	/* defers txdone for doorbell sends */
};
/* Driver instance state for one Messaging Unit */
struct imx_mu_priv {
	struct device		*dev;
	void __iomem		*base;		/* MU register block */
	void			*msg;		/* scratch buffer for SCU/S4 multi-word RPC */
	spinlock_t		xcr_lock;	/* control register lock */

	struct mbox_controller	mbox;
	struct mbox_chan	mbox_chans[IMX_MU_CHANS];

	struct imx_mu_con_priv	con_priv[IMX_MU_CHANS];
	const struct imx_mu_dcfg *dcfg;		/* SoC variant data */
	struct clk		*clk;		/* optional; NULL when always-on */
	int			irq[IMX_MU_CHANS]; /* indexed by channel type */
	bool			suspend;	/* set between suspend/resume noirq */

	u32			xcr[IMX_MU_xCR_MAX]; /* saved control regs (clk-less suspend) */

	bool			side_b;		/* "fsl,mu-side-b": skip HW init */
};
/*
 * MU variant flags; tested with '&', so every non-zero value must stay
 * a distinct bit.  IMX_MU_V1 (0) is the implicit default.
 */
enum imx_mu_type {
	IMX_MU_V1,
	IMX_MU_V2 = BIT(1),	/* MU v2 register layout */
	IMX_MU_V2_S4 = BIT(15),	/* Sentinel/S4 firmware protocol */
	IMX_MU_V2_IRQ = BIT(16),	/* separate named tx/rx interrupts */
};
/* Per-SoC configuration: transfer ops, variant flags and register map */
struct imx_mu_dcfg {
	int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
	int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
	int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
	void (*init)(struct imx_mu_priv *priv);
	enum imx_mu_type type;
	u32	xTR;		/* Transmit Register0 */
	u32	xRR;		/* Receive Register0 */
	u32	xSR[IMX_MU_xSR_MAX];	/* Status Registers */
	u32	xCR[IMX_MU_xCR_MAX];	/* Control Registers */
};
/*
 * Per-channel bit helpers.  MU v2 uses one bit per channel from bit 0
 * upwards; MU v1 packs the four channel bits into the upper half of the
 * combined register with channel 0 in the highest bit of each nibble,
 * hence the "3 - x" mirroring.
 */
/* General Purpose Interrupt Pending */
#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
/* Receive Register Full */
#define IMX_MU_xSR_RFn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
/* Transmit Register Empty */
#define IMX_MU_xSR_TEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))

/* General Purpose Interrupt Enable */
#define IMX_MU_xCR_GIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
/* Receive Interrupt Enable */
#define IMX_MU_xCR_RIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
/* Transmit Interrupt Enable */
#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
/* MU reset */
#define IMX_MU_xCR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(5))
#define IMX_MU_xSR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(7))
  113. static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
  114. {
  115. return container_of(mbox, struct imx_mu_priv, mbox);
  116. }
  117. static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
  118. {
  119. iowrite32(val, priv->base + offs);
  120. }
  121. static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
  122. {
  123. return ioread32(priv->base + offs);
  124. }
  125. static int imx_mu_tx_waiting_write(struct imx_mu_priv *priv, u32 val, u32 idx)
  126. {
  127. u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_TX_TOUT;
  128. u32 status;
  129. u32 can_write;
  130. dev_dbg(priv->dev, "Trying to write %.8x to idx %d\n", val, idx);
  131. do {
  132. status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
  133. can_write = status & IMX_MU_xSR_TEn(priv->dcfg->type, idx % 4);
  134. } while (!can_write && time_is_after_jiffies64(timeout_time));
  135. if (!can_write) {
  136. dev_err(priv->dev, "timeout trying to write %.8x at %d(%.8x)\n",
  137. val, idx, status);
  138. return -ETIME;
  139. }
  140. imx_mu_write(priv, val, priv->dcfg->xTR + (idx % 4) * 4);
  141. return 0;
  142. }
  143. static int imx_mu_rx_waiting_read(struct imx_mu_priv *priv, u32 *val, u32 idx)
  144. {
  145. u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_RX_TOUT;
  146. u32 status;
  147. u32 can_read;
  148. dev_dbg(priv->dev, "Trying to read from idx %d\n", idx);
  149. do {
  150. status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
  151. can_read = status & IMX_MU_xSR_RFn(priv->dcfg->type, idx % 4);
  152. } while (!can_read && time_is_after_jiffies64(timeout_time));
  153. if (!can_read) {
  154. dev_err(priv->dev, "timeout trying to read idx %d (%.8x)\n",
  155. idx, status);
  156. return -ETIME;
  157. }
  158. *val = imx_mu_read(priv, priv->dcfg->xRR + (idx % 4) * 4);
  159. dev_dbg(priv->dev, "Read %.8x\n", *val);
  160. return 0;
  161. }
  162. static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, enum imx_mu_xcr type, u32 set, u32 clr)
  163. {
  164. unsigned long flags;
  165. u32 val;
  166. spin_lock_irqsave(&priv->xcr_lock, flags);
  167. val = imx_mu_read(priv, priv->dcfg->xCR[type]);
  168. val &= ~clr;
  169. val |= set;
  170. imx_mu_write(priv, val, priv->dcfg->xCR[type]);
  171. spin_unlock_irqrestore(&priv->xcr_lock, flags);
  172. return val;
  173. }
/*
 * Single-word transmit for generic MUs.
 *
 * TX: write the word, then enable the transmit-empty interrupt so the
 * ISR can report txdone once the peer reads it.
 * TXDB: raise the general purpose interrupt request toward the peer;
 * doorbells have no hardware ACK, so txdone is simulated via tasklet.
 */
static int imx_mu_generic_tx(struct imx_mu_priv *priv,
			     struct imx_mu_con_priv *cp,
			     void *data)
{
	u32 *arg = data;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_write(priv, *arg, priv->dcfg->xTR + cp->idx * 4);
		imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
		break;
	case IMX_MU_TYPE_TXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
		tasklet_schedule(&cp->txdb_tasklet);
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}
  194. static int imx_mu_generic_rx(struct imx_mu_priv *priv,
  195. struct imx_mu_con_priv *cp)
  196. {
  197. u32 dat;
  198. dat = imx_mu_read(priv, priv->dcfg->xRR + (cp->idx) * 4);
  199. mbox_chan_received_data(cp->chan, (void *)&dat);
  200. return 0;
  201. }
/*
 * Receive-doorbell handler: acknowledge the pending general purpose
 * interrupt (write-1-to-clear in GSR) and notify the client with a
 * NULL payload (doorbells carry no data).
 */
static int imx_mu_generic_rxdb(struct imx_mu_priv *priv,
			       struct imx_mu_con_priv *cp)
{
	imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
		     priv->dcfg->xSR[IMX_MU_GSR]);
	mbox_chan_received_data(cp->chan, NULL);

	return 0;
}
/*
 * Multi-word RPC transmit for SCU/S4 firmware MUs.  The first word of
 * @data is the RPC header whose .size field gives the message length
 * in u32 words.  The transmit registers are filled round-robin; once
 * all are in flight, each further word polls (up to 5 s) for its
 * register to drain before writing.
 */
static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
{
	u32 *arg = data;
	int i, ret;
	u32 xsr;
	u32 size, max_size, num_tr;

	/* S4 MUs have 8 transmit registers, SCU MUs have 4 */
	if (priv->dcfg->type & IMX_MU_V2_S4) {
		size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
		max_size = sizeof(struct imx_s4_rpc_msg_max);
		num_tr = 8;
	} else {
		size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
		max_size = sizeof(struct imx_sc_rpc_msg_max);
		num_tr = 4;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		/*
		 * msg->hdr.size specifies the number of u32 words while
		 * sizeof yields bytes.
		 */
		if (size > max_size / 4) {
			/*
			 * The real message size can be different to
			 * struct imx_sc_rpc_msg_max/imx_s4_rpc_msg_max size
			 */
			dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on TX; got: %i bytes\n", max_size, size << 2);
			return -EINVAL;
		}

		/* Fill the transmit registers that are free right now */
		for (i = 0; i < num_tr && i < size; i++)
			imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
		/* Remaining words: wait for the peer to drain each slot */
		for (; i < size; i++) {
			ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR],
						 xsr,
						 xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % num_tr),
						 0, 5 * USEC_PER_SEC);
			if (ret) {
				dev_err(priv->dev, "Send data index: %d timeout\n", i);
				return ret;
			}
			imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
		}

		/* Arm the TE interrupt so the ISR can signal txdone */
		imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}
/*
 * Multi-word RPC receive for SCU/S4 firmware MUs, called from the ISR
 * after the first receive register filled.  Reads the header word to
 * learn the message length, then polls (up to 5 s per word) for the
 * rest, and delivers the assembled message from priv->msg.
 */
static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
	u32 *data;
	int i, ret;
	u32 xsr;
	u32 size, max_size;

	data = (u32 *)priv->msg;

	/* Mask RIE while draining; re-enabled below once complete */
	imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0));
	*data++ = imx_mu_read(priv, priv->dcfg->xRR);

	/* First word is the RPC header; .size counts u32 words */
	if (priv->dcfg->type & IMX_MU_V2_S4) {
		size = ((struct imx_s4_rpc_msg_max *)priv->msg)->hdr.size;
		max_size = sizeof(struct imx_s4_rpc_msg_max);
	} else {
		size = ((struct imx_sc_rpc_msg_max *)priv->msg)->hdr.size;
		max_size = sizeof(struct imx_sc_rpc_msg_max);
	}

	/*
	 * NOTE(review): error returns below leave RIE masked, so further
	 * receive interrupts stay off after a bad/timed-out message —
	 * confirm this is the intended recovery behavior.
	 */
	if (size > max_size / 4) {
		dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on RX; got: %i bytes\n", max_size, size << 2);
		return -EINVAL;
	}

	for (i = 1; i < size; i++) {
		ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
					 xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % 4), 0,
					 5 * USEC_PER_SEC);
		if (ret) {
			dev_err(priv->dev, "timeout read idx %d\n", i);
			return ret;
		}
		*data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
	}

	imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
	mbox_chan_received_data(cp->chan, (void *)priv->msg);

	return 0;
}
/*
 * SECO transmit: a multi-word message sent over the TXDB channel.
 * The header word goes out first, then the doorbell is rung so the
 * SECO firmware starts draining, then the remaining words follow —
 * the first three immediately, the rest via busy-wait writes.
 */
static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
			  void *data)
{
	struct imx_sc_rpc_msg_max *msg = data;
	u32 *arg = data;
	u32 byte_size;
	int err;
	int i;

	dev_dbg(priv->dev, "Sending message\n");

	switch (cp->type) {
	case IMX_MU_TYPE_TXDB:
		byte_size = msg->hdr.size * sizeof(u32);
		if (byte_size > sizeof(*msg)) {
			/*
			 * The real message size can be different to
			 * struct imx_sc_rpc_msg_max size
			 */
			dev_err(priv->dev,
				"Exceed max msg size (%zu) on TX, got: %i\n",
				sizeof(*msg), byte_size);
			return -EINVAL;
		}

		print_hex_dump_debug("from client ", DUMP_PREFIX_OFFSET, 4, 4,
				     data, byte_size, false);

		/* Send first word */
		dev_dbg(priv->dev, "Sending header\n");
		imx_mu_write(priv, *arg++, priv->dcfg->xTR);

		/* Send signaling */
		dev_dbg(priv->dev, "Sending signaling\n");
		imx_mu_xcr_rmw(priv, IMX_MU_GCR,
			       IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);

		/* Send words to fill the mailbox */
		for (i = 1; i < 4 && i < msg->hdr.size; i++) {
			dev_dbg(priv->dev, "Sending word %d\n", i);
			imx_mu_write(priv, *arg++,
				     priv->dcfg->xTR + (i % 4) * 4);
		}

		/* Send rest of message waiting for remote read */
		for (; i < msg->hdr.size; i++) {
			dev_dbg(priv->dev, "Sending word %d\n", i);
			err = imx_mu_tx_waiting_write(priv, *arg++, i);
			if (err) {
				dev_err(priv->dev, "Timeout tx %d\n", i);
				return err;
			}
		}

		/* Simulate txdone for the mbox framework (no HW ACK) */
		tasklet_schedule(&cp->txdb_tasklet);
		break;
	default:
		dev_warn_ratelimited(priv->dev,
				     "Send data on wrong channel type: %d\n",
				     cp->type);
		return -EINVAL;
	}

	return 0;
}
/*
 * SECO receive-doorbell: the doorbell interrupt announces a full
 * message.  Read the header to learn the length, busy-wait read the
 * remaining words, clear the pending GIP bit, and deliver the message
 * (or an ERR_PTR on failure) to the client.
 */
static int imx_mu_seco_rxdb(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
	struct imx_sc_rpc_msg_max msg;
	u32 *data = (u32 *)&msg;
	u32 byte_size;
	int err = 0;
	int i;

	dev_dbg(priv->dev, "Receiving message\n");

	/* Read header */
	dev_dbg(priv->dev, "Receiving header\n");
	*data++ = imx_mu_read(priv, priv->dcfg->xRR);
	byte_size = msg.hdr.size * sizeof(u32);
	if (byte_size > sizeof(msg)) {
		dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
			sizeof(msg), byte_size);
		err = -EINVAL;
		goto error;
	}

	/* Read message waiting they are written */
	for (i = 1; i < msg.hdr.size; i++) {
		dev_dbg(priv->dev, "Receiving word %d\n", i);
		err = imx_mu_rx_waiting_read(priv, data++, i);
		if (err) {
			dev_err(priv->dev, "Timeout rx %d\n", i);
			goto error;
		}
	}

	/* Clear GIP (write-1-to-clear) */
	imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
		     priv->dcfg->xSR[IMX_MU_GSR]);

	print_hex_dump_debug("to client ", DUMP_PREFIX_OFFSET, 4, 4,
			     &msg, byte_size, false);

	/* send data to client */
	dev_dbg(priv->dev, "Sending message to client\n");
	mbox_chan_received_data(cp->chan, (void *)&msg);

	goto exit;

error:
	/* Clients get the error as an ERR_PTR payload */
	mbox_chan_received_data(cp->chan, ERR_PTR(err));

exit:
	return err;
}
  392. static void imx_mu_txdb_tasklet(unsigned long data)
  393. {
  394. struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
  395. mbox_chan_txdone(cp->chan, 0);
  396. }
/*
 * Interrupt handler, registered per channel (the line may be shared).
 * Keeps only this channel's status bit, and only when the matching
 * enable bit is set, so spurious/foreign events fall through to
 * IRQ_NONE for the next handler on a shared line.
 */
static irqreturn_t imx_mu_isr(int irq, void *p)
{
	struct mbox_chan *chan = p;
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	u32 val, ctrl;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_TCR]);
		val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
		val &= IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx) &
		       (ctrl & IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_RCR]);
		val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
		val &= IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx) &
		       (ctrl & IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_GIER]);
		val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
		val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
		       (ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RST:
		/* RST channels never generate interrupts */
		return IRQ_NONE;
	default:
		dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
				     cp->type);
		return IRQ_NONE;
	}

	if (!val)
		return IRQ_NONE;

	if ((val == IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx)) &&
	    (cp->type == IMX_MU_TYPE_TX)) {
		/* TX register drained: mask TIE and complete the send */
		imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
		mbox_chan_txdone(chan, 0);
	} else if ((val == IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx)) &&
		   (cp->type == IMX_MU_TYPE_RX)) {
		priv->dcfg->rx(priv, cp);
	} else if ((val == IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx)) &&
		   (cp->type == IMX_MU_TYPE_RXDB)) {
		priv->dcfg->rxdb(priv, cp);
	} else {
		dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
		return IRQ_NONE;
	}

	/* MU traffic during system suspend is a wakeup event */
	if (priv->suspend)
		pm_system_wakeup();

	return IRQ_HANDLED;
}
  449. static int imx_mu_send_data(struct mbox_chan *chan, void *data)
  450. {
  451. struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
  452. struct imx_mu_con_priv *cp = chan->con_priv;
  453. return priv->dcfg->tx(priv, cp, data);
  454. }
/*
 * mbox_chan_ops::startup — take a runtime PM reference, set up the
 * channel's IRQ (or the txdone tasklet for TXDB channels, which never
 * interrupt), and enable the relevant receive interrupt.
 */
static int imx_mu_startup(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	unsigned long irq_flag = 0;
	int ret;

	pm_runtime_get_sync(priv->dev);
	if (cp->type == IMX_MU_TYPE_TXDB) {
		/* Tx doorbell don't have ACK support; txdone via tasklet */
		tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
			     (unsigned long)cp);
		return 0;
	}

	/* IPC MU should be with IRQF_NO_SUSPEND set */
	if (!priv->dev->pm_domain)
		irq_flag |= IRQF_NO_SUSPEND;

	/* Without dedicated tx/rx lines, all channels share one IRQ */
	if (!(priv->dcfg->type & IMX_MU_V2_IRQ))
		irq_flag |= IRQF_SHARED;

	ret = request_irq(priv->irq[cp->type], imx_mu_isr, irq_flag, cp->irq_desc, chan);
	if (ret) {
		dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq[cp->type]);
		return ret;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx), 0);
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_GIER, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx), 0);
		break;
	default:
		/* TX interrupts are enabled on demand by the tx paths */
		break;
	}

	return 0;
}
/*
 * mbox_chan_ops::shutdown — undo imx_mu_startup(): disable the
 * channel's interrupt enable bit, free its IRQ, and drop the runtime
 * PM reference.  RST channels instead trigger an MU reset and wait
 * briefly for the reset-state bit to clear.
 */
static void imx_mu_shutdown(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	int ret;
	u32 sr;

	if (cp->type == IMX_MU_TYPE_TXDB) {
		/* TXDB never requested an IRQ; only the tasklet exists */
		tasklet_kill(&cp->txdb_tasklet);
		pm_runtime_put_sync(priv->dev);
		return;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RST:
		/* Assert reset, then poll the RST status bit (1-5 us) */
		imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0);
		ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr,
					 !(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5);
		if (ret)
			dev_warn(priv->dev, "RST channel timeout\n");
		break;
	default:
		break;
	}

	free_irq(priv->irq[cp->type], chan);
	pm_runtime_put_sync(priv->dev);
}
/* Channel operations shared by all MU variants */
static const struct mbox_chan_ops imx_mu_ops = {
	.send_data = imx_mu_send_data,
	.startup = imx_mu_startup,
	.shutdown = imx_mu_shutdown,
};
/*
 * of_xlate for SCU/S4 firmware MUs.  Channel layout is compact:
 * chan 0 = TX0, chan 1 = RX0, chans 2.. = RXDB[idx].  The specifier
 * is <type idx>; TX/RX only support idx 0.
 */
static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
					       const struct of_phandle_args *sp)
{
	u32 type, idx, chan;

	if (sp->args_count != 2) {
		dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	type = sp->args[0]; /* channel type */
	idx = sp->args[1]; /* index */

	switch (type) {
	case IMX_MU_TYPE_TX:
	case IMX_MU_TYPE_RX:
		if (idx != 0)
			dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
		chan = type;
		break;
	case IMX_MU_TYPE_RXDB:
		chan = 2 + idx;
		break;
	default:
		dev_err(mbox->dev, "Invalid chan type: %d\n", type);
		return ERR_PTR(-EINVAL);
	}

	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
		return ERR_PTR(-EINVAL);
	}

	return &mbox->chans[chan];
}
  559. static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
  560. const struct of_phandle_args *sp)
  561. {
  562. u32 type, idx, chan;
  563. if (sp->args_count != 2) {
  564. dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
  565. return ERR_PTR(-EINVAL);
  566. }
  567. type = sp->args[0]; /* channel type */
  568. idx = sp->args[1]; /* index */
  569. chan = type * 4 + idx;
  570. if (chan >= mbox->num_chans) {
  571. dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
  572. return ERR_PTR(-EINVAL);
  573. }
  574. return &mbox->chans[chan];
  575. }
  576. static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
  577. const struct of_phandle_args *sp)
  578. {
  579. u32 type;
  580. if (sp->args_count < 1) {
  581. dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
  582. return ERR_PTR(-EINVAL);
  583. }
  584. type = sp->args[0]; /* channel type */
  585. /* Only supports TXDB and RXDB */
  586. if (type == IMX_MU_TYPE_TX || type == IMX_MU_TYPE_RX) {
  587. dev_err(mbox->dev, "Invalid type: %d\n", type);
  588. return ERR_PTR(-EINVAL);
  589. }
  590. return imx_mu_xlate(mbox, sp);
  591. }
/*
 * Set up the 17 generic channels (4 per type plus RST) and, unless we
 * are the B side of the MU, put the hardware into a known state.
 */
static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
	unsigned int i;
	unsigned int val;

	for (i = 0; i < IMX_MU_CHANS; i++) {
		struct imx_mu_con_priv *cp = &priv->con_priv[i];

		/* chan i -> type i/4, register index i%4 (RST is chan 16) */
		cp->idx = i % 4;
		cp->type = i >> 2;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "imx_mu_chan[%i-%i]", cp->type, cp->idx);
	}

	priv->mbox.num_chans = IMX_MU_CHANS;
	priv->mbox.of_xlate = imx_mu_xlate;

	/* The B side skips the hardware initialisation below */
	if (priv->side_b)
		return;

	/* Set default MU configuration */
	for (i = 0; i < IMX_MU_xCR_MAX; i++)
		imx_mu_write(priv, 0, priv->dcfg->xCR[i]);

	/* Clear any pending GIP (write-1-to-clear) */
	val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
	imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);

	/* Clear any pending RSR by draining the receive registers */
	for (i = 0; i < IMX_MU_NUM_RR; i++)
		imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
}
/*
 * Set up the compact SCU/S4 channel layout: chan 0 = TX0, chan 1 = RX0
 * and, for SCU only, chans 2-5 = RXDB[0-3].  Control registers are
 * zeroed to a known default.
 */
static void imx_mu_init_specific(struct imx_mu_priv *priv)
{
	unsigned int i;
	int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;

	for (i = 0; i < num_chans; i++) {
		struct imx_mu_con_priv *cp = &priv->con_priv[i];

		cp->idx = i < 2 ? 0 : i - 2;
		cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "imx_mu_chan[%i-%i]", cp->type, cp->idx);
	}

	priv->mbox.num_chans = num_chans;
	priv->mbox.of_xlate = imx_mu_specific_xlate;

	/* Set default MU configuration */
	for (i = 0; i < IMX_MU_xCR_MAX; i++)
		imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
}
/* SECO setup: generic init, but with the doorbell-only channel xlate */
static void imx_mu_init_seco(struct imx_mu_priv *priv)
{
	imx_mu_init_generic(priv);
	priv->mbox.of_xlate = imx_mu_seco_xlate;
}
/*
 * Probe: map registers, pick the variant config, resolve IRQs,
 * allocate the RPC scratch buffer, enable the (optional) clock,
 * initialise the hardware, register the mailbox controller and hand
 * clock management over to runtime PM.
 */
static int imx_mu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct imx_mu_priv *priv;
	const struct imx_mu_dcfg *dcfg;
	int i, ret;
	u32 size;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* Variant data selects register layout and tx/rx/init ops */
	dcfg = of_device_get_match_data(dev);
	if (!dcfg)
		return -EINVAL;
	priv->dcfg = dcfg;
	if (priv->dcfg->type & IMX_MU_V2_IRQ) {
		/* Dedicated named tx/rx interrupt lines */
		priv->irq[IMX_MU_TYPE_TX] = platform_get_irq_byname(pdev, "tx");
		if (priv->irq[IMX_MU_TYPE_TX] < 0)
			return priv->irq[IMX_MU_TYPE_TX];
		priv->irq[IMX_MU_TYPE_RX] = platform_get_irq_byname(pdev, "rx");
		if (priv->irq[IMX_MU_TYPE_RX] < 0)
			return priv->irq[IMX_MU_TYPE_RX];
	} else {
		/* One combined interrupt shared by every channel type */
		ret = platform_get_irq(pdev, 0);
		if (ret < 0)
			return ret;

		for (i = 0; i < IMX_MU_CHANS; i++)
			priv->irq[i] = ret;
	}

	/* Scratch buffer sized for the variant's largest RPC message */
	if (priv->dcfg->type & IMX_MU_V2_S4)
		size = sizeof(struct imx_s4_rpc_msg_max);
	else
		size = sizeof(struct imx_sc_rpc_msg_max);

	priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!priv->msg)
		return -ENOMEM;

	/* The clock is optional; -ENOENT simply means always-on */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		if (PTR_ERR(priv->clk) != -ENOENT)
			return PTR_ERR(priv->clk);

		priv->clk = NULL;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(dev, "Failed to enable clock\n");
		return ret;
	}

	priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");

	priv->dcfg->init(priv);

	spin_lock_init(&priv->xcr_lock);

	priv->mbox.dev = dev;
	priv->mbox.ops = &imx_mu_ops;
	priv->mbox.chans = priv->mbox_chans;
	priv->mbox.txdone_irq = true;

	platform_set_drvdata(pdev, priv);

	ret = devm_mbox_controller_register(dev, &priv->mbox);
	if (ret) {
		clk_disable_unprepare(priv->clk);
		return ret;
	}

	pm_runtime_enable(dev);

	/* Cycle runtime PM once so the device settles into suspend */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto disable_runtime_pm;

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		goto disable_runtime_pm;

	/* Drop the probe-time clock reference; runtime PM owns it now */
	clk_disable_unprepare(priv->clk);

	return 0;

disable_runtime_pm:
	pm_runtime_disable(dev);
	clk_disable_unprepare(priv->clk);
	return ret;
}
/*
 * Remove: the mailbox controller and allocations are devm-managed,
 * so only runtime PM needs explicit teardown here.
 */
static int imx_mu_remove(struct platform_device *pdev)
{
	struct imx_mu_priv *priv = platform_get_drvdata(pdev);

	pm_runtime_disable(priv->dev);

	return 0;
}
/* i.MX6SX: MU v1 — single combined status/control register */
static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
	.tx	= imx_mu_generic_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_generic_rxdb,
	.init	= imx_mu_init_generic,
	.xTR	= 0x0,
	.xRR	= 0x10,
	.xSR	= {0x20, 0x20, 0x20, 0x20},
	.xCR	= {0x24, 0x24, 0x24, 0x24, 0x24},
};

/* i.MX7ULP: MU v1 layout at shifted offsets */
static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
	.tx	= imx_mu_generic_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_generic_rxdb,
	.init	= imx_mu_init_generic,
	.xTR	= 0x20,
	.xRR	= 0x40,
	.xSR	= {0x60, 0x60, 0x60, 0x60},
	.xCR	= {0x64, 0x64, 0x64, 0x64, 0x64},
};

/* i.MX8ULP: MU v2 — separate status/control registers per function */
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
	.tx	= imx_mu_generic_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_generic_rxdb,
	.init	= imx_mu_init_generic,
	.type	= IMX_MU_V2,
	.xTR	= 0x200,
	.xRR	= 0x280,
	.xSR	= {0xC, 0x118, 0x124, 0x12C},
	.xCR	= {0x8, 0x110, 0x114, 0x120, 0x128},
};

/* i.MX8ULP S4 (Sentinel) firmware MU: multi-word RPC transfers */
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
	.tx	= imx_mu_specific_tx,
	.rx	= imx_mu_specific_rx,
	.init	= imx_mu_init_specific,
	.type	= IMX_MU_V2 | IMX_MU_V2_S4,
	.xTR	= 0x200,
	.xRR	= 0x280,
	.xSR	= {0xC, 0x118, 0x124, 0x12C},
	.xCR	= {0x8, 0x110, 0x114, 0x120, 0x128},
};

/* i.MX93 S4 MU: as above, plus dedicated named tx/rx interrupts */
static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = {
	.tx	= imx_mu_specific_tx,
	.rx	= imx_mu_specific_rx,
	.init	= imx_mu_init_specific,
	.type	= IMX_MU_V2 | IMX_MU_V2_S4 | IMX_MU_V2_IRQ,
	.xTR	= 0x200,
	.xRR	= 0x280,
	.xSR	= {0xC, 0x118, 0x124, 0x12C},
	.xCR	= {0x8, 0x110, 0x114, 0x120, 0x128},
};

/* i.MX8 SCU firmware MU: v1 registers with RPC transfer ops */
static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
	.tx	= imx_mu_specific_tx,
	.rx	= imx_mu_specific_rx,
	.init	= imx_mu_init_specific,
	.rxdb	= imx_mu_generic_rxdb,
	.xTR	= 0x0,
	.xRR	= 0x10,
	.xSR	= {0x20, 0x20, 0x20, 0x20},
	.xCR	= {0x24, 0x24, 0x24, 0x24, 0x24},
};

/* i.MX8 SECO MU: doorbell-driven multi-word transfers */
static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
	.tx	= imx_mu_seco_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_seco_rxdb,
	.init	= imx_mu_init_seco,
	.xTR	= 0x0,
	.xRR	= 0x10,
	.xSR	= {0x20, 0x20, 0x20, 0x20},
	.xCR	= {0x24, 0x24, 0x24, 0x24, 0x24},
};

static const struct of_device_id imx_mu_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
	{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
	{ .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
	{ .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
	{ .compatible = "fsl,imx93-mu-s4", .data = &imx_mu_cfg_imx93_s4 },
	{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
	{ .compatible = "fsl,imx8-mu-seco", .data = &imx_mu_cfg_imx8_seco },
	{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
/*
 * System suspend (noirq): save the control registers when the MU has
 * no controllable clock (context may be lost), and flag the suspended
 * state so the ISR can report wakeups.
 */
static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);
	int i;

	if (!priv->clk) {
		for (i = 0; i < IMX_MU_xCR_MAX; i++)
			priv->xcr[i] = imx_mu_read(priv, priv->dcfg->xCR[i]);
	}

	priv->suspend = true;

	return 0;
}
/*
 * System resume (noirq): conditionally restore the control registers
 * saved by imx_mu_suspend_noirq().
 */
static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);
	int i;

	/*
	 * ONLY restore MU when context lost, the TIE could
	 * be set during noirq resume as there is MU data
	 * communication going on, and restore the saved
	 * value will overwrite the TIE and cause MU data
	 * send failed, may lead to system freeze. This issue
	 * is observed by testing freeze mode suspend.
	 */
	if (!priv->clk && !imx_mu_read(priv, priv->dcfg->xCR[0])) {
		for (i = 0; i < IMX_MU_xCR_MAX; i++)
			imx_mu_write(priv, priv->xcr[i], priv->dcfg->xCR[i]);
	}

	priv->suspend = false;

	return 0;
}
/* Runtime suspend: gate the MU clock (no-op when priv->clk is NULL) */
static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk);

	return 0;
}
/* Runtime resume: re-enable the MU clock (no-op when priv->clk is NULL) */
static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		dev_err(dev, "failed to enable clock\n");

	return ret;
}
static const struct dev_pm_ops imx_mu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
				      imx_mu_resume_noirq)
	SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
			   imx_mu_runtime_resume, NULL)
};

static struct platform_driver imx_mu_driver = {
	.probe		= imx_mu_probe,
	.remove		= imx_mu_remove,
	.driver = {
		.name	= "imx_mu",
		.of_match_table = imx_mu_dt_ids,
		.pm = &imx_mu_pm_ops,
	},
};
module_platform_driver(imx_mu_driver);

MODULE_AUTHOR("Oleksij Rempel <[email protected]>");
MODULE_DESCRIPTION("Message Unit driver for i.MX");
MODULE_LICENSE("GPL v2");