arm_mhuv2.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * ARM Message Handling Unit Version 2 (MHUv2) driver.
  4. *
  5. * Copyright (C) 2020 ARM Ltd.
  6. * Copyright (C) 2020 Linaro Ltd.
  7. *
  8. * An MHUv2 mailbox controller can provide up to 124 channel windows (each 32
  9. * bit long) and the driver allows any combination of both the transport
  10. * protocol modes: data-transfer and doorbell, to be used on those channel
  11. * windows.
  12. *
  13. * The transport protocols should be specified in the device tree entry for the
  14. * device. The transport protocols determine how the underlying hardware
  15. * resources of the device are utilized when transmitting data. Refer to the
  16. * device tree bindings of the ARM MHUv2 controller for more details.
  17. *
  18. * The number of registered mailbox channels is dependent on both the underlying
  19. * hardware - mainly the number of channel windows implemented by the platform,
  20. * as well as the selected transport protocols.
  21. *
  22. * The MHUv2 controller can work both as a sender and receiver, but the driver
  23. * and the DT bindings support unidirectional transfers for better allocation of
  24. * the channels. That is, this driver will be probed for two separate devices
  25. * for each mailbox controller, a sender device and a receiver device.
  26. */
  27. #include <linux/amba/bus.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/mailbox_controller.h>
  30. #include <linux/mailbox/arm_mhuv2_message.h>
  31. #include <linux/module.h>
  32. #include <linux/of_address.h>
  33. #include <linux/spinlock.h>
  34. /* ====== MHUv2 Registers ====== */
  35. /* Maximum number of channel windows */
  36. #define MHUV2_CH_WN_MAX 124
  37. /* Number of combined interrupt status registers */
  38. #define MHUV2_CMB_INT_ST_REG_CNT 4
  39. #define MHUV2_STAT_BYTES (sizeof(u32))
  40. #define MHUV2_STAT_BITS (MHUV2_STAT_BYTES * __CHAR_BIT__)
  41. #define LSB_MASK(n) ((1 << (n * __CHAR_BIT__)) - 1)
  42. #define MHUV2_PROTOCOL_PROP "arm,mhuv2-protocols"
/* Register Message Handling Unit Configuration fields */
struct mhu_cfg_t {
	u32 num_ch : 7;		/* Number of channel windows implemented */
	u32 pad : 25;
} __packed;
/* Register Interrupt Status fields */
struct int_st_t {
	u32 nr2r : 1;		/* Frame state transition (per MHUv2 TRM) */
	u32 r2nr : 1;		/* Frame state transition (per MHUv2 TRM) */
	u32 pad : 30;
} __packed;
/* Register Interrupt Clear fields */
struct int_clr_t {
	u32 nr2r : 1;
	u32 r2nr : 1;
	u32 pad : 30;
} __packed;
/* Register Interrupt Enable fields */
struct int_en_t {
	u32 r2nr : 1;
	u32 nr2r : 1;
	u32 chcomb : 1;		/* Channel combined interrupt enable */
	u32 pad : 29;
} __packed;
/* Register Implementer Identification fields */
struct iidr_t {
	u32 implementer : 12;
	u32 revision : 4;
	u32 variant : 4;
	u32 product_id : 12;
} __packed;
/* Register Architecture Identification Register fields */
struct aidr_t {
	u32 arch_minor_rev : 4;	/* Distinguishes v2.0 (0) from v2.1+ */
	u32 arch_major_rev : 4;
	u32 pad : 24;
} __packed;
/* Sender Channel Window fields, 0x20 bytes per window */
struct mhu2_send_ch_wn_reg {
	u32 stat;			/* 0x00 */
	u8 pad1[0x0C - 0x04];		/* 0x04 - 0x0B reserved */
	u32 stat_set;			/* 0x0C */
	u32 int_st;			/* 0x10 */
	u32 int_clr;			/* 0x14 */
	u32 int_en;			/* 0x18 */
	u8 pad2[0x20 - 0x1C];		/* 0x1C - 0x1F reserved */
} __packed;
/* Sender frame register fields */
struct mhu2_send_frame_reg {
	struct mhu2_send_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];	/* 0x000 */
	struct mhu_cfg_t mhu_cfg;				/* 0xF80 */
	u32 resp_cfg;						/* 0xF84 */
	u32 access_request;					/* 0xF88 */
	u32 access_ready;					/* 0xF8C */
	struct int_st_t int_st;					/* 0xF90 */
	struct int_clr_t int_clr;				/* 0xF94 */
	struct int_en_t int_en;					/* 0xF98 */
	u32 reserved0;						/* 0xF9C */
	u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];		/* 0xFA0 */
	u8 pad[0xFC8 - 0xFB0];					/* reserved */
	struct iidr_t iidr;					/* 0xFC8 */
	struct aidr_t aidr;					/* 0xFCC */
} __packed;
/* Receiver Channel Window fields, 0x20 bytes per window */
struct mhu2_recv_ch_wn_reg {
	u32 stat;			/* 0x00 */
	u32 stat_masked;		/* 0x04 */
	u32 stat_clear;			/* 0x08 */
	u8 reserved0[0x10 - 0x0C];	/* 0x0C - 0x0F reserved */
	u32 mask;			/* 0x10 */
	u32 mask_set;			/* 0x14 */
	u32 mask_clear;			/* 0x18 */
	u8 pad[0x20 - 0x1C];		/* 0x1C - 0x1F reserved */
} __packed;
/* Receiver frame register fields */
struct mhu2_recv_frame_reg {
	struct mhu2_recv_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];	/* 0x000 */
	struct mhu_cfg_t mhu_cfg;				/* 0xF80 */
	u8 reserved0[0xF90 - 0xF84];				/* reserved */
	struct int_st_t int_st;					/* 0xF90 */
	struct int_clr_t int_clr;				/* 0xF94 */
	struct int_en_t int_en;					/* 0xF98 */
	u32 pad;						/* 0xF9C */
	u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];		/* 0xFA0 */
	u8 reserved2[0xFC8 - 0xFB0];				/* reserved */
	struct iidr_t iidr;					/* 0xFC8 */
	struct aidr_t aidr;					/* 0xFCC */
} __packed;
/* ====== MHUv2 data structures ====== */

/* Transport protocols, matching the values used in the DT property */
enum mhuv2_transport_protocol {
	DOORBELL = 0,
	DATA_TRANSFER = 1
};
/* Direction of the MHU frame this device instance drives */
enum mhuv2_frame {
	RECEIVER_FRAME,
	SENDER_FRAME
};
/**
 * struct mhuv2 - MHUv2 mailbox controller data
 *
 * @mbox:	Mailbox controller belonging to the MHU frame.
 * @send:	Base address of the register mapping region (sender frame).
 * @recv:	Base address of the register mapping region (receiver frame).
 * @frame:	Frame type: RECEIVER_FRAME or SENDER_FRAME.
 * @irq:	Interrupt.
 * @windows:	Channel windows implemented by the platform.
 * @minor:	Minor version of the controller.
 * @length:	Length of the protocols array in bytes.
 * @protocols:	Raw protocol information, derived from device tree.
 * @doorbell_pending_lock: spinlock required for correct operation of Tx
 *		interrupt for doorbells.
 */
struct mhuv2 {
	struct mbox_controller mbox;
	/* Only one of the two is valid, selected by @frame */
	union {
		struct mhu2_send_frame_reg __iomem *send;
		struct mhu2_recv_frame_reg __iomem *recv;
	};
	enum mhuv2_frame frame;
	unsigned int irq;
	unsigned int windows;
	unsigned int minor;
	unsigned int length;
	u32 *protocols;

	spinlock_t doorbell_pending_lock;
};

/* Recover the driver data from the embedded mbox_controller */
#define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv2, mbox)
/**
 * struct mhuv2_protocol_ops - MHUv2 operations
 *
 * Each transport protocol must provide an implementation of the operations
 * provided here.
 *
 * @rx_startup:		Startup callback for receiver.
 * @rx_shutdown:	Shutdown callback for receiver.
 * @read_data:		Reads and clears newly available data.
 * @tx_startup:		Startup callback for sender.
 * @tx_shutdown:	Shutdown callback for sender.
 * @last_tx_done:	Report back if the last tx is completed or not.
 * @send_data:		Send data to the receiver.
 */
struct mhuv2_protocol_ops {
	int (*rx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
	void (*rx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
	void *(*read_data)(struct mhuv2 *mhu, struct mbox_chan *chan);
	/* tx_startup/tx_shutdown may be NULL (doorbell protocol omits them) */
	void (*tx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
	void (*tx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
	int (*last_tx_done)(struct mhuv2 *mhu, struct mbox_chan *chan);
	int (*send_data)(struct mhuv2 *mhu, struct mbox_chan *chan, void *arg);
};
/*
 * MHUv2 mailbox channel's private information
 *
 * @ops:	protocol specific ops for the channel.
 * @ch_wn_idx:	Channel window index allocated to the channel.
 * @windows:	Total number of windows consumed by the channel, only relevant
 *		in DATA_TRANSFER protocol.
 * @doorbell:	Doorbell bit number within the ch_wn_idx window, only relevant
 *		in DOORBELL protocol.
 * @pending:	Flag indicating pending doorbell interrupt, only relevant in
 *		DOORBELL protocol.
 */
struct mhuv2_mbox_chan_priv {
	const struct mhuv2_protocol_ops *ops;
	u32 ch_wn_idx;
	/* Overlapped: @windows for data-transfer, doorbell state otherwise */
	union {
		u32 windows;
		struct {
			u32 doorbell;
			u32 pending;
		};
	};
};
/*
 * Macro for reading a bitfield within a physically mapped packed struct.
 *
 * The register is read once as a raw u32, then reinterpreted through the
 * bitfield struct type, so exactly one MMIO access is performed.
 */
#define readl_relaxed_bitfield(_regptr, _type, _field) \
({ \
	u32 _regval; \
	_regval = readl_relaxed((_regptr)); \
	(*(_type *)(&_regval))._field; \
})
/*
 * Macro for writing a bitfield within a physically mapped packed struct.
 *
 * Read-modify-write of the whole register; only the named bitfield changes.
 * NOTE(review): the RMW is not atomic — callers must serialise concurrent
 * writers of the same register themselves.
 */
#define writel_relaxed_bitfield(_value, _regptr, _type, _field) \
({ \
	u32 _regval; \
	_regval = readl_relaxed(_regptr); \
	(*(_type *)(&_regval))._field = _value; \
	writel_relaxed(_regval, _regptr); \
})
  231. /* =================== Doorbell transport protocol operations =============== */
  232. static int mhuv2_doorbell_rx_startup(struct mhuv2 *mhu, struct mbox_chan *chan)
  233. {
  234. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  235. writel_relaxed(BIT(priv->doorbell),
  236. &mhu->recv->ch_wn[priv->ch_wn_idx].mask_clear);
  237. return 0;
  238. }
  239. static void mhuv2_doorbell_rx_shutdown(struct mhuv2 *mhu,
  240. struct mbox_chan *chan)
  241. {
  242. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  243. writel_relaxed(BIT(priv->doorbell),
  244. &mhu->recv->ch_wn[priv->ch_wn_idx].mask_set);
  245. }
  246. static void *mhuv2_doorbell_read_data(struct mhuv2 *mhu, struct mbox_chan *chan)
  247. {
  248. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  249. writel_relaxed(BIT(priv->doorbell),
  250. &mhu->recv->ch_wn[priv->ch_wn_idx].stat_clear);
  251. return NULL;
  252. }
  253. static int mhuv2_doorbell_last_tx_done(struct mhuv2 *mhu,
  254. struct mbox_chan *chan)
  255. {
  256. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  257. return !(readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat) &
  258. BIT(priv->doorbell));
  259. }
  260. static int mhuv2_doorbell_send_data(struct mhuv2 *mhu, struct mbox_chan *chan,
  261. void *arg)
  262. {
  263. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  264. unsigned long flags;
  265. spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
  266. priv->pending = 1;
  267. writel_relaxed(BIT(priv->doorbell),
  268. &mhu->send->ch_wn[priv->ch_wn_idx].stat_set);
  269. spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
  270. return 0;
  271. }
/*
 * Doorbell protocol ops. No tx_startup/tx_shutdown: the Tx interrupt of a
 * doorbell window is enabled permanently in mhuv2_allocate_channels(), as
 * it cannot be controlled per doorbell bit.
 */
static const struct mhuv2_protocol_ops mhuv2_doorbell_ops = {
	.rx_startup = mhuv2_doorbell_rx_startup,
	.rx_shutdown = mhuv2_doorbell_rx_shutdown,
	.read_data = mhuv2_doorbell_read_data,
	.last_tx_done = mhuv2_doorbell_last_tx_done,
	.send_data = mhuv2_doorbell_send_data,
};

/* A channel uses the doorbell protocol iff it points at the doorbell ops */
#define IS_PROTOCOL_DOORBELL(_priv) (_priv->ops == &mhuv2_doorbell_ops)
  280. /* ============= Data transfer transport protocol operations ================ */
  281. static int mhuv2_data_transfer_rx_startup(struct mhuv2 *mhu,
  282. struct mbox_chan *chan)
  283. {
  284. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  285. int i = priv->ch_wn_idx + priv->windows - 1;
  286. /*
  287. * The protocol mandates that all but the last status register must be
  288. * masked.
  289. */
  290. writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_clear);
  291. return 0;
  292. }
  293. static void mhuv2_data_transfer_rx_shutdown(struct mhuv2 *mhu,
  294. struct mbox_chan *chan)
  295. {
  296. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  297. int i = priv->ch_wn_idx + priv->windows - 1;
  298. writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
  299. }
  300. static void *mhuv2_data_transfer_read_data(struct mhuv2 *mhu,
  301. struct mbox_chan *chan)
  302. {
  303. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  304. const int windows = priv->windows;
  305. struct arm_mhuv2_mbox_msg *msg;
  306. u32 *data;
  307. int i, idx;
  308. msg = kzalloc(sizeof(*msg) + windows * MHUV2_STAT_BYTES, GFP_KERNEL);
  309. if (!msg)
  310. return ERR_PTR(-ENOMEM);
  311. data = msg->data = msg + 1;
  312. msg->len = windows * MHUV2_STAT_BYTES;
  313. /*
  314. * Messages are expected in order of most significant word to least
  315. * significant word. Refer mhuv2_data_transfer_send_data() for more
  316. * details.
  317. *
  318. * We also need to read the stat register instead of stat_masked, as we
  319. * masked all but the last window.
  320. *
  321. * Last channel window must be cleared as the final operation. Upon
  322. * clearing the last channel window register, which is unmasked in
  323. * data-transfer protocol, the interrupt is de-asserted.
  324. */
  325. for (i = 0; i < windows; i++) {
  326. idx = priv->ch_wn_idx + i;
  327. data[windows - 1 - i] = readl_relaxed(&mhu->recv->ch_wn[idx].stat);
  328. writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[idx].stat_clear);
  329. }
  330. return msg;
  331. }
  332. static void mhuv2_data_transfer_tx_startup(struct mhuv2 *mhu,
  333. struct mbox_chan *chan)
  334. {
  335. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  336. int i = priv->ch_wn_idx + priv->windows - 1;
  337. /* Enable interrupts only for the last window */
  338. if (mhu->minor) {
  339. writel_relaxed(0x1, &mhu->send->ch_wn[i].int_clr);
  340. writel_relaxed(0x1, &mhu->send->ch_wn[i].int_en);
  341. }
  342. }
  343. static void mhuv2_data_transfer_tx_shutdown(struct mhuv2 *mhu,
  344. struct mbox_chan *chan)
  345. {
  346. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  347. int i = priv->ch_wn_idx + priv->windows - 1;
  348. if (mhu->minor)
  349. writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
  350. }
  351. static int mhuv2_data_transfer_last_tx_done(struct mhuv2 *mhu,
  352. struct mbox_chan *chan)
  353. {
  354. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  355. int i = priv->ch_wn_idx + priv->windows - 1;
  356. /* Just checking the last channel window should be enough */
  357. return !readl_relaxed(&mhu->send->ch_wn[i].stat);
  358. }
/*
 * Message will be transmitted from most significant to least significant word.
 * This is to allow for messages shorter than channel windows to still trigger
 * the receiver interrupt which gets activated when the last stat register is
 * written. As an example, a 6-word message is to be written on a 4-channel MHU
 * connection: Registers marked with '*' are masked, and will not generate an
 * interrupt on the receiver side once written.
 *
 * u32 *data =	[0x00000001], [0x00000002], [0x00000003], [0x00000004],
 *		[0x00000005], [0x00000006]
 *
 * ROUND 1:
 * stat reg		To write	Write sequence
 * [ stat 3 ]	<-	[0x00000001]	4 <- triggers interrupt on receiver
 * [ stat 2 ]	<-	[0x00000002]	3
 * [ stat 1 ]	<-	[0x00000003]	2
 * [ stat 0 ]	<-	[0x00000004]	1
 *
 * data += 4 // Increment data pointer by number of stat regs
 *
 * ROUND 2:
 * stat reg		To write	Write sequence
 * [ stat 3 ]	<-	[0x00000005]	2 <- triggers interrupt on receiver
 * [ stat 2 ]	<-	[0x00000006]	1
 * [ stat 1 ]	<-	[0x00000000]
 * [ stat 0 ]	<-	[0x00000000]
 */
static int mhuv2_data_transfer_send_data(struct mhuv2 *mhu,
					 struct mbox_chan *chan, void *arg)
{
	const struct arm_mhuv2_mbox_msg *msg = arg;
	int bytes_left = msg->len, bytes_to_send, bytes_in_round, i;
	struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
	int windows = priv->windows;
	u32 *data = msg->data, word;

	while (bytes_left) {
		if (!data[0]) {
			dev_err(mhu->mbox.dev, "Data aligned at first window can't be zero to guarantee interrupt generation at receiver");
			return -EINVAL;
		}

		/* Busy-wait for the receiver to drain the previous round */
		while(!mhuv2_data_transfer_last_tx_done(mhu, chan))
			continue;

		bytes_in_round = min(bytes_left, (int)(windows * MHUV2_STAT_BYTES));

		/* Write words high-to-low so the last window is set last */
		for (i = windows - 1; i >= 0; i--) {
			/* Data less than windows can transfer ? */
			if (unlikely(bytes_in_round <= i * MHUV2_STAT_BYTES))
				continue;

			word = data[i];
			/* Partial trailing word: keep only its valid bytes */
			bytes_to_send = bytes_in_round & (MHUV2_STAT_BYTES - 1);
			if (unlikely(bytes_to_send))
				word &= LSB_MASK(bytes_to_send);
			else
				bytes_to_send = MHUV2_STAT_BYTES;

			writel_relaxed(word, &mhu->send->ch_wn[priv->ch_wn_idx + windows - 1 - i].stat_set);
			bytes_left -= bytes_to_send;
			bytes_in_round -= bytes_to_send;
		}

		data += windows;
	}

	return 0;
}
/* Data-transfer protocol ops: one mailbox channel per group of windows */
static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = {
	.rx_startup = mhuv2_data_transfer_rx_startup,
	.rx_shutdown = mhuv2_data_transfer_rx_shutdown,
	.read_data = mhuv2_data_transfer_read_data,
	.tx_startup = mhuv2_data_transfer_tx_startup,
	.tx_shutdown = mhuv2_data_transfer_tx_shutdown,
	.last_tx_done = mhuv2_data_transfer_last_tx_done,
	.send_data = mhuv2_data_transfer_send_data,
};
  429. /* Interrupt handlers */
  430. static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg)
  431. {
  432. struct mbox_chan *chans = mhu->mbox.chans;
  433. int channel = 0, i, offset = 0, windows, protocol, ch_wn;
  434. u32 stat;
  435. for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) {
  436. stat = readl_relaxed(reg + i);
  437. if (!stat)
  438. continue;
  439. ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat);
  440. for (i = 0; i < mhu->length; i += 2) {
  441. protocol = mhu->protocols[i];
  442. windows = mhu->protocols[i + 1];
  443. if (ch_wn >= offset + windows) {
  444. if (protocol == DOORBELL)
  445. channel += MHUV2_STAT_BITS * windows;
  446. else
  447. channel++;
  448. offset += windows;
  449. continue;
  450. }
  451. /* Return first chan of the window in doorbell mode */
  452. if (protocol == DOORBELL)
  453. channel += MHUV2_STAT_BITS * (ch_wn - offset);
  454. return &chans[channel];
  455. }
  456. }
  457. return ERR_PTR(-EIO);
  458. }
  459. static irqreturn_t mhuv2_sender_interrupt(int irq, void *data)
  460. {
  461. struct mhuv2 *mhu = data;
  462. struct device *dev = mhu->mbox.dev;
  463. struct mhuv2_mbox_chan_priv *priv;
  464. struct mbox_chan *chan;
  465. unsigned long flags;
  466. int i, found = 0;
  467. u32 stat;
  468. chan = get_irq_chan_comb(mhu, mhu->send->chcomb_int_st);
  469. if (IS_ERR(chan)) {
  470. dev_warn(dev, "Failed to find channel for the Tx interrupt\n");
  471. return IRQ_NONE;
  472. }
  473. priv = chan->con_priv;
  474. if (!IS_PROTOCOL_DOORBELL(priv)) {
  475. writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + priv->windows - 1].int_clr);
  476. if (chan->cl) {
  477. mbox_chan_txdone(chan, 0);
  478. return IRQ_HANDLED;
  479. }
  480. dev_warn(dev, "Tx interrupt Received on channel (%u) not currently attached to a mailbox client\n",
  481. priv->ch_wn_idx);
  482. return IRQ_NONE;
  483. }
  484. /* Clear the interrupt first, so we don't miss any doorbell later */
  485. writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx].int_clr);
  486. /*
  487. * In Doorbell mode, make sure no new transitions happen while the
  488. * interrupt handler is trying to find the finished doorbell tx
  489. * operations, else we may think few of the transfers were complete
  490. * before they actually were.
  491. */
  492. spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
  493. /*
  494. * In case of doorbell mode, the first channel of the window is returned
  495. * by get_irq_chan_comb(). Find all the pending channels here.
  496. */
  497. stat = readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat);
  498. for (i = 0; i < MHUV2_STAT_BITS; i++) {
  499. priv = chan[i].con_priv;
  500. /* Find cases where pending was 1, but stat's bit is cleared */
  501. if (priv->pending ^ ((stat >> i) & 0x1)) {
  502. BUG_ON(!priv->pending);
  503. if (!chan->cl) {
  504. dev_warn(dev, "Tx interrupt received on doorbell (%u : %u) channel not currently attached to a mailbox client\n",
  505. priv->ch_wn_idx, i);
  506. continue;
  507. }
  508. mbox_chan_txdone(&chan[i], 0);
  509. priv->pending = 0;
  510. found++;
  511. }
  512. }
  513. spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
  514. if (!found) {
  515. /*
  516. * We may have already processed the doorbell in the previous
  517. * iteration if the interrupt came right after we cleared it but
  518. * before we read the stat register.
  519. */
  520. dev_dbg(dev, "Couldn't find the doorbell (%u) for the Tx interrupt interrupt\n",
  521. priv->ch_wn_idx);
  522. return IRQ_NONE;
  523. }
  524. return IRQ_HANDLED;
  525. }
/*
 * Resolve the mbox_chan for a pending combined Rx interrupt, refining the
 * window-level result of get_irq_chan_comb() down to the exact doorbell
 * channel where applicable.
 */
static struct mbox_chan *get_irq_chan_comb_rx(struct mhuv2 *mhu)
{
	struct mhuv2_mbox_chan_priv *priv;
	struct mbox_chan *chan;
	u32 stat;

	chan = get_irq_chan_comb(mhu, mhu->recv->chcomb_int_st);
	if (IS_ERR(chan))
		return chan;

	priv = chan->con_priv;
	if (!IS_PROTOCOL_DOORBELL(priv))
		return chan;

	/*
	 * In case of doorbell mode, the first channel of the window is returned
	 * by the routine. Find the exact channel here.
	 */
	stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
	BUG_ON(!stat);

	/* Doorbell channels are laid out in bit order within the window */
	return chan + __builtin_ctz(stat);
}
/*
 * Find the Rx channel with pending data by scanning every channel's masked
 * status register. Used when the hardware lacks combined interrupt status
 * (see get_irq_chan_rx()). Returns ERR_PTR(-EIO) if nothing is pending.
 */
static struct mbox_chan *get_irq_chan_stat_rx(struct mhuv2 *mhu)
{
	struct mbox_chan *chans = mhu->mbox.chans;
	struct mhuv2_mbox_chan_priv *priv;
	u32 stat;
	int i = 0;

	while (i < mhu->mbox.num_chans) {
		priv = chans[i].con_priv;
		stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
		if (stat) {
			/* Lowest set bit selects the doorbell channel */
			if (IS_PROTOCOL_DOORBELL(priv))
				i += __builtin_ctz(stat);
			return &chans[i];
		}

		/* A doorbell window spans MHUV2_STAT_BITS channels */
		i += IS_PROTOCOL_DOORBELL(priv) ? MHUV2_STAT_BITS : 1;
	}

	return ERR_PTR(-EIO);
}
  563. static struct mbox_chan *get_irq_chan_rx(struct mhuv2 *mhu)
  564. {
  565. if (!mhu->minor)
  566. return get_irq_chan_stat_rx(mhu);
  567. return get_irq_chan_comb_rx(mhu);
  568. }
/*
 * Receiver frame interrupt handler: locate the channel that raised the
 * interrupt, read (and ack) its data via the protocol ops, then hand the
 * message to the mailbox client, if one is attached.
 */
static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg)
{
	struct mhuv2 *mhu = arg;
	struct mbox_chan *chan = get_irq_chan_rx(mhu);
	struct device *dev = mhu->mbox.dev;
	struct mhuv2_mbox_chan_priv *priv;
	int ret = IRQ_NONE;
	void *data;

	if (IS_ERR(chan)) {
		dev_warn(dev, "Failed to find channel for the rx interrupt\n");
		return IRQ_NONE;
	}
	priv = chan->con_priv;

	/* Read and clear the data first */
	data = priv->ops->read_data(mhu, chan);

	if (!chan->cl) {
		dev_warn(dev, "Received data on channel (%u) not currently attached to a mailbox client\n",
			 priv->ch_wn_idx);
	} else if (IS_ERR(data)) {
		dev_err(dev, "Failed to read data: %lu\n", PTR_ERR(data));
	} else {
		mbox_chan_received_data(chan, data);
		ret = IRQ_HANDLED;
	}

	/* read_data() returned NULL (doorbell) or a kzalloc'ed message */
	if (!IS_ERR(data))
		kfree(data);

	return ret;
}
  597. /* Sender and receiver ops */
  598. static bool mhuv2_sender_last_tx_done(struct mbox_chan *chan)
  599. {
  600. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  601. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  602. return priv->ops->last_tx_done(mhu, chan);
  603. }
  604. static int mhuv2_sender_send_data(struct mbox_chan *chan, void *data)
  605. {
  606. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  607. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  608. if (!priv->ops->last_tx_done(mhu, chan))
  609. return -EBUSY;
  610. return priv->ops->send_data(mhu, chan, data);
  611. }
  612. static int mhuv2_sender_startup(struct mbox_chan *chan)
  613. {
  614. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  615. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  616. if (priv->ops->tx_startup)
  617. priv->ops->tx_startup(mhu, chan);
  618. return 0;
  619. }
  620. static void mhuv2_sender_shutdown(struct mbox_chan *chan)
  621. {
  622. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  623. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  624. if (priv->ops->tx_shutdown)
  625. priv->ops->tx_shutdown(mhu, chan);
  626. }
/* mbox_chan_ops for a sender frame */
static const struct mbox_chan_ops mhuv2_sender_ops = {
	.send_data = mhuv2_sender_send_data,
	.startup = mhuv2_sender_startup,
	.shutdown = mhuv2_sender_shutdown,
	.last_tx_done = mhuv2_sender_last_tx_done,
};
  633. static int mhuv2_receiver_startup(struct mbox_chan *chan)
  634. {
  635. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  636. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  637. return priv->ops->rx_startup(mhu, chan);
  638. }
  639. static void mhuv2_receiver_shutdown(struct mbox_chan *chan)
  640. {
  641. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  642. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  643. priv->ops->rx_shutdown(mhu, chan);
  644. }
  645. static int mhuv2_receiver_send_data(struct mbox_chan *chan, void *data)
  646. {
  647. dev_err(chan->mbox->dev,
  648. "Trying to transmit on a receiver MHU frame\n");
  649. return -EIO;
  650. }
  651. static bool mhuv2_receiver_last_tx_done(struct mbox_chan *chan)
  652. {
  653. dev_err(chan->mbox->dev, "Trying to Tx poll on a receiver MHU frame\n");
  654. return true;
  655. }
/* mbox_chan_ops for a receiver frame (tx entry points only report misuse) */
static const struct mbox_chan_ops mhuv2_receiver_ops = {
	.send_data = mhuv2_receiver_send_data,
	.startup = mhuv2_receiver_startup,
	.shutdown = mhuv2_receiver_shutdown,
	.last_tx_done = mhuv2_receiver_last_tx_done,
};
/*
 * Translate a two-cell DT mailbox specifier <channel, doorbell> into the
 * corresponding mbox_chan by walking the configured <protocol, windows>
 * pairs. For data-transfer channels the doorbell cell must be 0.
 */
static struct mbox_chan *mhuv2_mbox_of_xlate(struct mbox_controller *mbox,
					     const struct of_phandle_args *pa)
{
	struct mhuv2 *mhu = mhu_from_mbox(mbox);
	struct mbox_chan *chans = mbox->chans;
	int channel = 0, i, offset, doorbell, protocol, windows;

	if (pa->args_count != 2)
		return ERR_PTR(-EINVAL);

	offset = pa->args[0];
	doorbell = pa->args[1];
	if (doorbell >= MHUV2_STAT_BITS)
		goto out;

	for (i = 0; i < mhu->length; i += 2) {
		protocol = mhu->protocols[i];
		windows = mhu->protocols[i + 1];

		if (protocol == DOORBELL) {
			/* Each doorbell window exposes MHUV2_STAT_BITS chans */
			if (offset < windows)
				return &chans[channel + MHUV2_STAT_BITS * offset + doorbell];
			channel += MHUV2_STAT_BITS * windows;
			offset -= windows;
		} else {
			/* Data-transfer group maps to a single channel */
			if (offset == 0) {
				if (doorbell)
					goto out;
				return &chans[channel];
			}
			channel++;
			offset--;
		}
	}

out:
	dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n",
		pa->args[0], doorbell);
	return ERR_PTR(-ENODEV);
}
  697. static int mhuv2_verify_protocol(struct mhuv2 *mhu)
  698. {
  699. struct device *dev = mhu->mbox.dev;
  700. int protocol, windows, channels = 0, total_windows = 0, i;
  701. for (i = 0; i < mhu->length; i += 2) {
  702. protocol = mhu->protocols[i];
  703. windows = mhu->protocols[i + 1];
  704. if (!windows) {
  705. dev_err(dev, "Window size can't be zero (%d)\n", i);
  706. return -EINVAL;
  707. }
  708. total_windows += windows;
  709. if (protocol == DOORBELL) {
  710. channels += MHUV2_STAT_BITS * windows;
  711. } else if (protocol == DATA_TRANSFER) {
  712. channels++;
  713. } else {
  714. dev_err(dev, "Invalid protocol (%d) present in %s property at index %d\n",
  715. protocol, MHUV2_PROTOCOL_PROP, i);
  716. return -EINVAL;
  717. }
  718. }
  719. if (total_windows > mhu->windows) {
  720. dev_err(dev, "Channel windows can't be more than what's implemented by the hardware ( %d: %d)\n",
  721. total_windows, mhu->windows);
  722. return -EINVAL;
  723. }
  724. mhu->mbox.num_chans = channels;
  725. return 0;
  726. }
/*
 * Allocate the mbox_chan array and per-channel private data according to
 * the verified protocol configuration. Each data-transfer group becomes a
 * single channel; each doorbell window becomes MHUV2_STAT_BITS channels.
 */
static int mhuv2_allocate_channels(struct mhuv2 *mhu)
{
	struct mbox_controller *mbox = &mhu->mbox;
	struct mhuv2_mbox_chan_priv *priv;
	struct device *dev = mbox->dev;
	struct mbox_chan *chans;
	int protocol, windows = 0, next_window = 0, i, j, k;

	chans = devm_kcalloc(dev, mbox->num_chans, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return -ENOMEM;

	mbox->chans = chans;

	for (i = 0; i < mhu->length; i += 2) {
		/* First window index of this protocol entry */
		next_window += windows;

		protocol = mhu->protocols[i];
		windows = mhu->protocols[i + 1];

		if (protocol == DATA_TRANSFER) {
			priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
			if (!priv)
				return -ENOMEM;

			priv->ch_wn_idx = next_window;
			priv->ops = &mhuv2_data_transfer_ops;
			priv->windows = windows;
			/* Attach priv, then advance to the next channel */
			chans++->con_priv = priv;
			continue;
		}

		for (j = 0; j < windows; j++) {
			/* One channel per doorbell bit of the window */
			for (k = 0; k < MHUV2_STAT_BITS; k++) {
				priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
				if (!priv)
					return -ENOMEM;

				priv->ch_wn_idx = next_window + j;
				priv->ops = &mhuv2_doorbell_ops;
				priv->doorbell = k;
				chans++->con_priv = priv;
			}

			/*
			 * Permanently enable interrupt as we can't
			 * control it per doorbell.
			 */
			if (mhu->frame == SENDER_FRAME && mhu->minor)
				writel_relaxed(0x1, &mhu->send->ch_wn[priv->ch_wn_idx].int_en);
		}
	}

	/* Make sure we have initialized all channels */
	BUG_ON(chans - mbox->chans != mbox->num_chans);

	return 0;
}
  774. static int mhuv2_parse_channels(struct mhuv2 *mhu)
  775. {
  776. struct device *dev = mhu->mbox.dev;
  777. const struct device_node *np = dev->of_node;
  778. int ret, count;
  779. u32 *protocols;
  780. count = of_property_count_u32_elems(np, MHUV2_PROTOCOL_PROP);
  781. if (count <= 0 || count % 2) {
  782. dev_err(dev, "Invalid %s property (%d)\n", MHUV2_PROTOCOL_PROP,
  783. count);
  784. return -EINVAL;
  785. }
  786. protocols = devm_kmalloc_array(dev, count, sizeof(*protocols), GFP_KERNEL);
  787. if (!protocols)
  788. return -ENOMEM;
  789. ret = of_property_read_u32_array(np, MHUV2_PROTOCOL_PROP, protocols, count);
  790. if (ret) {
  791. dev_err(dev, "Failed to read %s property: %d\n",
  792. MHUV2_PROTOCOL_PROP, ret);
  793. return ret;
  794. }
  795. mhu->protocols = protocols;
  796. mhu->length = count;
  797. ret = mhuv2_verify_protocol(mhu);
  798. if (ret)
  799. return ret;
  800. return mhuv2_allocate_channels(mhu);
  801. }
  802. static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu,
  803. void __iomem *reg)
  804. {
  805. struct device *dev = mhu->mbox.dev;
  806. int ret, i;
  807. mhu->frame = SENDER_FRAME;
  808. mhu->mbox.ops = &mhuv2_sender_ops;
  809. mhu->send = reg;
  810. mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, struct mhu_cfg_t, num_ch);
  811. mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, struct aidr_t, arch_minor_rev);
  812. spin_lock_init(&mhu->doorbell_pending_lock);
  813. /*
  814. * For minor version 1 and forward, tx interrupt is provided by
  815. * the controller.
  816. */
  817. if (mhu->minor && adev->irq[0]) {
  818. ret = devm_request_threaded_irq(dev, adev->irq[0], NULL,
  819. mhuv2_sender_interrupt,
  820. IRQF_ONESHOT, "mhuv2-tx", mhu);
  821. if (ret) {
  822. dev_err(dev, "Failed to request tx IRQ, fallback to polling mode: %d\n",
  823. ret);
  824. } else {
  825. mhu->mbox.txdone_irq = true;
  826. mhu->mbox.txdone_poll = false;
  827. mhu->irq = adev->irq[0];
  828. writel_relaxed_bitfield(1, &mhu->send->int_en, struct int_en_t, chcomb);
  829. /* Disable all channel interrupts */
  830. for (i = 0; i < mhu->windows; i++)
  831. writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
  832. goto out;
  833. }
  834. }
  835. mhu->mbox.txdone_irq = false;
  836. mhu->mbox.txdone_poll = true;
  837. mhu->mbox.txpoll_period = 1;
  838. out:
  839. /* Wait for receiver to be ready */
  840. writel_relaxed(0x1, &mhu->send->access_request);
  841. while (!readl_relaxed(&mhu->send->access_ready))
  842. continue;
  843. return 0;
  844. }
  845. static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu,
  846. void __iomem *reg)
  847. {
  848. struct device *dev = mhu->mbox.dev;
  849. int ret, i;
  850. mhu->frame = RECEIVER_FRAME;
  851. mhu->mbox.ops = &mhuv2_receiver_ops;
  852. mhu->recv = reg;
  853. mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, struct mhu_cfg_t, num_ch);
  854. mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, struct aidr_t, arch_minor_rev);
  855. mhu->irq = adev->irq[0];
  856. if (!mhu->irq) {
  857. dev_err(dev, "Missing receiver IRQ\n");
  858. return -EINVAL;
  859. }
  860. ret = devm_request_threaded_irq(dev, mhu->irq, NULL,
  861. mhuv2_receiver_interrupt, IRQF_ONESHOT,
  862. "mhuv2-rx", mhu);
  863. if (ret) {
  864. dev_err(dev, "Failed to request rx IRQ\n");
  865. return ret;
  866. }
  867. /* Mask all the channel windows */
  868. for (i = 0; i < mhu->windows; i++)
  869. writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
  870. if (mhu->minor)
  871. writel_relaxed_bitfield(1, &mhu->recv->int_en, struct int_en_t, chcomb);
  872. return 0;
  873. }
  874. static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id)
  875. {
  876. struct device *dev = &adev->dev;
  877. const struct device_node *np = dev->of_node;
  878. struct mhuv2 *mhu;
  879. void __iomem *reg;
  880. int ret = -EINVAL;
  881. reg = devm_of_iomap(dev, dev->of_node, 0, NULL);
  882. if (IS_ERR(reg))
  883. return PTR_ERR(reg);
  884. mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
  885. if (!mhu)
  886. return -ENOMEM;
  887. mhu->mbox.dev = dev;
  888. mhu->mbox.of_xlate = mhuv2_mbox_of_xlate;
  889. if (of_device_is_compatible(np, "arm,mhuv2-tx"))
  890. ret = mhuv2_tx_init(adev, mhu, reg);
  891. else if (of_device_is_compatible(np, "arm,mhuv2-rx"))
  892. ret = mhuv2_rx_init(adev, mhu, reg);
  893. else
  894. dev_err(dev, "Invalid compatible property\n");
  895. if (ret)
  896. return ret;
  897. /* Channel windows can't be 0 */
  898. BUG_ON(!mhu->windows);
  899. ret = mhuv2_parse_channels(mhu);
  900. if (ret)
  901. return ret;
  902. amba_set_drvdata(adev, mhu);
  903. ret = devm_mbox_controller_register(dev, &mhu->mbox);
  904. if (ret)
  905. dev_err(dev, "failed to register ARM MHUv2 driver %d\n", ret);
  906. return ret;
  907. }
  908. static void mhuv2_remove(struct amba_device *adev)
  909. {
  910. struct mhuv2 *mhu = amba_get_drvdata(adev);
  911. if (mhu->frame == SENDER_FRAME)
  912. writel_relaxed(0x0, &mhu->send->access_request);
  913. }
  914. static struct amba_id mhuv2_ids[] = {
  915. {
  916. /* 2.0 */
  917. .id = 0xbb0d1,
  918. .mask = 0xfffff,
  919. },
  920. {
  921. /* 2.1 */
  922. .id = 0xbb076,
  923. .mask = 0xfffff,
  924. },
  925. { 0, 0 },
  926. };
  927. MODULE_DEVICE_TABLE(amba, mhuv2_ids);
  928. static struct amba_driver mhuv2_driver = {
  929. .drv = {
  930. .name = "arm-mhuv2",
  931. },
  932. .id_table = mhuv2_ids,
  933. .probe = mhuv2_probe,
  934. .remove = mhuv2_remove,
  935. };
  936. module_amba_driver(mhuv2_driver);
  937. MODULE_LICENSE("GPL v2");
  938. MODULE_DESCRIPTION("ARM MHUv2 Driver");
  939. MODULE_AUTHOR("Viresh Kumar <[email protected]>");
  940. MODULE_AUTHOR("Tushar Khandelwal <[email protected]>");