// SPDX-License-Identifier: GPL-2.0+
/*
 * i.MX8 NWL MIPI DSI host driver
 *
 * Copyright (C) 2017 NXP
 * Copyright (C) 2020 Purism SPC
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/math64.h>
#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/sys_soc.h>
#include <linux/time64.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>

#include <video/mipi_display.h>

#include "nwl-dsi.h"

#define DRV_NAME "nwl-dsi"

/* i.MX8 NWL quirks */
/* i.MX8MQ errata E11418 */
#define E11418_HS_MODE_QUIRK	BIT(0)

#define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500)

enum transfer_direction {
	DSI_PACKET_SEND,
	DSI_PACKET_RECEIVE,
};

#define NWL_DSI_ENDPOINT_LCDIF 0
#define NWL_DSI_ENDPOINT_DCSS 1

struct nwl_dsi_transfer {
	const struct mipi_dsi_msg *msg;
	struct mipi_dsi_packet packet;
	struct completion completed;

	int status; /* status of transmission */
	enum transfer_direction direction;
	bool need_bta;
	u8 cmd;
	u16 rx_word_count;
	size_t tx_len; /* in bytes */
	size_t rx_len; /* in bytes */
};

struct nwl_dsi {
	struct drm_bridge bridge;
	struct mipi_dsi_host dsi_host;
	struct device *dev;
	struct phy *phy;
	union phy_configure_opts phy_cfg;
	unsigned int quirks;

	struct regmap *regmap;
	int irq;
	/*
	 * The DSI host controller needs this reset sequence according to NWL:
	 * 1. Deassert pclk reset to get access to DSI regs
	 * 2. Configure DSI Host and DPHY and enable DPHY
	 * 3. Deassert ESC and BYTE resets to allow host TX operations
	 * 4. Send DSI cmds to configure peripheral (handled by panel drv)
	 * 5. Deassert DPI reset so DPI receives pixels and starts sending
	 *    DSI data
	 *
	 * TODO: Since panel_bridges do their DSI setup in enable we
	 * currently have 4. and 5. swapped.
	 */
	struct reset_control *rst_byte;
	struct reset_control *rst_esc;
	struct reset_control *rst_dpi;
	struct reset_control *rst_pclk;
	struct mux_control *mux;

	/* DSI clocks */
	struct clk *phy_ref_clk;
	struct clk *rx_esc_clk;
	struct clk *tx_esc_clk;
	struct clk *core_clk;
	/*
	 * hardware bug: the i.MX8MQ needs this clock on during reset
	 * even when not using LCDIF.
	 */
	struct clk *lcdif_clk;

	/* dsi lanes */
	u32 lanes;
	enum mipi_dsi_pixel_format format;
	struct drm_display_mode mode;
	unsigned long dsi_mode_flags;
	int error;

	struct nwl_dsi_transfer *xfer;
};

static const struct regmap_config nwl_dsi_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = NWL_DSI_IRQ_MASK2,
	.name = DRV_NAME,
};

static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge)
{
	return container_of(bridge, struct nwl_dsi, bridge);
}
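
/*
 * The register accessors below latch the first regmap error in dsi->error
 * and turn subsequent accesses into no-ops, so long runs of writes can be
 * issued back to back and checked once at the end with
 * nwl_dsi_clear_error(), which returns the latched error and rearms the
 * latch.
 */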
static int nwl_dsi_clear_error(struct nwl_dsi *dsi)
{
	int ret = dsi->error;

	dsi->error = 0;
	return ret;
}

static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val)
{
	int ret;

	if (dsi->error)
		return;

	ret = regmap_write(dsi->regmap, reg, val);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev,
			      "Failed to write NWL DSI reg 0x%x: %d\n", reg,
			      ret);
		dsi->error = ret;
	}
}

static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg)
{
	unsigned int val;
	int ret;

	if (dsi->error)
		return 0;

	ret = regmap_read(dsi->regmap, reg, &val);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n",
			      reg, ret);
		dsi->error = ret;
	}
	return val;
}

static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format)
{
	switch (format) {
	case MIPI_DSI_FMT_RGB565:
		return NWL_DSI_PIXEL_FORMAT_16;
	case MIPI_DSI_FMT_RGB666:
		return NWL_DSI_PIXEL_FORMAT_18L;
	case MIPI_DSI_FMT_RGB666_PACKED:
		return NWL_DSI_PIXEL_FORMAT_18;
	case MIPI_DSI_FMT_RGB888:
		return NWL_DSI_PIXEL_FORMAT_24;
	default:
		return -EINVAL;
	}
}
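
/*
 * Unit conventions for the conversion helpers below: dsi->mode.clock is in
 * kHz and the D-PHY timings in dsi->phy_cfg are in picoseconds, so in
 * ps2bc() the product ps * kHz carries a factor of 1e-9 and dividing by
 * NSEC_PER_SEC yields byte clock cycles. ui2bc() converts unit intervals
 * (one HS bit time each) to byte clocks, and us2lp() converts microseconds
 * to LP (escape) clock cycles.
 */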
/*
 * ps2bc - Picoseconds to byte clock cycles
 */
static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
{
	u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);

	return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
				  dsi->lanes * 8ULL * NSEC_PER_SEC);
}

/*
 * ui2bc - UI time periods to byte clock cycles
 */
static u32 ui2bc(unsigned int ui)
{
	return DIV_ROUND_UP(ui, BITS_PER_BYTE);
}

/*
 * us2lp - microseconds to LP clock cycles
 */
static u32 us2lp(u32 lp_clk_rate, unsigned long us)
{
	return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC);
}

static int nwl_dsi_config_host(struct nwl_dsi *dsi)
{
	u32 cycles;
	struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy;

	if (dsi->lanes < 1 || dsi->lanes > 4)
		return -EINVAL;

	DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes);
	nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1);

	if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
		nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01);
		nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01);
	} else {
		nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00);
		nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00);
	}

	/* values in byte clock cycles */
	cycles = ui2bc(cfg->clk_pre);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
	cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
	cycles += ui2bc(cfg->clk_pre);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
	cycles = ps2bc(dsi, cfg->hs_exit);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles);

	nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01);
	nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00);
	nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00);
	nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00);
	/* In LP clock cycles */
	cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles);

	return nwl_dsi_clear_error(dsi);
}
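
/*
 * Program the DPI pixel interface: pixel format, sync polarities, video
 * mode (burst vs. sync pulses/events) and the horizontal/vertical timings
 * derived from dsi->mode.
 */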
static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
{
	u32 mode;
	int color_format;
	bool burst_mode;
	int hfront_porch, hback_porch, vfront_porch, vback_porch;
	int hsync_len, vsync_len;

	hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay;
	hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start;
	hback_porch = dsi->mode.htotal - dsi->mode.hsync_end;

	vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay;
	vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start;
	vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end;

	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock);

	color_format = nwl_dsi_get_dpi_pixel_format(dsi->format);
	if (color_format < 0) {
		DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n",
			      dsi->format);
		return color_format;
	}
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format);

	nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
	nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
	/*
	 * Adjusting input polarity based on the video mode results in
	 * a black screen so always pick active low:
	 */
	nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
		      NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
	nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
		      NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);

	burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
		     !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE);

	if (burst_mode) {
		nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE);
		nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256);
	} else {
		mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ?
				NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES :
				NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS);
		nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode);
		nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL,
			      dsi->mode.hdisplay);
	}

	nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch);
	nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch);
	nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len);

	nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
	nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1);
	nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0);
	nwl_dsi_write(dsi, NWL_DSI_VC, 0x0);

	nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay);
	nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1);
	nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch);
	nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch);

	return nwl_dsi_clear_error(dsi);
}
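
/*
 * A set bit in NWL_DSI_IRQ_MASK appears to mask the corresponding
 * interrupt, so the complement below leaves only the events the driver
 * handles unmasked: TX packet done, RX packet header received, TX FIFO
 * overflow and HS TX timeout.
 */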
static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi)
{
	u32 irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK |
				NWL_DSI_RX_PKT_HDR_RCVD_MASK |
				NWL_DSI_TX_FIFO_OVFLW_MASK |
				NWL_DSI_HS_TX_TIMEOUT_MASK);

	nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable);
	nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7);

	return nwl_dsi_clear_error(dsi);
}

static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host,
			       struct mipi_dsi_device *device)
{
	struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
	struct device *dev = dsi->dev;

	DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes,
		     device->format, device->mode_flags);

	if (device->lanes < 1 || device->lanes > 4)
		return -EINVAL;

	dsi->lanes = device->lanes;
	dsi->format = device->format;
	dsi->dsi_mode_flags = device->mode_flags;

	return 0;
}
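
/*
 * Pull a received packet out of the RX FIFO. Returns true once the
 * transfer can be completed (xfer->status then holds the received byte
 * count or a negative error) and false if more interrupts are needed:
 * the packet header and the payload arrive as separate events.
 */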
static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
{
	struct device *dev = dsi->dev;
	struct nwl_dsi_transfer *xfer = dsi->xfer;
	int err;
	u8 *payload = xfer->msg->rx_buf;
	u32 val;
	u16 word_count;
	u8 channel;
	u8 data_type;

	xfer->status = 0;

	if (xfer->rx_word_count == 0) {
		if (!(status & NWL_DSI_RX_PKT_HDR_RCVD))
			return false;
		/* Get the RX header and parse it */
		val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER);
		err = nwl_dsi_clear_error(dsi);
		if (err)
			xfer->status = err;
		word_count = NWL_DSI_WC(val);
		channel = NWL_DSI_RX_VC(val);
		data_type = NWL_DSI_RX_DT(val);

		if (channel != xfer->msg->channel) {
			DRM_DEV_ERROR(dev,
				      "[%02X] Channel mismatch (%u != %u)\n",
				      xfer->cmd, channel, xfer->msg->channel);
			xfer->status = -EINVAL;
			return true;
		}

		switch (data_type) {
		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
		case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
			if (xfer->msg->rx_len > 1) {
				/* read second byte */
				payload[1] = word_count >> 8;
				++xfer->rx_len;
			}
			fallthrough;
		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
		case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
			if (xfer->msg->rx_len > 0) {
				/* read first byte */
				payload[0] = word_count & 0xff;
				++xfer->rx_len;
			}
			xfer->status = xfer->rx_len;
			return true;
		case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
			word_count &= 0xff;
			DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n",
				      xfer->cmd, word_count);
			xfer->status = -EPROTO;
			return true;
		}

		if (word_count > xfer->msg->rx_len) {
			DRM_DEV_ERROR(dev,
				      "[%02X] Receive buffer too small: %zu (< %u)\n",
				      xfer->cmd, xfer->msg->rx_len,
				      word_count);
			xfer->status = -EINVAL;
			return true;
		}

		xfer->rx_word_count = word_count;
	} else {
		/* Set word_count from previous header read */
		word_count = xfer->rx_word_count;
	}

	/* If RX payload is not yet received, wait for it */
	if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD))
		return false;

	/* Read the RX payload */
	while (word_count >= 4) {
		val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
		payload[0] = (val >> 0) & 0xff;
		payload[1] = (val >> 8) & 0xff;
		payload[2] = (val >> 16) & 0xff;
		payload[3] = (val >> 24) & 0xff;
		payload += 4;
		xfer->rx_len += 4;
		word_count -= 4;
	}

	if (word_count > 0) {
		val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
		switch (word_count) {
		case 3:
			payload[2] = (val >> 16) & 0xff;
			++xfer->rx_len;
			fallthrough;
		case 2:
			payload[1] = (val >> 8) & 0xff;
			++xfer->rx_len;
			fallthrough;
		case 1:
			payload[0] = (val >> 0) & 0xff;
			++xfer->rx_len;
			break;
		}
	}

	xfer->status = xfer->rx_len;
	err = nwl_dsi_clear_error(dsi);
	if (err)
		xfer->status = err;

	return true;
}
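
/*
 * Called from the IRQ handler: completes the in-flight transfer, if any,
 * once its TX done or RX header/payload event has been handled.
 */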
static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status)
{
	struct nwl_dsi_transfer *xfer = dsi->xfer;
	bool end_packet = false;

	if (!xfer)
		return;

	if (xfer->direction == DSI_PACKET_SEND &&
	    status & NWL_DSI_TX_PKT_DONE) {
		xfer->status = xfer->tx_len;
		end_packet = true;
	} else if (status & NWL_DSI_DPHY_DIRECTION &&
		   ((status & (NWL_DSI_RX_PKT_HDR_RCVD |
			       NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) {
		end_packet = nwl_dsi_read_packet(dsi, status);
	}

	if (end_packet)
		complete(&xfer->completed);
}
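
/*
 * Queue a packet for transmission: the payload FIFO is filled first, then
 * writing the header word to NWL_DSI_PKT_CONTROL and kicking
 * NWL_DSI_SEND_PACKET starts the transfer. On i.MX8MQ silicon with the
 * E11418 quirk, payloads that would trigger the erratum (detected via the
 * hs_workaround checks below) are sent in HS mode even if the message
 * requested LP.
 */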
static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi)
{
	struct nwl_dsi_transfer *xfer = dsi->xfer;
	struct mipi_dsi_packet *pkt = &xfer->packet;
	const u8 *payload;
	size_t length;
	u16 word_count;
	u8 hs_mode;
	u32 val;
	u32 hs_workaround = 0;

	/* Send the payload, if any */
	length = pkt->payload_length;
	payload = pkt->payload;

	while (length >= 4) {
		val = *(u32 *)payload;
		hs_workaround |= !(val & 0xFFFF00);
		nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
		payload += 4;
		length -= 4;
	}
	/* Send the rest of the payload */
	val = 0;
	switch (length) {
	case 3:
		val |= payload[2] << 16;
		fallthrough;
	case 2:
		val |= payload[1] << 8;
		hs_workaround |= !(val & 0xFFFF00);
		fallthrough;
	case 1:
		val |= payload[0];
		nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
		break;
	}
	xfer->tx_len = pkt->payload_length;

	/*
	 * Send the header
	 * header[0] = Virtual Channel + Data Type
	 * header[1] = Word Count LSB (LP) or first param (SP)
	 * header[2] = Word Count MSB (LP) or second param (SP)
	 */
	word_count = pkt->header[1] | (pkt->header[2] << 8);
	if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) {
		DRM_DEV_DEBUG_DRIVER(dsi->dev,
				     "Using hs mode workaround for cmd 0x%x\n",
				     xfer->cmd);
		hs_mode = 1;
	} else {
		hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1;
	}
	val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) |
	      NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) |
	      NWL_DSI_BTA_TX(xfer->need_bta);
	nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val);

	/* Send packet command */
	nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1);
}
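
/*
 * Synchronous transfer path: build the MIPI packet, enable the RX escape
 * clock, start the transfer and sleep until the IRQ handler completes it
 * or NWL_DSI_MIPI_FIFO_TIMEOUT (500 ms) expires.
 */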
static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host,
				     const struct mipi_dsi_msg *msg)
{
	struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
	struct nwl_dsi_transfer xfer;
	ssize_t ret = 0;

	/* Create packet to be sent */
	dsi->xfer = &xfer;
	ret = mipi_dsi_create_packet(&xfer.packet, msg);
	if (ret < 0) {
		dsi->xfer = NULL;
		return ret;
	}

	if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM ||
	     msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM ||
	     msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM ||
	     msg->type & MIPI_DSI_DCS_READ) &&
	    msg->rx_len > 0 && msg->rx_buf)
		xfer.direction = DSI_PACKET_RECEIVE;
	else
		xfer.direction = DSI_PACKET_SEND;

	xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE);
	xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0;
	xfer.msg = msg;
	xfer.status = -ETIMEDOUT;
	xfer.rx_word_count = 0;
	xfer.rx_len = 0;
	xfer.cmd = 0x00;
	if (msg->tx_len > 0)
		xfer.cmd = ((u8 *)(msg->tx_buf))[0];
	init_completion(&xfer.completed);

	ret = clk_prepare_enable(dsi->rx_esc_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n",
			      ret);
		return ret;
	}
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n",
			     clk_get_rate(dsi->rx_esc_clk));

	/* Initiate the DSI packet transmission */
	nwl_dsi_begin_transmission(dsi);

	if (!wait_for_completion_timeout(&xfer.completed,
					 NWL_DSI_MIPI_FIFO_TIMEOUT)) {
		DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n",
			      xfer.cmd);
		ret = -ETIMEDOUT;
	} else {
		ret = xfer.status;
	}

	clk_disable_unprepare(dsi->rx_esc_clk);

	return ret;
}

static const struct mipi_dsi_host_ops nwl_dsi_host_ops = {
	.attach = nwl_dsi_host_attach,
	.transfer = nwl_dsi_host_transfer,
};

static irqreturn_t nwl_dsi_irq_handler(int irq, void *data)
{
	u32 irq_status;
	struct nwl_dsi *dsi = data;

	irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS);

	if (irq_status & NWL_DSI_TX_FIFO_OVFLW)
		DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n");

	if (irq_status & NWL_DSI_HS_TX_TIMEOUT)
		DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n");

	if (irq_status & NWL_DSI_TX_PKT_DONE ||
	    irq_status & NWL_DSI_RX_PKT_HDR_RCVD ||
	    irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)
		nwl_dsi_finish_transmission(dsi, irq_status);

	return IRQ_HANDLED;
}
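
/*
 * Step 2 of the reset-out sequence documented in struct nwl_dsi:
 * initialize and configure the DPHY, enable the TX escape clock, program
 * the host and DPI registers, power the PHY on and unmask the interrupts.
 */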
static int nwl_dsi_mode_set(struct nwl_dsi *dsi)
{
	struct device *dev = dsi->dev;
	union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
	int ret;

	if (!dsi->lanes) {
		DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes);
		return -EINVAL;
	}

	ret = phy_init(dsi->phy);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret);
		return ret;
	}

	ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to set DSI phy mode: %d\n", ret);
		goto uninit_phy;
	}

	ret = phy_configure(dsi->phy, phy_cfg);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
		goto uninit_phy;
	}

	ret = clk_prepare_enable(dsi->tx_esc_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n",
			      ret);
		goto uninit_phy;
	}
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n",
			     clk_get_rate(dsi->tx_esc_clk));

	ret = nwl_dsi_config_host(dsi);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to set up DSI: %d\n", ret);
		goto disable_clock;
	}

	ret = nwl_dsi_config_dpi(dsi);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to set up DPI: %d\n", ret);
		goto disable_clock;
	}

	ret = phy_power_on(dsi->phy);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret);
		goto disable_clock;
	}

	ret = nwl_dsi_init_interrupts(dsi);
	if (ret < 0)
		goto power_off_phy;

	return ret;

power_off_phy:
	phy_power_off(dsi->phy);
disable_clock:
	clk_disable_unprepare(dsi->tx_esc_clk);
uninit_phy:
	phy_exit(dsi->phy);

	return ret;
}

static int nwl_dsi_disable(struct nwl_dsi *dsi)
{
	struct device *dev = dsi->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n");

	phy_power_off(dsi->phy);
	phy_exit(dsi->phy);

	/* Disabling the clock before the phy breaks enabling dsi again */
	clk_disable_unprepare(dsi->tx_esc_clk);

	return 0;
}

static void
nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
			      struct drm_bridge_state *old_bridge_state)
{
	struct nwl_dsi *dsi = bridge_to_dsi(bridge);
	int ret;

	nwl_dsi_disable(dsi);

	ret = reset_control_assert(dsi->rst_dpi);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret);
		return;
	}
	ret = reset_control_assert(dsi->rst_byte);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret);
		return;
	}
	ret = reset_control_assert(dsi->rst_esc);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret);
		return;
	}
	ret = reset_control_assert(dsi->rst_pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret);
		return;
	}

	clk_disable_unprepare(dsi->core_clk);
	clk_disable_unprepare(dsi->lcdif_clk);

	pm_runtime_put(dsi->dev);
}

static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
				   const struct drm_display_mode *mode,
				   union phy_configure_opts *phy_opts)
{
	unsigned long rate;
	int ret;

	if (dsi->lanes < 1 || dsi->lanes > 4)
		return -EINVAL;

	/*
	 * So far the DPHY spec minimal timings work for both mixel
	 * dphy and nwl dsi host
	 */
	ret = phy_mipi_dphy_get_default_config(mode->clock * 1000,
		mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes,
		&phy_opts->mipi_dphy);
	if (ret < 0)
		return ret;

	rate = clk_get_rate(dsi->tx_esc_clk);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate);
	phy_opts->mipi_dphy.lp_clk_rate = rate;

	return 0;
}
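
/*
 * mode->clock is in kHz, so both bounds below compare the total link rate
 * in kbit/s against per-lane limits.
 */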
static enum drm_mode_status
nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
			  const struct drm_display_info *info,
			  const struct drm_display_mode *mode)
{
	struct nwl_dsi *dsi = bridge_to_dsi(bridge);
	int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);

	if (mode->clock * bpp > 15000000 * dsi->lanes)
		return MODE_CLOCK_HIGH;

	if (mode->clock * bpp < 80000 * dsi->lanes)
		return MODE_CLOCK_LOW;

	return MODE_OK;
}

static int nwl_dsi_bridge_atomic_check(struct drm_bridge *bridge,
				       struct drm_bridge_state *bridge_state,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;

	/* At least LCDIF + NWL needs active high sync */
	adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
	adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);

	/*
	 * Do a full modeset if crtc_state->active is changed to be true.
	 * This ensures our ->mode_set() is called to get the DSI controller
	 * and the PHY ready to send DCS commands, when only the connector's
	 * DPMS is brought out of "Off" status.
	 */
	if (crtc_state->active_changed && crtc_state->active)
		crtc_state->mode_changed = true;

	return 0;
}
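
/*
 * Performs steps 1-3 of the reset-out sequence documented in struct
 * nwl_dsi; step 5 (deasserting the DPI reset) happens in atomic_enable
 * (see the TODO in struct nwl_dsi about steps 4 and 5 being swapped).
 */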
static void
nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
			const struct drm_display_mode *mode,
			const struct drm_display_mode *adjusted_mode)
{
	struct nwl_dsi *dsi = bridge_to_dsi(bridge);
	struct device *dev = dsi->dev;
	union phy_configure_opts new_cfg;
	unsigned long phy_ref_rate;
	int ret;

	ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg);
	if (ret < 0)
		return;

	phy_ref_rate = clk_get_rate(dsi->phy_ref_clk);
	DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate);

	/* Save the new desired phy config */
	memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg));

	drm_mode_copy(&dsi->mode, adjusted_mode);
	drm_mode_debug_printmodeline(adjusted_mode);

	if (pm_runtime_resume_and_get(dev) < 0)
		return;

	if (clk_prepare_enable(dsi->lcdif_clk) < 0)
		goto runtime_put;
	if (clk_prepare_enable(dsi->core_clk) < 0)
		goto runtime_put;

	/* Step 1 from DSI reset-out instructions */
	ret = reset_control_deassert(dsi->rst_pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret);
		goto runtime_put;
	}

	/* Step 2 from DSI reset-out instructions */
	nwl_dsi_mode_set(dsi);

	/* Step 3 from DSI reset-out instructions */
	ret = reset_control_deassert(dsi->rst_esc);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret);
		goto runtime_put;
	}
	ret = reset_control_deassert(dsi->rst_byte);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret);
		goto runtime_put;
	}

	return;

runtime_put:
	pm_runtime_put_sync(dev);
}

static void
nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
			     struct drm_bridge_state *old_bridge_state)
{
	struct nwl_dsi *dsi = bridge_to_dsi(bridge);
	int ret;

	/* Step 5 from DSI reset-out instructions */
	ret = reset_control_deassert(dsi->rst_dpi);
	if (ret < 0)
		DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret);
}

static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct nwl_dsi *dsi = bridge_to_dsi(bridge);
	struct drm_bridge *panel_bridge;

	panel_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, 1, 0);
	if (IS_ERR(panel_bridge))
		return PTR_ERR(panel_bridge);

	return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
}

static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
						 struct drm_bridge_state *bridge_state,
						 struct drm_crtc_state *crtc_state,
						 struct drm_connector_state *conn_state,
						 u32 output_fmt,
						 unsigned int *num_input_fmts)
{
	u32 *input_fmts, input_fmt;

	*num_input_fmts = 0;

	switch (output_fmt) {
	/* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */
	case MEDIA_BUS_FMT_FIXED:
		input_fmt = MEDIA_BUS_FMT_RGB888_1X24;
		break;
	case MEDIA_BUS_FMT_RGB888_1X24:
	case MEDIA_BUS_FMT_RGB666_1X18:
	case MEDIA_BUS_FMT_RGB565_1X16:
		input_fmt = output_fmt;
		break;
	default:
		return NULL;
	}

	input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;
	input_fmts[0] = input_fmt;
	*num_input_fmts = 1;

	return input_fmts;
}

static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_check = nwl_dsi_bridge_atomic_check,
	.atomic_enable = nwl_dsi_bridge_atomic_enable,
	.atomic_disable = nwl_dsi_bridge_atomic_disable,
	.atomic_get_input_bus_fmts = nwl_bridge_atomic_get_input_bus_fmts,
	.mode_set = nwl_dsi_bridge_mode_set,
	.mode_valid = nwl_dsi_bridge_mode_valid,
	.attach = nwl_dsi_bridge_attach,
};

static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
{
	struct platform_device *pdev = to_platform_device(dsi->dev);
	struct clk *clk;
	void __iomem *base;
	int ret;

	dsi->phy = devm_phy_get(dsi->dev, "dphy");
	if (IS_ERR(dsi->phy)) {
		ret = PTR_ERR(dsi->phy);
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret);
		return ret;
	}

	clk = devm_clk_get(dsi->dev, "lcdif");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n",
			      ret);
		return ret;
	}
	dsi->lcdif_clk = clk;

	clk = devm_clk_get(dsi->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n",
			      ret);
		return ret;
	}
	dsi->core_clk = clk;

	clk = devm_clk_get(dsi->dev, "phy_ref");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n",
			      ret);
		return ret;
	}
	dsi->phy_ref_clk = clk;

	clk = devm_clk_get(dsi->dev, "rx_esc");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n",
			      ret);
		return ret;
	}
	dsi->rx_esc_clk = clk;

	clk = devm_clk_get(dsi->dev, "tx_esc");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n",
			      ret);
		return ret;
	}
	dsi->tx_esc_clk = clk;

	dsi->mux = devm_mux_control_get(dsi->dev, NULL);
	if (IS_ERR(dsi->mux)) {
		ret = PTR_ERR(dsi->mux);
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret);
		return ret;
	}

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dsi->regmap =
		devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config);
	if (IS_ERR(dsi->regmap)) {
		ret = PTR_ERR(dsi->regmap);
		DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n",
			      ret);
		return ret;
	}

	dsi->irq = platform_get_irq(pdev, 0);
	if (dsi->irq < 0) {
		DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n",
			      dsi->irq);
		return dsi->irq;
	}

	dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk");
	if (IS_ERR(dsi->rst_pclk)) {
		DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n",
			      PTR_ERR(dsi->rst_pclk));
		return PTR_ERR(dsi->rst_pclk);
	}
	dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte");
	if (IS_ERR(dsi->rst_byte)) {
		DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n",
			      PTR_ERR(dsi->rst_byte));
		return PTR_ERR(dsi->rst_byte);
	}
	dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc");
	if (IS_ERR(dsi->rst_esc)) {
		DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n",
			      PTR_ERR(dsi->rst_esc));
		return PTR_ERR(dsi->rst_esc);
	}
	dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi");
	if (IS_ERR(dsi->rst_dpi)) {
		DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n",
			      PTR_ERR(dsi->rst_dpi));
		return PTR_ERR(dsi->rst_dpi);
	}
	return 0;
}
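
/*
 * The input mux routes either the LCDIF or the DCSS display controller
 * into the DSI host; pick whichever one is wired up as the remote node of
 * port 0 in the device tree, checking the LCDIF endpoint first.
 */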
static int nwl_dsi_select_input(struct nwl_dsi *dsi)
{
	struct device_node *remote;
	u32 use_dcss = 1;
	int ret;

	remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
					  NWL_DSI_ENDPOINT_LCDIF);
	if (remote) {
		use_dcss = 0;
	} else {
		remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
						  NWL_DSI_ENDPOINT_DCSS);
		if (!remote) {
			DRM_DEV_ERROR(dsi->dev,
				      "No valid input endpoint found\n");
			return -EINVAL;
		}
	}

	DRM_DEV_INFO(dsi->dev, "Using %s as input source\n",
		     (use_dcss) ? "DCSS" : "LCDIF");
	ret = mux_control_try_select(dsi->mux, use_dcss);
	if (ret < 0)
		DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret);

	of_node_put(remote);
	return ret;
}

static int nwl_dsi_deselect_input(struct nwl_dsi *dsi)
{
	int ret;

	ret = mux_control_deselect(dsi->mux);
	if (ret < 0)
		DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret);

	return ret;
}

static const struct drm_bridge_timings nwl_dsi_timings = {
	.input_bus_flags = DRM_BUS_FLAG_DE_LOW,
};

static const struct of_device_id nwl_dsi_dt_ids[] = {
	{ .compatible = "fsl,imx8mq-nwl-dsi", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids);

static const struct soc_device_attribute nwl_dsi_quirks_match[] = {
	{ .soc_id = "i.MX8MQ", .revision = "2.0",
	  .data = (void *)E11418_HS_MODE_QUIRK },
	{ /* sentinel */ }
};

static int nwl_dsi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct soc_device_attribute *attr;
	struct nwl_dsi *dsi;
	int ret;

	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	dsi->dev = dev;

	ret = nwl_dsi_parse_dt(dsi);
	if (ret)
		return ret;

	ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0,
			       dev_name(dev), dsi);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq,
			      ret);
		return ret;
	}

	dsi->dsi_host.ops = &nwl_dsi_host_ops;
	dsi->dsi_host.dev = dev;
	ret = mipi_dsi_host_register(&dsi->dsi_host);
	if (ret) {
		DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
		return ret;
	}

	attr = soc_device_match(nwl_dsi_quirks_match);
	if (attr)
		dsi->quirks = (uintptr_t)attr->data;

	dsi->bridge.driver_private = dsi;
	dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
	dsi->bridge.of_node = dev->of_node;
	dsi->bridge.timings = &nwl_dsi_timings;

	dev_set_drvdata(dev, dsi);
	pm_runtime_enable(dev);

	ret = nwl_dsi_select_input(dsi);
	if (ret < 0) {
		pm_runtime_disable(dev);
		mipi_dsi_host_unregister(&dsi->dsi_host);
		return ret;
	}

	drm_bridge_add(&dsi->bridge);
	return 0;
}

static int nwl_dsi_remove(struct platform_device *pdev)
{
	struct nwl_dsi *dsi = platform_get_drvdata(pdev);

	nwl_dsi_deselect_input(dsi);
	mipi_dsi_host_unregister(&dsi->dsi_host);
	drm_bridge_remove(&dsi->bridge);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver nwl_dsi_driver = {
	.probe		= nwl_dsi_probe,
	.remove		= nwl_dsi_remove,
	.driver		= {
		.of_match_table = nwl_dsi_dt_ids,
		.name	= DRV_NAME,
	},
};

module_platform_driver(nwl_dsi_driver);

MODULE_AUTHOR("NXP Semiconductor");
MODULE_AUTHOR("Purism SPC");
MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver");
MODULE_LICENSE("GPL"); /* GPLv2 or later */