  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Actions Semi Owl SoCs SD/MMC driver
  4. *
  5. * Copyright (c) 2014 Actions Semi Inc.
  6. * Copyright (c) 2019 Manivannan Sadhasivam <[email protected]>
  7. *
  8. * TODO: SDIO support
  9. */
  10. #include <linux/clk.h>
  11. #include <linux/delay.h>
  12. #include <linux/dmaengine.h>
  13. #include <linux/dma-direction.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/mmc/host.h>
  17. #include <linux/mmc/slot-gpio.h>
  18. #include <linux/module.h>
  19. #include <linux/of_platform.h>
  20. #include <linux/reset.h>
  21. #include <linux/spinlock.h>
  22. /*
  23. * SDC registers
  24. */
  25. #define OWL_REG_SD_EN 0x0000
  26. #define OWL_REG_SD_CTL 0x0004
  27. #define OWL_REG_SD_STATE 0x0008
  28. #define OWL_REG_SD_CMD 0x000c
  29. #define OWL_REG_SD_ARG 0x0010
  30. #define OWL_REG_SD_RSPBUF0 0x0014
  31. #define OWL_REG_SD_RSPBUF1 0x0018
  32. #define OWL_REG_SD_RSPBUF2 0x001c
  33. #define OWL_REG_SD_RSPBUF3 0x0020
  34. #define OWL_REG_SD_RSPBUF4 0x0024
  35. #define OWL_REG_SD_DAT 0x0028
  36. #define OWL_REG_SD_BLK_SIZE 0x002c
  37. #define OWL_REG_SD_BLK_NUM 0x0030
  38. #define OWL_REG_SD_BUF_SIZE 0x0034
  39. /* SD_EN Bits */
  40. #define OWL_SD_EN_RANE BIT(31)
  41. #define OWL_SD_EN_RAN_SEED(x) (((x) & 0x3f) << 24)
  42. #define OWL_SD_EN_S18EN BIT(12)
  43. #define OWL_SD_EN_RESE BIT(10)
  44. #define OWL_SD_EN_DAT1_S BIT(9)
  45. #define OWL_SD_EN_CLK_S BIT(8)
  46. #define OWL_SD_ENABLE BIT(7)
  47. #define OWL_SD_EN_BSEL BIT(6)
  48. #define OWL_SD_EN_SDIOEN BIT(3)
  49. #define OWL_SD_EN_DDREN BIT(2)
  50. #define OWL_SD_EN_DATAWID(x) (((x) & 0x3) << 0)
  51. /* SD_CTL Bits */
  52. #define OWL_SD_CTL_TOUTEN BIT(31)
  53. #define OWL_SD_CTL_TOUTCNT(x) (((x) & 0x7f) << 24)
  54. #define OWL_SD_CTL_DELAY_MSK GENMASK(23, 16)
  55. #define OWL_SD_CTL_RDELAY(x) (((x) & 0xf) << 20)
  56. #define OWL_SD_CTL_WDELAY(x) (((x) & 0xf) << 16)
  57. #define OWL_SD_CTL_CMDLEN BIT(13)
  58. #define OWL_SD_CTL_SCC BIT(12)
  59. #define OWL_SD_CTL_TCN(x) (((x) & 0xf) << 8)
  60. #define OWL_SD_CTL_TS BIT(7)
  61. #define OWL_SD_CTL_LBE BIT(6)
  62. #define OWL_SD_CTL_C7EN BIT(5)
  63. #define OWL_SD_CTL_TM(x) (((x) & 0xf) << 0)
  64. #define OWL_SD_DELAY_LOW_CLK 0x0f
  65. #define OWL_SD_DELAY_MID_CLK 0x0a
  66. #define OWL_SD_DELAY_HIGH_CLK 0x09
  67. #define OWL_SD_RDELAY_DDR50 0x0a
  68. #define OWL_SD_WDELAY_DDR50 0x08
  69. /* SD_STATE Bits */
  70. #define OWL_SD_STATE_DAT1BS BIT(18)
  71. #define OWL_SD_STATE_SDIOB_P BIT(17)
  72. #define OWL_SD_STATE_SDIOB_EN BIT(16)
  73. #define OWL_SD_STATE_TOUTE BIT(15)
  74. #define OWL_SD_STATE_BAEP BIT(14)
  75. #define OWL_SD_STATE_MEMRDY BIT(12)
  76. #define OWL_SD_STATE_CMDS BIT(11)
  77. #define OWL_SD_STATE_DAT1AS BIT(10)
  78. #define OWL_SD_STATE_SDIOA_P BIT(9)
  79. #define OWL_SD_STATE_SDIOA_EN BIT(8)
  80. #define OWL_SD_STATE_DAT0S BIT(7)
  81. #define OWL_SD_STATE_TEIE BIT(6)
  82. #define OWL_SD_STATE_TEI BIT(5)
  83. #define OWL_SD_STATE_CLNR BIT(4)
  84. #define OWL_SD_STATE_CLC BIT(3)
  85. #define OWL_SD_STATE_WC16ER BIT(2)
  86. #define OWL_SD_STATE_RC16ER BIT(1)
  87. #define OWL_SD_STATE_CRC7ER BIT(0)
  88. #define OWL_CMD_TIMEOUT_MS 30000
/*
 * Per-controller driver state, embedded in the mmc_host private area.
 */
struct owl_mmc_host {
	struct device *dev;
	struct reset_control *reset;	/* SDC module reset line */
	void __iomem *base;		/* mapped SDC register block */
	struct clk *clk;		/* SDC module clock */
	struct completion sdc_complete;	/* signalled by transfer-end IRQ */
	spinlock_t lock;		/* serializes SD_STATE access in IRQ */
	int irq;
	u32 clock;			/* last bus clock requested via ios */
	bool ddr_50;			/* true when DDR50 timing selected */
	enum dma_data_direction dma_dir;
	struct dma_chan *dma;		/* external DMA channel ("mmc") */
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config dma_cfg;
	struct completion dma_complete;	/* signalled by DMA callback */
	struct mmc_host *mmc;
	struct mmc_request *mrq;	/* in-flight request, NULL when idle */
	struct mmc_command *cmd;
	struct mmc_data *data;
};
  109. static void owl_mmc_update_reg(void __iomem *reg, unsigned int val, bool state)
  110. {
  111. unsigned int regval;
  112. regval = readl(reg);
  113. if (state)
  114. regval |= val;
  115. else
  116. regval &= ~val;
  117. writel(regval, reg);
  118. }
  119. static irqreturn_t owl_irq_handler(int irq, void *devid)
  120. {
  121. struct owl_mmc_host *owl_host = devid;
  122. u32 state;
  123. spin_lock(&owl_host->lock);
  124. state = readl(owl_host->base + OWL_REG_SD_STATE);
  125. if (state & OWL_SD_STATE_TEI) {
  126. state = readl(owl_host->base + OWL_REG_SD_STATE);
  127. state |= OWL_SD_STATE_TEI;
  128. writel(state, owl_host->base + OWL_REG_SD_STATE);
  129. complete(&owl_host->sdc_complete);
  130. }
  131. spin_unlock(&owl_host->lock);
  132. return IRQ_HANDLED;
  133. }
  134. static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
  135. {
  136. struct mmc_request *mrq = owl_host->mrq;
  137. struct mmc_data *data = mrq->data;
  138. /* Should never be NULL */
  139. WARN_ON(!mrq);
  140. owl_host->mrq = NULL;
  141. if (data)
  142. dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
  143. owl_host->dma_dir);
  144. /* Finally finish request */
  145. mmc_request_done(owl_host->mmc, mrq);
  146. }
/*
 * owl_mmc_send_cmd() - issue one command (optionally with data) to the SDC.
 *
 * Maps the MMC response type onto the controller's transfer-mode (TM)
 * field, writes the argument and opcode, then starts the transfer by
 * setting OWL_SD_CTL_TS.
 *
 * For data commands this returns immediately after starting the
 * transfer; owl_mmc_request() waits for completion.  For non-data
 * commands it waits for the transfer-end interrupt itself, then checks
 * the error state bits and reads back the response registers.  Errors
 * are reported through cmd->error.
 */
static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
			     struct mmc_command *cmd,
			     struct mmc_data *data)
{
	unsigned long timeout;
	u32 mode, state, resp[2];
	u32 cmd_rsp_mask = 0;

	init_completion(&owl_host->sdc_complete);

	/* Select transfer mode and the error bits to check afterwards */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		mode = OWL_SD_CTL_TM(0);
		break;

	case MMC_RSP_R1:
		if (data) {
			/* TM 4/5: R1 response with data read / data write */
			if (data->flags & MMC_DATA_READ)
				mode = OWL_SD_CTL_TM(4);
			else
				mode = OWL_SD_CTL_TM(5);
		} else {
			mode = OWL_SD_CTL_TM(1);
		}
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R1B:
		mode = OWL_SD_CTL_TM(3);
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R2:
		mode = OWL_SD_CTL_TM(2);
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R3:
		/* R3 carries no CRC, so only "no response" is an error */
		mode = OWL_SD_CTL_TM(1);
		cmd_rsp_mask = OWL_SD_STATE_CLNR;
		break;

	default:
		dev_warn(owl_host->dev, "Unknown MMC command\n");
		cmd->error = -EINVAL;
		return;
	}

	/* Keep current WDELAY and RDELAY */
	mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));

	/* Start to send corresponding command type */
	writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
	writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);

	/* Set LBE to send clk at the end of last read block */
	if (data) {
		/* 0x64000000 == OWL_SD_CTL_TOUTCNT(0x64) — TODO confirm intent */
		mode |= (OWL_SD_CTL_TS | OWL_SD_CTL_LBE | 0x64000000);
	} else {
		mode &= ~(OWL_SD_CTL_TOUTEN | OWL_SD_CTL_LBE);
		mode |= OWL_SD_CTL_TS;
	}

	owl_host->cmd = cmd;

	/* Start transfer */
	writel(mode, owl_host->base + OWL_REG_SD_CTL);

	/* Data transfers are waited for by the caller, not here */
	if (data)
		return;

	/* Honour the core's busy timeout when given, else use our default */
	timeout = msecs_to_jiffies(cmd->busy_timeout ? cmd->busy_timeout :
				   OWL_CMD_TIMEOUT_MS);

	if (!wait_for_completion_timeout(&owl_host->sdc_complete, timeout)) {
		dev_err(owl_host->dev, "CMD interrupt timeout\n");
		cmd->error = -ETIMEDOUT;
		return;
	}

	state = readl(owl_host->base + OWL_REG_SD_STATE);
	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		if (cmd_rsp_mask & state) {
			if (state & OWL_SD_STATE_CLNR) {
				dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
				cmd->error = -EILSEQ;
				return;
			}

			if (state & OWL_SD_STATE_CRC7ER) {
				dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
				cmd->error = -EILSEQ;
				return;
			}
		}

		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			/* 136-bit response spread across RSPBUF0..3 */
			cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
			cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
			cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
			cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
		} else {
			/*
			 * 48-bit response: the shifts strip the framing
			 * bits/CRC spread over the two response words.
			 */
			resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
			resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
			cmd->resp[0] = resp[1] << 24 | resp[0] >> 8;
			cmd->resp[1] = resp[1] >> 8;
		}
	}
}
  238. static void owl_mmc_dma_complete(void *param)
  239. {
  240. struct owl_mmc_host *owl_host = param;
  241. struct mmc_data *data = owl_host->data;
  242. if (data)
  243. complete(&owl_host->dma_complete);
  244. }
  245. static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
  246. struct mmc_data *data)
  247. {
  248. u32 total;
  249. owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL,
  250. true);
  251. writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
  252. writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);
  253. total = data->blksz * data->blocks;
  254. if (total < 512)
  255. writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
  256. else
  257. writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);
  258. if (data->flags & MMC_DATA_WRITE) {
  259. owl_host->dma_dir = DMA_TO_DEVICE;
  260. owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
  261. } else {
  262. owl_host->dma_dir = DMA_FROM_DEVICE;
  263. owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
  264. }
  265. dma_map_sg(owl_host->dma->device->dev, data->sg,
  266. data->sg_len, owl_host->dma_dir);
  267. dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
  268. owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
  269. data->sg_len,
  270. owl_host->dma_cfg.direction,
  271. DMA_PREP_INTERRUPT |
  272. DMA_CTRL_ACK);
  273. if (!owl_host->desc) {
  274. dev_err(owl_host->dev, "Can't prepare slave sg\n");
  275. return -EBUSY;
  276. }
  277. owl_host->data = data;
  278. owl_host->desc->callback = owl_mmc_dma_complete;
  279. owl_host->desc->callback_param = (void *)owl_host;
  280. data->error = 0;
  281. return 0;
  282. }
/*
 * owl_mmc_request() - mmc_host_ops .request handler.
 *
 * Prepares and kicks off DMA for data requests, issues the command,
 * then waits for both the SDC transfer-end interrupt and the DMA
 * completion before sending any stop command.  On every path (success
 * and error alike) execution reaches err_out, so the request is always
 * completed via owl_mmc_finish_request(), which also unmaps the DMA
 * buffers.
 */
static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct owl_mmc_host *owl_host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	int ret;

	owl_host->mrq = mrq;
	if (mrq->data) {
		ret = owl_mmc_prepare_data(owl_host, data);
		if (ret < 0) {
			data->error = ret;
			goto err_out;
		}

		init_completion(&owl_host->dma_complete);
		dmaengine_submit(owl_host->desc);
		dma_async_issue_pending(owl_host->dma);
	}

	owl_mmc_send_cmd(owl_host, mrq->cmd, data);

	if (data) {
		/* First wait for the controller's transfer-end IRQ ... */
		if (!wait_for_completion_timeout(&owl_host->sdc_complete,
						 10 * HZ)) {
			dev_err(owl_host->dev, "CMD interrupt timeout\n");
			mrq->cmd->error = -ETIMEDOUT;
			dmaengine_terminate_all(owl_host->dma);
			goto err_out;
		}

		/* ... then for the DMA engine to finish moving the data */
		if (!wait_for_completion_timeout(&owl_host->dma_complete,
						 5 * HZ)) {
			dev_err(owl_host->dev, "DMA interrupt timeout\n");
			mrq->cmd->error = -ETIMEDOUT;
			dmaengine_terminate_all(owl_host->dma);
			goto err_out;
		}

		if (data->stop)
			owl_mmc_send_cmd(owl_host, data->stop, NULL);

		data->bytes_xfered = data->blocks * data->blksz;
	}

err_out:
	/* Reached on success too: complete the request unconditionally */
	owl_mmc_finish_request(owl_host);
}
  322. static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
  323. unsigned int rate)
  324. {
  325. unsigned long clk_rate;
  326. int ret;
  327. u32 reg;
  328. reg = readl(owl_host->base + OWL_REG_SD_CTL);
  329. reg &= ~OWL_SD_CTL_DELAY_MSK;
  330. /* Set RDELAY and WDELAY based on the clock */
  331. if (rate <= 1000000) {
  332. writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_LOW_CLK) |
  333. OWL_SD_CTL_WDELAY(OWL_SD_DELAY_LOW_CLK),
  334. owl_host->base + OWL_REG_SD_CTL);
  335. } else if ((rate > 1000000) && (rate <= 26000000)) {
  336. writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_MID_CLK) |
  337. OWL_SD_CTL_WDELAY(OWL_SD_DELAY_MID_CLK),
  338. owl_host->base + OWL_REG_SD_CTL);
  339. } else if ((rate > 26000000) && (rate <= 52000000) && !owl_host->ddr_50) {
  340. writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_HIGH_CLK) |
  341. OWL_SD_CTL_WDELAY(OWL_SD_DELAY_HIGH_CLK),
  342. owl_host->base + OWL_REG_SD_CTL);
  343. /* DDR50 mode has special delay chain */
  344. } else if ((rate > 26000000) && (rate <= 52000000) && owl_host->ddr_50) {
  345. writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_RDELAY_DDR50) |
  346. OWL_SD_CTL_WDELAY(OWL_SD_WDELAY_DDR50),
  347. owl_host->base + OWL_REG_SD_CTL);
  348. } else {
  349. dev_err(owl_host->dev, "SD clock rate not supported\n");
  350. return -EINVAL;
  351. }
  352. clk_rate = clk_round_rate(owl_host->clk, rate << 1);
  353. ret = clk_set_rate(owl_host->clk, clk_rate);
  354. return ret;
  355. }
  356. static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios)
  357. {
  358. if (!ios->clock)
  359. return;
  360. owl_host->clock = ios->clock;
  361. owl_mmc_set_clk_rate(owl_host, ios->clock);
  362. }
  363. static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host,
  364. struct mmc_ios *ios)
  365. {
  366. u32 reg;
  367. reg = readl(owl_host->base + OWL_REG_SD_EN);
  368. reg &= ~0x03;
  369. switch (ios->bus_width) {
  370. case MMC_BUS_WIDTH_1:
  371. break;
  372. case MMC_BUS_WIDTH_4:
  373. reg |= OWL_SD_EN_DATAWID(1);
  374. break;
  375. case MMC_BUS_WIDTH_8:
  376. reg |= OWL_SD_EN_DATAWID(2);
  377. break;
  378. }
  379. writel(reg, owl_host->base + OWL_REG_SD_EN);
  380. }
/* Pulse the SDC reset line to put the controller in a known state. */
static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
{
	reset_control_assert(owl_host->reset);
	/* 20us assert hold time — presumably from vendor code; TODO confirm */
	udelay(20);
	reset_control_deassert(owl_host->reset);
}
/*
 * Power-on init: enable the transfer-end interrupt and clock out the
 * card initialization sequence (TM(8) appears to be a "send init clk"
 * mode — inherited from vendor code; TODO confirm against datasheet).
 * Waits up to 1s for the transfer-end IRQ; a timeout is only logged.
 */
static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
{
	u32 mode;

	init_completion(&owl_host->sdc_complete);

	/* Enable transfer end IRQ */
	owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
			   OWL_SD_STATE_TEIE, true);

	/* Send init clk, keeping the current WDELAY/RDELAY bits */
	mode = (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
	mode |= OWL_SD_CTL_TS | OWL_SD_CTL_TCN(5) | OWL_SD_CTL_TM(8);
	writel(mode, owl_host->base + OWL_REG_SD_CTL);

	if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) {
		dev_err(owl_host->dev, "CMD interrupt timeout\n");
		return;
	}
}
/*
 * owl_mmc_set_ios() - mmc_host_ops .set_ios handler.
 *
 * Handles power-state transitions, clock rate, bus width and DDR50
 * selection.  Note MMC_POWER_OFF returns early: clock/bus-width setup
 * is skipped once the module clock has been gated.
 */
static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct owl_mmc_host *owl_host = mmc_priv(mmc);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		dev_dbg(owl_host->dev, "Powering card up\n");

		/* Reset the SDC controller to clear all previous states */
		owl_mmc_ctr_reset(owl_host);
		clk_prepare_enable(owl_host->clk);
		writel(OWL_SD_ENABLE | OWL_SD_EN_RESE,
		       owl_host->base + OWL_REG_SD_EN);
		break;

	case MMC_POWER_ON:
		dev_dbg(owl_host->dev, "Powering card on\n");
		owl_mmc_power_on(owl_host);
		break;

	case MMC_POWER_OFF:
		dev_dbg(owl_host->dev, "Powering card off\n");
		clk_disable_unprepare(owl_host->clk);
		return;

	default:
		dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
		break;
	}

	/* Only reprogram the clock when the requested rate changed */
	if (ios->clock != owl_host->clock)
		owl_mmc_set_clk(owl_host, ios);

	owl_mmc_set_bus_width(owl_host, ios);

	/* Enable DDR mode if requested */
	if (ios->timing == MMC_TIMING_UHS_DDR50) {
		owl_host->ddr_50 = true;
		owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
				   OWL_SD_EN_DDREN, true);
	} else {
		owl_host->ddr_50 = false;
	}
}
  439. static int owl_mmc_start_signal_voltage_switch(struct mmc_host *mmc,
  440. struct mmc_ios *ios)
  441. {
  442. struct owl_mmc_host *owl_host = mmc_priv(mmc);
  443. /* It is enough to change the pad ctrl bit for voltage switch */
  444. switch (ios->signal_voltage) {
  445. case MMC_SIGNAL_VOLTAGE_330:
  446. owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
  447. OWL_SD_EN_S18EN, false);
  448. break;
  449. case MMC_SIGNAL_VOLTAGE_180:
  450. owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
  451. OWL_SD_EN_S18EN, true);
  452. break;
  453. default:
  454. return -ENOTSUPP;
  455. }
  456. return 0;
  457. }
/* MMC core callbacks; card-detect and write-protect come from GPIOs. */
static const struct mmc_host_ops owl_mmc_ops = {
	.request	= owl_mmc_request,
	.set_ios	= owl_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.start_signal_voltage_switch = owl_mmc_start_signal_voltage_switch,
};
/*
 * owl_mmc_probe() - platform driver probe.
 *
 * Allocates the mmc_host, maps registers, acquires clock/reset/DMA/IRQ
 * resources and registers with the MMC core.  Resources obtained via
 * devm_* are released automatically; the DMA channel and the mmc_host
 * itself are released manually on the goto error paths.
 */
static int owl_mmc_probe(struct platform_device *pdev)
{
	struct owl_mmc_host *owl_host;
	struct mmc_host *mmc;
	struct resource *res;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "mmc alloc host failed\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, mmc);

	owl_host = mmc_priv(mmc);
	owl_host->dev = &pdev->dev;
	owl_host->mmc = mmc;
	spin_lock_init(&owl_host->lock);

	/* Map the SDC register block */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	owl_host->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(owl_host->base)) {
		ret = PTR_ERR(owl_host->base);
		goto err_free_host;
	}

	owl_host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(owl_host->clk)) {
		dev_err(&pdev->dev, "No clock defined\n");
		ret = PTR_ERR(owl_host->clk);
		goto err_free_host;
	}

	owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(owl_host->reset)) {
		dev_err(&pdev->dev, "Could not get reset control\n");
		ret = PTR_ERR(owl_host->reset);
		goto err_free_host;
	}

	/* Controller limits: 512-byte blocks, up to 256 KiB per request */
	mmc->ops = &owl_mmc_ops;
	mmc->max_blk_count = 512;
	mmc->max_blk_size = 512;
	mmc->max_segs = 256;
	mmc->max_seg_size = 262144;
	mmc->max_req_size = 262144;
	/* 100kHz ~ 52MHz */
	mmc->f_min = 100000;
	mmc->f_max = 52000000;
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_4_BIT_DATA;
	mmc->caps2 = (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_NO_SDIO);
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 |
			 MMC_VDD_165_195;

	/* Let the device tree add caps, cd/ro GPIOs, bus width, etc. */
	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_free_host;

	/* 32-bit DMA addressing for the external DMA engine */
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
	if (IS_ERR(owl_host->dma)) {
		dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
		ret = PTR_ERR(owl_host->dma);
		goto err_free_host;
	}

	dev_info(&pdev->dev, "Using %s for DMA transfers\n",
		 dma_chan_name(owl_host->dma));

	/* Both directions target the 32-bit SD data FIFO register */
	owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
	owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
	owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	owl_host->dma_cfg.device_fc = false;

	owl_host->irq = platform_get_irq(pdev, 0);
	if (owl_host->irq < 0) {
		ret = owl_host->irq;
		goto err_release_channel;
	}

	ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
			       0, dev_name(&pdev->dev), owl_host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq %d\n",
			owl_host->irq);
		goto err_release_channel;
	}

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to add host\n");
		goto err_release_channel;
	}

	dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");

	return 0;

err_release_channel:
	dma_release_channel(owl_host->dma);
err_free_host:
	mmc_free_host(mmc);

	return ret;
}
  556. static int owl_mmc_remove(struct platform_device *pdev)
  557. {
  558. struct mmc_host *mmc = platform_get_drvdata(pdev);
  559. struct owl_mmc_host *owl_host = mmc_priv(mmc);
  560. mmc_remove_host(mmc);
  561. disable_irq(owl_host->irq);
  562. dma_release_channel(owl_host->dma);
  563. mmc_free_host(mmc);
  564. return 0;
  565. }
/* Device-tree match table */
static const struct of_device_id owl_mmc_of_match[] = {
	{.compatible = "actions,owl-mmc",},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_mmc_of_match);

static struct platform_driver owl_mmc_driver = {
	.driver = {
		.name = "owl_mmc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = owl_mmc_of_match,
	},
	.probe = owl_mmc_probe,
	.remove = owl_mmc_remove,
};
module_platform_driver(owl_mmc_driver);

MODULE_DESCRIPTION("Actions Semi Owl SoCs SD/MMC Driver");
MODULE_AUTHOR("Actions Semi");
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_LICENSE("GPL");