// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 * Copyright (C) 2013, Imagination Technologies
 *
 * JZ4740 SD/MMC controller driver
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#define JZ_REG_MMC_STRPCL	0x00
#define JZ_REG_MMC_STATUS	0x04
#define JZ_REG_MMC_CLKRT	0x08
#define JZ_REG_MMC_CMDAT	0x0C
#define JZ_REG_MMC_RESTO	0x10
#define JZ_REG_MMC_RDTO		0x14
#define JZ_REG_MMC_BLKLEN	0x18
#define JZ_REG_MMC_NOB		0x1C
#define JZ_REG_MMC_SNOB		0x20
#define JZ_REG_MMC_IMASK	0x24
#define JZ_REG_MMC_IREG		0x28
#define JZ_REG_MMC_CMD		0x2C
#define JZ_REG_MMC_ARG		0x30
#define JZ_REG_MMC_RESP_FIFO	0x34
#define JZ_REG_MMC_RXFIFO	0x38
#define JZ_REG_MMC_TXFIFO	0x3C
#define JZ_REG_MMC_LPM		0x40
#define JZ_REG_MMC_DMAC		0x44

#define JZ_MMC_STRPCL_EXIT_MULTIPLE	BIT(7)
#define JZ_MMC_STRPCL_EXIT_TRANSFER	BIT(6)
#define JZ_MMC_STRPCL_START_READWAIT	BIT(5)
#define JZ_MMC_STRPCL_STOP_READWAIT	BIT(4)
#define JZ_MMC_STRPCL_RESET		BIT(3)
#define JZ_MMC_STRPCL_START_OP		BIT(2)
#define JZ_MMC_STRPCL_CLOCK_CONTROL	(BIT(1) | BIT(0))
#define JZ_MMC_STRPCL_CLOCK_STOP	BIT(0)
#define JZ_MMC_STRPCL_CLOCK_START	BIT(1)

#define JZ_MMC_STATUS_IS_RESETTING	BIT(15)
#define JZ_MMC_STATUS_SDIO_INT_ACTIVE	BIT(14)
#define JZ_MMC_STATUS_PRG_DONE		BIT(13)
#define JZ_MMC_STATUS_DATA_TRAN_DONE	BIT(12)
#define JZ_MMC_STATUS_END_CMD_RES	BIT(11)
#define JZ_MMC_STATUS_DATA_FIFO_AFULL	BIT(10)
#define JZ_MMC_STATUS_IS_READWAIT	BIT(9)
#define JZ_MMC_STATUS_CLK_EN		BIT(8)
#define JZ_MMC_STATUS_DATA_FIFO_FULL	BIT(7)
#define JZ_MMC_STATUS_DATA_FIFO_EMPTY	BIT(6)
#define JZ_MMC_STATUS_CRC_RES_ERR	BIT(5)
#define JZ_MMC_STATUS_CRC_READ_ERROR	BIT(4)
#define JZ_MMC_STATUS_TIMEOUT_WRITE	BIT(3)
#define JZ_MMC_STATUS_CRC_WRITE_ERROR	BIT(2)
#define JZ_MMC_STATUS_TIMEOUT_RES	BIT(1)
#define JZ_MMC_STATUS_TIMEOUT_READ	BIT(0)

#define JZ_MMC_STATUS_READ_ERROR_MASK	(BIT(4) | BIT(0))
#define JZ_MMC_STATUS_WRITE_ERROR_MASK	(BIT(3) | BIT(2))

#define JZ_MMC_CMDAT_IO_ABORT		BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT	BIT(10)
#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT	(BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_BUS_WIDTH_MASK	(BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN		BIT(8)
#define JZ_MMC_CMDAT_INIT		BIT(7)
#define JZ_MMC_CMDAT_BUSY		BIT(6)
#define JZ_MMC_CMDAT_STREAM		BIT(5)
#define JZ_MMC_CMDAT_WRITE		BIT(4)
#define JZ_MMC_CMDAT_DATA_EN		BIT(3)
#define JZ_MMC_CMDAT_RESPONSE_FORMAT	(BIT(2) | BIT(1) | BIT(0))
#define JZ_MMC_CMDAT_RSP_R1		1
#define JZ_MMC_CMDAT_RSP_R2		2
#define JZ_MMC_CMDAT_RSP_R3		3

#define JZ_MMC_IRQ_SDIO			BIT(7)
#define JZ_MMC_IRQ_TXFIFO_WR_REQ	BIT(6)
#define JZ_MMC_IRQ_RXFIFO_RD_REQ	BIT(5)
#define JZ_MMC_IRQ_END_CMD_RES		BIT(2)
#define JZ_MMC_IRQ_PRG_DONE		BIT(1)
#define JZ_MMC_IRQ_DATA_TRAN_DONE	BIT(0)

#define JZ_MMC_DMAC_DMA_SEL		BIT(1)
#define JZ_MMC_DMAC_DMA_EN		BIT(0)

#define JZ_MMC_LPM_DRV_RISING		BIT(31)
#define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY	BIT(31)
#define JZ_MMC_LPM_DRV_RISING_1NS_DLY	BIT(30)
#define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY	BIT(29)
#define JZ_MMC_LPM_LOW_POWER_MODE_EN	BIT(0)

#define JZ_MMC_CLK_RATE		24000000
#define JZ_MMC_REQ_TIMEOUT_MS	5000

enum jz4740_mmc_version {
	JZ_MMC_JZ4740,
	JZ_MMC_JZ4725B,
	JZ_MMC_JZ4760,
	JZ_MMC_JZ4780,
	JZ_MMC_X1000,
};

enum jz4740_mmc_state {
	JZ4740_MMC_STATE_READ_RESPONSE,
	JZ4740_MMC_STATE_TRANSFER_DATA,
	JZ4740_MMC_STATE_SEND_STOP,
	JZ4740_MMC_STATE_DONE,
};

/*
 * The MMC core allows preparing one mmc_request while another mmc_request
 * is in flight. This is done via the pre_req/post_req hooks.
 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
 * flags to keep track of the mmc_request mapping state.
 *
 * COOKIE_UNMAPPED: the request is not mapped.
 * COOKIE_PREMAPPED: the request was mapped in pre_req,
 *	and should be unmapped in post_req.
 * COOKIE_MAPPED: the request was mapped in the irq handler,
 *	and should be unmapped before mmc_request_done is called.
 */
enum jz4780_cookie {
	COOKIE_UNMAPPED = 0,
	COOKIE_PREMAPPED,
	COOKIE_MAPPED,
};

struct jz4740_mmc_host {
	struct mmc_host *mmc;
	struct platform_device *pdev;
	struct clk *clk;

	enum jz4740_mmc_version version;

	int irq;

	void __iomem *base;
	struct resource *mem_res;
	struct mmc_request *req;
	struct mmc_command *cmd;

	unsigned long waiting;

	uint32_t cmdat;

	uint32_t irq_mask;

	spinlock_t lock;

	struct timer_list timeout_timer;
	struct sg_mapping_iter miter;
	enum jz4740_mmc_state state;

	/* DMA support */
	struct dma_chan *dma_rx;
	struct dma_chan *dma_tx;
	bool use_dma;

/* The DMA trigger level is 8 words, that is to say, the DMA read request is
 * raised when the number of data words in MSC_RXFIFO is >= 8, and the DMA
 * write request when the number of data words in MSC_TXFIFO is < 8.
 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};
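
/*
 * The interrupt mask and flag registers differ in width between controller
 * revisions: the driver accesses IMASK as a 32-bit register from the JZ4725B
 * onwards, and IREG as a 32-bit register from the JZ4780 onwards. These
 * helpers pick the matching MMIO accessor so callers need not check the
 * controller revision themselves.
 */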
static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
				      uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4725B)
		return writel(val, host->base + JZ_REG_MMC_IMASK);
	else
		return writew(val, host->base + JZ_REG_MMC_IMASK);
}

static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
				     uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4780)
		writel(val, host->base + JZ_REG_MMC_IREG);
	else
		writew(val, host->base + JZ_REG_MMC_IREG);
}

static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
{
	if (host->version >= JZ_MMC_JZ4780)
		return readl(host->base + JZ_REG_MMC_IREG);
	else
		return readw(host->base + JZ_REG_MMC_IREG);
}

/*----------------------------------------------------------------------------*/
/* DMA infrastructure */

static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	if (host->dma_rx)
		dma_release_channel(host->dma_rx);
}
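
/*
 * Newer device trees describe a single bidirectional "tx-rx" DMA channel;
 * if that lookup fails with -ENODEV, fall back to the older split "tx" and
 * "rx" channels. Any other error (e.g. -EPROBE_DEFER) is returned to the
 * caller unchanged.
 */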
static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
	struct device *dev = mmc_dev(host->mmc);

	host->dma_tx = dma_request_chan(dev, "tx-rx");
	if (!IS_ERR(host->dma_tx))
		return 0;

	if (PTR_ERR(host->dma_tx) != -ENODEV) {
		dev_err(dev, "Failed to get dma tx-rx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(host->dma_rx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
		dma_release_channel(host->dma_tx);
		return PTR_ERR(host->dma_rx);
	}

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx) {
		struct device *dev = host->dma_tx->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (host->dma_rx) {
		struct device *dev = host->dma_rx->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	return 0;
}

static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
							struct mmc_data *data)
{
	if ((data->flags & MMC_DATA_READ) && host->dma_rx)
		return host->dma_rx;
	else
		return host->dma_tx;
}

static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
	data->host_cookie = COOKIE_UNMAPPED;
}

/* Prepares DMA data for current or next transfer.
 * A request can be in-flight when this is called.
 */
static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
				       struct mmc_data *data,
				       int cookie)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);
	unsigned int sg_count;

	if (data->host_cookie == COOKIE_PREMAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(chan->device->dev,
			      data->sg,
			      data->sg_len,
			      dir);

	if (!sg_count) {
		dev_err(mmc_dev(host->mmc),
			"Failed to map scatterlist for DMA operation\n");
		return -EINVAL;
	}

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return data->sg_count;
}
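
/*
 * Configure the slave channel and kick off the dmaengine transfer. The burst
 * length matches the FIFO trigger level (8 words), and the FIFO is addressed
 * via the physical address of the memory resource, since the DMA engine
 * cannot use the ioremapped virtual base.
 */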
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};
	int sg_count;

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
	}

	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
	if (sg_count < 0)
		return sg_count;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
				       conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	if (data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}

static void jz4740_mmc_pre_request(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!host->use_dma)
		return;

	data->host_cookie = COOKIE_UNMAPPED;
	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void jz4740_mmc_post_request(struct mmc_host *mmc,
				    struct mmc_request *mrq,
				    int err)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data && data->host_cookie != COOKIE_UNMAPPED)
		jz4740_mmc_dma_unmap(host, data);

	if (err) {
		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);

		dmaengine_terminate_all(chan);
	}
}

/*----------------------------------------------------------------------------*/

static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
					unsigned int irq, bool enabled)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enabled)
		host->irq_mask &= ~irq;
	else
		host->irq_mask |= irq;

	jz4740_mmc_write_irq_mask(host, host->irq_mask);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
				    bool start_transfer)
{
	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;

	if (start_transfer)
		val |= JZ_MMC_STRPCL_START_OP;

	writew(val, host->base + JZ_REG_MMC_STRPCL);
}

static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
}

static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
	udelay(10);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
}

static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
{
	struct mmc_request *req;
	struct mmc_data *data;

	req = host->req;
	data = req->data;
	host->req = NULL;

	if (data && data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	mmc_request_done(host->mmc, req);
}
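
/*
 * Busy-poll the interrupt flag register for a bounded number of iterations.
 * If the expected interrupt does not arrive in time, re-arm the request
 * timeout timer, unmask the interrupt so the transfer can be resumed from
 * interrupt context, and return true to tell the caller that the state
 * machine has been parked for now.
 */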
static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
					unsigned int irq)
{
	unsigned int timeout = 0x800;
	uint32_t status;

	do {
		status = jz4740_mmc_read_irq_reg(host);
	} while (!(status & irq) && --timeout);

	if (timeout == 0) {
		set_bit(0, &host->waiting);
		mod_timer(&host->timeout_timer,
			  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
		jz4740_mmc_set_irq_enabled(host, irq, true);
		return true;
	}

	return false;
}

static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
					    struct mmc_data *data)
{
	int status;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	}
}
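
/*
 * PIO write path: each TXFIFO_WR_REQ indicates that at least half the FIFO
 * (8 words) is free, so full groups of 8 words are written per poll before
 * any trailing partial group. On a poll timeout the bytes already pushed are
 * accounted in bytes_xfered and the transfer resumes later from the IRQ
 * worker. The read path below mirrors this structure.
 */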
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
				  struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length / 4;
		j = i / 8;
		i = i & 0x7;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint32_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length;
		j = i / 32;
		i = i & 0x1f;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometimes one word more in the FIFO
	 * than requested; drain it here before returning.
	 */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

static void jz4740_mmc_timeout(struct timer_list *t)
{
	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);

	if (!test_and_clear_bit(0, &host->waiting))
		return;

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);

	host->req->cmd->error = -ETIMEDOUT;
	jz4740_mmc_request_done(host);
}
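
/*
 * Command responses are read from the 16-bit response FIFO and reassembled
 * into the 32-bit resp[] words the MMC core expects. For 136-bit responses
 * consecutive resp[] words share FIFO half-words, which is why the loop
 * below carries the previous read over in 'tmp'.
 */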
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
				     struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}

static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
				    struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (host->use_dma) {
			/*
			 * The JZ4780's MMC controller has integrated DMA ability
			 * in addition to being able to use the external DMA
			 * controller. It moves DMA control bits to a separate
			 * register. The DMA_SEL bit chooses the external
			 * controller over the integrated one. Earlier SoCs
			 * can only use the external controller, and have a
			 * single DMA enable bit in CMDAT.
			 */
			if (host->version >= JZ_MMC_JZ4780) {
				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
				       host->base + JZ_REG_MMC_DMAC);
			} else {
				cmdat |= JZ_MMC_CMDAT_DMA_EN;
			}
		} else if (host->version >= JZ_MMC_JZ4780) {
			writel(0, host->base + JZ_REG_MMC_DMAC);
		}

		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	jz4740_mmc_clock_enable(host, 1);
}

static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
{
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_data *data = cmd->data;
	int direction;

	if (data->flags & MMC_DATA_READ)
		direction = SG_MITER_TO_SG;
	else
		direction = SG_MITER_FROM_SG;

	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
}
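
/*
 * Threaded half of the interrupt handler. host->state records how far the
 * current request has progressed (READ_RESPONSE -> TRANSFER_DATA ->
 * SEND_STOP -> DONE). Whenever a poll inside one of the steps times out, the
 * state is saved, the corresponding interrupt is unmasked and the worker
 * returns without completing the request, so the next interrupt resumes it
 * exactly where it left off.
 */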
static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_request *req = host->req;
	struct mmc_data *data = cmd->data;
	bool timeout = false;

	if (cmd->error)
		host->state = JZ4740_MMC_STATE_DONE;

	switch (host->state) {
	case JZ4740_MMC_STATE_READ_RESPONSE:
		if (cmd->flags & MMC_RSP_PRESENT)
			jz4740_mmc_read_response(host, cmd);

		if (!data)
			break;

		jz_mmc_prepare_data_transfer(host);
		fallthrough;

	case JZ4740_MMC_STATE_TRANSFER_DATA:
		if (host->use_dma) {
			/* Use DMA if enabled.
			 * Data transfer direction is defined later by
			 * relying on data flags in
			 * jz4740_mmc_prepare_dma_data() and
			 * jz4740_mmc_start_dma_transfer().
			 */
			timeout = jz4740_mmc_start_dma_transfer(host, data);
			data->bytes_xfered = data->blocks * data->blksz;
		} else if (data->flags & MMC_DATA_READ)
			/* Use PIO if DMA is not enabled.
			 * Data transfer direction was defined before
			 * by relying on data flags in
			 * jz_mmc_prepare_data_transfer().
			 */
			timeout = jz4740_mmc_read_data(host, data);
		else
			timeout = jz4740_mmc_write_data(host, data);

		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
			break;
		}

		jz4740_mmc_transfer_check_state(host, data);

		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_SEND_STOP;
			break;
		}
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		fallthrough;

	case JZ4740_MMC_STATE_SEND_STOP:
		if (!req->stop)
			break;

		jz4740_mmc_send_command(host, req->stop);

		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
			timeout = jz4740_mmc_poll_irq(host,
						      JZ_MMC_IRQ_PRG_DONE);
			if (timeout) {
				host->state = JZ4740_MMC_STATE_DONE;
				break;
			}
		}
		fallthrough;

	case JZ4740_MMC_STATE_DONE:
		break;
	}

	if (!timeout)
		jz4740_mmc_request_done(host);

	return IRQ_HANDLED;
}
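
/*
 * Hard interrupt handler: acknowledge pending-but-masked interrupt bits
 * (except those the worker polls itself), forward SDIO card interrupts to
 * the core, translate STATUS error flags into command/data errors, and wake
 * the threaded worker once the interrupt the current request is waiting for
 * has arrived.
 */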
static irqreturn_t jz_mmc_irq(int irq, void *devid)
{
	struct jz4740_mmc_host *host = devid;
	struct mmc_command *cmd = host->cmd;
	uint32_t irq_reg, status, tmp;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	irq_reg = jz4740_mmc_read_irq_reg(host);

	tmp = irq_reg;
	irq_reg &= ~host->irq_mask;

	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);

	if (tmp != irq_reg)
		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);

	if (irq_reg & JZ_MMC_IRQ_SDIO) {
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
		mmc_signal_sdio_irq(host->mmc);
		irq_reg &= ~JZ_MMC_IRQ_SDIO;
	}

	if (host->req && cmd && irq_reg) {
		if (test_and_clear_bit(0, &host->waiting)) {
			del_timer(&host->timeout_timer);

			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
				cmd->error = -ETIMEDOUT;
			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
				cmd->error = -EIO;
			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
				if (cmd->data)
					cmd->data->error = -EIO;
				cmd->error = -EIO;
			}

			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
			jz4740_mmc_write_irq_reg(host, irq_reg);

			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}
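
/*
 * The card clock is derived from the "mmc" clock by a power-of-two divider
 * programmed into CLKRT (divide by 1 to 128). For example, with the parent
 * clock at 24 MHz and a requested 400 kHz identification clock, the loop
 * below settles on div = 6, i.e. 24 MHz / 64 = 375 kHz, the first rate in
 * the halving sequence that does not exceed the request. The rate actually
 * achieved is returned to the caller.
 */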
static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
{
	int div = 0;
	int real_rate;

	jz4740_mmc_clock_disable(host);
	clk_set_rate(host->clk, host->mmc->f_max);

	real_rate = clk_get_rate(host->clk);

	while (real_rate > rate && div < 7) {
		++div;
		real_rate >>= 1;
	}

	writew(div, host->base + JZ_REG_MMC_CLKRT);

	if (real_rate > 25000000) {
		if (host->version >= JZ_MMC_JZ4780) {
			writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
			       JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
			       JZ_MMC_LPM_LOW_POWER_MODE_EN,
			       host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4760) {
			writel(JZ_MMC_LPM_DRV_RISING |
			       JZ_MMC_LPM_LOW_POWER_MODE_EN,
			       host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4725B)
			writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
			       host->base + JZ_REG_MMC_LPM);
	}

	return real_rate;
}

static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	host->req = req;

	jz4740_mmc_write_irq_reg(host, ~0);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);

	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer,
		  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
	jz4740_mmc_send_command(host, req->cmd);
}

static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	if (ios->clock)
		jz4740_mmc_set_clock_rate(host, ios->clock);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		jz4740_mmc_reset(host);
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		host->cmdat |= JZ_MMC_CMDAT_INIT;
		clk_prepare_enable(host->clk);
		break;
	case MMC_POWER_ON:
		break;
	default:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		clk_disable_unprepare(host->clk);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		break;
	case MMC_BUS_WIDTH_4:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
		break;
	default:
		break;
	}
}

static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
}

static const struct mmc_host_ops jz4740_mmc_ops = {
	.request = jz4740_mmc_request,
	.pre_req = jz4740_mmc_pre_request,
	.post_req = jz4740_mmc_post_request,
	.set_ios = jz4740_mmc_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmc_gpio_get_cd,
	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};

static const struct of_device_id jz4740_mmc_of_match[] = {
	{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
	{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
	{ .compatible = "ingenic,jz4775-mmc", .data = (void *) JZ_MMC_JZ4780 },
	{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
	{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
	{},
};
MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);

static int jz4740_mmc_probe(struct platform_device *pdev)
{
	int ret;
	struct mmc_host *mmc;
	struct jz4740_mmc_host *host;
	const struct of_device_id *match;

	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);

	match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
	if (match) {
		host->version = (enum jz4740_mmc_version)match->data;
	} else {
		/* JZ4740 should be the only one using legacy probe */
		host->version = JZ_MMC_JZ4740;
	}

	ret = mmc_of_parse(mmc);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
		goto err_free_host;
	}

	mmc_regulator_get_supply(mmc);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		goto err_free_host;
	}

	host->clk = devm_clk_get(&pdev->dev, "mmc");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(&pdev->dev, "Failed to get mmc clock\n");
		goto err_free_host;
	}

	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto err_free_host;
	}

	mmc->ops = &jz4740_mmc_ops;
	if (!mmc->f_max)
		mmc->f_max = JZ_MMC_CLK_RATE;

	/*
	 * There seems to be a problem with this driver on the JZ4760 and
	 * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz),
	 * the communication fails with many SD cards.
	 * Until this bug is sorted out, limit the maximum rate to 24 MHz.
	 */
	if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
		mmc->f_max = JZ_MMC_CLK_RATE;

	mmc->f_min = mmc->f_max / 128;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * We use a fixed timeout of 5s, hence inform the core about it. A
	 * future improvement should instead respect the cmd->busy_timeout.
	 */
	mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;

	mmc->max_blk_size = (1 << 10) - 1;
	mmc->max_blk_count = (1 << 15) - 1;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	mmc->max_segs = 128;
	mmc->max_seg_size = mmc->max_req_size;

	host->mmc = mmc;
	host->pdev = pdev;
	spin_lock_init(&host->lock);
	host->irq_mask = ~0;

	jz4740_mmc_reset(host);

	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
				   dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
		goto err_free_host;
	}

	jz4740_mmc_clock_disable(host);
	timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);

	ret = jz4740_mmc_acquire_dma_channels(host);
	if (ret == -EPROBE_DEFER)
		goto err_free_irq;
	host->use_dma = !ret;

	platform_set_drvdata(pdev, host);
	ret = mmc_add_host(mmc);

	if (ret) {
		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
		goto err_release_dma;
	}
	dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");

	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
		 host->use_dma ? "DMA" : "PIO",
		 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
		 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));

	return 0;

err_release_dma:
	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);
err_free_irq:
	free_irq(host->irq, host);
err_free_host:
	mmc_free_host(mmc);

	return ret;
}

static int jz4740_mmc_remove(struct platform_device *pdev)
{
	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);

	del_timer_sync(&host->timeout_timer);
	jz4740_mmc_set_irq_enabled(host, 0xff, false);
	jz4740_mmc_reset(host);

	mmc_remove_host(host->mmc);

	free_irq(host->irq, host);

	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);

	mmc_free_host(host->mmc);

	return 0;
}

static int jz4740_mmc_suspend(struct device *dev)
{
	return pinctrl_pm_select_sleep_state(dev);
}

static int jz4740_mmc_resume(struct device *dev)
{
	return pinctrl_select_default_state(dev);
}

static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
				jz4740_mmc_resume);

static struct platform_driver jz4740_mmc_driver = {
	.probe = jz4740_mmc_probe,
	.remove = jz4740_mmc_remove,
	.driver = {
		.name = "jz4740-mmc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(jz4740_mmc_of_match),
		.pm = pm_sleep_ptr(&jz4740_mmc_pm_ops),
	},
};

module_platform_driver(jz4740_mmc_driver);

MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");