sdhci-tegra.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2010 Google, Inc.
  4. */
  5. #include <linux/delay.h>
  6. #include <linux/dma-mapping.h>
  7. #include <linux/err.h>
  8. #include <linux/module.h>
  9. #include <linux/init.h>
  10. #include <linux/iopoll.h>
  11. #include <linux/platform_device.h>
  12. #include <linux/clk.h>
  13. #include <linux/io.h>
  14. #include <linux/of.h>
  15. #include <linux/of_device.h>
  16. #include <linux/pinctrl/consumer.h>
  17. #include <linux/pm_opp.h>
  18. #include <linux/pm_runtime.h>
  19. #include <linux/regulator/consumer.h>
  20. #include <linux/reset.h>
  21. #include <linux/mmc/card.h>
  22. #include <linux/mmc/host.h>
  23. #include <linux/mmc/mmc.h>
  24. #include <linux/mmc/slot-gpio.h>
  25. #include <linux/gpio/consumer.h>
  26. #include <linux/ktime.h>
  27. #include <soc/tegra/common.h>
  28. #include "sdhci-cqhci.h"
  29. #include "sdhci-pltfm.h"
  30. #include "cqhci.h"
  31. /* Tegra SDHOST controller vendor register definitions */
  32. #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100
  33. #define SDHCI_CLOCK_CTRL_TAP_MASK 0x00ff0000
  34. #define SDHCI_CLOCK_CTRL_TAP_SHIFT 16
  35. #define SDHCI_CLOCK_CTRL_TRIM_MASK 0x1f000000
  36. #define SDHCI_CLOCK_CTRL_TRIM_SHIFT 24
  37. #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE BIT(5)
  38. #define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE BIT(3)
  39. #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE BIT(2)
  40. #define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL 0x104
  41. #define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE BIT(31)
  42. #define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES 0x10c
  43. #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK 0x00003f00
  44. #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT 8
  45. #define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
  46. #define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT BIT(0)
  47. #define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8
  48. #define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10
  49. #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
  50. #define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200
  51. #define SDHCI_TEGRA_VENDOR_DLLCAL_CFG 0x1b0
  52. #define SDHCI_TEGRA_DLLCAL_CALIBRATE BIT(31)
  53. #define SDHCI_TEGRA_VENDOR_DLLCAL_STA 0x1bc
  54. #define SDHCI_TEGRA_DLLCAL_STA_ACTIVE BIT(31)
  55. #define SDHCI_VNDR_TUN_CTRL0_0 0x1c0
  56. #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000
  57. #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK 0x03fc0000
  58. #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT 18
  59. #define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK 0x00001fc0
  60. #define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT 6
  61. #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK 0x000e000
  62. #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT 13
  63. #define TRIES_128 2
  64. #define TRIES_256 4
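/*
 * Note: TRIES_128 and TRIES_256 are the raw TUN_ITER field encodings; the
 * driver maps them back to 128 or 256 tuning iterations when it sets
 * host->tuning_loop_count in tegra_sdhci_set_uhs_signaling().
 */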
  65. #define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK 0x7
  66. #define SDHCI_TEGRA_VNDR_TUN_CTRL1_0 0x1c4
  67. #define SDHCI_TEGRA_VNDR_TUN_STATUS0 0x1C8
  68. #define SDHCI_TEGRA_VNDR_TUN_STATUS1 0x1CC
  69. #define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK 0xFF
  70. #define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT 0x8
  71. #define TUNING_WORD_BIT_SIZE 32
  72. #define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4
  73. #define SDHCI_AUTO_CAL_START BIT(31)
  74. #define SDHCI_AUTO_CAL_ENABLE BIT(29)
  75. #define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK 0x0000ffff
  76. #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL 0x1e0
  77. #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f
  78. #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7
  79. #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31)
  80. #define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK 0x07FFF000
  81. #define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec
  82. #define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31)
  83. #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
  84. #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
  85. #define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
  86. #define NVQUIRK_ENABLE_SDR50 BIT(3)
  87. #define NVQUIRK_ENABLE_SDR104 BIT(4)
  88. #define NVQUIRK_ENABLE_DDR50 BIT(5)
  89. /*
  90. * HAS_PADCALIB NVQUIRK is for SoCs supporting auto-calibration of pad
  91. * drive strength.
  92. */
  93. #define NVQUIRK_HAS_PADCALIB BIT(6)
  94. /*
  95. * NEEDS_PAD_CONTROL NVQUIRK is for SoCs having separate 3V3 and 1V8 pads.
  96. * 3V3/1V8 pad selection happens through pinctrl state selection depending
  97. * on the signaling mode.
  98. */
  99. #define NVQUIRK_NEEDS_PAD_CONTROL BIT(7)
  100. #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
  101. #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9)
  102. /*
  103. * NVQUIRK_HAS_TMCLK is for SoCs having a separate timeout clock for Tegra
  104. * SDMMC hardware data timeout.
  105. */
  106. #define NVQUIRK_HAS_TMCLK BIT(10)
  107. #define NVQUIRK_HAS_ANDROID_GPT_SECTOR BIT(11)
  108. /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
  109. #define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
  110. #define SDHCI_TEGRA_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
  111. SDHCI_TRNS_BLK_CNT_EN | \
  112. SDHCI_TRNS_DMA)
  113. struct sdhci_tegra_soc_data {
  114. const struct sdhci_pltfm_data *pdata;
  115. u64 dma_mask;
  116. u32 nvquirks;
  117. u8 min_tap_delay;
  118. u8 max_tap_delay;
  119. };
  120. /* Magic pull up and pull down pad calibration offsets */
  121. struct sdhci_tegra_autocal_offsets {
  122. u32 pull_up_3v3;
  123. u32 pull_down_3v3;
  124. u32 pull_up_3v3_timeout;
  125. u32 pull_down_3v3_timeout;
  126. u32 pull_up_1v8;
  127. u32 pull_down_1v8;
  128. u32 pull_up_1v8_timeout;
  129. u32 pull_down_1v8_timeout;
  130. u32 pull_up_sdr104;
  131. u32 pull_down_sdr104;
  132. u32 pull_up_hs400;
  133. u32 pull_down_hs400;
  134. };
  135. struct sdhci_tegra {
  136. const struct sdhci_tegra_soc_data *soc_data;
  137. struct gpio_desc *power_gpio;
  138. struct clk *tmclk;
  139. bool ddr_signaling;
  140. bool pad_calib_required;
  141. bool pad_control_available;
  142. struct reset_control *rst;
  143. struct pinctrl *pinctrl_sdmmc;
  144. struct pinctrl_state *pinctrl_state_3v3;
  145. struct pinctrl_state *pinctrl_state_1v8;
  146. struct pinctrl_state *pinctrl_state_3v3_drv;
  147. struct pinctrl_state *pinctrl_state_1v8_drv;
  148. struct sdhci_tegra_autocal_offsets autocal_offsets;
  149. ktime_t last_calib;
  150. u32 default_tap;
  151. u32 default_trim;
  152. u32 dqs_trim;
  153. bool enable_hwcq;
  154. unsigned long curr_clk_rate;
  155. u8 tuned_tap_delay;
  156. };
  157. static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
  158. {
  159. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  160. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  161. const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
  162. if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
  163. (reg == SDHCI_HOST_VERSION))) {
  164. /* Erratum: Version register is invalid in HW. */
  165. return SDHCI_SPEC_200;
  166. }
  167. return readw(host->ioaddr + reg);
  168. }
  169. static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
  170. {
  171. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  172. switch (reg) {
  173. case SDHCI_TRANSFER_MODE:
  174. /*
  175. * Postpone this write, we must do it together with a
  176. * command write that is down below.
  177. */
  178. pltfm_host->xfer_mode_shadow = val;
  179. return;
  180. case SDHCI_COMMAND:
  181. writel((val << 16) | pltfm_host->xfer_mode_shadow,
  182. host->ioaddr + SDHCI_TRANSFER_MODE);
  183. return;
  184. }
  185. writew(val, host->ioaddr + reg);
  186. }
  187. static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
  188. {
  189. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  190. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  191. const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
  192. /* Seems like we're getting spurious timeout and crc errors, so
  193. * disable signalling of them. In case of real errors software
  194. * timers should take care of eventually detecting them.
  195. */
  196. if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
  197. val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
  198. writel(val, host->ioaddr + reg);
  199. if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
  200. (reg == SDHCI_INT_ENABLE))) {
  201. /* Erratum: Must enable block gap interrupt detection */
  202. u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
  203. if (val & SDHCI_INT_CARD_INT)
  204. gap_ctrl |= 0x8;
  205. else
  206. gap_ctrl &= ~0x8;
  207. writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
  208. }
  209. }
  210. static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
  211. {
  212. bool status;
  213. u32 reg;
  214. reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
  215. status = !!(reg & SDHCI_CLOCK_CARD_EN);
  216. if (status == enable)
  217. return status;
  218. if (enable)
  219. reg |= SDHCI_CLOCK_CARD_EN;
  220. else
  221. reg &= ~SDHCI_CLOCK_CARD_EN;
  222. sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
  223. return status;
  224. }
  225. static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
  226. {
  227. bool is_tuning_cmd = false;
  228. bool clk_enabled;
  229. u8 cmd;
  230. if (reg == SDHCI_COMMAND) {
  231. cmd = SDHCI_GET_CMD(val);
  232. is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
  233. cmd == MMC_SEND_TUNING_BLOCK_HS200;
  234. }
  235. if (is_tuning_cmd)
  236. clk_enabled = tegra_sdhci_configure_card_clk(host, false);
  237. writew(val, host->ioaddr + reg);
  238. if (is_tuning_cmd) {
  239. udelay(1);
  240. sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
  241. tegra_sdhci_configure_card_clk(host, clk_enabled);
  242. }
  243. }
  244. static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
  245. {
  246. /*
  247. * Write-enable shall be assumed if GPIO is missing in a board's
  248. * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
  249. * Tegra.
  250. */
  251. return mmc_gpio_get_ro(host->mmc);
  252. }
  253. static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
  254. {
  255. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  256. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  257. int has_1v8, has_3v3;
  258. /*
  259. * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
  260. * voltage configuration in order to perform voltage switching. This
  261. * means that valid pinctrl info is required on SDHCI instances capable
  262. * of performing voltage switching. Whether or not an SDHCI instance is
  263. * capable of voltage switching is determined based on the regulator.
  264. */
  265. if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
  266. return true;
  267. if (IS_ERR(host->mmc->supply.vqmmc))
  268. return false;
  269. has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
  270. 1700000, 1950000);
  271. has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
  272. 2700000, 3600000);
  273. if (has_1v8 == 1 && has_3v3 == 1)
  274. return tegra_host->pad_control_available;
  275. /* Fixed voltage, no pad control required. */
  276. return true;
  277. }
  278. static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
  279. {
  280. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  281. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  282. const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
  283. bool card_clk_enabled = false;
  284. u32 reg;
  285. /*
  286. * Touching the tap values is a bit tricky on some SoC generations.
  287. * The quirk enables a workaround for a glitch that sometimes occurs if
  288. * the tap values are changed.
  289. */
  290. if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
  291. card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
  292. reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
  293. reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
  294. reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
  295. sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
  296. if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
  297. card_clk_enabled) {
  298. udelay(1);
  299. sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
  300. tegra_sdhci_configure_card_clk(host, card_clk_enabled);
  301. }
  302. }
  303. static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
  304. {
  305. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  306. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  307. const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
  308. u32 misc_ctrl, clk_ctrl, pad_ctrl;
  309. sdhci_and_cqhci_reset(host, mask);
  310. if (!(mask & SDHCI_RESET_ALL))
  311. return;
  312. tegra_sdhci_set_tap(host, tegra_host->default_tap);
  313. misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
  314. clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
  315. misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
  316. SDHCI_MISC_CTRL_ENABLE_SDR50 |
  317. SDHCI_MISC_CTRL_ENABLE_DDR50 |
  318. SDHCI_MISC_CTRL_ENABLE_SDR104);
  319. clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
  320. SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
  321. if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
  322. /* Erratum: Enable SDHCI spec v3.00 support */
  323. if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
  324. misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
  325. /* Advertise UHS modes as supported by host */
  326. if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
  327. misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
  328. if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
  329. misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
  330. if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
  331. misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
  332. if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
  333. clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
  334. }
  335. clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
  336. sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
  337. sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
  338. if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
  339. pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
  340. pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
  341. pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
  342. sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
  343. tegra_host->pad_calib_required = true;
  344. }
  345. tegra_host->ddr_signaling = false;
  346. }
  347. static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
  348. {
  349. u32 val;
  350. /*
  351. * Enable or disable the additional I/O pad used by the drive strength
  352. * calibration process.
  353. */
  354. val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
  355. if (enable)
  356. val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
  357. else
  358. val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
  359. sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
  360. if (enable)
  361. usleep_range(1, 2);
  362. }
  363. static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
  364. u16 pdpu)
  365. {
  366. u32 reg;
  367. reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
  368. reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
  369. reg |= pdpu;
  370. sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
  371. }
  372. static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
  373. bool state_drvupdn)
  374. {
  375. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  376. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  377. struct sdhci_tegra_autocal_offsets *offsets =
  378. &tegra_host->autocal_offsets;
  379. struct pinctrl_state *pinctrl_drvupdn = NULL;
  380. int ret = 0;
  381. u8 drvup = 0, drvdn = 0;
  382. u32 reg;
  383. if (!state_drvupdn) {
  384. /* PADS Drive Strength */
  385. if (voltage == MMC_SIGNAL_VOLTAGE_180) {
  386. if (tegra_host->pinctrl_state_1v8_drv) {
  387. pinctrl_drvupdn =
  388. tegra_host->pinctrl_state_1v8_drv;
  389. } else {
  390. drvup = offsets->pull_up_1v8_timeout;
  391. drvdn = offsets->pull_down_1v8_timeout;
  392. }
  393. } else {
  394. if (tegra_host->pinctrl_state_3v3_drv) {
  395. pinctrl_drvupdn =
  396. tegra_host->pinctrl_state_3v3_drv;
  397. } else {
  398. drvup = offsets->pull_up_3v3_timeout;
  399. drvdn = offsets->pull_down_3v3_timeout;
  400. }
  401. }
  402. if (pinctrl_drvupdn != NULL) {
  403. ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
  404. pinctrl_drvupdn);
  405. if (ret < 0)
  406. dev_err(mmc_dev(host->mmc),
  407. "failed pads drvupdn, ret: %d\n", ret);
  408. } else if ((drvup) || (drvdn)) {
  409. reg = sdhci_readl(host,
  410. SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
  411. reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
  412. reg |= (drvup << 20) | (drvdn << 12);
  413. sdhci_writel(host, reg,
  414. SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
  415. }
  416. } else {
  417. /* Dual Voltage PADS Voltage selection */
  418. if (!tegra_host->pad_control_available)
  419. return 0;
  420. if (voltage == MMC_SIGNAL_VOLTAGE_180) {
  421. ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
  422. tegra_host->pinctrl_state_1v8);
  423. if (ret < 0)
  424. dev_err(mmc_dev(host->mmc),
  425. "setting 1.8V failed, ret: %d\n", ret);
  426. } else {
  427. ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
  428. tegra_host->pinctrl_state_3v3);
  429. if (ret < 0)
  430. dev_err(mmc_dev(host->mmc),
  431. "setting 3.3V failed, ret: %d\n", ret);
  432. }
  433. }
  434. return ret;
  435. }
  436. static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
  437. {
  438. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  439. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  440. struct sdhci_tegra_autocal_offsets offsets =
  441. tegra_host->autocal_offsets;
  442. struct mmc_ios *ios = &host->mmc->ios;
  443. bool card_clk_enabled;
  444. u16 pdpu;
  445. u32 reg;
  446. int ret;
  447. switch (ios->timing) {
  448. case MMC_TIMING_UHS_SDR104:
  449. pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
  450. break;
  451. case MMC_TIMING_MMC_HS400:
  452. pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
  453. break;
  454. default:
  455. if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
  456. pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
  457. else
  458. pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
  459. }
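/*
 * The pull-down offset occupies bits [15:8] and the pull-up offset bits
 * [7:0] of pdpu, matching SDHCI_AUTO_CAL_PDPU_OFFSET_MASK. For example,
 * with illustrative offsets pull_down_3v3 = 0x0a and pull_up_3v3 = 0x05,
 * pdpu packs to 0x0a05.
 */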
  460. /* Set initial offset before auto-calibration */
  461. tegra_sdhci_set_pad_autocal_offset(host, pdpu);
  462. card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
  463. tegra_sdhci_configure_cal_pad(host, true);
  464. reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
  465. reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
  466. sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
  467. usleep_range(1, 2);
  468. /* 10 ms timeout */
  469. ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
  470. reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
  471. 1000, 10000);
  472. tegra_sdhci_configure_cal_pad(host, false);
  473. tegra_sdhci_configure_card_clk(host, card_clk_enabled);
  474. if (ret) {
  475. dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
  476. /* Disable automatic cal and use fixed Drive Strengths */
  477. reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
  478. reg &= ~SDHCI_AUTO_CAL_ENABLE;
  479. sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
  480. ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
  481. if (ret < 0)
  482. dev_err(mmc_dev(host->mmc),
  483. "Setting drive strengths failed: %d\n", ret);
  484. }
  485. }
  486. static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
  487. {
  488. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  489. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  490. struct sdhci_tegra_autocal_offsets *autocal =
  491. &tegra_host->autocal_offsets;
  492. int err;
  493. err = device_property_read_u32(mmc_dev(host->mmc),
  494. "nvidia,pad-autocal-pull-up-offset-3v3",
  495. &autocal->pull_up_3v3);
  496. if (err)
  497. autocal->pull_up_3v3 = 0;
  498. err = device_property_read_u32(mmc_dev(host->mmc),
  499. "nvidia,pad-autocal-pull-down-offset-3v3",
  500. &autocal->pull_down_3v3);
  501. if (err)
  502. autocal->pull_down_3v3 = 0;
  503. err = device_property_read_u32(mmc_dev(host->mmc),
  504. "nvidia,pad-autocal-pull-up-offset-1v8",
  505. &autocal->pull_up_1v8);
  506. if (err)
  507. autocal->pull_up_1v8 = 0;
  508. err = device_property_read_u32(mmc_dev(host->mmc),
  509. "nvidia,pad-autocal-pull-down-offset-1v8",
  510. &autocal->pull_down_1v8);
  511. if (err)
  512. autocal->pull_down_1v8 = 0;
  513. err = device_property_read_u32(mmc_dev(host->mmc),
  514. "nvidia,pad-autocal-pull-up-offset-sdr104",
  515. &autocal->pull_up_sdr104);
  516. if (err)
  517. autocal->pull_up_sdr104 = autocal->pull_up_1v8;
  518. err = device_property_read_u32(mmc_dev(host->mmc),
  519. "nvidia,pad-autocal-pull-down-offset-sdr104",
  520. &autocal->pull_down_sdr104);
  521. if (err)
  522. autocal->pull_down_sdr104 = autocal->pull_down_1v8;
  523. err = device_property_read_u32(mmc_dev(host->mmc),
  524. "nvidia,pad-autocal-pull-up-offset-hs400",
  525. &autocal->pull_up_hs400);
  526. if (err)
  527. autocal->pull_up_hs400 = autocal->pull_up_1v8;
  528. err = device_property_read_u32(mmc_dev(host->mmc),
  529. "nvidia,pad-autocal-pull-down-offset-hs400",
  530. &autocal->pull_down_hs400);
  531. if (err)
  532. autocal->pull_down_hs400 = autocal->pull_down_1v8;
  533. /*
  534. * Different fail-safe drive strength values based on the signaling
  535. * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
  536. * So, avoid reading the device tree properties below for SoCs that don't
  537. * have NVQUIRK_NEEDS_PAD_CONTROL.
  538. */
  539. if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
  540. return;
  541. err = device_property_read_u32(mmc_dev(host->mmc),
  542. "nvidia,pad-autocal-pull-up-offset-3v3-timeout",
  543. &autocal->pull_up_3v3_timeout);
  544. if (err) {
  545. if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
  546. (tegra_host->pinctrl_state_3v3_drv == NULL))
  547. pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
  548. mmc_hostname(host->mmc));
  549. autocal->pull_up_3v3_timeout = 0;
  550. }
  551. err = device_property_read_u32(mmc_dev(host->mmc),
  552. "nvidia,pad-autocal-pull-down-offset-3v3-timeout",
  553. &autocal->pull_down_3v3_timeout);
  554. if (err) {
  555. if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
  556. (tegra_host->pinctrl_state_3v3_drv == NULL))
  557. pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
  558. mmc_hostname(host->mmc));
  559. autocal->pull_down_3v3_timeout = 0;
  560. }
  561. err = device_property_read_u32(mmc_dev(host->mmc),
  562. "nvidia,pad-autocal-pull-up-offset-1v8-timeout",
  563. &autocal->pull_up_1v8_timeout);
  564. if (err) {
  565. if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
  566. (tegra_host->pinctrl_state_1v8_drv == NULL))
  567. pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
  568. mmc_hostname(host->mmc));
  569. autocal->pull_up_1v8_timeout = 0;
  570. }
  571. err = device_property_read_u32(mmc_dev(host->mmc),
  572. "nvidia,pad-autocal-pull-down-offset-1v8-timeout",
  573. &autocal->pull_down_1v8_timeout);
  574. if (err) {
  575. if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
  576. (tegra_host->pinctrl_state_1v8_drv == NULL))
  577. pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
  578. mmc_hostname(host->mmc));
  579. autocal->pull_down_1v8_timeout = 0;
  580. }
  581. }
  582. static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
  583. {
  584. struct sdhci_host *host = mmc_priv(mmc);
  585. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  586. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  587. ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
  588. /* 100 ms calibration interval is specified in the TRM */
  589. if (ktime_to_ms(since_calib) > 100) {
  590. tegra_sdhci_pad_autocalib(host);
  591. tegra_host->last_calib = ktime_get();
  592. }
  593. sdhci_request(mmc, mrq);
  594. }
  595. static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
  596. {
  597. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  598. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  599. int err;
  600. err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
  601. &tegra_host->default_tap);
  602. if (err)
  603. tegra_host->default_tap = 0;
  604. err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
  605. &tegra_host->default_trim);
  606. if (err)
  607. tegra_host->default_trim = 0;
  608. err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
  609. &tegra_host->dqs_trim);
  610. if (err)
  611. tegra_host->dqs_trim = 0x11;
  612. }
  613. static void tegra_sdhci_parse_dt(struct sdhci_host *host)
  614. {
  615. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  616. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  617. if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
  618. tegra_host->enable_hwcq = true;
  619. else
  620. tegra_host->enable_hwcq = false;
  621. tegra_sdhci_parse_pad_autocal_dt(host);
  622. tegra_sdhci_parse_tap_and_trim(host);
  623. }
  624. static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
  625. {
  626. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  627. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  628. struct device *dev = mmc_dev(host->mmc);
  629. unsigned long host_clk;
  630. int err;
  631. if (!clock)
  632. return sdhci_set_clock(host, clock);
  633. /*
  634. * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
  635. * divider to be configured to divide the host clock by two. The SDHCI
  636. * clock divider is calculated as part of sdhci_set_clock() by
  637. * sdhci_calc_clk(). The divider is calculated from host->max_clk and
  638. * the requested clock rate.
  639. *
  640. * By setting the host->max_clk to clock * 2 the divider calculation
  641. * will always result in the correct value for DDR50/52 modes,
  642. * regardless of clock rate rounding, which may happen if the value
  643. * from clk_get_rate() is used.
  644. */
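/*
 * Worked example (rates assumed for illustration): a DDR52 request for a
 * 52 MHz card clock yields host_clk = 104 MHz and host->max_clk = 104 MHz,
 * so sdhci_calc_clk() picks a divisor of 2 and the card clock stays 52 MHz.
 */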
  645. host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
  646. err = dev_pm_opp_set_rate(dev, host_clk);
  647. if (err)
  648. dev_err(dev, "failed to set clk rate to %luHz: %d\n",
  649. host_clk, err);
  650. tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
  651. if (tegra_host->ddr_signaling)
  652. host->max_clk = host_clk;
  653. else
  654. host->max_clk = clk_get_rate(pltfm_host->clk);
  655. sdhci_set_clock(host, clock);
  656. if (tegra_host->pad_calib_required) {
  657. tegra_sdhci_pad_autocalib(host);
  658. tegra_host->pad_calib_required = false;
  659. }
  660. }
  661. static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
  662. struct mmc_ios *ios)
  663. {
  664. struct sdhci_host *host = mmc_priv(mmc);
  665. u32 val;
  666. val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
  667. if (ios->enhanced_strobe) {
  668. val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
  669. /*
  670. * When CMD13 is sent from mmc_select_hs400es() after
  671. * switching to HS400ES mode, the bus is operating at
  672. * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
  673. * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
  674. * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
  675. * controller CAR clock and the interface clock are rate matched.
  676. */
  677. tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
  678. } else {
  679. val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
  680. }
  681. sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
  682. }
  683. static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
  684. {
  685. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  686. return clk_round_rate(pltfm_host->clk, UINT_MAX);
  687. }
  688. static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
  689. {
  690. u32 val;
  691. val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
  692. val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
  693. val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
  694. sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
  695. }
  696. static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
  697. {
  698. u32 reg;
  699. int err;
  700. reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
  701. reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
  702. sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
  703. /* 1 ms sleep, 5 ms timeout */
  704. err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
  705. reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
  706. 1000, 5000);
  707. if (err)
  708. dev_err(mmc_dev(host->mmc),
  709. "HS400 delay line calibration timed out\n");
  710. }
  711. static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
  712. u8 thd_low, u8 fixed_tap)
  713. {
  714. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  715. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  716. u32 val, tun_status;
  717. u8 word, bit, edge1, tap, window;
  718. bool tap_result;
  719. bool start_fail = false;
  720. bool start_pass = false;
  721. bool end_pass = false;
  722. bool first_fail = false;
  723. bool first_pass = false;
  724. u8 start_pass_tap = 0;
  725. u8 end_pass_tap = 0;
  726. u8 first_fail_tap = 0;
  727. u8 first_pass_tap = 0;
  728. u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
  729. /*
  730. * Read the auto-tuned results and extract a good, valid passing window by
  731. * filtering out unwanted bubble/partial/merged windows.
  732. */
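/*
 * Each tap contributes one pass/fail bit, so e.g. 128 tuning iterations
 * span 128 / 32 = 4 tuning words; TUN_WORD_SEL selects which 32-tap word
 * TUN_STATUS0 reports.
 */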
  733. for (word = 0; word < total_tuning_words; word++) {
  734. val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
  735. val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
  736. val |= word;
  737. sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
  738. tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
  739. bit = 0;
  740. while (bit < TUNING_WORD_BIT_SIZE) {
  741. tap = word * TUNING_WORD_BIT_SIZE + bit;
  742. tap_result = tun_status & (1 << bit);
  743. if (!tap_result && !start_fail) {
  744. start_fail = true;
  745. if (!first_fail) {
  746. first_fail_tap = tap;
  747. first_fail = true;
  748. }
  749. } else if (tap_result && start_fail && !start_pass) {
  750. start_pass_tap = tap;
  751. start_pass = true;
  752. if (!first_pass) {
  753. first_pass_tap = tap;
  754. first_pass = true;
  755. }
  756. } else if (!tap_result && start_fail && start_pass &&
  757. !end_pass) {
  758. end_pass_tap = tap - 1;
  759. end_pass = true;
  760. } else if (tap_result && start_pass && start_fail &&
  761. end_pass) {
  762. window = end_pass_tap - start_pass_tap;
  763. /* discard merged window and bubble window */
  764. if (window >= thd_up || window < thd_low) {
  765. start_pass_tap = tap;
  766. end_pass = false;
  767. } else {
  768. /* set tap at middle of valid window */
  769. tap = start_pass_tap + window / 2;
  770. tegra_host->tuned_tap_delay = tap;
  771. return;
  772. }
  773. }
  774. bit++;
  775. }
  776. }
  777. if (!first_fail) {
  778. WARN(1, "no edge detected, continue with hw tuned delay.\n");
  779. } else if (first_pass) {
  780. /* set tap location at fixed tap relative to the first edge */
  781. edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
  782. if (edge1 - 1 > fixed_tap)
  783. tegra_host->tuned_tap_delay = edge1 - fixed_tap;
  784. else
  785. tegra_host->tuned_tap_delay = edge1 + fixed_tap;
  786. }
  787. }
  788. static void tegra_sdhci_post_tuning(struct sdhci_host *host)
  789. {
  790. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  791. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  792. const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
  793. u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
  794. u8 fixed_tap, start_tap, end_tap, window_width;
  795. u8 thdupper, thdlower;
  796. u8 num_iter;
  797. u32 clk_rate_mhz, period_ps, bestcase, worstcase;
  798. /* Retain the HW-tuned tap to use in case no correction is needed */
  799. val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
  800. tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
  801. SDHCI_CLOCK_CTRL_TAP_SHIFT;
  802. if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
  803. min_tap_dly = soc_data->min_tap_delay;
  804. max_tap_dly = soc_data->max_tap_delay;
  805. clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
  806. period_ps = USEC_PER_SEC / clk_rate_mhz;
  807. bestcase = period_ps / min_tap_dly;
  808. worstcase = period_ps / max_tap_dly;
  809. /*
  810. * Upper and Lower bound thresholds used to detect merged and
  811. * bubble windows
  812. */
  813. thdupper = (2 * worstcase + bestcase) / 2;
  814. thdlower = worstcase / 4;
  815. /*
  816. * A fixed tap is used when the HW tuning result contains a single edge
  817. * and the tap is set at a fixed delay relative to the first edge.
  818. */
  819. avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
  820. fixed_tap = avg_tap_dly / 2;
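/*
 * Worked example (assuming a 200 MHz tuning clock and the Tegra210
 * min/max tap delays of 106 and 185): period_ps = 5000,
 * bestcase = 5000 / 106 = 47, worstcase = 5000 / 185 = 27,
 * thdupper = (2 * 27 + 47) / 2 = 50, thdlower = 27 / 4 = 6,
 * avg_tap_dly = 10000 / 291 = 34 and fixed_tap = 17.
 */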
  821. val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
  822. start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
  823. end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
  824. SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
  825. window_width = end_tap - start_tap;
  826. num_iter = host->tuning_loop_count;
  827. /*
  828. * A partial window includes the edges of the tuning range.
  829. * A merged window includes more taps, so its width is higher
  830. * than the upper threshold.
  831. */
  832. if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
  833. (end_tap == num_iter - 2) || window_width >= thdupper) {
  834. pr_debug("%s: Apply tuning correction\n",
  835. mmc_hostname(host->mmc));
  836. tegra_sdhci_tap_correction(host, thdupper, thdlower,
  837. fixed_tap);
  838. }
  839. }
  840. tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
  841. }
  842. static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
  843. {
  844. struct sdhci_host *host = mmc_priv(mmc);
  845. int err;
  846. err = sdhci_execute_tuning(mmc, opcode);
  847. if (!err && !host->tuning_err)
  848. tegra_sdhci_post_tuning(host);
  849. return err;
  850. }
  851. static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
  852. unsigned timing)
  853. {
  854. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  855. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  856. bool set_default_tap = false;
  857. bool set_dqs_trim = false;
  858. bool do_hs400_dll_cal = false;
  859. u8 iter = TRIES_256;
  860. u32 val;
  861. tegra_host->ddr_signaling = false;
  862. switch (timing) {
  863. case MMC_TIMING_UHS_SDR50:
  864. break;
  865. case MMC_TIMING_UHS_SDR104:
  866. case MMC_TIMING_MMC_HS200:
  867. /* Don't set default tap on tunable modes. */
  868. iter = TRIES_128;
  869. break;
  870. case MMC_TIMING_MMC_HS400:
  871. set_dqs_trim = true;
  872. do_hs400_dll_cal = true;
  873. iter = TRIES_128;
  874. break;
  875. case MMC_TIMING_MMC_DDR52:
  876. case MMC_TIMING_UHS_DDR50:
  877. tegra_host->ddr_signaling = true;
  878. set_default_tap = true;
  879. break;
  880. default:
  881. set_default_tap = true;
  882. break;
  883. }
  884. val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
  885. val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
  886. SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
  887. SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
  888. val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
  889. 0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
  890. 1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
  891. sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
  892. sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
  893. host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
  894. sdhci_set_uhs_signaling(host, timing);
  895. tegra_sdhci_pad_autocalib(host);
  896. if (tegra_host->tuned_tap_delay && !set_default_tap)
  897. tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
  898. else
  899. tegra_sdhci_set_tap(host, tegra_host->default_tap);
  900. if (set_dqs_trim)
  901. tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
  902. if (do_hs400_dll_cal)
  903. tegra_sdhci_hs400_dll_cal(host);
  904. }
  905. static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
  906. {
  907. unsigned int min, max;
  908. /*
  909. * Start the search for the minimum tap value at 10, as smaller values
  910. * may wrongly be reported as working but fail at higher speeds,
  911. * according to the TRM.
  912. */
  913. min = 10;
  914. while (min < 255) {
  915. tegra_sdhci_set_tap(host, min);
  916. if (!mmc_send_tuning(host->mmc, opcode, NULL))
  917. break;
  918. min++;
  919. }
  920. /* Find the maximum tap value that still passes. */
  921. max = min + 1;
  922. while (max < 255) {
  923. tegra_sdhci_set_tap(host, max);
  924. if (mmc_send_tuning(host->mmc, opcode, NULL)) {
  925. max--;
  926. break;
  927. }
  928. max++;
  929. }
  930. /* The TRM states the ideal tap value is at 75% in the passing range. */
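/*
 * For example, if taps 20 through 100 pass, the tap selected below is
 * 20 + ((100 - 20) * 3 / 4) = 80.
 */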
  931. tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
  932. return mmc_send_tuning(host->mmc, opcode, NULL);
  933. }
  934. static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
  935. struct mmc_ios *ios)
  936. {
  937. struct sdhci_host *host = mmc_priv(mmc);
  938. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  939. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  940. int ret = 0;
  941. if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
  942. ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
  943. if (ret < 0)
  944. return ret;
  945. ret = sdhci_start_signal_voltage_switch(mmc, ios);
  946. } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
  947. ret = sdhci_start_signal_voltage_switch(mmc, ios);
  948. if (ret < 0)
  949. return ret;
  950. ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
  951. }
  952. if (tegra_host->pad_calib_required)
  953. tegra_sdhci_pad_autocalib(host);
  954. return ret;
  955. }
  956. static int tegra_sdhci_init_pinctrl_info(struct device *dev,
  957. struct sdhci_tegra *tegra_host)
  958. {
  959. tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
  960. if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
  961. dev_dbg(dev, "No pinctrl info, err: %ld\n",
  962. PTR_ERR(tegra_host->pinctrl_sdmmc));
  963. return -1;
  964. }
  965. tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
  966. tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
  967. if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
  968. if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
  969. tegra_host->pinctrl_state_1v8_drv = NULL;
  970. }
  971. tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
  972. tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
  973. if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
  974. if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
  975. tegra_host->pinctrl_state_3v3_drv = NULL;
  976. }
  977. tegra_host->pinctrl_state_3v3 =
  978. pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
  979. if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
  980. dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
  981. PTR_ERR(tegra_host->pinctrl_state_3v3));
  982. return -1;
  983. }
  984. tegra_host->pinctrl_state_1v8 =
  985. pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
  986. if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
  987. dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
  988. PTR_ERR(tegra_host->pinctrl_state_1v8));
  989. return -1;
  990. }
  991. tegra_host->pad_control_available = true;
  992. return 0;
  993. }
  994. static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
  995. {
  996. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  997. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  998. const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
  999. if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
  1000. tegra_host->pad_calib_required = true;
  1001. }
  1002. static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
  1003. {
  1004. struct mmc_host *mmc = cq_host->mmc;
  1005. struct sdhci_host *host = mmc_priv(mmc);
  1006. u8 ctrl;
  1007. ktime_t timeout;
  1008. bool timed_out;
  1009. /*
  1010. * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
  1011. * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
  1012. * to be re-configured.
  1013. * Tegra CQHCI/SDHCI prevents write access to block size register when
  1014. * CQE is unhalted. So handle the CQE resume sequence here to configure
  1015. * the SDHCI block registers prior to exiting the CQE halt state.
  1016. */
  1017. if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
  1018. cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
  1019. sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
  1020. sdhci_cqe_enable(mmc);
  1021. writel(val, cq_host->mmio + reg);
  1022. timeout = ktime_add_us(ktime_get(), 50);
  1023. while (1) {
  1024. timed_out = ktime_compare(ktime_get(), timeout) > 0;
  1025. ctrl = cqhci_readl(cq_host, CQHCI_CTL);
  1026. if (!(ctrl & CQHCI_HALT) || timed_out)
  1027. break;
  1028. }
  1029. /*
  1030. * CQE usually resumes very quickly, but in case the Tegra CQE
  1031. * doesn't resume, retry the unhalt.
  1032. */
  1033. if (timed_out)
  1034. writel(val, cq_host->mmio + reg);
  1035. } else {
  1036. writel(val, cq_host->mmio + reg);
  1037. }
  1038. }
  1039. static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
  1040. struct mmc_request *mrq, u64 *data)
  1041. {
  1042. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
  1043. struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
  1044. const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
  1045. if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
  1046. mrq->cmd->flags & MMC_RSP_R1B)
  1047. *data |= CQHCI_CMD_TIMING(1);
  1048. }
  1049. static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
  1050. {
  1051. struct cqhci_host *cq_host = mmc->cqe_private;
  1052. struct sdhci_host *host = mmc_priv(mmc);
  1053. u32 val;
  1054. /*
  1055. * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
  1056. * register when CQE is enabled and unhalted.
  1057. * The CQHCI driver enables CQE prior to activation, so disable CQE before
  1058. * programming the block size in the SDHCI controller and then re-enable it.
  1059. */
  1060. if (!cq_host->activated) {
  1061. val = cqhci_readl(cq_host, CQHCI_CFG);
  1062. if (val & CQHCI_ENABLE)
  1063. cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
  1064. CQHCI_CFG);
  1065. sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
  1066. sdhci_cqe_enable(mmc);
  1067. if (val & CQHCI_ENABLE)
  1068. cqhci_writel(cq_host, val, CQHCI_CFG);
  1069. }
  1070. /*
  1071. * CMD CRC errors are sometimes seen with some eMMC devices when a status
  1072. * command is sent during the transfer of the last data block, which is the
  1073. * default case because the send status command block counter (CBC) is 1.
  1074. * The recommended fix is to set CBC to 0, allowing the status command only
  1075. * when the data lines are idle.
  1076. */
  1077. val = cqhci_readl(cq_host, CQHCI_SSC1);
  1078. val &= ~CQHCI_SSC1_CBC_MASK;
  1079. cqhci_writel(cq_host, val, CQHCI_SSC1);
  1080. }
  1081. static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
  1082. {
  1083. sdhci_dumpregs(mmc_priv(mmc));
  1084. }
  1085. static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
  1086. {
  1087. int cmd_error = 0;
  1088. int data_error = 0;
  1089. if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
  1090. return intmask;
  1091. cqhci_irq(host->mmc, intmask, cmd_error, data_error);
  1092. return 0;
  1093. }
  1094. static void tegra_sdhci_set_timeout(struct sdhci_host *host,
  1095. struct mmc_command *cmd)
  1096. {
  1097. u32 val;
  1098. /*
  1099. * HW busy detection timeout is based on programmed data timeout
  1100. * counter and maximum supported timeout is 11s which may not be
  1101. * enough for long operations like cache flush, sleep awake, erase.
  1102. *
  1103. * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
  1104. * host controller to wait for busy state until the card is busy
  1105. * without HW timeout.
  1106. *
  1107. * So, use infinite busy wait mode for operations that may take
  1108. * more than maximum HW busy timeout of 11s otherwise use finite
  1109. * busy wait mode.
  1110. */
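/*
 * For instance (timeout value assumed for illustration), a cache flush
 * issued with cmd->busy_timeout = 30000 ms exceeds the 11 s HW limit, so
 * the infinite busy wait mode below is selected.
 */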
  1111. val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
  1112. if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
  1113. val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
  1114. else
  1115. val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
  1116. sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
  1117. __sdhci_set_timeout(host, cmd);
  1118. }
  1119. static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
  1120. {
  1121. struct cqhci_host *cq_host = mmc->cqe_private;
  1122. u32 reg;
  1123. reg = cqhci_readl(cq_host, CQHCI_CFG);
  1124. reg |= CQHCI_ENABLE;
  1125. cqhci_writel(cq_host, reg, CQHCI_CFG);
  1126. }
  1127. static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
  1128. {
  1129. struct cqhci_host *cq_host = mmc->cqe_private;
  1130. struct sdhci_host *host = mmc_priv(mmc);
  1131. u32 reg;
  1132. reg = cqhci_readl(cq_host, CQHCI_CFG);
  1133. reg &= ~CQHCI_ENABLE;
  1134. cqhci_writel(cq_host, reg, CQHCI_CFG);
  1135. sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
  1136. }
  1137. static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
  1138. .write_l = tegra_cqhci_writel,
  1139. .enable = sdhci_tegra_cqe_enable,
  1140. .disable = sdhci_cqe_disable,
  1141. .dumpregs = sdhci_tegra_dumpregs,
  1142. .update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
  1143. .pre_enable = sdhci_tegra_cqe_pre_enable,
  1144. .post_disable = sdhci_tegra_cqe_post_disable,
  1145. };
  1146. static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
  1147. {
  1148. struct sdhci_pltfm_host *platform = sdhci_priv(host);
  1149. struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
  1150. const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
  1151. struct device *dev = mmc_dev(host->mmc);
  1152. if (soc->dma_mask)
  1153. return dma_set_mask_and_coherent(dev, soc->dma_mask);
  1154. return 0;
  1155. }
  1156. static const struct sdhci_ops tegra_sdhci_ops = {
  1157. .get_ro = tegra_sdhci_get_ro,
  1158. .read_w = tegra_sdhci_readw,
  1159. .write_l = tegra_sdhci_writel,
  1160. .set_clock = tegra_sdhci_set_clock,
  1161. .set_dma_mask = tegra_sdhci_set_dma_mask,
  1162. .set_bus_width = sdhci_set_bus_width,
  1163. .reset = tegra_sdhci_reset,
  1164. .platform_execute_tuning = tegra_sdhci_execute_tuning,
  1165. .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
  1166. .voltage_switch = tegra_sdhci_voltage_switch,
  1167. .get_max_clock = tegra_sdhci_get_max_clock,
  1168. };
  1169. static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
  1170. .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
  1171. SDHCI_QUIRK_SINGLE_POWER_WRITE |
  1172. SDHCI_QUIRK_NO_HISPD_BIT |
  1173. SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
  1174. SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
  1175. .ops = &tegra_sdhci_ops,
  1176. };
  1177. static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
  1178. .pdata = &sdhci_tegra20_pdata,
  1179. .dma_mask = DMA_BIT_MASK(32),
  1180. .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
  1181. NVQUIRK_HAS_ANDROID_GPT_SECTOR |
  1182. NVQUIRK_ENABLE_BLOCK_GAP_DET,
  1183. };
  1184. static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
  1185. .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
  1186. SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
  1187. SDHCI_QUIRK_SINGLE_POWER_WRITE |
  1188. SDHCI_QUIRK_NO_HISPD_BIT |
  1189. SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
  1190. SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
  1191. .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
  1192. SDHCI_QUIRK2_BROKEN_HS200 |
  1193. /*
  1194. * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
  1195. * though no command operation was in progress."
  1196. *
  1197. * The exact reason is unknown, as the same hardware seems
  1198. * to support Auto CMD23 on a downstream 3.1 kernel.
  1199. */
  1200. SDHCI_QUIRK2_ACMD23_BROKEN,
  1201. .ops = &tegra_sdhci_ops,
  1202. };
  1203. static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
  1204. .pdata = &sdhci_tegra30_pdata,
  1205. .dma_mask = DMA_BIT_MASK(32),
  1206. .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
  1207. NVQUIRK_ENABLE_SDR50 |
  1208. NVQUIRK_ENABLE_SDR104 |
  1209. NVQUIRK_HAS_ANDROID_GPT_SECTOR |
  1210. NVQUIRK_HAS_PADCALIB,
  1211. };
  1212. static const struct sdhci_ops tegra114_sdhci_ops = {
  1213. .get_ro = tegra_sdhci_get_ro,
  1214. .read_w = tegra_sdhci_readw,
  1215. .write_w = tegra_sdhci_writew,
  1216. .write_l = tegra_sdhci_writel,
  1217. .set_clock = tegra_sdhci_set_clock,
  1218. .set_dma_mask = tegra_sdhci_set_dma_mask,
  1219. .set_bus_width = sdhci_set_bus_width,
  1220. .reset = tegra_sdhci_reset,
  1221. .platform_execute_tuning = tegra_sdhci_execute_tuning,
  1222. .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
  1223. .voltage_switch = tegra_sdhci_voltage_switch,
  1224. .get_max_clock = tegra_sdhci_get_max_clock,
  1225. };

static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
	.pdata = &sdhci_tegra114_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
};

static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra114_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
	.pdata = &sdhci_tegra124_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
};
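
/*
 * Tegra210 uses its own 16-bit write accessor and a custom data timeout
 * hook; no .platform_execute_tuning is set, so probe() installs
 * tegra_sdhci_execute_hw_tuning as the tuning handler instead.
 */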
static const struct sdhci_ops tegra210_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_w = tegra210_sdhci_writew,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &tegra210_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
	.pdata = &sdhci_tegra210_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 106,
	.max_tap_delay = 185,
};
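
/*
 * Tegra186/Tegra194 additionally route interrupts through
 * sdhci_tegra_cqhci_irq so CQHCI events are handled when command
 * queueing is in use.
 */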
static const struct sdhci_ops tegra186_sdhci_ops = {
	.get_ro = tegra_sdhci_get_ro,
	.read_w = tegra_sdhci_readw,
	.write_l = tegra_sdhci_writel,
	.set_clock = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.irq = sdhci_tegra_cqhci_irq,
	.set_timeout = tegra_sdhci_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
	.ops = &tegra186_sdhci_ops,
};

static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(40),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK |
		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
	.min_tap_delay = 84,
	.max_tap_delay = 136,
};
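
/*
 * Tegra194 reuses the Tegra186 platform data but has a 39-bit DMA mask
 * and different tap delay limits.
 */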
static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(39),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 96,
	.max_tap_delay = 139,
};
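
/* Device tree match table; .data selects the per-SoC configuration above. */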
static const struct of_device_id sdhci_tegra_dt_match[] = {
	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
	{}
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
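
/*
 * Register the SDHCI host. When hardware command queueing is enabled
 * (tegra_host->enable_hwcq), switch the controller to v4 mode and attach
 * a CQHCI host before completing registration.
 */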
static int sdhci_tegra_add_host(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	if (!tegra_host->enable_hwcq)
		return sdhci_add_host(host);

	sdhci_enable_v4_mode(host);

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	cq_host = devm_kzalloc(mmc_dev(host->mmc),
			       sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
	cq_host->ops = &sdhci_tegra_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	if (dma64)
		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}
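
/*
 * Probe: pick up the per-SoC data from the DT match, set up host/MMC
 * capabilities, clocks and resets, then register the host.
 */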
static int sdhci_tegra_probe(struct platform_device *pdev)
{
	const struct sdhci_tegra_soc_data *soc_data;
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_tegra *tegra_host;
	struct clk *clk;
	int rc;

	soc_data = of_device_get_match_data(&pdev->dev);
	if (!soc_data)
		return -EINVAL;

	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	pltfm_host = sdhci_priv(host);

	tegra_host = sdhci_pltfm_priv(pltfm_host);
	tegra_host->ddr_signaling = false;
	tegra_host->pad_calib_required = false;
	tegra_host->pad_control_available = false;
	tegra_host->soc_data = soc_data;

	if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
		host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;

	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
		if (rc == 0)
			host->mmc_host_ops.start_signal_voltage_switch =
				sdhci_tegra_start_signal_voltage_switch;
	}

	/* Hook to periodically rerun pad calibration */
	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
		host->mmc_host_ops.request = tegra_sdhci_request;

	host->mmc_host_ops.hs400_enhanced_strobe =
			tegra_sdhci_hs400_enhanced_strobe;

	if (!host->ops->platform_execute_tuning)
		host->mmc_host_ops.execute_tuning =
				tegra_sdhci_execute_hw_tuning;

	rc = mmc_of_parse(host->mmc);
	if (rc)
		goto err_parse_dt;

	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
		host->mmc->caps |= MMC_CAP_1_8V_DDR;

	/* HW busy detection is supported, but R1B responses are required. */
	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	/* GPIO CD can be set as a wakeup source */
	host->mmc->caps |= MMC_CAP_CD_WAKE;

	tegra_sdhci_parse_dt(host);

	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
							 GPIOD_OUT_HIGH);
	if (IS_ERR(tegra_host->power_gpio)) {
		rc = PTR_ERR(tegra_host->power_gpio);
		goto err_power_req;
	}

	/*
	 * Tegra210 has a separate SDMMC_LEGACY_TM clock that serves as the
	 * host timeout clock. Software can select either TMCLK or SDCLK as
	 * the hardware data timeout clock through the
	 * USE_TMCLK_FOR_DATA_TIMEOUT bit of the
	 * SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
	 *
	 * USE_TMCLK_FOR_DATA_TIMEOUT defaults to 1, so SDMMC uses the 12 MHz
	 * TMCLK, which is advertised in the host capability register. With a
	 * 12 MHz TMCLK the maximum achievable data timeout period is 11 s,
	 * better than what SDCLK can provide.
	 *
	 * So TMCLK is set to 12 MHz and kept enabled at all times on SoCs
	 * that support a separate TMCLK.
	 */
	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
		clk = devm_clk_get(&pdev->dev, "tmclk");
		if (IS_ERR(clk)) {
			rc = PTR_ERR(clk);
			if (rc == -EPROBE_DEFER)
				goto err_power_req;

			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
			clk = NULL;
		}

		clk_set_rate(clk, 12000000);
		rc = clk_prepare_enable(clk);
		if (rc) {
			dev_err(&pdev->dev,
				"failed to enable tmclk: %d\n", rc);
			goto err_power_req;
		}

		tegra_host->tmclk = clk;
	}

	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
	if (IS_ERR(clk)) {
		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
				   "failed to get clock\n");
		goto err_clk_get;
	}

	pltfm_host->clk = clk;

	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
							   "sdhci");
	if (IS_ERR(tegra_host->rst)) {
		rc = PTR_ERR(tegra_host->rst);
		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
		goto err_rst_get;
	}

	rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
	if (rc)
		goto err_rst_get;

	pm_runtime_enable(&pdev->dev);
	rc = pm_runtime_resume_and_get(&pdev->dev);
	if (rc)
		goto err_pm_get;

	rc = reset_control_assert(tegra_host->rst);
	if (rc)
		goto err_rst_assert;

	usleep_range(2000, 4000);

	rc = reset_control_deassert(tegra_host->rst);
	if (rc)
		goto err_rst_assert;

	usleep_range(2000, 4000);

	rc = sdhci_tegra_add_host(host);
	if (rc)
		goto err_add_host;

	return 0;

err_add_host:
	reset_control_assert(tegra_host->rst);
err_rst_assert:
	pm_runtime_put_sync_suspend(&pdev->dev);
err_pm_get:
	pm_runtime_disable(&pdev->dev);
err_rst_get:
err_clk_get:
	clk_disable_unprepare(tegra_host->tmclk);
err_power_req:
err_parse_dt:
	sdhci_pltfm_free(pdev);
	return rc;
}
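
/*
 * Tear down in roughly the reverse order of probe: unregister the host,
 * assert the controller reset, drop the runtime PM reference and disable
 * TMCLK.
 */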
static int sdhci_tegra_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_remove_host(host, 0);

	reset_control_assert(tegra_host->rst);
	usleep_range(2000, 4000);

	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_force_suspend(&pdev->dev);

	clk_disable_unprepare(tegra_host->tmclk);
	sdhci_pltfm_free(pdev);

	return 0;
}
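
/* Runtime PM only gates/ungates the SDHCI module clock. */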
static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	clk_disable_unprepare(pltfm_host->clk);

	return 0;
}

static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return clk_prepare_enable(pltfm_host->clk);
}

#ifdef CONFIG_PM_SLEEP
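/*
 * System sleep: quiesce CQE (if enabled) and the SDHCI host, then force
 * runtime suspend; resume reverses the order.
 */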
static int sdhci_tegra_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	ret = sdhci_suspend_host(host);
	if (ret) {
		cqhci_resume(host->mmc);
		return ret;
	}

	ret = pm_runtime_force_suspend(dev);
	if (ret) {
		sdhci_resume_host(host);
		cqhci_resume(host->mmc);
		return ret;
	}

	return mmc_gpio_set_cd_wake(host->mmc, true);
}

static int sdhci_tegra_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret;

	ret = mmc_gpio_set_cd_wake(host->mmc, false);
	if (ret)
		return ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = sdhci_resume_host(host);
	if (ret)
		goto disable_clk;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_resume(host->mmc);
		if (ret)
			goto suspend_host;
	}

	return 0;

suspend_host:
	sdhci_suspend_host(host);
disable_clk:
	pm_runtime_force_suspend(dev);
	return ret;
}
#endif

static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
};

static struct platform_driver sdhci_tegra_driver = {
	.driver = {
		.name = "sdhci-tegra",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_tegra_dt_match,
		.pm = &sdhci_tegra_dev_pm_ops,
	},
	.probe = sdhci_tegra_probe,
	.remove = sdhci_tegra_remove,
};

module_platform_driver(sdhci_tegra_driver);

MODULE_DESCRIPTION("SDHCI driver for Tegra");
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL v2");