ufs-mediatek.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <[email protected]>
 *	Peter Wang <[email protected]>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
#undef CREATE_TRACE_POINTS

#define MAX_SUPP_MAC 64
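/*
 * Derive the byte offset of the MCQ queue registers from bits [23:16]
 * of the MCQ capability value, scaled by 0x200 bytes per queue.
 */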
#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)

static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "H9HQ21AFAMZDAR",
	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
	{}
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};

/*
 * Details of UIC Errors
 */
static const char *const ufs_uic_err_str[] = {
	"PHY Adapter Layer",
	"Data Link Layer",
	"Network Link Layer",
	"Transport Link Layer",
	"DME"
};

static const char *const ufs_uic_pa_err_str[] = {
	"PHY error on Lane 0",
	"PHY error on Lane 1",
	"PHY error on Lane 2",
	"PHY error on Lane 3",
	"Generic PHY Adapter Error. This should be the LINERESET indication"
};

static const char *const ufs_uic_dl_err_str[] = {
	"NAC_RECEIVED",
	"TCx_REPLAY_TIMER_EXPIRED",
	"AFCx_REQUEST_TIMER_EXPIRED",
	"FCx_PROTECTION_TIMER_EXPIRED",
	"CRC_ERROR",
	"RX_BUFFER_OVERFLOW",
	"MAX_FRAME_LENGTH_EXCEEDED",
	"WRONG_SEQUENCE_NUMBER",
	"AFC_FRAME_SYNTAX_ERROR",
	"NAC_FRAME_SYNTAX_ERROR",
	"EOF_SYNTAX_ERROR",
	"FRAME_SYNTAX_ERROR",
	"BAD_CTRL_SYMBOL_TYPE",
	"PA_INIT_ERROR",
	"PA_ERROR_IND_RECEIVED",
	"PA_INIT"
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}
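/*
 * Enable or disable UniPro local clock gating by updating the
 * VS_SAVEPOWERCONTROL and VS_DEBUGCLOCKENABLE vendor attributes
 * through DME get/set accesses.
 */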
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
			(1 << RX_SYMBOL_CLK_GATE_EN) |
			(1 << SYS_CLK_GATE_EN) |
			(1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			(1 << SYS_CLK_GATE_EN) |
			(1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}
static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}
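/*
 * Assert the HCI, crypto and UniPro reset lines, hold them briefly,
 * then release them in the reverse order.
 */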
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset, "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset, "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset, "crypto_rst");
}
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}

		/*
		 * Turn on CLK_CG early to bypass the abnormal ERR_CHK
		 * signal and prevent a host hang issue.
		 */
		ufshcd_writel(hba,
			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
			      REG_UFS_XOUFS_CTRL);
	}

	return 0;
}
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case, return the EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			 __func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow an unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}
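/*
 * Request or release the device reference clock through
 * REG_UFS_REFCLK_CTRL and poll the same register until the ACK bit
 * matches the request, signalling the change over the MediaTek SIP
 * SMC interface before and after the transition.
 */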
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

	if (on) {
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until the ack bit equals the req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}
static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}
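/*
 * Poll the link state machine exposed through REG_UFS_PROBE until it
 * leaves the hibernate enter/exit range and returns to the idle state.
 * ktime_get_mono_fast_ns() is used because this may run late in suspend.
 */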
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
				    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a specific time after check base */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		sm = val & 0x1f;

		/*
		 * If the state machine is between "H8 enter" and
		 * "H8 enter confirm", wait until it returns to idle.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	if (val == state)
		return 0;

	return -ETIMEDOUT;
}
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}
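/*
 * Parse the "dvfsrc-vcore" regulator, the "boost-crypt-vcore-min"
 * property and the crypt_mux/crypt_lp/crypt_perf clocks. If any of
 * them is missing, leave UFS_MTK_CAP_BOOST_CRYPT_ENGINE disabled.
 */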
static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}
static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (!host || !host->pm_qos_init)
		return;

	cpu_latency_qos_update_request(&host->pm_qos_req,
				       boost ? 0 : PM_QOS_DEFAULT_VALUE);
}

static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
	ufs_mtk_boost_crypt(hba, scale_up);
	ufs_mtk_boost_pm_qos(hba, scale_up);
}

static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (on) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
	} else {
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
		phy_power_off(host->mphy);
	}
}

/**
 * ufs_mtk_setup_clocks - enable or disable the controller clocks
 * @hba: host controller instance
 * @on: if true, enable clocks, otherwise disable them
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_pwr_ctrl(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_pwr_ctrl(hba, true);
	}

	return ret;
}
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

/**
 * ufs_mtk_init_clocks - Init mtk driver private clocks
 *
 * @hba: per adapter instance
 */
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki, *clki_tmp;

	/*
	 * Find private clocks and store them in struct ufs_mtk_clk.
	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from the list
	 * to avoid them being switched on/off in clock gating.
	 */
	list_for_each_entry_safe(clki, clki_tmp, head, list) {
		if (!strcmp(clki->name, "ufs_sel")) {
			host->mclk.ufs_sel_clki = clki;
		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
			host->mclk.ufs_sel_max_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
			host->mclk.ufs_sel_min_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		}
	}

	if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
	    !mclk->ufs_sel_min_clki) {
		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
		dev_info(hba->dev,
			 "%s: Clk-scaling not ready. Feature disabled.",
			 __func__);
	}
}

#define MAX_VCC_NAME 30
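/*
 * Bind the correct VCC regulator when it is not described directly in
 * the device tree: either by the supply number reported through the
 * MediaTek SIP SMC call ("mediatek,ufs-vcc-by-num") or by the UFS
 * specification version of the attached device ("mediatek,ufs-vcc-by-ver").
 */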
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct device_node *np = hba->dev->of_node;
	struct device *dev = hba->dev;
	char vcc_name[MAX_VCC_NAME];
	struct arm_smccc_res res;
	int err, ver;

	if (hba->vreg_info.vcc)
		return 0;

	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
		ufs_mtk_get_vcc_num(res);
		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
		else
			return -ENODEV;
	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	} else {
		return 0;
	}

	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
	if (err)
		return err;

	err = ufshcd_get_vreg(dev, info->vcc);
	if (err)
		return err;

	err = regulator_enable(info->vcc->reg);
	if (!err) {
		info->vcc->enabled = true;
		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
	}

	return err;
}

static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct ufs_vreg **vreg_on, **vreg_off;

	if (hba->dev_info.wspecversion >= 0x0300) {
		vreg_on = &info->vccq;
		vreg_off = &info->vccq2;
	} else {
		vreg_on = &info->vccq2;
		vreg_off = &info->vccq;
	}

	if (*vreg_on)
		(*vreg_on)->always_on = true;

	if (*vreg_off) {
		regulator_disable((*vreg_off)->reg);
		devm_kfree(hba->dev, (*vreg_off)->name);
		devm_kfree(hba->dev, *vreg_off);
		*vreg_off = NULL;
	}
}
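/*
 * Collect the per-queue MCQ interrupts from the platform device.
 * IRQ index 0 is the legacy interrupt; SQ/CQ interrupts start at
 * index 1. If any of them is missing, fall back to legacy (single
 * interrupt) mode by zeroing mcq_nr_intr.
 */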
static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct platform_device *pdev;
	int i;
	int irq;

	host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
	pdev = container_of(hba->dev, struct platform_device, dev);

	for (i = 0; i < host->mcq_nr_intr; i++) {
		/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
			dev_err(hba->dev, "get platform mcq irq fail: %d\n", i);
			goto failed;
		}
		host->mcq_intr_info[i].hba = hba;
		host->mcq_intr_info[i].irq = irq;
		dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
	}

	return;
failed:
	/* invalidate irq info */
	for (i = 0; i < host->mcq_nr_intr; i++)
		host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;

	host->mcq_nr_intr = 0;
}
/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	ufs_mtk_init_mcq_irq(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;

	/* Enable clk scaling */
	hba->caps |= UFSHCD_CAP_CLK_SCALING;

	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	ufs_mtk_init_clocks(hba);

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	/* Initialize pm-qos request */
	cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
	host->pm_qos_init = true;

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}
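/*
 * Use FASTAUTO for the power mode change only when the platform opted
 * in, the requested HS rate differs from the current one, and each
 * direction either asks for FAST mode or for at least HS-G4.
 */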
static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (!ufs_mtk_is_pmc_via_fastauto(hba))
		return false;

	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
		return false;

	if (dev_req_params->pwr_tx != FAST_MODE &&
	    dev_req_params->gear_tx < UFS_HS_G4)
		return false;

	if (dev_req_params->pwr_rx != FAST_MODE &&
	    dev_req_params->gear_rx < UFS_HS_G4)
		return false;

	return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G5;
	host_cap.hs_tx_gear = UFS_HS_G5;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			       dev_req_params->lane_tx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			       dev_req_params->lane_rx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       dev_req_params->hs_rate);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       PA_NO_ADAPT);

		ret = ufshcd_uic_change_pwr_mode(hba,
						 FASTAUTO_MODE << 4 | FASTAUTO_MODE);
		if (ret) {
			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
				__func__, ret);
		}
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
						 dev_req_params->gear_tx,
						 PA_INITIAL_ADAPT);
	}

	return ret;
}
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Forcibly keep the state as non-LPM if the UIC command
		 * failed, so that the default hba_enable_delay_us value
		 * is used when re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
	}
}

static void ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be configured during probe hba */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);
}
static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	if (!hba->mcq_enabled) {
		err = ufshcd_make_hba_operational(hba);
	} else {
		ufs_mtk_config_mcq(hba, false);
		ufshcd_mcq_make_queues_operational(hba);
		ufshcd_mcq_config_mac(hba, hba->nutrs);
		/* Enable MCQ mode */
		ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
			      REG_UFS_MEM_CFG);
	}

	if (err)
		return err;

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	/* Disable reset confirm feature by UniPro */
	ufshcd_writel(hba,
		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
		      REG_UFS_XOUFS_CTRL);

	err = ufs_mtk_unipro_set_lpm(hba, true);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_lpm(hba, false);
		return err;
	}

	return 0;
}
static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct ufs_vreg *vccqx = NULL;

	if (hba->vreg_info.vccq)
		vccqx = hba->vreg_info.vccq;
	else
		vccqx = hba->vreg_info.vccq2;

	regulator_set_mode(vccqx->reg,
			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct arm_smccc_res res;

	ufs_mtk_device_pwr_ctrl(!lpm,
				(unsigned long)hba->dev_info.wspecversion,
				res);
}
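/*
 * Switch the device rails between normal and low-power mode. On entry
 * to LPM the VCCQ/VCCQ2 rail is put into idle mode before the VSX rail;
 * on exit the order is reversed. LPM is skipped while the device is
 * still active or while VCC stays enabled.
 */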
static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
		return;

	/* Skip if VCC is assumed always-on */
	if (!hba->vreg_info.vcc)
		return;

	/* Bypass LPM when device is still active */
	if (lpm && ufshcd_is_ufs_dev_active(hba))
		return;

	/* Bypass LPM if VCC is enabled */
	if (lpm && hba->vreg_info.vcc->enabled)
		return;

	if (lpm) {
		ufs_mtk_vccqx_set_lpm(hba, lpm);
		ufs_mtk_vsx_set_lpm(hba, lpm);
	} else {
		ufs_mtk_vsx_set_lpm(hba, lpm);
		ufs_mtk_vccqx_set_lpm(hba, lpm);
	}
}

static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	int ret;

	/* disable auto-hibern8 */
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* wait host return to idle state when auto-hibern8 off */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			   enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (ufshcd_is_auto_hibern8_supported(hba))
			ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

	return 0;
fail:
	/*
	 * Forcibly set the link to the off state to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for a complete host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		ufs_mtk_dev_vreg_set_lpm(hba, false);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	/* Dump ufshci register 0x140 ~ 0x14C */
	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
			 "XOUFS Ctrl (0x140): ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* Dump ufshci register 0x2200 ~ 0x22AC */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl (0x2200): ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
	}

	/*
	 * Decide waiting time before gating reference clock and
	 * after ungating reference clock according to vendors'
	 * requirements.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
	else
		ufs_mtk_setup_ref_clk_wait_us(hba,
					      REFCLK_DEFAULT_WAIT_US);

	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC will be kept always-on thus we don't
		 * need any delay during regulator operations
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}

	ufs_mtk_vreg_fix_vcc(hba);
	ufs_mtk_vreg_fix_vccqx(hba);
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;
	unsigned long reg;
	u8 bit;

	trace_ufs_mtk_event(evt, val);

	/* Print details of UIC Errors */
	if (evt <= UFS_EVT_DME_ERR) {
		dev_info(hba->dev,
			 "Host UIC Error Code (%s): %08x\n",
			 ufs_uic_err_str[evt], val);
		reg = val;
	}

	if (evt == UFS_EVT_PA_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
	}

	if (evt == UFS_EVT_DL_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
	}
}

static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
					 struct devfreq_dev_profile *profile,
					 struct devfreq_simple_ondemand_data *data)
{
	/* Customize min gear in clk scaling */
	hba->clk_scaling.min_gear = UFS_HS_G4;

	hba->vps->devfreq_profile.polling_ms = 200;
	hba->vps->ondemand_data.upthreshold = 50;
	hba->vps->ondemand_data.downdifferential = 20;
}
/**
 * ufs_mtk_clk_scale - Internal clk scaling operation
 *
 * MTK platform supports clk scaling by switching the parent of ufs_sel (mux).
 * ufs_sel feeds ufs_ck, which clocks the UFS hardware directly.
 * The max and min clock rates of ufs_sel defined in the dts should match
 * the rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
 * This prevents changing the rate of a pll clock that is shared between
 * modules.
 *
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 */
static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
	int ret = 0;

	ret = clk_prepare_enable(clki->clk);
	if (ret) {
		dev_info(hba->dev,
			 "clk_prepare_enable() fail, ret: %d\n", ret);
		return;
	}

	if (scale_up) {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
		clki->curr_freq = clki->max_freq;
	} else {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
		clki->curr_freq = clki->min_freq;
	}

	if (ret) {
		dev_info(hba->dev,
			 "Failed to set ufs_sel_clki, ret: %d\n", ret);
	}

	clk_disable_unprepare(clki->clk);

	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}
static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
				    enum ufs_notify_change_status status)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return 0;

	if (status == PRE_CHANGE) {
		/* Switch parent before clk_set_rate() */
		ufs_mtk_clk_scale(hba, scale_up);
	} else {
		/* Request interrupt latency QoS accordingly */
		ufs_mtk_scale_perf(hba, scale_up);
	}

	return 0;
}

static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
{
	return MAX_SUPP_MAC;
}

static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
{
	struct ufshcd_mcq_opr_info_t *opr;
	int i;

	hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
	hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
	hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
	hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;

	for (i = 0; i < OPR_MAX; i++) {
		opr = &hba->mcq_opr[i];
		opr->stride = REG_UFS_MCQ_STRIDE;
		opr->base = hba->mmio_base + opr->offset;
	}

	return 0;
}

static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	/* fail mcq initialization if interrupt is not filled properly */
	if (!host->mcq_nr_intr) {
		dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
		return -EINVAL;
	}

	hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
	return 0;
}
static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
{
	struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
	struct ufs_hba *hba = mcq_intr_info->hba;
	struct ufs_hw_queue *hwq;
	u32 events;
	int qid = mcq_intr_info->qid;

	hwq = &hba->uhq[qid];

	events = ufshcd_mcq_read_cqis(hba, qid);
	if (events)
		ufshcd_mcq_write_cqis(hba, events, qid);

	if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
		ufshcd_mcq_poll_cqe_lock(hba, hwq);

	return IRQ_HANDLED;
}

static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 irq, i;
	int ret;

	for (i = 0; i < host->mcq_nr_intr; i++) {
		irq = host->mcq_intr_info[i].irq;
		if (irq == MTK_MCQ_INVALID_IRQ) {
			dev_err(hba->dev, "invalid irq. %d\n", i);
			return -ENOPARAM;
		}

		host->mcq_intr_info[i].qid = i;
		ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
				       &host->mcq_intr_info[i]);

		dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");

		if (ret) {
			dev_err(hba->dev, "Cannot request irq %d\n", ret);
			return ret;
		}
	}

	return 0;
}
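/*
 * Configure MCQ interrupt routing: the per-queue interrupts are
 * registered once, then the option register is programmed to keep AH8
 * usable with MCQ and to enable the multiple-interrupt scheme.
 */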
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret = 0;

	if (!host->mcq_set_intr) {
		/* Disable irq option register */
		ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);

		if (irq) {
			ret = ufs_mtk_config_mcq_irq(hba);
			if (ret)
				return ret;
		}

		host->mcq_set_intr = true;
	}

	ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
	ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);

	return 0;
}

static int ufs_mtk_config_esi(struct ufs_hba *hba)
{
	return ufs_mtk_config_mcq(hba, true);
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name = "mediatek.ufshci",
	.init = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks = ufs_mtk_setup_clocks,
	.hce_enable_notify = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
	.suspend = ufs_mtk_suspend,
	.resume = ufs_mtk_resume,
	.dbg_register_dump = ufs_mtk_dbg_register_dump,
	.device_reset = ufs_mtk_device_reset,
	.event_notify = ufs_mtk_event_notify,
	.config_scaling_param = ufs_mtk_config_scaling_param,
	.clk_scale_notify = ufs_mtk_clk_scale_notify,
	/* mcq vops */
	.get_hba_mac = ufs_mtk_get_hba_mac,
	.op_runtime_config = ufs_mtk_op_runtime_config,
	.mcq_config_resource = ufs_mtk_mcq_config_resource,
	.config_esi = ufs_mtk_config_esi,
};
/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
			       DL_FLAG_AUTOPROBE_CONSUMER);
	put_device(&reset_pdev->dev);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_err(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_system_resume(dev);
}
#endif

#ifdef CONFIG_PM
static int ufs_mtk_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;

	ret = ufshcd_runtime_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_runtime_resume(dev);
}
#endif

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
				ufs_mtk_system_resume)
	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
			   ufs_mtk_runtime_resume, NULL)
	.prepare = ufshcd_suspend_prepare,
	.complete = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe = ufs_mtk_probe,
	.remove = ufs_mtk_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver = {
		.name = "ufshcd-mtk",
		.pm = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <[email protected]>");
MODULE_AUTHOR("Peter Wang <[email protected]>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);