// SPDX-License-Identifier: GPL-2.0-only
/*
 * UFS Host Controller driver for Exynos specific extensions
 *
 * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
 * Author: Seungwon Jeon <[email protected]>
 * Author: Alim Akhtar <[email protected]>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufshci.h>
#include <ufs/unipro.h>

#include "ufs-exynos.h"

/*
 * Exynos's Vendor specific registers for UFSHCI
 */
#define HCI_TXPRDT_ENTRY_SIZE	0x00
#define PRDT_PREFECT_EN		BIT(31)
#define PRDT_SET_SIZE(x)	((x) & 0x1F)
#define HCI_RXPRDT_ENTRY_SIZE	0x04
#define HCI_1US_TO_CNT_VAL	0x0C
#define CNT_VAL_1US_MASK	0x3FF
#define HCI_UTRL_NEXUS_TYPE	0x40
#define HCI_UTMRL_NEXUS_TYPE	0x44
#define HCI_SW_RST		0x50
#define UFS_LINK_SW_RST		BIT(0)
#define UFS_UNIPRO_SW_RST	BIT(1)
#define UFS_SW_RST_MASK		(UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST)
#define HCI_DATA_REORDER	0x60
#define HCI_UNIPRO_APB_CLK_CTRL	0x68
#define UNIPRO_APB_CLK(v, x)	(((v) & ~0xF) | ((x) & 0xF))
#define HCI_AXIDMA_RWDATA_BURST_LEN	0x6C
#define HCI_GPIO_OUT		0x70
#define HCI_ERR_EN_PA_LAYER	0x78
#define HCI_ERR_EN_DL_LAYER	0x7C
#define HCI_ERR_EN_N_LAYER	0x80
#define HCI_ERR_EN_T_LAYER	0x84
#define HCI_ERR_EN_DME_LAYER	0x88
#define HCI_CLKSTOP_CTRL	0xB0
#define REFCLKOUT_STOP		BIT(4)
#define MPHY_APBCLK_STOP	BIT(3)
#define REFCLK_STOP		BIT(2)
#define UNIPRO_MCLK_STOP	BIT(1)
#define UNIPRO_PCLK_STOP	BIT(0)
#define CLK_STOP_MASK		(REFCLKOUT_STOP | REFCLK_STOP |\
				 UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP |\
				 UNIPRO_PCLK_STOP)
#define HCI_MISC		0xB4
#define REFCLK_CTRL_EN		BIT(7)
#define UNIPRO_PCLK_CTRL_EN	BIT(6)
#define UNIPRO_MCLK_CTRL_EN	BIT(5)
#define HCI_CORECLK_CTRL_EN	BIT(4)
#define CLK_CTRL_EN_MASK	(REFCLK_CTRL_EN |\
				 UNIPRO_PCLK_CTRL_EN |\
				 UNIPRO_MCLK_CTRL_EN)

/* Device fatal error */
#define DFES_ERR_EN		BIT(31)
#define DFES_DEF_L2_ERRS	(UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\
				 UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
#define DFES_DEF_L3_ERRS	(UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\
				 UIC_NETWORK_BAD_DEVICEID_ENC |\
				 UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING)
#define DFES_DEF_L4_ERRS	(UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\
				 UIC_TRANSPORT_UNKNOWN_CPORTID |\
				 UIC_TRANSPORT_NO_CONNECTION_RX |\
				 UIC_TRANSPORT_BAD_TC)

/* FSYS UFS Shareability */
#define UFS_WR_SHARABLE		BIT(2)
#define UFS_RD_SHARABLE		BIT(1)
#define UFS_SHARABLE		(UFS_WR_SHARABLE | UFS_RD_SHARABLE)
#define UFS_SHAREABILITY_OFFSET	0x710

/* Multi-host registers */
#define MHCTRL			0xC4
#define MHCTRL_EN_VH_MASK	(0xE)
#define MHCTRL_EN_VH(vh)	(vh << 1)
#define PH2VH_MBOX		0xD8

#define MH_MSG_MASK		(0xFF)

#define MH_MSG(id, msg)		((id << 8) | (msg & 0xFF))
#define MH_MSG_PH_READY		0x1
#define MH_MSG_VH_READY		0x2

#define ALLOW_INQUIRY		BIT(25)
#define ALLOW_MODE_SELECT	BIT(24)
#define ALLOW_MODE_SENSE	BIT(23)
#define ALLOW_PRE_FETCH		GENMASK(22, 21)
#define ALLOW_READ_CMD_ALL	GENMASK(20, 18)	/* read_6/10/16 */
#define ALLOW_READ_BUFFER	BIT(17)
#define ALLOW_READ_CAPACITY	GENMASK(16, 15)
#define ALLOW_REPORT_LUNS	BIT(14)
#define ALLOW_REQUEST_SENSE	BIT(13)
#define ALLOW_SYNCHRONIZE_CACHE	GENMASK(8, 7)
#define ALLOW_TEST_UNIT_READY	BIT(6)
#define ALLOW_UNMAP		BIT(5)
#define ALLOW_VERIFY		BIT(4)
#define ALLOW_WRITE_CMD_ALL	GENMASK(3, 1)	/* write_6/10/16 */

#define ALLOW_TRANS_VH_DEFAULT	(ALLOW_INQUIRY | ALLOW_MODE_SELECT | \
				 ALLOW_MODE_SENSE | ALLOW_PRE_FETCH | \
				 ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \
				 ALLOW_READ_CAPACITY | ALLOW_REPORT_LUNS | \
				 ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \
				 ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \
				 ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL)

#define HCI_MH_ALLOWABLE_TRAN_OF_VH	0x30C
#define HCI_MH_IID_IN_TASK_TAG		0x308

#define PH_READY_TIMEOUT_MS		(5 * MSEC_PER_SEC)

enum {
	UNIPRO_L1_5 = 0,	/* PHY Adapter */
	UNIPRO_L2,		/* Data Link */
	UNIPRO_L3,		/* Network */
	UNIPRO_L4,		/* Transport */
	UNIPRO_DME,		/* DME */
};

/*
 * UNIPRO registers
 */
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0	0x78B8
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1	0x78BC
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2	0x78C0

/*
 * UFS Protector registers
 */
#define UFSPRSECURITY	0x010
#define NSSMU		BIT(14)
#define UFSPSBEGIN0	0x200
#define UFSPSEND0	0x204
#define UFSPSLUN0	0x208
#define UFSPSCTRL0	0x20C

#define CNTR_DIV_VAL	40

static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);

static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs)
{
	exynos_ufs_auto_ctrl_hcc(ufs, true);
}

static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs)
{
	exynos_ufs_auto_ctrl_hcc(ufs, false);
}

static inline void exynos_ufs_disable_auto_ctrl_hcc_save(
					struct exynos_ufs *ufs, u32 *val)
{
	*val = hci_readl(ufs, HCI_MISC);
	exynos_ufs_auto_ctrl_hcc(ufs, false);
}

static inline void exynos_ufs_auto_ctrl_hcc_restore(
					struct exynos_ufs *ufs, u32 *val)
{
	hci_writel(ufs, *val, HCI_MISC);
}

static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs)
{
	exynos_ufs_ctrl_clkstop(ufs, true);
}

static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs)
{
	exynos_ufs_ctrl_clkstop(ufs, false);
}

static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
{
	return 0;
}

static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
{
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	/* IO Coherency setting */
	if (ufs->sysreg) {
		return regmap_update_bits(ufs->sysreg,
					  ufs->shareability_reg_offset,
					  UFS_SHARABLE, UFS_SHARABLE);
	}

	attr->tx_dif_p_nsec = 3200000;

	return 0;
}

static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;

	/* Enable Virtual Host #1 */
	ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL);
	/* Default VH Transfer permissions */
	hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH);
	/* IID information is replaced in TASKTAG[7:5] instead of IID in UCD */
	hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG);

	return 0;
}

static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	int i;
	u32 tx_line_reset_period, rx_line_reset_period;

	rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
	tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;

	ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i),
			       DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0);

		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i),
			       (rx_line_reset_period >> 16) & 0xFF);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i),
			       (rx_line_reset_period >> 8) & 0xFF);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i),
			       (rx_line_reset_period) & 0xFF);

		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6);
	}

	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i),
			       DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
		/* Not to affect VND_TX_LINERESET_PVALUE to VND_TX_CLK_PRD */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i),
			       0x02);

		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i),
			       (tx_line_reset_period >> 16) & 0xFF);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i),
			       (tx_line_reset_period >> 8) & 0xFF);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i),
			       (tx_line_reset_period) & 0xFF);

		/* TX PWM Gear Capability / PWM_G1_ONLY */
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1);
	}

	ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);

	ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000);

	return 0;
}

static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
					 struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;

	/* PACP_PWR_req and delivered to the remote DME */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);

	return 0;
}

static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
					  struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	u32 enabled_vh;

	enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK;

	/* Send physical host ready message to virtual hosts */
	ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX);

	return 0;
}

static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite;
	int i;

	exynos_ufs_enable_ov_tm(hba);
	for_each_ufs_tx_lane(ufs, i)
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17);
	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00);
	}
	exynos_ufs_disable_ov_tm(hba);

	for_each_ufs_tx_lane(ufs, i)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1);
	udelay(1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12));
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1);
	udelay(1600);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val);

	return 0;
}

static int exynos7_ufs_post_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	int i;

	exynos_ufs_enable_ov_tm(hba);
	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i),
			TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000)));
	}
	exynos_ufs_disable_ov_tm(hba);

	exynos_ufs_enable_dbg_mode(hba);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8);
	exynos_ufs_disable_dbg_mode(hba);

	return 0;
}

static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
				      struct ufs_pa_layer_attr *pwr)
{
	unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE);

	return 0;
}

static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
				       struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1);

	if (lanes == 1) {
		exynos_ufs_enable_dbg_mode(hba);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1);
		exynos_ufs_disable_dbg_mode(hba);
	}

	return 0;
}

/*
 * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w
 * Control should be disabled in the following cases:
 * - Before host controller S/W reset
 * - Access to UFS protector's register
 */
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en)
{
	u32 misc = hci_readl(ufs, HCI_MISC);

	if (en)
		hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC);
	else
		hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC);
}
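
/*
 * Gate or ungate the UniPro/M-PHY clocks via HCI_CLKSTOP_CTRL. Note the
 * ordering: when gating, clock control is enabled in HCI_MISC before the
 * stop bits are set; when ungating, the stop bits are cleared first and
 * control is disabled afterwards. This ordering is presumably what the
 * controller expects, so keep it when modifying this helper.
 */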
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en)
{
	u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL);
	u32 misc = hci_readl(ufs, HCI_MISC);

	if (en) {
		hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC);
		hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
	} else {
		hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
		hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC);
	}
}
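
/*
 * Pick up the "core_clk" (HCI core) and "sclk_unipro_main" (UniPro mclk)
 * clocks from the UFSHCD clock list and derive the UniPro PCLK rate. On
 * controllers with an APB clock divider (EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL),
 * the divider is increased until the divided rate falls within
 * [pclk_avail_min, pclk_avail_max]; otherwise the raw rate must already lie
 * in that window.
 */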
static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct list_head *head = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long pclk_rate;
	u32 f_min, f_max;
	u8 div = 0;
	int ret = 0;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR(clki->clk)) {
			if (!strcmp(clki->name, "core_clk"))
				ufs->clk_hci_core = clki->clk;
			else if (!strcmp(clki->name, "sclk_unipro_main"))
				ufs->clk_unipro_main = clki->clk;
		}
	}

	if (!ufs->clk_hci_core || !ufs->clk_unipro_main) {
		dev_err(hba->dev, "failed to get clk info\n");
		ret = -EINVAL;
		goto out;
	}

	ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main);
	pclk_rate = clk_get_rate(ufs->clk_hci_core);
	f_min = ufs->pclk_avail_min;
	f_max = ufs->pclk_avail_max;

	if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
		do {
			pclk_rate /= (div + 1);

			if (pclk_rate <= f_max)
				break;
			div++;
		} while (pclk_rate >= f_min);
	}

	if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
		dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
		ret = -EINVAL;
		goto out;
	}

	ufs->pclk_rate = pclk_rate;
	ufs->pclk_div = div;

out:
	return ret;
}

static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs)
{
	if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
		u32 val;

		val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL);
		hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div),
			   HCI_UNIPRO_APB_CLK_CTRL);
	}
}

static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	ufshcd_dme_set(hba,
		UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl);
}

static void exynos_ufs_calc_pwm_clk_div(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	const unsigned int div = 30, mult = 20;
	const unsigned long pwm_min = 3 * 1000 * 1000;
	const unsigned long pwm_max = 9 * 1000 * 1000;
	const int divs[] = {32, 16, 8, 4};
	unsigned long clk = 0, _clk, clk_period;
	int i = 0, clk_idx = -1;

	clk_period = UNIPRO_PCLK_PERIOD(ufs);
	for (i = 0; i < ARRAY_SIZE(divs); i++) {
		_clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div);
		if (_clk >= pwm_min && _clk <= pwm_max) {
			if (_clk > clk) {
				clk_idx = i;
				clk = _clk;
			}
		}
	}

	if (clk_idx == -1) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx);
		dev_err(hba->dev,
			"failed to decide pwm clock divider, will not change\n");
	}

	attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK;
}
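
/*
 * Convert a period in nanoseconds into a count of PCLK cycles. One extra
 * decimal digit of the clock period is carried ('precise' = 10) to reduce
 * truncation error from the integer division in UNIPRO_PCLK_PERIOD(). For
 * example (illustrative numbers, not from this file): at pclk_rate =
 * 133 MHz the integer clock period is 7 ns and the carried fraction is
 * 0.5 ns, so a 3,000,000 ns period yields 30,000,000 / 75 = 400,000 cycles.
 */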
long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period)
{
	const int precise = 10;
	long pclk_rate = ufs->pclk_rate;
	long clk_period, fraction;

	clk_period = UNIPRO_PCLK_PERIOD(ufs);
	fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;

	return (period * precise) / ((clk_period * precise) + fraction);
}
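
/*
 * Pre-compute the M-PHY timing values (line-reset, high-z, base/granularity
 * units, sleep and stall counts) from the per-SoC attribute table, converted
 * to PCLK cycles, so exynos_ufs_config_phy_time_attr() can later program
 * them per lane.
 */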
static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs)
{
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;

	t_cfg->tx_linereset_p =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec);
	t_cfg->tx_linereset_n =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec);
	t_cfg->tx_high_z_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec);
	t_cfg->tx_base_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec);
	t_cfg->tx_gran_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec);
	t_cfg->tx_sleep_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt);

	t_cfg->rx_linereset =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec);
	t_cfg->rx_hibern8_wait =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec);
	t_cfg->rx_base_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec);
	t_cfg->rx_gran_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec);
	t_cfg->rx_sleep_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt);
	t_cfg->rx_stall_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt);
}

static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
	int i;

	exynos_ufs_set_pwm_clk_div(ufs);

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i),
				ufs->drv_data->uic_attr->rx_filler_enable);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i),
				RX_LINERESET(t_cfg->rx_linereset));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i),
				RX_BASE_NVAL_L(t_cfg->rx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i),
				RX_BASE_NVAL_H(t_cfg->rx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i),
				RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i),
				RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i),
				RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i),
				RX_OV_STALL_CNT(t_cfg->rx_stall_cnt));
	}

	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i),
				TX_LINERESET_P(t_cfg->tx_linereset_p));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i),
				TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i),
				TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i),
				TX_BASE_NVAL_L(t_cfg->tx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i),
				TX_BASE_NVAL_H(t_cfg->tx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i),
				TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i),
				TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i),
				TX_OV_H8_ENTER_EN |
				TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i),
				ufs->drv_data->uic_attr->tx_min_activatetime);
	}

	exynos_ufs_disable_ov_tm(hba);
}

static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	int i;

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g1_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g2_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i),
				attr->rx_hs_g3_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i),
				attr->rx_hs_g1_prep_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i),
				attr->rx_hs_g2_prep_sync_len_cap);
		ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i),
				attr->rx_hs_g3_prep_sync_len_cap);
	}

	if (attr->rx_adv_fine_gran_sup_en == 0) {
		for_each_ufs_rx_lane(ufs, i) {
			ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0);

			if (attr->rx_min_actv_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(
					RX_MIN_ACTIVATETIME_CAPABILITY, i),
					attr->rx_min_actv_time_cap);

			if (attr->rx_hibern8_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i),
					attr->rx_hibern8_time_cap);
		}
	} else if (attr->rx_adv_fine_gran_sup_en == 1) {
		for_each_ufs_rx_lane(ufs, i) {
			if (attr->rx_adv_fine_gran_step)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP,
						i), RX_ADV_FINE_GRAN_STEP(
						attr->rx_adv_fine_gran_step));

			if (attr->rx_adv_min_actv_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(
						RX_ADV_MIN_ACTIVATETIME_CAP, i),
					attr->rx_adv_min_actv_time_cap);

			if (attr->rx_adv_hibern8_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP,
						i),
					attr->rx_adv_hibern8_time_cap);
		}
	}

	exynos_ufs_disable_ov_tm(hba);
}

static void exynos_ufs_establish_connt(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	enum {
		DEV_ID		= 0x00,
		PEER_DEV_ID	= 0x01,
		PEER_CPORT_ID	= 0x00,
		TRAFFIC_CLASS	= 0x00,
	};

	/* allow cport attributes to be set */
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE);

	/* local unipro attributes */
	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), true);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED);
}
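
/*
 * Put the SMU (UFS protector) into a pass-through configuration: encryption
 * bypassed by default and descriptor 0 covering the whole address range for
 * any LUN. Automatic HCI core clock control is saved and disabled around the
 * UFSP register accesses, as required by exynos_ufs_auto_ctrl_hcc() above.
 */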
static void exynos_ufs_config_smu(struct exynos_ufs *ufs)
{
	u32 reg, val;

	exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);

	/* make encryption disabled by default */
	reg = ufsp_readl(ufs, UFSPRSECURITY);
	ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY);
	ufsp_writel(ufs, 0x0, UFSPSBEGIN0);
	ufsp_writel(ufs, 0xffffffff, UFSPSEND0);
	ufsp_writel(ufs, 0xff, UFSPSLUN0);
	ufsp_writel(ufs, 0xf1, UFSPSCTRL0);

	exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
}
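
/*
 * Program the RX sync-pattern mask length for the negotiated HS gear: the
 * gear's sync length (80/40/20 us for G1/G2/G3) is converted to PCLK cycles
 * and the upper byte (bits 15:8) is written to every RX lane.
 */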
static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
					struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx);
	u32 mask, sync_len;
	enum {
		SYNC_LEN_G1 = 80 * 1000, /* 80us */
		SYNC_LEN_G2 = 40 * 1000, /* 40us */
		SYNC_LEN_G3 = 20 * 1000, /* 20us */
	};
	int i;

	if (g == 1)
		sync_len = SYNC_LEN_G1;
	else if (g == 2)
		sync_len = SYNC_LEN_G2;
	else if (g == 3)
		sync_len = SYNC_LEN_G3;
	else
		return;

	mask = exynos_ufs_calc_time_cntr(ufs, sync_len);
	mask = (mask >> 8) & 0xff;

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask);

	exynos_ufs_disable_ov_tm(hba);
}

static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	struct ufs_dev_params ufs_exynos_cap;
	int ret;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	ufshcd_init_pwr_dev_param(&ufs_exynos_cap);

	ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
				       dev_max_params, dev_req_params);
	if (ret) {
		pr_err("%s: failed to determine capabilities\n", __func__);
		goto out;
	}

	if (ufs->drv_data->pre_pwr_change)
		ufs->drv_data->pre_pwr_change(ufs, dev_req_params);

	if (ufshcd_is_hs_mode(dev_req_params)) {
		exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params);

		switch (dev_req_params->hs_rate) {
		case PA_HS_MODE_A:
		case PA_HS_MODE_B:
			phy_calibrate(generic_phy);
			break;
		}
	}

	/* setting for three timeout values for traffic class #0 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);

	return 0;
out:
	return ret;
}

#define PWR_MODE_STR_LEN	64
static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
				struct ufs_pa_layer_attr *pwr_req)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx);
	int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx);
	char pwr_str[PWR_MODE_STR_LEN] = "";

	/* let default be PWM Gear 1, Lane 1 */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (ufs->drv_data->post_pwr_change)
		ufs->drv_data->post_pwr_change(ufs, pwr_req);

	if (ufshcd_is_hs_mode(pwr_req)) {
		switch (pwr_req->hs_rate) {
		case PA_HS_MODE_A:
		case PA_HS_MODE_B:
			phy_calibrate(generic_phy);
			break;
		}

		snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d",
			 "FAST", pwr_req->hs_rate == PA_HS_MODE_A ? "A" : "B",
			 gear, lanes);
	} else {
		snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d",
			 "SLOW", gear, lanes);
	}

	dev_info(hba->dev, "Power mode changed to : %s\n", pwr_str);

	return 0;
}
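
/*
 * The UTRL/UTMRL nexus-type registers carry one bit per request slot. A set
 * bit appears to mark the slot as targeting a device/LUN nexus (SCSI
 * commands, or ABORT TASK / QUERY TASK for task management); the bit is
 * cleared for everything else.
 */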
static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
						int tag, bool is_scsi_cmd)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 type;

	type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);

	if (is_scsi_cmd)
		hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE);
	else
		hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE);
}

static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba,
					      int tag, u8 func)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 type;

	type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE);

	switch (func) {
	case UFS_ABORT_TASK:
	case UFS_QUERY_TASK:
		hci_writel(ufs, type | (1 << tag), HCI_UTMRL_NEXUS_TYPE);
		break;
	case UFS_ABORT_TASK_SET:
	case UFS_CLEAR_TASK_SET:
	case UFS_LOGICAL_RESET:
	case UFS_QUERY_TASK_SET:
		hci_writel(ufs, type & ~(1 << tag), HCI_UTMRL_NEXUS_TYPE);
		break;
	}
}
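
/*
 * Initialise and power on the UFS M-PHY. The available lane counts are read
 * from the PHY adapter once and cached; RX and TX are expected to be
 * symmetric, hence the WARN when they differ.
 */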
static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct phy *generic_phy = ufs->phy;
	int ret = 0;

	if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
			&ufs->avail_ln_rx);
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
			&ufs->avail_ln_tx);
		WARN(ufs->avail_ln_rx != ufs->avail_ln_tx,
			"available data lane is not equal(rx:%d, tx:%d)\n",
			ufs->avail_ln_rx, ufs->avail_ln_tx);
	}

	phy_set_bus_width(generic_phy, ufs->avail_ln_rx);

	ret = phy_init(generic_phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
			__func__, ret);
		return ret;
	}

	ret = phy_power_on(generic_phy);
	if (ret)
		goto out_exit_phy;

	return 0;

out_exit_phy:
	phy_exit(generic_phy);

	return ret;
}

static void exynos_ufs_config_unipro(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
		DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS),
			ufs->drv_data->uic_attr->tx_trailingclks);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE),
			ufs->drv_data->uic_attr->pa_dbg_option_suite);
}

static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
{
	switch (index) {
	case UNIPRO_L1_5:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_PA_LAYER);
		break;
	case UNIPRO_L2:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DL_LAYER);
		break;
	case UNIPRO_L3:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_N_LAYER);
		break;
	case UNIPRO_L4:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_T_LAYER);
		break;
	case UNIPRO_DME:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DME_LAYER);
		break;
	}
}

static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on,
				   enum ufs_notify_change_status status)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!ufs)
		return 0;

	if (on && status == PRE_CHANGE) {
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_disable_auto_ctrl_hcc(ufs);
		exynos_ufs_ungate_clks(ufs);
	} else if (!on && status == POST_CHANGE) {
		exynos_ufs_gate_clks(ufs);
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_enable_auto_ctrl_hcc(ufs);
	}

	return 0;
}
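
/*
 * Link startup, PRE_CHANGE: enable the per-layer error interrupts and the
 * UniPro PCLK divider on the HCI side, program the UniPro attributes, then
 * bring up the M-PHY (plus, unless skipped for this SoC, its timing and
 * capability attributes) before handing off to the per-SoC pre_link hook.
 */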
static int exynos_ufs_pre_link(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	/* hci */
	exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, UNIPRO_L2);
	exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3);
	exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
	exynos_ufs_set_unipro_pclk_div(ufs);

	/* unipro */
	exynos_ufs_config_unipro(ufs);

	/* m-phy */
	exynos_ufs_phy_init(ufs);
	if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
		exynos_ufs_config_phy_time_attr(ufs);
		exynos_ufs_config_phy_cap_attr(ufs);
	}

	exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);

	if (ufs->drv_data->pre_link)
		ufs->drv_data->pre_link(ufs);

	return 0;
}

static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs)
{
	u32 val;

	val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL);
	hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL);
}
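
/*
 * Link startup, POST_CHANGE: establish the CPort connection, set the
 * interrupt-aggregation timeout and PRDT entry sizes, default all transfer
 * and task-management slots to the device nexus type, and apply the hibern8
 * granularity/timer attributes before calling the per-SoC post_link hook.
 */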
static int exynos_ufs_post_link(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	exynos_ufs_establish_connt(ufs);
	exynos_ufs_fit_aggr_timeout(ufs);

	hci_writel(ufs, 0xa, HCI_DATA_REORDER);
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE);
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
	hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
	hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
	hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);

	if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), true);

	if (attr->pa_granularity) {
		exynos_ufs_enable_dbg_mode(hba);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY),
				attr->pa_granularity);
		exynos_ufs_disable_dbg_mode(hba);

		if (attr->pa_tactivate)
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					attr->pa_tactivate);
		if (attr->pa_hibern8time &&
		    !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER))
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
					attr->pa_hibern8time);
	}

	if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
		if (!attr->pa_granularity)
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
					&attr->pa_granularity);
		if (!attr->pa_hibern8time)
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
					&attr->pa_hibern8time);
		/*
		 * Do not wait for the HIBERN8 time to exit hibernation
		 */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0);

		if (attr->pa_granularity < 1 || attr->pa_granularity > 6) {
			/* Valid range for granularity: 1 ~ 6 */
			dev_warn(hba->dev,
				"%s: pa_granularity %d is invalid, assuming backwards compatibility\n",
				__func__,
				attr->pa_granularity);
			attr->pa_granularity = 6;
		}
	}

	phy_calibrate(generic_phy);

	if (ufs->drv_data->post_link)
		ufs->drv_data->post_link(ufs);

	return 0;
}

static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
{
	struct device_node *np = dev->of_node;
	struct exynos_ufs_uic_attr *attr;
	int ret = 0;

	ufs->drv_data = device_get_match_data(dev);

	if (ufs->drv_data && ufs->drv_data->uic_attr) {
		attr = ufs->drv_data->uic_attr;
	} else {
		dev_err(dev, "failed to get uic attributes\n");
		ret = -EINVAL;
		goto out;
	}

	ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
	if (IS_ERR(ufs->sysreg))
		ufs->sysreg = NULL;
	else {
		if (of_property_read_u32_index(np, "samsung,sysreg", 1,
					       &ufs->shareability_reg_offset)) {
			dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
			ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
		}
	}

	ufs->pclk_avail_min = PCLK_AVAIL_MIN;
	ufs->pclk_avail_max = PCLK_AVAIL_MAX;

	attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN;
	attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL;
	attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP;
	attr->pa_granularity = PA_GRANULARITY_VAL;
	attr->pa_tactivate = PA_TACTIVATE_VAL;
	attr->pa_hibern8time = PA_HIBERN8TIME_VAL;

out:
	return ret;
}

static inline void exynos_ufs_priv_init(struct ufs_hba *hba,
					struct exynos_ufs *ufs)
{
	ufs->hba = hba;
	ufs->opts = ufs->drv_data->opts;
	ufs->rx_sel_idx = PA_MAXDATALANES;
	if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
		ufs->rx_sel_idx = 0;
	hba->priv = (void *)ufs;
	hba->quirks = ufs->drv_data->quirks;
}

static int exynos_ufs_init(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct exynos_ufs *ufs;
	int ret;

	ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
	if (!ufs)
		return -ENOMEM;

	/* exynos-specific hci */
	ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
	if (IS_ERR(ufs->reg_hci)) {
		dev_err(dev, "cannot ioremap for hci vendor register\n");
		return PTR_ERR(ufs->reg_hci);
	}

	/* unipro */
	ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
	if (IS_ERR(ufs->reg_unipro)) {
		dev_err(dev, "cannot ioremap for unipro register\n");
		return PTR_ERR(ufs->reg_unipro);
	}

	/* ufs protector */
	ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
	if (IS_ERR(ufs->reg_ufsp)) {
		dev_err(dev, "cannot ioremap for ufs protector register\n");
		return PTR_ERR(ufs->reg_ufsp);
	}

	ret = exynos_ufs_parse_dt(dev, ufs);
	if (ret) {
		dev_err(dev, "failed to get dt info.\n");
		goto out;
	}

	ufs->phy = devm_phy_get(dev, "ufs-phy");
	if (IS_ERR(ufs->phy)) {
		ret = PTR_ERR(ufs->phy);
		dev_err(dev, "failed to get ufs-phy\n");
		goto out;
	}

	exynos_ufs_priv_init(hba, ufs);

	if (ufs->drv_data->drv_init) {
		ret = ufs->drv_data->drv_init(dev, ufs);
		if (ret) {
			dev_err(dev, "failed to init drv-data\n");
			goto out;
		}
	}

	ret = exynos_ufs_get_clk_info(ufs);
	if (ret)
		goto out;
	exynos_ufs_specify_phy_time_attr(ufs);
	exynos_ufs_config_smu(ufs);

	return 0;

out:
	hba->priv = NULL;
	return ret;
}
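
/*
 * Issue the vendor-specific host S/W reset and poll for about a millisecond
 * for the reset bits to self-clear. Automatic HCI core clock control is
 * disabled for the duration, as required before a host controller S/W reset.
 */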
static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	unsigned long timeout = jiffies + msecs_to_jiffies(1);
	u32 val;
	int ret = 0;

	exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);

	hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST);

	do {
		if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK))
			goto out;
	} while (time_before(jiffies, timeout));

	dev_err(hba->dev, "timeout host sw-reset\n");
	ret = -ETIMEDOUT;

out:
	exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
	return ret;
}

static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
	udelay(5);
	hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
}
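
/*
 * Hibern8 PRE_CHANGE handling. On exit the clocks are ungated first; with
 * the S/W hibern8 timer workaround (EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) the
 * driver then sleeps until at least PA_Hibern8Time, scaled by the
 * PA_Granularity step size in microseconds, has elapsed since hibern8 entry,
 * since PA_HIBERN8TIME is forced to 0 at link-up in this mode.
 */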
static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	if (!enter) {
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_disable_auto_ctrl_hcc(ufs);
		exynos_ufs_ungate_clks(ufs);

		if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
			static const unsigned int granularity_tbl[] = {
				1, 4, 8, 16, 32, 100
			};
			int h8_time = attr->pa_hibern8time *
				granularity_tbl[attr->pa_granularity - 1];
			unsigned long us;
			s64 delta;

			do {
				delta = h8_time - ktime_us_delta(ktime_get(),
							ufs->entry_hibern8_t);
				if (delta <= 0)
					break;

				us = min_t(s64, delta, USEC_PER_MSEC);
				if (us >= 10)
					usleep_range(us, us + 10);
			} while (1);
		}
	}
}

static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!enter) {
		u32 cur_mode = 0;
		u32 pwrmode;

		if (ufshcd_is_hs_mode(&ufs->dev_req_params))
			pwrmode = FAST_MODE;
		else
			pwrmode = SLOW_MODE;

		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode);
		if (cur_mode != (pwrmode << 4 | pwrmode)) {
			dev_warn(hba->dev, "%s: power mode change\n", __func__);
			hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf;
			hba->pwr_info.pwr_tx = cur_mode & 0xf;
			ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		}

		if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB))
			exynos_ufs_establish_connt(ufs);
	} else {
		ufs->entry_hibern8_t = ktime_get();
		exynos_ufs_gate_clks(ufs);
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_enable_auto_ctrl_hcc(ufs);
	}
}

static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		/*
		 * The maximum segment size must be set after scsi_host_alloc()
		 * has been called and before LUN scanning starts
		 * (ufshcd_async_scan()). Note: this callback may also be called
		 * from other functions than ufshcd_init().
		 */
		hba->host->max_segment_size = 4096;

		if (ufs->drv_data->pre_hce_enable) {
			ret = ufs->drv_data->pre_hce_enable(ufs);
			if (ret)
				return ret;
		}

		ret = exynos_ufs_host_reset(hba);
		if (ret)
			return ret;
		exynos_ufs_dev_hw_reset(hba);
		break;
	case POST_CHANGE:
		exynos_ufs_calc_pwm_clk_div(ufs);
		if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
			exynos_ufs_enable_auto_ctrl_hcc(ufs);

		if (ufs->drv_data->post_hce_enable)
			ret = ufs->drv_data->post_hce_enable(ufs);

		break;
	}

	return ret;
}

static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
					  enum ufs_notify_change_status status)
{
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		ret = exynos_ufs_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = exynos_ufs_post_link(hba);
		break;
	}

	return ret;
}

static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status,
					struct ufs_pa_layer_attr *dev_max_params,
					struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
					      dev_req_params);
		break;
	case POST_CHANGE:
		ret = exynos_ufs_post_pwr_mode(hba, dev_req_params);
		break;
	}

	return ret;
}

static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
				      enum uic_cmd_dme enter,
				      enum ufs_notify_change_status notify)
{
	switch ((u8)notify) {
	case PRE_CHANGE:
		exynos_ufs_pre_hibern8(hba, enter);
		break;
	case POST_CHANGE:
		exynos_ufs_post_hibern8(hba, enter);
		break;
	}
}

static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			      enum ufs_notify_change_status status)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE)
		return 0;

	if (!ufshcd_is_link_active(hba))
		phy_power_off(ufs->phy);

	return 0;
}

static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!ufshcd_is_link_active(hba))
		phy_power_on(ufs->phy);

	exynos_ufs_config_smu(ufs);

	return 0;
}

static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba,
						 enum ufs_notify_change_status status)
{
	if (status == POST_CHANGE) {
		ufshcd_set_link_active(hba);
		ufshcd_set_ufs_dev_active(hba);
	}

	return 0;
}
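
/*
 * Poll the PH-to-VH mailbox until the physical host reports MH_MSG_PH_READY,
 * giving up after PH_READY_TIMEOUT_MS. A virtual host depends on the
 * physical host having brought up the link before it can proceed with its
 * own initialization.
 */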
static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba)
{
	u32 mbox;
	ktime_t start, stop;

	start = ktime_get();
	stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS));

	do {
		mbox = ufshcd_readl(hba, PH2VH_MBOX);
		/*
		 * TODO: Mailbox message protocols between the PH and VHs are
		 * not implemented yet. This will be supported later
		 */
		if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY)
			return 0;

		usleep_range(40, 50);
	} while (ktime_before(ktime_get(), stop));

	return -ETIME;
}

static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct exynos_ufs *ufs;
	int ret;

	ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
	if (!ufs)
		return -ENOMEM;

	/* exynos-specific hci */
	ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
	if (IS_ERR(ufs->reg_hci)) {
		dev_err(dev, "cannot ioremap for hci vendor register\n");
		return PTR_ERR(ufs->reg_hci);
	}

	ret = exynosauto_ufs_vh_wait_ph_ready(hba);
	if (ret)
		return ret;

	ufs->drv_data = device_get_match_data(dev);
	if (!ufs->drv_data)
		return -ENODEV;

	exynos_ufs_priv_init(hba, ufs);

	return 0;
}

static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
{
	int i;
	struct ufs_hba *hba = ufs->hba;

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
		       DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12);
	ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);

	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xAA, i),
			       DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8F, i), 0x3F);
	}

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x12, i),
			       DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x5C, i), 0x38);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0F, i), 0x0);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x65, i), 0x1);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x69, i), 0x1);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x21, i), 0x0);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x22, i), 0x0);
	}

	ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);

	exynos_ufs_establish_connt(ufs);

	return 0;
}
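
/*
 * FSD post-link tuning: raise the peer's PA_TActivate above the hardware
 * minimum read back from the PHY, extend PA_Hibern8Time by one unit, update
 * PA_SaveConfigTime under debug mode, and apply vendor RX lane settings
 * inside the 0x200 debug window.
 */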
  1256. static int fsd_ufs_post_link(struct exynos_ufs *ufs)
  1257. {
  1258. int i;
  1259. struct ufs_hba *hba = ufs->hba;
  1260. u32 hw_cap_min_tactivate;
  1261. u32 peer_rx_min_actv_time_cap;
  1262. u32 max_rx_hibern8_time_cap;
  1263. ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x8F, 4),
  1264. &hw_cap_min_tactivate); /* HW Capability of MIN_TACTIVATE */
  1265. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
  1266. &peer_rx_min_actv_time_cap); /* PA_TActivate */
  1267. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
  1268. &max_rx_hibern8_time_cap); /* PA_Hibern8Time */
  1269. if (peer_rx_min_actv_time_cap >= hw_cap_min_tactivate)
  1270. ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
  1271. peer_rx_min_actv_time_cap + 1);
  1272. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), max_rx_hibern8_time_cap + 1);
  1273. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x01);
  1274. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xFA);
  1275. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x00);
  1276. ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
  1277. for_each_ufs_rx_lane(ufs, i) {
  1278. ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x35, i), 0x05);
  1279. ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x73, i), 0x01);
  1280. ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x41, i), 0x02);
  1281. ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x42, i), 0xAC);
  1282. }
  1283. ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
  1284. return 0;
  1285. }
  1286. static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
  1287. struct ufs_pa_layer_attr *pwr)
  1288. {
  1289. struct ufs_hba *hba = ufs->hba;
  1290. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), 0x1);
  1291. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), 0x1);
  1292. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
  1293. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
  1294. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
  1295. unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0);
  1296. unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1);
  1297. unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2);
  1298. return 0;
  1299. }
  1300. static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
  1301. .name = "exynos_ufs",
  1302. .init = exynos_ufs_init,
  1303. .hce_enable_notify = exynos_ufs_hce_enable_notify,
  1304. .link_startup_notify = exynos_ufs_link_startup_notify,
  1305. .pwr_change_notify = exynos_ufs_pwr_change_notify,
  1306. .setup_clocks = exynos_ufs_setup_clocks,
  1307. .setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req,
  1308. .setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req,
  1309. .hibern8_notify = exynos_ufs_hibern8_notify,
  1310. .suspend = exynos_ufs_suspend,
  1311. .resume = exynos_ufs_resume,
  1312. };
static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
	.name				= "exynosauto_ufs_vh",
	.init				= exynosauto_ufs_vh_init,
	.link_startup_notify		= exynosauto_ufs_vh_link_startup_notify,
};

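/*
 * Probe resolves the per-SoC variant ops from the OF match data, falling
 * back to the native ufs_hba_exynos_ops, and delegates the rest of host
 * setup to ufshcd_pltfrm_init().
 */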
static int exynos_ufs_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops;
	const struct exynos_ufs_drv_data *drv_data = device_get_match_data(dev);

	if (drv_data && drv_data->vops)
		vops = drv_data->vops;

	err = ufshcd_pltfrm_init(pdev, vops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}

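/*
 * Teardown mirrors init in reverse: resume the device so register access
 * is safe, unregister the host, then power off and exit the UFS PHY.
 */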
static int exynos_ufs_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);

	phy_power_off(ufs->phy);
	phy_exit(ufs->phy);

	return 0;
}

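/*
 * Per-SoC UIC/PHY timing attributes. The *_nsec fields are wall-clock
 * times that the driver presumably converts into cycle counts from the
 * configured PHY clock period; the remaining fields are raw register
 * values.
 */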
static struct exynos_ufs_uic_attr exynos7_uic_attr = {
	.tx_trailingclks		= 0x10,
	.tx_dif_p_nsec			= 3000000,	/* unit: ns */
	.tx_dif_n_nsec			= 1000000,	/* unit: ns */
	.tx_high_z_cnt_nsec		= 20000,	/* unit: ns */
	.tx_base_unit_nsec		= 100000,	/* unit: ns */
	.tx_gran_unit_nsec		= 4000,		/* unit: ns */
	.tx_sleep_cnt			= 1000,		/* unit: ns */
	.tx_min_activatetime		= 0xa,
	.rx_filler_enable		= 0x2,
	.rx_dif_p_nsec			= 1000000,	/* unit: ns */
	.rx_hibern8_wait_nsec		= 4000000,	/* unit: ns */
	.rx_base_unit_nsec		= 100000,	/* unit: ns */
	.rx_gran_unit_nsec		= 4000,		/* unit: ns */
	.rx_sleep_cnt			= 1280,		/* unit: ns */
	.rx_stall_cnt			= 320,		/* unit: ns */
	.rx_hs_g1_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g2_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g3_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g1_prep_sync_len_cap	= PREP_LEN(0xf),
	.rx_hs_g2_prep_sync_len_cap	= PREP_LEN(0xf),
	.rx_hs_g3_prep_sync_len_cap	= PREP_LEN(0xf),
	.pa_dbg_option_suite		= 0x30103,
};

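/*
 * Each supported SoC bundles its UIC attributes, ufshcd quirks,
 * Exynos-specific option flags and per-stage hooks into one drv_data
 * blob, selected at probe time through the OF match table below.
 */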
static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
	.uic_attr		= &exynos7_uic_attr,
	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
	.opts			= EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
				  EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
	.drv_init		= exynosauto_ufs_drv_init,
	.post_hce_enable	= exynosauto_ufs_post_hce_enable,
	.pre_link		= exynosauto_ufs_pre_link,
	.pre_pwr_change		= exynosauto_ufs_pre_pwr_change,
	.post_pwr_change	= exynosauto_ufs_post_pwr_change,
};

static const struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
	.vops			= &ufs_hba_exynosauto_vh_ops,
	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
				  UFSHCI_QUIRK_BROKEN_HCE |
				  UFSHCD_QUIRK_BROKEN_UIC_CMD |
				  UFSHCD_QUIRK_SKIP_PH_CONFIGURATION |
				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
	.opts			= EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
};

static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
	.uic_attr		= &exynos7_uic_attr,
	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
				  UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
				  UFSHCI_QUIRK_BROKEN_HCE |
				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
				  UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
				  UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
	.opts			= EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
				  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
				  EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB |
				  EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER,
	.drv_init		= exynos7_ufs_drv_init,
	.pre_link		= exynos7_ufs_pre_link,
	.post_link		= exynos7_ufs_post_link,
	.pre_pwr_change		= exynos7_ufs_pre_pwr_change,
	.post_pwr_change	= exynos7_ufs_post_pwr_change,
};

static struct exynos_ufs_uic_attr fsd_uic_attr = {
	.tx_trailingclks		= 0x10,
	.tx_dif_p_nsec			= 3000000,	/* unit: ns */
	.tx_dif_n_nsec			= 1000000,	/* unit: ns */
	.tx_high_z_cnt_nsec		= 20000,	/* unit: ns */
	.tx_base_unit_nsec		= 100000,	/* unit: ns */
	.tx_gran_unit_nsec		= 4000,		/* unit: ns */
	.tx_sleep_cnt			= 1000,		/* unit: ns */
	.tx_min_activatetime		= 0xa,
	.rx_filler_enable		= 0x2,
	.rx_dif_p_nsec			= 1000000,	/* unit: ns */
	.rx_hibern8_wait_nsec		= 4000000,	/* unit: ns */
	.rx_base_unit_nsec		= 100000,	/* unit: ns */
	.rx_gran_unit_nsec		= 4000,		/* unit: ns */
	.rx_sleep_cnt			= 1280,		/* unit: ns */
	.rx_stall_cnt			= 320,		/* unit: ns */
	.rx_hs_g1_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g2_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g3_sync_len_cap		= SYNC_LEN_COARSE(0xf),
	.rx_hs_g1_prep_sync_len_cap	= PREP_LEN(0xf),
	.rx_hs_g2_prep_sync_len_cap	= PREP_LEN(0xf),
	.rx_hs_g3_prep_sync_len_cap	= PREP_LEN(0xf),
	.pa_dbg_option_suite		= 0x2E820183,
};

static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
	.uic_attr		= &fsd_uic_attr,
	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
				  UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR,
	.opts			= EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
				  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
				  EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
	.pre_link		= fsd_ufs_pre_link,
	.post_link		= fsd_ufs_post_link,
	.pre_pwr_change		= fsd_ufs_pre_pwr_change,
};

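/*
 * A board picks one of the drv_data variants purely via its compatible
 * string. Hypothetical, illustrative DT node (the authoritative property
 * list lives in the samsung,exynos-ufs DT binding):
 *
 *	ufs@15570000 {
 *		compatible = "samsung,exynos7-ufs";
 *		reg = <0x15570000 0x100>;
 *		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 *		phys = <&ufs_phy>;
 *		phy-names = "ufs-phy";
 *	};
 */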
static const struct of_device_id exynos_ufs_of_match[] = {
	{ .compatible = "samsung,exynos7-ufs",
	  .data	      = &exynos_ufs_drvs },
	{ .compatible = "samsung,exynosautov9-ufs",
	  .data	      = &exynosauto_ufs_drvs },
	{ .compatible = "samsung,exynosautov9-ufs-vh",
	  .data	      = &exynosauto_ufs_vh_drvs },
	{ .compatible = "tesla,fsd-ufs",
	  .data	      = &fsd_ufs_drvs },
	{},
};

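/* Allow module autoloading from the DT compatible strings. */
MODULE_DEVICE_TABLE(of, exynos_ufs_of_match);
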
static const struct dev_pm_ops exynos_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver exynos_ufs_pltform = {
	.probe		= exynos_ufs_probe,
	.remove		= exynos_ufs_remove,
	.shutdown	= ufshcd_pltfrm_shutdown,
	.driver		= {
		.name		= "exynos-ufshc",
		.pm		= &exynos_ufs_pm_ops,
		.of_match_table	= of_match_ptr(exynos_ufs_of_match),
	},
};

module_platform_driver(exynos_ufs_pltform);

MODULE_AUTHOR("Alim Akhtar <[email protected]>");
MODULE_AUTHOR("Seungwon Jeon <[email protected]>");
MODULE_DESCRIPTION("Exynos UFS HCI Driver");
MODULE_LICENSE("GPL v2");