/* ufshcd.h — web-extraction artifacts (size banner and line-number dump) removed */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Universal Flash Storage Host controller driver
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <[email protected]>
 *	Vinayak Holikatti <[email protected]>
 */
  11. #ifndef _UFSHCD_H
  12. #define _UFSHCD_H
  13. #include <linux/bitfield.h>
  14. #include <linux/blk-crypto-profile.h>
  15. #include <linux/blk-mq.h>
  16. #include <linux/devfreq.h>
  17. #include <linux/msi.h>
  18. #include <linux/pm_runtime.h>
  19. #include <linux/dma-direction.h>
  20. #include <scsi/scsi_device.h>
  21. #include <ufs/unipro.h>
  22. #include <ufs/ufs.h>
  23. #include <ufs/ufs_quirks.h>
  24. #include <ufs/ufshci.h>
  25. #define UFSHCD "ufshcd"
  26. struct ufs_hba;
  27. enum dev_cmd_type {
  28. DEV_CMD_TYPE_NOP = 0x0,
  29. DEV_CMD_TYPE_QUERY = 0x1,
  30. DEV_CMD_TYPE_RPMB = 0x2,
  31. };
  32. enum ufs_event_type {
  33. /* uic specific errors */
  34. UFS_EVT_PA_ERR = 0,
  35. UFS_EVT_DL_ERR,
  36. UFS_EVT_NL_ERR,
  37. UFS_EVT_TL_ERR,
  38. UFS_EVT_DME_ERR,
  39. /* fatal errors */
  40. UFS_EVT_AUTO_HIBERN8_ERR,
  41. UFS_EVT_FATAL_ERR,
  42. UFS_EVT_LINK_STARTUP_FAIL,
  43. UFS_EVT_RESUME_ERR,
  44. UFS_EVT_SUSPEND_ERR,
  45. UFS_EVT_WL_SUSP_ERR,
  46. UFS_EVT_WL_RES_ERR,
  47. /* abnormal events */
  48. UFS_EVT_DEV_RESET,
  49. UFS_EVT_HOST_RESET,
  50. UFS_EVT_ABORT,
  51. UFS_EVT_CNT,
  52. };
  53. /**
  54. * struct uic_command - UIC command structure
  55. * @command: UIC command
  56. * @argument1: UIC command argument 1
  57. * @argument2: UIC command argument 2
  58. * @argument3: UIC command argument 3
  59. * @cmd_active: Indicate if UIC command is outstanding
  60. * @done: UIC command completion
  61. */
  62. struct uic_command {
  63. u32 command;
  64. u32 argument1;
  65. u32 argument2;
  66. u32 argument3;
  67. int cmd_active;
  68. struct completion done;
  69. };
  70. /* Used to differentiate the power management options */
  71. enum ufs_pm_op {
  72. UFS_RUNTIME_PM,
  73. UFS_SYSTEM_PM,
  74. UFS_SHUTDOWN_PM,
  75. };
  76. /* Host <-> Device UniPro Link state */
  77. enum uic_link_state {
  78. UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
  79. UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
  80. UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
  81. UIC_LINK_BROKEN_STATE = 3, /* Link is in broken state */
  82. };
  83. #define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
  84. #define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
  85. UIC_LINK_ACTIVE_STATE)
  86. #define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
  87. UIC_LINK_HIBERN8_STATE)
  88. #define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
  89. UIC_LINK_BROKEN_STATE)
  90. #define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
  91. #define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
  92. UIC_LINK_ACTIVE_STATE)
  93. #define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
  94. UIC_LINK_HIBERN8_STATE)
  95. #define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
  96. UIC_LINK_BROKEN_STATE)
  97. #define ufshcd_set_ufs_dev_active(h) \
  98. ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
  99. #define ufshcd_set_ufs_dev_sleep(h) \
  100. ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
  101. #define ufshcd_set_ufs_dev_poweroff(h) \
  102. ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
  103. #define ufshcd_set_ufs_dev_deepsleep(h) \
  104. ((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
  105. #define ufshcd_is_ufs_dev_active(h) \
  106. ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
  107. #define ufshcd_is_ufs_dev_sleep(h) \
  108. ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
  109. #define ufshcd_is_ufs_dev_poweroff(h) \
  110. ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
  111. #define ufshcd_is_ufs_dev_deepsleep(h) \
  112. ((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)
  113. /*
  114. * UFS Power management levels.
  115. * Each level is in increasing order of power savings, except DeepSleep
  116. * which is lower than PowerDown with power on but not PowerDown with
  117. * power off.
  118. */
  119. enum ufs_pm_level {
  120. UFS_PM_LVL_0,
  121. UFS_PM_LVL_1,
  122. UFS_PM_LVL_2,
  123. UFS_PM_LVL_3,
  124. UFS_PM_LVL_4,
  125. UFS_PM_LVL_5,
  126. UFS_PM_LVL_6,
  127. UFS_PM_LVL_MAX
  128. };
  129. struct ufs_pm_lvl_states {
  130. enum ufs_dev_pwr_mode dev_state;
  131. enum uic_link_state link_state;
  132. };
  133. /**
  134. * struct ufshcd_lrb - local reference block
  135. * @utr_descriptor_ptr: UTRD address of the command
  136. * @ucd_req_ptr: UCD address of the command
  137. * @ucd_rsp_ptr: Response UPIU address for this command
  138. * @ucd_prdt_ptr: PRDT address of the command
  139. * @utrd_dma_addr: UTRD dma address for debug
  140. * @ucd_prdt_dma_addr: PRDT dma address for debug
  141. * @ucd_rsp_dma_addr: UPIU response dma address for debug
  142. * @ucd_req_dma_addr: UPIU request dma address for debug
  143. * @cmd: pointer to SCSI command
  144. * @scsi_status: SCSI status of the command
  145. * @command_type: SCSI, UFS, Query.
  146. * @task_tag: Task tag of the command
  147. * @lun: LUN of the command
  148. * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
  149. * @issue_time_stamp: time stamp for debug purposes (CLOCK_MONOTONIC)
  150. * @issue_time_stamp_local_clock: time stamp for debug purposes (local_clock)
  151. * @compl_time_stamp: time stamp for statistics (CLOCK_MONOTONIC)
  152. * @compl_time_stamp_local_clock: time stamp for debug purposes (local_clock)
  153. * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
  154. * @data_unit_num: the data unit number for the first block for inline crypto
  155. * @req_abort_skip: skip request abort task flag
  156. */
  157. struct ufshcd_lrb {
  158. struct utp_transfer_req_desc *utr_descriptor_ptr;
  159. struct utp_upiu_req *ucd_req_ptr;
  160. struct utp_upiu_rsp *ucd_rsp_ptr;
  161. struct ufshcd_sg_entry *ucd_prdt_ptr;
  162. dma_addr_t utrd_dma_addr;
  163. dma_addr_t ucd_req_dma_addr;
  164. dma_addr_t ucd_rsp_dma_addr;
  165. dma_addr_t ucd_prdt_dma_addr;
  166. struct scsi_cmnd *cmd;
  167. int scsi_status;
  168. int command_type;
  169. int task_tag;
  170. u8 lun; /* UPIU LUN id field is only 8-bit wide */
  171. bool intr_cmd;
  172. ktime_t issue_time_stamp;
  173. u64 issue_time_stamp_local_clock;
  174. ktime_t compl_time_stamp;
  175. u64 compl_time_stamp_local_clock;
  176. #ifdef CONFIG_SCSI_UFS_CRYPTO
  177. int crypto_key_slot;
  178. u64 data_unit_num;
  179. #endif
  180. bool req_abort_skip;
  181. ANDROID_KABI_RESERVE(1);
  182. };
  183. /**
  184. * struct ufs_query - holds relevant data structures for query request
  185. * @request: request upiu and function
  186. * @descriptor: buffer for sending/receiving descriptor
  187. * @response: response upiu and response
  188. */
  189. struct ufs_query {
  190. struct ufs_query_req request;
  191. u8 *descriptor;
  192. struct ufs_query_res response;
  193. };
  194. /**
  195. * struct ufs_dev_cmd - all assosiated fields with device management commands
  196. * @type: device management command type - Query, NOP OUT
  197. * @lock: lock to allow one command at a time
  198. * @complete: internal commands completion
  199. * @query: Device management query information
  200. */
  201. struct ufs_dev_cmd {
  202. enum dev_cmd_type type;
  203. struct mutex lock;
  204. struct completion *complete;
  205. struct ufs_query query;
  206. struct cq_entry *cqe;
  207. };
  208. /**
  209. * struct ufs_clk_info - UFS clock related info
  210. * @list: list headed by hba->clk_list_head
  211. * @clk: clock node
  212. * @name: clock name
  213. * @max_freq: maximum frequency supported by the clock
  214. * @min_freq: min frequency that can be used for clock scaling
  215. * @curr_freq: indicates the current frequency that it is set to
  216. * @keep_link_active: indicates that the clk should not be disabled if
  217. * link is active
  218. * @enabled: variable to check against multiple enable/disable
  219. */
  220. struct ufs_clk_info {
  221. struct list_head list;
  222. struct clk *clk;
  223. const char *name;
  224. u32 max_freq;
  225. u32 min_freq;
  226. u32 curr_freq;
  227. bool keep_link_active;
  228. bool enabled;
  229. };
  230. enum ufs_notify_change_status {
  231. PRE_CHANGE,
  232. POST_CHANGE,
  233. };
  234. struct ufs_pa_layer_attr {
  235. u32 gear_rx;
  236. u32 gear_tx;
  237. u32 lane_rx;
  238. u32 lane_tx;
  239. u32 pwr_rx;
  240. u32 pwr_tx;
  241. u32 hs_rate;
  242. };
  243. struct ufs_pwr_mode_info {
  244. bool is_valid;
  245. struct ufs_pa_layer_attr info;
  246. };
  247. /**
  248. * struct ufs_hba_variant_ops - variant specific callbacks
  249. * @name: variant name
  250. * @init: called when the driver is initialized
  251. * @exit: called to cleanup everything done in init
  252. * @get_ufs_hci_version: called to get UFS HCI version
  253. * @clk_scale_notify: notifies that clks are scaled up/down
  254. * @setup_clocks: called before touching any of the controller registers
  255. * @hce_enable_notify: called before and after HCE enable bit is set to allow
  256. * variant specific Uni-Pro initialization.
  257. * @link_startup_notify: called before and after Link startup is carried out
  258. * to allow variant specific Uni-Pro initialization.
  259. * @pwr_change_notify: called before and after a power mode change
  260. * is carried out to allow vendor spesific capabilities
  261. * to be set.
  262. * @setup_xfer_req: called before any transfer request is issued
  263. * to set some things
  264. * @setup_task_mgmt: called before any task management request is issued
  265. * to set some things
  266. * @hibern8_notify: called around hibern8 enter/exit
  267. * @apply_dev_quirks: called to apply device specific quirks
  268. * @fixup_dev_quirks: called to modify device specific quirks
  269. * @suspend: called during host controller PM callback
  270. * @resume: called during host controller PM callback
  271. * @dbg_register_dump: used to dump controller debug information
  272. * @phy_initialization: used to initialize phys
  273. * @device_reset: called to issue a reset pulse on the UFS device
  274. * @config_scaling_param: called to configure clock scaling parameters
  275. * @program_key: program or evict an inline encryption key
  276. * @event_notify: called to notify important events
  277. * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
  278. * @mcq_config_resource: called to configure MCQ platform resources
  279. * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode
  280. * @op_runtime_config: called to config Operation and runtime regs Pointers
  281. * @get_outstanding_cqs: called to get outstanding completion queues
  282. * @config_esi: called to config Event Specific Interrupt
  283. */
  284. struct ufs_hba_variant_ops {
  285. const char *name;
  286. int (*init)(struct ufs_hba *);
  287. void (*exit)(struct ufs_hba *);
  288. u32 (*get_ufs_hci_version)(struct ufs_hba *);
  289. int (*clk_scale_notify)(struct ufs_hba *, bool,
  290. enum ufs_notify_change_status);
  291. int (*setup_clocks)(struct ufs_hba *, bool,
  292. enum ufs_notify_change_status);
  293. int (*hce_enable_notify)(struct ufs_hba *,
  294. enum ufs_notify_change_status);
  295. int (*link_startup_notify)(struct ufs_hba *,
  296. enum ufs_notify_change_status);
  297. int (*pwr_change_notify)(struct ufs_hba *,
  298. enum ufs_notify_change_status status,
  299. struct ufs_pa_layer_attr *,
  300. struct ufs_pa_layer_attr *);
  301. void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
  302. bool is_scsi_cmd);
  303. void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
  304. void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
  305. enum ufs_notify_change_status);
  306. int (*apply_dev_quirks)(struct ufs_hba *hba);
  307. void (*fixup_dev_quirks)(struct ufs_hba *hba);
  308. int (*suspend)(struct ufs_hba *, enum ufs_pm_op,
  309. enum ufs_notify_change_status);
  310. int (*resume)(struct ufs_hba *, enum ufs_pm_op);
  311. void (*dbg_register_dump)(struct ufs_hba *hba);
  312. int (*phy_initialization)(struct ufs_hba *);
  313. int (*device_reset)(struct ufs_hba *hba);
  314. void (*config_scaling_param)(struct ufs_hba *hba,
  315. struct devfreq_dev_profile *profile,
  316. struct devfreq_simple_ondemand_data *data);
  317. int (*program_key)(struct ufs_hba *hba,
  318. const union ufs_crypto_cfg_entry *cfg, int slot);
  319. void (*event_notify)(struct ufs_hba *hba,
  320. enum ufs_event_type evt, void *data);
  321. void (*reinit_notify)(struct ufs_hba *);
  322. int (*mcq_config_resource)(struct ufs_hba *hba);
  323. int (*get_hba_mac)(struct ufs_hba *hba);
  324. int (*op_runtime_config)(struct ufs_hba *hba);
  325. int (*get_outstanding_cqs)(struct ufs_hba *hba,
  326. unsigned long *ocqs);
  327. int (*config_esi)(struct ufs_hba *hba);
  328. };
  329. /* clock gating state */
  330. enum clk_gating_state {
  331. CLKS_OFF,
  332. CLKS_ON,
  333. REQ_CLKS_OFF,
  334. REQ_CLKS_ON,
  335. };
  336. /**
  337. * struct ufs_clk_gating - UFS clock gating related info
  338. * @gate_work: worker to turn off clocks after some delay as specified in
  339. * delay_ms
  340. * @ungate_work: worker to turn on clocks that will be used in case of
  341. * interrupt context
  342. * @state: the current clocks state
  343. * @delay_ms: gating delay in ms
  344. * @is_suspended: clk gating is suspended when set to 1 which can be used
  345. * during suspend/resume
  346. * @delay_attr: sysfs attribute to control delay_attr
  347. * @enable_attr: sysfs attribute to enable/disable clock gating
  348. * @is_enabled: Indicates the current status of clock gating
  349. * @is_initialized: Indicates whether clock gating is initialized or not
  350. * @active_reqs: number of requests that are pending and should be waited for
  351. * completion before gating clocks.
  352. * @clk_gating_workq: workqueue for clock gating work.
  353. */
  354. struct ufs_clk_gating {
  355. struct delayed_work gate_work;
  356. struct work_struct ungate_work;
  357. enum clk_gating_state state;
  358. unsigned long delay_ms;
  359. bool is_suspended;
  360. struct device_attribute delay_attr;
  361. struct device_attribute enable_attr;
  362. bool is_enabled;
  363. bool is_initialized;
  364. int active_reqs;
  365. struct workqueue_struct *clk_gating_workq;
  366. ANDROID_KABI_RESERVE(1);
  367. };
  368. struct ufs_saved_pwr_info {
  369. struct ufs_pa_layer_attr info;
  370. bool is_valid;
  371. };
  372. /**
  373. * struct ufs_clk_scaling - UFS clock scaling related data
  374. * @active_reqs: number of requests that are pending. If this is zero when
  375. * devfreq ->target() function is called then schedule "suspend_work" to
  376. * suspend devfreq.
  377. * @tot_busy_t: Total busy time in current polling window
  378. * @window_start_t: Start time (in jiffies) of the current polling window
  379. * @busy_start_t: Start time of current busy period
  380. * @enable_attr: sysfs attribute to enable/disable clock scaling
  381. * @saved_pwr_info: UFS power mode may also be changed during scaling and this
  382. * one keeps track of previous power mode.
  383. * @workq: workqueue to schedule devfreq suspend/resume work
  384. * @suspend_work: worker to suspend devfreq
  385. * @resume_work: worker to resume devfreq
  386. * @min_gear: lowest HS gear to scale down to
  387. * @is_enabled: tracks if scaling is currently enabled or not, controlled by
  388. * clkscale_enable sysfs node
  389. * @is_allowed: tracks if scaling is currently allowed or not, used to block
  390. * clock scaling which is not invoked from devfreq governor
  391. * @is_initialized: Indicates whether clock scaling is initialized or not
  392. * @is_busy_started: tracks if busy period has started or not
  393. * @is_suspended: tracks if devfreq is suspended or not
  394. */
  395. struct ufs_clk_scaling {
  396. int active_reqs;
  397. unsigned long tot_busy_t;
  398. ktime_t window_start_t;
  399. ktime_t busy_start_t;
  400. struct device_attribute enable_attr;
  401. struct ufs_saved_pwr_info saved_pwr_info;
  402. struct workqueue_struct *workq;
  403. struct work_struct suspend_work;
  404. struct work_struct resume_work;
  405. u32 min_gear;
  406. bool is_enabled;
  407. bool is_allowed;
  408. bool is_initialized;
  409. bool is_busy_started;
  410. bool is_suspended;
  411. ANDROID_KABI_RESERVE(1);
  412. };
  413. #define UFS_EVENT_HIST_LENGTH 8
  414. /**
  415. * struct ufs_event_hist - keeps history of errors
  416. * @pos: index to indicate cyclic buffer position
  417. * @val: cyclic buffer for registers value
  418. * @tstamp: cyclic buffer for time stamp
  419. * @cnt: error counter
  420. */
  421. struct ufs_event_hist {
  422. int pos;
  423. u32 val[UFS_EVENT_HIST_LENGTH];
  424. u64 tstamp[UFS_EVENT_HIST_LENGTH];
  425. unsigned long long cnt;
  426. };
  427. /**
  428. * struct ufs_stats - keeps usage/err statistics
  429. * @last_intr_status: record the last interrupt status.
  430. * @last_intr_ts: record the last interrupt timestamp.
  431. * @hibern8_exit_cnt: Counter to keep track of number of exits,
  432. * reset this after link-startup.
  433. * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
  434. * Clear after the first successful command completion.
  435. * @event: array with event history.
  436. */
  437. struct ufs_stats {
  438. u32 last_intr_status;
  439. u64 last_intr_ts;
  440. u32 hibern8_exit_cnt;
  441. u64 last_hibern8_exit_tstamp;
  442. struct ufs_event_hist event[UFS_EVT_CNT];
  443. };
  444. /**
  445. * enum ufshcd_state - UFS host controller state
  446. * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command
  447. * processing.
  448. * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process
  449. * SCSI commands.
  450. * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
  451. * SCSI commands may be submitted to the controller.
  452. * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
  453. * newly submitted SCSI commands with error code DID_BAD_TARGET.
  454. * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
  455. * failed. Fail all SCSI commands with error code DID_ERROR.
  456. */
  457. enum ufshcd_state {
  458. UFSHCD_STATE_RESET,
  459. UFSHCD_STATE_OPERATIONAL,
  460. UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
  461. UFSHCD_STATE_EH_SCHEDULED_FATAL,
  462. UFSHCD_STATE_ERROR,
  463. };
  464. enum ufshcd_quirks {
  465. /* Interrupt aggregation support is broken */
  466. UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
  467. /*
  468. * delay before each dme command is required as the unipro
  469. * layer has shown instabilities
  470. */
  471. UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1,
  472. /*
  473. * If UFS host controller is having issue in processing LCC (Line
  474. * Control Command) coming from device then enable this quirk.
  475. * When this quirk is enabled, host controller driver should disable
  476. * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
  477. * attribute of device to 0).
  478. */
  479. UFSHCD_QUIRK_BROKEN_LCC = 1 << 2,
  480. /*
  481. * The attribute PA_RXHSUNTERMCAP specifies whether or not the
  482. * inbound Link supports unterminated line in HS mode. Setting this
  483. * attribute to 1 fixes moving to HS gear.
  484. */
  485. UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3,
  486. /*
  487. * This quirk needs to be enabled if the host controller only allows
  488. * accessing the peer dme attributes in AUTO mode (FAST AUTO or
  489. * SLOW AUTO).
  490. */
  491. UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4,
  492. /*
  493. * This quirk needs to be enabled if the host controller doesn't
  494. * advertise the correct version in UFS_VER register. If this quirk
  495. * is enabled, standard UFS host driver will call the vendor specific
  496. * ops (get_ufs_hci_version) to get the correct version.
  497. */
  498. UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
  499. /*
  500. * Clear handling for transfer/task request list is just opposite.
  501. */
  502. UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,
  503. /*
  504. * This quirk needs to be enabled if host controller doesn't allow
  505. * that the interrupt aggregation timer and counter are reset by s/w.
  506. */
  507. UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7,
  508. /*
  509. * This quirks needs to be enabled if host controller cannot be
  510. * enabled via HCE register.
  511. */
  512. UFSHCI_QUIRK_BROKEN_HCE = 1 << 8,
  513. /*
  514. * This quirk needs to be enabled if the host controller regards
  515. * resolution of the values of PRDTO and PRDTL in UTRD as byte.
  516. */
  517. UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9,
  518. /*
  519. * This quirk needs to be enabled if the host controller reports
  520. * OCS FATAL ERROR with device error through sense data
  521. */
  522. UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
  523. /*
  524. * This quirk needs to be enabled if the host controller has
  525. * auto-hibernate capability but it doesn't work.
  526. */
  527. UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
  528. /*
  529. * This quirk needs to disable manual flush for write booster
  530. */
  531. UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
  532. /*
  533. * This quirk needs to disable unipro timeout values
  534. * before power mode change
  535. */
  536. UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
  537. /*
  538. * Align DMA SG entries on a 4 KiB boundary.
  539. */
  540. UFSHCD_QUIRK_4KB_DMA_ALIGNMENT = 1 << 14,
  541. /*
  542. * This quirk needs to be enabled if the host controller does not
  543. * support UIC command
  544. */
  545. UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15,
  546. /*
  547. * This quirk needs to be enabled if the host controller cannot
  548. * support physical host configuration.
  549. */
  550. UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16,
  551. /*
  552. * This quirk needs to be enabled if the host controller has
  553. * 64-bit addressing supported capability but it doesn't work.
  554. */
  555. UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17,
  556. /*
  557. * This quirk needs to be enabled if the host controller has
  558. * auto-hibernate capability but it's FASTAUTO only.
  559. */
  560. UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18,
  561. /*
  562. * Some host raises interrupt (per queue) in addition to
  563. * CQES (traditional) when ESI is disabled.
  564. * Enable this quirk will disable CQES and use per queue interrupt.
  565. */
  566. UFSHCD_QUIRK_MCQ_BROKEN_INTR = 1 << 20,
  567. /*
  568. * Some host does not implement SQ Run Time Command (SQRTC) register
  569. * thus need this quirk to skip related flow.
  570. */
  571. UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21,
  572. };
  573. enum ufshcd_android_quirks {
  574. /*
  575. * IMPORTANT: set this in hba->android_quirks, not hba->quirks!
  576. *
  577. * This quirk needs to be enabled if the host controller supports inline
  578. * encryption, but it needs to initialize the crypto capabilities in a
  579. * nonstandard way and/or it needs to override blk_crypto_ll_ops. If
  580. * enabled, the standard code won't initialize the blk_crypto_profile;
  581. * ufs_hba_variant_ops::init() must do it instead.
  582. */
  583. UFSHCD_ANDROID_QUIRK_CUSTOM_CRYPTO_PROFILE = 1 << 0,
  584. /*
  585. * IMPORTANT: set this in hba->android_quirks, not hba->quirks!
  586. *
  587. * This quirk needs to be enabled if the host controller supports inline
  588. * encryption, but the CRYPTO_GENERAL_ENABLE bit is not implemented and
  589. * breaks the HCE sequence if used.
  590. */
  591. UFSHCD_ANDROID_QUIRK_BROKEN_CRYPTO_ENABLE = 1 << 1,
  592. /*
  593. * IMPORTANT: set this in hba->android_quirks, not hba->quirks!
  594. *
  595. * This quirk needs to be enabled if the host controller requires that
  596. * the PRDT be cleared after each encrypted request because encryption
  597. * keys were stored in it.
  598. */
  599. UFSHCD_ANDROID_QUIRK_KEYS_IN_PRDT = 1 << 2,
  600. };
  601. enum ufshcd_caps {
  602. /* Allow dynamic clk gating */
  603. UFSHCD_CAP_CLK_GATING = 1 << 0,
  604. /* Allow hiberb8 with clk gating */
  605. UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1,
  606. /* Allow dynamic clk scaling */
  607. UFSHCD_CAP_CLK_SCALING = 1 << 2,
  608. /* Allow auto bkops to enabled during runtime suspend */
  609. UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3,
  610. /*
  611. * This capability allows host controller driver to use the UFS HCI's
  612. * interrupt aggregation capability.
  613. * CAUTION: Enabling this might reduce overall UFS throughput.
  614. */
  615. UFSHCD_CAP_INTR_AGGR = 1 << 4,
  616. /*
  617. * This capability allows the device auto-bkops to be always enabled
  618. * except during suspend (both runtime and suspend).
  619. * Enabling this capability means that device will always be allowed
  620. * to do background operation when it's active but it might degrade
  621. * the performance of ongoing read/write operations.
  622. */
  623. UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,
  624. /*
  625. * This capability allows host controller driver to automatically
  626. * enable runtime power management by itself instead of waiting
  627. * for userspace to control the power management.
  628. */
  629. UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
  630. /*
  631. * This capability allows the host controller driver to turn-on
  632. * WriteBooster, if the underlying device supports it and is
  633. * provisioned to be used. This would increase the write performance.
  634. */
  635. UFSHCD_CAP_WB_EN = 1 << 7,
  636. /*
  637. * This capability allows the host controller driver to use the
  638. * inline crypto engine, if it is present
  639. */
  640. UFSHCD_CAP_CRYPTO = 1 << 8,
  641. /*
  642. * This capability allows the controller regulators to be put into
  643. * lpm mode aggressively during clock gating.
  644. * This would increase power savings.
  645. */
  646. UFSHCD_CAP_AGGR_POWER_COLLAPSE = 1 << 9,
  647. /*
  648. * This capability allows the host controller driver to use DeepSleep,
  649. * if it is supported by the UFS device. The host controller driver must
  650. * support device hardware reset via the hba->device_reset() callback,
  651. * in order to exit DeepSleep state.
  652. */
  653. UFSHCD_CAP_DEEPSLEEP = 1 << 10,
  654. /*
  655. * This capability allows the host controller driver to use temperature
  656. * notification if it is supported by the UFS device.
  657. */
  658. UFSHCD_CAP_TEMP_NOTIF = 1 << 11,
  659. /*
  660. * Enable WriteBooster when scaling up the clock and disable
  661. * WriteBooster when scaling the clock down.
  662. */
  663. UFSHCD_CAP_WB_WITH_CLK_SCALING = 1 << 12,
  664. };
/**
 * struct ufs_hba_variant_params - variant-specific tunable parameters
 * @devfreq_profile: devfreq profile used for clock scaling
 * @ondemand_data: parameters for the devfreq simple-ondemand governor
 * @hba_enable_delay_us: delay in microseconds used while enabling the HBA
 *	(NOTE(review): inferred from the name — confirm against users)
 * @wb_flush_threshold: WriteBooster buffer flush threshold
 */
struct ufs_hba_variant_params {
	struct devfreq_dev_profile devfreq_profile;
	struct devfreq_simple_ondemand_data ondemand_data;
	u16 hba_enable_delay_us;
	u32 wb_flush_threshold;
};
#ifdef CONFIG_SCSI_UFS_HPB
/**
 * struct ufshpb_dev_info - UFSHPB device related info
 * @num_lu: the number of user logical units, used to check whether all LUs
 *	finished initialization
 * @rgn_size: device reported HPB region size
 * @srgn_size: device reported HPB sub-region size
 * @slave_conf_cnt: counter to check all LUs finished initialization
 * @hpb_disabled: flag to check if HPB is disabled
 * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
 * @is_legacy: flag to check HPB 1.0
 * @control_mode: either host or device
 */
struct ufshpb_dev_info {
	int num_lu;
	int rgn_size;
	int srgn_size;
	atomic_t slave_conf_cnt;
	bool hpb_disabled;
	u8 max_hpb_single_cmd;
	bool is_legacy;
	u8 control_mode;
};
#endif
/*
 * struct ufs_hba_monitor - statistics about UFS commands.
 *
 * The two-element arrays are split by direction; index [0]/[1] presumably
 * distinguishes read vs. write traffic — NOTE(review): confirm against the
 * code that updates these counters.
 */
struct ufs_hba_monitor {
	unsigned long chunk_size;	/* only requests of this size are monitored */
	unsigned long nr_sec_rw[2];	/* number of sectors transferred */
	ktime_t total_busy[2];		/* accumulated busy time */
	unsigned long nr_req[2];	/* number of requests */
	/* latencies */
	ktime_t lat_sum[2];
	ktime_t lat_max[2];
	ktime_t lat_min[2];
	u32 nr_queued[2];		/* requests currently queued */
	ktime_t busy_start_ts[2];	/* start of the current busy period */
	ktime_t enabled_ts;		/* timestamp when monitoring was enabled */
	bool enabled;			/* whether monitoring is active */
};
/**
 * struct ufshcd_res_info - MCQ related resource regions
 *
 * @name: resource name
 * @resource: pointer to resource region
 * @base: register base address
 */
struct ufshcd_res_info {
	const char *name;
	struct resource *resource;
	void __iomem *base;
};
/*
 * Indices into the hba->res[] array of MCQ register resources;
 * RES_MAX is the array size, not a valid resource.
 */
enum ufshcd_res {
	RES_UFS,
	RES_MCQ,
	RES_MCQ_SQD,
	RES_MCQ_SQIS,
	RES_MCQ_CQD,
	RES_MCQ_CQIS,
	RES_MCQ_VS,
	RES_MAX,
};
/**
 * struct ufshcd_mcq_opr_info_t - MCQ Operation and Runtime registers
 *
 * @offset: doorbell address offset of the register region
 * @stride: per-queue step between doorbells (queues 0..31)
 * @base: register base address
 */
struct ufshcd_mcq_opr_info_t {
	unsigned long offset;
	unsigned long stride;
	void __iomem *base;
};
/*
 * Indices into the hba->mcq_opr[] array of MCQ operation/runtime register
 * regions; OPR_MAX is the array size, not a valid region.
 */
enum ufshcd_mcq_opr {
	OPR_SQD,
	OPR_SQIS,
	OPR_CQD,
	OPR_CQIS,
	OPR_MAX,
};
/**
 * struct ufs_hba - per adapter private structure
 * @mmio_base: UFSHCI base register address
 * @ucdl_base_addr: UFS Command Descriptor base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
 * @utmrdl_base_addr: UTP Task Management Descriptor base address
 * @ucdl_dma_addr: UFS Command Descriptor DMA address
 * @utrdl_dma_addr: UTRDL DMA address
 * @utmrdl_dma_addr: UTMRDL DMA address
 * @host: Scsi_Host instance of the driver
 * @dev: device handle
 * @ufs_device_wlun: WLUN that controls the entire UFS device.
 * @hwmon_device: device instance registered with the hwmon core.
 * @curr_dev_pwr_mode: active UFS device power mode.
 * @uic_link_state: active state of the link to the UFS device.
 * @rpm_lvl: desired UFS power management level during runtime PM.
 * @spm_lvl: desired UFS power management level during system PM.
 * @pm_op_in_progress: whether or not a PM operation is in progress.
 * @ahit: value of Auto-Hibernate Idle Timer register.
 * @lrb: local reference block
 * @outstanding_tasks: Bits representing outstanding task requests
 * @outstanding_lock: Protects @outstanding_reqs.
 * @outstanding_reqs: Bits representing outstanding transfer requests
 * @capabilities: UFS Controller Capabilities
 * @mcq_capabilities: UFS Multi Circular Queue capabilities
 * @nutrs: Transfer Request Queue depth supported by controller
 * @nutmrs: Task Management Queue depth supported by controller
 * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
 * @ufs_version: UFS Version to which controller complies
 * @vops: pointer to variant specific operations
 * @vps: pointer to variant specific parameters
 * @priv: pointer to variant specific private data
 * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
 * @irq: Irq number of the controller
 * @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
 * @dev_ref_clk_freq: reference clock frequency
 * @quirks: bitmask with information about deviations from the UFSHCI standard.
 * @android_quirks: bitmask of Android-specific UFSHCD_ANDROID_QUIRK_* flags.
 * @dev_quirks: bitmask with information about deviations from the UFS standard.
 * @tmf_tag_set: TMF tag set.
 * @tmf_queue: Used to allocate TMF tags.
 * @tmf_rqs: array with pointers to TMF requests while these are in progress.
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for UIC command
 * @uic_async_done: completion used during UIC processing
 * @ufshcd_state: UFSHCD state
 * @logical_unit_scan_finished: whether the logical unit scan has completed
 *	(NOTE(review): inferred from the name — confirm against users)
 * @eh_flags: Error handling flags
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
 * @ee_drv_mask: Exception event mask for driver
 * @ee_usr_mask: Exception event mask for user (set via debugfs)
 * @ee_ctrl_mutex: Used to serialize exception event information.
 * @is_powered: flag to check if HBA is powered
 * @shutting_down: flag to check if shutdown has been invoked
 * @host_sem: semaphore used to serialize concurrent contexts
 * @eh_wq: Workqueue that eh_work works on
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
 * @ufs_stats: various error counters
 * @force_reset: flag to force eh_work perform a full reset
 * @force_pmc: flag to force a power mode change
 * @silence_err_logs: flag to silence error logs
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
 * @nop_out_timeout: NOP OUT timeout value
 * @dev_info: information about the UFS device
 * @auto_bkops_enabled: to track whether bkops is enabled in device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: UFS host controller clocks list node head
 * @req_abort_count: number of times ufshcd_abort() has been called
 * @lanes_per_direction: number of lanes per data direction between the UFS
 *	controller and the UFS device.
 * @pwr_info: holds current power mode
 * @max_pwr_info: keeps the device max valid pwm
 * @clk_gating: information related to clock gating
 * @caps: bitmask with information about UFS controller capabilities
 * @devfreq: frequency scaling information owned by the devfreq core
 * @clk_scaling: frequency scaling information owned by the UFS driver
 * @system_suspending: system suspend has been started and system resume has
 *	not yet finished.
 * @is_sys_suspended: UFS device has been suspended because of system suspend
 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
 *	device is known or not.
 * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
 * @clk_scaling_lock: used to serialize device commands and clock scaling
 * @scsi_block_reqs_cnt: reference counting for scsi block requests
 * @bsg_dev: struct device associated with the BSG queue
 * @bsg_queue: BSG queue associated with the UFS controller
 * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
 *	management) after the UFS device has finished a WriteBooster buffer
 *	flush or auto BKOP.
 * @ufshpb_dev: information related to HPB (Host Performance Booster).
 * @monitor: statistics about UFS commands
 * @crypto_capabilities: Content of crypto capabilities register (0x100)
 * @crypto_cap_array: Array of crypto capabilities
 * @crypto_cfg_register: Start of the crypto cfg array
 * @crypto_profile: the crypto profile of this hba (if applicable)
 * @debugfs_root: UFS controller debugfs root directory
 * @debugfs_ee_work: used to restore ee_ctrl_mask after a delay
 * @debugfs_ee_rate_limit_ms: user configurable delay after which to restore
 *	ee_ctrl_mask
 * @luns_avail: number of regular and well known LUNs supported by the UFS
 *	device
 * @nr_hw_queues: number of hardware queues configured
 * @nr_queues: number of Queues of different queue types
 * @complete_put: whether or not to call ufshcd_rpm_put() from inside
 *	ufshcd_resume_complete()
 * @ext_iid_sup: is EXT_IID is supported by UFSHC
 * @scsi_host_added: whether the SCSI host has been added
 *	(NOTE(review): inferred from the name — confirm against users)
 * @mcq_sup: is mcq supported by UFSHC
 * @mcq_enabled: is mcq ready to accept requests
 * @res: array of resource info of MCQ registers
 * @mcq_base: Multi circular queue registers base address
 * @uhq: array of supported hardware queues
 * @dev_cmd_queue: Queue for issuing device management commands
 * @mcq_opr: MCQ operation and runtime register regions, indexed by
 *	enum ufshcd_mcq_opr
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory reference */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct scsi_device *ufs_device_wlun;

#ifdef CONFIG_SCSI_UFS_HWMON
	struct device *hwmon_device;
#endif

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;

	unsigned long outstanding_tasks;
	spinlock_t outstanding_lock;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	u32 mcq_capabilities;
	int nutmrs;
	u32 reserved_slot;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	struct ufs_hba_variant_params *vps;
	void *priv;
#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
	size_t sg_entry_size;
#endif
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */
	unsigned int android_quirks;	/* for UFSHCD_ANDROID_QUIRK_* flags */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	struct blk_mq_tag_set tmf_tag_set;
	struct request_queue *tmf_queue;
	struct request **tmf_rqs;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	enum ufshcd_state ufshcd_state;
	bool logical_unit_scan_finished;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	u16 ee_drv_mask;
	u16 ee_usr_mask;
	struct mutex ee_ctrl_mutex;
	bool is_powered;
	bool shutting_down;
	struct semaphore host_sem;

	/* Work Queues */
	struct workqueue_struct *eh_wq;
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;
	bool force_reset;
	bool force_pmc;
	bool silence_err_logs;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;
	int nop_out_timeout;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	/* Number of requests aborts */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool system_suspending;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct mutex wb_mutex;
	struct rw_semaphore clk_scaling_lock;
	atomic_t scsi_block_reqs_cnt;

	struct device bsg_dev;
	struct request_queue *bsg_queue;
	struct delayed_work rpm_dev_flush_recheck_work;

#ifdef CONFIG_SCSI_UFS_HPB
	struct ufshpb_dev_info ufshpb_dev;
#endif

	struct ufs_hba_monitor monitor;

#ifdef CONFIG_SCSI_UFS_CRYPTO
	union ufs_crypto_capabilities crypto_capabilities;
	union ufs_crypto_cap_entry *crypto_cap_array;
	u32 crypto_cfg_register;
	struct blk_crypto_profile crypto_profile;
#endif
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_root;
	struct delayed_work debugfs_ee_work;
	u32 debugfs_ee_rate_limit_ms;
#endif
	u32 luns_avail;
	unsigned int nr_hw_queues;
	unsigned int nr_queues[HCTX_MAX_TYPES];
	bool complete_put;
	bool ext_iid_sup;
	bool scsi_host_added;
	bool mcq_sup;
	bool mcq_enabled;
	struct ufshcd_res_info res[RES_MAX];
	void __iomem *mcq_base;
	struct ufs_hw_queue *uhq;
	struct ufs_hw_queue *dev_cmd_queue;
	struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];

	ANDROID_OEM_DATA(1);
};
/**
 * struct ufs_hw_queue - per hardware queue structure
 * @mcq_sq_head: base address of submission queue head pointer
 * @mcq_sq_tail: base address of submission queue tail pointer
 * @mcq_cq_head: base address of completion queue head pointer
 * @mcq_cq_tail: base address of completion queue tail pointer
 * @sqe_base_addr: submission queue entry base address
 * @sqe_dma_addr: submission queue dma address
 * @cqe_base_addr: completion queue base address
 * @cqe_dma_addr: completion queue dma address
 * @max_entries: max number of slots in this hardware queue
 * @id: hardware queue ID
 * @sq_tail_slot: current slot to which SQ tail pointer is pointing
 * @sq_lock: serialize submission queue access
 * @cq_tail_slot: current slot to which CQ tail pointer is pointing
 * @cq_head_slot: current slot to which CQ head pointer is pointing
 * @cq_lock: Synchronize between multiple polling instances
 * @sq_mutex: prevent submission queue concurrent access
 */
struct ufs_hw_queue {
	void __iomem *mcq_sq_head;
	void __iomem *mcq_sq_tail;
	void __iomem *mcq_cq_head;
	void __iomem *mcq_cq_tail;

	void *sqe_base_addr;
	dma_addr_t sqe_dma_addr;
	struct cq_entry *cqe_base_addr;
	dma_addr_t cqe_dma_addr;
	u32 max_entries;
	u32 id;
	u32 sq_tail_slot;
	spinlock_t sq_lock;
	u32 cq_tail_slot;
	u32 cq_head_slot;
	spinlock_t cq_lock;
	/* prevent concurrent access to submission queue */
	struct mutex sq_mutex;
};
  1046. static inline bool is_mcq_enabled(struct ufs_hba *hba)
  1047. {
  1048. return hba->mcq_enabled;
  1049. }
#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
/* Size of one scatter-gather entry, as configured per host controller. */
static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
{
	return hba->sg_entry_size;
}

/* Set the per-host SG entry size; must be at least the standard size. */
static inline void ufshcd_set_sg_entry_size(struct ufs_hba *hba, size_t sg_entry_size)
{
	WARN_ON_ONCE(sg_entry_size < sizeof(struct ufshcd_sg_entry));
	hba->sg_entry_size = sg_entry_size;
}
#else
/* Fixed-size variant: every SG entry is the standard ufshcd_sg_entry. */
static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
{
	return sizeof(struct ufshcd_sg_entry);
}

/* Compile-time check that no variant tries to change the SG entry size. */
#define ufshcd_set_sg_entry_size(hba, sg_entry_size)                   \
	({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
#endif
  1068. static inline size_t ufshcd_get_ucd_size(const struct ufs_hba *hba)
  1069. {
  1070. return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
  1071. }
  1072. /* Returns true if clocks can be gated. Otherwise false */
  1073. static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
  1074. {
  1075. return hba->caps & UFSHCD_CAP_CLK_GATING;
  1076. }
  1077. static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
  1078. {
  1079. return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
  1080. }
  1081. static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
  1082. {
  1083. return hba->caps & UFSHCD_CAP_CLK_SCALING;
  1084. }
  1085. static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
  1086. {
  1087. return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
  1088. }
  1089. static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
  1090. {
  1091. return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
  1092. }
  1093. static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
  1094. {
  1095. return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
  1096. !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
  1097. }
  1098. static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
  1099. {
  1100. return !!(ufshcd_is_link_hibern8(hba) &&
  1101. (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
  1102. }
  1103. static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
  1104. {
  1105. return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
  1106. !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
  1107. }
  1108. static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
  1109. {
  1110. return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit);
  1111. }
  1112. static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
  1113. {
  1114. return hba->caps & UFSHCD_CAP_WB_EN;
  1115. }
  1116. static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
  1117. {
  1118. return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING;
  1119. }
/* Accessors for the MCQ register space (hba->mcq_base). */
#define ufsmcq_writel(hba, val, reg)	\
	writel((val), (hba)->mcq_base + (reg))
#define ufsmcq_readl(hba, reg)	\
	readl((hba)->mcq_base + (reg))

/* Relaxed variants of the MCQ accessors (writel_relaxed/readl_relaxed). */
#define ufsmcq_writelx(hba, val, reg)	\
	writel_relaxed((val), (hba)->mcq_base + (reg))
#define ufsmcq_readlx(hba, reg)	\
	readl_relaxed((hba)->mcq_base + (reg))

/* Accessors for the standard UFSHCI register space (hba->mmio_base). */
#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))
  1132. /**
  1133. * ufshcd_rmwl - perform read/modify/write for a controller register
  1134. * @hba: per adapter instance
  1135. * @mask: mask to apply on read value
  1136. * @val: actual value to write
  1137. * @reg: register address
  1138. */
  1139. static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
  1140. {
  1141. u32 tmp;
  1142. tmp = ufshcd_readl(hba, reg);
  1143. tmp &= ~mask;
  1144. tmp |= (val & mask);
  1145. ufshcd_writel(hba, tmp, reg);
  1146. }
/* Host allocation and controller lifecycle. */
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);

/* Link power state transitions. */
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);

/* Miscellaneous helpers. */
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);

/* MCQ (Multi Circular Queue) support. */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
					 struct ufs_hw_queue *hwq);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
  1171. /**
  1172. * ufshcd_set_variant - set variant specific data to the hba
  1173. * @hba: per adapter instance
  1174. * @variant: pointer to variant specific data
  1175. */
  1176. static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
  1177. {
  1178. BUG_ON(!hba);
  1179. hba->priv = variant;
  1180. }
  1181. /**
  1182. * ufshcd_get_variant - get variant specific data from the hba
  1183. * @hba: per adapter instance
  1184. */
  1185. static inline void *ufshcd_get_variant(struct ufs_hba *hba)
  1186. {
  1187. BUG_ON(!hba);
  1188. return hba->priv;
  1189. }
/* Runtime PM entry points (dev_pm_ops callbacks). */
#ifdef CONFIG_PM
extern int ufshcd_runtime_suspend(struct device *dev);
extern int ufshcd_runtime_resume(struct device *dev);
#endif
/* System-wide PM entry points (dev_pm_ops callbacks). */
#ifdef CONFIG_PM_SLEEP
extern int ufshcd_system_suspend(struct device *dev);
extern int ufshcd_system_resume(struct device *dev);
extern int ufshcd_system_freeze(struct device *dev);
extern int ufshcd_system_thaw(struct device *dev);
extern int ufshcd_system_restore(struct device *dev);
#endif
extern int ufshcd_shutdown(struct ufs_hba *hba);

/* DME attribute access. */
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
				      int agreed_gear,
				      int adapt_val);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *desired_pwr_mode);
extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode);

/* UIC command interfaces for DME primitives */
/* Peer selector for ufshcd_dme_{set,get}_attr(). */
#define DME_LOCAL	0
#define DME_PEER	1
/* Attribute-set type for ufshcd_dme_set_attr(). */
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */
  1217. static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
  1218. u32 mib_val)
  1219. {
  1220. return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
  1221. mib_val, DME_LOCAL);
  1222. }
  1223. static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
  1224. u32 mib_val)
  1225. {
  1226. return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
  1227. mib_val, DME_LOCAL);
  1228. }
  1229. static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
  1230. u32 mib_val)
  1231. {
  1232. return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
  1233. mib_val, DME_PEER);
  1234. }
  1235. static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
  1236. u32 mib_val)
  1237. {
  1238. return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
  1239. mib_val, DME_PEER);
  1240. }
  1241. static inline int ufshcd_dme_get(struct ufs_hba *hba,
  1242. u32 attr_sel, u32 *mib_val)
  1243. {
  1244. return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
  1245. }
  1246. static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
  1247. u32 attr_sel, u32 *mib_val)
  1248. {
  1249. return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
  1250. }
  1251. static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
  1252. {
  1253. return (pwr_info->pwr_rx == FAST_MODE ||
  1254. pwr_info->pwr_rx == FASTAUTO_MODE) &&
  1255. (pwr_info->pwr_tx == FAST_MODE ||
  1256. pwr_info->pwr_tx == FASTAUTO_MODE);
  1257. }
  1258. static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
  1259. {
  1260. return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
  1261. }
/* Descriptor / attribute / flag query interfaces. */
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val);
int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res);

/* BKOPS and auto-hibern8 control. */
int ufshcd_bkops_ctrl(struct ufs_hba *hba, enum bkops_status status);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
			     const struct ufs_dev_quirk *fixups);

/* String-descriptor format selector for ufshcd_read_string_desc(). */
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);

/* Clock gating reference counting. */
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

/* Raw UPIU pass-through (used e.g. by the BSG interface). */
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
				     struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req,
				     struct ufs_ehs *ehs_rsp, int sg_cnt,
				     struct scatterlist *sg_list, enum dma_data_direction dir);

/* WriteBooster control. */
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);

/* System suspend prepare/complete hooks. */
int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
  1303. /* Wrapper functions for safely calling variant operations */
  1304. static inline int ufshcd_vops_init(struct ufs_hba *hba)
  1305. {
  1306. if (hba->vops && hba->vops->init)
  1307. return hba->vops->init(hba);
  1308. return 0;
  1309. }
  1310. static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
  1311. {
  1312. if (hba->vops && hba->vops->phy_initialization)
  1313. return hba->vops->phy_initialization(hba);
  1314. return 0;
  1315. }
/* Table mapping ufs_pm_level values to device/link power states. */
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];

/* Dump a range of controller registers with the given log prefix. */
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);

/* Exception event control mask management. */
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
			     const u16 *other_mask, u16 set, u16 clr);
  1323. #endif /* End of Header */