/* internal.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  4. *
  5. */
  6. #ifndef _MHI_INT_H
  7. #define _MHI_INT_H
  8. #include "../common.h"
  9. #include "misc.h"
extern struct bus_type mhi_bus_type;

/* Host request register: writing MHI_SOC_RESET_REQ asks the device SoC to reset */
#define MHI_SOC_RESET_REQ_OFFSET	0xb0
#define MHI_SOC_RESET_REQ		BIT(0)

/* SoC hardware version register and its bit-fields */
#define SOC_HW_VERSION_OFFS		0x224
#define SOC_HW_VERSION_FAM_NUM_BMSK	GENMASK(31, 28)
#define SOC_HW_VERSION_DEV_NUM_BMSK	GENMASK(27, 16)
#define SOC_HW_VERSION_MAJOR_VER_BMSK	GENMASK(15, 8)
#define SOC_HW_VERSION_MINOR_VER_BMSK	GENMASK(7, 0)
/*
 * Context tables for event rings, channels and command rings, together
 * with the DMA addresses of each table (presumably handed to the device
 * so it can access them — confirm against the MMIO init code).
 */
struct mhi_ctxt {
	struct mhi_event_ctxt *er_ctxt;		/* event ring context array */
	struct mhi_chan_ctxt *chan_ctxt;	/* channel context array */
	struct mhi_cmd_ctxt *cmd_ctxt;		/* command ring context array */
	dma_addr_t er_ctxt_addr;		/* DMA address of er_ctxt */
	dma_addr_t chan_ctxt_addr;		/* DMA address of chan_ctxt */
	dma_addr_t cmd_ctxt_addr;		/* DMA address of cmd_ctxt */
};
/* One entry of a BHIe vector table: a DMA-addressed segment and its size */
struct bhi_vec_entry {
	u64 dma_addr;
	u64 size;
};
/* Channel state transition types (reset / stop / start) */
enum mhi_ch_state_type {
	MHI_CH_STATE_TYPE_RESET,
	MHI_CH_STATE_TYPE_STOP,
	MHI_CH_STATE_TYPE_START,
	MHI_CH_STATE_TYPE_MAX,
};

extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
/* Printable name for a channel state type, guarding out-of-range values */
#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
				     "INVALID_STATE" : \
				     mhi_ch_state_type_str[(state)])
  41. #define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
  42. mode != MHI_DB_BRST_ENABLE)
  43. extern const char * const mhi_ee_str[MHI_EE_MAX];
  44. #define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
  45. "INVALID_EE" : mhi_ee_str[ee])
  46. #define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
  47. ee == MHI_EE_EDL)
  48. #define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS)
  49. #define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL)
  50. #define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
  51. ee == MHI_EE_FP)
/* Device state transitions handled by the PM state worker */
enum dev_st_transition {
	DEV_ST_TRANSITION_PBL,
	DEV_ST_TRANSITION_READY,
	DEV_ST_TRANSITION_SBL,
	DEV_ST_TRANSITION_MISSION_MODE,
	DEV_ST_TRANSITION_FP,
	DEV_ST_TRANSITION_SYS_ERR,
	DEV_ST_TRANSITION_DISABLE,
	DEV_ST_TRANSITION_MAX,
};

extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
/* Printable name for a device state transition, guarding out-of-range values */
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
				       "INVALID_STATE" : dev_state_tran_str[state])
/* internal power states */
enum mhi_pm_state {
	MHI_PM_STATE_DISABLE,
	MHI_PM_STATE_POR,		/* power on reset */
	MHI_PM_STATE_M0,
	MHI_PM_STATE_M2,
	MHI_PM_STATE_M3_ENTER,
	MHI_PM_STATE_M3,
	MHI_PM_STATE_M3_EXIT,
	MHI_PM_STATE_FW_DL_ERR,
	MHI_PM_STATE_SYS_ERR_DETECT,
	MHI_PM_STATE_SYS_ERR_PROCESS,
	MHI_PM_STATE_SHUTDOWN_PROCESS,
	MHI_PM_STATE_LD_ERR_FATAL_DETECT,
	MHI_PM_STATE_MAX
};

/*
 * Single-bit encodings of the states above; pm_state is held as a bitmask
 * so that groups of states can be tested with one AND (see the
 * MHI_*_VALID macros below).
 */
#define MHI_PM_DISABLE			BIT(0)
#define MHI_PM_POR			BIT(1)
#define MHI_PM_M0			BIT(2)
#define MHI_PM_M2			BIT(3)
#define MHI_PM_M3_ENTER			BIT(4)
#define MHI_PM_M3			BIT(5)
#define MHI_PM_M3_EXIT			BIT(6)
/* firmware download failure state */
#define MHI_PM_FW_DL_ERR		BIT(7)
#define MHI_PM_SYS_ERR_DETECT		BIT(8)
#define MHI_PM_SYS_ERR_PROCESS		BIT(9)
#define MHI_PM_SHUTDOWN_PROCESS		BIT(10)
/* link not accessible */
#define MHI_PM_LD_ERR_FATAL_DETECT	BIT(11)
  95. #define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
  96. MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
  97. MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
  98. MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
  99. #define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
  100. #define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
  101. #define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
  102. #define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
  103. MHI_PM_M2 | MHI_PM_M3_EXIT))
  104. #define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
  105. #define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
  106. #define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
  107. MHI_PM_IN_ERROR_STATE(pm_state))
  108. #define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
  109. (MHI_PM_M3_ENTER | MHI_PM_M3))
#define NR_OF_CMD_RINGS			1
#define CMD_EL_PER_RING			128
#define PRIMARY_CMD_RING		0
#define MHI_DEV_WAKE_DB			127	/* doorbell used for device wake */
/* Random value in [1, bmsk]: prandom_u32_max() yields [0, bmsk-1], plus one */
#define MHI_RANDOM_U32_NONZERO(bmsk)	(prandom_u32_max(bmsk) + 1)
/* Event ring type as encoded in the event ring context */
enum mhi_er_type {
	MHI_ER_TYPE_INVALID = 0x0,
	MHI_ER_TYPE_VALID = 0x1,
};
/* Doorbell configuration and the ring-specific routine used to ring it */
struct db_cfg {
	bool reset_req;			/* doorbell needs re-arming after reset */
	bool db_mode;			/* current doorbell mode state */
	u32 pollcfg;			/* poll configuration for burst mode */
	enum mhi_db_brst_mode brstmode;	/* burst mode enabled/disabled */
	dma_addr_t db_val;		/* last value written to the doorbell */
	void (*process_db)(struct mhi_controller *mhi_cntrl,
			   struct db_cfg *db_cfg, void __iomem *io_addr,
			   dma_addr_t db_val);
};
/* One row of the PM state machine: allowed destination states from a state */
struct mhi_pm_transitions {
	enum mhi_pm_state from_state;
	u32 to_states;		/* bitmask of valid MHI_PM_* targets */
};

/* Queued device state transition, processed by the PM state worker */
struct state_transition {
	struct list_head node;
	enum dev_st_transition state;
};
/* Generic ring descriptor shared by command, event and transfer rings */
struct mhi_ring {
	dma_addr_t dma_handle;	/* DMA address of the ring buffer */
	dma_addr_t iommu_base;	/* device-visible base address of the ring */
	__le64 *ctxt_wp;	/* point to ctxt wp */
	void *pre_aligned;	/* raw allocation backing the ring — presumably unaligned; base is carved from it */
	void *base;		/* CPU address of the ring base */
	void *rp;		/* read pointer */
	void *wp;		/* write pointer */
	size_t el_size;		/* size of one ring element in bytes */
	size_t len;		/* usable ring length in bytes */
	size_t elements;	/* number of elements in the ring */
	size_t alloc_size;	/* size of the pre_aligned allocation */
	void __iomem *db_addr;	/* doorbell register for this ring */
};
/* Command ring plus the lock serializing command submission */
struct mhi_cmd {
	struct mhi_ring ring;
	spinlock_t lock;
};

/* Book-keeping for one queued buffer, including optional bounce buffer */
struct mhi_buf_info {
	void *v_addr;			/* client's virtual address */
	void *bb_addr;			/* bounce buffer address, if used */
	void *wp;			/* TRE ring position for this buffer */
	void *cb_buf;			/* context passed back in the xfer callback */
	dma_addr_t p_addr;		/* mapped DMA address */
	size_t len;
	enum dma_data_direction dir;
	bool used;	/* Indicates whether the buffer is used or not */
	bool pre_mapped;	/* Already pre-mapped by client */
};
/* Per-event-ring state: ring storage, doorbell, IRQ and processing hooks */
struct mhi_event {
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *mhi_chan; /* dedicated to channel */
	u32 er_index;		/* index of this event ring */
	u32 intmod;		/* interrupt moderation setting */
	u32 irq;		/* IRQ vector servicing this ring */
	int chan; /* this event ring is dedicated to a channel (optional) */
	u32 priority;
	enum mhi_er_data_type data_type;
	struct mhi_ring ring;
	struct db_cfg db_cfg;
	struct tasklet_struct task;	/* tasklet context for event processing */
	struct work_struct work;	/* workqueue context for event processing */
	spinlock_t lock;
	/* ring-type-specific event processing routine */
	int (*process_event)(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota);
	bool hw_ring;		/* ring serves a hardware channel */
	bool cl_manage;		/* client manages the event ring */
	bool offload_ev; /* managed by a device driver */
};
/* Per-channel state: rings, doorbell, locking and client callback */
struct mhi_chan {
	const char *name;
	/*
	 * Important: When consuming, increment tre_ring first and when
	 * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
	 * is guaranteed to have space so we do not need to check both rings.
	 */
	struct mhi_ring buf_ring;	/* tracks client buffers queued on the channel */
	struct mhi_ring tre_ring;	/* transfer ring shared with the device */
	u32 chan;			/* channel number */
	u32 er_index;			/* event ring servicing this channel */
	u32 intmod;			/* interrupt moderation setting */
	enum mhi_ch_type type;
	enum dma_data_direction dir;	/* inbound or outbound */
	struct db_cfg db_cfg;
	enum mhi_ch_ee_mask ee_mask;	/* EEs in which this channel may run */
	enum mhi_ch_state ch_state;
	enum mhi_ev_ccs ccs;		/* last completion code */
	struct mhi_device *mhi_dev;
	/* client callback invoked on transfer completion */
	void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
	struct mutex mutex;		/* serializes channel start/stop/reset */
	struct completion completion;	/* signaled on command completion */
	rwlock_t lock;
	struct list_head node;
	bool lpm_notify;		/* client wants low-power-mode notifications */
	bool configured;
	bool offload_ch;		/* channel managed by a device driver */
	bool pre_alloc;			/* buffers pre-allocated and auto-queued */
	bool wake_capable;
};
/* Default MHI timeout */
#define MHI_TIMEOUT_MS (1000)

/* debugfs related functions */
#ifdef CONFIG_MHI_BUS_DEBUG
void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_debugfs_init(void);
void mhi_debugfs_exit(void);
#else
/* No-op stubs so callers need no #ifdef when debugfs support is disabled */
static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_debugfs_init(void)
{
}

static inline void mhi_debugfs_exit(void)
{
}
#endif
/* Device creation/teardown and BHIe vector table management */
struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
int mhi_destroy_device(struct device *dev, void *data);
void mhi_create_devices(struct mhi_controller *mhi_cntrl);
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info, size_t alloc_size);
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info);

/* Power management APIs */
enum mhi_pm_state __must_check mhi_tryset_pm_state(
					struct mhi_controller *mhi_cntrl,
					enum mhi_pm_state state);
const char *to_mhi_pm_state_str(u32 state);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);

/* Command submission and firmware download */
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd);
int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
int mhi_rddm_download_status(struct mhi_controller *mhi_cntrl);
  264. static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
  265. {
  266. return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
  267. mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
  268. }
  269. static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
  270. {
  271. pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
  272. mhi_cntrl->runtime_get(mhi_cntrl);
  273. mhi_cntrl->runtime_put(mhi_cntrl);
  274. }
  275. static inline bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
  276. {
  277. return ((addr >= ring->iommu_base &&
  278. addr < ring->iommu_base + ring->len) && (addr % 16 == 0));
  279. }
/* Register access methods */
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
		     void __iomem *db_addr, dma_addr_t db_val);
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_mode, void __iomem *db_addr,
			     dma_addr_t db_val);
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out);
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 *out);
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 val, u32 delayus);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val);
int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
				     void __iomem *base, u32 offset, u32 mask,
				     u32 val);

/* Doorbell helpers for event, command and transfer rings */
void mhi_ring_er_db(struct mhi_event *mhi_event);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val);
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan);

/* Translate between ring DMA addresses and CPU virtual addresses */
void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr);
dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr);
/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		     struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);

/* Automatically allocate and queue inbound buffers */
#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)

/* Channel lifecycle: prepare, context init/deinit, reset */
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan, unsigned int flags);
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan);
void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
		    struct mhi_chan *mhi_chan);

/* Event processing methods */
void mhi_ctrl_ev_task(unsigned long data);
void mhi_ev_task(unsigned long data);
void mhi_process_ev_work(struct work_struct *work);
void mhi_process_sleeping_events(struct mhi_controller *mhi_cntrl);
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event, u32 event_quota);

/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);

/* TRE generation and buffer DMA mapping (with or without bounce buffers) */
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info);
void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info);
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info);
  349. #endif /* _MHI_INT_H */