/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015, 2021 The Linux Foundation. All rights reserved.
 */
#ifndef LINUX_MMC_CQHCI_H
#define LINUX_MMC_CQHCI_H

#include <linux/compiler.h>
#include <linux/bitops.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/wait.h>
#include <linux/irqreturn.h>
#include <asm/io.h>
/* registers */
/* version */
#define CQHCI_VER 0x00
#define CQHCI_VER_MAJOR(x) (((x) & GENMASK(11, 8)) >> 8)
#define CQHCI_VER_MINOR1(x) (((x) & GENMASK(7, 4)) >> 4)
#define CQHCI_VER_MINOR2(x) ((x) & GENMASK(3, 0))
/* capabilities */
#define CQHCI_CAP 0x04
#define CQHCI_CAP_CS 0x10000000 /* Crypto Support */
/* configuration */
#define CQHCI_CFG 0x08
#define CQHCI_DCMD 0x00001000 /* enable the direct-command (DCMD) task slot */
#define CQHCI_TASK_DESC_SZ 0x00000100 /* set: 128-bit task descriptors */
#define CQHCI_CRYPTO_GENERAL_ENABLE 0x00000002
#define CQHCI_ENABLE 0x00000001
/* control */
#define CQHCI_CTL 0x0C
#define CQHCI_CLEAR_ALL_TASKS 0x00000100
#define CQHCI_HALT 0x00000001
/* interrupt status */
#define CQHCI_IS 0x10
#define CQHCI_IS_HAC BIT(0) /* Halt Complete */
#define CQHCI_IS_TCC BIT(1) /* Task Complete */
#define CQHCI_IS_RED BIT(2) /* Response Error Detected */
#define CQHCI_IS_TCL BIT(3) /* Task Cleared */
#define CQHCI_IS_GCE BIT(4) /* General Crypto Error */
#define CQHCI_IS_ICCE BIT(5) /* Invalid Crypto Config Error */
/* interrupts the core IRQ handler acts on; HAC/TCL are waited for inline */
#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED | \
CQHCI_IS_GCE | CQHCI_IS_ICCE)
/* interrupt status enable */
#define CQHCI_ISTE 0x14
/* interrupt signal enable */
#define CQHCI_ISGE 0x18
/* interrupt coalescing */
#define CQHCI_IC 0x1C
#define CQHCI_IC_ENABLE BIT(31)
#define CQHCI_IC_RESET BIT(16)
#define CQHCI_IC_ICCTHWEN BIT(15) /* counter-threshold write enable */
#define CQHCI_IC_ICCTH(x) (((x) & 0x1F) << 8) /* counter threshold */
#define CQHCI_IC_ICTOVALWEN BIT(7) /* timeout-value write enable */
#define CQHCI_IC_ICTOVAL(x) ((x) & 0x7F) /* timeout value */
/* task list base address */
#define CQHCI_TDLBA 0x20
/* task list base address upper */
#define CQHCI_TDLBAU 0x24
/* door-bell */
#define CQHCI_TDBR 0x28
/* task completion notification */
#define CQHCI_TCN 0x2C
/* device queue status */
#define CQHCI_DQS 0x30
/* device pending tasks */
#define CQHCI_DPT 0x34
/* task clear */
#define CQHCI_TCLR 0x38
/* task descriptor processing error */
#define CQHCI_TDPE 0x3c
/* send status config 1 */
#define CQHCI_SSC1 0x40
#define CQHCI_SSC1_CBC_MASK GENMASK(19, 16) /* CMD13 blocks-count field */
/* send status config 2 */
#define CQHCI_SSC2 0x44
/*
 * Value n means CQE would send CMD13 during the transfer of data block
 * BLOCK_CNT-n
 */
#define SEND_QSR_INTERVAL 0x70001
/* response for dcmd */
#define CQHCI_CRDCT 0x48
/* response mode error mask */
#define CQHCI_RMEM 0x50
/* task error info: cmd (C_*) and data (D_*) halves of the register */
#define CQHCI_TERRI 0x54
#define CQHCI_TERRI_C_INDEX(x) ((x) & GENMASK(5, 0)) /* erring cmd index */
#define CQHCI_TERRI_C_TASK(x) (((x) & GENMASK(12, 8)) >> 8) /* erring task id */
#define CQHCI_TERRI_C_VALID(x) ((x) & BIT(15)) /* cmd error info valid */
#define CQHCI_TERRI_D_INDEX(x) (((x) & GENMASK(21, 16)) >> 16)
#define CQHCI_TERRI_D_TASK(x) (((x) & GENMASK(28, 24)) >> 24)
#define CQHCI_TERRI_D_VALID(x) ((x) & BIT(31))
/* command response index */
#define CQHCI_CRI 0x58
/* command response argument */
#define CQHCI_CRA 0x5C
  97. /*
  98. * Add new macro for updated CQ vendor specific
  99. * register address for SDHC v5.0 onwards.
  100. */
  101. #define CQE_V5_VENDOR_CFG 0x900
  102. #define CQHCI_VENDOR_CFG 0x100
  103. #define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
  104. /* crypto capabilities */
  105. #define CQHCI_CCAP 0x100
  106. #define CQHCI_CRYPTOCAP 0x104
  107. #define CQHCI_INT_ALL 0xF
  108. #define CQHCI_IC_DEFAULT_ICCTH 31
  109. #define CQHCI_IC_DEFAULT_ICTOVAL 1
  110. /* attribute fields */
  111. #define CQHCI_VALID(x) (((x) & 1) << 0)
  112. #define CQHCI_END(x) (((x) & 1) << 1)
  113. #define CQHCI_INT(x) (((x) & 1) << 2)
  114. #define CQHCI_ACT(x) (((x) & 0x7) << 3)
  115. /* data command task descriptor fields */
  116. #define CQHCI_FORCED_PROG(x) (((x) & 1) << 6)
  117. #define CQHCI_CONTEXT(x) (((x) & 0xF) << 7)
  118. #define CQHCI_DATA_TAG(x) (((x) & 1) << 11)
  119. #define CQHCI_DATA_DIR(x) (((x) & 1) << 12)
  120. #define CQHCI_PRIORITY(x) (((x) & 1) << 13)
  121. #define CQHCI_QBAR(x) (((x) & 1) << 14)
  122. #define CQHCI_REL_WRITE(x) (((x) & 1) << 15)
  123. #define CQHCI_BLK_COUNT(x) (((x) & 0xFFFF) << 16)
  124. #define CQHCI_BLK_ADDR(x) (((x) & 0xFFFFFFFF) << 32)
  125. /* direct command task descriptor fields */
  126. #define CQHCI_CMD_INDEX(x) (((x) & 0x3F) << 16)
  127. #define CQHCI_CMD_TIMING(x) (((x) & 1) << 22)
  128. #define CQHCI_RESP_TYPE(x) (((x) & 0x3) << 23)
  129. /* crypto task descriptor fields (for bits 64-127 of task descriptor) */
  130. #define CQHCI_CRYPTO_ENABLE_BIT (1ULL << 47)
  131. #define CQHCI_CRYPTO_KEYSLOT(x) ((u64)(x) << 32)
  132. /* transfer descriptor fields */
  133. #define CQHCI_DAT_LENGTH(x) (((x) & 0xFFFF) << 16)
  134. #define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32)
  135. #define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0)
/* CCAP - Crypto Capability 100h */
union cqhci_crypto_capabilities {
	__le32 reg_val;
	struct {
		u8 num_crypto_cap;	/* number of x-CRYPTOCAP entries */
		u8 config_count;	/* number of crypto config slots */
		u8 reserved;
		u8 config_array_ptr;	/* offset of the config array -- per CQHCI crypto spec */
	};
};
/* Key sizes as encoded in the crypto capability entries. */
enum cqhci_crypto_key_size {
	CQHCI_CRYPTO_KEY_SIZE_INVALID = 0,
	CQHCI_CRYPTO_KEY_SIZE_128 = 1,
	CQHCI_CRYPTO_KEY_SIZE_192 = 2,
	CQHCI_CRYPTO_KEY_SIZE_256 = 3,
	CQHCI_CRYPTO_KEY_SIZE_512 = 4,
};
/* Crypto algorithm IDs as encoded in the crypto capability entries. */
enum cqhci_crypto_alg {
	CQHCI_CRYPTO_ALG_AES_XTS = 0,
	CQHCI_CRYPTO_ALG_BITLOCKER_AES_CBC = 1,
	CQHCI_CRYPTO_ALG_AES_ECB = 2,
	CQHCI_CRYPTO_ALG_ESSIV_AES_CBC = 3,
};
/* x-CRYPTOCAP - Crypto Capability X */
union cqhci_crypto_cap_entry {
	__le32 reg_val;
	struct {
		u8 algorithm_id;	/* enum cqhci_crypto_alg */
		u8 sdus_mask;		/* Supported data unit size mask */
		u8 key_size;		/* enum cqhci_crypto_key_size */
		u8 reserved;
	};
};
#define CQHCI_CRYPTO_CONFIGURATION_ENABLE (1 << 7)
#define CQHCI_CRYPTO_KEY_MAX_SIZE 64
/* x-CRYPTOCFG - Crypto Configuration X */
union cqhci_crypto_cfg_entry {
	__le32 reg_val[32];	/* 128-byte raw register view */
	struct {
		u8 crypto_key[CQHCI_CRYPTO_KEY_MAX_SIZE];
		u8 data_unit_size;
		u8 crypto_cap_idx;	/* index into the x-CRYPTOCAP array */
		u8 reserved_1;
		u8 config_enable;	/* CQHCI_CRYPTO_CONFIGURATION_ENABLE */
		u8 reserved_multi_host;
		u8 reserved_2;
		u8 vsb[2];		/* vendor specific bits */
		u8 reserved_3[56];
	};
};
/* Only pointers to these are held below; full definitions not needed here. */
struct cqhci_host_ops;
struct mmc_host;
struct mmc_request;
struct cqhci_slot;

/* Per-controller state for a CQHCI (eMMC Command Queueing) engine. */
struct cqhci_host {
	const struct cqhci_host_ops *ops;	/* host-specific hooks */
	void __iomem *mmio;			/* CQE register base */
	struct mmc_host *mmc;
	spinlock_t lock;
	/* relative card address of device */
	unsigned int rca;
	/* 64 bit DMA */
	bool dma64;
	int num_slots;				/* task slots (incl. DCMD slot) */
	int qcnt;				/* tasks currently queued */
	u32 dcmd_slot;				/* slot reserved for DCMD */
	u32 caps;
#define CQHCI_TASK_DESC_SZ_128 0x1
	u32 quirks;
#define CQHCI_QUIRK_SHORT_TXFR_DESC_SZ 0x1
	bool enabled;
	bool halted;
	bool init_done;
	bool activated;
	bool waiting_for_idle;
	bool recovery_halt;			/* halted for error recovery */
	bool offset_changed;
	size_t desc_size;			/* task descriptor list size */
	size_t data_size;			/* transfer descriptor list size */
	u8 *desc_base;
	/* total descriptor size */
	u8 slot_sz;
	/* 64/128 bit depends on CQHCI_CFG */
	u8 task_desc_len;
	/* 64 bit on 32-bit arch, 128 bit on 64-bit */
	u8 link_desc_len;
	u8 *trans_desc_base;
	/* same length as transfer descriptor */
	u8 trans_desc_len;
	dma_addr_t desc_dma_base;
	dma_addr_t trans_desc_dma_base;
	struct completion halt_comp;		/* signalled on halt complete */
	wait_queue_head_t wait_queue;		/* waiters for queue idle */
	struct cqhci_slot *slot;		/* per-slot request tracking */
#ifdef CONFIG_MMC_CRYPTO
	union cqhci_crypto_capabilities crypto_capabilities;
	union cqhci_crypto_cap_entry *crypto_cap_array;
	u32 crypto_cfg_register;
	void __iomem *ice_mmio;			/* inline crypto engine regs */
#endif
#if IS_ENABLED(CONFIG_MMC_CRYPTO_QTI)
	struct platform_device *pdev;
#endif
#if (IS_ENABLED(CONFIG_QTI_HW_KEY_MANAGER) || IS_ENABLED(CONFIG_QTI_HW_KEY_MANAGER_V1))
	void __iomem *ice_hwkm_mmio;		/* hardware key manager regs */
#endif
};
/* Host-controller callbacks; unset (NULL) members fall back to defaults. */
struct cqhci_host_ops {
	void (*dumpregs)(struct mmc_host *mmc);
	/* optional overrides for the default relaxed MMIO accessors */
	void (*write_l)(struct cqhci_host *host, u32 val, int reg);
	u32 (*read_l)(struct cqhci_host *host, int reg);
	void (*enable)(struct mmc_host *mmc);
	void (*disable)(struct mmc_host *mmc, bool recovery);
	/* let the host patch the 64-bit DCMD task descriptor before issue */
	void (*update_dcmd_desc)(struct mmc_host *mmc, struct mmc_request *mrq,
				 u64 *data);
	void (*pre_enable)(struct mmc_host *mmc);
	void (*post_disable)(struct mmc_host *mmc);
#ifdef CONFIG_MMC_CRYPTO
	/* program an inline-crypto key into the given config slot */
	int (*program_key)(struct cqhci_host *cq_host,
			   const union cqhci_crypto_cfg_entry *cfg, int slot);
#endif
	void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set);
};
  259. static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
  260. {
  261. if (unlikely(host->ops->write_l))
  262. host->ops->write_l(host, val, reg);
  263. else
  264. writel_relaxed(val, host->mmio + reg);
  265. }
  266. static inline u32 cqhci_readl(struct cqhci_host *host, int reg)
  267. {
  268. if (unlikely(host->ops->read_l))
  269. return host->ops->read_l(host, reg);
  270. else
  271. return readl_relaxed(host->mmio + reg);
  272. }
struct platform_device;

/* Core CQHCI API implemented by the common CQHCI code. */
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error);
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64);
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev);
int cqhci_deactivate(struct mmc_host *mmc);
/* Suspend is just deactivation; the CQE is re-activated on resume. */
static inline int cqhci_suspend(struct mmc_host *mmc)
{
	return cqhci_deactivate(mmc);
}
int cqhci_resume(struct mmc_host *mmc);

#endif