ubwcp_hw.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "%s: hw: %s(): " fmt, KBUILD_MODNAME, __func__

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/qcom_scm.h>
#include <linux/delay.h>
#include <asm/barrier.h>

#include "ubwcp_hw.h"

static bool ubwcp_hw_trace_en;

//#define DBG(fmt, args...)
#define DBG(fmt, args...) \
        do { \
                if (unlikely(ubwcp_hw_trace_en)) \
                        pr_err(fmt "\n", ##args); \
        } while (0)

#define ERR(fmt, args...) pr_err_ratelimited(": %d: ~~~ERROR~~~: " fmt "\n", __LINE__, ##args)

MODULE_LICENSE("GPL");

#define PAGE_ADDR_4K(_x) ((_x) >> 12)

/* register offsets from base */
#define RANGE_LOWER               0x0000
#define RANGE_HIGHER              0x0800
#define DESC_BASE                 0x1000
#define DESC_BASE_STRIDE          0x1004
#define CONFIG                    0x1008
#define ENCODER_CONFIG            0x100C
#define ENCODER_STATUS            0x1010
#define DECODER_CONFIG            0x1014
#define DECODER_STATUS            0x1018
#define RANGE_CHECK_FAIL          0x101C
#define RANGE_CHECK_CONTROL       0x1020
#define RANGE_CHECK_STATUS        0x1060
#define FLUSH_CONTROL             0x10A0
#define FLUSH_STATUS              0x10A4
#define INTERRUPT_SET             0x10B0
#define INTERRUPT_STATUS_READ     0x10C0
#define INTERRUPT_STATUS_WRITE    0x10C4
#define INTERRUPT_STATUS_ENCODE   0x10C8
#define INTERRUPT_STATUS_DECODE   0x10CC
#define INTERRUPT_READ_SRC_LOW    0x1100
#define INTERRUPT_READ_SRC_HIGH   0x1104
#define INTERRUPT_WRITE_SRC_LOW   0x1108
#define INTERRUPT_WRITE_SRC_HIGH  0x110C
#define INTERRUPT_ENCODE_SRC_LOW  0x1110
#define INTERRUPT_ENCODE_SRC_HIGH 0x1114
#define INTERRUPT_DECODE_SRC_LOW  0x1118
#define INTERRUPT_DECODE_SRC_HIGH 0x111C
#define INTERRUPT_CLEAR           0x1120
#define QNS4_PARAMS               0x1124
#define OVERRIDE                  0x112C
#define VERSION_CONTROL           0x1130
#define SPARE                     0x1188

#define UBWCP_DEBUG_REG_RW

/* read/write register */
#if defined(UBWCP_DEBUG_REG_RW)
#define UBWCP_REG_READ(_base, _offset) \
        ({u32 _reg; \
        _reg = ioread32(_base + _offset); \
        DBG("READ : 0x%x -> 0x%08x", _offset, _reg); \
        _reg; })

#define UBWCP_REG_WRITE(_base, _offset, _value) \
        { \
                DBG("WRITE: 0x%x <- 0x%08x", _offset, _value); \
                iowrite32(_value, _base + _offset); \
        }
#else
#define UBWCP_REG_READ(_base, _offset)          ioread32(_base + _offset)
#define UBWCP_REG_WRITE(_base, _offset, _value) iowrite32(_value, _base + _offset)
#endif

#define UBWCP_REG_READ_NO_DBG(_base, _offset)          ioread32(_base + _offset)
#define UBWCP_REG_WRITE_NO_DBG(_base, _offset, _value) iowrite32(_value, _base + _offset)
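
/*
 * The *_NO_DBG variants are used on the interrupt clear/source-address paths
 * below, presumably to keep trace output out of interrupt handling even when
 * ubwcp_hw_trace_en is set.
 */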

void ubwcp_hw_interrupt_enable(void __iomem *base, u16 interrupt, bool enable)
{
        u32 value;

        value = UBWCP_REG_READ(base, INTERRUPT_SET);
        if (enable)
                value = value | (1 << interrupt);
        else
                value = value & ~(1 << interrupt);
        UBWCP_REG_WRITE(base, INTERRUPT_SET, value);
}
EXPORT_SYMBOL(ubwcp_hw_interrupt_enable);

void ubwcp_hw_interrupt_clear(void __iomem *base, u16 interrupt)
{
        UBWCP_REG_WRITE_NO_DBG(base, INTERRUPT_CLEAR, (1 << interrupt));
}
EXPORT_SYMBOL(ubwcp_hw_interrupt_clear);

int ubwcp_hw_interrupt_status(void __iomem *base, u16 interrupt)
{
        int value = -1;

        switch (interrupt) {
        case INTERRUPT_READ_ERROR:
                value = UBWCP_REG_READ(base, INTERRUPT_STATUS_READ) & 0x1;
                break;
        case INTERRUPT_WRITE_ERROR:
                value = UBWCP_REG_READ(base, INTERRUPT_STATUS_WRITE) & 0x1;
                break;
        case INTERRUPT_DECODE_ERROR:
                value = UBWCP_REG_READ(base, INTERRUPT_STATUS_DECODE) & 0x1;
                break;
        case INTERRUPT_ENCODE_ERROR:
                value = UBWCP_REG_READ(base, INTERRUPT_STATUS_ENCODE) & 0x1;
                break;
        default:
                /* TBD: fatal error? */
                break;
        }

        return value;
}

/* returns the address which caused this interrupt */
u64 ubwcp_hw_interrupt_src_address(void __iomem *base, u16 interrupt)
{
        u32 addr_low;
        u32 addr_high;

        switch (interrupt) {
        case INTERRUPT_READ_ERROR:
                addr_low  = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_READ_SRC_LOW);
                addr_high = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_READ_SRC_HIGH) & 0xF;
                break;
        case INTERRUPT_WRITE_ERROR:
                addr_low  = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_WRITE_SRC_LOW);
                addr_high = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_WRITE_SRC_HIGH) & 0xF;
                break;
        case INTERRUPT_DECODE_ERROR:
                addr_low  = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_DECODE_SRC_LOW);
                addr_high = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_DECODE_SRC_HIGH) & 0xF;
                break;
        case INTERRUPT_ENCODE_ERROR:
                addr_low  = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_ENCODE_SRC_LOW);
                addr_high = UBWCP_REG_READ_NO_DBG(base, INTERRUPT_ENCODE_SRC_HIGH) & 0xF;
                break;
        default:
                /* TBD: fatal error? */
                addr_low  = 0x0;
                addr_high = 0x0;
                break;
        }

        /*
         * Combine the two halves, assuming the HIGH register nibble holds
         * address bits [35:32]; the shift must be done in 64 bits so those
         * bits are not lost.
         */
        return ((u64)addr_high << 32) | addr_low;
}
EXPORT_SYMBOL(ubwcp_hw_interrupt_src_address);
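
/*
 * Example interrupt-handling flow using the helpers above. This is only an
 * illustrative sketch (requires <linux/interrupt.h>); the handler name, the
 * irq/data arguments and the idea of passing `base` as the handler data are
 * assumptions, not part of this file:
 *
 *	static irqreturn_t ubwcp_read_err_handler(int irq, void *data)
 *	{
 *		void __iomem *base = data;
 *		u64 addr;
 *
 *		if (ubwcp_hw_interrupt_status(base, INTERRUPT_READ_ERROR) == 1) {
 *			addr = ubwcp_hw_interrupt_src_address(base, INTERRUPT_READ_ERROR);
 *			pr_err("UBWC-P read error at 0x%llx\n", addr);
 *			ubwcp_hw_interrupt_clear(base, INTERRUPT_READ_ERROR);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */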

/*
 * @index: index of buffer (from 0 to 255)
 * @pa   : ULA PA start address
 * @size : size of ULA PA address range
 */
void ubwcp_hw_set_range_check(void __iomem *base, u16 index, phys_addr_t pa, size_t size)
{
        u32 lower;
        u32 higher;

        lower  = PAGE_ADDR_4K(pa);
        higher = PAGE_ADDR_4K(pa + size);
        UBWCP_REG_WRITE(base, RANGE_LOWER + index*4, lower);
        UBWCP_REG_WRITE(base, RANGE_HIGHER + index*4, higher);
}
EXPORT_SYMBOL(ubwcp_hw_set_range_check);

/* Enable range check:
 * identify the control register for this index.
 * 32 bits in each control register, up to 8 registers for 256 indexes.
 */
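/*
 * Example: index 77 -> ctrl_reg = 77 >> 5 = 2 (the register at offset
 * RANGE_CHECK_CONTROL + 0x8) and bit position 77 & 0x1F = 13.
 */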
void ubwcp_hw_enable_range_check(void __iomem *base, u16 index)
{
        u32 val;
        u16 ctrl_reg = index >> 5;

        val = UBWCP_REG_READ(base, RANGE_CHECK_CONTROL + ctrl_reg*4);
        val |= (1 << (index & 0x1F));
        UBWCP_REG_WRITE(base, RANGE_CHECK_CONTROL + ctrl_reg*4, val);
}
EXPORT_SYMBOL(ubwcp_hw_enable_range_check);
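
/*
 * Example pairing of the two calls above (illustrative only; the index, ULA PA
 * start address and size are hypothetical values):
 *
 *	ubwcp_hw_set_range_check(base, 5, 0x800000000ULL, SZ_256M);
 *	ubwcp_hw_enable_range_check(base, 5);
 */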

int ubwcp_hw_flush(void __iomem *base)
{
        u32 flush_complete = 0;
        u32 count_no_delay = 1000;
        u32 count_delay = 2000;
        u32 count = count_no_delay + count_delay;

        UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x3);
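        /*
         * Poll for completion: roughly the first 1000 iterations spin without
         * delay, the remaining ~2000 wait 1us each, bounding the total wait
         * at about 2ms plus register access time.
         */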
        do {
                if (count < count_delay)
                        udelay(1);
                flush_complete = UBWCP_REG_READ(base, FLUSH_STATUS) & 0x1;
                if (flush_complete) {
                        UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x0);
                        return 0;
                }
        } while (count--);

        ERR("~~~~~ FLUSH FAILED ~~~~~");
        return -1;
}
EXPORT_SYMBOL(ubwcp_hw_flush);

/* Disable range check with flush */
int ubwcp_hw_disable_range_check_with_flush(void __iomem *base, u16 index)
{
        u32 val;
        u16 ctrl_reg = index >> 5;

        /*
         * It is not clear that the isb() calls in this sequence are
         * required; we may be able to remove them.
         */

        /* ensure all CMOs have completed */
        isb();

        /* disable range check */
        val = UBWCP_REG_READ(base, RANGE_CHECK_CONTROL + ctrl_reg*4);
        val &= ~(1 << (index & 0x1F));
        UBWCP_REG_WRITE(base, RANGE_CHECK_CONTROL + ctrl_reg*4, val);
        isb();

        /* assert flush */
        UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x3);

        return ubwcp_hw_flush(base);
}
EXPORT_SYMBOL(ubwcp_hw_disable_range_check_with_flush);

void ubwcp_hw_set_buf_desc(void __iomem *base, u64 desc_addr, u16 desc_stride)
{
        UBWCP_REG_WRITE(base, DESC_BASE, PAGE_ADDR_4K(desc_addr));
        UBWCP_REG_WRITE(base, DESC_BASE_STRIDE, desc_stride);
}
EXPORT_SYMBOL(ubwcp_hw_set_buf_desc);
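
/*
 * Note: DESC_BASE is programmed with the 4KB page number of desc_addr, so its
 * low 12 bits are discarded; the descriptor buffer is therefore expected to be
 * 4KB aligned.
 */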

/* Value set here is returned upon read of an address that fails range check.
 * Writes to such an address are ignored.
 * A range_check_fail interrupt is also generated, if enabled.
 * If this register is not programmed, the default value is 0x92929292.
 */
void ubwcp_hw_set_default_range_check_value(void __iomem *base, u32 val)
{
        UBWCP_REG_WRITE(base, RANGE_CHECK_FAIL, val);
}

void ubwcp_hw_version(void __iomem *base, u32 *major, u32 *minor)
{
        u32 version;

        version = UBWCP_REG_READ(base, VERSION_CONTROL);
        *major = version & 0xF;
        *minor = (version & 0xF0) >> 4;
}
EXPORT_SYMBOL(ubwcp_hw_version);
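
/*
 * Example: a (hypothetical) VERSION_CONTROL value of 0x21 decodes to
 * major = 1, minor = 2.
 */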

/* TBD: */
void ubwcp_hw_macro_tile_config(void __iomem *base)
{
        //TODO: In future add in support for LP4.
        //May be able to determine DDR version via call to
        //of_fdt_get_ddrtype().

        /*
         * For Lanai assume 4 Channel LP5 DDR, so from the HSR:
         *   MAL Size                                  32B
         *   Highest Bank Bit                          16
         *   Level 1 Bank Swizzling                    Disable
         *   Level 2 Bank Swizzling                    Enable
         *   Level 3 Bank Swizzling                    Enable
         *   Bank Spreading                            Enable
         *   Macrotiling Configuration (Num Channels)  8
         */
        UBWCP_REG_WRITE(base, CONFIG, 0x1E3);
}

/* TBD: */
void ubwcp_hw_decoder_config(void __iomem *base)
{
        /*
         * For Lanai assume the AMSBC (UBWC4.4/4.3) algorithm is used == b11.
         * For Lanai assume 4 Channel LP5 DDR, so MAL Size 32B == b0.
         */
        UBWCP_REG_WRITE(base, DECODER_CONFIG, 0x7);
}

/* TBD: */
void ubwcp_hw_encoder_config(void __iomem *base)
{
        /*
         * For Lanai assume the AMSBC (UBWC4.4/4.3) algorithm is used == b11.
         * For Lanai assume 4 Channel LP5 DDR, so MAL Size 32B == b0.
         */
        UBWCP_REG_WRITE(base, ENCODER_CONFIG, 0x7);
}

void ubwcp_hw_power_vote_status(void __iomem *pwr_ctrl, u8 *vote, u8 *status)
{
        u32 reg;

        reg = UBWCP_REG_READ(pwr_ctrl, 0);
        *vote   = (reg & BIT(0)) >> 0;
        *status = (reg & BIT(31)) >> 31;
}

void ubwcp_hw_one_time_init(void __iomem *base)
{
        u32 reg;

        /* Spare reg config: set bit-9 (SCC) and bit-1 (padding) */
        reg = UBWCP_REG_READ(base, SPARE);
        reg |= BIT(9) | BIT(1);
        UBWCP_REG_WRITE(base, SPARE, reg);

        /* Configure SID */
        reg = UBWCP_REG_READ(base, QNS4_PARAMS);
        reg &= ~(0x3F);
        reg |= 0x1;      /* desc buffer */
        reg |= (0 << 3); /* pixel data */
        UBWCP_REG_WRITE(base, QNS4_PARAMS, reg);

        ubwcp_hw_decoder_config(base);
        ubwcp_hw_encoder_config(base);
        ubwcp_hw_macro_tile_config(base);
}
EXPORT_SYMBOL(ubwcp_hw_one_time_init);
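
/*
 * One possible bring-up sequence using the helpers in this file. This is an
 * illustrative sketch only; the ordering, the descriptor address/stride
 * variables and the fill value are assumptions, not something this file
 * mandates:
 *
 *	ubwcp_hw_one_time_init(base);
 *	ubwcp_hw_set_buf_desc(base, desc_pa, desc_stride);
 *	ubwcp_hw_set_default_range_check_value(base, 0xDEADDEAD);
 *	ubwcp_hw_interrupt_enable(base, INTERRUPT_READ_ERROR, true);
 */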

void ubwcp_hw_trace_set(bool value)
{
        ubwcp_hw_trace_en = value;
}
EXPORT_SYMBOL(ubwcp_hw_trace_set);

void ubwcp_hw_trace_get(bool *value)
{
        *value = ubwcp_hw_trace_en;
}
EXPORT_SYMBOL(ubwcp_hw_trace_get);