// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <[email protected]> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <[email protected]> */
/* Copyright 2019 Collabora ltd. */
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"
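
/*
 * GPU interrupt top half: decode GPU_INT_STAT, log any GPU fault (masking
 * further GPU interrupts on error so a fault storm cannot wedge the CPU),
 * forward performance-counter events, then acknowledge everything that was
 * observed via GPU_INT_CLEAR.
 */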
static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 state = gpu_read(pfdev, GPU_INT_STAT);
	u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);

	if (!state)
		return IRQ_NONE;

	if (state & GPU_IRQ_MASK_ERROR) {
		u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
		address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);

		dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panfrost_exception_name(fault_status & 0xFF),
			 address);

		if (state & GPU_IRQ_MULTIPLE_FAULT)
			dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");

		gpu_write(pfdev, GPU_INT_MASK, 0);
	}

	if (state & GPU_IRQ_PERFCNT_SAMPLE_COMPLETED)
		panfrost_perfcnt_sample_done(pfdev);

	if (state & GPU_IRQ_CLEAN_CACHES_COMPLETED)
		panfrost_perfcnt_clean_cache_done(pfdev);

	gpu_write(pfdev, GPU_INT_CLEAR, state);

	return IRQ_HANDLED;
}
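
/*
 * Soft-reset the GPU: mask and ack the reset interrupt, issue SOFT_RESET,
 * then poll GPU_INT_RAWSTAT (the raw status is visible even while the
 * interrupt is masked) for RESET_COMPLETED, giving the GPU up to 10ms.
 * On success all interrupt sources are re-enabled.
 */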
int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;

	gpu_write(pfdev, GPU_INT_MASK, 0);
	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
	gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);

	ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
		val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
	if (ret) {
		dev_err(pfdev->dev, "gpu soft reset timed out\n");
		return ret;
	}

	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
	gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);

	return 0;
}
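
/*
 * Vendor quirk for Amlogic SoC integrations, intended to be called through
 * the pfdev->comp->vendor_quirk hook (see panfrost_gpu_init_quirks() below).
 */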
void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
{
	/*
	 * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
	 * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
	 * to operate correctly.
	 */
	gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
	gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
}
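
/*
 * Apply model- and revision-specific workarounds to the shader core, tiler
 * and job-manager config registers before the cores are powered up, then
 * give the platform integration a chance to add its own quirks.
 */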
static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
{
	u32 quirks = 0;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8443) ||
	    panfrost_has_hw_issue(pfdev, HW_ISSUE_11035))
		quirks |= SC_LS_PAUSEBUFFER_DISABLE;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10327))
		quirks |= SC_SDC_DISABLE_OQ_DISCARD;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10797))
		quirks |= SC_ENABLE_TEXGRD_FLAGS;

	if (!panfrost_has_hw_issue(pfdev, GPUCORE_1619)) {
		if (panfrost_model_cmp(pfdev, 0x750) < 0) /* T60x, T62x, T72x */
			quirks |= SC_LS_ATTR_CHECK_DISABLE;
		else if (panfrost_model_cmp(pfdev, 0x880) <= 0) /* T76x, T8xx */
			quirks |= SC_LS_ALLOW_ATTR_TYPES;
	}

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_TTRX_2968_TTRX_3162))
		quirks |= SC_VAR_ALGORITHM;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_TLS_HASHING))
		quirks |= SC_TLS_HASH_ENABLE;

	if (quirks)
		gpu_write(pfdev, GPU_SHADER_CONFIG, quirks);

	quirks = gpu_read(pfdev, GPU_TILER_CONFIG);

	/* Set tiler clock gate override if required */
	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_T76X_3953))
		quirks |= TC_CLOCK_GATE_OVERRIDE;

	gpu_write(pfdev, GPU_TILER_CONFIG, quirks);

	quirks = 0;
	if ((panfrost_model_eq(pfdev, 0x860) || panfrost_model_eq(pfdev, 0x880)) &&
	    pfdev->features.revision >= 0x2000)
		quirks |= JM_MAX_JOB_THROTTLE_LIMIT << JM_JOB_THROTTLE_LIMIT_SHIFT;
	else if (panfrost_model_eq(pfdev, 0x6000) &&
		 pfdev->features.coherency_features == COHERENCY_ACE)
		quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
			   JM_FORCE_COHERENCY_FEATURES_SHIFT;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_IDVS_GROUP_SIZE))
		quirks |= JM_DEFAULT_IDVS_GROUP_SIZE << JM_IDVS_GROUP_SIZE_SHIFT;

	if (quirks)
		gpu_write(pfdev, GPU_JM_CONFIG, quirks);

	/* Platform-specific quirks go here */
	if (pfdev->comp->vendor_quirk)
		pfdev->comp->vendor_quirk(pfdev);
}
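
/*
 * Static description of the known GPU models: per-model baseline feature
 * and issue masks plus up to MAX_HW_REVS revision-specific issue sets,
 * matched against the decoded GPU_ID at probe time.
 */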
#define MAX_HW_REVS 6

struct panfrost_model {
	const char *name;
	u32 id;
	u32 id_mask;
	u64 features;
	u64 issues;
	struct {
		u32 revision;
		u64 issues;
	} revs[MAX_HW_REVS];
};

#define GPU_MODEL(_name, _id, ...) \
{\
	.name = __stringify(_name),				\
	.id = _id,						\
	.features = hw_features_##_name,			\
	.issues = hw_issues_##_name,				\
	.revs = { __VA_ARGS__ },				\
}

#define GPU_REV_EXT(name, _rev, _p, _s, stat) \
{\
	.revision = (_rev) << 12 | (_p) << 4 | (_s),		\
	.issues = hw_issues_##name##_r##_rev##p##_p##stat,	\
}
#define GPU_REV(name, r, p) GPU_REV_EXT(name, r, p, 0, )
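
/*
 * For example, GPU_REV(t620, 0, 1) expands to
 * { .revision = 0x010, .issues = hw_issues_t620_r0p1 },
 * i.e. the r0p1 revision of the T620 with its dedicated issue mask.
 */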
static const struct panfrost_model gpu_models[] = {
	/* T60x has an oddball version */
	GPU_MODEL(t600, 0x600,
		GPU_REV_EXT(t600, 0, 0, 1, _15dev0)),
	GPU_MODEL(t620, 0x620,
		GPU_REV(t620, 0, 1), GPU_REV(t620, 1, 0)),
	GPU_MODEL(t720, 0x720),
	GPU_MODEL(t760, 0x750,
		GPU_REV(t760, 0, 0), GPU_REV(t760, 0, 1),
		GPU_REV_EXT(t760, 0, 1, 0, _50rel0),
		GPU_REV(t760, 0, 2), GPU_REV(t760, 0, 3)),
	GPU_MODEL(t820, 0x820),
	GPU_MODEL(t830, 0x830),
	GPU_MODEL(t860, 0x860),
	GPU_MODEL(t880, 0x880),
	GPU_MODEL(g71, 0x6000,
		GPU_REV_EXT(g71, 0, 0, 1, _05dev0)),
	GPU_MODEL(g72, 0x6001),
	GPU_MODEL(g51, 0x7000),
	GPU_MODEL(g76, 0x7001),
	GPU_MODEL(g52, 0x7002),
	GPU_MODEL(g31, 0x7003,
		GPU_REV(g31, 1, 0)),
	GPU_MODEL(g57, 0x9001,
		GPU_REV(g57, 0, 0)),
};
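
/*
 * Probe the hardware: latch every feature/present register into
 * pfdev->features, normalise the GPU_ID, look the model up in gpu_models[]
 * to derive the feature and issue bitmaps, and log a summary.
 */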
static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
{
	u32 gpu_id, num_js, major, minor, status, rev;
	const char *name = "unknown";
	u64 hw_feat = 0;
	u64 hw_issues = hw_issues_all;
	const struct panfrost_model *model;
	int i;

	pfdev->features.l2_features = gpu_read(pfdev, GPU_L2_FEATURES);
	pfdev->features.core_features = gpu_read(pfdev, GPU_CORE_FEATURES);
	pfdev->features.tiler_features = gpu_read(pfdev, GPU_TILER_FEATURES);
	pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
	pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
	pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
	pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
	pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
	pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
	pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
	pfdev->features.afbc_features = gpu_read(pfdev, GPU_AFBC_FEATURES);
	for (i = 0; i < 4; i++)
		pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));

	pfdev->features.as_present = gpu_read(pfdev, GPU_AS_PRESENT);

	pfdev->features.js_present = gpu_read(pfdev, GPU_JS_PRESENT);
	num_js = hweight32(pfdev->features.js_present);
	for (i = 0; i < num_js; i++)
		pfdev->features.js_features[i] = gpu_read(pfdev, GPU_JS_FEATURES(i));

	pfdev->features.shader_present = gpu_read(pfdev, GPU_SHADER_PRESENT_LO);
	pfdev->features.shader_present |= (u64)gpu_read(pfdev, GPU_SHADER_PRESENT_HI) << 32;

	pfdev->features.tiler_present = gpu_read(pfdev, GPU_TILER_PRESENT_LO);
	pfdev->features.tiler_present |= (u64)gpu_read(pfdev, GPU_TILER_PRESENT_HI) << 32;

	pfdev->features.l2_present = gpu_read(pfdev, GPU_L2_PRESENT_LO);
	pfdev->features.l2_present |= (u64)gpu_read(pfdev, GPU_L2_PRESENT_HI) << 32;
	pfdev->features.nr_core_groups = hweight64(pfdev->features.l2_present);

	pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
	pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;

	pfdev->features.thread_tls_alloc = gpu_read(pfdev, GPU_THREAD_TLS_ALLOC);
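
	/*
	 * GPU_ID is laid out as [31:16] product id, [15:12] major ("r"),
	 * [11:4] minor ("p") and [3:0] status, which is what the shifts
	 * below decode.
	 */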
	gpu_id = gpu_read(pfdev, GPU_ID);
	pfdev->features.revision = gpu_id & 0xffff;
	pfdev->features.id = gpu_id >> 16;

	/* The T60x has an oddball ID value. Fix it up to the standard Midgard
	 * format so we (and userspace) don't have to special case it.
	 */
	if (pfdev->features.id == 0x6956)
		pfdev->features.id = 0x0600;

	major = (pfdev->features.revision >> 12) & 0xf;
	minor = (pfdev->features.revision >> 4) & 0xff;
	status = pfdev->features.revision & 0xf;
	rev = pfdev->features.revision;
	gpu_id = pfdev->features.id;
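
	/*
	 * Pick the matching model, preferring an exact revision match but
	 * falling back to an entry whose major/minor match once the status
	 * nibble is ignored.
	 */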
	for (model = gpu_models; model->name; model++) {
		int best = -1;

		if (!panfrost_model_eq(pfdev, model->id))
			continue;

		name = model->name;
		hw_feat = model->features;
		hw_issues |= model->issues;

		for (i = 0; i < MAX_HW_REVS; i++) {
			if (model->revs[i].revision == rev) {
				best = i;
				break;
			} else if (model->revs[i].revision == (rev & ~0xf))
				best = i;
		}

		if (best >= 0)
			hw_issues |= model->revs[best].issues;

		break;
	}

	bitmap_from_u64(pfdev->features.hw_features, hw_feat);
	bitmap_from_u64(pfdev->features.hw_issues, hw_issues);

	dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
		 name, gpu_id, major, minor, status);
	dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
		 pfdev->features.hw_features,
		 pfdev->features.hw_issues);

	dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
		 pfdev->features.l2_features,
		 pfdev->features.core_features,
		 pfdev->features.tiler_features,
		 pfdev->features.mem_features,
		 pfdev->features.mmu_features,
		 pfdev->features.as_present,
		 pfdev->features.js_present);

	dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx",
		 pfdev->features.shader_present, pfdev->features.l2_present);
}
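
/*
 * Power up the L2, shader and tiler cores, restricting the L2/shader masks
 * to the first core group, and poll each *_READY register (20ms for the L2
 * and shaders, 1ms for the tiler) until the cores report ready.
 */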
void panfrost_gpu_power_on(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;
	u64 core_mask = U64_MAX;

	panfrost_gpu_init_quirks(pfdev);

	if (pfdev->features.l2_present != 1) {
		/*
		 * Only support one core group now.
		 * ~(l2_present - 1) unsets all bits in l2_present except
		 * the bottom bit. (l2_present - 2) has all the bits in
		 * the first core group set. AND them together to generate
		 * a mask of cores in the first core group.
		 */
		core_mask = ~(pfdev->features.l2_present - 1) &
			     (pfdev->features.l2_present - 2);
		dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
			      hweight64(core_mask),
			      hweight64(pfdev->features.shader_present));
	}
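
	/*
	 * Worked example for core_mask above: with two core groups,
	 * l2_present == 0x11 gives core_mask == ~0x10 & 0x0f == 0x0f,
	 * i.e. only cores 0-3 of the first core group are kept.
	 */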
	gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
		val, val == (pfdev->features.l2_present & core_mask),
		100, 20000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu L2");

	gpu_write(pfdev, SHADER_PWRON_LO,
		  pfdev->features.shader_present & core_mask);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
		val, val == (pfdev->features.shader_present & core_mask),
		100, 20000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu shader");

	gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
		val, val == pfdev->features.tiler_present, 100, 1000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu tiler");
}
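
/*
 * Power the cores back down.  The *_PWROFF registers take a mask of cores
 * to power off, so request every present core rather than writing 0
 * (which would power off nothing).
 */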
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
	gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
	gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
	gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
}
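
/*
 * One-time GPU bring-up at probe: soft-reset, read out the feature
 * registers, size the DMA mask from the number of MMU VA bits (bits 15:8
 * of MMU_FEATURES), hook up the "gpu" interrupt and power the cores on.
 */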
int panfrost_gpu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	err = panfrost_gpu_soft_reset(pfdev);
	if (err)
		return err;

	panfrost_gpu_init_features(pfdev);

	err = dma_set_mask_and_coherent(pfdev->dev,
		DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
	if (err)
		return err;

	dma_set_max_seg_size(pfdev->dev, UINT_MAX);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request gpu irq");
		return err;
	}

	panfrost_gpu_power_on(pfdev);

	return 0;
}

void panfrost_gpu_fini(struct panfrost_device *pfdev)
{
	panfrost_gpu_power_off(pfdev);
}
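
/*
 * Report the latest cache-flush ID.  This is only meaningful when flush
 * reduction is supported and the GPU is currently powered, so take a
 * runtime-PM reference only if the device is already active
 * (pm_runtime_get_if_in_use()) and fall back to 0 otherwise.
 */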
u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
{
	u32 flush_id;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) {
		/* Flush reduction only makes sense when the GPU is kept powered on between jobs */
		if (pm_runtime_get_if_in_use(pfdev->dev)) {
			flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
			pm_runtime_put(pfdev->dev);
			return flush_id;
		}
	}

	return 0;
}