// SPDX-License-Identifier: MIT
/*
 * Copyright © 2013-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_pcode.h"
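
/*
 * PCODE reports the outcome of a mailbox transaction in the
 * GEN6_PCODE_ERROR_MASK bits of the mailbox register. The two helpers
 * below translate that field into a negative errno, one for the gen6
 * encoding and one for the gen7+ encoding.
 */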
static int gen6_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static int gen7_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
		return -ENXIO;
	case GEN11_PCODE_LOCKED:
		return -EBUSY;
	case GEN11_PCODE_REJECTED:
		return -EACCES;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}
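
/*
 * Low-level mailbox transaction: hand the data dword(s) to PCODE, set
 * GEN6_PCODE_READY together with the command in the mailbox register and
 * wait for PCODE to clear READY again, first by busy-waiting for
 * @fast_timeout_us and then by sleeping for up to @slow_timeout_ms. The
 * caller must hold sb_lock.
 */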
static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox,
			  u32 *val, u32 *val1,
			  int fast_timeout_us, int slow_timeout_ms,
			  bool is_read)
{
	lockdep_assert_held(&uncore->i915->sb_lock);

	/*
	 * GEN6_PCODE_* are outside of the forcewake domain, so we can use
	 * the intel_uncore_read/write_fw variants to reduce the amount of
	 * work required when reading/writing.
	 */
	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	if (GRAPHICS_VER(uncore->i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}
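
/**
 * snb_pcode_read - perform a read transaction on the PCODE mailbox
 * @uncore: uncore
 * @mbox: PCODE mailbox ID
 * @val: on entry the data dword to send, on success the dword read back
 * @val1: optional second data dword, may be NULL
 *
 * Grabs sb_lock and runs a single mailbox transaction with a 500 us
 * busy-wait and a 20 ms sleeping timeout.
 *
 * Returns 0 on success or a negative errno decoded from the mailbox
 * status bits.
 */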
int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&uncore->i915->sb_lock);
	err = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true);
	mutex_unlock(&uncore->i915->sb_lock);

	if (err) {
		drm_dbg(&uncore->i915->drm,
			"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
			mbox, __builtin_return_address(0), err);
	}

	return err;
}
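
/**
 * snb_pcode_write_timeout - perform a write transaction on the PCODE mailbox
 * @uncore: uncore
 * @mbox: PCODE mailbox ID
 * @val: data dword to write
 * @fast_timeout_us: busy-wait timeout in microseconds
 * @slow_timeout_ms: sleeping-wait timeout in milliseconds
 *
 * Returns 0 on success or a negative errno decoded from the mailbox
 * status bits.
 */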
int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
			    int fast_timeout_us, int slow_timeout_ms)
{
	int err;

	mutex_lock(&uncore->i915->sb_lock);
	err = __snb_pcode_rw(uncore, mbox, &val, NULL,
			     fast_timeout_us, slow_timeout_ms, false);
	mutex_unlock(&uncore->i915->sb_lock);

	if (err) {
		drm_dbg(&uncore->i915->drm,
			"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
			val, mbox, __builtin_return_address(0), err);
	}

	return err;
}
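
/*
 * Send @request to @mbox once and report whether PCODE acknowledged it,
 * i.e. whether the transaction itself succeeded and the reply dword,
 * masked by @reply_mask, equals @reply. The raw transaction result is
 * stored in @status for the caller.
 */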
static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	*status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true);

	return (*status == 0) && ((request & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @uncore: uncore
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if this times out, for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&uncore->i915->sb_lock);

#define COND \
	skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50 ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	drm_dbg_kms(&uncore->i915->drm,
		    "PCODE timeout, retrying with preemption disabled\n");
	drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&uncore->i915->sb_lock);
	return status ? status : ret;
#undef COND
}
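
/*
 * A typical caller polls until PCODE signals readiness for a change. A
 * minimal sketch, modeled on the cdclk code with the macro names assumed
 * to come from i915_reg.h and i915 being the caller's drm_i915_private:
 *
 *	ret = skl_pcode_request(&i915->uncore, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 *	if (ret)
 *		drm_err(&i915->drm,
 *			"Failed to inform PCU about cdclk change (%d)\n", ret);
 */

/**
 * intel_pcode_init - wait for PCODE uncore initialization on discrete GPUs
 * @uncore: uncore
 *
 * On integrated parts this is a no-op. On discrete GPUs, poll
 * DG1_PCODE_STATUS for up to 180 seconds until PCODE reports that its
 * uncore initialization has completed.
 */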
int intel_pcode_init(struct intel_uncore *uncore)
{
	if (!IS_DGFX(uncore->i915))
		return 0;

	return skl_pcode_request(uncore, DG1_PCODE_STATUS,
				 DG1_UNCORE_GET_INIT_STATUS,
				 DG1_UNCORE_INIT_STATUS_COMPLETE,
				 DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
}
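
/**
 * snb_pcode_read_p - read a value from PCODE with command parameters
 * @uncore: uncore
 * @mbcmd: PCODE command ID
 * @p1: first command parameter
 * @p2: second command parameter
 * @val: returns the read value
 *
 * Builds the mailbox word from @mbcmd, @p1 and @p2, takes a runtime PM
 * wakeref around the access and performs the read.
 *
 * Returns 0 on success or a negative errno.
 */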
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
{
	intel_wakeref_t wakeref;
	u32 mbox;
	int err;

	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);

	with_intel_runtime_pm(uncore->rpm, wakeref)
		err = snb_pcode_read(uncore, mbox, val, NULL);

	return err;
}
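
/**
 * snb_pcode_write_p - write a value to PCODE with command parameters
 * @uncore: uncore
 * @mbcmd: PCODE command ID
 * @p1: first command parameter
 * @p2: second command parameter
 * @val: value to write
 *
 * Builds the mailbox word from @mbcmd, @p1 and @p2, takes a runtime PM
 * wakeref around the access and performs the write. snb_pcode_write() is
 * expected to be the default-timeout wrapper around
 * snb_pcode_write_timeout(), declared in intel_pcode.h.
 *
 * Returns 0 on success or a negative errno.
 */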
int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
{
	intel_wakeref_t wakeref;
	u32 mbox;
	int err;

	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);

	with_intel_runtime_pm(uncore->rpm, wakeref)
		err = snb_pcode_write(uncore, mbox, val);

	return err;
}