/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 */

#ifndef __MSM_RINGBUFFER_H__
#define __MSM_RINGBUFFER_H__

#include "drm/gpu_scheduler.h"
#include "msm_drv.h"
#define rbmemptr(ring, member) \
	((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))

#define rbmemptr_stats(ring, index, member) \
	(rbmemptr((ring), stats) + \
	((index) * sizeof(struct msm_gpu_submit_stats)) + \
	offsetof(struct msm_gpu_submit_stats, member))
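
/*
 * Illustrative sketch (not part of the original header): rbmemptr()
 * yields the GPU iova of a memptrs field, so it can be emitted into the
 * command stream as the destination address of a CP write, roughly:
 *
 *	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
 *	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
 *
 * and rbmemptr_stats(ring, index, cpcycles_start) similarly addresses a
 * single slot of the per-submit stats array.
 */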

struct msm_gpu_submit_stats {
	u64 cpcycles_start;
	u64 cpcycles_end;
	u64 alwayson_start;
	u64 alwayson_end;
};
#define MSM_GPU_SUBMIT_STATS_COUNT 64

struct msm_rbmemptrs {
	volatile uint32_t rptr;
	volatile uint32_t fence;

	volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
	volatile u64 ttbr0;
};

struct msm_cp_state {
	uint64_t ib1_base, ib2_base;
	uint32_t ib1_rem, ib2_rem;
};

struct msm_ringbuffer {
	struct msm_gpu *gpu;
	int id;
	struct drm_gem_object *bo;
	uint32_t *start, *end, *cur, *next;

	/*
	 * The job scheduler for this ring.
	 */
	struct drm_gpu_scheduler sched;

	/*
	 * List of in-flight submits on this ring.  Protected by submit_lock.
	 *
	 * Currently just submits that are already written into the ring, not
	 * submits that are still in drm_gpu_scheduler's queues.  At a later
	 * step we could probably move to letting drm_gpu_scheduler manage
	 * hangcheck detection and keep track of submit jobs that are in-
	 * flight.
	 */
	struct list_head submits;
	spinlock_t submit_lock;

	uint64_t iova;
	uint32_t hangcheck_fence;
	struct msm_rbmemptrs *memptrs;
	uint64_t memptrs_iova;
	struct msm_fence_context *fctx;

	/**
	 * hangcheck_progress_retries:
	 *
	 * The number of extra hangcheck duration cycles that we have given
	 * due to it appearing that the GPU is making forward progress.
	 *
	 * For GPU generations which support progress detection (see
	 * msm_gpu_funcs::progress()), if the GPU appears to be making
	 * progress (ie. the CP has advanced in the command stream), we'll
	 * allow up to DRM_MSM_HANGCHECK_PROGRESS_RETRIES expirations of the
	 * hangcheck timer before killing the job.  But to detect progress we
	 * need two sample points, so the duration of the hangcheck timer is
	 * halved.  In other words we'll let the submit run for up to:
	 *
	 *   (DRM_MSM_HANGCHECK_DEFAULT_PERIOD / 2) * (DRM_MSM_HANGCHECK_PROGRESS_RETRIES + 1)
	 */
	int hangcheck_progress_retries;
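
	/*
	 * Worked example (assuming the defaults in msm_drv.h at the time of
	 * writing, DRM_MSM_HANGCHECK_DEFAULT_PERIOD = 500 ms and
	 * DRM_MSM_HANGCHECK_PROGRESS_RETRIES = 3):
	 *
	 *	(500 ms / 2) * (3 + 1) = 1000 ms
	 *
	 * ie. a submit that keeps making forward progress gets up to roughly
	 * one second before it is declared hung.
	 */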

	/**
	 * last_cp_state: The state of the CP at the last call to
	 * gpu->progress()
	 */
	struct msm_cp_state last_cp_state;

	/*
	 * preempt_lock protects preemption and serializes wptr updates
	 * against preemption.  Can be acquired from irq context.
	 */
	spinlock_t preempt_lock;
};
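
/*
 * Illustrative sketch (not part of the original header): the in-flight
 * submit list is walked under submit_lock, e.g. by hangcheck-style code
 * (struct msm_gem_submit and its 'node' / 'seqno' members are assumed
 * here):
 *
 *	spin_lock(&ring->submit_lock);
 *	list_for_each_entry(submit, &ring->submits, node) {
 *		if (submit->seqno > ring->memptrs->fence)
 *			break;
 *		...
 *	}
 *	spin_unlock(&ring->submit_lock);
 */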

struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
		void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);

/* ringbuffer helpers (the parts that are the same for a3xx/a2xx/z180..) */

static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
	/*
	 * ring->next points to the current command being written - it won't
	 * be committed as ring->cur until the flush
	 */
	if (ring->next == ring->end)
		ring->next = ring->start;
	*(ring->next++) = data;
}
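
/*
 * Illustrative sketch (not part of the original header): callers emit the
 * ring contents one dword at a time; a packet is just an encoded header
 * dword followed by its payload.  Exact opcodes and encodings vary per
 * GPU generation, and pkt7_header() below is a hypothetical helper:
 *
 *	OUT_RING(ring, pkt7_header(CP_EVENT_WRITE, 4));
 *	OUT_RING(ring, CACHE_FLUSH_TS);
 *	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
 *	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
 *	OUT_RING(ring, submit->seqno);
 */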

#endif /* __MSM_RINGBUFFER_H__ */