virtgpu_fence.c
  1. /*
  2. * Copyright (C) 2015 Red Hat, Inc.
  3. * All Rights Reserved.
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining
  6. * a copy of this software and associated documentation files (the
  7. * "Software"), to deal in the Software without restriction, including
  8. * without limitation the rights to use, copy, modify, merge, publish,
  9. * distribute, sublicense, and/or sell copies of the Software, and to
  10. * permit persons to whom the Software is furnished to do so, subject to
  11. * the following conditions:
  12. *
  13. * The above copyright notice and this permission notice (including the
  14. * next paragraph) shall be included in all copies or substantial
  15. * portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21. * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22. * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23. * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24. */
  25. #include <trace/events/dma_fence.h>
  26. #include "virtgpu_drv.h"
  27. #define to_virtio_gpu_fence(x) \
  28. container_of(x, struct virtio_gpu_fence, f)
  29. static const char *virtio_gpu_get_driver_name(struct dma_fence *f)
  30. {
  31. return "virtio_gpu";
  32. }
  33. static const char *virtio_gpu_get_timeline_name(struct dma_fence *f)
  34. {
  35. return "controlq";
  36. }
  37. static bool virtio_gpu_fence_signaled(struct dma_fence *f)
  38. {
  39. /* leaked fence outside driver before completing
  40. * initialization with virtio_gpu_fence_emit.
  41. */
  42. WARN_ON_ONCE(f->seqno == 0);
  43. return false;
  44. }
  45. static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
  46. {
  47. snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
  48. }
  49. static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
  50. int size)
  51. {
  52. struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
  53. snprintf(str, size, "%llu",
  54. (u64)atomic64_read(&fence->drv->last_fence_id));
  55. }
/* dma_fence_ops shared by all virtio-gpu fences.  Signaling is driven by
 * virtio_gpu_fence_event_process() on host responses, not by polling
 * .signaled (which always returns false).
 */
static const struct dma_fence_ops virtio_gpu_fence_ops = {
	.get_driver_name = virtio_gpu_get_driver_name,
	.get_timeline_name = virtio_gpu_get_timeline_name,
	.signaled = virtio_gpu_fence_signaled,
	.fence_value_str = virtio_gpu_fence_value_str,
	.timeline_value_str = virtio_gpu_timeline_value_str,
};
  63. struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
  64. uint64_t base_fence_ctx,
  65. uint32_t ring_idx)
  66. {
  67. uint64_t fence_context = base_fence_ctx + ring_idx;
  68. struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
  69. struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
  70. GFP_KERNEL);
  71. if (!fence)
  72. return fence;
  73. fence->drv = drv;
  74. fence->ring_idx = ring_idx;
  75. fence->emit_fence_info = !(base_fence_ctx == drv->context);
  76. /* This only partially initializes the fence because the seqno is
  77. * unknown yet. The fence must not be used outside of the driver
  78. * until virtio_gpu_fence_emit is called.
  79. */
  80. dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
  81. fence_context, 0);
  82. return fence;
  83. }
/**
 * virtio_gpu_fence_emit - finish fence init and attach it to a command
 * @vgdev: virtio-gpu device
 * @cmd_hdr: control header of the command the fence will complete with
 * @fence: fence previously returned by virtio_gpu_fence_alloc()
 *
 * Assigns the fence its seqno/fence_id, puts it on the driver's pending
 * list (taking a reference owned by that list), and marks @cmd_hdr so the
 * host will report completion with this fence id.
 */
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_ctrl_hdr *cmd_hdr,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	unsigned long irq_flags;

	/* Seqno assignment and list insertion happen atomically under
	 * drv->lock so event processing never sees a half-set fence.
	 */
	spin_lock_irqsave(&drv->lock, irq_flags);
	fence->fence_id = fence->f.seqno = ++drv->current_fence_id;
	/* Reference held by drv->fences; dropped when the fence signals. */
	dma_fence_get(&fence->f);
	list_add_tail(&fence->node, &drv->fences);
	spin_unlock_irqrestore(&drv->lock, irq_flags);

	trace_dma_fence_emit(&fence->f);

	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
	cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);

	/* Only currently defined fence param. */
	if (fence->emit_fence_info) {
		cmd_hdr->flags |=
			cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
		cmd_hdr->ring_idx = (u8)fence->ring_idx;
	}
}
/**
 * virtio_gpu_fence_event_process - signal fences completed by the host
 * @vgdev: virtio-gpu device
 * @fence_id: fence id the host reports as completed
 *
 * Finds the pending fence matching @fence_id, then signals it together
 * with every pending fence on the same dma-fence context that has a
 * strictly smaller seqno (the host completes fences in order per
 * context).  Each signaled fence is removed from the pending list, its
 * drm event (if any) is delivered, and the list's reference is dropped.
 */
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
				    u64 fence_id)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *signaled, *curr, *tmp;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	/* Record the newest completed id for timeline_value_str readers. */
	atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
	list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
		if (fence_id != curr->fence_id)
			continue;

		signaled = curr;

		/*
		 * Signal any fences with a strictly smaller sequence number
		 * than the current signaled fence.
		 */
		list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
			/* dma-fence contexts must match */
			if (signaled->f.context != curr->f.context)
				continue;

			if (!dma_fence_is_later(&signaled->f, &curr->f))
				continue;

			dma_fence_signal_locked(&curr->f);
			if (curr->e) {
				drm_send_event(vgdev->ddev, &curr->e->base);
				curr->e = NULL;
			}
			list_del(&curr->node);
			/* Drop the reference taken by virtio_gpu_fence_emit. */
			dma_fence_put(&curr->f);
		}

		dma_fence_signal_locked(&signaled->f);
		if (signaled->e) {
			drm_send_event(vgdev->ddev, &signaled->e->base);
			signaled->e = NULL;
		}
		list_del(&signaled->node);
		dma_fence_put(&signaled->f);
		break;
	}
	spin_unlock_irqrestore(&drv->lock, irq_flags);
}