drm/virtio: rework virtio_gpu_execbuffer_ioctl fencing
Rework the fencing workflow, starting with virtio_gpu_execbuffer_ioctl.
Stop using ttm helpers; use the virtio_gpu_array_* helpers (which work
on the reservation objects directly) instead.  Also store the object
array in struct virtio_gpu_vbuffer, so we explicitly keep a reference
on all buffers used, instead of depending on ttm_bo_put() checking
whether the object is actually idle before releasing it.

New workflow:

(1) All gem objects needed by a command are added to a
    virtio_gpu_object_array.
(2) All reservation objects are locked (virtio_gpu_array_lock_resv).
(3) virtio_gpu_fence_emit() completes fence initialization.
(4) The fence is added to the objects and the reservation objects are
    unlocked (virtio_gpu_array_add_fence, virtio_gpu_array_unlock_resv).
(5) The virtio command is submitted to the host.
(6) The completion callback (virtio_gpu_dequeue_ctrl_func) drops the
    object references and frees the virtio_gpu_object_array.

v6: rewrite most of the patch.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190829103301.3539-9-kraxel@redhat.com
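The virtgpu_ioctl.c half of the change is not part of the excerpt
below, so here is a minimal sketch of how steps (1)-(6) line up in the
submit path. The execbuffer_sketch wrapper, its error handling, and
the assumption that the object array was filled at lookup time are
illustrative, not the patch's actual code:

	#include "virtgpu_drv.h"

	/* Sketch only: maps steps (1)-(6) of the new workflow onto
	 * the execbuffer path.  Bo lookup and error handling are
	 * simplified; names are not taken from the real patch. */
	static int execbuffer_sketch(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_fpriv *vfpriv,
				     struct virtio_gpu_object_array *objs,
				     void *buf, uint32_t size)
	{
		struct virtio_gpu_fence *fence;
		int ret;

		/* (1) happened at lookup time: all gem objects needed
		 * by this command were collected into 'objs'. */

		/* (2) lock every reservation object in the array */
		ret = virtio_gpu_array_lock_resv(objs);
		if (ret)
			return ret;

		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence) {
			virtio_gpu_array_unlock_resv(objs);
			return -ENOMEM;
		}

		/* (3)-(5) happen inside virtio_gpu_cmd_submit(): it
		 * stores 'objs' in the vbuffer, emits the fence,
		 * attaches it to the objects, unlocks the reservations
		 * and queues the command.  (6) The completion callback
		 * later calls virtio_gpu_array_put_free() on
		 * vbuf->objs. */
		virtio_gpu_cmd_submit(vgdev, buf, size, vfpriv->ctx_id,
				      objs, fence);
		dma_fence_put(&fence->f);
		return 0;
	}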
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -192,7 +192,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
 	spin_unlock(&vgdev->ctrlq.qlock);
 
-	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+	list_for_each_entry(entry, &reclaim_list, list) {
 		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
 
 		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -219,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 		}
 		if (entry->resp_cb)
 			entry->resp_cb(vgdev, entry);
-
-		list_del(&entry->list);
-		free_vbuf(vgdev, entry);
 	}
 	wake_up(&vgdev->ctrlq.ack_queue);
 
 	if (fence_id)
 		virtio_gpu_fence_event_process(vgdev, fence_id);
+
+	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+		if (entry->objs)
+			virtio_gpu_array_put_free(entry->objs);
+		list_del(&entry->list);
+		free_vbuf(vgdev, entry);
+	}
 }
 
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
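virtio_gpu_array_put_free(), used above for step (6), is introduced
earlier in this series and is not part of this diff. A plausible
sketch, assuming the array stores plain drm_gem_object pointers in
objs[0..nents) and was kmalloc'ed:

	/* Sketch of the helper used above; the 'nents'/'objs' field
	 * names and the kfree are assumptions about the series, not
	 * code from this patch. */
	void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
	{
		u32 i;

		for (i = 0; i < objs->nents; i++)
			drm_gem_object_put_unlocked(objs->objs[i]);
		kfree(objs);
	}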
@@ -337,6 +341,10 @@ again:
 
 	if (fence)
 		virtio_gpu_fence_emit(vgdev, hdr, fence);
+	if (vbuf->objs) {
+		virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+		virtio_gpu_array_unlock_resv(vbuf->objs);
+	}
 	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 	spin_unlock(&vgdev->ctrlq.qlock);
 	if (notify)
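Note that the fence is emitted and attached to the objects while
ctrlq.qlock is still held, so fence ids reach the virtqueue in
submission order. The two array helpers used here also come from
earlier in the series; a rough sketch, assuming the dma_resv API of
kernels of that era and a ww_acquire ticket stored in the array:

	/* Sketch of the helpers used above; the dma_resv calls and
	 * the 'ticket' field are assumptions, not this patch's code. */
	void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
					struct dma_fence *fence)
	{
		u32 i;

		/* record the fence as the exclusive fence of each bo */
		for (i = 0; i < objs->nents; i++)
			dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
	}

	void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
	{
		u32 i;

		for (i = 0; i < objs->nents; i++)
			dma_resv_unlock(objs->objs[i]->resv);
		ww_acquire_fini(&objs->ticket);
	}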
@@ -940,7 +948,9 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
+			   uint32_t ctx_id,
+			   struct virtio_gpu_object_array *objs,
+			   struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_cmd_submit *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -950,6 +960,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 	vbuf->data_buf = data;
 	vbuf->data_size = data_size;
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
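With the new parameter in place, the matching caller update in
virtio_gpu_execbuffer_ioctl (not part of this excerpt) passes the
object array straight through; the buflist and out_fence names here
are assumptions:

	/* caller side (sketch): ownership of 'buflist' passes to the
	 * vbuffer, and the completion callback frees it. */
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);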