drm/virtio: switch from ttm to gem shmem helpers

virtio-gpu basically needs an sg_table for each bo, to tell the host where
the backing pages for the object are.  So the gem shmem helpers are a
perfect fit.  Some drm_gem_object_funcs need thin wrappers to update the
host state, but otherwise the helpers handle everything just fine.
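
To illustrate the thin-wrapper pattern, here is a minimal sketch of the
driver's .free callback: it updates host state first and then delegates to
the shmem helpers.  This is a simplified reconstruction rather than the
literal code from virtgpu_object.c; virtio_gpu_object_detach() and
virtio_gpu_cmd_unref_resource() are the driver's existing host-update calls.

static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	/* host-state update: detach backing pages, drop the host resource */
	if (bo->pages)
		virtio_gpu_object_detach(vgdev, bo);
	if (bo->created)
		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);

	/* everything else (pages, sg_table, vmap) is shmem helper work */
	drm_gem_shmem_free_object(obj);
}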

Once the fencing was sorted out, the switch was surprisingly easy; for the
most part it was just a matter of removing the ttm code.

v4: fix drm_gem_object_funcs name.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190829103301.3539-15-kraxel@redhat.com
Author: Gerd Hoffmann
Date:   2019-08-29 12:32:57 +02:00
parent ad75f4f057
commit c66df701e7
11 changed files with 80 additions and 535 deletions

drivers/gpu/drm/virtio/virtgpu_drv.h

@@ -35,12 +35,9 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_placement.h>
 
 #define DRIVER_NAME "virtio_gpu"
 #define DRIVER_DESC "virtio GPU"
@@ -68,21 +65,16 @@ struct virtio_gpu_object_params {
 };
 
 struct virtio_gpu_object {
-	struct drm_gem_object gem_base;
+	struct drm_gem_shmem_object base;
 	uint32_t hw_res_handle;
 
 	struct sg_table *pages;
 	uint32_t mapped;
-	void *vmap;
 	bool dumb;
-	struct ttm_place placement_code;
-	struct ttm_placement placement;
-	struct ttm_buffer_object tbo;
-	struct ttm_bo_kmap_obj kmap;
 	bool created;
 };
 
 #define gem_to_virtio_gpu_obj(gobj) \
-	container_of((gobj), struct virtio_gpu_object, gem_base)
+	container_of((gobj), struct virtio_gpu_object, base.base)
 
 struct virtio_gpu_object_array {
 	struct ww_acquire_ctx ticket;
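
An aside on the new layout: drm_gem_shmem_object itself embeds
drm_gem_object in a field also named base, which is why the macro above
walks through base.base.  A hypothetical round-trip, purely for
illustration:

static void example_round_trip(struct virtio_gpu_object *bo)
{
	/* up-cast: the GEM object sits two embeddings deep */
	struct drm_gem_object *obj = &bo->base.base;

	/* down-cast: container_of() walks back out through base.base */
	struct virtio_gpu_object *bo2 = gem_to_virtio_gpu_obj(obj);

	WARN_ON(bo2 != bo);
}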
@@ -153,10 +145,6 @@ struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
 	container_of(x, struct virtio_gpu_framebuffer, base)
 
-struct virtio_gpu_mman {
-	struct ttm_bo_device bdev;
-};
-
 struct virtio_gpu_queue {
 	struct virtqueue *vq;
 	spinlock_t qlock;
@@ -185,8 +173,6 @@ struct virtio_gpu_device {
 	struct virtio_device *vdev;
 
-	struct virtio_gpu_mman mman;
-
 	struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
 	uint32_t num_scanouts;
@@ -358,11 +344,6 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
 					enum drm_plane_type type,
 					int index);
 
-/* virtio_gpu_ttm.c */
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
 /* virtio_gpu_fence.c */
 bool virtio_fence_signaled(struct dma_fence *f);
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(
@@ -374,58 +355,47 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
 			u64 last_seq);
 
 /* virtio_gpu_object */
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+						size_t size);
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
 			     struct virtio_gpu_fence *fence);
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
-				   struct virtio_gpu_object *bo);
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
 
 /* virtgpu_prime.c */
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 	struct drm_device *dev, struct dma_buf_attachment *attach,
 	struct sg_table *sgt);
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
-			   struct vm_area_struct *vma);
 
 static inline struct virtio_gpu_object*
 virtio_gpu_object_ref(struct virtio_gpu_object *bo)
 {
-	ttm_bo_get(&bo->tbo);
+	drm_gem_object_get(&bo->base.base);
 	return bo;
 }
 
 static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
 {
-	struct ttm_buffer_object *tbo;
-
 	if ((*bo) == NULL)
 		return;
-	tbo = &((*bo)->tbo);
-	ttm_bo_put(tbo);
+	drm_gem_object_put(&(*bo)->base.base);
 	*bo = NULL;
 }
 
 static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
+	return drm_vma_node_offset_addr(&bo->base.base.vma_node);
 }
 
 static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo)
 {
 	int r;
 
-	r = dma_resv_lock_interruptible(bo->gem_base.resv, NULL);
+	r = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
 	if (unlikely(r != 0)) {
 		if (r != -EINTR) {
 			struct virtio_gpu_device *qdev =
-				bo->gem_base.dev->dev_private;
+				bo->base.base.dev->dev_private;
 			dev_err(qdev->dev, "%p reserve failed\n", bo);
 		}
 		return r;
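
The virtio_gpu_create_object() prototype added above is the driver's
gem_create_object hook; its body lives in virtgpu_object.c, which this page
does not show.  A sketch of the expected shape, assuming the per-object
funcs table is named virtio_gpu_gem_funcs:

struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	/* wire up the per-object funcs; most entries delegate
	 * straight to the drm_gem_shmem_* helpers */
	bo->base.base.funcs = &virtio_gpu_gem_funcs;
	return &bo->base.base;
}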
@@ -435,7 +405,7 @@ static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo)
 
 static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
 {
-	dma_resv_unlock(bo->gem_base.resv);
+	dma_resv_unlock(bo->base.base.resv);
 }
 
 /* virgl debugfs */
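
With ttm gone, reserve/unreserve reduce to plain dma_resv locking on the
GEM object's reservation.  A hypothetical call site, for illustration only:

static int example_locked_update(struct virtio_gpu_object *bo)
{
	int ret;

	ret = virtio_gpu_object_reserve(bo);	/* dma_resv_lock_interruptible() */
	if (ret)
		return ret;

	/* bo->base.base.resv is held here; fences may be added or queried */

	virtio_gpu_object_unreserve(bo);	/* dma_resv_unlock() */
	return 0;
}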