drm/udl: Switch to SHMEM

Udl's GEM code and the generic SHMEM helpers are almost identical. Replace
the former with SHMEM. Udl's dmabuf support is likewise replaced with the
generic GEM PRIME functions.
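
As a rough sketch of what the switch means for struct drm_driver: the
generic SHMEM helpers provide PRIME import/export and dumb-buffer
callbacks via the DRM_GEM_SHMEM_DRIVER_OPS macro, so the hand-rolled
dmabuf code can go away. The wiring below is an illustrative assumption,
not a quote from this patch's hunks:

#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>

static struct drm_driver driver = {
	.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,

	/* Generic PRIME and dumb-buffer callbacks from the SHMEM
	 * helpers; these replace udl's own dmabuf import/export code.
	 */
	DRM_GEM_SHMEM_DRIVER_OPS,

	/* udl keeps allocating the object itself so that it can
	 * install its own drm_gem_object_funcs; see
	 * udl_driver_gem_create_object() in the diff below.
	 */
	.gem_create_object = udl_driver_gem_create_object,
};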

The main difference is in the caching flags for mmap'ed pages. By
default, SHMEM maps pages with (uncached) write-combining. In udl's
memory-management code, only imported buffers use write-combining;
memory pages of locally created buffer objects are mmap'ed with
caching enabled. To keep this optimization, udl provides its own
mmap function for GEM objects, which fixes up the mapping flags.
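
The mapping that gets these fixed-up flags is the one userspace sees
when it maps a dumb buffer through the standard DRM ioctls. A minimal
userspace sketch (hypothetical helper, using libdrm's drmIoctl();
error handling trimmed):

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

/* Create and map a dumb buffer; returns MAP_FAILED on error. On udl,
 * the pages behind this mapping are now cached, not write-combined.
 */
static void *map_dumb_fb(int fd, uint32_t width, uint32_t height)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;

	memset(&create, 0, sizeof(create));
	create.width = width;
	create.height = height;
	create.bpp = 32;
	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return MAP_FAILED;

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;

	/* The fake offset from MAP_DUMB routes this mmap() call to the
	 * GEM object's mmap path, i.e. udl_gem_object_mmap().
	 */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, map.offset);
}

Imported PRIME buffers keep write-combining; only natively created
buffer objects get the cached mapping.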

v3:
	- restore udl vmap that enables caching
v2:
	- remove obsolete code in a separate patch

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191107094307.19870-4-tzimmermann@suse.de
commit 08b22f65b3, parent 1d48b9e988, Thomas Zimmermann, 2019-11-07 10:43:06 +01:00
5 changed files with 138 additions and 60 deletions

drivers/gpu/drm/udl/udl_gem.c

@@ -7,11 +7,100 @@
 #include <linux/vmalloc.h>
 
 #include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_prime.h>
 
 #include "udl_drv.h"
 
+/*
+ * GEM object funcs
+ */
+
+static void udl_gem_object_free_object(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	/* Fbdev emulation vmaps the buffer. Unmap it here for consistency
+	 * with the original udl GEM code.
+	 *
+	 * TODO: Switch to generic fbdev emulation and release the
+	 *       GEM object with drm_gem_shmem_free_object().
+	 */
+	if (shmem->vaddr)
+		drm_gem_shmem_vunmap(obj, shmem->vaddr);
+
+	drm_gem_shmem_free_object(obj);
+}
+
+static int udl_gem_object_mmap(struct drm_gem_object *obj,
+			       struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_shmem_mmap(obj, vma);
+	if (ret)
+		return ret;
+
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	if (obj->import_attach)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
+	return 0;
+}
+
+static void *udl_gem_object_vmap(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	int ret;
+
+	ret = mutex_lock_interruptible(&shmem->vmap_lock);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (shmem->vmap_use_count++ > 0)
+		goto out;
+
+	ret = drm_gem_shmem_get_pages(shmem);
+	if (ret)
+		goto err_zero_use;
+
+	if (obj->import_attach)
+		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+	else
+		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+				    VM_MAP, PAGE_KERNEL);
+
+	if (!shmem->vaddr) {
+		DRM_DEBUG_KMS("Failed to vmap pages\n");
+		ret = -ENOMEM;
+		goto err_put_pages;
+	}
+
+out:
+	mutex_unlock(&shmem->vmap_lock);
+	return shmem->vaddr;
+
+err_put_pages:
+	drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+	shmem->vmap_use_count = 0;
+	mutex_unlock(&shmem->vmap_lock);
+	return ERR_PTR(ret);
+}
+
+static const struct drm_gem_object_funcs udl_gem_object_funcs = {
+	.free = udl_gem_object_free_object,
+	.print_info = drm_gem_shmem_print_info,
+	.pin = drm_gem_shmem_pin,
+	.unpin = drm_gem_shmem_unpin,
+	.get_sg_table = drm_gem_shmem_get_sg_table,
+	.vmap = udl_gem_object_vmap,
+	.vunmap = drm_gem_shmem_vunmap,
+	.mmap = udl_gem_object_mmap,
+};
+
 /*
  * Helpers for struct drm_driver
  */
@@ -19,13 +108,17 @@
 struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
						     size_t size)
 {
-	struct udl_gem_object *obj;
+	struct drm_gem_shmem_object *shmem;
+	struct drm_gem_object *obj;
 
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-	if (!obj)
+	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+	if (!shmem)
 		return NULL;
 
-	return &obj->base;
+	obj = &shmem->base;
+	obj->funcs = &udl_gem_object_funcs;
+
+	return obj;
 }
 
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
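
A note on the custom vmap above (the v3 changelog item): the generic
drm_gem_shmem_vmap() maps with write-combining, whereas udl restores a
cached kernel mapping for natively allocated objects. In rough outline
(the generic helper's flags below reflect the helpers of this era and
should be treated as an assumption):

	/* generic SHMEM helper: uncached write-combining */
	shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
			    VM_MAP, pgprot_writecombine(PAGE_KERNEL));

	/* udl's udl_gem_object_vmap(): caching enabled */
	shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
			    VM_MAP, PAGE_KERNEL);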