disp: msm: adapt msm_gem ops and drm_driver callbacks for multiple SIs

Commit d1d1173 ("disp: msm: update msm_gem ops and remove unused
drm_driver callbacks") updated msm_gem and msm_drv to comply with the
latest 5.15 kernel: it switched dma_buf_vmap() and dma-buf's vmap
callback to struct dma_buf_map, renamed dma_resv_get_excl_rcu() to
dma_resv_get_excl_unlocked(), and removed the deprecated GEM and PRIME
callbacks. This change guards those interface differences with kernel
version checks so the driver builds against both kernel 5.10 and
kernel 5.15.

Change-Id: Icb495dc4e5d20999f773ed5881eff233ff3a48bc
Signed-off-by: Bruce Hoo <bingchua@codeaurora.org>
Author: Bruce Hoo
Date:   2021-11-25 15:50:44 +08:00
Parent: a5c5224acd
Commit: 87c65e29e4

5 changed files with 88 additions and 3 deletions
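
Every hunk in this change follows the same shape: the 5.15 interface sits on the #if side of a LINUX_VERSION_CODE check and the 5.10 interface is restored on the #else side. A minimal sketch of the dma_buf_vmap() portion of that pattern follows; the helper name my_vmap_import() is hypothetical and only illustrates the guard, it is not part of the patch.

/*
 * Sketch of the version guard used throughout this change (hypothetical
 * helper, not in the patch): on 5.15 dma_buf_vmap() fills a struct
 * dma_buf_map and returns an error code, on 5.10 it returns the vaddr
 * directly.
 */
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/err.h>

static void *my_vmap_import(struct dma_buf *dmabuf)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(dmabuf, &map);	/* fills map.vaddr on success */
	if (ret)
		return ERR_PTR(ret);
	return map.vaddr;
#else
	/* pre-5.15 signature: returns the mapping (or NULL) directly */
	return dma_buf_vmap(dmabuf);
#endif
}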


@@ -537,7 +537,11 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
 	obj = msm_framebuffer_bo(new_state->fb, 0);
 	msm_obj = to_msm_bo(obj);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 	fence = dma_resv_get_excl_unlocked(msm_obj->resv);
+#else
+	fence = dma_resv_get_excl_rcu(msm_obj->resv);
+#endif
 	drm_atomic_set_fence_for_plane(new_state, fence);
@@ -745,7 +749,11 @@ int msm_atomic_commit(struct drm_device *dev,
 				msm_framebuffer_bo(new_plane_state->fb, 0);
 			struct msm_gem_object *msm_obj = to_msm_bo(obj);
 			struct dma_fence *fence =
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 				dma_resv_get_excl_unlocked(msm_obj->resv);
+#else
+				dma_resv_get_excl_rcu(msm_obj->resv);
+#endif
 			drm_atomic_set_fence_for_plane(new_plane_state, fence);
 		}


@@ -46,6 +46,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_auth.h>
 #include <drm/drm_probe_helper.h>
+#include <linux/version.h>
 #include "msm_drv.h"
 #include "msm_gem.h"
@@ -1715,6 +1716,14 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
 			DRM_UNLOCKED),
 };
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
+static const struct vm_operations_struct vm_ops = {
+	.fault = msm_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+#endif
 static const struct file_operations fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
@@ -1735,6 +1744,16 @@ static struct drm_driver msm_driver = {
 	.open = msm_open,
 	.postclose = msm_postclose,
 	.lastclose = msm_lastclose,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
+	.gem_free_object_unlocked = msm_gem_free_object,
+	.gem_vm_ops = &vm_ops,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_pin = msm_gem_prime_pin,
+	.gem_prime_unpin = msm_gem_prime_unpin,
+	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+	.gem_prime_vmap = msm_gem_prime_vmap,
+	.gem_prime_vunmap = msm_gem_prime_vunmap,
+#endif
 	.dumb_create = msm_gem_dumb_create,
 	.dumb_map_offset = msm_gem_dumb_map_offset,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,


@@ -37,6 +37,7 @@
 #include <linux/sde_vm_event.h>
 #include <linux/sizes.h>
 #include <linux/kthread.h>
+#include <linux/version.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
@@ -1159,8 +1160,14 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 		uint32_t handle, uint64_t *offset);
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+#else
+void *msm_gem_prime_vmap(struct drm_gem_object *obj);
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+vm_fault_t msm_gem_fault(struct vm_fault *vmf);
+#endif
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);


@@ -21,6 +21,10 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/pfn_t.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
+#include <linux/ion.h>
+#endif
 #include "msm_drv.h"
 #include "msm_gem.h"
@@ -255,7 +259,11 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
 }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
+#else
+vm_fault_t msm_gem_fault(struct vm_fault *vmf)
+#endif
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
@@ -651,7 +659,9 @@ fail:
 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 	struct dma_buf_map map;
+#endif
 	int ret = 0;
 	mutex_lock(&msm_obj->lock);
@@ -686,10 +696,14 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 			goto fail;
 		}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 		ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
 		if (ret)
-			return ERR_PTR(ret);
+			goto fail;
 		msm_obj->vaddr = map.vaddr;
+#else
+		msm_obj->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+#endif
 	} else {
 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, PAGE_KERNEL);
@@ -749,7 +763,9 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);
+#endif
 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
@@ -757,8 +773,14 @@ static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 		return;
 	if (obj->import_attach) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 		dma_buf_vunmap(obj->import_attach->dmabuf, &map);
 		dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
+#else
+		dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+		if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev))
+			dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
+#endif
 	} else {
 		vunmap(msm_obj->vaddr);
 	}
@@ -783,8 +805,11 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
-	ret = dma_resv_wait_timeout(msm_obj->resv, write,
-					true, remain);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+	ret = dma_resv_wait_timeout(msm_obj->resv, write, true, remain);
+#else
+	ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write, true, remain);
+#endif
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 	else if (ret < 0)
@@ -807,7 +832,9 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);
+#endif
 	/* object should not be on active list: */
 	WARN_ON(is_active(msm_obj));
@@ -828,7 +855,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	if (obj->import_attach) {
 		if (msm_obj->vaddr)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 			dma_buf_vunmap(obj->import_attach->dmabuf, &map);
+#else
+			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+#endif
 		/* Don't drop the pages for imported dmabuf, as they are not
 		 * ours, just free the array we allocated:
@@ -875,6 +906,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	return ret;
 }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 static const struct vm_operations_struct vm_ops = {
 	.fault = msm_gem_fault,
 	.open = drm_gem_vm_open,
@@ -890,6 +922,7 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
 	.vunmap = msm_gem_prime_vunmap,
 	.vm_ops = &vm_ops,
 };
+#endif
 static int msm_gem_new_impl(struct drm_device *dev,
 		uint32_t size, uint32_t flags,
@@ -936,7 +969,9 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	msm_obj->obj_dirty = false;
 	*obj = &msm_obj->base;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 	(*obj)->funcs = &msm_gem_object_funcs;
+#endif
 	return 0;
 }


@@ -25,6 +25,11 @@
 #include <linux/qcom-dma-mapping.h>
 #include <linux/dma-buf.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#endif
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -37,13 +42,24 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
 }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 {
 	map->vaddr = msm_gem_get_vaddr(obj);
 	return IS_ERR_OR_NULL(map->vaddr);
 }
+#else
+void *msm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	return msm_gem_get_vaddr(obj);
+}
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
+#else
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+#endif
 {
 	msm_gem_put_vaddr(obj);
 }