disp: msm: update msm_gem ops and remove unused drm_driver callbacks

Update msm_gem and msm_drv to comply with latest 5.15 kernel.
Modify dma_buf_vmap() and dma-buf's vmap callback to use
struct dma_buf_map. Rename dma_resv_get_excl_rcu to _unlocked.
Remove deprecated GEM and PRIME callbacks.

Change-Id: Ifdfc7f872c988d8455f465e7d17dfbcf212bb5f6
Signed-off-by: Bruce Hoo <bingchua@codeaurora.org>
Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
Этот коммит содержится в:
Bruce Hoo
2021-10-13 09:29:00 +08:00
коммит произвёл Gerrit - the friendly Code Review server
родитель 283560cb39
Коммит d1d11731f1
6 изменённых файлов: 41 добавлений и 31 удалений

Просмотреть файл

@@ -532,7 +532,7 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
obj = msm_framebuffer_bo(new_state->fb, 0);
msm_obj = to_msm_bo(obj);
fence = dma_resv_get_excl_rcu(msm_obj->resv);
fence = dma_resv_get_excl_unlocked(msm_obj->resv);
drm_atomic_set_fence_for_plane(new_state, fence);
@@ -740,7 +740,7 @@ int msm_atomic_commit(struct drm_device *dev,
msm_framebuffer_bo(new_plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_fence *fence =
dma_resv_get_excl_rcu(msm_obj->resv);
dma_resv_get_excl_unlocked(msm_obj->resv);
drm_atomic_set_fence_for_plane(new_plane_state, fence);
}

Просмотреть файл

@@ -1688,12 +1688,6 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_UNLOCKED),
};
/* VMA callbacks for mmap'd GEM buffers: page faults are serviced by the
 * driver's msm_gem_fault(); open/close are the DRM core helpers that keep
 * the GEM object's reference count correct when a VMA is duplicated or torn
 * down. NOTE(review): this diff removes this table from msm_drv.c — it is
 * re-added in msm_gem.c alongside the new drm_gem_object_funcs. */
static const struct vm_operations_struct vm_ops = {
.fault = msm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -1718,20 +1712,12 @@ static struct drm_driver msm_driver = {
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
.irq_uninstall = msm_irq_uninstall,
.gem_free_object_unlocked = msm_gem_free_object,
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = msm_gem_prime_import,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
.gem_prime_vmap = msm_gem_prime_vmap,
.gem_prime_vunmap = msm_gem_prime_vunmap,
.gem_prime_mmap = msm_gem_prime_mmap,
.ioctls = msm_ioctls,
.num_ioctls = ARRAY_SIZE(msm_ioctls),

Просмотреть файл

@@ -1146,7 +1146,6 @@ void msm_gem_sync(struct drm_gem_object *obj);
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
@@ -1166,8 +1165,8 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);

Просмотреть файл

@@ -255,7 +255,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -717,6 +717,7 @@ fail:
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_buf_map map;
int ret = 0;
mutex_lock(&msm_obj->lock);
@@ -751,8 +752,10 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
goto fail;
}
msm_obj->vaddr =
dma_buf_vmap(obj->import_attach->dmabuf);
ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
if (ret)
return ERR_PTR(ret);
msm_obj->vaddr = map.vaddr;
} else {
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, PAGE_KERNEL);
@@ -864,6 +867,7 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);
WARN_ON(!mutex_is_locked(&msm_obj->lock));
@@ -871,9 +875,8 @@ static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
return;
if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
if (obj->dev && obj->dev->dev && !dev_is_dma_coherent(obj->dev->dev))
dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
dma_buf_vunmap(obj->import_attach->dmabuf, &map);
dma_buf_end_cpu_access(obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
} else {
vunmap(msm_obj->vaddr);
}
@@ -898,7 +901,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write,
ret = dma_resv_wait_timeout(msm_obj->resv, write,
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -1015,6 +1018,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(msm_obj->vaddr);
/* object should not be on active list: */
WARN_ON(is_active(msm_obj));
@@ -1035,7 +1039,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
if (obj->import_attach) {
if (msm_obj->vaddr)
dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
dma_buf_vunmap(obj->import_attach->dmabuf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -1082,6 +1086,22 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}
/* VMA callbacks for mmap'd GEM buffers, now file-local to msm_gem.c so that
 * msm_gem_fault() can be static. Faults are serviced by msm_gem_fault();
 * open/close are the standard DRM GEM helpers that hold/drop an object
 * reference across VMA duplication and teardown. */
static const struct vm_operations_struct vm_ops = {
.fault = msm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
/* Per-object GEM callbacks. On 5.15 the driver-level gem_free_object_unlocked,
 * gem_vm_ops and gem_prime_* hooks in struct drm_driver are gone; each object
 * carries its own ops instead. This table is attached to every new object in
 * msm_gem_new_impl() via (*obj)->funcs.
 * NOTE(review): .vmap/.vunmap now take struct dma_buf_map — verify the
 * msm_gem_prime_vmap/vunmap prototypes were updated to match (see msm_gem.h). */
static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
.vmap = msm_gem_prime_vmap,
.vunmap = msm_gem_prime_vunmap,
.vm_ops = &vm_ops,
};
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct dma_resv *resv,
@@ -1127,6 +1147,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->obj_dirty = false;
*obj = &msm_obj->base;
(*obj)->funcs = &msm_gem_object_funcs;
return 0;
}

Просмотреть файл

@@ -37,12 +37,13 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
}
void *msm_gem_prime_vmap(struct drm_gem_object *obj)
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
return msm_gem_get_vaddr(obj);
map->vaddr = msm_gem_get_vaddr(obj);
return IS_ERR_OR_NULL(map->vaddr);
}
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
msm_gem_put_vaddr(obj);
}

Просмотреть файл

@@ -1212,6 +1212,8 @@ static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
struct sde_layer_buffer *buf, struct sde_mdp_data *data)
{
struct dma_buf_map map;
dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
@@ -1221,7 +1223,8 @@ static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
dbgbuf->vaddr = dma_buf_vmap(dbgbuf->dmabuf);
dma_buf_vmap(dbgbuf->dmabuf, &map);
dbgbuf->vaddr = map.vaddr;
SDEROT_DBG("vaddr mapping: 0x%pK/%ld w:%d/h:%d\n",
dbgbuf->vaddr, dbgbuf->buflen,
dbgbuf->width, dbgbuf->height);