@@ -38,32 +38,46 @@ struct vb2_queue *msm_vidc_get_vb2q(struct msm_vidc_inst *inst,
 
 	return q;
 }
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
 void *msm_vb2_get_userptr(struct device *dev, unsigned long vaddr,
 		unsigned long size, enum dma_data_direction dma_dir)
 {
 	return (void *)0xdeadbeef;
 }
 
-void msm_vb2_put_userptr(void *buf_priv)
+void *msm_vb2_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
+		unsigned long size, enum dma_data_direction dma_dir)
+{
+	return (void *)0xdeadbeef;
+}
+#else
+void *msm_vb2_get_userptr(struct vb2_buffer *vb, struct device *dev,
+		unsigned long vaddr, unsigned long size)
+{
+	return (void *)0xdeadbeef;
+}
+
+void *msm_vb2_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
+		struct dma_buf *dbuf, unsigned long size)
 {
+	return (void *)0xdeadbeef;
 }
+#endif
 
-void* msm_vb2_attach_dmabuf(struct device* dev, struct dma_buf* dbuf,
-		unsigned long size, enum dma_data_direction dma_dir)
+void msm_vb2_put_userptr(void *buf_priv)
 {
-	return (void*)0xdeadbeef;
 }
 
-void msm_vb2_detach_dmabuf(void* buf_priv)
+void msm_vb2_detach_dmabuf(void *buf_priv)
 {
 }
 
-int msm_vb2_map_dmabuf(void* buf_priv)
+int msm_vb2_map_dmabuf(void *buf_priv)
 {
 	return 0;
 }
 
-void msm_vb2_unmap_dmabuf(void* buf_priv)
+void msm_vb2_unmap_dmabuf(void *buf_priv)
 {
 }
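
For context, the sketch below is not part of the patch: it is a minimal illustration of how version-guarded stubs like these are typically plugged into a struct vb2_mem_ops table. The field names come from media/videobuf2-core.h; the table name msm_vb2_mem_ops and its placement are assumptions for illustration only. Because both branches of the #if define the same symbols and only the prototypes differ, the ops table itself needs no version guards.

#include <media/videobuf2-core.h>

/*
 * Illustrative sketch only -- the table name is hypothetical and the stub
 * declarations above are assumed to be in scope. The preprocessor has
 * already selected the stub definitions whose prototypes match this
 * kernel's vb2_mem_ops, so these assignments compile on either side of
 * the patch's KERNEL_VERSION(5, 15, 0) guard.
 */
static const struct vb2_mem_ops msm_vb2_mem_ops = {
	.get_userptr   = msm_vb2_get_userptr,
	.put_userptr   = msm_vb2_put_userptr,
	.attach_dmabuf = msm_vb2_attach_dmabuf,
	.detach_dmabuf = msm_vb2_detach_dmabuf,
	.map_dmabuf    = msm_vb2_map_dmabuf,
	.unmap_dmabuf  = msm_vb2_unmap_dmabuf,
};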