media: drivers/media/common/videobuf2: rename from videobuf
This directory contains the videobuf2 framework, so name the directory
accordingly. The name 'videobuf' typically refers to the old and deprecated
videobuf version 1 framework, so the old name was confusing.

Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
committed by Mauro Carvalho Chehab
parent: c81ceb58e1
commit: 7952be9b6e
drivers/media/common/videobuf2/Kconfig (new file, 31 lines)
@@ -0,0 +1,31 @@
# Used by drivers that need Videobuf2 modules
config VIDEOBUF2_CORE
	select DMA_SHARED_BUFFER
	tristate

config VIDEOBUF2_MEMOPS
	tristate
	select FRAME_VECTOR

config VIDEOBUF2_DMA_CONTIG
	tristate
	depends on HAS_DMA
	select VIDEOBUF2_CORE
	select VIDEOBUF2_MEMOPS
	select DMA_SHARED_BUFFER

config VIDEOBUF2_VMALLOC
	tristate
	select VIDEOBUF2_CORE
	select VIDEOBUF2_MEMOPS
	select DMA_SHARED_BUFFER

config VIDEOBUF2_DMA_SG
	tristate
	depends on HAS_DMA
	select VIDEOBUF2_CORE
	select VIDEOBUF2_MEMOPS

config VIDEOBUF2_DVB
	tristate
	select VIDEOBUF2_CORE
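These entries are meant to be selected by drivers rather than enabled by hand. As an illustration (the driver name and help text below are hypothetical, not part of this commit), a capture driver that needs physically contiguous buffers would pull in the right modules from its own Kconfig entry:

config VIDEO_FOO
	tristate "Foo video capture (hypothetical example)"
	depends on VIDEO_V4L2 && HAS_DMA
	select VIDEOBUF2_DMA_CONTIG
	help
	  Illustrative only. Selecting VIDEOBUF2_DMA_CONTIG transitively
	  selects VIDEOBUF2_CORE and VIDEOBUF2_MEMOPS via the entries above.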
drivers/media/common/videobuf2/Makefile (new file, 7 lines)
@@ -0,0 +1,7 @@

obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o videobuf2-v4l2.o
obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
obj-$(CONFIG_VIDEOBUF2_DVB) += videobuf2-dvb.o
drivers/media/common/videobuf2/videobuf2-core.c (new file, 2620 lines)
File diff too large to display.
drivers/media/common/videobuf2/videobuf2-dma-contig.c (new file, 787 lines)
@@ -0,0 +1,787 @@
/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.map = vb2_dc_dmabuf_ops_kmap,
	.map_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
		buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

/*
 * For some kind of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * pfn to dma address or at the last resort just assume that
 * dma address == physical address (like it has been assumed in earlier version
 * of videobuf2-dma-contig
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from the drivers, which are known to
 * operate on platforms with IOMMU and provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
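As a usage sketch of the exported helpers above (not part of this commit; struct foo_dev and foo_setup_vb2() are hypothetical, and the other mandatory vb2_queue fields such as ops and type are omitted for brevity):

#include <linux/dma-mapping.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

struct foo_dev {				/* hypothetical driver state */
	struct device *dev;
	struct vb2_queue queue;
};

static int foo_setup_vb2(struct foo_dev *foo)
{
	int ret;

	/* Per the kernel-doc above: call before initializing the queue. */
	ret = vb2_dma_contig_set_max_seg_size(foo->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	foo->queue.mem_ops = &vb2_dma_contig_memops;	/* this allocator */
	return vb2_queue_init(&foo->queue);
}

The matching vb2_dma_contig_clear_max_seg_size(foo->dev) call would go in the driver's remove path.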
drivers/media/common/videobuf2/videobuf2-dma-sg.c (new file, 669 lines)
@@ -0,0 +1,669 @@
/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);

static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
			      unsigned long size, enum dma_data_direction dma_dir,
			      gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);


	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.map = vb2_dma_sg_dmabuf_ops_kmap,
	.map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
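For context (a hypothetical sketch, not part of this commit): a driver using this allocator typically retrieves the mapped scatterlist in its buf_queue callback through the .cookie op above, via the vb2_dma_sg_plane_desc() helper from <media/videobuf2-dma-sg.h>; foo_hw_program_sglist() is an assumed driver function:

#include <media/videobuf2-dma-sg.h>

void foo_hw_program_sglist(void *priv, struct sg_table *sgt);	/* hypothetical */

static void foo_buf_queue(struct vb2_buffer *vb)
{
	/*
	 * vb2_dma_sg_plane_desc() wraps vb2_plane_cookie(), which ends up
	 * in vb2_dma_sg_cookie() above and returns the buffer's sg_table.
	 */
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);

	foo_hw_program_sglist(vb->vb2_queue->drv_priv, sgt);
}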
drivers/media/common/videobuf2/videobuf2-dvb.c (new file, 345 lines)
@@ -0,0 +1,345 @@
|
||||
/*
|
||||
*
|
||||
* some helper function for simple DVB cards which simply DMA the
|
||||
* complete transport stream and let the computer sort everything else
|
||||
* (i.e. we are using the software demux, ...). Also uses the
|
||||
* video-buf to manage DMA buffers.
|
||||
*
|
||||
* (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <media/videobuf2-dvb.h>
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
static int dvb_fnc(struct vb2_buffer *vb, void *priv)
|
||||
{
|
||||
struct vb2_dvb *dvb = priv;
|
||||
|
||||
dvb_dmx_swfilter(&dvb->demux, vb2_plane_vaddr(vb, 0),
|
||||
vb2_get_plane_payload(vb, 0));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vb2_dvb_start_feed(struct dvb_demux_feed *feed)
|
||||
{
|
||||
struct dvb_demux *demux = feed->demux;
|
||||
struct vb2_dvb *dvb = demux->priv;
|
||||
int rc = 0;
|
||||
|
||||
if (!demux->dmx.frontend)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dvb->lock);
|
||||
dvb->nfeeds++;
|
||||
|
||||
if (!dvb->dvbq.threadio) {
|
||||
rc = vb2_thread_start(&dvb->dvbq, dvb_fnc, dvb, dvb->name);
|
||||
if (rc)
|
||||
dvb->nfeeds--;
|
||||
}
|
||||
if (!rc)
|
||||
rc = dvb->nfeeds;
|
||||
mutex_unlock(&dvb->lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int vb2_dvb_stop_feed(struct dvb_demux_feed *feed)
|
||||
{
|
||||
struct dvb_demux *demux = feed->demux;
|
||||
struct vb2_dvb *dvb = demux->priv;
|
||||
int err = 0;
|
||||
|
||||
mutex_lock(&dvb->lock);
|
||||
dvb->nfeeds--;
|
||||
if (0 == dvb->nfeeds)
|
||||
err = vb2_thread_stop(&dvb->dvbq);
|
||||
mutex_unlock(&dvb->lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe,
|
||||
struct module *module,
|
||||
void *adapter_priv,
|
||||
struct device *device,
|
||||
struct media_device *mdev,
|
||||
char *adapter_name,
|
||||
short *adapter_nr,
|
||||
int mfe_shared)
|
||||
{
|
||||
int result;
|
||||
|
||||
mutex_init(&fe->lock);
|
||||
|
||||
/* register adapter */
|
||||
result = dvb_register_adapter(&fe->adapter, adapter_name, module,
|
||||
device, adapter_nr);
|
||||
if (result < 0) {
|
||||
pr_warn("%s: dvb_register_adapter failed (errno = %d)\n",
|
||||
adapter_name, result);
|
||||
}
|
||||
fe->adapter.priv = adapter_priv;
|
||||
fe->adapter.mfe_shared = mfe_shared;
|
||||
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
|
||||
if (mdev)
|
||||
fe->adapter.mdev = mdev;
|
||||
#endif
|
||||
return result;
|
||||
}
|
||||
|
||||
static int vb2_dvb_register_frontend(struct dvb_adapter *adapter,
|
||||
struct vb2_dvb *dvb)
|
||||
{
|
||||
int result;
|
||||
|
||||
/* register frontend */
|
||||
result = dvb_register_frontend(adapter, dvb->frontend);
|
||||
if (result < 0) {
|
||||
pr_warn("%s: dvb_register_frontend failed (errno = %d)\n",
|
||||
dvb->name, result);
|
||||
goto fail_frontend;
|
||||
}
|
||||
|
||||
/* register demux stuff */
|
||||
dvb->demux.dmx.capabilities =
|
||||
DMX_TS_FILTERING | DMX_SECTION_FILTERING |
|
||||
DMX_MEMORY_BASED_FILTERING;
|
||||
dvb->demux.priv = dvb;
|
||||
dvb->demux.filternum = 256;
|
||||
dvb->demux.feednum = 256;
|
||||
dvb->demux.start_feed = vb2_dvb_start_feed;
|
||||
dvb->demux.stop_feed = vb2_dvb_stop_feed;
|
||||
result = dvb_dmx_init(&dvb->demux);
|
||||
if (result < 0) {
|
||||
pr_warn("%s: dvb_dmx_init failed (errno = %d)\n",
|
||||
dvb->name, result);
|
||||
goto fail_dmx;
|
||||
}
|
||||
|
||||
dvb->dmxdev.filternum = 256;
|
||||
dvb->dmxdev.demux = &dvb->demux.dmx;
|
||||
dvb->dmxdev.capabilities = 0;
|
||||
result = dvb_dmxdev_init(&dvb->dmxdev, adapter);
|
||||
|
||||
if (result < 0) {
|
||||
pr_warn("%s: dvb_dmxdev_init failed (errno = %d)\n",
|
||||
dvb->name, result);
|
||||
goto fail_dmxdev;
|
||||
}
|
||||
|
||||
dvb->fe_hw.source = DMX_FRONTEND_0;
|
||||
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
|
||||
if (result < 0) {
|
||||
pr_warn("%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
|
||||
dvb->name, result);
|
||||
goto fail_fe_hw;
|
||||
}
|
||||
|
||||
dvb->fe_mem.source = DMX_MEMORY_FE;
|
||||
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
|
||||
if (result < 0) {
|
||||
pr_warn("%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
|
||||
dvb->name, result);
|
||||
goto fail_fe_mem;
|
||||
}
|
||||
|
||||
result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
|
||||
if (result < 0) {
|
||||
pr_warn("%s: connect_frontend failed (errno = %d)\n",
|
||||
dvb->name, result);
|
||||
goto fail_fe_conn;
|
||||
}
|
||||
|
||||
/* register network adapter */
|
||||
result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
|
||||
if (result < 0) {
|
||||
pr_warn("%s: dvb_net_init failed (errno = %d)\n",
|
||||
dvb->name, result);
|
||||
goto fail_fe_conn;
|
||||
}
|
||||
return 0;
|
||||
|
||||
fail_fe_conn:
|
||||
dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
|
||||
fail_fe_mem:
|
||||
dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
|
||||
fail_fe_hw:
|
||||
dvb_dmxdev_release(&dvb->dmxdev);
|
||||
fail_dmxdev:
|
||||
dvb_dmx_release(&dvb->demux);
|
||||
fail_dmx:
|
||||
dvb_unregister_frontend(dvb->frontend);
|
||||
fail_frontend:
|
||||
dvb_frontend_detach(dvb->frontend);
|
||||
dvb->frontend = NULL;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Register a single adapter and one or more frontends */
|
||||
int vb2_dvb_register_bus(struct vb2_dvb_frontends *f,
|
||||
struct module *module,
|
||||
void *adapter_priv,
|
||||
struct device *device,
|
||||
struct media_device *mdev,
|
||||
short *adapter_nr,
|
||||
int mfe_shared)
|
||||
{
|
||||
struct list_head *list, *q;
|
||||
struct vb2_dvb_frontend *fe;
|
||||
int res;
|
||||
|
||||
fe = vb2_dvb_get_frontend(f, 1);
|
||||
if (!fe) {
|
||||
pr_warn("Unable to register the adapter which has no frontends\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Bring up the adapter */
|
||||
res = vb2_dvb_register_adapter(f, module, adapter_priv, device, mdev,
|
||||
			fe->dvb.name, adapter_nr, mfe_shared);
	if (res < 0) {
		pr_warn("vb2_dvb_register_adapter failed (errno = %d)\n", res);
		return res;
	}

	/* Attach all of the frontends to the adapter */
	mutex_lock(&f->lock);
	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct vb2_dvb_frontend, felist);
		res = vb2_dvb_register_frontend(&f->adapter, &fe->dvb);
		if (res < 0) {
			pr_warn("%s: vb2_dvb_register_frontend failed (errno = %d)\n",
				fe->dvb.name, res);
			goto err;
		}
		res = dvb_create_media_graph(&f->adapter, false);
		if (res < 0)
			goto err;
	}

	mutex_unlock(&f->lock);
	return 0;

err:
	mutex_unlock(&f->lock);
	vb2_dvb_unregister_bus(f);
	return res;
}
EXPORT_SYMBOL(vb2_dvb_register_bus);

void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f)
{
	vb2_dvb_dealloc_frontends(f);

	dvb_unregister_adapter(&f->adapter);
}
EXPORT_SYMBOL(vb2_dvb_unregister_bus);

struct vb2_dvb_frontend *vb2_dvb_get_frontend(
	struct vb2_dvb_frontends *f, int id)
{
	struct list_head *list, *q;
	struct vb2_dvb_frontend *fe, *ret = NULL;

	mutex_lock(&f->lock);

	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct vb2_dvb_frontend, felist);
		if (fe->id == id) {
			ret = fe;
			break;
		}
	}

	mutex_unlock(&f->lock);

	return ret;
}
EXPORT_SYMBOL(vb2_dvb_get_frontend);

int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f,
			  struct dvb_frontend *p)
{
	struct list_head *list, *q;
	struct vb2_dvb_frontend *fe = NULL;
	int ret = 0;

	mutex_lock(&f->lock);

	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct vb2_dvb_frontend, felist);
		if (fe->dvb.frontend == p) {
			ret = fe->id;
			break;
		}
	}

	mutex_unlock(&f->lock);

	return ret;
}
EXPORT_SYMBOL(vb2_dvb_find_frontend);

struct vb2_dvb_frontend *vb2_dvb_alloc_frontend(
	struct vb2_dvb_frontends *f, int id)
{
	struct vb2_dvb_frontend *fe;

	fe = kzalloc(sizeof(struct vb2_dvb_frontend), GFP_KERNEL);
	if (fe == NULL)
		return NULL;

	fe->id = id;
	mutex_init(&fe->dvb.lock);

	mutex_lock(&f->lock);
	list_add_tail(&fe->felist, &f->felist);
	mutex_unlock(&f->lock);
	return fe;
}
EXPORT_SYMBOL(vb2_dvb_alloc_frontend);

void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f)
{
	struct list_head *list, *q;
	struct vb2_dvb_frontend *fe;

	mutex_lock(&f->lock);
	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct vb2_dvb_frontend, felist);
		if (fe->dvb.net.dvbdev) {
			dvb_net_release(&fe->dvb.net);
			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
				&fe->dvb.fe_mem);
			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
				&fe->dvb.fe_hw);
			dvb_dmxdev_release(&fe->dvb.dmxdev);
			dvb_dmx_release(&fe->dvb.demux);
			dvb_unregister_frontend(fe->dvb.frontend);
		}
		if (fe->dvb.frontend)
			/* always allocated, may have been reset */
			dvb_frontend_detach(fe->dvb.frontend);
		list_del(list);		/* remove list entry */
		kfree(fe);		/* free frontend allocation */
	}
	mutex_unlock(&f->lock);
}
EXPORT_SYMBOL(vb2_dvb_dealloc_frontends);
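For reference, the calling sequence a bridge driver is expected to follow with these helpers is: vb2_dvb_alloc_frontend() once per tuner, attach the demodulator/tuner drivers to fe->dvb.frontend, then a single vb2_dvb_register_bus() call. A minimal sketch under stated assumptions: the demo_dev struct and field names are hypothetical, and the vb2_dvb_register_bus() argument list follows my reading of media/videobuf2-dvb.h at the time of this rename, not this patch itself.

/* Sketch of a bridge driver's DVB init; demo_* names are assumptions. */
static int demo_dvb_init(struct demo_dev *dev, short *adapter_nr)
{
	struct vb2_dvb_frontend *fe;
	int res;

	/* One frontend with id 1; multi-tuner drivers loop here. */
	fe = vb2_dvb_alloc_frontend(&dev->frontends, 1);
	if (!fe)
		return -ENOMEM;

	/* ... attach demod/tuner, set up the fe->dvb.dvbq vb2 queue ... */

	res = vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
				   dev->dev, NULL, adapter_nr, 0);
	if (res < 0)
		vb2_dvb_dealloc_frontends(&dev->frontends);
	return res;
}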
135
drivers/media/common/videobuf2/videobuf2-memops.c
Normal file
@@ -0,0 +1,135 @@
/*
 * videobuf2-memops.c - generic memory handling routines for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/file.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>

/**
 * vb2_create_framevec() - map virtual addresses to pfns
 * @start:	Virtual user address where we start mapping
 * @length:	Length of a range to map
 * @write:	Should we map for writing into the area
 *
 * This function allocates and fills in a vector with pfns corresponding to
 * virtual address range passed in arguments. If pfns have corresponding pages,
 * page references are also grabbed to pin pages in memory. The function
 * returns pointer to the vector on success and error pointer in case of
 * failure. Returned vector needs to be freed via vb2_destroy_framevec().
 */
struct frame_vector *vb2_create_framevec(unsigned long start,
					 unsigned long length,
					 bool write)
{
	int ret;
	unsigned long first, last;
	unsigned long nr;
	struct frame_vector *vec;
	unsigned int flags = FOLL_FORCE;

	if (write)
		flags |= FOLL_WRITE;

	first = start >> PAGE_SHIFT;
	last = (start + length - 1) >> PAGE_SHIFT;
	nr = last - first + 1;
	vec = frame_vector_create(nr);
	if (!vec)
		return ERR_PTR(-ENOMEM);
	ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
	if (ret < 0)
		goto out_destroy;
	/* We accept only complete set of PFNs */
	if (ret != nr) {
		ret = -EFAULT;
		goto out_release;
	}
	return vec;
out_release:
	put_vaddr_frames(vec);
out_destroy:
	frame_vector_destroy(vec);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(vb2_create_framevec);

/**
 * vb2_destroy_framevec() - release vector of mapped pfns
 * @vec:	vector of pfns / pages to release
 *
 * This releases references to all pages in the vector @vec (if corresponding
 * pfns are backed by pages) and frees the passed vector.
 */
void vb2_destroy_framevec(struct frame_vector *vec)
{
	put_vaddr_frames(vec);
	frame_vector_destroy(vec);
}
EXPORT_SYMBOL(vb2_destroy_framevec);
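/*
 * Usage sketch (not part of this file): these two helpers are the common
 * entry points the vb2 memory allocators use to pin a USERPTR range. The
 * demo_* name below is an assumption; 'write' must be true when the device
 * will write into the buffer, so pages can be dirtied on release.
 */
static int demo_pin_user_range(unsigned long vaddr, unsigned long size)
{
	struct frame_vector *vec;

	vec = vb2_create_framevec(vaddr, size, true);
	if (IS_ERR(vec))
		return PTR_ERR(vec);

	/* ... map frame_vector_pages(vec) for the device here ... */

	vb2_destroy_framevec(vec);	/* drop the page references again */
	return 0;
}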

/**
 * vb2_common_vm_open() - increase refcount of the vma
 * @vma:	virtual memory region for the mapping
 *
 * This function adds another user to the provided vma. It expects
 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
 */
static void vb2_common_vm_open(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;

	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
		 __func__, h, refcount_read(h->refcount), vma->vm_start,
		 vma->vm_end);

	refcount_inc(h->refcount);
}

/**
 * vb2_common_vm_close() - decrease refcount of the vma
 * @vma:	virtual memory region for the mapping
 *
 * This function releases the user from the provided vma. It expects
 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
 */
static void vb2_common_vm_close(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;

	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
		 __func__, h, refcount_read(h->refcount), vma->vm_start,
		 vma->vm_end);

	h->put(h->arg);
}

/*
 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmapped
 * video buffers
 */
const struct vm_operations_struct vb2_common_vm_ops = {
	.open = vb2_common_vm_open,
	.close = vb2_common_vm_close,
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);

MODULE_DESCRIPTION("common memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
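vb2_common_vm_ops only works if the allocator's mmap handler has stashed a struct vb2_vmarea_handler in the vma first. The pattern, mirrored by the vmalloc allocator further down, looks like this sketch (demo_buf is a hypothetical per-buffer struct embedding a vb2_vmarea_handler 'handler' member):

static int demo_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct demo_buf *buf = buf_priv;

	/* ... remap the buffer's pages into the vma here ... */

	vma->vm_private_data = &buf->handler;	/* found again in open/close */
	vma->vm_ops = &vb2_common_vm_ops;
	vma->vm_ops->open(vma);			/* take the first mapping reference */
	return 0;
}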
966
drivers/media/common/videobuf2/videobuf2-v4l2.c
Normal file
@@ -0,0 +1,966 @@
/*
 * videobuf2-v4l2.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>

#include <media/videobuf2-v4l2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					      \
	do {								      \
		if (debug >= level)					      \
			pr_info("vb2-v4l2: %s: " fmt, __func__, ## arg);      \
	} while (0)

/* Flags that are set by the vb2 core */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)

/*
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (b->m.planes == NULL) {
		dprintk(1, "multi-planar buffer passed but planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
		dprintk(1, "incorrect planes array length, expected %d, got %d\n",
			vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}

/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (!V4L2_TYPE_IS_OUTPUT(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == VB2_MEMORY_USERPTR ||
				  b->memory == VB2_MEMORY_DMABUF)
				? b->m.planes[plane].length
				: vb->planes[plane].length;
			bytesused = b->m.planes[plane].bytesused
				? b->m.planes[plane].bytesused : length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == VB2_MEMORY_USERPTR)
			? b->length : vb->planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = timeval_to_ns(&b->timestamp);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
}

static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
	static bool check_once;

	if (check_once)
		return;

	check_once = true;
	WARN_ON(1);

	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
	if (vb->vb2_queue->allow_zero_bytesused)
		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
	else
		pr_warn("use the actual size instead.\n");
}

static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
				    const char *opname)
{
	if (b->type != q->type) {
		dprintk(1, "%s: invalid buffer type\n", opname);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "%s: buffer index out of range\n", opname);
		return -EINVAL;
	}

	if (q->bufs[b->index] == NULL) {
		/* Should never happen */
		dprintk(1, "%s: buffer is NULL\n", opname);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(1, "%s: invalid memory type\n", opname);
		return -EINVAL;
	}

	return __verify_planes_array(q->bufs[b->index], b);
}

/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
{
	struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	/* Copy back data such as timestamp, flags, etc. */
	b->index = vb->index;
	b->type = vb->type;
	b->memory = vb->memory;
	b->bytesused = 0;

	b->flags = vbuf->flags;
	b->field = vbuf->field;
	b->timestamp = ns_to_timeval(vb->timestamp);
	b->timecode = vbuf->timecode;
	b->sequence = vbuf->sequence;
	b->reserved2 = 0;
	b->reserved = 0;

	if (q->is_multiplanar) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		for (plane = 0; plane < vb->num_planes; ++plane) {
			struct v4l2_plane *pdst = &b->m.planes[plane];
			struct vb2_plane *psrc = &vb->planes[plane];

			pdst->bytesused = psrc->bytesused;
			pdst->length = psrc->length;
			if (q->memory == VB2_MEMORY_MMAP)
				pdst->m.mem_offset = psrc->m.offset;
			else if (q->memory == VB2_MEMORY_USERPTR)
				pdst->m.userptr = psrc->m.userptr;
			else if (q->memory == VB2_MEMORY_DMABUF)
				pdst->m.fd = psrc->m.fd;
			pdst->data_offset = psrc->data_offset;
			memset(pdst->reserved, 0, sizeof(pdst->reserved));
		}
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->planes[0].length;
		b->bytesused = vb->planes[0].bytesused;
		if (q->memory == VB2_MEMORY_MMAP)
			b->m.offset = vb->planes[0].m.offset;
		else if (q->memory == VB2_MEMORY_USERPTR)
			b->m.userptr = vb->planes[0].m.userptr;
		else if (q->memory == VB2_MEMORY_DMABUF)
			b->m.fd = vb->planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if (!q->copy_timestamp) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARED:
		b->flags |= V4L2_BUF_FLAG_PREPARED;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
	case VB2_BUF_STATE_REQUEUEING:
		/* nothing */
		break;
	}

	if (vb2_buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	if (!q->is_output &&
	    b->flags & V4L2_BUF_FLAG_DONE &&
	    b->flags & V4L2_BUF_FLAG_LAST)
		q->last_buffer_dequeued = true;
}

/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. It also verifies that struct
 * v4l2_buffer has a valid number of planes.
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb,
			     const void *pb, struct vb2_plane *planes)
{
	struct vb2_queue *q = vb->vb2_queue;
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	unsigned int plane;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(1, "plane parameters verification failed: %d\n", ret);
		return ret;
	}
	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
		/*
		 * If the format's field is ALTERNATE, then the buffer's field
		 * should be either TOP or BOTTOM, not ALTERNATE since that
		 * makes no sense. The driver has to know whether the
		 * buffer represents a top or a bottom field in order to
		 * program any DMA correctly. Using ALTERNATE is wrong, since
		 * that just says that it is either a top or a bottom field,
		 * but not which of the two it is.
		 */
		dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
		return -EINVAL;
	}
	vb->timestamp = 0;
	vbuf->sequence = 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		if (b->memory == VB2_MEMORY_USERPTR) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				planes[plane].length =
					b->m.planes[plane].length;
			}
		}
		if (b->memory == VB2_MEMORY_DMABUF) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				planes[plane].length =
					b->m.planes[plane].length;
			}
		}

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 *
			 * If bytesused == 0 for the output buffer, then fall
			 * back to the full buffer size. In that case
			 * userspace clearly never bothered to set it and
			 * it's a safe assumption that they really meant to
			 * use the full plane sizes.
			 *
			 * Some drivers, e.g. old codec drivers, use bytesused == 0
			 * as a way to indicate that streaming is finished.
			 * In that case, the driver should use the
			 * allow_zero_bytesused flag to keep old userspace
			 * applications working.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct vb2_plane *pdst = &planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				if (psrc->bytesused == 0)
					vb2_warn_zero_bytesused(vb);

				if (vb->vb2_queue->allow_zero_bytesused)
					pdst->bytesused = psrc->bytesused;
				else
					pdst->bytesused = psrc->bytesused ?
						psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal v4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0 for the output buffer, then fall back
		 * to the full buffer size as that's a sensible default.
		 *
		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
		 * a way to indicate that streaming is finished. In that case,
		 * the driver should use the allow_zero_bytesused flag to keep
		 * old userspace applications working.
		 */
		if (b->memory == VB2_MEMORY_USERPTR) {
			planes[0].m.userptr = b->m.userptr;
			planes[0].length = b->length;
		}

		if (b->memory == VB2_MEMORY_DMABUF) {
			planes[0].m.fd = b->m.fd;
			planes[0].length = b->length;
		}

		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			if (b->bytesused == 0)
				vb2_warn_zero_bytesused(vb);

			if (vb->vb2_queue->allow_zero_bytesused)
				planes[0].bytesused = b->bytesused;
			else
				planes[0].bytesused = b->bytesused ?
					b->bytesused : planes[0].length;
		} else
			planes[0].bytesused = 0;

	}

	/* Zero flags that the vb2 core handles */
	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vbuf->field = b->field;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
		/* Zero last flag, this is a signal from driver to userspace */
		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
	}

	return 0;
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array	= __verify_planes_array_core,
	.fill_user_buffer	= __fill_v4l2_buffer,
	.fill_vb2_buffer	= __fill_vb2_buffer,
	.copy_timestamp		= __copy_timestamp,
};

/*
 * vb2_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(1, "wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		vb2_core_querybuf(q, b->index, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);
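/*
 * Usage sketch (not part of this file): a driver that does not use the
 * vb2_ioctl_querybuf() helper further down wraps vb2_querybuf() in a
 * one-line handler; the demo_* names are assumptions.
 */
static int demo_vidioc_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *b)
{
	struct demo_dev *dev = video_drvdata(file);

	return vb2_querybuf(&dev->queue, b);
}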

int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = vb2_verify_memory_type(q, req->memory, req->type);

	return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);

int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(1, "file io in progress\n");
		return -EBUSY;
	}

	ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");

	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);

int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned requested_planes = 1;
	unsigned requested_sizes[VIDEO_MAX_PLANES];
	struct v4l2_format *f = &create->format;
	int ret = vb2_verify_memory_type(q, create->memory, f->type);
	unsigned i;

	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		requested_planes = f->fmt.pix_mp.num_planes;
		if (requested_planes == 0 ||
		    requested_planes > VIDEO_MAX_PLANES)
			return -EINVAL;
		for (i = 0; i < requested_planes; i++)
			requested_sizes[i] =
				f->fmt.pix_mp.plane_fmt[i].sizeimage;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		requested_sizes[0] = f->fmt.pix.sizeimage;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.vbi.samples_per_line *
			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.sliced.io_size;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		requested_sizes[0] = f->fmt.sdr.buffersize;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
		requested_sizes[0] = f->fmt.meta.buffersize;
		break;
	default:
		return -EINVAL;
	}
	for (i = 0; i < requested_planes; i++)
		if (requested_sizes[i] == 0)
			return -EINVAL;
	return ret ? ret : vb2_core_create_bufs(q, create->memory,
		&create->count, requested_planes, requested_sizes);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);

int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(1, "file io in progress\n");
		return -EBUSY;
	}

	ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
	return ret ? ret : vb2_core_qbuf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_qbuf);

int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(1, "invalid buffer type\n");
		return -EINVAL;
	}

	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);

	/*
	 * After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be
	 * cleared.
	 */
	b->flags &= ~V4L2_BUF_FLAG_DONE;

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);

int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamon(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamon);

int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
				eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);

int vb2_queue_init(struct vb2_queue *q)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	/* Warn that vb2_memory should match with v4l2_memory */
	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
		return -EINVAL;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);

	q->buf_ops = &v4l2_buf_ops;
	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
	/*
	 * For compatibility with vb1: if QBUF hasn't been called yet, then
	 * return POLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 */
	q->quirk_poll_must_check_waiting_for_buffers = true;

	return vb2_core_queue_init(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_init);
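/*
 * Setup sketch (not part of this file): a typical capture driver fills in
 * the queue like this before calling vb2_queue_init(). The demo_* names
 * are assumptions; demo_buffer embeds struct vb2_v4l2_buffer and demo_qops
 * is the driver's vb2_ops.
 */
static int demo_init_queue(struct demo_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct demo_buffer);
	q->ops = &demo_qops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &dev->mutex;		/* serializes all queue ioctls */

	return vb2_queue_init(q);
}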

void vb2_queue_release(struct vb2_queue *q)
{
	vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	unsigned int res = 0;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			res = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
	}

	return res | vb2_core_poll(q, file, wait);
}
EXPORT_SYMBOL_GPL(vb2_poll);

/*
 * The following functions are not part of the vb2 core API, but are helper
 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
 * and struct vb2_ops.
 * They contain boilerplate code that most if not all drivers have to do
 * and so they simplify the driver code.
 */

/* The queue is busy if there is an owner and you are not that owner. */
static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
{
	return vdev->queue->owner && vdev->queue->owner != file->private_data;
}

/* vb2 ioctl helpers */

int vb2_ioctl_reqbufs(struct file *file, void *priv,
		      struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);

	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
	/*
	 * If count == 0, then the owner has released all buffers and is
	 * no longer owner of the queue. Otherwise we have a new owner.
	 */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);

int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory,
					 p->format.type);

	p->index = vdev->queue->num_buffers;
	/*
	 * If count == 0, then just check if memory and type are valid.
	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;

	res = vb2_create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);

int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);

/* v4l2_file_operations helpers */

int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	if (lock)
		mutex_lock(lock);
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	if (lock)
		mutex_unlock(lock);
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;

	return _vb2_fop_release(file, lock);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);

ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		      size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_WRITE))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
			file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);

ssize_t vb2_fop_read(struct file *file, char __user *buf,
		     size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_READ))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_read(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_read);

unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	unsigned res;
	void *fileio;

	/*
	 * If this helper doesn't know how to lock, then you shouldn't be using
	 * it but you should write your own.
	 */
	WARN_ON(!lock);

	if (lock && mutex_lock_interruptible(lock))
		return POLLERR;

	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (!fileio && q->fileio)
		q->owner = file->private_data;
	if (lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);

#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif

/* vb2_ops helpers. Only use if vq->lock is non-NULL. */

void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
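/*
 * Wiring sketch (not part of this file): with the helpers above a driver
 * can delegate nearly all buffer plumbing to vb2. Only the vb2_* names
 * come from this file; the demo_* tables are assumptions. When vq->lock
 * is set, vb2_ops_wait_prepare/vb2_ops_wait_finish slot straight into the
 * driver's vb2_ops as the wait_prepare/wait_finish callbacks.
 */
static const struct v4l2_file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};

static const struct v4l2_ioctl_ops demo_ioctl_ops = {
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
	/* format/standard handlers remain driver-specific */
};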

MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");
452
drivers/media/common/videobuf2/videobuf2-vmalloc.c
Normal file
@@ -0,0 +1,452 @@
/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	refcount_set(&buf->refcount, 1);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	return refcount_read(&buf->refcount);
}

static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.map = vb2_vmalloc_dmabuf_ops_kmap,
	.map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
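/*
 * Selection sketch (not part of this file): a driver opts into this
 * allocator by pointing its queue at the table above during queue setup,
 * e.g.:
 *
 *	q->mem_ops = &vb2_vmalloc_memops;
 *
 * Buffers then come from vmalloc_user(): kernel-virtually contiguous but
 * not DMA-contiguous, which suits devices that copy with the CPU (e.g.
 * USB bridges) rather than DMA into the buffer directly.
 */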

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");