Răsfoiți Sursa

msm: camera: sync: Add support for dma fences

Add support to create, release, signal and import
a dma fence. When a sync object is imported for a dma fence,
the dma fence is signaled with the appropriate status when
the sync object is signaled. This is achieved by implementing
a generic fence scheme. The change also adds support
for batched fences.

CRs-Fixed: 3207212
Change-Id: I23fbc0992281cfee0303e213a30bce138ae7bdff
Signed-off-by: Karthik Anantha Ram <[email protected]>
Karthik Anantha Ram 3 ani în urmă
părinte
comite
a130199cd3

+ 1 - 0
Kbuild

@@ -89,6 +89,7 @@ camera-y := \
 	drivers/cam_smmu/cam_smmu_api.o \
 	drivers/cam_sync/cam_sync.o \
 	drivers/cam_sync/cam_sync_util.o \
+	drivers/cam_sync/cam_sync_dma_fence.o \
 	drivers/cam_cpas/cpas_top/cam_cpastop_hw.o \
 	drivers/cam_cpas/camss_top/cam_camsstop_hw.o \
 	drivers/cam_cpas/cam_cpas_soc.o \

Fișier diff suprimat deoarece este prea mare
+ 799 - 62
drivers/cam_sync/cam_sync.c


+ 668 - 0
drivers/cam_sync/cam_sync_dma_fence.c

@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include "cam_sync_dma_fence.h"
+
+/**
+ * struct cam_dma_fence_row - DMA fence row
+ *
+ * One entry of the dma fence table; each row is protected by the
+ * corresponding row spinlock in struct cam_dma_fence_device.
+ *
+ * @name                   : Name of the dma fence (debug aid)
+ * @fence                  : dma fence tracked by this row
+ * @fd                     : File descriptor associated with the fence
+ * @state                  : Row state (INVALID/ACTIVE/SIGNALED)
+ * @fence_cb               : dma_fence callback node used when a cb is
+ *                           registered on behalf of a sync object
+ * @sync_obj               : Sync object associated with this fence
+ * @sync_cb                : Callback invoked to signal the sync object
+ *                           when the dma fence is signaled
+ * @cb_registered_for_sync : Set once fence_cb/sync_cb are registered
+ * @ext_dma_fence          : Set if the fence was imported from outside
+ *                           the camera driver (via sync file)
+ * @sync_signal_dma        : Set when the fence is signaled by its sync
+ *                           object, so the dma->sync cb is skipped
+ */
+struct cam_dma_fence_row {
+	char                            name[CAM_DMA_FENCE_NAME_LEN];
+	struct dma_fence               *fence;
+	int32_t                         fd;
+	enum cam_dma_fence_state        state;
+	struct dma_fence_cb             fence_cb;
+	int32_t                         sync_obj;
+	cam_sync_callback_for_dma_fence sync_cb;
+	bool                            cb_registered_for_sync;
+	bool                            ext_dma_fence;
+	bool                            sync_signal_dma;
+};
+
+/**
+ * struct cam_dma_fence_device - DMA fence device
+ *
+ * Global bookkeeping for all camera dma fences.
+ *
+ * @dma_fence_context : dma fence context allocated for this driver
+ * @rows              : Table of tracked dma fences
+ * @row_spinlocks     : Per-row locks protecting the entries in @rows
+ * @dev_lock          : Device-level lock (open/close serialization)
+ * @bitmap            : Bitmap of in-use row indices
+ */
+struct cam_dma_fence_device {
+	uint64_t dma_fence_context;
+	struct cam_dma_fence_row rows[CAM_DMA_FENCE_MAX_FENCES];
+	spinlock_t row_spinlocks[CAM_DMA_FENCE_MAX_FENCES];
+	struct mutex dev_lock;
+	DECLARE_BITMAP(bitmap, CAM_DMA_FENCE_MAX_FENCES);
+};
+
+static atomic64_t g_cam_dma_fence_seq_no;
+static struct cam_dma_fence_device *g_cam_dma_fence_dev;
+
+/*
+ * dma_fence_ops.enable_signaling callback.
+ * Camera dma fences are signaled explicitly via dma_fence_signal(),
+ * so signaling is unconditionally enabled. Marked static: only
+ * referenced through cam_sync_dma_fence_ops in this file.
+ */
+static bool __cam_dma_fence_enable_signaling(
+	struct dma_fence *fence)
+{
+	return true;
+}
+
+/*
+ * dma_fence_ops.get_driver_name/.get_timeline_name callback.
+ * Marked static: only referenced through cam_sync_dma_fence_ops.
+ */
+static const char *__cam_dma_fence_get_driver_name(
+	struct dma_fence *fence)
+{
+	return "Camera DMA fence driver";
+}
+
+/*
+ * dma_fence_ops.release callback - invoked when the fence refcount
+ * drops to zero. Frees both the fence and the spinlock allocated for
+ * it in __cam_dma_fence_get_fd(). Marked static: only referenced
+ * through cam_sync_dma_fence_ops.
+ */
+static void __cam_dma_fence_free(struct dma_fence *fence)
+{
+	CAM_DBG(CAM_DMA_FENCE,
+		"Free memory for dma fence seqno: %llu", fence->seqno);
+	kfree(fence->lock);
+	kfree(fence);
+}
+
+/*
+ * Fence ops for camera-created dma fences. Declared const since the
+ * table is never modified and dma_fence_init() takes a pointer to
+ * const struct dma_fence_ops.
+ */
+static const struct dma_fence_ops cam_sync_dma_fence_ops = {
+	.enable_signaling = __cam_dma_fence_enable_signaling,
+	.get_driver_name = __cam_dma_fence_get_driver_name,
+	.get_timeline_name = __cam_dma_fence_get_driver_name,
+	.release = __cam_dma_fence_free,
+};
+
+/*
+ * Debug helper: dump all in-use rows of the dma fence table.
+ * Unused rows have state INVALID and a NULL fence pointer; the
+ * original code dereferenced fence->seqno unconditionally, which is a
+ * NULL pointer dereference for every free row - skip them instead.
+ */
+static void __cam_dma_fence_print_table(void)
+{
+	int i;
+	struct cam_dma_fence_row *row;
+	struct dma_fence *fence;
+
+	for (i = 0; i < CAM_DMA_FENCE_MAX_FENCES; i++) {
+		spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]);
+		row = &g_cam_dma_fence_dev->rows[i];
+		fence = row->fence;
+		if ((row->state == CAM_DMA_FENCE_STATE_INVALID) || !fence) {
+			spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]);
+			continue;
+		}
+		CAM_INFO(CAM_DMA_FENCE,
+			"Idx: %d seqno: %llu name: %s state: %d",
+			i, fence->seqno, row->name, row->state);
+		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]);
+	}
+}
+
+/*
+ * Claim the first free row in the dma fence table.
+ * On success *idx holds the claimed index and its bitmap bit is set;
+ * returns -ENOMEM (and dumps the table) when the table is exhausted.
+ */
+static int __cam_dma_fence_find_free_idx(uint32_t *idx)
+{
+	uint32_t bit;
+
+	bit = find_first_zero_bit(g_cam_dma_fence_dev->bitmap, CAM_DMA_FENCE_MAX_FENCES);
+	if (bit >= CAM_DMA_FENCE_MAX_FENCES) {
+		CAM_ERR(CAM_DMA_FENCE, "No free idx, printing dma fence table......");
+		__cam_dma_fence_print_table();
+		return -ENOMEM;
+	}
+
+	set_bit(bit, g_cam_dma_fence_dev->bitmap);
+	*idx = bit;
+	return 0;
+}
+
+/*
+ * Linear scan of the table for a live row matching @fd.
+ * Each row is inspected under its own spinlock; on a match *idx is set
+ * and the row's fence is returned (no reference is taken here), NULL
+ * when no row owns the fd.
+ */
+static struct dma_fence *__cam_dma_fence_find_fence_in_table(
+	int32_t fd, int32_t *idx)
+{
+	int i;
+	struct dma_fence *found = NULL;
+	struct cam_dma_fence_row *entry;
+
+	for (i = 0; i < CAM_DMA_FENCE_MAX_FENCES; i++) {
+		spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]);
+		entry = &g_cam_dma_fence_dev->rows[i];
+		if ((entry->fd == fd) &&
+			(entry->state != CAM_DMA_FENCE_STATE_INVALID)) {
+			found = entry->fence;
+			*idx = i;
+		}
+		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]);
+
+		if (found)
+			break;
+	}
+
+	return found;
+}
+
+/*
+ * Initialize table row @idx for @dma_fence under the row lock:
+ * reset the entry, record name/fd/origin and mark it ACTIVE.
+ */
+static void __cam_dma_fence_init_row(const char *name,
+	struct dma_fence *dma_fence, int32_t fd, uint32_t idx,
+	bool ext_dma_fence)
+{
+	struct cam_dma_fence_row *entry = &g_cam_dma_fence_dev->rows[idx];
+
+	spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]);
+	memset(entry, 0, sizeof(*entry));
+	strscpy(entry->name, name, CAM_DMA_FENCE_NAME_LEN);
+	entry->fence = dma_fence;
+	entry->fd = fd;
+	entry->ext_dma_fence = ext_dma_fence;
+	entry->state = CAM_DMA_FENCE_STATE_ACTIVE;
+	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]);
+}
+
+/*
+ * dma fence signal callback, registered via dma_fence_add_callback()
+ * in cam_dma_fence_register_cb(). Propagates the dma fence signal to
+ * the associated sync object through the registered sync_cb, unless
+ * the signal originated from the sync object itself.
+ *
+ * NOTE(review): the row fields are read here without taking the row
+ * spinlock; only the fence lock is held by the dma-fence core when
+ * this runs - confirm no writer races on the row at this point.
+ */
+void __cam_dma_fence_signal_cb(
+	struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct cam_dma_fence_signal_sync_obj signal_sync_obj;
+	/* Recover the owning table row from the embedded cb node */
+	struct cam_dma_fence_row *dma_fence_row =
+		container_of(cb, struct cam_dma_fence_row, fence_cb);
+
+	/* A registered cb should never fire on a freed/invalid row */
+	if (dma_fence_row->state == CAM_DMA_FENCE_STATE_INVALID) {
+		CAM_ERR(CAM_DMA_FENCE, "dma fence seqno: %llu is in invalid state: %d",
+			fence->seqno, dma_fence_row->state);
+		return;
+	}
+
+	/* If this dma fence is signaled by sync obj, skip cb */
+	if (dma_fence_row->sync_signal_dma)
+		return;
+
+	CAM_DBG(CAM_DMA_FENCE, "dma fence seqno: %llu fd: %d signaled, signal sync obj: %d",
+		fence->seqno, dma_fence_row->fd, dma_fence_row->sync_obj);
+	if ((dma_fence_row->cb_registered_for_sync) && (dma_fence_row->sync_cb)) {
+		signal_sync_obj.fd = dma_fence_row->fd;
+
+		/*
+		 * Signal is invoked with the fence lock held,
+		 * lock not needed to query status
+		 */
+		signal_sync_obj.status = dma_fence_get_status_locked(fence);
+		dma_fence_row->state = CAM_DMA_FENCE_STATE_SIGNALED;
+		dma_fence_row->sync_cb(dma_fence_row->sync_obj, &signal_sync_obj);
+	}
+}
+
+/*
+ * Take (get_or_put == true) or drop (false) a reference on the dma
+ * fence tracked at @dma_fence_row_idx. The get/put is performed under
+ * the row lock; returns -EINVAL for an out-of-range index or an
+ * invalid row, 0 otherwise.
+ */
+int cam_dma_fence_get_put_ref(
+	bool get_or_put, int32_t dma_fence_row_idx)
+{
+	struct dma_fence *fence;
+	struct cam_dma_fence_row *entry;
+	spinlock_t *row_lock;
+
+	if ((dma_fence_row_idx < 0) ||
+		(dma_fence_row_idx >= CAM_DMA_FENCE_MAX_FENCES)) {
+		CAM_ERR(CAM_DMA_FENCE, "dma fence idx: %d is invalid",
+			dma_fence_row_idx);
+		return -EINVAL;
+	}
+
+	row_lock = &g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx];
+	spin_lock_bh(row_lock);
+	entry = &g_cam_dma_fence_dev->rows[dma_fence_row_idx];
+
+	if (entry->state == CAM_DMA_FENCE_STATE_INVALID) {
+		CAM_ERR(CAM_DMA_FENCE,
+			"dma fence at idx: %d is in invalid state: %d",
+			dma_fence_row_idx, entry->state);
+		spin_unlock_bh(row_lock);
+		return -EINVAL;
+	}
+
+	fence = entry->fence;
+	if (get_or_put)
+		dma_fence_get(fence);
+	else
+		dma_fence_put(fence);
+	spin_unlock_bh(row_lock);
+
+	CAM_DBG(CAM_DMA_FENCE, "Refcnt: %u after %s for dma fence with seqno: %llu",
+		kref_read(&fence->refcount), (get_or_put ? "getref" : "putref"),
+		fence->seqno);
+
+	return 0;
+}
+
+/*
+ * Import an external dma fence from a sync file fd and register it in
+ * the table as an external fence.
+ *
+ * On success returns the fence (reference from sync_file_get_fence()
+ * is retained and presumably owned by the table row - confirm against
+ * the release path) and sets *dma_fence_row_idx. Returns
+ * ERR_PTR(-EINVAL) when the fd has no fence, or NULL when no table
+ * row is free (caller checks with IS_ERR_OR_NULL, so both are
+ * handled; note the mixed NULL/ERR_PTR convention).
+ */
+static struct dma_fence *cam_dma_fence_get_fence_from_sync_file(
+	int32_t fd, int32_t *dma_fence_row_idx)
+{
+	uint32_t idx;
+	struct dma_fence *dma_fence = NULL;
+
+	/* Takes a reference on the fence backing the sync file */
+	dma_fence = sync_file_get_fence(fd);
+	if (IS_ERR_OR_NULL(dma_fence)) {
+		CAM_ERR(CAM_DMA_FENCE, "Invalid fd: %d no dma fence found", fd);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (__cam_dma_fence_find_free_idx(&idx)) {
+		CAM_ERR(CAM_DMA_FENCE, "No free idx");
+		goto end;
+	}
+
+	/* Track as external: driver must not free its backing memory */
+	__cam_dma_fence_init_row(dma_fence->ops->get_driver_name(dma_fence),
+		dma_fence, fd, idx, true);
+	*dma_fence_row_idx = idx;
+	CAM_DBG(CAM_DMA_FENCE,
+		"External dma fence with fd: %d seqno: %llu ref_cnt: %u updated in tbl",
+		fd, dma_fence->seqno, kref_read(&dma_fence->refcount));
+
+	return dma_fence;
+
+end:
+	/* Drop the reference taken by sync_file_get_fence() */
+	dma_fence_put(dma_fence);
+	return NULL;
+}
+
+/*
+ * Resolve @fd to a dma fence, taking a reference for the caller.
+ * Fences already tracked in the table are returned directly; an
+ * unknown fd is treated as an external fence and imported via its
+ * sync file. Returns the fence, or NULL/ERR_PTR on failure.
+ */
+struct dma_fence *cam_dma_fence_get_fence_from_fd(
+	int32_t fd, int32_t *dma_fence_row_idx)
+{
+	struct dma_fence *fence;
+
+	fence = __cam_dma_fence_find_fence_in_table(fd, dma_fence_row_idx);
+	if (!IS_ERR_OR_NULL(fence)) {
+		dma_fence_get(fence);
+		CAM_DBG(CAM_DMA_FENCE, "dma fence found for fd: %d with seqno: %llu ref_cnt: %u",
+			fd, fence->seqno, kref_read(&fence->refcount));
+		return fence;
+	}
+
+	CAM_WARN(CAM_DMA_FENCE,
+		"dma fence with fd: %d is an external fence, querying sync file",
+		fd);
+	return cam_dma_fence_get_fence_from_sync_file(fd, dma_fence_row_idx);
+}
+
+/*
+ * Register a dma fence callback that signals @sync_obj when the dma
+ * fence at row *dma_fence_idx is signaled.
+ *
+ * The registration (state check, dma_fence_add_callback, row cb
+ * bookkeeping) is done atomically under the row spinlock. Returns 0
+ * on success (including the already-registered case), -EINVAL for bad
+ * arguments, an out-of-range index or a non-active row, or the error
+ * from dma_fence_add_callback() (which fails if the fence is already
+ * signaled).
+ */
+int cam_dma_fence_register_cb(int32_t *sync_obj, int32_t *dma_fence_idx,
+	cam_sync_callback_for_dma_fence sync_cb)
+{
+	int rc = 0;
+	int dma_fence_row_idx = 0;
+	struct cam_dma_fence_row *row = NULL;
+	struct dma_fence *dma_fence = NULL;
+
+	if (!sync_obj || !dma_fence_idx || !sync_cb) {
+		CAM_ERR(CAM_DMA_FENCE,
+			"Invalid args sync_obj: %p dma_fence_idx: %p sync_cb: %p",
+			sync_obj, dma_fence_idx, sync_cb);
+		return -EINVAL;
+	}
+
+	dma_fence_row_idx = *dma_fence_idx;
+	if ((dma_fence_row_idx < 0) ||
+		(dma_fence_row_idx >= CAM_DMA_FENCE_MAX_FENCES)) {
+		CAM_ERR(CAM_DMA_FENCE, "dma fence idx: %d is invalid",
+			dma_fence_row_idx);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
+	row = &g_cam_dma_fence_dev->rows[dma_fence_row_idx];
+	dma_fence = row->fence;
+
+	/* Only an active (unsignaled, in-use) fence can take a cb */
+	if (row->state != CAM_DMA_FENCE_STATE_ACTIVE) {
+		CAM_ERR(CAM_DMA_FENCE,
+			"dma fence at idx: %d fd: %d seqno: %llu is not active, current state: %d",
+			dma_fence_row_idx, row->fd, dma_fence->seqno, row->state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/**
+	 * If the cb is already registered, return
+	 * If a fd is closed by userspace without releasing the dma fence, it is
+	 * possible that same fd is returned to a new fence.
+	 */
+	if (row->cb_registered_for_sync) {
+		CAM_WARN(CAM_DMA_FENCE,
+			"dma fence at idx: %d fd: %d seqno: %llu has already registered a cb for sync: %d - same fd for 2 fences?",
+			dma_fence_row_idx, row->fd, dma_fence->seqno, row->sync_obj);
+		goto end;
+	}
+
+	rc = dma_fence_add_callback(row->fence, &row->fence_cb,
+		__cam_dma_fence_signal_cb);
+	if (rc) {
+		CAM_ERR(CAM_DMA_FENCE,
+			"Failed to register cb for dma fence fd: %d seqno: %llu rc: %d",
+			row->fd, dma_fence->seqno, rc);
+		goto end;
+	}
+
+	/* Record the sync association used by __cam_dma_fence_signal_cb */
+	row->cb_registered_for_sync = true;
+	row->sync_obj = *sync_obj;
+	row->sync_cb = sync_cb;
+
+	CAM_DBG(CAM_DMA_FENCE,
+		"CB successfully registered for dma fence fd: %d seqno: %llu for sync_obj: %d",
+		row->fd, dma_fence->seqno, *sync_obj);
+
+end:
+	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
+	return rc;
+}
+
+/*
+ * Signal @dma_fence, attaching @status as its error code when
+ * non-zero. A fence that is already signaled is reported with a
+ * warning and treated as success (returns 0); otherwise the result
+ * of dma_fence_signal() is returned.
+ */
+static int __cam_dma_fence_signal_fence(
+	struct dma_fence *dma_fence,
+	int32_t status)
+{
+	if (dma_fence_is_signaled(dma_fence)) {
+		CAM_WARN(CAM_DMA_FENCE,
+			"dma fence seqno: %llu is already signaled",
+			dma_fence->seqno);
+		return 0;
+	}
+
+	/* A non-zero status marks the fence as signaled with error */
+	if (status)
+		dma_fence_set_error(dma_fence, status);
+
+	return dma_fence_signal(dma_fence);
+}
+
+/*
+ * Signal the dma fence at @dma_fence_row_idx on behalf of its sync
+ * object. Sets sync_signal_dma so __cam_dma_fence_signal_cb() does
+ * not loop the signal back into the sync layer, then signals the
+ * fence with the status carried in @signal_dma_fence.
+ *
+ * Returns 0 on success or if the fence was already signaled, -EINVAL
+ * for a bad index or a row without a valid fence.
+ */
+int cam_dma_fence_internal_signal(
+	int32_t dma_fence_row_idx,
+	struct cam_dma_fence_signal *signal_dma_fence)
+{
+	int rc = 0;
+	struct dma_fence *dma_fence = NULL;
+	struct cam_dma_fence_row *row = NULL;
+
+	if ((dma_fence_row_idx < 0) ||
+		(dma_fence_row_idx >= CAM_DMA_FENCE_MAX_FENCES)) {
+		CAM_ERR(CAM_DMA_FENCE, "dma fence idx: %d is invalid",
+			dma_fence_row_idx);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
+	row = &g_cam_dma_fence_dev->rows[dma_fence_row_idx];
+
+	/* Ensures sync obj cb is not invoked */
+	/* NOTE(review): flag is set even on the invalid-fence error path
+	 * below; harmless for an empty row but confirm that is intended.
+	 */
+	row->sync_signal_dma = true;
+	dma_fence = row->fence;
+
+	if (IS_ERR_OR_NULL(dma_fence)) {
+		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
+		CAM_ERR(CAM_DMA_FENCE, "DMA fence in row: %d is invalid",
+			dma_fence_row_idx);
+		return -EINVAL;
+	}
+
+	/* Idempotent: a second signal on the same row is a no-op */
+	if (row->state == CAM_DMA_FENCE_STATE_SIGNALED) {
+		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
+		CAM_WARN(CAM_DMA_FENCE,
+			"dma fence fd: %d[seqno: %llu] already in signaled state",
+			signal_dma_fence->dma_fence_fd, dma_fence->seqno);
+		return 0;
+	}
+
+	rc = __cam_dma_fence_signal_fence(dma_fence, signal_dma_fence->status);
+	if (rc)
+		CAM_WARN(CAM_DMA_FENCE,
+			"dma fence seqno: %llu fd: %d already signaled rc: %d",
+			dma_fence->seqno, row->fd, rc);
+
+	row->state = CAM_DMA_FENCE_STATE_SIGNALED;
+	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_fence_row_idx]);
+
+	CAM_DBG(CAM_DMA_FENCE,
+		"dma fence fd: %d[seqno: %llu] signaled with status: %d rc: %d",
+		signal_dma_fence->dma_fence_fd, dma_fence->seqno,
+		signal_dma_fence->status, rc);
+
+	return rc;
+}
+
+/*
+ * Signal a dma fence identified by fd [userspace API].
+ *
+ * Looks the fd up in the table, then signals the fence with the
+ * status from @signal_dma_fence under the row lock. Returns 0 on
+ * success or if already signaled, -EINVAL when the fd is unknown.
+ *
+ * Fix: idx was declared uint32_t but passed to
+ * __cam_dma_fence_find_fence_in_table(), which takes int32_t * -
+ * an incompatible pointer type. Declare it int32_t.
+ */
+int cam_dma_fence_signal_fd(struct cam_dma_fence_signal *signal_dma_fence)
+{
+	int rc = 0;
+	int32_t idx = 0;
+	struct dma_fence *dma_fence = NULL;
+	struct cam_dma_fence_row *row = NULL;
+
+	dma_fence = __cam_dma_fence_find_fence_in_table(
+		signal_dma_fence->dma_fence_fd, &idx);
+
+	if (IS_ERR_OR_NULL(dma_fence)) {
+		CAM_ERR(CAM_DMA_FENCE, "Failed to find dma fence for fd: %d",
+			signal_dma_fence->dma_fence_fd);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]);
+	row = &g_cam_dma_fence_dev->rows[idx];
+	/* Idempotent: a second signal on the same row is a no-op */
+	if (row->state == CAM_DMA_FENCE_STATE_SIGNALED) {
+		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]);
+		CAM_WARN(CAM_DMA_FENCE,
+			"dma fence fd: %d[seqno: %llu] already in signaled state",
+			signal_dma_fence->dma_fence_fd, dma_fence->seqno);
+		return 0;
+	}
+
+	rc = __cam_dma_fence_signal_fence(dma_fence, signal_dma_fence->status);
+	if (rc)
+		CAM_WARN(CAM_DMA_FENCE,
+			"dma fence seqno: %llu fd: %d already signaled rc: %d",
+			dma_fence->seqno, row->fd, rc);
+
+	row->state = CAM_DMA_FENCE_STATE_SIGNALED;
+	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[idx]);
+
+	CAM_DBG(CAM_DMA_FENCE,
+		"dma fence fd: %d[seqno: %llu] signaled with status: %d rc: %d",
+		signal_dma_fence->dma_fence_fd, dma_fence->seqno,
+		signal_dma_fence->status, rc);
+
+	return rc;
+}
+
+/*
+ * Allocate a new camera dma fence and expose it as a sync-file fd.
+ *
+ * Claims a table row, allocates the fence and its dedicated spinlock
+ * (both freed together in __cam_dma_fence_free when the last ref
+ * drops), wraps the fence in a sync file installed on a new fd, and
+ * records everything in the table row. Returns the fd (>= 0) on
+ * success, -1 on any failure with all partial state rolled back.
+ */
+static int __cam_dma_fence_get_fd(int32_t *row_idx,
+	const char *name)
+{
+	int fd = -1;
+	uint32_t idx;
+	struct dma_fence *dma_fence = NULL;
+	spinlock_t       *dma_fence_lock = NULL;
+	struct sync_file *sync_file = NULL;
+
+	if (__cam_dma_fence_find_free_idx(&idx))
+		goto end;
+
+	/* Per-fence lock, owned by the fence and freed in its release op */
+	dma_fence_lock = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
+	if (!dma_fence_lock)
+		goto free_idx;
+
+	dma_fence = kzalloc(sizeof(struct dma_fence), GFP_KERNEL);
+	if (!dma_fence) {
+		kfree(dma_fence_lock);
+		goto free_idx;
+	}
+
+	/* Fence starts with one reference, held via the table row */
+	spin_lock_init(dma_fence_lock);
+	dma_fence_init(dma_fence, &cam_sync_dma_fence_ops, dma_fence_lock,
+		g_cam_dma_fence_dev->dma_fence_context,
+		atomic64_inc_return(&g_cam_dma_fence_seq_no));
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0) {
+		CAM_ERR(CAM_DMA_FENCE, "failed to get a unused fd: %d", fd);
+		/* Drops the initial ref; release op frees fence + lock */
+		dma_fence_put(dma_fence);
+		goto free_idx;
+	}
+
+	/* sync_file_create takes its own reference on the fence */
+	sync_file = sync_file_create(dma_fence);
+	if (!sync_file) {
+		put_unused_fd(fd);
+		fd = -1;
+		dma_fence_put(dma_fence);
+		goto free_idx;
+	}
+
+	fd_install(fd, sync_file->file);
+
+	*row_idx = idx;
+	__cam_dma_fence_init_row(name, dma_fence, fd, idx, false);
+
+	CAM_DBG(CAM_DMA_FENCE, "Created dma fence fd: %d[%s] seqno: %llu row_idx: %u ref_cnt: %u",
+		fd, name, dma_fence->seqno, idx, kref_read(&dma_fence->refcount));
+
+	return fd;
+
+free_idx:
+	/* Return the claimed row to the free pool */
+	clear_bit(idx, g_cam_dma_fence_dev->bitmap);
+end:
+	return fd;
+}
+
+/*
+ * Create a new camera dma fence and return its fd and table row idx
+ * through the output pointers. Returns 0 on success, -EINVAL for NULL
+ * outputs, -EBADFD when fd creation fails.
+ */
+int cam_dma_fence_create_fd(
+	int32_t *dma_fence_fd, int32_t *dma_fence_row_idx, const char *name)
+{
+	int fd;
+
+	if (!dma_fence_fd || !dma_fence_row_idx) {
+		CAM_ERR(CAM_DMA_FENCE, "Invalid args fd: %pK dma_fence_row_idx: %pK",
+			dma_fence_fd, dma_fence_row_idx);
+		return -EINVAL;
+	}
+
+	fd = __cam_dma_fence_get_fd(dma_fence_row_idx, name);
+	if (fd < 0)
+		return -EBADFD;
+
+	*dma_fence_fd = fd;
+	return 0;
+}
+
+/*
+ * Release the dma fence tracked at @dma_row_idx.
+ *
+ * Under the row lock: an unsignaled fence is force-signaled with
+ * -ECANCELED so waiters are not stranded, the row's fence reference
+ * is dropped, and the row is returned to the free pool. Always
+ * returns 0.
+ */
+static int __cam_dma_fence_release(int32_t dma_row_idx)
+{
+	struct dma_fence *dma_fence = NULL;
+	struct cam_dma_fence_row *row = NULL;
+
+	spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_row_idx]);
+	row = &g_cam_dma_fence_dev->rows[dma_row_idx];
+	dma_fence = row->fence;
+
+	/* Never release an active fence silently - cancel it first */
+	if (row->state == CAM_DMA_FENCE_STATE_ACTIVE) {
+		CAM_WARN(CAM_DMA_FENCE,
+			"Unsignaled fence being released name: %s seqno: %llu fd:%d",
+			row->name, dma_fence->seqno, row->fd);
+		__cam_dma_fence_signal_fence(dma_fence, -ECANCELED);
+	}
+
+	CAM_DBG(CAM_DMA_FENCE,
+		"Releasing dma fence with fd: %d[%s] row_idx: %u current ref_cnt: %u",
+		row->fd, row->name, dma_row_idx, kref_read(&dma_fence->refcount));
+
+	/* putref on dma fence */
+	dma_fence_put(dma_fence);
+
+	/* deinit row */
+	memset(row, 0, sizeof(struct cam_dma_fence_row));
+	clear_bit(dma_row_idx, g_cam_dma_fence_dev->bitmap);
+	spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[dma_row_idx]);
+	return 0;
+}
+
+/*
+ * Release a dma fence identified by its fd: resolve the fd to a table
+ * row, then release that row. Returns -EINVAL for an unknown fd.
+ */
+static int __cam_dma_fence_release_fd(int fd)
+{
+	struct dma_fence *fence;
+	int32_t idx;
+
+	fence = __cam_dma_fence_find_fence_in_table(fd, &idx);
+	if (IS_ERR_OR_NULL(fence)) {
+		CAM_ERR(CAM_DMA_FENCE, "Failed to find dma fence for fd: %d", fd);
+		return -EINVAL;
+	}
+
+	return __cam_dma_fence_release(idx);
+}
+
+/*
+ * Release a dma fence identified directly by its table row index.
+ * Returns -EINVAL for an out-of-range index.
+ */
+static int __cam_dma_fence_release_row(
+	int32_t dma_fence_row_idx)
+{
+	if ((dma_fence_row_idx >= 0) &&
+		(dma_fence_row_idx < CAM_DMA_FENCE_MAX_FENCES))
+		return __cam_dma_fence_release(dma_fence_row_idx);
+
+	CAM_ERR(CAM_DMA_FENCE, "dma fence idx: %d is invalid",
+		dma_fence_row_idx);
+	return -EINVAL;
+}
+
+/*
+ * Release a dma fence; use_row_idx selects which union member of the
+ * release params (row idx or fd) identifies the fence.
+ */
+int cam_dma_fence_release(
+	struct cam_dma_fence_release_params *release_params)
+{
+	return release_params->use_row_idx ?
+		__cam_dma_fence_release_row(release_params->u.dma_row_idx) :
+		__cam_dma_fence_release_fd(release_params->u.dma_fence_fd);
+}
+
+/*
+ * Device close: tear down every in-use row of the dma fence table.
+ * For each live row, any registered dma fence callback is removed;
+ * camera-created fences are force-signaled with -EADV (if still
+ * unsignaled) and their table reference dropped. External (imported)
+ * fences are not put here - presumably their reference is dropped
+ * elsewhere (e.g. on sync obj teardown); confirm against the sync
+ * layer's release path.
+ */
+void cam_dma_fence_close(void)
+{
+	int i;
+	struct cam_dma_fence_row *row = NULL;
+
+	mutex_lock(&g_cam_dma_fence_dev->dev_lock);
+	for (i = 0; i < CAM_DMA_FENCE_MAX_FENCES; i++) {
+		spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]);
+
+		row = &g_cam_dma_fence_dev->rows[i];
+		if (row->state != CAM_DMA_FENCE_STATE_INVALID) {
+			CAM_DBG(CAM_DMA_FENCE,
+				"Releasing dma fence seqno: %llu associated with fd: %d[%s] ref_cnt: %u",
+				row->fence->seqno, row->fd, row->name,
+				kref_read(&row->fence->refcount));
+
+			/* If registered for cb, remove cb */
+			if (row->cb_registered_for_sync)
+				dma_fence_remove_callback(row->fence, &row->fence_cb);
+
+			/* Signal and put if the dma fence is created from camera */
+			if (!row->ext_dma_fence) {
+				if (row->state != CAM_DMA_FENCE_STATE_SIGNALED)
+					__cam_dma_fence_signal_fence(row->fence, -EADV);
+				dma_fence_put(row->fence);
+			}
+
+			/* Return the row to the free pool */
+			memset(row, 0, sizeof(struct cam_dma_fence_row));
+			clear_bit(i, g_cam_dma_fence_dev->bitmap);
+		}
+		spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[i]);
+	}
+
+	mutex_unlock(&g_cam_dma_fence_dev->dev_lock);
+	CAM_DBG(CAM_DMA_FENCE, "Close on Camera DMA fence driver");
+}
+
+/*
+ * Device open: restart the dma fence sequence number allocation for
+ * the new session, serialized against close via dev_lock.
+ */
+void cam_dma_fence_open(void)
+{
+	mutex_lock(&g_cam_dma_fence_dev->dev_lock);
+	atomic64_set(&g_cam_dma_fence_seq_no, 0);
+	mutex_unlock(&g_cam_dma_fence_dev->dev_lock);
+	CAM_DBG(CAM_DMA_FENCE, "Camera DMA fence driver opened");
+}
+
+/*
+ * Module init: allocate the global dma fence device, initialize its
+ * locks and allocate a dma fence context. Returns -ENOMEM on
+ * allocation failure, 0 otherwise.
+ *
+ * The original code memset the rows/bitmap and called bitmap_zero()
+ * after kzalloc() - all three were redundant, since kzalloc() already
+ * returns zeroed memory; they are removed here.
+ */
+int cam_dma_fence_driver_init(void)
+{
+	int i;
+
+	g_cam_dma_fence_dev = kzalloc(sizeof(struct cam_dma_fence_device), GFP_KERNEL);
+	if (!g_cam_dma_fence_dev)
+		return -ENOMEM;
+
+	mutex_init(&g_cam_dma_fence_dev->dev_lock);
+	for (i = 0; i < CAM_DMA_FENCE_MAX_FENCES; i++)
+		spin_lock_init(&g_cam_dma_fence_dev->row_spinlocks[i]);
+
+	g_cam_dma_fence_dev->dma_fence_context = dma_fence_context_alloc(1);
+
+	CAM_DBG(CAM_DMA_FENCE, "Camera DMA fence driver initialized");
+	return 0;
+}
+
+/*
+ * Module deinit: free the global dma fence device. kfree(NULL) is a
+ * no-op, so a second call is harmless.
+ */
+void cam_dma_fence_driver_deinit(void)
+{
+	kfree(g_cam_dma_fence_dev);
+	g_cam_dma_fence_dev = NULL;
+	CAM_DBG(CAM_DMA_FENCE, "Camera DMA fence driver deinitialized");
+}

+ 178 - 0
drivers/cam_sync/cam_sync_dma_fence.h

@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __CAM_SYNC_DMA_FENCE_H__
+#define __CAM_SYNC_DMA_FENCE_H__
+
+#include <linux/dma-fence.h>
+#include <linux/spinlock_types.h>
+#include <linux/sync_file.h>
+#include <linux/file.h>
+#include <linux/bitmap.h>
+
+#include "cam_sync.h"
+#include "cam_debug_util.h"
+
+#define CAM_DMA_FENCE_MAX_FENCES  128
+#define CAM_DMA_FENCE_NAME_LEN    128
+
+/* DMA fence state:
+ * INVALID  - table row is unused/free
+ * ACTIVE   - fence created or imported, not yet signaled
+ * SIGNALED - fence has been signaled
+ */
+enum cam_dma_fence_state {
+	CAM_DMA_FENCE_STATE_INVALID,
+	CAM_DMA_FENCE_STATE_ACTIVE,
+	CAM_DMA_FENCE_STATE_SIGNALED,
+};
+
+/**
+ * struct cam_dma_fence_release_params - DMA fence release payload
+ *                     use_row_idx selects which union member
+ *                     identifies the fence to release
+ *
+ * @dma_row_idx      : DMA fence row idx (consumed if use_row_idx set)
+ * @dma_fence_fd     : DMA fence fd (consumed if use_row_idx clear)
+ * @use_row_idx      : Selects row idx over fd
+ */
+struct cam_dma_fence_release_params {
+	union {
+		int32_t dma_row_idx;
+		int32_t dma_fence_fd;
+	} u;
+	bool use_row_idx;
+};
+
+/**
+ * struct cam_dma_fence_signal_sync_obj - DMA -> sync signal info
+ *                           Payload passed to the sync layer when a
+ *                           dma fence is signaled, so the associated
+ *                           sync object can be signaled to match
+ *
+ * @status                 : DMA fence status (0 on success, negative
+ *                           error code otherwise)
+ * @fd                     : DMA fence fd if any
+ */
+struct cam_dma_fence_signal_sync_obj {
+	int32_t status;
+	int32_t fd;
+};
+
+/* DMA fence callback function type: invoked when a dma fence is
+ * signaled, to propagate the signal info to the given sync object.
+ */
+typedef int (*cam_sync_callback_for_dma_fence)(int32_t sync_obj,
+	struct cam_dma_fence_signal_sync_obj *signal_sync_obj);
+
+/**
+ * struct cam_dma_fence_create_sync_obj_payload -
+ *                           Payload to create a sync object for an
+ *                           existing dma fence
+ *
+ * @dma_fence_row_idx      : DMA fence row idx in the dma fence table
+ * @fd                     : DMA fence fd
+ * @sync_created_with_dma  : Set if dma fence and sync obj are being
+ *                           created in a single IOCTL call
+ */
+struct cam_dma_fence_create_sync_obj_payload {
+	int32_t dma_fence_row_idx;
+	int32_t fd;
+	bool    sync_created_with_dma;
+};
+
+/**
+ * @brief: Signal a dma fence fd [userspace API]
+ *
+ * @param signal_dma_fence: Info on DMA fence to be signaled
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_dma_fence_signal_fd(
+	struct cam_dma_fence_signal *signal_dma_fence);
+
+/**
+ * @brief: Signal a dma fence when sync obj is signaled
+ *
+ * @param dma_fence_row_idx : DMA fence row idx
+ * @param signal_dma_fence  : Info on DMA fence to be signaled
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_dma_fence_internal_signal(int32_t dma_fence_row_idx,
+	struct cam_dma_fence_signal *signal_dma_fence);
+
+/**
+ * @brief: Create a dma fence fd
+ *
+ * @param  name              : DMA fence name, optional param
+ *                             will accommodate names of length
+ *                             CAM_DMA_FENCE_NAME_LEN
+ * @output dma_fence_fd      : DMA fence fd
+ * @output dma_fence_row_idx : Row Idx corresponding to DMA fence in the table
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_dma_fence_create_fd(
+	int32_t *dma_fence_fd, int32_t *dma_fence_row_idx, const char *name);
+
+/**
+ * @brief: Release a dma fence
+ *
+ * @param release_params : dma fence info to be released
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_dma_fence_release(
+	struct cam_dma_fence_release_params *release_params);
+
+/**
+ * @brief: Gets the dma fence from a fd, increments refcnt
+ *
+ * @param  fd                : File descriptor
+ * @output dma_fence_row_idx : Row idx pertaining to this dma fence
+ *
+ * @return Status of operation. Error or valid fence.
+ */
+struct dma_fence *cam_dma_fence_get_fence_from_fd(int32_t fd,
+	int32_t *dma_fence_row_idx);
+
+/**
+ * @brief: DMA fence register cb
+ *
+ * @param sync_obj      : Sync object
+ * @param dma_fence_idx : DMA fence row idx
+ * @param sync_cb       : Sync object callback
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_dma_fence_register_cb(int32_t *sync_obj,
+	int32_t *dma_fence_row_idx, cam_sync_callback_for_dma_fence sync_cb);
+
+/**
+ * @brief: get/put on dma fence
+ *
+ * @param get_or_put        : True for get, false for put
+ * @param dma_fence_row_idx : Idx in the dma fence table pertaining to
+ *                            the dma fence on which get/put ref is invoked
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_dma_fence_get_put_ref(bool get_or_put, int32_t dma_fence_row_idx);
+
+/**
+ * @brief: dma fence driver open
+ *
+ */
+void cam_dma_fence_open(void);
+
+/**
+ * @brief: dma fence driver close
+ *
+ */
+void cam_dma_fence_close(void);
+
+/**
+ * @brief: dma fence driver initialize
+ *
+ */
+int cam_dma_fence_driver_init(void);
+
+/**
+ * @brief: dma fence driver deinit
+ *
+ */
+void cam_dma_fence_driver_deinit(void);
+
+#endif /* __CAM_SYNC_DMA_FENCE_H__ */

+ 34 - 13
drivers/cam_sync/cam_sync_private.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef __CAM_SYNC_PRIVATE_H__
@@ -17,6 +18,7 @@
 #include <media/v4l2-event.h>
 #include <media/v4l2-ioctl.h>
 #include "cam_sync_api.h"
+#include "cam_sync_dma_fence.h"
 
 #if IS_REACHABLE(CONFIG_MSM_GLOBAL_SYNX)
 #include <synx_api.h>
@@ -30,6 +32,7 @@
 
 #define CAM_SYNC_OBJ_NAME_LEN           64
 #define CAM_SYNC_MAX_OBJS               2048
+#define CAM_GENERIC_FENCE_BATCH_MAX     10
 #define CAM_SYNC_MAX_V4L2_EVENTS        250
 #define CAM_SYNC_DEBUG_FILENAME         "cam_debug"
 #define CAM_SYNC_DEBUG_BASEDIR          "cam"
@@ -124,22 +127,38 @@ struct sync_user_payload {
 	struct list_head list;
 };
 
+/**
+ * struct sync_dma_fence_info - DMA fence info associated with this sync obj
+ *
+ * @dma_fence_fd          : DMA fence fd
+ * @dma_fence_row_idx     : Index of the row corresponding to this dma fence
+ *                          in the dma fence table
+ * @sync_created_with_dma : If sync obj and dma fence are created together
+ */
+struct sync_dma_fence_info {
+	int32_t dma_fence_fd;
+	int32_t dma_fence_row_idx;
+	bool    sync_created_with_dma;
+};
+
 /**
  * struct sync_table_row - Single row of information about a sync object, used
  * for internal book keeping in the sync driver
  *
- * @name              : Optional string representation of the sync object
- * @type              : Type of the sync object (individual or group)
- * @sync_id           : Integer id representing this sync object
- * @parents_list      : Linked list of parents of this sync object
- * @children_list     : Linked list of children of this sync object
- * @state             : State (INVALID, ACTIVE, SIGNALED_SUCCESS or
- *                      SIGNALED_ERROR)
- * @remaining         : Count of remaining children that not been signaled
- * @signaled          : Completion variable on which block calls will wait
- * @callback_list     : Linked list of kernel callbacks registered
- * @user_payload_list : LInked list of user space payloads registered
- * @ref_cnt           : ref count of the number of usage of the fence.
+ * @name               : Optional string representation of the sync object
+ * @type               : Type of the sync object (individual or group)
+ * @sync_id            : Integer id representing this sync object
+ * @parents_list       : Linked list of parents of this sync object
+ * @children_list      : Linked list of children of this sync object
+ * @state              : State (INVALID, ACTIVE, SIGNALED_SUCCESS or
+ *                       SIGNALED_ERROR)
+ * @remaining          : Count of remaining children that not been signaled
+ * @signaled           : Completion variable on which block calls will wait
+ * @callback_list      : Linked list of kernel callbacks registered
+ * @user_payload_list  : Linked list of user space payloads registered
+ * @ref_cnt            : ref count of the number of usage of the fence.
+ * @ext_fence_mask     : Mask to indicate associated external fence types
+ * @dma_fence_info     : dma fence info if associated
  */
 struct sync_table_row {
 	char name[CAM_SYNC_OBJ_NAME_LEN];
@@ -155,6 +174,8 @@ struct sync_table_row {
 	struct list_head callback_list;
 	struct list_head user_payload_list;
 	atomic_t ref_cnt;
+	unsigned long ext_fence_mask;
+	struct sync_dma_fence_info dma_fence_info;
 };
 
 /**

+ 19 - 1
drivers/cam_sync/cam_sync_util.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "cam_sync_util.h"
@@ -147,7 +148,8 @@ clean_children_info:
 	return rc;
 }
 
-int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
+int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx,
+	struct cam_sync_check_for_dma_release *check_for_dma_release)
 {
 	struct sync_table_row      *row = table + idx;
 	struct sync_child_info     *child_info, *temp_child;
@@ -278,6 +280,22 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
 		kfree(sync_cb);
 	}
 
+	/* Decrement ref cnt for imported dma fence */
+	if (test_bit(CAM_GENERIC_FENCE_TYPE_DMA_FENCE, &row->ext_fence_mask)) {
+		cam_dma_fence_get_put_ref(false, row->dma_fence_info.dma_fence_row_idx);
+
+		/* Check if same dma fence is being released with the sync obj */
+		if (check_for_dma_release) {
+			if (row->dma_fence_info.dma_fence_fd ==
+				check_for_dma_release->dma_fence_fd) {
+				check_for_dma_release->sync_created_with_dma =
+					row->dma_fence_info.sync_created_with_dma;
+				check_for_dma_release->dma_fence_row_idx =
+					row->dma_fence_info.dma_fence_row_idx;
+			}
+		}
+	}
+
 	memset(row, 0, sizeof(*row));
 	clear_bit(idx, sync_dev->bitmap);
 	INIT_LIST_HEAD(&row->callback_list);

+ 25 - 6
drivers/cam_sync/cam_sync_util.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef __CAM_SYNC_UTIL_H__
@@ -12,6 +13,24 @@
 
 extern struct sync_device *sync_dev;
 
+/**
+ * struct cam_sync_check_for_dma_release -
+ *                          Checks if the dma fence being released
+ *                          was created with the sync obj
+ *
+ * @dma_fence_row_idx     : Get DMA fence row idx that is associated with
+ *                          the sync obj
+ * @dma_fence_fd          : Check if DMA fence fd is associated with
+ *                          sync obj
+ * @sync_created_with_dma : Set if the dma fence fd was created
+ *                          with sync obj
+ */
+struct cam_sync_check_for_dma_release {
+	int32_t dma_fence_row_idx;
+	int32_t dma_fence_fd;
+	bool sync_created_with_dma;
+};
+
 /**
  * @brief: Finds an empty row in the sync table and sets its corresponding bit
  * in the bit array
@@ -42,12 +61,14 @@ int cam_sync_init_row(struct sync_table_row *table,
 /**
  * @brief: Function to uninitialize a row in the sync table
  *
- * @param table : Pointer to the sync objects table
- * @param idx   : Index of row to initialize
+ * @param table                          : Pointer to the sync objects table
+ * @param idx                            : Index of row to initialize
+ * @optional param check_for_dma_release : checks for dma fence release
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx);
+int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx,
+	struct cam_sync_check_for_dma_release *check_for_dma_release);
 
 /**
  * @brief: Function to initialize a row in the sync table when the object is a
@@ -66,8 +87,6 @@ int cam_sync_init_group_object(struct sync_table_row *table,
 	uint32_t *sync_objs,
 	uint32_t num_objs);
 
-int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx);
-
 /**
  * @brief: Function to dispatch a kernel callback for a sync callback
  *

+ 5 - 1
drivers/cam_utils/cam_debug_util.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _CAM_DEBUG_UTIL_H_
@@ -52,6 +53,7 @@ enum cam_debug_module_id {
 	CAM_CRE,                 /* bit 31 */
 	CAM_PRESIL_CORE,         /* bit 32 */
 	CAM_TPG,                 /* bit 33 */
+	CAM_DMA_FENCE,           /* bit 34 */
 	CAM_DBG_MOD_MAX
 };
 
@@ -109,6 +111,7 @@ static const char *cam_debug_mod_name[CAM_DBG_MOD_MAX] = {
 	[CAM_CRE]         = "CAM-CRE",
 	[CAM_PRESIL_CORE] = "CAM-CORE-PRESIL",
 	[CAM_TPG]         = "CAM-TPG",
+	[CAM_DMA_FENCE]   = "CAM_DMA_FENCE",
 };
 
 #define ___CAM_DBG_MOD_NAME(module_id)                                      \
@@ -146,7 +149,8 @@ __builtin_choose_expr(((module_id) == CAM_SFE), "CAM-SFE",                  \
 __builtin_choose_expr(((module_id) == CAM_CRE), "CAM-CRE",                  \
 __builtin_choose_expr(((module_id) == CAM_PRESIL_CORE), "CAM-CORE-PRESIL",  \
 __builtin_choose_expr(((module_id) == CAM_TPG), "CAM-TPG",                  \
-"CAMERA"))))))))))))))))))))))))))))))))))
+__builtin_choose_expr(((module_id) == CAM_DMA_FENCE), "CAM-DMA-FENCE",      \
+"CAMERA")))))))))))))))))))))))))))))))))))
 
 #define CAM_DBG_MOD_NAME(module_id) \
 ((module_id < CAM_DBG_MOD_MAX) ? cam_debug_mod_name[module_id] : "CAMERA")

Unele fișiere nu au fost afișate deoarece prea multe fișiere au fost modificate în acest diff