
Merge remote-tracking branch 'origin/display-kernel.lnx.5.15' into display-kernel.lnx.1.0

* origin/display-kernel.lnx.5.15:
  mm-drivers: configure max driver instances based on build config
  mm-drivers: hw_fence: avoid signal during reset for signaled hw fences
  mm-drivers: hw_fence: modify hw fence queue payload structure
  mm-drivers: hw-fence: fix static analysis issue
  mm-drivers: hw_fence: add bounds check for hw fence deregistration

Change-Id: I6ebfcde295704d218e015fdf1e7c0b1575c95a1e
Signed-off-by: Ashwin Pillai <[email protected]>
Ashwin Pillai committed 2 years ago
commit 674640bc0c

+ 27 - 2
hw_fence/include/hw_fence_drv_priv.h

@@ -69,6 +69,12 @@
  */
 #define MSM_HW_FENCE_MAX_JOIN_PARENTS	3
 
+/**
+ * HW_FENCE_PAYLOAD_REV:
+ * Payload version with major and minor version information
+ */
+#define HW_FENCE_PAYLOAD_REV(major, minor) (major << 8 | (minor & 0xFF))
+
 enum hw_fence_lookup_ops {
 	HW_FENCE_LOOKUP_OP_CREATE = 0x1,
 	HW_FENCE_LOOKUP_OP_DESTROY,
@@ -129,6 +135,13 @@ struct msm_hw_fence_queue {
 	phys_addr_t pa_queue;
 };
 
+/**
+ * enum payload_type - Enum with the queue payload types.
+ */
+enum payload_type {
+	HW_FENCE_PAYLOAD_TYPE_1 = 1
+};
+
 /**
  * struct msm_hw_fence_client - Structure holding the per-Client allocated resources.
  * @client_id: id of the client
@@ -319,22 +332,34 @@ struct hw_fence_driver_data {
 
 /**
  * struct msm_hw_fence_queue_payload - hardware fence clients queues payload.
+ * @size: size of queue payload
+ * @type: type of queue payload
+ * @version: version of queue payload. High eight bits are for major and lower eight
+ *           bits are for minor version
  * @ctxt_id: context id of the dma fence
  * @seqno: sequence number of the dma fence
  * @hash: fence hash
  * @flags: see MSM_HW_FENCE_FLAG_* flags descriptions
+ * @client_data: data passed from and returned to waiting client upon fence signaling
  * @error: error code for this fence, fence controller receives this
  *		  error from the signaling client through the tx queue and
  *		  propagates the error to the waiting client through rx queue
- * @timestamp: qtime when the payload is written into the queue
+ * @timestamp_lo: low 32-bits of qtime of when the payload is written into the queue
+ * @timestamp_hi: high 32-bits of qtime of when the payload is written into the queue
  */
 struct msm_hw_fence_queue_payload {
+	u32 size;
+	u16 type;
+	u16 version;
 	u64 ctxt_id;
 	u64 seqno;
 	u64 hash;
 	u64 flags;
+	u64 client_data;
 	u32 error;
-	u32 timestamp;
+	u32 timestamp_lo;
+	u32 timestamp_hi;
+	u32 reserve;
 };
 
 /**

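The payload header added above (size, type, version) lets a queue reader validate an entry before parsing it. Below is a minimal illustrative sketch of decoding the version field packed by HW_FENCE_PAYLOAD_REV(); the helper names are hypothetical and not part of the driver, only the bit layout (major in the high eight bits, minor in the low eight bits) comes from the macro above.

#include <linux/types.h>

/* Illustrative only: decode the version packed by HW_FENCE_PAYLOAD_REV();
 * helper names are hypothetical, the layout matches the macro above.
 */
static inline u8 hw_fence_payload_major(u16 version)
{
	return version >> 8;	/* high eight bits carry the major version */
}

static inline u8 hw_fence_payload_minor(u16 version)
{
	return version & 0xFF;	/* low eight bits carry the minor version */
}

/* Example: HW_FENCE_PAYLOAD_REV(1, 0) packs to 0x0100, i.e. major 1, minor 0. */
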
+ 6 - 4
hw_fence/src/hw_fence_drv_debug.c

@@ -590,9 +590,10 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user
 		hash = readq_relaxed(&read_ptr_payload->hash);
 		flags = readq_relaxed(&read_ptr_payload->flags);
 		error = readl_relaxed(&read_ptr_payload->error);
-		timestamp = readl_relaxed(&read_ptr_payload->timestamp);
+		timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) |
+			((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32);
 
-		HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%u\n",
+		HWFNC_DBG_L("rx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%llu\n",
 			i, hash, ctx_id, seqno, flags, error, timestamp);
 	}
 
@@ -607,8 +608,9 @@ static ssize_t hw_fence_dbg_dump_queues_wr(struct file *file, const char __user
 		hash = readq_relaxed(&read_ptr_payload->hash);
 		flags = readq_relaxed(&read_ptr_payload->flags);
 		error = readl_relaxed(&read_ptr_payload->error);
-		timestamp = readl_relaxed(&read_ptr_payload->timestamp);
-		HWFNC_DBG_L("tx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%u\n",
+		timestamp = (u64)readl_relaxed(&read_ptr_payload->timestamp_lo) |
+			((u64)readl_relaxed(&read_ptr_payload->timestamp_hi) << 32);
+		HWFNC_DBG_L("tx[%d]: hash:%d ctx:%llu seqno:%llu f:%llu err:%u time:%llu\n",
 			i, hash, ctx_id, seqno, flags, error, timestamp);
 	}
 

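The debug dump above now rebuilds the 64-bit qtime from the two 32-bit payload fields. As a standalone sketch of the same split/recombine pattern (plain helpers for illustration, not driver code):

#include <linux/types.h>

/* Illustrative only: split a 64-bit qtime into the timestamp_lo/_hi
 * payload fields and recombine it, mirroring the hunks above and the
 * writer side in hw_fence_update_queue() below.
 */
static inline void qtime_split(u64 qtime, u32 *lo, u32 *hi)
{
	*lo = (u32)qtime;		/* low 32 bits */
	*hi = (u32)(qtime >> 32);	/* high 32 bits */
}

static inline u64 qtime_join(u32 lo, u32 hi)
{
	return (u64)lo | ((u64)hi << 32);
}
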
+ 33 - 17
hw_fence/src/hw_fence_drv_priv.c

@@ -112,10 +112,10 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 		queues[i].pa_queue = qphys;
 		queues[i].va_header = hfi_queue_header;
 		queues[i].q_size_bytes = queue_size;
-		HWFNC_DBG_INIT("init:%s client:%d queue[%d]: va=0x%pK pa=0x%x va_hd:0x%pK sz:%d\n",
+		HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=0x%x hd:0x%pK sz:%u pkt:%d\n",
 			hfi_queue_header->type == HW_FENCE_TX_QUEUE ? "TX_QUEUE" : "RX_QUEUE",
 			client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header,
-			queues[i].q_size_bytes);
+			queues[i].q_size_bytes, payload_size);
 
 		/* Next header */
 		hfi_queue_header++;
@@ -232,10 +232,11 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 	u32 q_size_u32;
 	u32 q_free_u32;
 	u32 *q_payload_write_ptr;
-	u32 payload_size_u32;
+	u32 payload_size, payload_size_u32;
 	struct msm_hw_fence_queue_payload *write_ptr_payload;
 	bool lock_client = false;
 	u32 lock_idx;
+	u64 timestamp;
 	int ret = 0;
 
 	if (queue_type >= HW_FENCE_CLIENT_QUEUES) {
@@ -247,7 +248,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 	hfi_header = queue->va_header;
 
 	q_size_u32 = (queue->q_size_bytes / sizeof(u32));
-	payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32));
+	payload_size = sizeof(struct msm_hw_fence_queue_payload);
+	payload_size_u32 = (payload_size / sizeof(u32));
 
 	if (!hfi_header) {
 		HWFNC_ERR("Invalid queue\n");
@@ -319,12 +321,17 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 		to_write_idx = 0;
 
 	/* Update Client Queue */
+	writeq_relaxed(payload_size, &write_ptr_payload->size);
+	writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type);
+	writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version);
 	writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id);
 	writeq_relaxed(seqno, &write_ptr_payload->seqno);
 	writeq_relaxed(hash, &write_ptr_payload->hash);
 	writeq_relaxed(flags, &write_ptr_payload->flags);
 	writel_relaxed(error, &write_ptr_payload->error);
-	writel_relaxed(hw_fence_get_qtime(drv_data), &write_ptr_payload->timestamp);
+	timestamp = hw_fence_get_qtime(drv_data);
+	writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo);
+	writel_relaxed(timestamp >> 32, &write_ptr_payload->timestamp_hi);
 
 	/* update memory for the message */
 	wmb();
@@ -1284,13 +1291,29 @@ int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
 	return ret;
 }
 
+static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data,
+	struct msm_hw_fence *hw_fence, u64 hash, int error)
+{
+	enum hw_fence_client_id wait_client_id;
+	struct msm_hw_fence_client *hw_fence_wait_client;
+
+	/* signal with an error all the waiting clients for this fence */
+	for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) {
+		if (hw_fence->wait_client_mask & BIT(wait_client_id)) {
+			hw_fence_wait_client = drv_data->clients[wait_client_id];
+
+			if (hw_fence_wait_client)
+				_fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence,
+					hash, 0, error);
+		}
+	}
+}
+
 int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
 	struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
 	u32 reset_flags)
 {
 	int ret = 0;
-	enum hw_fence_client_id wait_client_id;
-	struct msm_hw_fence_client *hw_fence_wait_client;
 	int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 0 : MSM_HW_FENCE_ERROR_RESET;
 
 	GLOBAL_ATOMIC_STORE(&hw_fence->lock, 1); /* lock */
@@ -1307,16 +1330,9 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
 
 	if (hw_fence->fence_allocator == hw_fence_client->client_id) {
 
-		/* signal with an error all the waiting clients for this fence */
-		for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) {
-			if (hw_fence->wait_client_mask & BIT(wait_client_id)) {
-				hw_fence_wait_client = drv_data->clients[wait_client_id];
-
-				if (hw_fence_wait_client)
-					_fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence,
-						hash, 0, error);
-			}
-		}
+		/* if fence is not signaled, signal with error all the waiting clients */
+		if (!(hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL))
+			_signal_all_wait_clients(drv_data, hw_fence, hash, error);
 
 		if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY)
 			goto skip_destroy;

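The new _signal_all_wait_clients() helper factors out the walk over every bit set in wait_client_mask, and the cleanup path now skips it entirely when the fence already carries MSM_HW_FENCE_FLAG_SIGNAL. As an aside, the same iteration could be written with the kernel's for_each_set_bit() helper; the sketch below is only an alternative illustration, not what the driver does, and assumes the mask is held in an unsigned long (HW_FENCE_CLIENT_MAX comes from the driver headers).

#include <linux/bitops.h>
#include <linux/types.h>

/* Sketch only: iterate the set bits of a wait-client mask with
 * for_each_set_bit(); the driver open-codes the equivalent loop.
 */
static void sketch_walk_wait_clients(unsigned long wait_client_mask)
{
	unsigned int client_id;

	for_each_set_bit(client_id, &wait_client_mask, HW_FENCE_CLIENT_MAX) {
		/* look up drv_data->clients[client_id] and signal it here */
	}
}
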
+ 1 - 1
hw_fence/src/hw_fence_ioctl.c

@@ -655,7 +655,7 @@ static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long ar
 	if (!_is_valid_client(obj)) {
 		return -EINVAL;
 	} else if (IS_ERR_OR_NULL(obj->client_handle)) {
-		HWFNC_ERR("client:%d handle doesn't exists\n", data.client_id);
+		HWFNC_ERR("client:%d handle doesn't exists\n", obj->client_id);
 		return -EINVAL;
 	}
 

+ 5 - 0
hw_fence/src/msm_hw_fence.c

@@ -124,6 +124,11 @@ int msm_hw_fence_deregister(void *client_handle)
 	}
 	hw_fence_client = (struct msm_hw_fence_client *)client_handle;
 
+	if (hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) {
+		HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id);
+		return -EINVAL;
+	}
+
 	HWFNC_DBG_H("+\n");
 
 	/* Free all the allocated resources */

+ 9 - 3
sync_fence/src/qcom_sync_file.c

@@ -30,6 +30,12 @@
 #define FENCE_MIN	1
 #define FENCE_MAX	32
 
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	#define MAX_DEVICE_SUPPORTED	2
+#else
+	#define MAX_DEVICE_SUPPORTED	1
+#endif
+
 struct sync_device {
 	/* device info */
 	struct class *dev_class;
@@ -105,9 +111,9 @@ static void clear_fence_array_tracker(bool force_clear)
 
 static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name)
 {
-	if (atomic_read(&obj->device_available) > 1) {
-		pr_err("number of device fds are limited by 2, device opened:%d\n",
-			atomic_read(&obj->device_available));
+	if (atomic_read(&obj->device_available) >= MAX_DEVICE_SUPPORTED) {
+		pr_err("number of device fds are limited to %d, device opened:%d\n",
+			MAX_DEVICE_SUPPORTED, atomic_read(&obj->device_available));
 		return NULL;
 	} else if (!atomic_read(&obj->device_available)) {
 		memset(obj->name, 0, NAME_LEN);
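
With MAX_DEVICE_SUPPORTED selected at build time, the open path simply compares the atomic open counter against that limit. A minimal sketch of the pattern, assuming it runs under the same lock that already serializes spec_fence_init_locked() (illustrative only, not the driver's code):

#include <linux/atomic.h>
#include <linux/types.h>

/* Sketch only: gate device opens against the build-time limit with an
 * atomic counter; assumed to run under the caller's lock, as above.
 */
static bool sketch_try_open(atomic_t *device_available)
{
	if (atomic_read(device_available) >= MAX_DEVICE_SUPPORTED)
		return false;	/* limit for this build configuration reached */

	atomic_inc(device_available);
	return true;
}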