Jelajahi Sumber

Merge "msm: camera: sensor: Add threaded irq support for cci" into camera-kernel.lnx.7.0

Camera Software Integration 1 tahun lalu
induk
melakukan
3e9d2a6729

+ 155 - 107
drivers/cam_sensor_module/cam_cci/cam_cci_core.c

@@ -740,6 +740,130 @@ static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
 	return 0;
 }
 
+/*
+ * cam_cci_data_queue_burst_apply - drain the staged software data_queue for
+ * the given master/queue into the CCI hardware command queue.
+ *
+ * @cci_dev:          CCI device whose registers are programmed
+ * @master:           CCI I2C master index
+ * @queue:            CCI queue index on that master
+ * @triggerHalfQueue: 0 = first call (process context), fill the FULL queue;
+ *                    1 = re-entered from (threaded) IRQ context, refill only
+ *                    up to the precomputed HALF-queue threshold
+ *
+ * Returns 0 on success (including the early return after issuing a
+ * QUEUE_START and arming the threshold IRQ), -EINVAL if no data_queue
+ * has been staged for this master/queue.
+ */
+int32_t cam_cci_data_queue_burst_apply(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master, enum cci_i2c_queue_t queue,
+	uint32_t triggerHalfQueue)
+{
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+	/* One start bit per master/queue pair in CCI_QUEUE_START_ADDR */
+	uint32_t reg_val = 1 << ((master * 2) + queue);
+	uint32_t iterate = 0;
+	uint32_t numBytes = 0;
+	bool condition = false;
+	uint32_t num_word_written_to_queue = 0;
+	uint32_t *data_queue = NULL;
+	uint32_t index = 0;
+	uint32_t reg_offset;
+	uint32_t queue_size = cci_dev->cci_i2c_queue_info[master][queue].max_queue_size;
+	uint32_t numWordsInQueue = 0, queueStartThreshold = 0;
+
+	/* Register blocks are 0x200 apart per master, 0x100 per queue */
+	reg_offset = master * 0x200 + queue * 0x100;
+	data_queue = cci_dev->cci_master_info[master].data_queue[queue];
+	num_word_written_to_queue = cci_dev->cci_master_info[master].num_words_in_data_queue[queue];
+	index = cci_dev->cci_master_info[master].data_queue_start_index[queue];
+	queueStartThreshold = cci_dev->cci_master_info[master].half_queue_mark[queue];
+
+	if (data_queue == NULL)	{
+		CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d data_queue is NULL",
+			cci_dev->soc_info.index, master, queue);
+		return -EINVAL;
+	}
+
+	/*
+	 * The first call to this routine comes from process context and
+	 * executes the FULL queue; every subsequent iteration is invoked from
+	 * IRQ context to process only the HALF queue size given by the
+	 * precomputed value "queueStartThreshold".
+	 */
+	if (triggerHalfQueue == 1) {
+		// Apply HALF QUEUE
+		trace_cam_cci_burst(cci_dev->soc_info.index, master, queue,
+			"thirq raised Buflvl",
+			cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
+		CAM_DBG(CAM_CCI,
+			"CCI%d_I2C_M%d_Q%d Threshold IRQ Raised, BufferLevel: %d",
+			cci_dev->soc_info.index, master, queue,
+			cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset));
+	} else {
+		// Apply FULL QUEUE
+		numWordsInQueue = cam_io_r_mb(base +
+			CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	}
+
+	/*
+	 * Bits [7:4] of a command word carry its byte count; values 0xF/0xE
+	 * appear to mark full-payload / threshold-IRQ-enabled burst commands
+	 * spanning 3 words — NOTE(review): confirm against the CCI command
+	 * encoding spec.
+	 */
+	while (index < num_word_written_to_queue) {
+		numBytes = (data_queue[index] & 0xF0) >> 4;
+		if ((numBytes == 0xF) || (numBytes == 0xE)) {
+		       iterate = 3;
+		} else {
+			numBytes = (numBytes + 4) & ~0x03;
+			iterate = numBytes / 4;
+		}
+		if (numBytes == 0xE) {
+			CAM_DBG(CAM_CCI,
+				"CCI%d_I2C_M%d_Q%d THRESHOLD IRQ Enabled; data_queue[%d]: 0x%x refcnt: %d",
+				cci_dev->soc_info.index, master, queue, index, data_queue[index],
+				cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
+		}
+		/*
+		 * FULL-queue pass: stop when the next command would overflow
+		 * the HW queue. HALF-queue pass: stop at the half-queue
+		 * threshold while threshold IRQs are still outstanding.
+		 */
+		if (triggerHalfQueue == 0) {
+			condition = ((numWordsInQueue + iterate + 1) > queue_size);
+		} else {
+			condition = (cci_dev->cci_master_info[master].th_irq_ref_cnt[queue] > 0) ?
+				(numWordsInQueue >= queueStartThreshold) : 0;
+		}
+
+		if (condition == true) {
+			CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d CUR_WORD_CNT_ADDR %d len %d max %d",
+				cci_dev->soc_info.index, master, queue, numWordsInQueue, iterate, queue_size);
+			if ((cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]) > 0) {
+				/*
+				 * Kick off what is loaded so far; the rest of
+				 * the data_queue is pushed when the threshold
+				 * IRQ re-enters this routine.
+				 */
+				cam_io_w_mb(numWordsInQueue, base +
+					CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+				cam_io_w_mb(reg_val, base + CCI_QUEUE_START_ADDR);
+				triggerHalfQueue = 1;
+				numWordsInQueue = 0;
+				CAM_INFO(CAM_CCI,
+					"CCI%d_I2C_M%d_Q%d Issued QUEUE_START, "
+					"wait for Threshold_IRQ, th_irq_ref_cnt[%d]:%d",
+					cci_dev->soc_info.index, master, queue, queue,
+					cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
+				trace_cam_cci_burst(cci_dev->soc_info.index, master, queue,
+					"Q_START thirq_cnt",
+					cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
+
+				return 0;
+			}
+		} else {
+			while (iterate > 0) {
+				cam_io_w_mb(data_queue[index], base +
+					CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+					master * 0x200 + queue * 0x100);
+				CAM_DBG(CAM_CCI,
+					"CCI%d_I2C_M%d_Q%d LOAD_DATA_ADDR 0x%x, "
+					"index: %d trig: %d numWordsInQueue: %d",
+					cci_dev->soc_info.index, master, queue,
+					data_queue[index], (index + 1),
+					triggerHalfQueue, (numWordsInQueue + 1));
+				numWordsInQueue++;
+				index++;
+				/* Persist resume point for the next IRQ-context pass */
+				cci_dev->cci_master_info[master].data_queue_start_index[queue] = index;
+				iterate--;
+			}
+		}
+	}
+
+	/* Final partial chunk: start whatever remains loaded in the HW queue */
+	if ((numWordsInQueue > 0) && (cci_dev->cci_master_info[master].th_irq_ref_cnt[queue] > 0)) {
+		cam_io_w_mb(numWordsInQueue, base +
+			CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		cam_io_w_mb(reg_val, base + CCI_QUEUE_START_ADDR);
+		CAM_DBG(CAM_CCI,
+			"CCI%d_I2C_M%d_Q%d Issued ****** FINAL QUEUE_START********, "
+			"numWordsInQueue: %d, th_irq_ref_cnt[%d]:%d",
+			cci_dev->soc_info.index, master, queue, queue, numWordsInQueue,
+			cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
+		numWordsInQueue = 0;
+	}
+
+	return 0;
+}
+
 static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 	struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
 	enum cci_i2c_sync sync_en)
@@ -756,7 +880,6 @@ static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 	struct cam_hw_soc_info *soc_info =
 		&cci_dev->soc_info;
 	void __iomem *base = soc_info->reg_map[0].mem_base;
-	uint32_t reg_val = 1 << ((master * 2) + queue);
 	unsigned long flags;
 	uint8_t next_position = i2c_msg->data_type;
 	uint32_t half_queue_mark = 0, full_queue_mark = 0, num_payload = 0;
@@ -765,12 +888,9 @@ static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 	uint8_t data_len = 0, addr_len = 0;
 	uint32_t index = 0;
 	uint8_t *buf = NULL;
-	uint32_t iterate = 0;
-	uint32_t num_bytes = 0;
 	uint32_t last_i2c_full_payload = 0;
-	uint32_t num_words_in_queue = 0;
 	uint32_t trigger_half_queue = 0, queue_start_threshold = 0;
-	bool condition = false;
+	uint32_t en_threshold_irq = 0, cci_enable_th_irq = 0;
 
 	if (i2c_cmd == NULL) {
 		CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d Failed: i2c cmd is NULL",
@@ -804,6 +924,9 @@ static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 	CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d : START for sid: 0x%x size: %d",
 		cci_dev->soc_info.index, master, queue, c_ctrl->cci_info->sid, i2c_msg->size);
 
+	cci_dev->cci_master_info[master].is_burst_enable[queue] = false;
+	cci_dev->cci_master_info[master].num_words_exec[queue] = 0;
+
 	addr_len = cam_cci_convert_type_to_num_bytes(i2c_msg->addr_type);
 	data_len = cam_cci_convert_type_to_num_bytes(i2c_msg->data_type);
 	len = (cmd_size * data_len + addr_len);
@@ -819,7 +942,8 @@ static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 	} else {
 		len = len/4;
 	}
-	/* Its possible that 8 number of CCI cmds, each 32-bit
+	/*
+	 * It is possible that up to 8 CCI commands, each 32-bit,
 	 * can co-exist in QUEUE along with I2C Data
 	 */
 	len = len + 8;
@@ -880,6 +1004,16 @@ static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 		queue_size = max_queue_size / 2;
 	reg_addr = i2c_cmd->reg_addr;
 
+	if (len < queue_size) {
+		CAM_DBG(CAM_CCI,
+			"CCI%d_I2C_M%d_Q%d: len: %d < QueueSize: %d "
+			"No need of threshold IRQ",
+			cci_dev->soc_info.index, master, queue, len, queue_size);
+		cci_enable_th_irq = 0;
+	} else {
+		cci_enable_th_irq = CCI_ENABLE_THRESHOLD_IRQ;
+	}
+
 	if (sync_en == MSM_SYNC_ENABLE && cci_dev->valid_sync &&
 		cmd_size < max_queue_size) {
 		val = CCI_I2C_WAIT_SYNC_CMD |
@@ -983,9 +1117,10 @@ static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 		} while ((cmd_size > 0) && (i <= cci_dev->payload_size));
 
 		num_payload++;
+		en_threshold_irq = cci_enable_th_irq &&
+			(((num_payload % half_queue_mark) == 0) || (num_payload == last_i2c_full_payload));
 		if (cmd_size > 0) {
-			if (((num_payload % half_queue_mark) == 0) ||
-				(num_payload == last_i2c_full_payload)) {
+			if (en_threshold_irq) {
 				buf[0] |= 0xE0;
 				cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]++;
 				CAM_DBG(CAM_CCI,
@@ -1007,9 +1142,9 @@ static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 		num_word_written_to_queue += len;
 	}
 
-	CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d num words to Queue: %d th_irq_ref_cnt: %d",
+	CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d num words to Queue: %d th_irq_ref_cnt: %d cci_dev: %p",
 		cci_dev->soc_info.index, master, queue, num_word_written_to_queue,
-		cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
+		cci_dev->cci_master_info[master].th_irq_ref_cnt[queue], cci_dev);
 
 	trace_cam_cci_burst(cci_dev->soc_info.index, master, queue,
 		"thirq_cnt",
@@ -1017,103 +1152,13 @@ static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 
 	index = 0;
 	queue_start_threshold = half_queue_mark * MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_WORDS;
-	num_words_in_queue = cam_io_r_mb(base +
-		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
 
-	while (index < num_word_written_to_queue) {
-		num_bytes = (data_queue[index] & 0xF0) >> 4;
-		if ((num_bytes == 0xF) || (num_bytes == 0xE)) {
-			iterate = 3;
-		} else {
-			num_bytes = (num_bytes + 4) & ~0x03;
-			iterate = num_bytes / 4;
-		}
-		if (num_bytes == 0xE) {
-			CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d THRESHOLD IRQ Enabled; data_queue[%d]: 0x%x refcnt: %d",
-				cci_dev->soc_info.index, master, queue, index, data_queue[index],
-				cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
-		}
-		if (trigger_half_queue == 0) {
-			condition = ((num_words_in_queue + iterate + 1) > queue_size);
-		} else {
-			condition = (num_words_in_queue >= queue_start_threshold);
-		}
+	cci_dev->cci_master_info[master].data_queue[queue] = data_queue;
+	cci_dev->cci_master_info[master].num_words_in_data_queue[queue] = num_word_written_to_queue;
+	cci_dev->cci_master_info[master].data_queue_start_index[queue] = index;
+	cci_dev->cci_master_info[master].half_queue_mark[queue] = queue_start_threshold;
 
-		if (condition == true) {
-			CAM_DBG(CAM_CCI, "CCI%d_I2C_M%d_Q%d CUR_WORD_CNT_ADDR %d len %d max %d",
-				cci_dev->soc_info.index, master, queue, num_words_in_queue, iterate, max_queue_size);
-			if ((cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]) > 0) {
-				cam_io_w_mb(num_words_in_queue, base +
-					CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
-				cam_io_w_mb(reg_val, base + CCI_QUEUE_START_ADDR);
-				trigger_half_queue = 1;
-				num_words_in_queue = 0;
-				CAM_DBG(CAM_CCI,
-					"CCI%d_I2C_M%d_Q%d Issued QUEUE_START, "
-					"wait for Threshold_IRQ, th_irq_ref_cnt[%d]:%d",
-					cci_dev->soc_info.index, master, queue, queue,
-					cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
-				trace_cam_cci_burst(cci_dev->soc_info.index, master, queue,
-					"Q_START thirq_cnt",
-					cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
-
-				if (!cam_common_wait_for_completion_timeout(
-					&cci_dev->cci_master_info[master].th_burst_complete[queue],
-					CCI_TIMEOUT)) {
-					CAM_ERR(CAM_CCI, "CCI%d_I2C_M%d_Q%d : M0_STATUS: 0x%x",
-						cci_dev->soc_info.index, master, queue,
-						cam_io_r_mb(base + CCI_I2C_M0_STATUS_ADDR + reg_offset));
-					cam_cci_dump_registers(cci_dev, master, queue);
-
-					CAM_ERR(CAM_CCI,
-						"CCI%d_I2C_M%d_Q%d wait timeout, rc: %d",
-						cci_dev->soc_info.index, master, queue, rc);
-					rc = -ETIMEDOUT;
-					cam_cci_flush_queue(cci_dev, master);
-					CAM_INFO(CAM_CCI,
-						"CCI%d_I2C_M%d_Q%d dump register after reset",
-						cci_dev->soc_info.index, master, queue);
-					cam_cci_dump_registers(cci_dev, master, queue);
-					goto ERROR;
-				}
-				cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]--;
-				trace_cam_cci_burst(cci_dev->soc_info.index, master, queue,
-					"thirq raised Buflvl",
-					cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
-				CAM_DBG(CAM_CCI,
-					"CCI%d_I2C_M%d_Q%d Threshold IRQ Raised, BufferLevel: %d",
-					cci_dev->soc_info.index, master, queue,
-					cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset));
-			}
-		} else {
-			while (iterate > 0) {
-				cam_io_w_mb(data_queue[index], base +
-					CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
-					master * 0x200 + queue * 0x100);
-				CAM_DBG(CAM_CCI,
-					"CCI%d_I2C_M%d_Q%d LOAD_DATA_ADDR 0x%x, "
-					"index: %d trig: %d numWordsInQueue: %d",
-					cci_dev->soc_info.index, master, queue,
-					data_queue[index], (index + 1),
-					trigger_half_queue, (num_words_in_queue + 1));
-				num_words_in_queue++;
-				index++;
-				iterate--;
-			}
-		}
-	}
-
-	if (num_words_in_queue > 0) {
-		cam_io_w_mb(num_words_in_queue, base +
-			CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
-		cam_io_w_mb(reg_val, base + CCI_QUEUE_START_ADDR);
-		CAM_DBG(CAM_CCI,
-			"CCI%d_I2C_M%d_Q%d Issued ****** FINAL QUEUE_START********, "
-			"numWordsInQueue: %d, th_irq_ref_cnt[%d]:%d",
-			cci_dev->soc_info.index, master, queue, queue, num_words_in_queue,
-			cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]);
-		num_words_in_queue = 0;
-	}
+	cam_cci_data_queue_burst_apply(cci_dev, master, queue, trigger_half_queue);
 
 	while ((cci_dev->cci_master_info[master].th_irq_ref_cnt[queue]) > 0) {
 		if (!cam_common_wait_for_completion_timeout(
@@ -1139,8 +1184,10 @@ static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 			cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset));
 	}
 
-	cci_dev->cci_master_info[master].is_burst_enable[queue] = true;
-	cci_dev->cci_master_info[master].num_words_exec[queue] = 0;
+	if (cci_dev->cci_master_info[master].th_irq_ref_cnt[queue] > 0) {
+		cci_dev->cci_master_info[master].is_burst_enable[queue] = true;
+		cci_dev->cci_master_info[master].num_words_exec[queue] = 0;
+	}
 
 	rc = cam_cci_transfer_end(cci_dev, master, queue);
 	if (rc < 0) {
@@ -1156,6 +1203,7 @@ static int32_t cam_cci_data_queue_burst(struct cci_device *cci_dev,
 
 ERROR:
 	kfree(data_queue);
+	cci_dev->cci_master_info[master].data_queue[queue] = NULL;
 	return rc;
 }
 

+ 21 - 0
drivers/cam_sensor_module/cam_cci/cam_cci_core.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #ifndef _CAM_CCI_CORE_H_
 #define _CAM_CCI_CORE_H_
@@ -36,4 +37,24 @@ int32_t cam_cci_core_cfg(struct v4l2_subdev *sd,
  */
 irqreturn_t cam_cci_irq(int irq_num, void *data);
 
+/**
+ * @irq_num: IRQ number
+ * @data: CCI private structure
+ *
+ * This API handles CCI Threaded IRQs
+ */
+irqreturn_t cam_cci_threaded_irq(int irq_num, void *data);
+
+/**
+ * @cci_dev: CCI device structure
+ * @master: CCI master index
+ * @queue: CCI master Queue index
+ * @triggerHalfQueue: Flag to execute FULL/HALF Queue
+ *
+ * This API handles I2C operations for CCI
+ */
+int32_t cam_cci_data_queue_burst_apply(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master, enum cci_i2c_queue_t queue,
+	uint32_t triggerHalfQueue);
+
 #endif /* _CAM_CCI_CORE_H_ */

+ 189 - 32
drivers/cam_sensor_module/cam_cci/cam_cci_dev.c

@@ -9,11 +9,31 @@
 #include "cam_cci_soc.h"
 #include "cam_cci_core.h"
 #include "camera_main.h"
+#include "uapi/linux/sched/types.h"
+#include "linux/sched/types.h"
+#include "linux/sched.h"
 
 #define CCI_MAX_DELAY 1000000
+#define QUEUE_SIZE 100
+
+struct cci_irq_data {
+	int32_t  is_valid;
+	uint32_t irq_status0;
+	uint32_t irq_status1;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue;
+};
 
 static struct v4l2_subdev *g_cci_subdev[MAX_CCI] = { 0 };
 static struct dentry *debugfs_root;
+static struct cci_irq_data cci_irq_queue[QUEUE_SIZE] = { 0 };
+static int32_t head;
+static int32_t tail;
+
+static inline int32_t increment_index(int32_t index)
+{
+	return (index + 1) % QUEUE_SIZE;
+}
 
 struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index)
 {
@@ -71,6 +91,8 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
 	unsigned long flags;
 	bool rd_done_th_assert = false;
 	struct cam_cci_master_info *cci_master_info;
+	irqreturn_t rc = IRQ_HANDLED;
+	int32_t  next_head;
 
 	irq_status0 = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
 	irq_status1 = cam_io_r_mb(base + CCI_IRQ_STATUS_1_ADDR);
@@ -185,54 +207,141 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
 	if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_Q0_THRESHOLD)
 	{
 		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
-		spin_lock_irqsave(
-			&cci_master_info->lock_q[QUEUE_0],
-			flags);
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
 		trace_cam_cci_burst(cci_dev->soc_info.index, 1, 0,
-			"th_irq honoured irq1", irq_status1);
-		complete(&cci_master_info->th_burst_complete[QUEUE_0]);
-		spin_unlock_irqrestore(
-			&cci_master_info->lock_q[QUEUE_0],
-			flags);
+			"th_irq honoured irq1",	irq_status1);
+		CAM_DBG(CAM_CCI, "CCI%d_M1_Q0: th_irq honoured irq1: 0x%x th_irq_ref_cnt: %d",
+			cci_dev->soc_info.index, irq_status1,
+			cci_master_info->th_irq_ref_cnt[QUEUE_0]);
+		if (cci_master_info->th_irq_ref_cnt[QUEUE_0] == 1) {
+			complete(&cci_master_info->th_burst_complete[QUEUE_0]);
+		} else {
+			// Decrement Threshold irq ref count
+			cci_master_info->th_irq_ref_cnt[QUEUE_0]--;
+			next_head = increment_index(head);
+			if (next_head == tail) {
+				CAM_ERR(CAM_CCI,
+					"CCI%d_M1_Q0 CPU Scheduling Issue: "
+					"Unable to process BURST",
+					cci_dev->soc_info.index);
+				rc = IRQ_NONE;
+			} else {
+				cci_irq_queue[head].irq_status0 = irq_status0;
+				cci_irq_queue[head].irq_status1 = irq_status1;
+				cci_irq_queue[head].master = MASTER_1;
+				cci_irq_queue[head].queue = QUEUE_0;
+				cci_irq_queue[head].is_valid = 1;
+				head = next_head;
+				// wake up Threaded irq Handler
+				rc = IRQ_WAKE_THREAD;
+			}
+		}
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
 	}
 	if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_Q1_THRESHOLD)
 	{
 		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
-		spin_lock_irqsave(
-			&cci_master_info->lock_q[QUEUE_1],
-			flags);
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
 		trace_cam_cci_burst(cci_dev->soc_info.index, 1, 1,
-			"th_irq honoured irq1", irq_status1);
-		complete(&cci_master_info->th_burst_complete[QUEUE_1]);
-		spin_unlock_irqrestore(
-			&cci_master_info->lock_q[QUEUE_1],
-			flags);
+			"th_irq honoured irq1",	irq_status1);
+		CAM_DBG(CAM_CCI,
+			"CCI%d_M1_Q1: th_irq honoured irq1: 0x%x th_irq_ref_cnt: %d",
+			cci_dev->soc_info.index, irq_status1,
+			cci_master_info->th_irq_ref_cnt[QUEUE_1]);
+		if (cci_master_info->th_irq_ref_cnt[QUEUE_1] == 1) {
+			complete(&cci_master_info->th_burst_complete[QUEUE_1]);
+		} else {
+			// Decrement Threshold irq ref count
+			cci_master_info->th_irq_ref_cnt[QUEUE_1]--;
+			next_head = increment_index(head);
+			if (next_head == tail) {
+				CAM_ERR(CAM_CCI,
+					"CCI%d_M1_Q1 CPU Scheduling Issue: "
+					"Unable to process BURST",
+					cci_dev->soc_info.index);
+				rc = IRQ_NONE;
+			} else {
+				cci_irq_queue[head].irq_status0 = irq_status0;
+				cci_irq_queue[head].irq_status1 = irq_status1;
+				cci_irq_queue[head].master = MASTER_1;
+				cci_irq_queue[head].queue = QUEUE_1;
+				cci_irq_queue[head].is_valid = 1;
+				head = next_head;
+				// wake up Threaded irq Handler
+				rc = IRQ_WAKE_THREAD;
+			}
+		}
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
 	}
 	if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_Q0_THRESHOLD)
 	{
 		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
-		spin_lock_irqsave(
-			&cci_master_info->lock_q[QUEUE_0],
-			flags);
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
 		trace_cam_cci_burst(cci_dev->soc_info.index, 0, 0,
-			"th_irq honoured irq1", irq_status1);
-		complete(&cci_master_info->th_burst_complete[QUEUE_0]);
-		spin_unlock_irqrestore(
-			&cci_master_info->lock_q[QUEUE_0],
-			flags);
+			"th_irq honoured irq1",	irq_status1);
+		CAM_DBG(CAM_CCI,
+			"CCI%d_M0_Q0: th_irq honoured irq1: 0x%x th_irq_ref_cnt: %d",
+			cci_dev->soc_info.index, irq_status1,
+			cci_master_info->th_irq_ref_cnt[QUEUE_0]);
+		if (cci_master_info->th_irq_ref_cnt[QUEUE_0] == 1) {
+			complete(&cci_master_info->th_burst_complete[QUEUE_0]);
+		} else {
+			// Decrement Threshold irq ref count
+			cci_master_info->th_irq_ref_cnt[QUEUE_0]--;
+			next_head = increment_index(head);
+			if (next_head == tail) {
+				CAM_ERR(CAM_CCI,
+					"CCI%d_M0_Q0 CPU Scheduling Issue: "
+					"Unable to process BURST",
+					cci_dev->soc_info.index);
+				rc = IRQ_NONE;
+			} else {
+				cci_irq_queue[head].irq_status0 = irq_status0;
+				cci_irq_queue[head].irq_status1 = irq_status1;
+				cci_irq_queue[head].master = MASTER_0;
+				cci_irq_queue[head].queue = QUEUE_0;
+				cci_irq_queue[head].is_valid = 1;
+				head = next_head;
+				// wake up Threaded irq Handler
+				rc = IRQ_WAKE_THREAD;
+			}
+		}
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
 	}
 	if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_Q1_THRESHOLD)
 	{
 		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
-		spin_lock_irqsave(
-			&cci_master_info->lock_q[QUEUE_1],
-			flags);
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
 		trace_cam_cci_burst(cci_dev->soc_info.index, 0, 1,
-			"th_irq honoured irq1", irq_status1);
-		complete(&cci_master_info->th_burst_complete[QUEUE_1]);
-		spin_unlock_irqrestore(
-			&cci_master_info->lock_q[QUEUE_1],
-			flags);
+			"th_irq honoured irq1",	irq_status1);
+		CAM_DBG(CAM_CCI,
+			"CCI%d_M0_Q1: th_irq honoured irq1: 0x%x th_irq_ref_cnt: %d",
+			cci_dev->soc_info.index, irq_status1,
+			cci_master_info->th_irq_ref_cnt[QUEUE_1]);
+		if (cci_master_info->th_irq_ref_cnt[QUEUE_1] == 1) {
+			complete(&cci_master_info->th_burst_complete[QUEUE_1]);
+		} else {
+			// Decrement Threshold irq ref count
+			cci_master_info->th_irq_ref_cnt[QUEUE_1]--;
+			next_head = increment_index(head);
+			if (next_head == tail) {
+				CAM_ERR(CAM_CCI,
+					"CCI%d_M0_Q1 CPU Scheduling Issue: "
+					"Unable to process BURST",
+					cci_dev->soc_info.index);
+				rc = IRQ_NONE;
+			} else {
+				cci_irq_queue[head].irq_status0 = irq_status0;
+				cci_irq_queue[head].irq_status1 = irq_status1;
+				cci_irq_queue[head].master = MASTER_0;
+				cci_irq_queue[head].queue = QUEUE_1;
+				cci_irq_queue[head].is_valid = 1;
+				head = next_head;
+				// wake up Threaded irq Handler
+				rc = IRQ_WAKE_THREAD;
+			}
+		}
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
 	}
 	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
 		struct cam_cci_master_info *cci_master_info;
@@ -356,6 +465,10 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
 			}
 			cam_cci_dump_registers(cci_dev, MASTER_0,
 					QUEUE_0);
+			if ((cci_dev->cci_master_info[MASTER_0].th_irq_ref_cnt[QUEUE_0]) > 0) {
+				complete_all(&cci_dev->cci_master_info[MASTER_0].
+					th_burst_complete[QUEUE_0]);
+			}
 			complete_all(&cci_dev->cci_master_info[MASTER_0]
 				.report_q[QUEUE_0]);
 		}
@@ -373,6 +486,10 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
 			}
 			cam_cci_dump_registers(cci_dev, MASTER_0,
 					QUEUE_1);
+			if ((cci_dev->cci_master_info[MASTER_0].th_irq_ref_cnt[QUEUE_1]) > 0) {
+				complete_all(&cci_dev->cci_master_info[MASTER_0].
+					th_burst_complete[QUEUE_1]);
+			}
 			complete_all(&cci_dev->cci_master_info[MASTER_0]
 			.report_q[QUEUE_1]);
 		}
@@ -404,6 +521,10 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
 			}
 			cam_cci_dump_registers(cci_dev, MASTER_1,
 					QUEUE_0);
+			if ((cci_dev->cci_master_info[MASTER_1].th_irq_ref_cnt[QUEUE_0]) > 0) {
+				complete_all(&cci_dev->cci_master_info[MASTER_1].
+					th_burst_complete[QUEUE_0]);
+			}
 			complete_all(&cci_dev->cci_master_info[MASTER_1]
 			.report_q[QUEUE_0]);
 		}
@@ -421,6 +542,10 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
 			}
 			cam_cci_dump_registers(cci_dev, MASTER_1,
 				QUEUE_1);
+			if ((cci_dev->cci_master_info[MASTER_1].th_irq_ref_cnt[QUEUE_1]) > 0) {
+				complete_all(&cci_dev->cci_master_info[MASTER_1].
+					th_burst_complete[QUEUE_1]);
+			}
 			complete_all(&cci_dev->cci_master_info[MASTER_1]
 			.report_q[QUEUE_1]);
 		}
@@ -437,6 +562,36 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
 		cam_io_w_mb(CCI_M1_RESET_RMSK, base + CCI_RESET_CMD_ADDR);
 	}
 
+	return rc;
+}
+
+/*
+ * cam_cci_threaded_irq - bottom half of the CCI interrupt.
+ *
+ * Dequeues one threshold event staged by the hard-IRQ top half
+ * (cam_cci_irq returning IRQ_WAKE_THREAD) and resumes the burst by
+ * writing the next HALF-queue chunk of the data queue to hardware.
+ */
+irqreturn_t cam_cci_threaded_irq(int irq_num, void *data)
+{
+	struct cci_device *cci_dev = data;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	struct cci_irq_data cci_data;
+	unsigned long flags;
+	uint32_t triggerHalfQueue = 1;
+	struct task_struct *task = current;
+
+	CAM_INFO(CAM_CCI, "CCI%d: nice: %d rt-Priority: %d cci_dev: %p",
+		soc_info->index, task_nice(current), task->rt_priority, cci_dev);
+	/*
+	 * "head" and "tail" variables are shared across the Top half and
+	 * Bottom Half routines, hence hold lock_status while accessing them.
+	 */
+	spin_lock_irqsave(&cci_dev->lock_status, flags);
+	if (tail != head) {
+		cci_data = cci_irq_queue[tail];
+		tail = increment_index(tail);
+		/*
+		 * Drop the lock while programming hardware (the apply routine
+		 * may take other locks / sleep); re-take it before exiting.
+		 */
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+		cam_cci_data_queue_burst_apply(cci_dev,
+			cci_data.master, cci_data.queue, triggerHalfQueue);
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
+	}
+	spin_unlock_irqrestore(&cci_dev->lock_status, flags);
 	return IRQ_HANDLED;
 }
 
@@ -614,6 +769,8 @@ static int cam_cci_component_bind(struct device *dev,
 		CAM_WARN(CAM_CCI, "debugfs creation failed");
 		rc = 0;
 	}
+	head = 0;
+	tail = 0;
 	CAM_DBG(CAM_CCI, "Component bound successfully");
 	return rc;
 

+ 6 - 0
drivers/cam_sensor_module/cam_cci/cam_cci_dev.h

@@ -55,6 +55,7 @@
 #define CCI_I2C_READ_MAX_RETRIES 3
 #define CCI_I2C_MAX_READ 20480
 #define CCI_I2C_MAX_WRITE 20480
+#define CCI_ENABLE_THRESHOLD_IRQ 1
 #define CCI_I2C_MAX_BYTE_COUNT 65535
 
 #define CAMX_CCI_DEV_NAME "cam-cci-driver"
@@ -146,6 +147,10 @@ struct cam_cci_master_info {
 	uint32_t th_irq_ref_cnt[NUM_QUEUES];
 	bool is_burst_enable[NUM_QUEUES];
 	uint32_t num_words_exec[NUM_QUEUES];
+	uint32_t *data_queue[NUM_QUEUES];
+	uint32_t num_words_in_data_queue[NUM_QUEUES];
+	int32_t data_queue_start_index[NUM_QUEUES];
+	int32_t half_queue_mark[NUM_QUEUES];
 };
 
 struct cam_cci_clk_params_t {
@@ -311,6 +316,7 @@ struct cci_write_async {
 };
 
 irqreturn_t cam_cci_irq(int irq_num, void *data);
+irqreturn_t cam_cci_threaded_irq(int irq_num, void *data);
 
 struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index);
 void cam_cci_dump_registers(struct cci_device *cci_dev,

+ 47 - 1
drivers/cam_sensor_module/cam_cci/cam_cci_soc.c

@@ -6,6 +6,9 @@
 
 #include "cam_cci_dev.h"
 #include "cam_cci_core.h"
+#include "linux/interrupt.h"
+#include "uapi/linux/sched/types.h"
+#include "linux/sched/types.h"
 
 static int cam_cci_init_master(struct cci_device *cci_dev,
 	enum cci_i2c_master_t master)
@@ -392,6 +395,11 @@ int cam_cci_parse_dt_info(struct platform_device *pdev,
 	struct cam_hw_soc_info *soc_info =
 		&new_cci_dev->soc_info;
 	void *irq_data[CAM_SOC_MAX_IRQ_LINES_PER_DEV] = {0};
+	int32_t  num_irq = 0;
+	struct task_struct  *task = NULL;
+	struct irq_desc     *desc = NULL;
+	struct sched_param param = {0};
+
 
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0) {
@@ -403,13 +411,51 @@ int cam_cci_parse_dt_info(struct platform_device *pdev,
 
 	for (i = 0; i < soc_info->irq_count; i++)
 		irq_data[i] = new_cci_dev;
-
+	/*
+	 * Bypass devm_request_irq() and induce
+	 * devm_request_threaded_irq() externally.
+	 */
+	num_irq = soc_info->irq_count;
+	soc_info->irq_count = 0;
 	rc = cam_soc_util_request_platform_resource(soc_info,
 		cam_cci_irq, &(irq_data[0]));
 	if (rc < 0) {
 		CAM_ERR(CAM_CCI, "requesting platform resources failed:%d", rc);
 		return -EINVAL;
 	}
+	soc_info->irq_count = num_irq;
+	for (i = 0; i < soc_info->irq_count; i++) {
+		rc = devm_request_threaded_irq(&pdev->dev,
+			soc_info->irq_num[i],
+			cam_cci_irq,
+			cam_cci_threaded_irq,
+			IRQF_TRIGGER_RISING,
+			soc_info->irq_name[i],
+			(void *)(irq_data[i]));
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "Failed to reserve IRQ: %d", rc);
+			return -EINVAL;
+		}
+		disable_irq(soc_info->irq_num[i]);
+		desc = irq_to_desc(soc_info->irq_num[i]);
+		if (!desc) {
+			CAM_WARN(CAM_CCI,
+				"Unable to locate Descriptor for irq_num: %d",
+				soc_info->irq_num[i]);
+		} else {
+			task = desc->action->thread;
+			param.sched_priority = MAX_RT_PRIO - 1;
+			if (task) {
+				rc = sched_setscheduler(task, SCHED_FIFO, &param);
+				if (rc) {
+					CAM_ERR(CAM_CCI,
+						"non-fatal: Failed to set Scheduler Priority: %d",
+						rc);
+				}
+			}
+		}
+	}
+
 	new_cci_dev->v4l2_dev_str.pdev = pdev;
 	cam_cci_init_cci_params(new_cci_dev);
 	cam_cci_init_clk_params(new_cci_dev);