Browse Source

msm: camera: cci: Enhance burst read timing

In the burst read flow, we normally wait for the CCI irq
after each set of data is read, but the FIFO may already
hold new data that can be read immediately, without
waiting for the irq. This change also optimizes the irq
processing: we reset the irq mask first and then process
the irq status, which avoids a timing issue — e.g. if the
read context runs quickly and enables the read threshold
bit in irq mask1, but the irq context runs late and then
clears the read threshold, we would never receive the
read threshold interrupt again.

CRs-Fixed: 2995920
Change-Id: I8acfadf07b95782725bf4bf8b05b88789874c1da
Signed-off-by: Depeng Shao <[email protected]>
Depeng Shao committed 4 years ago
parent
commit
86c23361e6

+ 11 - 3
drivers/cam_sensor_module/cam_cci/cam_cci_core.c

@@ -1131,9 +1131,10 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
 			CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
 		if (read_words <= 0) {
 			CAM_DBG(CAM_CCI, "FIFO Buffer lvl is 0");
-			continue;
+			goto enable_irq;
 		}
 
+read_again:
 		j++;
 		CAM_DBG(CAM_CCI, "Iteration: %u read_words %d", j, read_words);
 
@@ -1162,11 +1163,18 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
 		CAM_DBG(CAM_CCI, "Iteraion:%u total_read_words %d",
 			j, total_read_words);
 
+		read_words = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+		if (read_words > 0) {
+			CAM_DBG(CAM_CCI, "FIFO Buffer lvl is %d", read_words);
+			goto read_again;
+		}
+
+enable_irq:
 		spin_lock_irqsave(&cci_dev->lock_status, flags);
 		if (cci_dev->irqs_disabled) {
 			irq_mask_update =
-				cam_io_r_mb(base + CCI_IRQ_MASK_1_ADDR) |
-				CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+				cam_io_r_mb(base + CCI_IRQ_MASK_1_ADDR);
 			if (master == MASTER_0 && cci_dev->irqs_disabled &
 				CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)
 				irq_mask_update |=

+ 59 - 58
drivers/cam_sensor_module/cam_cci/cam_cci_dev.c

@@ -77,6 +77,65 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
 		"BASE: %p, irq0:%x irq1:%x",
 		base, irq_status0, irq_status1);
 
+	cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR);
+	cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR);
+
+	reg_bmsk = CCI_IRQ_MASK_1_RMSK;
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) &&
+	!(irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK)) {
+		reg_bmsk &= ~CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
+		cci_dev->irqs_disabled |=
+			CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+	}
+
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) &&
+	!(irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK)) {
+		reg_bmsk &= ~CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
+		cci_dev->irqs_disabled |=
+			CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+	}
+
+	if (reg_bmsk != CCI_IRQ_MASK_1_RMSK) {
+		cam_io_w_mb(reg_bmsk, base + CCI_IRQ_MASK_1_ADDR);
+		CAM_DBG(CAM_CCI, "Updating the reg mask for irq1: 0x%x",
+			reg_bmsk);
+	} else if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK ||
+		irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+		if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) {
+			spin_lock_irqsave(&cci_dev->lock_status, flags);
+			if (cci_dev->irqs_disabled &
+				CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) {
+				irq_update_rd_done |=
+					CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+				cci_dev->irqs_disabled &=
+					~CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+			}
+			spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+		}
+		if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+			spin_lock_irqsave(&cci_dev->lock_status, flags);
+			if (cci_dev->irqs_disabled &
+				CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) {
+				irq_update_rd_done |=
+					CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+				cci_dev->irqs_disabled &=
+					~CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+			}
+			spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+		}
+	}
+
+	if (irq_update_rd_done != 0) {
+		irq_update_rd_done |= cam_io_r_mb(base + CCI_IRQ_MASK_1_ADDR);
+		cam_io_w_mb(irq_update_rd_done, base + CCI_IRQ_MASK_1_ADDR);
+	}
+
+	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
 	if (irq_status0 & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
 		struct cam_cci_master_info *cci_master_info;
 		if (cci_dev->cci_master_info[MASTER_0].reset_pending == true) {
@@ -293,64 +352,6 @@ irqreturn_t cam_cci_irq(int irq_num, void *data)
 		cam_io_w_mb(CCI_M1_RESET_RMSK, base + CCI_RESET_CMD_ADDR);
 	}
 
-	cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR);
-
-	reg_bmsk = CCI_IRQ_MASK_1_RMSK;
-	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) &&
-	!(irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK)) {
-		reg_bmsk &= ~CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
-		spin_lock_irqsave(&cci_dev->lock_status, flags);
-		cci_dev->irqs_disabled |=
-			CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
-		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
-	}
-
-	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) &&
-	!(irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK)) {
-		reg_bmsk &= ~CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
-		spin_lock_irqsave(&cci_dev->lock_status, flags);
-		cci_dev->irqs_disabled |=
-			CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
-		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
-	}
-
-	if (reg_bmsk != CCI_IRQ_MASK_1_RMSK) {
-		cam_io_w_mb(reg_bmsk, base + CCI_IRQ_MASK_1_ADDR);
-		CAM_DBG(CAM_CCI, "Updating the reg mask for irq1: 0x%x",
-			reg_bmsk);
-	} else if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK ||
-		irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
-		if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) {
-			spin_lock_irqsave(&cci_dev->lock_status, flags);
-			if (cci_dev->irqs_disabled &
-				CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) {
-				irq_update_rd_done |=
-					CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
-				cci_dev->irqs_disabled &=
-					~CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
-			}
-			spin_unlock_irqrestore(&cci_dev->lock_status, flags);
-		}
-		if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
-			spin_lock_irqsave(&cci_dev->lock_status, flags);
-			if (cci_dev->irqs_disabled &
-				CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) {
-				irq_update_rd_done |=
-					CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
-				cci_dev->irqs_disabled &=
-					~CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
-			}
-			spin_unlock_irqrestore(&cci_dev->lock_status, flags);
-		}
-	}
-
-	if (irq_update_rd_done != 0) {
-		irq_update_rd_done |= cam_io_r_mb(base + CCI_IRQ_MASK_1_ADDR);
-		cam_io_w_mb(irq_update_rd_done, base + CCI_IRQ_MASK_1_ADDR);
-	}
-
-	cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR);
-	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
 	return IRQ_HANDLED;
 }