Merge "ubwcp: remove invalidate for lock with DMA_TO_DEVICE"

qctecmdr 1 year ago
parent
commit
ee7e52e24b
2 changed files with 44 additions and 48 deletions
  1. ubwcp/ubwcp_hw.c (+24, -37)
  2. ubwcp/ubwcp_main.c (+20, -11)

+ 24 - 37
ubwcp/ubwcp_hw.c

@@ -201,12 +201,33 @@ void ubwcp_hw_enable_range_check(void __iomem *base, u16 index)
 }
 EXPORT_SYMBOL(ubwcp_hw_enable_range_check);
 
+int ubwcp_hw_flush(void __iomem *base)
+{
+	u32 flush_complete = 0;
+	u32 count_no_delay = 1000;
+	u32 count_delay = 2000;
+	u32 count = count_no_delay + count_delay;
+
+	UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x3); /* assert flush */
+	do {
+		if (count < count_delay) /* spin for ~1000 polls, then 1us per poll */
+			udelay(1);
+
+		flush_complete = UBWCP_REG_READ(base, FLUSH_STATUS) & 0x1;
+		if (flush_complete) {
+			UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x0); /* clear flush */
+			return 0;
+		}
+	} while (count--);
+
+	ERR("~~~~~ FLUSH FAILED ~~~~~");
+	return -1;
+}
+EXPORT_SYMBOL(ubwcp_hw_flush);
 
 /* Disable range check with flush */
 int ubwcp_hw_disable_range_check_with_flush(void __iomem *base, u16 index)
 {
-	u32 flush_complete = 0;
-	u32 count = 20;
 	u32 val;
 	u16 ctrl_reg = index >> 5;
 
@@ -226,19 +247,7 @@ int ubwcp_hw_disable_range_check_with_flush(void __iomem *base, u16 index)
 	//assert flush
 	UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x3);
 
-	//poll for flush done
-	do {
-		flush_complete = UBWCP_REG_READ(base, FLUSH_STATUS) & 0x1;
-		if (flush_complete) {
-			//clear flush
-			UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x0);
-			return 0;
-		}
-		udelay(100);
-	} while (count--);
-
-	ERR("~~~~~ FLUSH FAILED ~~~~~");
-	return -1;
+	return ubwcp_hw_flush(base);
 }
 EXPORT_SYMBOL(ubwcp_hw_disable_range_check_with_flush);
 
@@ -311,28 +320,6 @@ void ubwcp_hw_encoder_config(void __iomem *base)
 	UBWCP_REG_WRITE(base, ENCODER_CONFIG, 0x7);
 }
 
-
-int ubwcp_hw_flush(void __iomem *base)
-{
-	u32 flush_complete = 0;
-	u32 count = 20;
-
-	UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x3);
-	do {
-		flush_complete = UBWCP_REG_READ(base, FLUSH_STATUS) & 0x1;
-		if (flush_complete) {
-			UBWCP_REG_WRITE(base, FLUSH_CONTROL, 0x0);
-			return 0;
-		}
-		udelay(100);
-	} while (count--);
-
-	ERR("~~~~~ FLUSH FAILED ~~~~~");
-	return -1;
-}
-EXPORT_SYMBOL(ubwcp_hw_flush);
-
-
 void ubwcp_hw_power_vote_status(void __iomem *pwr_ctrl, u8 *vote, u8 *status)
 {
 	u32 reg;
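
The rewritten ubwcp_hw_flush() replaces the old poll of 20 iterations with udelay(100) each by a two-phase loop: roughly 1000 back-to-back reads of FLUSH_STATUS, then up to 2000 more with udelay(1) between reads. The total timeout budget stays around 2 ms, but a flush that completes quickly no longer pays a 100 us delay. For comparison only, the same pattern could be written with the kernel's iopoll helper; FLUSH_CONTROL_OFFSET and FLUSH_STATUS_OFFSET are hypothetical raw offsets here, since the driver itself goes through the UBWCP_REG_WRITE/UBWCP_REG_READ macros:

#include <linux/iopoll.h>

/* Sketch, not part of the patch: poll FLUSH_STATUS bit 0 with a 1us
 * delay per read and a 3000us timeout, mirroring the loop above. */
static int ubwcp_hw_flush_sketch(void __iomem *base)
{
	u32 status;
	int ret;

	writel_relaxed(0x3, base + FLUSH_CONTROL_OFFSET); /* assert flush */
	ret = readl_poll_timeout_atomic(base + FLUSH_STATUS_OFFSET, status,
					status & 0x1, 1, 3000);
	if (!ret)
		writel_relaxed(0x0, base + FLUSH_CONTROL_OFFSET); /* clear flush */
	return ret; /* 0 on completion, -ETIMEDOUT otherwise */
}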

+ 20 - 11
ubwcp/ubwcp_main.c

@@ -2027,15 +2027,12 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir)
 			goto err_flush_failed;
 		}
 
-		/* Flush/invalidate ULA PA from CPU caches
-		 * Always invalidate cache, even when writing.
-		 * Upgrade direction to force invalidate.
-		 */
-		if (dir == DMA_TO_DEVICE)
-			dir = DMA_BIDIRECTIONAL;
-		trace_ubwcp_dma_sync_single_for_cpu_start(buf->ula_size, dir);
-		dma_sync_single_for_cpu(ubwcp->dev, buf->ula_pa, buf->ula_size, dir);
-		trace_ubwcp_dma_sync_single_for_cpu_end(buf->ula_size, dir);
+		/* Only apply CMOs if there are potential CPU reads */
+		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+			trace_ubwcp_dma_sync_single_for_cpu_start(buf->ula_size, dir);
+			dma_sync_single_for_cpu(ubwcp->dev, buf->ula_pa, buf->ula_size, dir);
+			trace_ubwcp_dma_sync_single_for_cpu_end(buf->ula_size, dir);
+		}
 		buf->dma_dir = dir;
 	} else {
 		DBG("buf already locked");
@@ -2043,8 +2040,19 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir)
 		 * A previous read lock will now become write lock.
 		 * This will ensure a flush when the last unlock comes in.
 		 */
-		if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+		if (buf->dma_dir == DMA_TO_DEVICE &&
+		    buf->dma_dir != dir) {
+			/*
+			 * Locking for read would require doing a cache invalidation which
+			 * we don't want to do while a client may be writing to the buffer
+			 * as that could drop valid lines from the cache.
+			 */
+			ret = -EINVAL;
+			ERR("no support for locking a write only buffer for read");
+			goto err;
+		} else if (buf->dma_dir != dir) {
 			buf->dma_dir = DMA_BIDIRECTIONAL;
+		}
 	}
 	buf->lock_count++;
 	DBG("new lock_count: %d", buf->lock_count);
@@ -2078,8 +2086,9 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b
 		DBG("Forced lock_count: %d", buf->lock_count);
 	} else {
 		/* for write unlocks, remember the direction so we flush on last unlock */
-		if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+		if (buf->dma_dir != dir)
 			buf->dma_dir = DMA_BIDIRECTIONAL;
+
 		buf->lock_count--;
 		DBG("new lock_count: %d", buf->lock_count);
 		if (buf->lock_count) {
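
On the unlock side the condition now widens the tracked direction on any mismatch, not only for writes; since a write-only buffer can no longer be re-locked for read, a mismatch here implies mixed readers and writers. The tracked direction is what the last unlock hands to the device-side sync. A sketch of that final step, assuming the same dev/ula_pa/ula_size fields used in ubwcp_lock() above (the patch context ends before it):

/* Sketch only: on the last unlock, give the buffer back to the device
 * using the widest direction observed while it was locked. */
if (buf->lock_count == 0)
	dma_sync_single_for_device(ubwcp->dev, buf->ula_pa,
				   buf->ula_size, buf->dma_dir);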