ubwcp: remove invalidate for lock with DMA_TO_DEVICE

Remove cache invalidation when lock is called with DMA direction
DMA_TO_DEVICE. The invalidate is unnecessary for write-only access,
and skipping it allows clients to optimize the performance of their
code.

Clients can use DMA direction DMA_TO_DEVICE when any of the following holds:
- The complete buffer will be written.
- Writes will be tile aligned.
- The buffer doesn't contain valid data.
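
As a hedged illustration (not part of this patch), a client that writes the
complete buffer could lock it write-only and, with this change, skip the
invalidate entirely. This sketch assumes the lock/unlock paths are reached
through the standard dma-buf CPU access hooks; fill_entire_buffer() is a
hypothetical helper and error handling is elided:

	/* Write-only CPU access: no cache invalidate is performed anymore. */
	dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
	fill_entire_buffer(vaddr, size);	/* hypothetical: writes every byte */
	dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);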

Change-Id: I33e0b64c028498396af2c26828449bb3fc2c5930
Signed-off-by: Liam Mark <quic_lmark@quicinc.com>
@@ -2026,15 +2026,12 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir)
 			goto err_flush_failed;
 		}
 
-		/* Flush/invalidate ULA PA from CPU caches
-		 * Always invalidate cache, even when writing.
-		 * Upgrade direction to force invalidate.
-		 */
-		if (dir == DMA_TO_DEVICE)
-			dir = DMA_BIDIRECTIONAL;
-		trace_ubwcp_dma_sync_single_for_cpu_start(buf->ula_size, dir);
-		dma_sync_single_for_cpu(ubwcp->dev, buf->ula_pa, buf->ula_size, dir);
-		trace_ubwcp_dma_sync_single_for_cpu_end(buf->ula_size, dir);
+		/* Only apply CMOs if there are potential CPU reads */
+		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+			trace_ubwcp_dma_sync_single_for_cpu_start(buf->ula_size, dir);
+			dma_sync_single_for_cpu(ubwcp->dev, buf->ula_pa, buf->ula_size, dir);
+			trace_ubwcp_dma_sync_single_for_cpu_end(buf->ula_size, dir);
+		}
 		buf->dma_dir = dir;
 	} else {
 		DBG("buf already locked");
@@ -2042,8 +2039,19 @@ static int ubwcp_lock(struct dma_buf *dmabuf, enum dma_data_direction dir)
 		 * A previous read lock will now become write lock.
 		 * This will ensure a flush when the last unlock comes in.
 		 */
-		if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+		if (buf->dma_dir == DMA_TO_DEVICE &&
+		    buf->dma_dir != dir) {
+			/*
+			 * Locking for read would require doing a cache invalidation which
+			 * we don't want to do while a client may be writing to the buffer
+			 * as that could drop valid lines from the cache.
+			 */
+			ret = -EINVAL;
+			ERR("no support for locking a write only buffer for read");
+			goto err;
+		} else if (buf->dma_dir != dir) {
 			buf->dma_dir = DMA_BIDIRECTIONAL;
+		}
 	}
 	buf->lock_count++;
 	DBG("new lock_count: %d", buf->lock_count);
@@ -2077,8 +2085,9 @@ static int unlock_internal(struct ubwcp_buf *buf, enum dma_data_direction dir, b
 		DBG("Forced lock_count: %d", buf->lock_count);
 	} else {
 		/* for write unlocks, remember the direction so we flush on last unlock */
-		if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
+		if (buf->dma_dir != dir)
 			buf->dma_dir = DMA_BIDIRECTIONAL;
+
 		buf->lock_count--;
 		DBG("new lock_count: %d", buf->lock_count);
 		if (buf->lock_count) {
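
The unlock hunk widens the recorded direction on any mismatch, so the final
unlock still flushes if any locker may have written. A hedged walk-through of
nested locks under the same dma-buf hook assumption as above:

	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);	/* invalidate; dma_dir = FROM_DEVICE */
	dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);	/* dma_dir widens to BIDIRECTIONAL */
	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);	/* lock_count 2 -> 1, no flush yet */
	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);	/* last unlock: dma_dir stayed
								 * BIDIRECTIONAL, so the buffer is flushed */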