UPSTREAM: coresight: tmc-etr: Speed up for bounce buffer in flat mode

The AUX bounce buffer is allocated with the API dma_alloc_coherent().
In the low-level architecture code, e.g. on Arm64, this maps the memory
with the attribute "Normal non-cacheable", which can be seen from the
definition of pgprot_dmacoherent() in arch/arm64/include/asm/pgtable.h.
Since the mapping is non-cacheable, later accesses to the AUX bounce
buffer are inefficient: every load instruction must fetch its data from
DRAM.
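
For reference, a sketch of the arm64 definition as found in kernels of
this era (check arch/arm64/include/asm/pgtable.h in your tree for the
exact form); MT_NORMAL_NC selects the Normal non-cacheable memory type,
which is why every CPU load bypasses the caches:

  #define pgprot_dmacoherent(prot) \
          __pgprot_modify(prot, PTE_ATTRINDX_MASK, \
                          PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)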

This patch switches the allocation to dma_alloc_noncoherent(), so the
driver can access the memory via a cacheable mapping; load instructions
can then fetch data from cache lines rather than always reading it from
DRAM, boosting memory performance. With the cacheable mapping, the
driver must call dma_sync_single_for_cpu() to invalidate the relevant
cache lines before reading the bounce buffer, so that it does not read
stale trace data.
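
A minimal sketch of this allocate/sync/free pattern follows; the helper
example_read_bounce() is hypothetical and only illustrates the DMA API
usage, it is not the driver code itself:

  #include <linux/dma-mapping.h>
  #include <linux/string.h>

  /* Illustration only: allocate a device-writable buffer with a
   * cacheable CPU mapping, invalidate CPU caches before reading the
   * data the device wrote, then free the buffer.
   */
  static int example_read_bounce(struct device *dev, void *dst, size_t size)
  {
          dma_addr_t daddr;
          void *vaddr;

          vaddr = dma_alloc_noncoherent(dev, size, &daddr,
                                        DMA_FROM_DEVICE, GFP_KERNEL);
          if (!vaddr)
                  return -ENOMEM;

          /* ... the device fills the buffer with trace data ... */

          /* Drop stale cache lines so the CPU sees the device's writes. */
          dma_sync_single_for_cpu(dev, daddr, size, DMA_FROM_DEVICE);
          memcpy(dst, vaddr, size);       /* loads can now hit the cache */

          dma_free_noncoherent(dev, size, vaddr, daddr, DMA_FROM_DEVICE);
          return 0;
  }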

Measuring the duration of tmc_update_etr_buffer() with the ftrace
function_graph tracer shows a significant performance improvement for
copying 4MiB of data from the bounce buffer:
# echo tmc_etr_get_data_flat_buf > set_graph_notrace // avoid noise
# echo tmc_update_etr_buffer > set_graph_function
# echo function_graph > current_tracer
before:

 # CPU  DURATION                  FUNCTION CALLS
 # |     |   |                     |   |   |   |
 2)               |  tmc_update_etr_buffer() {
 ...
 2) # 8148.320 us |  }

after:

 # CPU  DURATION                  FUNCTION CALLS
 # |     |   |                     |   |   |   |
 2)               |  tmc_update_etr_buffer() {
 ...
 2) # 2525.420 us |  }

Bug: 213931796
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Link: https://lore.kernel.org/r/20210905032144.966766-1-leo.yan@linaro.org
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
(cherry picked from commit 0abd076217a39c4abc47dcd84d0c8f491f87cbe7)
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Change-Id: I6adf07f35c90ea298b5a875298afc062696d0d6f

@@ -609,8 +609,9 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
 	if (!flat_buf)
 		return -ENOMEM;
 
-	flat_buf->vaddr = dma_alloc_coherent(real_dev, etr_buf->size,
-					     &flat_buf->daddr, GFP_KERNEL);
+	flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
+						&flat_buf->daddr,
+						DMA_FROM_DEVICE, GFP_KERNEL);
 	if (!flat_buf->vaddr) {
 		kfree(flat_buf);
 		return -ENOMEM;
@@ -631,14 +632,18 @@ static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
 	if (flat_buf && flat_buf->daddr) {
 		struct device *real_dev = flat_buf->dev->parent;
 
-		dma_free_coherent(real_dev, flat_buf->size,
-				  flat_buf->vaddr, flat_buf->daddr);
+		dma_free_noncoherent(real_dev, etr_buf->size,
+				     flat_buf->vaddr, flat_buf->daddr,
+				     DMA_FROM_DEVICE);
 	}
 	kfree(flat_buf);
 }
 
 static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
 {
+	struct etr_flat_buf *flat_buf = etr_buf->private;
+	struct device *real_dev = flat_buf->dev->parent;
+
 	/*
 	 * Adjust the buffer to point to the beginning of the trace data
 	 * and update the available trace data.
@@ -648,6 +653,19 @@ static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
 		etr_buf->len = etr_buf->size;
 	else
 		etr_buf->len = rwp - rrp;
 
+	/*
+	 * The driver always starts tracing at the beginning of the buffer,
+	 * the only reason why we would get a wrap around is when the buffer
+	 * is full.  Sync the entire buffer in one go for this case.
+	 */
+	if (etr_buf->offset + etr_buf->len > etr_buf->size)
+		dma_sync_single_for_cpu(real_dev, flat_buf->daddr,
+					etr_buf->size, DMA_FROM_DEVICE);
+	else
+		dma_sync_single_for_cpu(real_dev,
+					flat_buf->daddr + etr_buf->offset,
+					etr_buf->len, DMA_FROM_DEVICE);
 }
 
 static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,