Merge commit 'dmaengine-3.13-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine changes from Dan

 1/ Bartlomiej and Dan finalized a rework of the dma address unmap
    implementation.

 2/ In the course of testing 1/ a collection of enhancements to dmatest
    fell out.  Notably basic performance statistics, and fixed / enhanced
    test control through new module parameters 'run', 'wait', 'noverify',
    and 'verbose'.  Thanks to Andriy and Linus for their review.

 3/ Testing the raid related corner cases of 1/ triggered bugs in the
    recently added 16-source operation support in the ioatdma driver.

 4/ Some minor fixes / cleanups to mv_xor and ioatdma.

Conflicts:
	drivers/dma/dmatest.c

Signed-off-by: Vinod Koul <vinod.koul@intel.com>
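Background note on item 1/ of the message: the rework moves DMA-unmap bookkeeping out of the individual drivers and into the dmaengine core, which keeps the mapped addresses in a reference-counted struct dmaengine_unmap_data attached to the descriptor, so a driver's completion path only has to call dma_descriptor_unmap() (as the mv_xor hunk further below now does). The code below is a rough, hedged sketch of the submitter-side pattern that series introduces; prep_copy_with_unmap() is a hypothetical helper, not in-tree code, and dma_mapping_error() checks are omitted for brevity.

/* Hedged sketch: hand mapped pages to the dmaengine core so it can unmap
 * them when the descriptor completes.  Illustrative only. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static struct dma_async_tx_descriptor *
prep_copy_with_unmap(struct dma_chan *chan, struct page *dst,
                     struct page *src, size_t len, unsigned long flags)
{
        struct dma_device *dev = chan->device;
        struct dmaengine_unmap_data *unmap;
        struct dma_async_tx_descriptor *tx;

        /* room for one source and one destination address */
        unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
        if (!unmap)
                return NULL;

        unmap->to_cnt = 1;              /* addr[0] is mapped DMA_TO_DEVICE   */
        unmap->from_cnt = 1;            /* addr[1] is mapped DMA_FROM_DEVICE */
        unmap->len = len;
        unmap->addr[0] = dma_map_page(dev->dev, src, 0, len, DMA_TO_DEVICE);
        unmap->addr[1] = dma_map_page(dev->dev, dst, 0, len, DMA_FROM_DEVICE);

        tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
                                         len, flags);
        if (tx)
                dma_set_unmap(tx, unmap);       /* core unmaps at completion */

        dmaengine_unmap_put(unmap);             /* drop the submitter's reference */
        return tx;
}

Because the unmap data is reference counted and shared through the descriptor, chained async_tx operations can reuse one set of mappings, which is part of what lets the per-driver DMA_COMPL_SKIP_*_UNMAP handling go away.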
drivers/dma/mv_xor.c

@@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
         return hw_desc->phy_dest_addr;
 }
 
-static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
-                                int src_idx)
-{
-        struct mv_xor_desc *hw_desc = desc->hw_desc;
-        return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
-}
-
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
                                    u32 byte_count)
 {
@@ -278,42 +270,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
                         desc->async_tx.callback(
                                 desc->async_tx.callback_param);
 
-                /* unmap dma addresses
-                 * (unmap_single vs unmap_page?)
-                 */
-                if (desc->group_head && desc->unmap_len) {
-                        struct mv_xor_desc_slot *unmap = desc->group_head;
-                        struct device *dev = mv_chan_to_devp(mv_chan);
-                        u32 len = unmap->unmap_len;
-                        enum dma_ctrl_flags flags = desc->async_tx.flags;
-                        u32 src_cnt;
-                        dma_addr_t addr;
-                        dma_addr_t dest;
-
-                        src_cnt = unmap->unmap_src_cnt;
-                        dest = mv_desc_get_dest_addr(unmap);
-                        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-                                enum dma_data_direction dir;
-
-                                if (src_cnt > 1) /* is xor ? */
-                                        dir = DMA_BIDIRECTIONAL;
-                                else
-                                        dir = DMA_FROM_DEVICE;
-                                dma_unmap_page(dev, dest, len, dir);
-                        }
-
-                        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-                                while (src_cnt--) {
-                                        addr = mv_desc_get_src_addr(unmap,
-                                                                    src_cnt);
-                                        if (addr == dest)
-                                                continue;
-                                        dma_unmap_page(dev, addr, len,
-                                                       DMA_TO_DEVICE);
-                                }
-                        }
+                dma_descriptor_unmap(&desc->async_tx);
+                if (desc->group_head)
                         desc->group_head = NULL;
-                }
         }
 
         /* run dependent operations */
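A note on the hunk above: the driver-local dma_unmap_page() loop disappears because the core now owns the unmap step. Roughly speaking (a hedged paraphrase of the helper added by this rework, not a verbatim copy of include/linux/dmaengine.h), dma_descriptor_unmap() simply drops the descriptor's reference on the attached unmap data:

/* Hedged paraphrase of the core helper called in the hunk above. */
static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
        if (tx->unmap) {
                dmaengine_unmap_put(tx->unmap); /* final put performs the unmaps */
                tx->unmap = NULL;
        }
}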
@@ -1076,10 +1035,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
         }
 
         mv_chan->mmr_base = xordev->xor_base;
-        if (!mv_chan->mmr_base) {
-                ret = -ENOMEM;
-                goto err_free_dma;
-        }
+        mv_chan->mmr_high_base = xordev->xor_high_base;
         tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
                      mv_chan);
 
@@ -1138,7 +1094,7 @@ static void
 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
                          const struct mbus_dram_target_info *dram)
 {
-        void __iomem *base = xordev->xor_base;
+        void __iomem *base = xordev->xor_high_base;
         u32 win_enable = 0;
         int i;
 