Merge branch 'for-linus' into next
Conflicts:
        drivers/dma/edma.c

Moved the memory leak fix post merge

Signed-off-by: Vinod Koul <vinod.koul@intel.com>
@@ -46,8 +46,14 @@
 #define EDMA_CHANS	64
 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */
 
-/* Max of 16 segments per channel to conserve PaRAM slots */
-#define MAX_NR_SG		16
+/*
+ * Max of 20 segments per channel to conserve PaRAM slots.
+ * Also note that MAX_NR_SG should be at least the number of periods
+ * that are required for ASoC, otherwise DMA prep calls will
+ * fail. Today davinci-pcm is the only user of this driver and
+ * requires at least 17 slots, so we set the default to 20.
+ */
+#define MAX_NR_SG		20
 #define EDMA_MAX_SLOTS		MAX_NR_SG
 #define EDMA_DESCRIPTORS	16
 
@@ -250,6 +256,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	return ret;
 }
 
+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel whose PaRAM set we're configuring
+ * @pset: PaRAM set to initialize and set up
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: In units of dev_width, how much to send
+ * @dev_width: Bus width of the device (its FIFO access size)
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+	enum dma_slave_buswidth dev_width, unsigned int dma_length,
+	enum dma_transfer_direction direction)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct device *dev = chan->device->dev;
+	int acnt, bcnt, ccnt, cidx;
+	int src_bidx, dst_bidx, src_cidx, dst_cidx;
+	int absync;
+
+	acnt = dev_width;
+	/*
+	 * If the maxburst is equal to the fifo width, use
+	 * A-synced transfers. This allows for large contiguous
+	 * buffer transfers using only one PaRAM set.
+	 */
+	if (burst == 1) {
+		/*
+		 * For the A-sync case, bcnt and ccnt are the remainder
+		 * and quotient respectively of the division of
+		 * (dma_length / acnt) by (SZ_64K - 1). This is so
+		 * that in case bcnt overflows, we have ccnt to use.
+		 * Note: in A-sync transfers only, bcntrld is used, but it
+		 * only applies for sg_dma_len(sg) >= SZ_64K.
+		 * In this case, the approach adopted is: bcnt for the
+		 * first frame will be the remainder below. Then for
+		 * every successive frame, bcnt will be SZ_64K - 1, which
+		 * is ensured by setting bcntrld = 0xffff below.
+		 */
+		absync = false;
+		ccnt = dma_length / acnt / (SZ_64K - 1);
+		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+		/*
+		 * If bcnt is non-zero, we have a remainder and hence an
+		 * extra frame to transfer, so increment ccnt.
+		 */
+		if (bcnt)
+			ccnt++;
+		else
+			bcnt = SZ_64K - 1;
+		cidx = acnt;
+	} else {
+		/*
+		 * If maxburst is greater than the fifo address_width,
+		 * use AB-synced transfers where A count is the fifo
+		 * address_width and B count is the maxburst. In this
+		 * case, we are limited to transfers of C count frames
+		 * of (address_width * maxburst) where C count is limited
+		 * to SZ_64K - 1. This places an upper bound on the length
+		 * of an SG segment that can be handled.
+		 */
+		absync = true;
+		bcnt = burst;
+		ccnt = dma_length / (acnt * bcnt);
+		if (ccnt > (SZ_64K - 1)) {
+			dev_err(dev, "Exceeded max SG segment size\n");
+			return -EINVAL;
+		}
+		cidx = acnt * bcnt;
+	}
+
+	if (direction == DMA_MEM_TO_DEV) {
+		src_bidx = acnt;
+		src_cidx = cidx;
+		dst_bidx = 0;
+		dst_cidx = 0;
+	} else if (direction == DMA_DEV_TO_MEM) {
+		src_bidx = 0;
+		src_cidx = 0;
+		dst_bidx = acnt;
+		dst_cidx = cidx;
+	} else {
+		dev_err(dev, "%s: direction not implemented yet\n", __func__);
+		return -EINVAL;
+	}
+
+	pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+	/* Configure A or AB synchronized transfers */
+	if (absync)
+		pset->opt |= SYNCDIM;
+
+	pset->src = src_addr;
+	pset->dst = dst_addr;
+
+	pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+	pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+	pset->a_b_cnt = bcnt << 16 | acnt;
+	pset->ccnt = ccnt;
+	/*
+	 * Auto reload (bcntrld) is only required for the A-sync case,
+	 * and there a reload value of SZ_64K - 1 is all that is ever
+	 * needed. 'link' is initially set to NULL and will later be
+	 * populated by edma_execute.
+	 */
+	pset->link_bcntrld = 0xffffffff;
+	return absync;
+}
+
 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl,
 	unsigned int sg_len, enum dma_transfer_direction direction,
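
The quotient/remainder logic in edma_config_pset() above is easy to sanity-check outside the kernel. Below is a minimal userspace sketch of the same counting math; it is not kernel code, the function name and print harness are illustrative, and SZ_64K is assumed to be 0x10000 as in include/linux/sizes.h:

#include <stdio.h>

#define SZ_64K 0x10000

/*
 * Sketch of edma_config_pset()'s counting math; returns 1 for
 * AB-sync, 0 for A-sync, -1 on overflow (stand-in for -EINVAL).
 */
static int config_counts(unsigned int dma_length, unsigned int dev_width,
                         unsigned int burst, int *acnt, int *bcnt, int *ccnt)
{
        *acnt = dev_width;
        if (burst == 1) {
                /* A-sync: quotient/remainder of (len / acnt) by SZ_64K - 1 */
                *ccnt = dma_length / *acnt / (SZ_64K - 1);
                *bcnt = dma_length / *acnt - *ccnt * (SZ_64K - 1);
                if (*bcnt)
                        (*ccnt)++;      /* remainder needs one extra frame */
                else
                        *bcnt = SZ_64K - 1;
                return 0;
        }
        /* AB-sync: one burst per frame, ccnt frames of acnt * bcnt bytes */
        *bcnt = burst;
        *ccnt = dma_length / (*acnt * *bcnt);
        if (*ccnt > SZ_64K - 1)
                return -1;              /* SG segment too long for one PaRAM set */
        return 1;
}

int main(void)
{
        int acnt, bcnt, ccnt;
        /* 1 MiB transfer, 4-byte FIFO, maxburst 1: expect A-sync, ccnt = 5 */
        int absync = config_counts(1 << 20, 4, 1, &acnt, &bcnt, &ccnt);

        printf("absync=%d acnt=%d bcnt=%d ccnt=%d\n", absync, acnt, bcnt, ccnt);
        return 0;
}

For the 1 MiB case this prints absync=0 acnt=4 bcnt=4 ccnt=5: the first frame carries the 4-element remainder, and bcntrld reloads SZ_64K - 1 for the remaining frames, matching the comment in the patch.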
@@ -258,23 +375,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 	struct edma_chan *echan = to_edma_chan(chan);
 	struct device *dev = chan->device->dev;
 	struct edma_desc *edesc;
-	dma_addr_t dev_addr;
+	dma_addr_t src_addr = 0, dst_addr = 0;
 	enum dma_slave_buswidth dev_width;
 	u32 burst;
 	struct scatterlist *sg;
-	int acnt, bcnt, ccnt, src, dst, cidx;
-	int src_bidx, dst_bidx, src_cidx, dst_cidx;
-	int i, nslots;
+	int i, nslots, ret;
 
 	if (unlikely(!echan || !sgl || !sg_len))
 		return NULL;
 
 	if (direction == DMA_DEV_TO_MEM) {
-		dev_addr = echan->cfg.src_addr;
+		src_addr = echan->cfg.src_addr;
 		dev_width = echan->cfg.src_addr_width;
 		burst = echan->cfg.src_maxburst;
 	} else if (direction == DMA_MEM_TO_DEV) {
-		dev_addr = echan->cfg.dst_addr;
+		dst_addr = echan->cfg.dst_addr;
 		dev_width = echan->cfg.dst_addr_width;
 		burst = echan->cfg.dst_maxburst;
 	} else {
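
For reference, the echan->cfg fields read in this hunk (the device address, address width, and maxburst) are supplied by the client driver through the standard dmaengine slave-config call. A hedged sketch of such a client follows; the function name and fifo_addr parameter are placeholders, while dma_slave_config and dmaengine_slave_config() are the real dmaengine API:

#include <linux/dmaengine.h>

/*
 * Illustrative only: a slave driver fills dma_slave_config and the
 * dmaengine core stores it on the channel (echan->cfg above).
 */
static int example_slave_setup(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_addr,                  /* -> cfg.dst_addr */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, /* -> dev_width   */
		.dst_maxburst   = 8,                          /* -> burst       */
	};

	return dmaengine_slave_config(chan, &cfg);
}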
@@ -315,64 +430,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 
 	/* Configure PaRAM sets for each SG */
 	for_each_sg(sgl, sg, sg_len, i) {
-
-		acnt = dev_width;
-
-		/*
-		 * If the maxburst is equal to the fifo width, use
-		 * A-synced transfers. This allows for large contiguous
-		 * buffer transfers using only one PaRAM set.
-		 */
-		if (burst == 1) {
-			edesc->absync = false;
-			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
-			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
-			if (bcnt)
-				ccnt++;
-			else
-				bcnt = SZ_64K - 1;
-			cidx = acnt;
-		/*
-		 * If maxburst is greater than the fifo address_width,
-		 * use AB-synced transfers where A count is the fifo
-		 * address_width and B count is the maxburst. In this
-		 * case, we are limited to transfers of C count frames
-		 * of (address_width * maxburst) where C count is limited
-		 * to SZ_64K-1. This places an upper bound on the length
-		 * of an SG segment that can be handled.
-		 */
-		} else {
-			edesc->absync = true;
-			bcnt = burst;
-			ccnt = sg_dma_len(sg) / (acnt * bcnt);
-			if (ccnt > (SZ_64K - 1)) {
-				dev_err(dev, "Exceeded max SG segment size\n");
-				kfree(edesc);
-				return NULL;
-			}
-			cidx = acnt * bcnt;
-		}
-
-		if (direction == DMA_MEM_TO_DEV) {
-			src = sg_dma_address(sg);
-			dst = dev_addr;
-			src_bidx = acnt;
-			src_cidx = cidx;
-			dst_bidx = 0;
-			dst_cidx = 0;
-		} else {
-			src = dev_addr;
-			dst = sg_dma_address(sg);
-			src_bidx = 0;
-			src_cidx = 0;
-			dst_bidx = acnt;
-			dst_cidx = cidx;
-		}
-
-		edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
-		/* Configure A or AB synchronized transfers */
-		if (edesc->absync)
-			edesc->pset[i].opt |= SYNCDIM;
+		/* Get address for each SG */
+		if (direction == DMA_DEV_TO_MEM)
+			dst_addr = sg_dma_address(sg);
+		else
+			src_addr = sg_dma_address(sg);
+
+		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+				       dst_addr, burst, dev_width,
+				       sg_dma_len(sg), direction);
+		if (ret < 0) {
+			kfree(edesc);
+			return NULL;
+		}
+
+		edesc->absync = ret;
 
 		/* If this is the last in a current SG set of transactions,
 		   enable interrupts so that next set is processed */
@@ -382,17 +454,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 		/* If this is the last set, enable completion interrupt flag */
 		if (i == sg_len - 1)
 			edesc->pset[i].opt |= TCINTEN;
-
-		edesc->pset[i].src = src;
-		edesc->pset[i].dst = dst;
-
-		edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
-		edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
-
-		edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
-		edesc->pset[i].ccnt = ccnt;
-		edesc->pset[i].link_bcntrld = 0xffffffff;
-
 	}
 
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
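
The src_dst_bidx and src_dst_cidx words this last hunk stops open-coding pack two 16-bit indexes into one 32-bit PaRAM field: the destination index in the high half-word, the source index in the low half-word. A small userspace illustration of that packing (the helper name and values are illustrative, not from the driver):

#include <stdio.h>
#include <stdint.h>

/* Destination index in the high half-word, source index in the low
 * half-word, as pset->src_dst_bidx is assembled in edma_config_pset(). */
static uint32_t pack_idx(uint16_t dst_idx, uint16_t src_idx)
{
	return ((uint32_t)dst_idx << 16) | src_idx;
}

int main(void)
{
	/* DMA_MEM_TO_DEV with acnt = 4: memory side strides, FIFO side doesn't */
	uint32_t src_dst_bidx = pack_idx(0, 4);

	printf("src_dst_bidx = 0x%08x (dst=%u, src=%u)\n", src_dst_bidx,
	       src_dst_bidx >> 16, src_dst_bidx & 0xffff);
	return 0;
}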