dmaengine, async_tx: support alignment checks
Some engines have transfer size and address alignment restrictions.  Add
a per-operation alignment property to struct dma_device that the async
routines and dmatest can use to check alignment capabilities.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
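The include/linux/dmaengine.h side of the change is not shown in this view. As a minimal sketch of the intended shape — assuming struct dma_device grows u8 copy_align, xor_align, pq_align and fill_align fields, each holding the required alignment as a power-of-two shift — the checkers used by the hunks below could look like this:

/*
 * Sketch only: assumes each alignment field stores
 * log2(required alignment); 0 means "no restriction".
 */
static inline bool dmaengine_check_align(u8 align, size_t off1,
					 size_t off2, size_t len)
{
	size_t mask;

	if (!align)
		return true;
	mask = (1 << align) - 1;
	if (mask & (off1 | off2 | len))
		return false;
	return true;
}

static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
				       size_t off2, size_t len)
{
	return dmaengine_check_align(dev->copy_align, off1, off2, len);
}

Under this assumption, is_dma_xor_aligned(), is_dma_pq_aligned() and is_dma_fill_aligned() would wrap the same check around xor_align, pq_align and fill_align respectively.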
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,7 +50,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (device) {
+	if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		dma_addr_t dma_dest, dma_src;
 		unsigned long dma_prep_flags = 0;
 
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -47,7 +47,7 @@ async_memset(struct page *dest, int val, unsigned int offset, size_t len,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (device) {
+	if (device && is_dma_fill_aligned(device, offset, 0, len)) {
 		dma_addr_t dma_dest;
 		unsigned long dma_prep_flags = 0;
 
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -211,7 +211,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 
 	if (dma_src && device &&
 	    (src_cnt <= dma_maxpq(device, 0) ||
-	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) {
+	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
+	    is_dma_pq_aligned(device, offset, 0, len)) {
 		/* run the p+q asynchronously */
 		pr_debug("%s: (async) disks: %d len: %zu\n",
 			 __func__, disks, len);
@@ -274,7 +275,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) blocks;
 
-	if (dma_src && device && disks <= dma_maxpq(device, 0)) {
+	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
+	    is_dma_pq_aligned(device, offset, 0, len)) {
 		struct device *dev = device->dev;
 		dma_addr_t *pq = &dma_src[disks-2];
 		int i;
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -193,7 +193,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) src_list;
 
-	if (dma_src && chan) {
+	if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
 		/* run the xor asynchronously */
 		pr_debug("%s (async): len: %zu\n", __func__, len);
 
@@ -265,7 +265,8 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) src_list;
 
-	if (dma_src && device && src_cnt <= device->max_xor) {
+	if (dma_src && device && src_cnt <= device->max_xor &&
+	    is_dma_xor_aligned(device, offset, 0, len)) {
 		unsigned long dma_prep_flags = 0;
 		int i;
 
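The dmatest hunk is likewise not shown here. One plausible way for the test to honor the new capability (a sketch, not the actual dmatest change) is to round its randomized offsets and length down to the advertised alignment before issuing a transfer:

	/* sketch: clamp randomized test parameters to the device's
	 * advertised memcpy alignment (log2 convention assumed above) */
	u8 align = device->copy_align;

	len = (len >> align) << align;
	src_off = (src_off >> align) << align;
	dst_off = (dst_off >> align) << align;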