cross-tree: phase out dma_zalloc_coherent()

dma_alloc_coherent() already zeroes out the memory it returns, so using
dma_zalloc_coherent() is superfluous. Phase it out.
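
For context, dma_zalloc_coherent() was only a thin static inline wrapper
that tacked __GFP_ZERO onto the flags before calling dma_alloc_coherent();
roughly (a sketch, not the exact <linux/dma-mapping.h> contents):

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
{
        /*
         * dma_alloc_coherent() already returns zeroed memory, so the
         * extra __GFP_ZERO here no longer buys anything.
         */
        return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}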

This change was generated with the following Coccinelle SmPL patch:

@ replace_dma_zalloc_coherent @
expression dev, size, data, handle, flags;
@@

-dma_zalloc_coherent(dev, size, handle, flags)
+dma_alloc_coherent(dev, size, handle, flags)

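For reference, a semantic patch like this one is typically applied
tree-wide with Coccinelle's spatch tool; one plausible invocation, with
the rule above saved as, say, dma-zalloc.cocci (the file name is only
illustrative), is:

spatch --sp-file dma-zalloc.cocci --in-place --dir .

or via the kernel's coccicheck wrapper:

make coccicheck MODE=patch COCCI=dma-zalloc.cocci
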
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
[hch: re-ran the script on the latest tree]
Signed-off-by: Christoph Hellwig <hch@lst.de>

commit 750afb08ca (parent 3bd6e94bec)
Author: Luis Chamberlain, 2019-01-04 09:23:09 +01:00
Committer: Christoph Hellwig
173 changed files with 915 additions and 949 deletions

@@ -899,10 +899,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
 			goto done;
 
 	/* allocate dummy tail memory for all receive contexts */
-	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
-		&dd->pcidev->dev, sizeof(u64),
-		&dd->rcvhdrtail_dummy_dma,
-		GFP_KERNEL);
+	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+							 sizeof(u64),
+							 &dd->rcvhdrtail_dummy_dma,
+							 GFP_KERNEL);
 
 	if (!dd->rcvhdrtail_dummy_kvaddr) {
 		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
@@ -1863,9 +1863,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 			gfp_flags = GFP_KERNEL;
 		else
 			gfp_flags = GFP_USER;
-		rcd->rcvhdrq = dma_zalloc_coherent(
-			&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
-			gfp_flags | __GFP_COMP);
+		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+						  &rcd->rcvhdrq_dma,
+						  gfp_flags | __GFP_COMP);
 
 		if (!rcd->rcvhdrq) {
 			dd_dev_err(dd,
@@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 
 		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
 		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
-			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
-				&dd->pcidev->dev, PAGE_SIZE,
-				&rcd->rcvhdrqtailaddr_dma, gfp_flags);
+			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+								    PAGE_SIZE,
+								    &rcd->rcvhdrqtailaddr_dma,
+								    gfp_flags);
 			if (!rcd->rcvhdrtail_kvaddr)
 				goto bail_free;
 		}
@@ -1974,10 +1975,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 	while (alloced_bytes < rcd->egrbufs.size &&
 	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
 		rcd->egrbufs.buffers[idx].addr =
-			dma_zalloc_coherent(&dd->pcidev->dev,
-					    rcd->egrbufs.rcvtid_size,
-					    &rcd->egrbufs.buffers[idx].dma,
-					    gfp_flags);
+			dma_alloc_coherent(&dd->pcidev->dev,
+					   rcd->egrbufs.rcvtid_size,
+					   &rcd->egrbufs.buffers[idx].dma,
+					   gfp_flags);
 		if (rcd->egrbufs.buffers[idx].addr) {
 			rcd->egrbufs.buffers[idx].len =
 				rcd->egrbufs.rcvtid_size;

@@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd)
 		int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
 
 		set_dev_node(&dd->pcidev->dev, i);
-		dd->cr_base[i].va = dma_zalloc_coherent(
-					&dd->pcidev->dev,
-					bytes,
-					&dd->cr_base[i].dma,
-					GFP_KERNEL);
+		dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
+						       bytes,
+						       &dd->cr_base[i].dma,
+						       GFP_KERNEL);
 		if (!dd->cr_base[i].va) {
 			set_dev_node(&dd->pcidev->dev, dd->node);
 			dd_dev_err(dd,

@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 		timer_setup(&sde->err_progress_check_timer,
 			    sdma_err_progress_check, 0);
 
-		sde->descq = dma_zalloc_coherent(
-			&dd->pcidev->dev,
-			descq_cnt * sizeof(u64[2]),
-			&sde->descq_phys,
-			GFP_KERNEL
-		);
+		sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
+						descq_cnt * sizeof(u64[2]),
+						&sde->descq_phys, GFP_KERNEL);
 		if (!sde->descq)
 			goto bail;
 		sde->tx_ring =
@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
 
 	/* Allocate memory for DMA of head registers to memory */
-	dd->sdma_heads_dma = dma_zalloc_coherent(
-		&dd->pcidev->dev,
-		dd->sdma_heads_size,
-		&dd->sdma_heads_phys,
-		GFP_KERNEL
-	);
+	dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
+						dd->sdma_heads_size,
+						&dd->sdma_heads_phys,
+						GFP_KERNEL);
 	if (!dd->sdma_heads_dma) {
 		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
 		goto bail;
 	}
 
 	/* Allocate memory for pad */
-	dd->sdma_pad_dma = dma_zalloc_coherent(
-		&dd->pcidev->dev,
-		sizeof(u32),
-		&dd->sdma_pad_phys,
-		GFP_KERNEL
-	);
+	dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+					      &dd->sdma_pad_phys, GFP_KERNEL);
 	if (!dd->sdma_pad_dma) {
 		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
 		goto bail;