Merge branch 'devx-async' into k.o/for-next
Yishai Hadas says:

Enable DEVX asynchronous query commands

This series enables querying a DEVX object in an asynchronous mode. The
userspace application won't block when calling the firmware and will be
able to get the response back once it is ready.

To enable the above functionality:

- A DEVX asynchronous command completion FD object was introduced.
- The applicable file operations were implemented to enable using it by
  the user application.
- An asynchronous query method was added to the DEVX object; it calls the
  firmware asynchronously and manages the response on the given input FD.
- Hot unplug support was added for the FD to work properly upon
  unbind/disassociate.
- An mlx5 core fence for asynchronous commands was implemented and used to
  prevent racing upon unbind/disassociate.

This branch is based on mlx5-next & v5.0-rc2 due to dependencies, from
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

* branch 'devx-async':
  IB/mlx5: Implement DEVX hot unplug for async command FD
  IB/mlx5: Implement the file ops of DEVX async command FD
  IB/mlx5: Introduce async DEVX obj query API
  IB/mlx5: Introduce MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
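As a rough sketch of the flow this series enables (not part of the merge itself): userspace creates the async command completion FD, fires a non-blocking DEVX query tagged with a wr_id, waits for the FD to become readable, and then reads back the firmware response. The rdma-core helper names used below (mlx5dv_devx_create_cmd_comp(), mlx5dv_devx_obj_query_async(), mlx5dv_devx_get_async_cmd_comp()) and the pre-built DEVX object and command buffer are assumptions for illustration, not defined by this series.

/*
 * Hedged userspace sketch, assuming the rdma-core mlx5dv DEVX helpers
 * and an existing DEVX object 'obj' with a prepared query command 'in'.
 */
#include <stdint.h>
#include <poll.h>
#include <infiniband/mlx5dv.h>

static int devx_query_async(struct ibv_context *ctx,
                            struct mlx5dv_devx_obj *obj,
                            const void *in, size_t inlen, size_t outlen)
{
        struct {
                struct mlx5dv_devx_async_cmd_hdr hdr;
                uint8_t out[512];               /* must be >= outlen */
        } resp;
        struct mlx5dv_devx_cmd_comp *comp;
        struct pollfd pfd;
        int ret;

        /* Async command completion FD object (MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD) */
        comp = mlx5dv_devx_create_cmd_comp(ctx);
        if (!comp)
                return -1;

        /* Issue the query; returns without waiting for the firmware */
        ret = mlx5dv_devx_obj_query_async(obj, in, inlen, outlen,
                                          0xcafe /* wr_id */, comp);
        if (ret)
                goto out;

        /* Block (or do other work) until the completion FD is readable */
        pfd.fd = comp->fd;
        pfd.events = POLLIN;
        if (poll(&pfd, 1, -1) < 0) {
                ret = -1;
                goto out;
        }

        /* Read the response; resp.hdr.wr_id identifies which query completed */
        ret = mlx5dv_devx_get_async_cmd_comp(comp, &resp.hdr, sizeof(resp));
out:
        mlx5dv_devx_destroy_cmd_comp(comp);
        return ret;
}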
@@ -899,10 +899,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
                 goto done;
 
         /* allocate dummy tail memory for all receive contexts */
-        dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
-                &dd->pcidev->dev, sizeof(u64),
-                &dd->rcvhdrtail_dummy_dma,
-                GFP_KERNEL);
+        dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+                                                         sizeof(u64),
+                                                         &dd->rcvhdrtail_dummy_dma,
+                                                         GFP_KERNEL);
 
         if (!dd->rcvhdrtail_dummy_kvaddr) {
                 dd_dev_err(dd, "cannot allocate dummy tail memory\n");
@@ -1863,9 +1863,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
                 gfp_flags = GFP_KERNEL;
         else
                 gfp_flags = GFP_USER;
-        rcd->rcvhdrq = dma_zalloc_coherent(
-                &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
-                gfp_flags | __GFP_COMP);
+        rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+                                          &rcd->rcvhdrq_dma,
+                                          gfp_flags | __GFP_COMP);
 
         if (!rcd->rcvhdrq) {
                 dd_dev_err(dd,
@@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 
         if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
             HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
-                rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
-                        &dd->pcidev->dev, PAGE_SIZE,
-                        &rcd->rcvhdrqtailaddr_dma, gfp_flags);
+                rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
+                                                            PAGE_SIZE,
+                                                            &rcd->rcvhdrqtailaddr_dma,
+                                                            gfp_flags);
                 if (!rcd->rcvhdrtail_kvaddr)
                         goto bail_free;
         }
@@ -1974,10 +1975,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
         while (alloced_bytes < rcd->egrbufs.size &&
                rcd->egrbufs.alloced < rcd->egrbufs.count) {
                 rcd->egrbufs.buffers[idx].addr =
-                        dma_zalloc_coherent(&dd->pcidev->dev,
-                                            rcd->egrbufs.rcvtid_size,
-                                            &rcd->egrbufs.buffers[idx].dma,
-                                            gfp_flags);
+                        dma_alloc_coherent(&dd->pcidev->dev,
+                                           rcd->egrbufs.rcvtid_size,
+                                           &rcd->egrbufs.buffers[idx].dma,
+                                           gfp_flags);
                 if (rcd->egrbufs.buffers[idx].addr) {
                         rcd->egrbufs.buffers[idx].len =
                                 rcd->egrbufs.rcvtid_size;
@@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd)
                 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
 
                 set_dev_node(&dd->pcidev->dev, i);
-                dd->cr_base[i].va = dma_zalloc_coherent(
-                                        &dd->pcidev->dev,
-                                        bytes,
-                                        &dd->cr_base[i].dma,
-                                        GFP_KERNEL);
+                dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
+                                                       bytes,
+                                                       &dd->cr_base[i].dma,
+                                                       GFP_KERNEL);
                 if (!dd->cr_base[i].va) {
                         set_dev_node(&dd->pcidev->dev, dd->node);
                         dd_dev_err(dd,
@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
                 timer_setup(&sde->err_progress_check_timer,
                             sdma_err_progress_check, 0);
 
-                sde->descq = dma_zalloc_coherent(
-                        &dd->pcidev->dev,
-                        descq_cnt * sizeof(u64[2]),
-                        &sde->descq_phys,
-                        GFP_KERNEL
-                );
+                sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
+                                                descq_cnt * sizeof(u64[2]),
+                                                &sde->descq_phys, GFP_KERNEL);
                 if (!sde->descq)
                         goto bail;
                 sde->tx_ring =
@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 
         dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
         /* Allocate memory for DMA of head registers to memory */
-        dd->sdma_heads_dma = dma_zalloc_coherent(
-                &dd->pcidev->dev,
-                dd->sdma_heads_size,
-                &dd->sdma_heads_phys,
-                GFP_KERNEL
-        );
+        dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
+                                                dd->sdma_heads_size,
+                                                &dd->sdma_heads_phys,
+                                                GFP_KERNEL);
         if (!dd->sdma_heads_dma) {
                 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
                 goto bail;
         }
 
         /* Allocate memory for pad */
-        dd->sdma_pad_dma = dma_zalloc_coherent(
-                &dd->pcidev->dev,
-                sizeof(u32),
-                &dd->sdma_pad_phys,
-                GFP_KERNEL
-        );
+        dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+                                              &dd->sdma_pad_phys, GFP_KERNEL);
         if (!dd->sdma_pad_dma) {
                 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
                 goto bail;
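The hunks above follow the tree-wide retirement of dma_zalloc_coherent(): dma_alloc_coherent() already returns zeroed memory, so the wrapper is dropped and the arguments are simply reflowed. A minimal sketch of the resulting pattern, with hypothetical names (alloc_example, my_dev, buf, buf_dma) that are not taken from the hfi1 driver:

/*
 * Minimal sketch of the allocation pattern after the conversion; the
 * names here are hypothetical. dma_alloc_coherent() hands back memory
 * that is already zeroed, so no *_zalloc_* variant is needed.
 */
#include <linux/dma-mapping.h>

static int alloc_example(struct device *my_dev, size_t size,
                         void **buf, dma_addr_t *buf_dma)
{
        *buf = dma_alloc_coherent(my_dev, size, buf_dma, GFP_KERNEL);
        if (!*buf)
                return -ENOMEM;         /* allocation failed */
        return 0;
}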