Merge branch 'devx-async' into k.o/for-next
Yishai Hadas says:

Enable DEVX asynchronous query commands

This series enables querying a DEVX object in an asynchronous mode. The
userspace application won't block when calling the firmware and will be
able to get the response back once it is ready.

To enable the above functionality:

- A DEVX asynchronous command completion FD object was introduced.
- The applicable file operations were implemented to enable using it by
  the user application.
- An asynchronous query method was added to the DEVX object; it calls
  the firmware asynchronously and manages the response on the given
  input FD.
- Hot unplug support was added so the FD works properly upon
  unbind/disassociate.
- An mlx5 core fence for asynchronous commands was implemented and used
  to prevent racing upon unbind/disassociate.

This branch is based on mlx5-next & v5.0-rc2 due to dependencies, from
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

* branch 'devx-async':
  IB/mlx5: Implement DEVX hot unplug for async command FD
  IB/mlx5: Implement the file ops of DEVX async command FD
  IB/mlx5: Introduce async DEVX obj query API
  IB/mlx5: Introduce MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
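For illustration only, below is a minimal userspace sketch of the flow this
series enables, written against the rdma-core mlx5dv DEVX wrappers that pair
with it (mlx5dv_devx_create_cmd_comp, mlx5dv_devx_obj_query_async,
mlx5dv_devx_get_async_cmd_comp). The DEVX object, the query command buffer,
the wr_id value, and the response size are placeholders, and the exact
structure layouts and signatures should be checked against the mlx5dv.h
shipped with your rdma-core version; this is not part of the kernel series
itself.

    /* Sketch: issue a DEVX query without blocking, then reap the
     * completion from the async command FD.  Assumed helper signatures;
     * verify against <infiniband/mlx5dv.h>. */
    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <infiniband/mlx5dv.h>

    int query_devx_async(struct ibv_context *ctx, struct mlx5dv_devx_obj *obj,
                         const void *in, size_t inlen, size_t outlen)
    {
            struct mlx5dv_devx_cmd_comp *comp;
            struct mlx5dv_devx_async_cmd_hdr *resp;
            size_t resp_len = sizeof(*resp) + outlen;
            struct pollfd pfd;
            int ret;

            /* 1. Create the async command completion FD object. */
            comp = mlx5dv_devx_create_cmd_comp(ctx);
            if (!comp)
                    return -1;

            resp = calloc(1, resp_len);
            if (!resp) {
                    mlx5dv_devx_destroy_cmd_comp(comp);
                    return -1;
            }

            /* 2. Fire the query; the call returns immediately and the
             *    firmware response is later delivered on comp->fd,
             *    tagged with the caller-chosen wr_id (0x1234 here). */
            ret = mlx5dv_devx_obj_query_async(obj, in, inlen, outlen,
                                              0x1234, comp);
            if (ret)
                    goto out;

            /* 3. Do other work, then wait for the FD to become readable. */
            pfd.fd = comp->fd;
            pfd.events = POLLIN;
            poll(&pfd, 1, -1);

            /* 4. Reap the completed command; hdr.wr_id identifies which
             *    outstanding query this response belongs to. */
            ret = mlx5dv_devx_get_async_cmd_comp(comp, resp, resp_len);
            if (!ret)
                    printf("async query done, wr_id=0x%llx\n",
                           (unsigned long long)resp->wr_id);
    out:
            free(resp);
            mlx5dv_devx_destroy_cmd_comp(comp);
            return ret;
    }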
@@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
 	q->len = len;
 	q->entry_size = entry_size;
 	q->size = len * entry_size;
-	q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
-				    &q->dma, GFP_KERNEL);
+	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
+				   GFP_KERNEL);
 	if (!q->va)
 		return -ENOMEM;
 	return 0;
@@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 		return -ENOMEM;
 	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
-	cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
 	if (!cq->va) {
 		status = -ENOMEM;
 		goto mem_err;
@@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
 	qp->sq.max_cnt = max_wqe_allocated;
 	len = (hw_pages * hw_page_size);
 
-	qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 	if (!qp->sq.va)
 		return -EINVAL;
 	qp->sq.len = len;
@@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
 	qp->rq.max_cnt = max_rqe_allocated;
 	len = (hw_pages * hw_page_size);
 
-	qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 	if (!qp->rq.va)
 		return -ENOMEM;
 	qp->rq.pa = pa;
@@ -2315,8 +2315,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
 	if (dev->attr.ird == 0)
 		return 0;
 
-	qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
-					   GFP_KERNEL);
+	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
+					  GFP_KERNEL);
 	if (!qp->ird_q_va)
 		return -ENOMEM;
 	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,

@@ -73,8 +73,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
 	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
 			  sizeof(struct ocrdma_rdma_stats_resp));
 
-	mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
-				      &mem->pa, GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+				     &mem->pa, GFP_KERNEL);
 	if (!mem->va) {
 		pr_err("%s: stats mbox allocation failed\n", __func__);
 		return false;

@@ -504,8 +504,8 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&ctx->mm_head);
 	mutex_init(&ctx->mm_list_lock);
 
-	ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
-					     &ctx->ah_tbl.pa, GFP_KERNEL);
+	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
+					    &ctx->ah_tbl.pa, GFP_KERNEL);
 	if (!ctx->ah_tbl.va) {
 		kfree(ctx);
 		return ERR_PTR(-ENOMEM);
@@ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
 		return -ENOMEM;
 
 	for (i = 0; i < mr->num_pbls; i++) {
-		va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
 		if (!va) {
 			ocrdma_free_mr_pbl_tbl(dev, mr);
 			status = -ENOMEM;