Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband updates from Roland Dreier:
 "First batch of InfiniBand/RDMA changes for the 3.7 merge window:
  - mlx4 IB support for SR-IOV
  - A couple of SRP initiator fixes
  - Batch of nes hardware driver fixes
  - Fix for long-standing use-after-free crash in IPoIB
  - Other miscellaneous fixes"

This merge also removes a new use of __cancel_delayed_work(), and
replaces it with the regular cancel_delayed_work() that is now irq-safe
thanks to the workqueue updates.

That said, I suspect the sequence in question should probably use
"mod_delayed_work()". I just did the minimal "don't use deprecated
functions" fixup, though.

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (45 commits)
  IB/qib: Fix local access validation for user MRs
  mlx4_core: Disable SENSE_PORT for multifunction devices
  mlx4_core: Clean up enabling of SENSE_PORT for older (ConnectX-1/-2) HCAs
  mlx4_core: Stash PCI ID driver_data in mlx4_priv structure
  IB/srp: Avoid having aborted requests hang
  IB/srp: Fix use-after-free in srp_reset_req()
  IB/qib: Add a qib driver version
  RDMA/nes: Fix compilation error when nes_debug is enabled
  RDMA/nes: Print hardware resource type
  RDMA/nes: Fix for crash when TX checksum offload is off
  RDMA/nes: Cosmetic changes
  RDMA/nes: Fix for incorrect MSS when TSO is on
  RDMA/nes: Fix incorrect resolving of the loopback MAC address
  mlx4_core: Fix crash on uninitialized priv->cmd.slave_sem
  mlx4_core: Trivial cleanups to driver log messages
  mlx4_core: Trivial readability fix: "0X30" -> "0x30"
  IB/mlx4: Create paravirt contexts for VFs when master IB driver initializes
  mlx4: Modify proxy/tunnel QP mechanism so that guests do no calculations
  mlx4: Paravirtualize Node Guids for slaves
  mlx4: Activate SR-IOV mode for IB
  ...
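The workqueue note above is easiest to see in code. The following is a
minimal illustrative sketch, not code from this merge: the work item,
callback, and delay are invented names, while cancel_delayed_work(),
schedule_delayed_work(), mod_delayed_work(), and system_wq are the real
workqueue APIs in question.

/*
 * Illustrative sketch only; not code from this merge. demo_dwork,
 * demo_timeout_fn, and the delay are invented names.
 */
#include <linux/workqueue.h>

static void demo_timeout_fn(struct work_struct *work)
{
        /* ... handle the timeout ... */
}

static DECLARE_DELAYED_WORK(demo_dwork, demo_timeout_fn);

/* The minimal "don't use deprecated functions" fixup: the plain
 * cancel_delayed_work() is now irq-safe, so the __cancel_delayed_work()
 * variant is no longer needed before requeueing. */
static void demo_rearm_two_step(unsigned long delay)
{
        cancel_delayed_work(&demo_dwork);
        schedule_delayed_work(&demo_dwork, delay);
}

/* The suggested alternative: mod_delayed_work() adjusts the timer
 * atomically, queueing the work item if it was not already pending. */
static void demo_rearm_mod(unsigned long delay)
{
        mod_delayed_work(system_wq, &demo_dwork, delay);
}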
@@ -137,19 +137,25 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                 return -ENOMEM;
 
         wq->rq.qid = c4iw_get_qpid(rdev, uctx);
-        if (!wq->rq.qid)
-                goto err1;
+        if (!wq->rq.qid) {
+                ret = -ENOMEM;
+                goto free_sq_qid;
+        }
 
         if (!user) {
                 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
                                  GFP_KERNEL);
-                if (!wq->sq.sw_sq)
-                        goto err2;
+                if (!wq->sq.sw_sq) {
+                        ret = -ENOMEM;
+                        goto free_rq_qid;
+                }
 
                 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
                                  GFP_KERNEL);
-                if (!wq->rq.sw_rq)
-                        goto err3;
+                if (!wq->rq.sw_rq) {
+                        ret = -ENOMEM;
+                        goto free_sw_sq;
+                }
         }
 
         /*
@@ -157,15 +163,23 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
          */
         wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
         wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
-        if (!wq->rq.rqt_hwaddr)
-                goto err4;
+        if (!wq->rq.rqt_hwaddr) {
+                ret = -ENOMEM;
+                goto free_sw_rq;
+        }
 
         if (user) {
-                if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
-                        goto err5;
+                ret = alloc_oc_sq(rdev, &wq->sq);
+                if (ret)
+                        goto free_hwaddr;
+
+                ret = alloc_host_sq(rdev, &wq->sq);
+                if (ret)
+                        goto free_sq;
         } else
-                if (alloc_host_sq(rdev, &wq->sq))
-                        goto err5;
+                ret = alloc_host_sq(rdev, &wq->sq);
+                if (ret)
+                        goto free_hwaddr;
         memset(wq->sq.queue, 0, wq->sq.memsize);
         dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
@@ -173,7 +187,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                                           wq->rq.memsize, &(wq->rq.dma_addr),
                                           GFP_KERNEL);
         if (!wq->rq.queue)
-                goto err6;
+                goto free_sq;
         PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
                 __func__, wq->sq.queue,
                 (unsigned long long)virt_to_phys(wq->sq.queue),
@@ -201,7 +215,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
         skb = alloc_skb(wr_len, GFP_KERNEL);
         if (!skb) {
                 ret = -ENOMEM;
-                goto err7;
+                goto free_dma;
         }
         set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
@@ -266,33 +280,33 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 
         ret = c4iw_ofld_send(rdev, skb);
         if (ret)
-                goto err7;
+                goto free_dma;
         ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
         if (ret)
-                goto err7;
+                goto free_dma;
 
         PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
              __func__, wq->sq.qid, wq->rq.qid, wq->db,
              (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
 
         return 0;
-err7:
+free_dma:
         dma_free_coherent(&(rdev->lldi.pdev->dev),
                           wq->rq.memsize, wq->rq.queue,
                           dma_unmap_addr(&wq->rq, mapping));
-err6:
+free_sq:
         dealloc_sq(rdev, &wq->sq);
-err5:
+free_hwaddr:
         c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
-err4:
+free_sw_rq:
         kfree(wq->rq.sw_rq);
-err3:
+free_sw_sq:
         kfree(wq->sq.sw_sq);
-err2:
+free_rq_qid:
         c4iw_put_qpid(rdev, wq->rq.qid, uctx);
-err1:
+free_sq_qid:
         c4iw_put_qpid(rdev, wq->sq.qid, uctx);
-        return -ENOMEM;
+        return ret;
 }
 
 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
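The conversion in the diff is mechanical: each bare "goto errN" now
records an explicit error code in ret (or keeps the code returned by the
failed call) and jumps to a label named for the first resource that must
be freed, and the single exit path returns ret instead of a hardcoded
-ENOMEM. Below is a minimal userspace sketch of the same unwind idiom,
with invented names and plain calloc()/malloc()/free() standing in for
the driver's allocators.

/* Userspace sketch of the descriptive-label goto unwind; all names
 * here are invented for illustration, not taken from the driver. */
#include <errno.h>
#include <stdlib.h>

struct queue {
        void *sw_sq;
        void *sw_rq;
        void *hw_buf;
};

static int create_queue(struct queue *q, size_t sq_size, size_t rq_size)
{
        int ret;

        q->sw_sq = calloc(sq_size, 1);
        if (!q->sw_sq) {
                ret = -ENOMEM;
                goto out;
        }

        q->sw_rq = calloc(rq_size, 1);
        if (!q->sw_rq) {
                ret = -ENOMEM;
                goto free_sw_sq;       /* label names the cleanup start */
        }

        q->hw_buf = malloc(sq_size + rq_size);
        if (!q->hw_buf) {
                ret = -ENOMEM;
                goto free_sw_rq;
        }

        return 0;

        /* Unwind in reverse order of acquisition; each label frees
         * exactly the resources acquired before the failing step. */
free_sw_rq:
        free(q->sw_rq);
free_sw_sq:
        free(q->sw_sq);
out:
        return ret;
}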