Merge branch 'from-rc' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git

Patches for 4.16 that are dependent on patches sent to 4.15-rc.

These are small cleanups for the vmw_pvrdma and i40iw drivers.

* 'from-rc' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git:
  RDMA/vmw_pvrdma: Remove usage of BIT() from UAPI header
  RDMA/vmw_pvrdma: Use refcount_t instead of atomic_t
  RDMA/vmw_pvrdma: Use more specific sizeof in kcalloc
  RDMA/vmw_pvrdma: Clarify QP and CQ is_kernel logic
  RDMA/vmw_pvrdma: Add UAR SRQ macros in ABI header file
  i40iw: Change accelerated flag to bool
Jason Gunthorpe
2017-12-27 21:50:46 -07:00
Commit 76a895d9e1
40 changed files with 421 additions and 225 deletions
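The recurring change in this series is a conversion of driver object lifetimes from an atomic_t counter polled through a wait queue to a refcount_t paired with a struct completion: the last reference holder signals the completion exactly once, and the destroy path blocks on it. A minimal standalone sketch of the pattern, with a hypothetical struct obj standing in for pvrdma_cq/pvrdma_qp/pvrdma_srq:

#include <linux/refcount.h>
#include <linux/completion.h>

/* Hypothetical object carrying the same lifetime fields the series adds. */
struct obj {
	refcount_t refcnt;	/* replaces atomic_t refcnt */
	struct completion free;	/* replaces wait_queue_head_t wait */
};

static void obj_init(struct obj *o)
{
	refcount_set(&o->refcnt, 1);	/* creator holds the initial reference */
	init_completion(&o->free);
}

/* Temporary users (e.g. event handlers) drop their reference here. */
static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcnt))
		complete(&o->free);
}

/* Destroy path: drop the initial reference, then wait out all others. */
static void obj_destroy(struct obj *o)
{
	obj_put(o);
	wait_for_completion(&o->free);
	/* no other reference exists now; resources can be torn down */
}

Besides the usual refcount_t benefit of saturating and warning on over/underflow rather than silently wrapping, refcount_dec_and_test() ties the final decrement to the signal in a single atomic step, replacing the separate decrement-then-read sequences in the old handlers below.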

View file: drivers/infiniband/hw/vmw_pvrdma/pvrdma.h

@@ -93,8 +93,8 @@ struct pvrdma_cq {
 	struct pvrdma_page_dir pdir;
 	u32 cq_handle;
 	bool is_kernel;
-	atomic_t refcnt;
-	wait_queue_head_t wait;
+	refcount_t refcnt;
+	struct completion free;
 };
 
 struct pvrdma_id_table {
@@ -175,7 +175,7 @@ struct pvrdma_srq {
 	u32 srq_handle;
 	int npages;
 	refcount_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_qp {
@@ -196,8 +196,8 @@ struct pvrdma_qp {
 	u8 state;
 	bool is_kernel;
 	struct mutex mutex; /* QP state mutex. */
-	atomic_t refcnt;
-	wait_queue_head_t wait;
+	refcount_t refcnt;
+	struct completion free;
 };
 
 struct pvrdma_dev {

View file: drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c

@@ -132,8 +132,9 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	}
 
 	cq->ibcq.cqe = entries;
+	cq->is_kernel = !context;
 
-	if (context) {
+	if (!cq->is_kernel) {
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
 			ret = -EFAULT;
 			goto err_cq;
@@ -148,8 +149,6 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 
 		npages = ib_umem_page_count(cq->umem);
 	} else {
-		cq->is_kernel = true;
-
 		/* One extra page for shared ring state */
 		npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
 			      PAGE_SIZE - 1) / PAGE_SIZE;
@@ -178,8 +177,8 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	else
 		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
-	atomic_set(&cq->refcnt, 1);
-	init_waitqueue_head(&cq->wait);
+	refcount_set(&cq->refcnt, 1);
+	init_completion(&cq->free);
 	spin_lock_init(&cq->cq_lock);
 
 	memset(cmd, 0, sizeof(*cmd));
@@ -202,7 +201,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
-	if (context) {
+	if (!cq->is_kernel) {
 		cq->uar = &(to_vucontext(context)->uar);
 
 		/* Copy udata back. */
@@ -219,7 +218,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 err_page_dir:
 	pvrdma_page_dir_cleanup(dev, &cq->pdir);
 err_umem:
-	if (context)
+	if (!cq->is_kernel)
 		ib_umem_release(cq->umem);
 err_cq:
 	atomic_dec(&dev->num_cqs);
@@ -230,8 +229,9 @@ err_cq:
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-	atomic_dec(&cq->refcnt);
-	wait_event(cq->wait, !atomic_read(&cq->refcnt));
+	if (refcount_dec_and_test(&cq->refcnt))
+		complete(&cq->free);
+	wait_for_completion(&cq->free);
 
 	if (!cq->is_kernel)
 		ib_umem_release(cq->umem);
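The is_kernel clean-up in this file reduces to one rule: decide the flag once at create time from the presence of a user context, then test only the flag everywhere, including the error-unwind path. Condensed from the hunks above:

	cq->is_kernel = !context;	/* decided once, up front */

	if (!cq->is_kernel) {
		/* user CQ: pin and map user memory via ib_umem */
	} else {
		/* kernel CQ: allocate pages directly */
	}
	...
err_umem:
	if (!cq->is_kernel)		/* same flag on the unwind path */
		ib_umem_release(cq->umem);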

View file: drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c

@@ -243,13 +243,13 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
 	mutex_init(&dev->port_mutex);
 	spin_lock_init(&dev->desc_lock);
 
-	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
+	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
 			      GFP_KERNEL);
 	if (!dev->cq_tbl)
 		return ret;
 	spin_lock_init(&dev->cq_tbl_lock);
 
-	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
+	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
 			      GFP_KERNEL);
 	if (!dev->qp_tbl)
 		goto err_cq_free;
@@ -333,7 +333,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
 	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
 	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
 	if (qp)
-		atomic_inc(&qp->refcnt);
+		refcount_inc(&qp->refcnt);
 	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
 
 	if (qp && qp->ibqp.event_handler) {
@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
 		ibqp->event_handler(&e, ibqp->qp_context);
 	}
 	if (qp) {
-		atomic_dec(&qp->refcnt);
-		if (atomic_read(&qp->refcnt) == 0)
-			wake_up(&qp->wait);
+		if (refcount_dec_and_test(&qp->refcnt))
+			complete(&qp->free);
 	}
 }
 
@@ -360,7 +359,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
 	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
 	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
 	if (cq)
-		atomic_inc(&cq->refcnt);
+		refcount_inc(&cq->refcnt);
 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
 	if (cq && cq->ibcq.event_handler) {
@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
 		ibcq->event_handler(&e, ibcq->cq_context);
 	}
 	if (cq) {
-		atomic_dec(&cq->refcnt);
-		if (atomic_read(&cq->refcnt) == 0)
-			wake_up(&cq->wait);
+		if (refcount_dec_and_test(&cq->refcnt))
+			complete(&cq->free);
 	}
 }
 
@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
 	}
 
 	if (srq) {
 		if (refcount_dec_and_test(&srq->refcnt))
-			wake_up(&srq->wait);
+			complete(&srq->free);
 	}
 }
@@ -533,15 +531,14 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
 			spin_lock_irqsave(&dev->cq_tbl_lock, flags);
 			cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
 			if (cq)
-				atomic_inc(&cq->refcnt);
+				refcount_inc(&cq->refcnt);
 			spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
 			if (cq && cq->ibcq.comp_handler)
 				cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 			if (cq) {
-				atomic_dec(&cq->refcnt);
-				if (atomic_read(&cq->refcnt))
-					wake_up(&cq->wait);
+				if (refcount_dec_and_test(&cq->refcnt))
+					complete(&cq->free);
 			}
 			pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
 		}
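All four handlers above share one shape: look the object up in its table, take a reference under the table spinlock so teardown cannot free it mid-callback, invoke the handler, then drop the reference, completing the free waiter if it was the last holder. A condensed sketch of that shape (the helper names are hypothetical; the fields and locks are the driver's own):

static struct pvrdma_cq *pvrdma_cq_get(struct pvrdma_dev *dev, u32 cqn)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		refcount_inc(&cq->refcnt);	/* pin across the callback */
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
	return cq;
}

static void pvrdma_cq_put(struct pvrdma_cq *cq)
{
	if (refcount_dec_and_test(&cq->refcnt))
		complete(&cq->free);	/* releases the pvrdma_free_cq() waiter */
}

Note that the old pvrdma_intrx_handler tested if (atomic_read(&cq->refcnt)) where the other handlers tested == 0; the refcount_dec_and_test() form removes that inconsistency along with the separate read.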

View file: drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c

@@ -245,12 +245,13 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 		spin_lock_init(&qp->sq.lock);
 		spin_lock_init(&qp->rq.lock);
 		mutex_init(&qp->mutex);
-		atomic_set(&qp->refcnt, 1);
-		init_waitqueue_head(&qp->wait);
+		refcount_set(&qp->refcnt, 1);
+		init_completion(&qp->free);
 
 		qp->state = IB_QPS_RESET;
+		qp->is_kernel = !(pd->uobject && udata);
 
-		if (pd->uobject && udata) {
+		if (!qp->is_kernel) {
 			dev_dbg(&dev->pdev->dev,
 				"create queuepair from user space\n");
 
@@ -291,8 +292,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 			qp->npages_recv = 0;
 			qp->npages = qp->npages_send + qp->npages_recv;
 		} else {
-			qp->is_kernel = true;
-
 			ret = pvrdma_set_sq_size(to_vdev(pd->device),
 						 &init_attr->cap, qp);
 			if (ret)
@@ -394,7 +393,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 err_pdir:
 	pvrdma_page_dir_cleanup(dev, &qp->pdir);
 err_umem:
-	if (pd->uobject && udata) {
+	if (!qp->is_kernel) {
 		if (qp->rumem)
 			ib_umem_release(qp->rumem);
 		if (qp->sumem)
@@ -428,8 +427,16 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
 
 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-	atomic_dec(&qp->refcnt);
-	wait_event(qp->wait, !atomic_read(&qp->refcnt));
+	if (refcount_dec_and_test(&qp->refcnt))
+		complete(&qp->free);
+	wait_for_completion(&qp->free);
+
+	if (!qp->is_kernel) {
+		if (qp->rumem)
+			ib_umem_release(qp->rumem);
+		if (qp->sumem)
+			ib_umem_release(qp->sumem);
+	}
 
 	pvrdma_page_dir_cleanup(dev, &qp->pdir);
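Note the ordering in the new pvrdma_free_qp(): the umem mappings are released only after wait_for_completion() returns, i.e. once every event-handler reference pinned under qp_tbl_lock has been dropped. The create-path error unwind (err_umem above) may release them immediately because the QP was never published in the table, so no handler can hold a reference.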

View file: drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c

@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 
 	spin_lock_init(&srq->lock);
 	refcount_set(&srq->refcnt, 1);
-	init_waitqueue_head(&srq->wait);
+	init_completion(&srq->free);
 
 	dev_dbg(&dev->pdev->dev,
 		"create shared receive queue from user space\n");
@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
 	dev->srq_tbl[srq->srq_handle] = NULL;
 	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
-	refcount_dec(&srq->refcnt);
-	wait_event(srq->wait, !refcount_read(&srq->refcnt));
+	if (refcount_dec_and_test(&srq->refcnt))
+		complete(&srq->free);
+	wait_for_completion(&srq->free);
 
 	/* There is no support for kernel clients, so this is safe. */
 	ib_umem_release(srq->umem);