IB/hfi1: Move receive work queue struct into uapi directory
The rvt_rwqe and rvt_rwq struct elements are shared between rdmavt and the providers but are not in the uapi directory. As per the comment in https://marc.info/?l=linux-rdma&m=152296522708522&w=2, the hfi1 driver and the rdma core driver are not using shared structures in the uapi directory. Move the rvt_rwqe and rvt_rwq structs into the rvt-abi.h header in the uapi directory. Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com> Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com> Signed-off-by: Kamenee Arumugam <kamenee.arumugam@intel.com> Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Este cometimento está contido em:

cometido por
Jason Gunthorpe

ascendente
239b0e52d8
cometimento
dabac6e460
@@ -52,7 +52,7 @@
|
||||
|
||||
#include "srq.h"
|
||||
#include "vt.h"
|
||||
|
||||
#include "qp.h"
|
||||
/**
|
||||
* rvt_driver_srq_init - init srq resources on a per driver basis
|
||||
* @rdi: rvt dev structure
|
||||
@@ -97,11 +97,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
|
||||
srq->rq.max_sge = srq_init_attr->attr.max_sge;
|
||||
sz = sizeof(struct ib_sge) * srq->rq.max_sge +
|
||||
sizeof(struct rvt_rwqe);
|
||||
srq->rq.wq = udata ?
|
||||
vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz) :
|
||||
vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
|
||||
dev->dparms.node);
|
||||
if (!srq->rq.wq) {
|
||||
if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
|
||||
dev->dparms.node, udata)) {
|
||||
ret = -ENOMEM;
|
||||
goto bail_srq;
|
||||
}
|
||||
@@ -152,7 +149,7 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
|
||||
bail_ip:
|
||||
kfree(srq->ip);
|
||||
bail_wq:
|
||||
vfree(srq->rq.wq);
|
||||
rvt_free_rq(&srq->rq);
|
||||
bail_srq:
|
||||
return ret;
|
||||
}
|
||||
@@ -172,11 +169,12 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
{
|
||||
struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
|
||||
struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
|
||||
struct rvt_rwq *wq;
|
||||
struct rvt_rq tmp_rq = {};
|
||||
int ret = 0;
|
||||
|
||||
if (attr_mask & IB_SRQ_MAX_WR) {
|
||||
struct rvt_rwq *owq;
|
||||
struct rvt_krwq *okwq = NULL;
|
||||
struct rvt_rwq *owq = NULL;
|
||||
struct rvt_rwqe *p;
|
||||
u32 sz, size, n, head, tail;
|
||||
|
||||
@@ -185,17 +183,12 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
((attr_mask & IB_SRQ_LIMIT) ?
|
||||
attr->srq_limit : srq->limit) > attr->max_wr)
|
||||
return -EINVAL;
|
||||
|
||||
sz = sizeof(struct rvt_rwqe) +
|
||||
srq->rq.max_sge * sizeof(struct ib_sge);
|
||||
size = attr->max_wr + 1;
|
||||
wq = udata ?
|
||||
vmalloc_user(sizeof(struct rvt_rwq) + size * sz) :
|
||||
vzalloc_node(sizeof(struct rvt_rwq) + size * sz,
|
||||
dev->dparms.node);
|
||||
if (!wq)
|
||||
if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
|
||||
udata))
|
||||
return -ENOMEM;
|
||||
|
||||
/* Check that we can write the offset to mmap. */
|
||||
if (udata && udata->inlen >= sizeof(__u64)) {
|
||||
__u64 offset_addr;
|
||||
@@ -218,9 +211,15 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
* validate head and tail pointer values and compute
|
||||
* the number of remaining WQEs.
|
||||
*/
|
||||
owq = srq->rq.wq;
|
||||
head = owq->head;
|
||||
tail = owq->tail;
|
||||
if (udata) {
|
||||
owq = srq->rq.wq;
|
||||
head = RDMA_READ_UAPI_ATOMIC(owq->head);
|
||||
tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
|
||||
} else {
|
||||
okwq = srq->rq.kwq;
|
||||
head = okwq->head;
|
||||
tail = okwq->tail;
|
||||
}
|
||||
if (head >= srq->rq.size || tail >= srq->rq.size) {
|
||||
ret = -EINVAL;
|
||||
goto bail_unlock;
|
||||
@@ -235,7 +234,7 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
goto bail_unlock;
|
||||
}
|
||||
n = 0;
|
||||
p = wq->wq;
|
||||
p = tmp_rq.kwq->curr_wq;
|
||||
while (tail != head) {
|
||||
struct rvt_rwqe *wqe;
|
||||
int i;
|
||||
@@ -250,22 +249,29 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
if (++tail >= srq->rq.size)
|
||||
tail = 0;
|
||||
}
|
||||
srq->rq.wq = wq;
|
||||
srq->rq.kwq = tmp_rq.kwq;
|
||||
if (udata) {
|
||||
srq->rq.wq = tmp_rq.wq;
|
||||
RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
|
||||
RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
|
||||
} else {
|
||||
tmp_rq.kwq->head = n;
|
||||
tmp_rq.kwq->tail = 0;
|
||||
}
|
||||
srq->rq.size = size;
|
||||
wq->head = n;
|
||||
wq->tail = 0;
|
||||
if (attr_mask & IB_SRQ_LIMIT)
|
||||
srq->limit = attr->srq_limit;
|
||||
spin_unlock_irq(&srq->rq.lock);
|
||||
|
||||
vfree(owq);
|
||||
kvfree(okwq);
|
||||
|
||||
if (srq->ip) {
|
||||
struct rvt_mmap_info *ip = srq->ip;
|
||||
struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
|
||||
u32 s = sizeof(struct rvt_rwq) + size * sz;
|
||||
|
||||
rvt_update_mmap_info(dev, ip, s, wq);
|
||||
rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);
|
||||
|
||||
/*
|
||||
* Return the offset to mmap.
|
||||
@@ -301,7 +307,7 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
bail_unlock:
|
||||
spin_unlock_irq(&srq->rq.lock);
|
||||
bail_free:
|
||||
vfree(wq);
|
||||
rvt_free_rq(&tmp_rq);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -336,6 +342,5 @@ void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
|
||||
spin_unlock(&dev->n_srqs_lock);
|
||||
if (srq->ip)
|
||||
kref_put(&srq->ip->ref, rvt_release_mmap_info);
|
||||
else
|
||||
vfree(srq->rq.wq);
|
||||
kvfree(srq->rq.kwq);
|
||||
}
|
||||
|
Criar uma nova questão referindo esta
Bloquear um utilizador