Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "A very quiet cycle with few notable changes. Mostly the usual list of
  one or two patches to drivers changing something that isn't quite rc
  worthy. The subsystem seems to be seeing a larger number of rework and
  cleanup style patches right now, I feel that several vendors are
  prepping their drivers for new silicon.

  Summary:

   - Driver updates and cleanup for qedr, bnxt_re, hns, siw, mlx5, mlx4,
     rxe, i40iw

   - Larger series doing cleanup and rework for hns and hfi1

   - Some general reworking of the CM code to make it a little more
     understandable

   - Unify the different code paths connected to the uverbs FD scheme

   - New UAPI ioctls conversions for get context and get async fd

   - Trace points for CQ and CM portions of the RDMA stack

   - mlx5 driver support for virtio-net formatted rings as RDMA raw
     ethernet QPs

   - verbs support for setting the PCI-E relaxed ordering bit on DMA
     traffic connected to a MR

   - A couple of bug fixes that came too late to make rc7"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (108 commits)
  RDMA/core: Make the entire API tree static
  RDMA/efa: Mask access flags with the correct optional range
  RDMA/cma: Fix unbalanced cm_id reference count during address resolve
  RDMA/umem: Fix ib_umem_find_best_pgsz()
  IB/mlx4: Fix leak in id_map_find_del
  IB/opa_vnic: Spelling correction of 'erorr' to 'error'
  IB/hfi1: Fix logical condition in msix_request_irq
  RDMA/cm: Remove CM message structs
  RDMA/cm: Use IBA functions for complex structure members
  RDMA/cm: Use IBA functions for simple structure members
  RDMA/cm: Use IBA functions for swapping get/set acessors
  RDMA/cm: Use IBA functions for simple get/set acessors
  RDMA/cm: Add SET/GET implementations to hide IBA wire format
  RDMA/cm: Add accessors for CM_REQ transport_type
  IB/mlx5: Return the administrative GUID if exists
  RDMA/core: Ensure that rdma_user_mmap_entry_remove() is a fence
  IB/mlx4: Fix memory leak in add_gid error flow
  IB/mlx5: Expose RoCE accelerator counters
  RDMA/mlx5: Set relaxed ordering when requested
  RDMA/core: Add the core support field to METHOD_GET_CONTEXT
  ...
@@ -195,7 +195,14 @@ void rvt_get_credit(struct rvt_qp *qp, u32 aeth)
 }
 EXPORT_SYMBOL(rvt_get_credit);
 
-/* rvt_restart_sge - rewind the sge state for a wqe */
+/**
+ * rvt_restart_sge - rewind the sge state for a wqe
+ * @ss: the sge state pointer
+ * @wqe: the wqe to rewind
+ * @len: the data length from the start of the wqe in bytes
+ *
+ * Returns the remaining data length.
+ */
 u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)
 {
         ss->sge = wqe->sg_list[0];
@@ -34,6 +34,8 @@
 #ifndef RXE_PARAM_H
 #define RXE_PARAM_H
 
+#include <uapi/rdma/rdma_user_rxe.h>
+
 static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)
 {
         if (mtu < 256)
@@ -64,7 +66,6 @@ enum rxe_device_param {
         RXE_PAGE_SIZE_CAP       = 0xfffff000,
         RXE_MAX_QP              = 0x10000,
         RXE_MAX_QP_WR           = 0x4000,
-        RXE_MAX_INLINE_DATA     = 400,
         RXE_DEVICE_CAP_FLAGS    = IB_DEVICE_BAD_PKEY_CNTR
                                 | IB_DEVICE_BAD_QKEY_CNTR
                                 | IB_DEVICE_AUTO_PATH_MIG
@@ -77,6 +78,10 @@ enum rxe_device_param {
                                 | IB_DEVICE_MEM_MGT_EXTENSIONS
                                 | IB_DEVICE_ALLOW_USER_UNREG,
         RXE_MAX_SGE             = 32,
+        RXE_MAX_WQE_SIZE        = sizeof(struct rxe_send_wqe) +
+                                  sizeof(struct ib_sge) * RXE_MAX_SGE,
+        RXE_MAX_INLINE_DATA     = RXE_MAX_WQE_SIZE -
+                                  sizeof(struct rxe_send_wqe),
         RXE_MAX_SGE_RD          = 32,
         RXE_MAX_CQ              = 16384,
         RXE_MAX_LOG_CQE         = 15,
@@ -237,19 +237,17 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
          */
         qp->src_port = RXE_ROCE_V2_SPORT +
                 (hash_32_generic(qp_num(qp), 14) & 0x3fff);
 
         qp->sq.max_wr = init->cap.max_send_wr;
-        qp->sq.max_sge = init->cap.max_send_sge;
-        qp->sq.max_inline = init->cap.max_inline_data;
 
-        wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
-                         qp->sq.max_sge * sizeof(struct ib_sge),
-                         sizeof(struct rxe_send_wqe) +
-                         qp->sq.max_inline);
+        /* These caps are limited by rxe_qp_chk_cap() done by the caller */
+        wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
+                         init->cap.max_inline_data);
+        qp->sq.max_sge = init->cap.max_send_sge =
+                wqe_size / sizeof(struct ib_sge);
+        qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
+        wqe_size += sizeof(struct rxe_send_wqe);
 
-        qp->sq.queue = rxe_queue_init(rxe,
-                                      &qp->sq.max_wr,
-                                      wqe_size);
+        qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
         if (!qp->sq.queue)
                 return -ENOMEM;
 
@@ -408,7 +408,7 @@ struct rxe_dev {
         struct list_head        pending_mmaps;
 
         spinlock_t              mmap_offset_lock; /* guard mmap_offset */
-        int                     mmap_offset;
+        u64                     mmap_offset;
 
         atomic64_t              stats_counters[RXE_NUM_OF_COUNTERS];
 
@@ -7,6 +7,7 @@
 #define _SIW_H
 
 #include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <crypto/hash.h>
@@ -209,7 +210,6 @@ struct siw_cq {
         u32 cq_put;
         u32 cq_get;
         u32 num_cqe;
-        bool kernel_verbs;
         struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
         u32 id; /* For debugging only */
 };
@@ -254,8 +254,8 @@ struct siw_srq {
         u32 rq_get;
         u32 num_rqe; /* max # of wqe's allowed */
         struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
-        char armed; /* inform user if limit hit */
-        char kernel_verbs; /* '1' if kernel client */
+        bool armed:1; /* inform user if limit hit */
+        bool is_kernel_res:1; /* true if kernel client */
 };
 
 struct siw_qp_attrs {
@@ -418,13 +418,11 @@ struct siw_iwarp_tx {
 };
 
 struct siw_qp {
+        struct ib_qp base_qp;
         struct siw_device *sdev;
-        struct ib_qp *ib_qp;
         struct kref ref;
-        u32 qp_num;
         struct list_head devq;
         int tx_cpu;
-        bool kernel_verbs;
         struct siw_qp_attrs attrs;
 
         struct siw_cep *cep;
@@ -472,11 +470,6 @@ struct siw_qp {
         struct rcu_head rcu;
 };
 
-struct siw_base_qp {
-        struct ib_qp base_qp;
-        struct siw_qp *qp;
-};
-
 /* helper macros */
 #define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
 #define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
@@ -572,14 +565,9 @@ static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
         return container_of(base_ctx, struct siw_ucontext, base_ucontext);
 }
 
-static inline struct siw_base_qp *to_siw_base_qp(struct ib_qp *base_qp)
-{
-        return container_of(base_qp, struct siw_base_qp, base_qp);
-}
-
 static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
 {
-        return to_siw_base_qp(base_qp)->qp;
+        return container_of(base_qp, struct siw_qp, base_qp);
 }
 
 static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
@@ -624,7 +612,7 @@ static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
 
 static inline u32 qp_id(struct siw_qp *qp)
 {
-        return qp->qp_num;
+        return qp->base_qp.qp_num;
 }
 
 static inline void siw_qp_get(struct siw_qp *qp)
@@ -735,7 +723,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
                 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
 
 #define siw_dbg_cep(cep, fmt, ...) \
-        ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
+        ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
                   cep, __func__, ##__VA_ARGS__)
 
 void siw_cq_flush(struct siw_cq *cq);
@@ -29,7 +29,7 @@
  * MPA_V2_RDMA_NO_RTR, MPA_V2_RDMA_READ_RTR, MPA_V2_RDMA_WRITE_RTR
  */
 static __be16 rtr_type = MPA_V2_RDMA_READ_RTR | MPA_V2_RDMA_WRITE_RTR;
-static const bool relaxed_ird_negotiation = 1;
+static const bool relaxed_ird_negotiation = true;
 
 static void siw_cm_llp_state_change(struct sock *s);
 static void siw_cm_llp_data_ready(struct sock *s);
@@ -65,7 +65,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
                  * reaped here, which do not hold a QP reference
                  * and do not qualify for memory extension verbs.
                  */
-                if (likely(cq->kernel_verbs)) {
+                if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
                         if (cqe->flags & SIW_WQE_REM_INVAL) {
                                 wc->ex.invalidate_rkey = cqe->inval_stag;
                                 wc->wc_flags = IB_WC_WITH_INVALIDATE;
@@ -244,7 +244,7 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
                  * siw_qp_id2obj() increments object reference count
                  */
                 siw_qp_put(qp);
-                return qp->ib_qp;
+                return &qp->base_qp;
         }
         return NULL;
 }
@@ -1070,8 +1070,8 @@ int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
                 cqe->imm_data = 0;
                 cqe->bytes = bytes;
 
-                if (cq->kernel_verbs)
-                        cqe->base_qp = qp->ib_qp;
+                if (rdma_is_kernel_res(&cq->base_cq.res))
+                        cqe->base_qp = &qp->base_qp;
                 else
                         cqe->qp_id = qp_id(qp);
 
@@ -1128,8 +1128,8 @@ int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
                 cqe->imm_data = 0;
                 cqe->bytes = bytes;
 
-                if (cq->kernel_verbs) {
-                        cqe->base_qp = qp->ib_qp;
+                if (rdma_is_kernel_res(&cq->base_cq.res)) {
+                        cqe->base_qp = &qp->base_qp;
                         if (inval_stag) {
                                 cqe_flags |= SIW_WQE_REM_INVAL;
                                 cqe->inval_stag = inval_stag;
@@ -1297,13 +1297,12 @@ void siw_rq_flush(struct siw_qp *qp)
 
 int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 {
-        int rv = xa_alloc(&sdev->qp_xa, &qp->ib_qp->qp_num, qp, xa_limit_32b,
+        int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
                           GFP_KERNEL);
 
         if (!rv) {
                 kref_init(&qp->ref);
                 qp->sdev = sdev;
-                qp->qp_num = qp->ib_qp->qp_num;
                 siw_dbg_qp(qp, "new QP\n");
         }
         return rv;
@@ -1312,7 +1311,6 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 void siw_free_qp(struct kref *ref)
 {
         struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
-        struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
         struct siw_device *sdev = qp->sdev;
         unsigned long flags;
 
@@ -1335,5 +1333,4 @@ void siw_free_qp(struct kref *ref)
         atomic_dec(&sdev->num_qp);
         siw_dbg_qp(qp, "free QP\n");
         kfree_rcu(qp, rcu);
-        kfree(siw_base_qp);
 }
@@ -68,7 +68,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
                         return -EFAULT;
                 }
                 if (srx->mpa_crc_hd) {
-                        if (rx_qp(srx)->kernel_verbs) {
+                        if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
                                 crypto_shash_update(srx->mpa_crc_hd,
                                         (u8 *)(dest + pg_off), bytes);
                                 kunmap_atomic(dest);
@@ -388,7 +388,7 @@ static struct siw_wqe *siw_rqe_get(struct siw_qp *qp)
                         struct siw_rqe *rqe2 = &srq->recvq[off];
 
                         if (!(rqe2->flags & SIW_WQE_VALID)) {
-                                srq->armed = 0;
+                                srq->armed = false;
                                 srq_event = true;
                         }
                 }
@@ -1264,7 +1264,7 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
 
                 if (wc_status == SIW_WC_SUCCESS)
                         wc_status = SIW_WC_GENERAL_ERR;
-        } else if (qp->kernel_verbs &&
+        } else if (rdma_is_kernel_res(&qp->base_qp.res) &&
                    rx_type(wqe) == SIW_OP_READ_LOCAL_INV) {
                 /*
                  * Handle any STag invalidation request
@@ -817,7 +817,7 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
                 }
         } else {
                 wqe->bytes = wqe->sqe.sge[0].length;
-                if (!qp->kernel_verbs) {
+                if (!rdma_is_kernel_res(&qp->base_qp.res)) {
                         if (wqe->bytes > SIW_MAX_INLINE) {
                                 rv = -EINVAL;
                                 goto tx_error;
@@ -303,7 +303,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
                             struct ib_udata *udata)
 {
         struct siw_qp *qp = NULL;
-        struct siw_base_qp *siw_base_qp = NULL;
         struct ib_device *base_dev = pd->device;
         struct siw_device *sdev = to_siw_dev(base_dev);
         struct siw_ucontext *uctx =
@@ -357,26 +356,16 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
                 rv = -EINVAL;
                 goto err_out;
         }
-        siw_base_qp = kzalloc(sizeof(*siw_base_qp), GFP_KERNEL);
-        if (!siw_base_qp) {
-                rv = -ENOMEM;
-                goto err_out;
-        }
         qp = kzalloc(sizeof(*qp), GFP_KERNEL);
         if (!qp) {
                 rv = -ENOMEM;
                 goto err_out;
         }
-        siw_base_qp->qp = qp;
-        qp->ib_qp = &siw_base_qp->base_qp;
-
         init_rwsem(&qp->state_lock);
         spin_lock_init(&qp->sq_lock);
         spin_lock_init(&qp->rq_lock);
         spin_lock_init(&qp->orq_lock);
 
-        qp->kernel_verbs = !udata;
-
         rv = siw_qp_add(sdev, qp);
         if (rv)
                 goto err_out;
@@ -389,10 +378,10 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
         num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
         num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
 
-        if (qp->kernel_verbs)
-                qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
-        else
+        if (udata)
                 qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
+        else
+                qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
 
         if (qp->sendq == NULL) {
                 siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
@@ -419,13 +408,14 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
                  */
                 qp->srq = to_siw_srq(attrs->srq);
                 qp->attrs.rq_size = 0;
-                siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
+                siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
+                        qp->base_qp.qp_num);
         } else if (num_rqe) {
-                if (qp->kernel_verbs)
-                        qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
-                else
+                if (udata)
                         qp->recvq =
                                 vmalloc_user(num_rqe * sizeof(struct siw_rqe));
+                else
+                        qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
 
                 if (qp->recvq == NULL) {
                         siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
@@ -492,13 +482,11 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
         list_add_tail(&qp->devq, &sdev->qp_list);
         spin_unlock_irqrestore(&sdev->lock, flags);
 
-        return qp->ib_qp;
+        return &qp->base_qp;
 
 err_out_xa:
         xa_erase(&sdev->qp_xa, qp_id(qp));
 err_out:
-        kfree(siw_base_qp);
-
         if (qp) {
                 if (uctx) {
                         rdma_user_mmap_entry_remove(qp->sq_entry);
@@ -742,7 +730,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
         unsigned long flags;
         int rv = 0;
 
-        if (wr && !qp->kernel_verbs) {
+        if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
                 siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
                 *bad_wr = wr;
                 return -EINVAL;
@@ -939,7 +927,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
         if (rv <= 0)
                 goto skip_direct_sending;
 
-        if (qp->kernel_verbs) {
+        if (rdma_is_kernel_res(&qp->base_qp.res)) {
                 rv = siw_sq_start(qp);
         } else {
                 qp->tx_ctx.in_syscall = 1;
@@ -984,8 +972,8 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
                 *bad_wr = wr;
                 return -EOPNOTSUPP; /* what else from errno.h? */
         }
-        if (!qp->kernel_verbs) {
-                siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+        if (!rdma_is_kernel_res(&qp->base_qp.res)) {
+                siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
                 *bad_wr = wr;
                 return -EINVAL;
         }
@@ -1127,14 +1115,13 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
         cq->base_cq.cqe = size;
         cq->num_cqe = size;
 
-        if (!udata) {
-                cq->kernel_verbs = 1;
-                cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
-                                    sizeof(struct siw_cq_ctrl));
-        } else {
+        if (udata)
                 cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
                                          sizeof(struct siw_cq_ctrl));
-        }
+        else
+                cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
+                                    sizeof(struct siw_cq_ctrl));
+
         if (cq->queue == NULL) {
                 rv = -ENOMEM;
                 goto err_out;
@@ -1589,9 +1576,9 @@ int siw_create_srq(struct ib_srq *base_srq,
         srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
         srq->limit = attrs->srq_limit;
         if (srq->limit)
-                srq->armed = 1;
+                srq->armed = true;
 
-        srq->kernel_verbs = !udata;
+        srq->is_kernel_res = !udata;
 
         if (udata)
                 srq->recvq =
@@ -1671,9 +1658,9 @@ int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
                                 rv = -EINVAL;
                                 goto out;
                         }
-                        srq->armed = 1;
+                        srq->armed = true;
                 } else {
-                        srq->armed = 0;
+                        srq->armed = false;
                 }
                 srq->limit = attrs->srq_limit;
         }
@@ -1745,7 +1732,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
         unsigned long flags;
         int rv = 0;
 
-        if (unlikely(!srq->kernel_verbs)) {
+        if (unlikely(!srq->is_kernel_res)) {
                 siw_dbg_pd(base_srq->pd,
                            "[SRQ]: no kernel post_recv for mapped srq\n");
                 rv = -EINVAL;
@@ -1797,7 +1784,7 @@ out:
 void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
 {
         struct ib_event event;
-        struct ib_qp *base_qp = qp->ib_qp;
+        struct ib_qp *base_qp = &qp->base_qp;
 
         /*
          * Do not report asynchronous errors on QP which gets