Merge branches 'misc', 'qedr', 'reject-helpers', 'rxe' and 'srp' into merge-test
@@ -420,11 +420,12 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
     (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
     (qp->req.state == QP_STATE_ERROR)) {
         make_send_cqe(qp, wqe, &cqe);
+        advance_consumer(qp->sq.queue);
         rxe_cq_post(qp->scq, &cqe, 0);
+    } else {
+        advance_consumer(qp->sq.queue);
     }
 
-    advance_consumer(qp->sq.queue);
-
     /*
      * we completed something so let req run again
      * if it is trying to fence
@@ -510,6 +511,8 @@ int rxe_completer(void *arg)
     struct rxe_pkt_info *pkt = NULL;
     enum comp_state state;
 
+    rxe_add_ref(qp);
+
     if (!qp->valid) {
         while ((skb = skb_dequeue(&qp->resp_pkts))) {
             rxe_drop_ref(qp);
@@ -739,11 +742,13 @@ exit:
     /* we come here if we are done with processing and want the task to
      * exit from the loop calling us
      */
+    rxe_drop_ref(qp);
     return -EAGAIN;
 
 done:
     /* we come here if we have processed a packet we want the task to call
      * us again to see if there is anything else to do
      */
+    rxe_drop_ref(qp);
     return 0;
 }

@@ -266,8 +266,6 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
         return err;
     }
 
-    atomic_inc(&qp->skb_out);
-
     if ((qp_type(qp) != IB_QPT_RC) &&
         (pkt->mask & RXE_END_MASK)) {
         pkt->wqe->state = wqe_state_done;

@@ -355,6 +355,9 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
     size_t offset;
     u32 crc = crcp ? (*crcp) : 0;
 
+    if (length == 0)
+        return 0;
+
     if (mem->type == RXE_MEM_TYPE_DMA) {
         u8 *src, *dest;
 

@@ -46,7 +46,7 @@
 #include "rxe_loc.h"
 
 static LIST_HEAD(rxe_dev_list);
-static spinlock_t dev_list_lock; /* spinlock for device list */
+static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
 
 struct rxe_dev *net_to_rxe(struct net_device *ndev)
 {
@@ -459,6 +459,8 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
         return -EAGAIN;
     }
 
+    if (pkt->qp)
+        atomic_inc(&pkt->qp->skb_out);
     kfree_skb(skb);
 
     return 0;
@@ -663,8 +665,6 @@ struct notifier_block rxe_net_notifier = {
 
 int rxe_net_ipv4_init(void)
 {
-    spin_lock_init(&dev_list_lock);
-
     recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
             htons(ROCE_V2_UDP_DPORT), false);
     if (IS_ERR(recv_sockets.sk4)) {
@@ -680,8 +680,6 @@ int rxe_net_ipv6_init(void)
 {
 #if IS_ENABLED(CONFIG_IPV6)
 
-    spin_lock_init(&dev_list_lock);
-
     recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
             htons(ROCE_V2_UDP_DPORT), true);
     if (IS_ERR(recv_sockets.sk6)) {

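Note on the dev_list_lock hunk above: it trades the runtime spin_lock_init() calls in rxe_net_ipv4_init() and rxe_net_ipv6_init() for compile-time initialization, so the lock is valid no matter which init path runs first. A minimal self-contained sketch of the same pattern (the example_* names are illustrative, not part of the patch):

#include <linux/list.h>
#include <linux/spinlock.h>

/* A statically allocated lock can be initialized at build time, so no
 * init call has to run before the first user takes the lock.
 */
static DEFINE_SPINLOCK(example_list_lock);
static LIST_HEAD(example_list);

static void example_add(struct list_head *entry)
{
        unsigned long flags;

        spin_lock_irqsave(&example_list_lock, flags);
        list_add_tail(entry, &example_list);
        spin_unlock_irqrestore(&example_list_lock, flags);
}
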
@@ -391,16 +391,15 @@ int rxe_rcv(struct sk_buff *skb)
              payload_size(pkt));
     calc_icrc = cpu_to_be32(~calc_icrc);
     if (unlikely(calc_icrc != pack_icrc)) {
-        char saddr[sizeof(struct in6_addr)];
-
         if (skb->protocol == htons(ETH_P_IPV6))
-            sprintf(saddr, "%pI6", &ipv6_hdr(skb)->saddr);
+            pr_warn_ratelimited("bad ICRC from %pI6c\n",
+                                &ipv6_hdr(skb)->saddr);
         else if (skb->protocol == htons(ETH_P_IP))
-            sprintf(saddr, "%pI4", &ip_hdr(skb)->saddr);
+            pr_warn_ratelimited("bad ICRC from %pI4\n",
+                                &ip_hdr(skb)->saddr);
         else
-            sprintf(saddr, "unknown");
+            pr_warn_ratelimited("bad ICRC from unknown\n");
 
-        pr_warn_ratelimited("bad ICRC from %s\n", saddr);
         goto drop;
     }
 

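Note on the ICRC hunk above: the removed saddr buffer holds sizeof(struct in6_addr) = 16 bytes, while "%pI6" expands to 32 hex characters plus the terminating NUL, so sprintf() can run past the end of the stack buffer. Printing through printk's %pI6c/%pI4 extensions formats the address inside vsnprintf() and needs no intermediate buffer. A small illustrative sketch (log_bad_icrc() is a hypothetical helper, not part of the patch):

#include <linux/in6.h>
#include <linux/printk.h>

/* The address is formatted by printk itself, so nothing is written to a
 * caller-supplied buffer that could overflow.
 */
static void log_bad_icrc(const struct in6_addr *saddr)
{
        pr_warn_ratelimited("bad ICRC from %pI6c\n", saddr);
}
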
@@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp,
 static void save_state(struct rxe_send_wqe *wqe,
                        struct rxe_qp *qp,
                        struct rxe_send_wqe *rollback_wqe,
-                       struct rxe_qp *rollback_qp)
+                       u32 *rollback_psn)
 {
     rollback_wqe->state = wqe->state;
     rollback_wqe->first_psn = wqe->first_psn;
     rollback_wqe->last_psn = wqe->last_psn;
-    rollback_qp->req.psn = qp->req.psn;
+    *rollback_psn = qp->req.psn;
 }
 
 static void rollback_state(struct rxe_send_wqe *wqe,
                            struct rxe_qp *qp,
                            struct rxe_send_wqe *rollback_wqe,
-                           struct rxe_qp *rollback_qp)
+                           u32 rollback_psn)
 {
     wqe->state = rollback_wqe->state;
     wqe->first_psn = rollback_wqe->first_psn;
     wqe->last_psn = rollback_wqe->last_psn;
-    qp->req.psn = rollback_qp->req.psn;
+    qp->req.psn = rollback_psn;
 }
 
 static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
@@ -593,8 +593,10 @@ int rxe_requester(void *arg)
     int mtu;
     int opcode;
     int ret;
-    struct rxe_qp rollback_qp;
     struct rxe_send_wqe rollback_wqe;
+    u32 rollback_psn;
 
+    rxe_add_ref(qp);
+
 next_wqe:
     if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -718,7 +720,7 @@ next_wqe:
      * rxe_xmit_packet().
      * Otherwise, completer might initiate an unjustified retry flow.
      */
-    save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+    save_state(wqe, qp, &rollback_wqe, &rollback_psn);
     update_wqe_state(qp, wqe, &pkt);
     update_wqe_psn(qp, wqe, &pkt, payload);
     ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
@@ -726,7 +728,7 @@ next_wqe:
         qp->need_req_skb = 1;
         kfree_skb(skb);
 
-        rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
+        rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
         if (ret == -EAGAIN) {
             rxe_run_task(&qp->req.task, 1);
@@ -750,9 +752,10 @@ complete:
         while (rxe_completer(qp) == 0)
             ;
     }
-
+    rxe_drop_ref(qp);
     return 0;
 
 exit:
+    rxe_drop_ref(qp);
     return -EAGAIN;
 }

@@ -444,6 +444,13 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
         return RESPST_EXECUTE;
     }
 
+    /* A zero-byte op is not required to set an addr or rkey. */
+    if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
+        (pkt->mask & RXE_RETH_MASK) &&
+        reth_len(pkt) == 0) {
+        return RESPST_EXECUTE;
+    }
+
     va = qp->resp.va;
     rkey = qp->resp.rkey;
     resid = qp->resp.resid;
@@ -680,9 +687,14 @@ static enum resp_states read_reply(struct rxe_qp *qp,
         res->read.va_org = qp->resp.va;
 
         res->first_psn = req_pkt->psn;
-        res->last_psn = req_pkt->psn +
-                        (reth_len(req_pkt) + mtu - 1) /
-                        mtu - 1;
+
+        if (reth_len(req_pkt)) {
+            res->last_psn = (req_pkt->psn +
+                             (reth_len(req_pkt) + mtu - 1) /
+                             mtu - 1) & BTH_PSN_MASK;
+        } else {
+            res->last_psn = res->first_psn;
+        }
         res->cur_psn = req_pkt->psn;
 
         res->read.resid = qp->resp.resid;
@@ -742,7 +754,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
     } else {
         qp->resp.res = NULL;
         qp->resp.opcode = -1;
-        qp->resp.psn = res->cur_psn;
+        if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
+            qp->resp.psn = res->cur_psn;
         state = RESPST_CLEANUP;
     }
 
@@ -1132,6 +1145,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
                              pkt, skb_copy);
         if (rc) {
             pr_err("Failed resending result. This flow is not handled - skb ignored\n");
             rxe_drop_ref(qp);
+            kfree_skb(skb_copy);
             rc = RESPST_CLEANUP;
             goto out;
@@ -1198,6 +1212,8 @@ int rxe_responder(void *arg)
     struct rxe_pkt_info *pkt = NULL;
     int ret = 0;
 
+    rxe_add_ref(qp);
+
     qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
 
     if (!qp->valid) {
@@ -1386,5 +1402,6 @@ int rxe_responder(void *arg)
 exit:
     ret = -EAGAIN;
 done:
+    rxe_drop_ref(qp);
     return ret;
 }

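A quick worked example of why read_reply() needs the reth_len() branch added above (the numbers are illustrative, not from the patch): with mtu = 1024 and reth_len(req_pkt) = 0, the old expression gives last_psn = psn + (0 + 1024 - 1) / 1024 - 1 = psn - 1 after integer division, i.e. one PSN before first_psn. The new code pins last_psn to first_psn for a zero-length read and masks the multi-packet result with BTH_PSN_MASK so it stays inside the 24-bit PSN space.
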
@@ -169,7 +169,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
         }
     }
 
-    err = rxe_queue_resize(q, (unsigned int *)&attr->max_wr,
+    err = rxe_queue_resize(q, &attr->max_wr,
                            rcv_wqe_size(srq->rq.max_sge),
                            srq->rq.queue->ip ?
                            srq->rq.queue->ip->context :

@@ -121,6 +121,7 @@ int rxe_init_task(void *obj, struct rxe_task *task,
     task->arg = arg;
     task->func = func;
     snprintf(task->name, sizeof(task->name), "%s", name);
+    task->destroyed = false;
 
     tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task);
 
@@ -132,11 +133,29 @@ int rxe_init_task(void *obj, struct rxe_task *task,
 
 void rxe_cleanup_task(struct rxe_task *task)
 {
+    unsigned long flags;
+    bool idle;
+
+    /*
+     * Mark the task, then wait for it to finish. It might be
+     * running in a non-tasklet (direct call) context.
+     */
+    task->destroyed = true;
+
+    do {
+        spin_lock_irqsave(&task->state_lock, flags);
+        idle = (task->state == TASK_STATE_START);
+        spin_unlock_irqrestore(&task->state_lock, flags);
+    } while (!idle);
+
     tasklet_kill(&task->tasklet);
 }
 
 void rxe_run_task(struct rxe_task *task, int sched)
 {
+    if (task->destroyed)
+        return;
+
     if (sched)
         tasklet_schedule(&task->tasklet);
     else

@@ -54,6 +54,7 @@ struct rxe_task {
     int (*func)(void *arg);
     int ret;
     char name[16];
+    bool destroyed;
 };
 
 /*

@@ -566,7 +566,7 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
     if (udata) {
         if (udata->inlen) {
             err = -EINVAL;
-            goto err1;
+            goto err2;
         }
         qp->is_user = 1;
     }
@@ -575,12 +575,13 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
 
     err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
     if (err)
-        goto err2;
+        goto err3;
 
     return &qp->ibqp;
 
-err2:
+err3:
     rxe_drop_index(qp);
+err2:
     rxe_drop_ref(qp);
 err1:
     return ERR_PTR(err);
@@ -1009,11 +1010,19 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
 static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
     struct rxe_cq *cq = to_rcq(ibcq);
+    unsigned long irq_flags;
+    int ret = 0;
 
+    spin_lock_irqsave(&cq->cq_lock, irq_flags);
     if (cq->notify != IB_CQ_NEXT_COMP)
         cq->notify = flags & IB_CQ_SOLICITED_MASK;
 
-    return 0;
+    if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
+        ret = 1;
+
+    spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+
+    return ret;
 }
 
 static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
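With the rxe_req_notify_cq() change above, passing IB_CQ_REPORT_MISSED_EVENTS makes the verb return 1 when completions are already queued at re-arm time. A hedged consumer-side sketch (example_rearm_cq() and its drain callback are hypothetical, not part of the patch):

#include <rdma/ib_verbs.h>

/* Re-arm the CQ; a return value > 0 means completions may already be
 * pending, so poll once more instead of waiting for the next event.
 */
static void example_rearm_cq(struct ib_cq *cq, void (*drain)(struct ib_cq *cq))
{
        if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                 IB_CQ_REPORT_MISSED_EVENTS) > 0)
                drain(cq);
}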