Merge branch 'rdma-cq.2' of git://git.infradead.org/users/hch/rdma into 4.5/rdma-cq
Signed-off-by: Doug Ledford <dledford@redhat.com>

Conflicts:
	drivers/infiniband/ulp/srp/ib_srp.c - conflicts with changes in
	ib_srp.c introduced during the 4.4-rc updates
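The merged 'rdma-cq.2' series converts the SRP initiator from ib_create_cq() with hand-rolled ib_poll_cq() loops, where each work request was identified by a tagged 64-bit wr.wr_id, to the new CQ API: CQs come from ib_alloc_cq() with a polling context, each work request embeds a struct ib_cqe, and the core invokes that CQE's ->done callback for every completion. Below is a minimal standalone sketch of the consumer-side pattern; the my_* names are invented for illustration and do not appear in the commit.

#include <rdma/ib_verbs.h>

/* Each in-flight request embeds an ib_cqe instead of tagging wr.wr_id. */
struct my_request {
	struct ib_cqe	cqe;	/* ->done is the completion handler */
	/* ... request state ... */
};

static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* The core hands back the posted CQE; recover the request from it. */
	struct my_request *req =
		container_of(wc->wr_cqe, struct my_request, cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		/* Flush and QP errors land here too; no poll loop to decode. */
		return;
	}
	/* ... complete req ... */
}

static int my_post(struct ib_qp *qp, struct my_request *req,
		   struct ib_sge *sge)
{
	struct ib_send_wr wr = {}, *bad_wr;

	req->cqe.done = my_send_done;	/* replaces wr.wr_id = (uintptr_t)req */
	wr.wr_cqe     = &req->cqe;
	wr.sg_list    = sge;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	return ib_post_send(qp, &wr, &bad_wr);
}

The CQ itself is obtained with ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx) and released with ib_free_cq(). The poll_ctx argument decides how completions are reaped: IB_POLL_SOFTIRQ (used below for the receive CQ) polls from softirq context, while IB_POLL_DIRECT (used for the send CQ) leaves polling to the caller via ib_process_cq_direct(), which is exactly what __srp_get_tx_iu() does in the diff.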
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -132,8 +132,9 @@ MODULE_PARM_DESC(ch_count,
 
 static void srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device, void *client_data);
-static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
-static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
+static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
+		const char *opname);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
 
 static struct scsi_transport_template *ib_srp_transport_template;
@@ -445,6 +446,17 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
 		  dev->max_pages_per_mr);
 }
 
+static void srp_drain_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct srp_rdma_ch *ch = cq->cq_context;
+
+	complete(&ch->done);
+}
+
+static struct ib_cqe srp_drain_cqe = {
+	.done = srp_drain_done,
+};
+
 /**
  * srp_destroy_qp() - destroy an RDMA queue pair
  * @ch: SRP RDMA channel.
@@ -457,10 +469,11 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
 static void srp_destroy_qp(struct srp_rdma_ch *ch)
 {
 	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
-	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
+	static struct ib_recv_wr wr = { 0 };
 	struct ib_recv_wr *bad_wr;
 	int ret;
 
+	wr.wr_cqe = &srp_drain_cqe;
 	/* Destroying a QP and reusing ch->done is only safe if not connected */
 	WARN_ON_ONCE(ch->connected);
 
@@ -489,34 +502,27 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	struct ib_fmr_pool *fmr_pool = NULL;
 	struct srp_fr_pool *fr_pool = NULL;
 	const int m = dev->use_fast_reg ? 3 : 1;
-	struct ib_cq_init_attr cq_attr = {};
 	int ret;
 
 	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
 	if (!init_attr)
 		return -ENOMEM;
 
-	/* + 1 for SRP_LAST_WR_ID */
-	cq_attr.cqe = target->queue_size + 1;
-	cq_attr.comp_vector = ch->comp_vector;
-	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
-			       &cq_attr);
+	/* queue_size + 1 for ib_drain_qp */
+	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
+				ch->comp_vector, IB_POLL_SOFTIRQ);
 	if (IS_ERR(recv_cq)) {
 		ret = PTR_ERR(recv_cq);
 		goto err;
 	}
 
-	cq_attr.cqe = m * target->queue_size;
-	cq_attr.comp_vector = ch->comp_vector;
-	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
-			       &cq_attr);
+	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
+				ch->comp_vector, IB_POLL_DIRECT);
 	if (IS_ERR(send_cq)) {
 		ret = PTR_ERR(send_cq);
 		goto err_recv_cq;
 	}
 
-	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
-
 	init_attr->event_handler = srp_qp_event;
 	init_attr->cap.max_send_wr = m * target->queue_size;
 	init_attr->cap.max_recv_wr = target->queue_size + 1;
@@ -558,9 +564,9 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	if (ch->qp)
 		srp_destroy_qp(ch);
 	if (ch->recv_cq)
-		ib_destroy_cq(ch->recv_cq);
+		ib_free_cq(ch->recv_cq);
 	if (ch->send_cq)
-		ib_destroy_cq(ch->send_cq);
+		ib_free_cq(ch->send_cq);
 
 	ch->qp = qp;
 	ch->recv_cq = recv_cq;
@@ -580,13 +586,13 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	return 0;
 
 err_qp:
-	ib_destroy_qp(qp);
+	srp_destroy_qp(ch);
 
 err_send_cq:
-	ib_destroy_cq(send_cq);
+	ib_free_cq(send_cq);
 
 err_recv_cq:
-	ib_destroy_cq(recv_cq);
+	ib_free_cq(recv_cq);
 
 err:
 	kfree(init_attr);
@@ -622,9 +628,10 @@ static void srp_free_ch_ib(struct srp_target_port *target,
 		if (ch->fmr_pool)
 			ib_destroy_fmr_pool(ch->fmr_pool);
 	}
+
 	srp_destroy_qp(ch);
-	ib_destroy_cq(ch->send_cq);
-	ib_destroy_cq(ch->recv_cq);
+	ib_free_cq(ch->send_cq);
+	ib_free_cq(ch->recv_cq);
 
 	/*
 	 * Avoid that the SCSI error handler tries to use this channel after
@@ -1041,18 +1048,25 @@ out:
 	return ret <= 0 ? ret : -ENODEV;
 }
 
-static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
+static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	srp_handle_qp_err(cq, wc, "INV RKEY");
+}
+
+static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
+		u32 rkey)
 {
 	struct ib_send_wr *bad_wr;
 	struct ib_send_wr wr = {
 		.opcode		    = IB_WR_LOCAL_INV,
-		.wr_id		    = LOCAL_INV_WR_ID_MASK,
 		.next		    = NULL,
 		.num_sge	    = 0,
 		.send_flags	    = 0,
 		.ex.invalidate_rkey = rkey,
 	};
 
+	wr.wr_cqe = &req->reg_cqe;
+	req->reg_cqe.done = srp_inv_rkey_err_done;
 	return ib_post_send(ch->qp, &wr, &bad_wr);
 }
 
@@ -1074,7 +1088,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 		struct srp_fr_desc **pfr;
 
 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
-			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
+			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
 			if (res < 0) {
 				shost_printk(KERN_ERR, target->scsi_host, PFX
 					     "Queueing INV WR for rkey %#x failed (%d)\n",
@@ -1312,7 +1326,13 @@ reset_state:
 	return 0;
 }
 
+static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	srp_handle_qp_err(cq, wc, "FAST REG");
+}
+
 static int srp_map_finish_fr(struct srp_map_state *state,
+			     struct srp_request *req,
 			     struct srp_rdma_ch *ch, int sg_nents)
 {
 	struct srp_target_port *target = ch->target;
@@ -1349,9 +1369,11 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 	if (unlikely(n < 0))
 		return n;
 
+	req->reg_cqe.done = srp_reg_mr_err_done;
+
 	wr.wr.next = NULL;
 	wr.wr.opcode = IB_WR_REG_MR;
-	wr.wr.wr_id = FAST_REG_WR_ID_MASK;
+	wr.wr.wr_cqe = &req->reg_cqe;
 	wr.wr.num_sge = 0;
 	wr.wr.send_flags = 0;
 	wr.mr = desc->mr;
@@ -1455,7 +1477,7 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 	while (count) {
 		int i, n;
 
-		n = srp_map_finish_fr(state, ch, count);
+		n = srp_map_finish_fr(state, req, ch, count);
 		if (unlikely(n < 0))
 			return n;
 
@@ -1524,7 +1546,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
 #endif
-		ret = srp_map_finish_fr(&state, ch, 1);
+		ret = srp_map_finish_fr(&state, req, ch, 1);
 		if (ret < 0)
 			return ret;
 	} else if (dev->use_fmr) {
@@ -1719,7 +1741,7 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
 	struct srp_iu *iu;
 
-	srp_send_completion(ch->send_cq, ch);
+	ib_process_cq_direct(ch->send_cq, -1);
 
 	if (list_empty(&ch->free_tx))
 		return NULL;
@@ -1739,6 +1761,19 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
 	return iu;
 }
 
+static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
+	struct srp_rdma_ch *ch = cq->cq_context;
+
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		srp_handle_qp_err(cq, wc, "SEND");
+		return;
+	}
+
+	list_add(&iu->list, &ch->free_tx);
+}
+
 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
 {
 	struct srp_target_port *target = ch->target;
@@ -1749,8 +1784,10 @@ static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
 	list.length = len;
 	list.lkey   = target->lkey;
 
+	iu->cqe.done = srp_send_done;
+
 	wr.next       = NULL;
-	wr.wr_id      = (uintptr_t) iu;
+	wr.wr_cqe     = &iu->cqe;
 	wr.sg_list    = &list;
 	wr.num_sge    = 1;
 	wr.opcode     = IB_WR_SEND;
@@ -1769,8 +1806,10 @@ static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
 	list.length = iu->size;
 	list.lkey   = target->lkey;
 
+	iu->cqe.done = srp_recv_done;
+
 	wr.next     = NULL;
-	wr.wr_id    = (uintptr_t) iu;
+	wr.wr_cqe   = &iu->cqe;
 	wr.sg_list  = &list;
 	wr.num_sge  = 1;
 
@@ -1902,14 +1941,20 @@ static void srp_process_aer_req(struct srp_rdma_ch *ch,
 			     "problems processing SRP_AER_REQ\n");
 }
 
-static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
+static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
+	struct srp_rdma_ch *ch = cq->cq_context;
 	struct srp_target_port *target = ch->target;
 	struct ib_device *dev = target->srp_host->srp_dev->dev;
-	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
 	int res;
 	u8 opcode;
 
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		srp_handle_qp_err(cq, wc, "RECV");
+		return;
+	}
+
 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
 				   DMA_FROM_DEVICE);
 
@@ -1972,68 +2017,22 @@ static void srp_tl_err_work(struct work_struct *work)
 	srp_start_tl_fail_timers(target->rport);
 }
 
-static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
-			      bool send_err, struct srp_rdma_ch *ch)
+static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
+		const char *opname)
 {
+	struct srp_rdma_ch *ch = cq->cq_context;
 	struct srp_target_port *target = ch->target;
 
-	if (wr_id == SRP_LAST_WR_ID) {
-		complete(&ch->done);
-		return;
-	}
-
 	if (ch->connected && !target->qp_in_error) {
-		if (wr_id & LOCAL_INV_WR_ID_MASK) {
-			shost_printk(KERN_ERR, target->scsi_host, PFX
-				     "LOCAL_INV failed with status %s (%d)\n",
-				     ib_wc_status_msg(wc_status), wc_status);
-		} else if (wr_id & FAST_REG_WR_ID_MASK) {
-			shost_printk(KERN_ERR, target->scsi_host, PFX
-				     "FAST_REG_MR failed status %s (%d)\n",
-				     ib_wc_status_msg(wc_status), wc_status);
-		} else {
-			shost_printk(KERN_ERR, target->scsi_host,
-				     PFX "failed %s status %s (%d) for iu %p\n",
-				     send_err ? "send" : "receive",
-				     ib_wc_status_msg(wc_status), wc_status,
-				     (void *)(uintptr_t)wr_id);
-		}
+		shost_printk(KERN_ERR, target->scsi_host,
+			     PFX "failed %s status %s (%d) for CQE %p\n",
+			     opname, ib_wc_status_msg(wc->status), wc->status,
+			     wc->wr_cqe);
 		queue_work(system_long_wq, &target->tl_err_work);
 	}
 	target->qp_in_error = true;
 }
 
-static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
-{
-	struct srp_rdma_ch *ch = ch_ptr;
-	struct ib_wc wc;
-
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		if (likely(wc.status == IB_WC_SUCCESS)) {
-			srp_handle_recv(ch, &wc);
-		} else {
-			srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
-		}
-	}
-}
-
-static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
-{
-	struct srp_rdma_ch *ch = ch_ptr;
-	struct ib_wc wc;
-	struct srp_iu *iu;
-
-	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		if (likely(wc.status == IB_WC_SUCCESS)) {
-			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
-			list_add(&iu->list, &ch->free_tx);
-		} else {
-			srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
-		}
-	}
-}
-
 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(shost);
@@ -3587,8 +3586,6 @@ static int __init srp_init_module(void)
 {
 	int ret;
 
-	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
-
 	if (srp_sg_tablesize) {
 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
 		if (!cmd_sg_entries)
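A closing note on the drain logic: the old code recognized its drain marker by wr_id == SRP_LAST_WR_ID inside srp_handle_qp_err(); with wr_id gone, srp_destroy_qp() instead points a marker receive WR at the file-scope srp_drain_cqe, and srp_drain_done() completes ch->done once the flushed WR is reaped (this is what the extra "queue_size + 1" CQ slot is reserved for). A minimal sketch of that idiom follows, with invented my_* names, assuming the parts of srp_destroy_qp() outside the visible hunk keep the usual modify-to-error, post-marker, wait sequence.

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

static DECLARE_COMPLETION(my_drained);	/* stands in for ch->done */

static void my_drain_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* The marker WR comes back as IB_WC_WR_FLUSH_ERR; with the new API
	 * ->done runs regardless of status, so its arrival alone means
	 * "the receive queue is drained". */
	complete(&my_drained);
}

static struct ib_cqe my_drain_cqe = { .done = my_drain_done };

static void my_drain_recv_queue(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_recv_wr wr = { .wr_cqe = &my_drain_cqe }, *bad_wr;

	/* The error state flushes all queued WRs; completions are delivered
	 * in FIFO order per queue, so the marker posted afterwards is
	 * reaped last. */
	if (ib_modify_qp(qp, &attr, IB_QP_STATE))
		return;
	if (ib_post_recv(qp, &wr, &bad_wr))
		return;
	wait_for_completion(&my_drained);
}

The same idiom was later generalized into the core as the ib_drain_qp() helper that the "queue_size + 1 for ib_drain_qp" comment in the diff anticipates.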