Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflict resolution of af_smc.c from Stephen Rothwell.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller
2019-04-17 11:26:25 -07:00
350 changed files with 3,837 additions and 1,907 deletions


@@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
 	int total_contexts;
 	int ret;
 	unsigned ngroups;
-	int qos_rmt_count;
+	int rmt_count;
 	int user_rmt_reduced;
 	u32 n_usr_ctxts;
 	u32 send_contexts = chip_send_contexts(dd);
@@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
 		n_usr_ctxts = rcv_contexts - total_contexts;
 	}
 
-	/* each user context requires an entry in the RMT */
-	qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
-	if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
-		user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+	/*
+	 * The RMT entries are currently allocated as shown below:
+	 * 1. QOS (0 to 128 entries);
+	 * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+	 * 3. VNIC (num_vnic_contexts).
+	 * It should be noted that PSM FECN oversubscribes num_vnic_contexts
+	 * entries of RMT because both VNIC and PSM could allocate any receive
+	 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
+	 * and PSM FECN must reserve an RMT entry for each possible PSM receive
+	 * context.
+	 */
+	rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+	if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+		user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
 		dd_dev_err(dd,
 			   "RMT size is reducing the number of user receive contexts from %u to %d\n",
 			   n_usr_ctxts,
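
As a rough, self-contained illustration of the sizing check above (this is not driver code: NUM_MAP_ENTRIES is assumed to be the 256-entry RMT here, and the QOS, VNIC and user context counts are invented), the arithmetic plays out like this:

/* sketch.c - illustrative only; every value below is an assumption */
#include <stdio.h>

#define NUM_MAP_ENTRIES 256			/* assumed RMT size */

int main(void)
{
	unsigned int qos_entries = 128;		/* hypothetical QOS usage (item 1) */
	unsigned int num_vnic_contexts = 8;	/* hypothetical (items 2 and 3) */
	unsigned int n_usr_ctxts = 160;		/* hypothetical user context request */

	/* QOS entries plus the PSM-FECN/VNIC oversubscription described above */
	unsigned int rmt_count = qos_entries + num_vnic_contexts * 2;

	if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
		unsigned int user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;

		printf("user receive contexts reduced from %u to %u\n",
		       n_usr_ctxts, user_rmt_reduced);
		n_usr_ctxts = user_rmt_reduced;
	}
	printf("final n_usr_ctxts = %u\n", n_usr_ctxts);
	return 0;
}

With these invented numbers rmt_count is 144, 144 + 160 exceeds 256, and the user contexts drop from 160 to 112, which is exactly the situation the dd_dev_err() message reports.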
@@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
 	u64 reg;
 	int i, idx, regoff, regidx;
 	u8 offset;
+	u32 total_cnt;
 
 	/* there needs to be enough room in the map table */
-	if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+	total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+	if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
 		dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
 		return;
 	}
@@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
 	/* add rule 1 */
 	add_rsm_rule(dd, RSM_INS_FECN, &rrd);
 
-	rmt->used += dd->num_user_contexts;
+	rmt->used += total_cnt;
 }
 
 /* Initialize RSM for VNIC */
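
A minimal sketch of why total_cnt in init_user_fecn_handling() can exceed dd->num_user_contexts (the counts below are hypothetical, not from a real configuration): every receive context from first_dyn_alloc_ctxt onward, whether it ends up as a user/PSM or a VNIC context, may need an RMT entry for FECN handling.

/* sketch only - all counts below are assumptions for illustration */
#include <stdio.h>

static unsigned int fecn_rmt_entries_needed(unsigned int num_rcv_contexts,
					    unsigned int first_dyn_alloc_ctxt)
{
	/* every context from first_dyn_alloc_ctxt up may become a PSM
	 * receive context, so each one needs room in the RMT */
	return num_rcv_contexts - first_dyn_alloc_ctxt;
}

int main(void)
{
	/* e.g. 3 kernel/control contexts, 16 user contexts, 8 VNIC contexts */
	unsigned int total_cnt = fecn_rmt_entries_needed(27, 3);

	printf("RMT entries needed for FECN: %u\n", total_cnt);	/* prints 24 */
	return 0;
}

With these numbers the new bound reserves 24 entries, whereas the old dd->num_user_contexts bound would have reserved only 16, which is how the map table could be overrun.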


@@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp)
 		if (!list_empty(&priv->s_iowait.list) &&
 		    !(qp->s_flags & RVT_S_BUSY) &&
 		    !(priv->s_flags & RVT_S_BUSY)) {
-			qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+			qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
 			list_del_init(&priv->s_iowait.list);
 			priv->s_iowait.lock = NULL;
 			rvt_put_qp(qp);


@@ -3088,7 +3088,7 @@ send_last:
 			update_ack_queue(qp, next);
 		}
 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
-		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+		if (e->rdma_sge.mr) {
 			rvt_put_mr(e->rdma_sge.mr);
 			e->rdma_sge.mr = NULL;
 		}
@@ -3166,7 +3166,7 @@ send_last:
 			update_ack_queue(qp, next);
 		}
 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
-		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+		if (e->rdma_sge.mr) {
 			rvt_put_mr(e->rdma_sge.mr);
 			e->rdma_sge.mr = NULL;
 		}


@@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	    make_tid_rdma_ack(qp, ohdr, ps))
 		return 1;
 
-	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
-		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
-			goto bail;
-		/* We are in the error state, flush the work request. */
-		if (qp->s_last == READ_ONCE(qp->s_head))
-			goto bail;
-		/* If DMAs are in progress, we can't flush immediately. */
-		if (iowait_sdma_pending(&priv->s_iowait)) {
-			qp->s_flags |= RVT_S_WAIT_DMA;
-			goto bail;
-		}
-		clear_ahg(qp);
-		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-		hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
-					 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
-		/* will get called again */
-		goto done_free_tx;
-	}
+	/*
+	 * Bail out if we can't send data.
+	 * Be reminded that this check must be done after the call to
+	 * make_tid_rdma_ack() because the responding QP could be in
+	 * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
+	 */
+	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
+		goto bail;
 
 	if (priv->s_flags & RVT_S_WAIT_ACK)
 		goto bail;
@@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
 			     middle, ps);
 	return 1;
-done_free_tx:
-	hfi1_put_txreq(ps->s_txreq);
-	ps->s_txreq = NULL;
-	return 1;
-
 bail:
 	hfi1_put_txreq(ps->s_txreq);
 bail_no_tx: