Merge tag 'v5.2-rc6' into rdma.git for-next
For dependencies in next patches.

Resolve conflicts:
- Use uverbs_get_cleared_udata() with new cq allocation flow
- Continue to delete nes despite SPDX conflict
- Resolve list appends in mlx5_command_str()
- Use u16 for vport_rule stuff
- Resolve list appends in struct ib_client

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_BNXT_RE
 	tristate "Broadcom Netxtreme HCA support"
 	depends on 64BIT

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_CXGB3
 	tristate "Chelsio RDMA Driver"
 	depends on CHELSIO_T3

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_CXGB4
 	tristate "Chelsio T4/T5 RDMA Driver"
 	depends on CHELSIO_T4 && INET

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
 ccflags-y += -I $(srctree)/drivers/net/ethernet/chelsio/libcxgb
@@ -138,9 +138,11 @@ int efa_com_destroy_qp(struct efa_com_dev *edev,
 			       sizeof(qp_cmd),
 			       (struct efa_admin_acq_entry *)&cmd_completion,
 			       sizeof(cmd_completion));
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n",
 			  qp_cmd.qp_handle, err);
+		return err;
+	}
 
-	return err;
+	return 0;
 }
@@ -198,9 +200,11 @@ int efa_com_destroy_cq(struct efa_com_dev *edev,
 				 (struct efa_admin_acq_entry *)&destroy_resp,
 				 sizeof(destroy_resp));
 
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n",
 			  params->cq_idx, err);
+		return err;
+	}
 
-	return err;
+	return 0;
 }
@@ -272,10 +276,12 @@ int efa_com_dereg_mr(struct efa_com_dev *edev,
 			       sizeof(mr_cmd),
 			       (struct efa_admin_acq_entry *)&cmd_completion,
 			       sizeof(cmd_completion));
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev,
 			  "Failed to de-register mr(lkey-%u) [%d]\n",
 			  mr_cmd.l_key, err);
+		return err;
+	}
 
-	return err;
+	return 0;
 }
@@ -327,9 +333,11 @@ int efa_com_destroy_ah(struct efa_com_dev *edev,
 			       sizeof(ah_cmd),
 			       (struct efa_admin_acq_entry *)&cmd_completion,
 			       sizeof(cmd_completion));
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n",
 			  ah_cmd.ah, ah_cmd.pd, err);
+		return err;
+	}
 
-	return err;
+	return 0;
 }
@@ -387,10 +395,12 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
 				 get_resp,
 				 sizeof(*get_resp));
 
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev,
 			  "Failed to submit get_feature command %d [%d]\n",
 			  feature_id, err);
+		return err;
+	}
 
-	return err;
+	return 0;
 }
@@ -534,10 +544,12 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
 				 (struct efa_admin_acq_entry *)set_resp,
 				 sizeof(*set_resp));
 
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev,
 			  "Failed to submit set_feature command %d error: %d\n",
 			  feature_id, err);
+		return err;
+	}
 
-	return err;
+	return 0;
 }
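The efa_com_cmd.c hunks above all apply one pattern: log and return the error inside the if (err) branch, and end the success path with an explicit return 0;. A minimal compilable sketch of that shape, with cmd_exec() as a hypothetical stand-in for efa_com_cmd_exec():

#include <stdio.h>

/* Hypothetical stand-in for efa_com_cmd_exec(). */
static int cmd_exec(int fail) { return fail ? -5 /* -EIO */ : 0; }

/* The shape every efa_com_* function above converges on: log and
 * return the error inside the branch, bare 0 on the success path. */
static int do_cmd(int fail)
{
	int err = cmd_exec(fail);

	if (err) {
		fprintf(stderr, "command failed [%d]\n", err);
		return err;
	}

	return 0;
}

int main(void)
{
	printf("ok=%d fail=%d\n", do_cmd(0), do_cmd(1));
	return 0;
}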
@@ -204,6 +204,7 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
 			     void *obj, u64 address, u64 length, u8 mmap_flag)
 {
 	struct efa_mmap_entry *entry;
+	u32 next_mmap_page;
 	int err;
 
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -216,15 +217,19 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
 	entry->mmap_flag = mmap_flag;
 
 	xa_lock(&ucontext->mmap_xa);
+	if (check_add_overflow(ucontext->mmap_xa_page,
+			       (u32)(length >> PAGE_SHIFT),
+			       &next_mmap_page))
+		goto err_unlock;
+
 	entry->mmap_page = ucontext->mmap_xa_page;
-	ucontext->mmap_xa_page += DIV_ROUND_UP(length, PAGE_SIZE);
+	ucontext->mmap_xa_page = next_mmap_page;
 	err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
 			  GFP_KERNEL);
+	if (err)
+		goto err_unlock;
+
 	xa_unlock(&ucontext->mmap_xa);
-	if (err){
-		kfree(entry);
-		return EFA_MMAP_INVALID;
-	}
 
 	ibdev_dbg(
 		&dev->ibdev,
@@ -232,6 +237,12 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
 		entry->obj, entry->address, entry->length, get_mmap_key(entry));
 
 	return get_mmap_key(entry);
+
+err_unlock:
+	xa_unlock(&ucontext->mmap_xa);
+	kfree(entry);
+	return EFA_MMAP_INVALID;
 }
 
 int efa_query_device(struct ib_device *ibdev,
@@ -1628,7 +1639,6 @@ int efa_mmap(struct ib_ucontext *ibucontext,
 		ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
 		return -EPERM;
 	}
-	vma->vm_flags &= ~VM_MAYEXEC;
 
 	return __efa_mmap(dev, ucontext, vma, key, length);
 }
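The mmap_entry_insert() hunk above replaces an unchecked page-counter increment with check_add_overflow(), so the u32 counter can no longer wrap silently. A small userspace sketch of the same guard; the kernel macro wraps the same compiler builtin used here:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's check_add_overflow(): returns
 * true when a + b would wrap the destination type. */
#define check_add_overflow(a, b, d) __builtin_add_overflow(a, b, d)

int main(void)
{
	uint32_t page = UINT32_MAX - 1, next;

	/* Mirrors mmap_entry_insert(): refuse the mapping instead of
	 * silently wrapping the per-context page counter. */
	if (check_add_overflow(page, (uint32_t)4, &next))
		puts("would overflow: reject mapping");
	else
		printf("next page index %u\n", next);
	return 0;
}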
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_HFI1
 	tristate "Intel OPA Gen1 support"
 	depends on X86_64 && INFINIBAND_RDMAVT && I2C
@@ -9850,6 +9850,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
 
 	/* disable the port */
 	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+	cancel_work_sync(&ppd->freeze_work);
 }
 
 static inline int init_cpu_counters(struct hfi1_devdata *dd)
@@ -14030,6 +14031,19 @@ static void init_kdeth_qp(struct hfi1_devdata *dd)
 		  RCV_BTH_QP_KDETH_QP_SHIFT);
 }
 
+/**
+ * hfi1_get_qp_map
+ * @dd: device data
+ * @idx: index to read
+ */
+u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
+{
+	u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
+
+	reg >>= (idx % 8) * 8;
+	return reg;
+}
+
 /**
  * init_qpmap_table
  * @dd - device data
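The new hfi1_get_qp_map() reads one byte out of a table packed eight entries per 64-bit CSR: register idx/8, byte idx%8. A compilable model of that indexing, with a plain array standing in for read_csr():

#include <stdint.h>
#include <stdio.h>

/* Model of the RCV_QP_MAP_TABLE layout assumed by hfi1_get_qp_map():
 * eight 8-bit map entries packed into each 64-bit register. */
static uint64_t csr[4];	/* stand-in for the device CSR space */

static uint8_t get_qp_map(uint8_t idx)
{
	uint64_t reg = csr[idx / 8];	/* register holding this entry */

	reg >>= (idx % 8) * 8;		/* shift the byte into position */
	return (uint8_t)reg;		/* truncate to the 8-bit entry */
}

int main(void)
{
	csr[1] = 0xAAull << 16;		/* entry 10 = 0xAA */
	printf("map[10] = 0x%02X\n", get_qp_map(10));
	return 0;
}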
@@ -1445,6 +1445,7 @@ void clear_all_interrupts(struct hfi1_devdata *dd);
 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
 void reset_interrupts(struct hfi1_devdata *dd);
+u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx);
 
 /*
  * Interrupt source table.
@@ -153,6 +153,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
 	char *dash;
 	unsigned long range_start, range_end, i;
 	bool remove = false;
+	unsigned long bound = 1U << BITS_PER_BYTE;
 
 	end = strchr(ptr, ',');
 	if (end)
@@ -178,6 +179,10 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
 				  BITS_PER_BYTE);
 			break;
 		}
+		/* Check the inputs */
+		if (range_start >= bound || range_end >= bound)
+			break;
+
 		for (i = range_start; i <= range_end; i++) {
 			if (remove)
 				clear_bit(i, fault->opcodes);
@@ -539,6 +539,37 @@ static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
 	mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
 }
 
+/**
+ * hfi1_get_rc_ohdr - get extended header
+ * @opah - the opaheader
+ */
+static inline struct ib_other_headers *
+hfi1_get_rc_ohdr(struct hfi1_opa_header *opah)
+{
+	struct ib_other_headers *ohdr;
+	struct ib_header *hdr = NULL;
+	struct hfi1_16b_header *hdr_16b = NULL;
+
+	/* Find out where the BTH is */
+	if (opah->hdr_type == HFI1_PKT_TYPE_9B) {
+		hdr = &opah->ibh;
+		if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else
+			ohdr = &hdr->u.l.oth;
+	} else {
+		u8 l4;
+
+		hdr_16b = &opah->opah;
+		l4 = hfi1_16B_get_l4(hdr_16b);
+		if (l4 == OPA_16B_L4_IB_LOCAL)
+			ohdr = &hdr_16b->u.oth;
+		else
+			ohdr = &hdr_16b->u.l.oth;
+	}
+	return ohdr;
+}
+
 struct rvt_sge_state;
 
 /*
@@ -952,6 +952,22 @@ void sc_disable(struct send_context *sc)
 		}
 	}
 	spin_unlock(&sc->release_lock);
+
+	write_seqlock(&sc->waitlock);
+	while (!list_empty(&sc->piowait)) {
+		struct iowait *wait;
+		struct rvt_qp *qp;
+		struct hfi1_qp_priv *priv;
+
+		wait = list_first_entry(&sc->piowait, struct iowait, list);
+		qp = iowait_to_qp(wait);
+		priv = qp->priv;
+		list_del_init(&priv->s_iowait.list);
+		priv->s_iowait.lock = NULL;
+		hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+	}
+	write_sequnlock(&sc->waitlock);
 
 	spin_unlock_irq(&sc->alloc_lock);
 }
@@ -1427,7 +1443,8 @@ void sc_stop(struct send_context *sc, int flag)
  * @cb: optional callback to call when the buffer is finished sending
  * @arg: argument for cb
  *
- * Return a pointer to a PIO buffer if successful, NULL if not enough room.
+ * Return a pointer to a PIO buffer, NULL if not enough room, -ECOMM
+ * when link is down.
  */
 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
 				pio_release_cb cb, void *arg)
@@ -1443,7 +1460,7 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
 	spin_lock_irqsave(&sc->alloc_lock, flags);
 	if (!(sc->flags & SCF_ENABLED)) {
 		spin_unlock_irqrestore(&sc->alloc_lock, flags);
-		goto done;
+		return ERR_PTR(-ECOMM);
 	}
 
 retry:
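sc_buffer_alloc() now distinguishes a disabled send context, returned as ERR_PTR(-ECOMM), from an out-of-room NULL, which is why the callers below switch from !pbuf to IS_ERR_OR_NULL(). A userspace sketch of that convention; the macros mirror the kernel's include/linux/err.h definitions:

#include <stdio.h>

/* ERR_PTR convention: NULL means "no room, retry later", while an
 * encoded errno (-ECOMM for a down link) is a hard failure. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(p)	((long)(p))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(p) (!(p) || IS_ERR(p))
#define ECOMM 70

static char buf[64];

static void *buffer_alloc(int link_up, int room)
{
	if (!link_up)
		return ERR_PTR(-ECOMM);	/* hard failure: give up */
	if (!room)
		return NULL;		/* soft failure: try again */
	return buf;
}

int main(void)
{
	void *p = buffer_alloc(0, 1);

	if (IS_ERR_OR_NULL(p))
		printf("alloc failed: %ld\n", IS_ERR(p) ? PTR_ERR(p) : 0L);
	return 0;
}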
@@ -1432,7 +1432,7 @@ void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
 	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
 			 sc_to_vlt(ppd->dd, sc5), plen);
 	pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
-	if (!pbuf) {
+	if (IS_ERR_OR_NULL(pbuf)) {
 		/*
 		 * We have no room to send at the moment.  Pass
 		 * responsibility for sending the ACK to the send engine
@@ -1701,6 +1701,36 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
 	}
 }
 
+/**
+ * hfi1_rc_verbs_aborted - handle abort status
+ * @qp: the QP
+ * @opah: the opa header
+ *
+ * This code modifies both ACK bit in BTH[2]
+ * and the s_flags to go into send one mode.
+ *
+ * This serves to throttle the send engine to only
+ * send a single packet in the likely case the
+ * a link has gone down.
+ */
+void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+{
+	struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);
+	u8 opcode = ib_bth_get_opcode(ohdr);
+	u32 psn;
+
+	/* ignore responses */
+	if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+	     opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
+	    opcode == TID_OP(READ_RESP) ||
+	    opcode == TID_OP(WRITE_RESP))
+		return;
+
+	psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK;
+	ohdr->bth[2] = cpu_to_be32(psn);
+	qp->s_flags |= RVT_S_SEND_ONE;
+}
+
 /*
  * This should be called with the QP s_lock held and interrupts disabled.
  */
@@ -1709,8 +1739,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
 	struct ib_other_headers *ohdr;
 	struct hfi1_qp_priv *priv = qp->priv;
 	struct rvt_swqe *wqe;
-	struct ib_header *hdr = NULL;
-	struct hfi1_16b_header *hdr_16b = NULL;
 	u32 opcode, head, tail;
 	u32 psn;
 	struct tid_rdma_request *req;
@@ -1719,24 +1747,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
 	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
 		return;
 
-	/* Find out where the BTH is */
-	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
-		hdr = &opah->ibh;
-		if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
-			ohdr = &hdr->u.oth;
-		else
-			ohdr = &hdr->u.l.oth;
-	} else {
-		u8 l4;
-
-		hdr_16b = &opah->opah;
-		l4 = hfi1_16B_get_l4(hdr_16b);
-		if (l4 == OPA_16B_L4_IB_LOCAL)
-			ohdr = &hdr_16b->u.oth;
-		else
-			ohdr = &hdr_16b->u.l.oth;
-	}
-
+	ohdr = hfi1_get_rc_ohdr(opah);
 	opcode = ib_bth_get_opcode(ohdr);
 	if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
 	     opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
@@ -405,19 +405,33 @@ static void sdma_flush(struct sdma_engine *sde)
 	struct sdma_txreq *txp, *txp_next;
 	LIST_HEAD(flushlist);
 	unsigned long flags;
+	uint seq;
 
 	/* flush from head to tail */
 	sdma_flush_descq(sde);
 	spin_lock_irqsave(&sde->flushlist_lock, flags);
 	/* copy flush list */
-	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
-		list_del_init(&txp->list);
-		list_add_tail(&txp->list, &flushlist);
-	}
+	list_splice_init(&sde->flushlist, &flushlist);
 	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
 	/* flush from flush list */
 	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
 		complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
+	/* wakeup QPs orphaned on the dmawait list */
+	do {
+		struct iowait *w, *nw;
+
+		seq = read_seqbegin(&sde->waitlock);
+		if (!list_empty(&sde->dmawait)) {
+			write_seqlock(&sde->waitlock);
+			list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
+				if (w->wakeup) {
+					w->wakeup(w, SDMA_AVAIL_REASON);
+					list_del_init(&w->list);
+				}
+			}
+			write_sequnlock(&sde->waitlock);
+		}
+	} while (read_seqretry(&sde->waitlock, seq));
 }
 
 /*
@@ -2413,7 +2427,7 @@ unlock_noconn:
 	list_add_tail(&tx->list, &sde->flushlist);
 	spin_unlock(&sde->flushlist_lock);
 	iowait_inc_wait_count(wait, tx->num_desc);
-	schedule_work(&sde->flush_worker);
+	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 	ret = -ECOMM;
 	goto unlock;
 nodesc:
@@ -2511,7 +2525,7 @@ unlock_noconn:
 		iowait_inc_wait_count(wait, tx->num_desc);
 	}
 	spin_unlock(&sde->flushlist_lock);
-	schedule_work(&sde->flush_worker);
+	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 	ret = -ECOMM;
 	goto update_tail;
 nodesc:
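The sdma_flush() hunk swaps a hand-rolled move loop for list_splice_init(), which transplants the whole chain in O(1) and leaves the source list empty. A minimal sketch of those semantics on a toy singly-linked list; the kernel helper does the same for struct list_head:

#include <stdio.h>

struct node { struct node *next; int v; };

/* Splice src's chain onto dst and leave src empty, like
 * list_splice_init(&sde->flushlist, &flushlist). */
static void splice_init(struct node **src, struct node **dst)
{
	struct node **tail = dst;

	while (*tail)
		tail = &(*tail)->next;
	*tail = *src;	/* attach the whole chain at once */
	*src = NULL;	/* re-initialize the source list */
}

int main(void)
{
	struct node b = { NULL, 2 }, a = { &b, 1 };
	struct node *flushlist = NULL, *pending = &a;

	splice_init(&pending, &flushlist);
	for (struct node *n = flushlist; n; n = n->next)
		printf("flushing %d\n", n->v);
	printf("pending empty: %s\n", pending ? "no" : "yes");
	return 0;
}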
@@ -312,9 +312,7 @@ static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
 	if (qp->ibqp.qp_num == 0)
 		ctxt = 0;
 	else
-		ctxt = ((qp->ibqp.qp_num >> dd->qos_shift) %
-			(dd->n_krcv_queues - 1)) + 1;
-
+		ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
 	return dd->rcd[ctxt];
 }
@@ -683,7 +683,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
 	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
 	if (ctxt) {
 		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
-		if (pbuf) {
+		if (!IS_ERR_OR_NULL(pbuf)) {
 			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
 			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
 						 &hdr, hwords);
@@ -738,7 +738,7 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
 	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
 	if (ctxt) {
 		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
-		if (pbuf) {
+		if (!IS_ERR_OR_NULL(pbuf)) {
 			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
 			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
 						 &hdr, hwords);
@@ -324,6 +324,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
 	u32 *tidlist = NULL;
 	struct tid_user_buf *tidbuf;
 
+	if (!PAGE_ALIGNED(tinfo->vaddr))
+		return -EINVAL;
+
 	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
 	if (!tidbuf)
 		return -ENOMEM;
@@ -130,20 +130,16 @@ static int defer_packet_queue(
 {
 	struct hfi1_user_sdma_pkt_q *pq =
 		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
-	struct user_sdma_txreq *tx =
-		container_of(txreq, struct user_sdma_txreq, txreq);
 
-	if (sdma_progress(sde, seq, txreq)) {
-		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
-			goto eagain;
-	}
+	write_seqlock(&sde->waitlock);
+	if (sdma_progress(sde, seq, txreq))
+		goto eagain;
 	/*
 	 * We are assuming that if the list is enqueued somewhere, it
 	 * is to the dmawait list since that is the only place where
 	 * it is supposed to be enqueued.
 	 */
 	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-	write_seqlock(&sde->waitlock);
 	if (list_empty(&pq->busy.list)) {
 		iowait_get_priority(&pq->busy);
 		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
@@ -151,6 +147,7 @@ static int defer_packet_queue(
 	write_sequnlock(&sde->waitlock);
 	return -EBUSY;
 eagain:
+	write_sequnlock(&sde->waitlock);
 	return -EAGAIN;
 }
 
@@ -804,7 +801,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 
 		tx->flags = 0;
 		tx->req = req;
-		tx->busycount = 0;
 		INIT_LIST_HEAD(&tx->list);
 
 		/*
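The defer_packet_queue() rework moves the sdma_progress() test under sde->waitlock, so the engine cannot make progress and run its wakeups between the test and the enqueue. A userspace sketch of the "recheck under the lock" rule, with a pthread mutex standing in for the kernel seqlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t waitlock = PTHREAD_MUTEX_INITIALIZER;
static bool made_progress;

/* Testing the condition only under the same lock the waker takes
 * guarantees we never queue ourselves after the wakeup already ran. */
static int defer_if_no_progress(void)
{
	pthread_mutex_lock(&waitlock);
	if (made_progress) {		/* recheck under the lock */
		pthread_mutex_unlock(&waitlock);
		return -11;		/* -EAGAIN: retry, don't sleep */
	}
	/* ... enqueue on the wait list here ... */
	pthread_mutex_unlock(&waitlock);
	return -16;			/* -EBUSY: woken up later */
}

int main(void)
{
	printf("%d\n", defer_if_no_progress());
	return 0;
}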
@@ -245,7 +245,6 @@ struct user_sdma_txreq {
 	struct list_head list;
 	struct user_sdma_request *req;
 	u16 flags;
-	unsigned int busycount;
 	u16 seqnum;
 };
|
@@ -638,6 +638,8 @@ static void verbs_sdma_complete(
|
||||
struct hfi1_opa_header *hdr;
|
||||
|
||||
hdr = &tx->phdr.hdr;
|
||||
if (unlikely(status == SDMA_TXREQ_S_ABORTED))
|
||||
hfi1_rc_verbs_aborted(qp, hdr);
|
||||
hfi1_rc_send_complete(qp, hdr);
|
||||
}
|
||||
spin_unlock(&qp->s_lock);
|
||||
@@ -1037,10 +1039,10 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
|
||||
if (cb)
|
||||
iowait_pio_inc(&priv->s_iowait);
|
||||
pbuf = sc_buffer_alloc(sc, plen, cb, qp);
|
||||
if (unlikely(!pbuf)) {
|
||||
if (unlikely(IS_ERR_OR_NULL(pbuf))) {
|
||||
if (cb)
|
||||
verbs_pio_complete(qp, 0);
|
||||
if (ppd->host_link_state != HLS_UP_ACTIVE) {
|
||||
if (IS_ERR(pbuf)) {
|
||||
/*
|
||||
* If we have filled the PIO buffers to capacity and are
|
||||
* not in an active state this request is not going to
|
||||
@@ -1095,15 +1097,15 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
|
||||
&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
|
||||
|
||||
pio_bail:
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
if (qp->s_wqe) {
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
rvt_send_complete(qp, qp->s_wqe, wc_status);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
if (unlikely(wc_status == IB_WC_GENERAL_ERR))
|
||||
hfi1_rc_verbs_aborted(qp, &ps->s_txreq->phdr.hdr);
|
||||
hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
|
||||
ret = 0;
|
||||
|
||||
@@ -1356,8 +1358,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
|
||||
rdi->dparms.props.max_cq = hfi1_max_cqs;
|
||||
rdi->dparms.props.max_ah = hfi1_max_ahs;
|
||||
rdi->dparms.props.max_cqe = hfi1_max_cqes;
|
||||
rdi->dparms.props.max_mr = rdi->lkey_table.max;
|
||||
rdi->dparms.props.max_fmr = rdi->lkey_table.max;
|
||||
rdi->dparms.props.max_map_per_fmr = 32767;
|
||||
rdi->dparms.props.max_pd = hfi1_max_pds;
|
||||
rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
|
||||
|
@@ -416,6 +416,7 @@ void hfi1_rc_hdrerr(
 
 u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
 
+void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah);
 void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah);
 
 void hfi1_ud_rcv(struct hfi1_packet *packet);
@@ -100,7 +100,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
 		struct hfi1_qp_priv *priv;
 
-		tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+		tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
 		if (tx)
 			goto out;
 		priv = qp->priv;
@@ -72,6 +72,7 @@ struct hfi1_ibdev;
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 				struct rvt_qp *qp);
 
+#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN)
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
 					    struct rvt_qp *qp)
 	__must_hold(&qp->slock)
@@ -79,7 +80,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
 	struct verbs_txreq *tx;
 	struct hfi1_qp_priv *priv = qp->priv;
 
-	tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+	tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
 	if (unlikely(!tx)) {
 		/* call slow path to get the lock */
 		tx = __get_txreq(dev, qp);
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_HNS
 	tristate "HNS RoCE Driver"
 	depends on NET_VENDOR_HISILICON

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Makefile for the Hisilicon RoCE drivers.
 #

@@ -902,6 +902,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 	hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
 	kfree(&free_mr->mr_free_cq->ib_cq);
 	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
+	kfree(&free_mr->mr_free_pd->ibpd);
 }
 
 static int hns_roce_db_init(struct hns_roce_dev *hr_dev)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_I40IW
 	tristate "Intel(R) Ethernet X722 iWARP Driver"
 	depends on INET && I40E

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config MLX4_INFINIBAND
 	tristate "Mellanox ConnectX HCA support"
 	depends on NETDEVICES && ETHERNET && PCI && INET

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_MLX4_INFINIBAND) += mlx4_ib.o
 
 mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config MLX5_INFINIBAND
 	tristate "Mellanox 5th generation network adapters (ConnectX series) support"
 	depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
 
 mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
@@ -190,12 +190,12 @@ int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
 			  u16 uid, phys_addr_t *addr, u32 *obj_id)
 {
 	struct mlx5_core_dev *dev = dm->dev;
-	u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
 	u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
 	unsigned long *block_map;
 	u64 icm_start_addr;
 	u32 log_icm_size;
+	u32 num_blocks;
 	u32 max_blocks;
 	u64 block_idx;
 	void *sw_icm;
@@ -224,6 +224,8 @@ int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
 		return -EINVAL;
 	}
 
+	num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
+		     MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
 	max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
 	spin_lock(&dm->lock);
 	block_idx = bitmap_find_next_zero_area(block_map,
@@ -266,13 +268,16 @@ int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
 			    u16 uid, phys_addr_t addr, u32 obj_id)
 {
 	struct mlx5_core_dev *dev = dm->dev;
-	u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
 	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
 	unsigned long *block_map;
+	u32 num_blocks;
 	u64 start_idx;
 	int err;
 
+	num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
+		     MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+
 	switch (type) {
 	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
 		start_idx =
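Both mlx5_cmd_*_sw_icm() hunks defer the num_blocks computation until after the type checks and express it as an add-and-shift. For a power-of-two block size that form is exactly DIV_ROUND_UP(), as this self-checking sketch verifies with an assumed 4 KiB block:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* For a power-of-two block size B = 1 << log_b, the shift form
	 * used in the hunk is round-up division by B. */
	unsigned log_b = 12;		/* assumed 4 KiB ICM block */
	uint64_t B = 1ull << log_b;

	for (uint64_t len = 0; len < 3 * B; len += 977) {
		uint64_t blocks = (len + B - 1) >> log_b;
		assert(blocks == DIV_ROUND_UP(len, B));
	}
	puts("shift form == DIV_ROUND_UP for power-of-two blocks");
	return 0;
}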
@@ -110,15 +110,15 @@ u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
 }
 
 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
-					  int vport_index)
+					  u16 vport_num)
 {
-	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
+	return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_IB);
 }
 
 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
-					  int vport_index)
+					  u16 vport_num)
 {
-	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
+	return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);
 }
 
 struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
@@ -126,9 +126,10 @@ struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
 	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
 }
 
-struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
+struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
+					   u16 vport_num)
 {
-	return mlx5_eswitch_vport_rep(esw, vport);
+	return mlx5_eswitch_vport_rep(esw, vport_num);
 }
 
 struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
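The vport prototypes switch from int to u16, presumably because e-switch vport numbers are 16-bit quantities; a signed int invites sign-extension and silent truncation at these boundaries, as this small C demonstration shows:

#include <stdint.h>
#include <stdio.h>

/* int -> u16 narrowing at a prototype boundary: values outside 16 bits
 * alias valid vport numbers instead of failing loudly. */
static uint16_t as_u16(int vport) { return (uint16_t)vport; }

int main(void)
{
	printf("int %#x -> u16 %#x\n", 0x1FFFF, (unsigned)as_u16(0x1FFFF));
	printf("int %d -> u16 %u\n", -1, (unsigned)as_u16(-1));
	return 0;
}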
@@ -14,17 +14,17 @@ extern const struct mlx5_ib_profile uplink_rep_profile;
 
 u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
-					  int vport_index);
+					  u16 vport_num);
 struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw);
 struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
-					   int vport_index);
+					   u16 vport_num);
 void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev);
 void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev);
 struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
 						   struct mlx5_ib_sq *sq,
 						   u16 port);
 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
-					  int vport_index);
+					  u16 vport_num);
 #else /* CONFIG_MLX5_ESWITCH */
 static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
 {
@@ -33,7 +33,7 @@ static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
 
 static inline
 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
-					  int vport_index)
+					  u16 vport_num)
 {
 	return NULL;
 }
@@ -46,7 +46,7 @@ struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
 
 static inline
 struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
-					   int vport_index)
+					   u16 vport_num)
 {
 	return NULL;
 }
@@ -63,7 +63,7 @@ struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
 
 static inline
 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
-					  int vport_index)
+					  u16 vport_num)
 {
 	return NULL;
 }
@@ -2347,7 +2347,7 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
 	/* Allocation size must a multiple of the basic block size
 	 * and a power of 2.
 	 */
-	act_size = roundup(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
+	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
 	act_size = roundup_pow_of_two(act_size);
 
 	dm->size = act_size;
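The hunk above swaps roundup() for round_up(): the kernel reserves the underscored form for power-of-two alignments, where it reduces to bit masking instead of a division. Simplified versions of the two macros, showing they agree on the power-of-two block sizes used here:

#include <stdio.h>

/* Simplified forms of the kernel helpers: roundup() works for any
 * multiple, round_up() only for power-of-two alignments. */
#define roundup(x, y)  ((((x) + ((y) - 1)) / (y)) * (y))
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	/* Identical for power-of-two block sizes... */
	printf("%d %d\n", roundup(100, 64), round_up(100, 64)); /* 128 128 */
	/* ...but only roundup() is meaningful for other multiples. */
	printf("%d\n", roundup(100, 48));                       /* 144 */
	return 0;
}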
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_MTHCA
 	tristate "Mellanox HCA support"
 	depends on PCI

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_OCRDMA
 	tristate "Emulex One Connect HCA support"
 	depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n)

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 ccflags-y := -I $(srctree)/drivers/net/ethernet/emulex/benet
 
 obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_QEDR
 	tristate "QLogic RoCE driver"
 	depends on 64BIT && QEDE

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
 
 qedr-y := main.o verbs.o qedr_roce_cm.o qedr_iw_cm.o

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_QIB
 	tristate "Intel PCIe HCA support"
 	depends on 64BIT && INFINIBAND_RDMAVT
@@ -1459,8 +1459,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
 	rdi->dparms.props.max_cq = ib_qib_max_cqs;
 	rdi->dparms.props.max_cqe = ib_qib_max_cqes;
 	rdi->dparms.props.max_ah = ib_qib_max_ahs;
 	rdi->dparms.props.max_mr = rdi->lkey_table.max;
-	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
-	rdi->dparms.props.max_map_per_fmr = 32767;
 	rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
 	rdi->dparms.props.max_qp_init_rd_atom = 255;
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_USNIC
 	tristate "Verbs support for Cisco VIC"
 	depends on NETDEVICES && ETHERNET && INET && PCI && INTEL_IOMMU

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_VMWARE_PVRDMA
 	tristate "VMware Paravirtualized RDMA Driver"
 	depends on NETDEVICES && ETHERNET && PCI && INET && VMXNET3

@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o
 
 vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_srq.o pvrdma_verbs.o