Merge branch 'timers/vdso' into timers/core
so the hyper-v clocksource update can be applied.
@@ -139,9 +139,11 @@ int efa_com_destroy_qp(struct efa_com_dev *edev,
                                sizeof(qp_cmd),
                                (struct efa_admin_acq_entry *)&cmd_completion,
                                sizeof(cmd_completion));
-        if (err)
+        if (err) {
                 ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n",
                           qp_cmd.qp_handle, err);
+                return err;
+        }
 
         return 0;
 }
@@ -199,9 +201,11 @@ int efa_com_destroy_cq(struct efa_com_dev *edev,
                                  (struct efa_admin_acq_entry *)&destroy_resp,
                                  sizeof(destroy_resp));
 
-        if (err)
+        if (err) {
                 ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n",
                           params->cq_idx, err);
+                return err;
+        }
 
         return 0;
 }
@@ -273,10 +277,12 @@ int efa_com_dereg_mr(struct efa_com_dev *edev,
                                sizeof(mr_cmd),
                                (struct efa_admin_acq_entry *)&cmd_completion,
                                sizeof(cmd_completion));
-        if (err)
+        if (err) {
                 ibdev_err(edev->efa_dev,
                           "Failed to de-register mr(lkey-%u) [%d]\n",
                           mr_cmd.l_key, err);
+                return err;
+        }
 
         return 0;
 }
@@ -327,9 +333,11 @@ int efa_com_destroy_ah(struct efa_com_dev *edev,
                                sizeof(ah_cmd),
                                (struct efa_admin_acq_entry *)&cmd_completion,
                                sizeof(cmd_completion));
-        if (err)
+        if (err) {
                 ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n",
                           ah_cmd.ah, ah_cmd.pd, err);
+                return err;
+        }
 
         return 0;
 }
@@ -387,10 +395,12 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
                                  get_resp,
                                  sizeof(*get_resp));
 
-        if (err)
+        if (err) {
                 ibdev_err(edev->efa_dev,
                           "Failed to submit get_feature command %d [%d]\n",
                           feature_id, err);
+                return err;
+        }
 
         return 0;
 }
@@ -534,10 +544,12 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
                                  (struct efa_admin_acq_entry *)set_resp,
                                  sizeof(*set_resp));
 
-        if (err)
+        if (err) {
                 ibdev_err(edev->efa_dev,
                           "Failed to submit set_feature command %d error: %d\n",
                           feature_id, err);
+                return err;
+        }
 
         return 0;
 }
@@ -204,6 +204,7 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
                              void *obj, u64 address, u64 length, u8 mmap_flag)
 {
         struct efa_mmap_entry *entry;
+        u32 next_mmap_page;
         int err;
 
         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -216,15 +217,19 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
         entry->mmap_flag = mmap_flag;
 
         xa_lock(&ucontext->mmap_xa);
+        if (check_add_overflow(ucontext->mmap_xa_page,
+                               (u32)(length >> PAGE_SHIFT),
+                               &next_mmap_page))
+                goto err_unlock;
+
         entry->mmap_page = ucontext->mmap_xa_page;
-        ucontext->mmap_xa_page += DIV_ROUND_UP(length, PAGE_SIZE);
+        ucontext->mmap_xa_page = next_mmap_page;
         err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
                           GFP_KERNEL);
+        if (err)
+                goto err_unlock;
+
         xa_unlock(&ucontext->mmap_xa);
-        if (err){
-                kfree(entry);
-                return EFA_MMAP_INVALID;
-        }
 
         ibdev_dbg(
                 &dev->ibdev,
@@ -232,6 +237,12 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
                 entry->obj, entry->address, entry->length, get_mmap_key(entry));
 
         return get_mmap_key(entry);
+
+err_unlock:
+        xa_unlock(&ucontext->mmap_xa);
+        kfree(entry);
+        return EFA_MMAP_INVALID;
+
 }
 
 int efa_query_device(struct ib_device *ibdev,
@@ -14031,6 +14031,19 @@ static void init_kdeth_qp(struct hfi1_devdata *dd)
                   RCV_BTH_QP_KDETH_QP_SHIFT);
 }
 
+/**
+ * hfi1_get_qp_map
+ * @dd: device data
+ * @idx: index to read
+ */
+u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
+{
+        u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
+
+        reg >>= (idx % 8) * 8;
+        return reg;
+}
+
 /**
  * init_qpmap_table
  * @dd - device data
@@ -1445,6 +1445,7 @@ void clear_all_interrupts(struct hfi1_devdata *dd);
 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
 void reset_interrupts(struct hfi1_devdata *dd);
+u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx);
 
 /*
  * Interrupt source table.
@@ -153,6 +153,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
         char *dash;
         unsigned long range_start, range_end, i;
         bool remove = false;
+        unsigned long bound = 1U << BITS_PER_BYTE;
 
         end = strchr(ptr, ',');
         if (end)
@@ -178,6 +179,10 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
                                        BITS_PER_BYTE);
                         break;
                 }
+                /* Check the inputs */
+                if (range_start >= bound || range_end >= bound)
+                        break;
+
                 for (i = range_start; i <= range_end; i++) {
                         if (remove)
                                 clear_bit(i, fault->opcodes);
@@ -539,6 +539,37 @@ static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
         mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
 }
 
+/**
+ * hfi1_get_rc_ohdr - get extended header
+ * @opah - the opaheader
+ */
+static inline struct ib_other_headers *
+hfi1_get_rc_ohdr(struct hfi1_opa_header *opah)
+{
+        struct ib_other_headers *ohdr;
+        struct ib_header *hdr = NULL;
+        struct hfi1_16b_header *hdr_16b = NULL;
+
+        /* Find out where the BTH is */
+        if (opah->hdr_type == HFI1_PKT_TYPE_9B) {
+                hdr = &opah->ibh;
+                if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
+                        ohdr = &hdr->u.oth;
+                else
+                        ohdr = &hdr->u.l.oth;
+        } else {
+                u8 l4;
+
+                hdr_16b = &opah->opah;
+                l4 = hfi1_16B_get_l4(hdr_16b);
+                if (l4 == OPA_16B_L4_IB_LOCAL)
+                        ohdr = &hdr_16b->u.oth;
+                else
+                        ohdr = &hdr_16b->u.l.oth;
+        }
+        return ohdr;
+}
+
 struct rvt_sge_state;
 
 /*
@@ -952,6 +952,22 @@ void sc_disable(struct send_context *sc)
                 }
         }
         spin_unlock(&sc->release_lock);
+
+        write_seqlock(&sc->waitlock);
+        while (!list_empty(&sc->piowait)) {
+                struct iowait *wait;
+                struct rvt_qp *qp;
+                struct hfi1_qp_priv *priv;
+
+                wait = list_first_entry(&sc->piowait, struct iowait, list);
+                qp = iowait_to_qp(wait);
+                priv = qp->priv;
+                list_del_init(&priv->s_iowait.list);
+                priv->s_iowait.lock = NULL;
+                hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+        }
+        write_sequnlock(&sc->waitlock);
+
         spin_unlock_irq(&sc->alloc_lock);
 }
 
@@ -1427,7 +1443,8 @@ void sc_stop(struct send_context *sc, int flag)
  * @cb: optional callback to call when the buffer is finished sending
  * @arg: argument for cb
  *
- * Return a pointer to a PIO buffer if successful, NULL if not enough room.
+ * Return a pointer to a PIO buffer, NULL if not enough room, -ECOMM
+ * when link is down.
  */
 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
                                 pio_release_cb cb, void *arg)
@@ -1443,7 +1460,7 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
         spin_lock_irqsave(&sc->alloc_lock, flags);
         if (!(sc->flags & SCF_ENABLED)) {
                 spin_unlock_irqrestore(&sc->alloc_lock, flags);
-                goto done;
+                return ERR_PTR(-ECOMM);
         }
 
 retry:
@@ -1432,7 +1432,7 @@ void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
         pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
                          sc_to_vlt(ppd->dd, sc5), plen);
         pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
-        if (!pbuf) {
+        if (IS_ERR_OR_NULL(pbuf)) {
                 /*
                  * We have no room to send at the moment. Pass
                  * responsibility for sending the ACK to the send engine
@@ -1701,6 +1701,36 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
         }
 }
 
+/**
+ * hfi1_rc_verbs_aborted - handle abort status
+ * @qp: the QP
+ * @opah: the opa header
+ *
+ * This code modifies both ACK bit in BTH[2]
+ * and the s_flags to go into send one mode.
+ *
+ * This serves to throttle the send engine to only
+ * send a single packet in the likely case the
+ * a link has gone down.
+ */
+void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+{
+        struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);
+        u8 opcode = ib_bth_get_opcode(ohdr);
+        u32 psn;
+
+        /* ignore responses */
+        if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+             opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
+            opcode == TID_OP(READ_RESP) ||
+            opcode == TID_OP(WRITE_RESP))
+                return;
+
+        psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK;
+        ohdr->bth[2] = cpu_to_be32(psn);
+        qp->s_flags |= RVT_S_SEND_ONE;
+}
+
 /*
  * This should be called with the QP s_lock held and interrupts disabled.
  */
@@ -1709,8 +1739,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
         struct ib_other_headers *ohdr;
         struct hfi1_qp_priv *priv = qp->priv;
         struct rvt_swqe *wqe;
-        struct ib_header *hdr = NULL;
-        struct hfi1_16b_header *hdr_16b = NULL;
         u32 opcode, head, tail;
         u32 psn;
         struct tid_rdma_request *req;
@@ -1719,24 +1747,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
         if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
                 return;
 
-        /* Find out where the BTH is */
-        if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
-                hdr = &opah->ibh;
-                if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
-                        ohdr = &hdr->u.oth;
-                else
-                        ohdr = &hdr->u.l.oth;
-        } else {
-                u8 l4;
-
-                hdr_16b = &opah->opah;
-                l4 = hfi1_16B_get_l4(hdr_16b);
-                if (l4 == OPA_16B_L4_IB_LOCAL)
-                        ohdr = &hdr_16b->u.oth;
-                else
-                        ohdr = &hdr_16b->u.l.oth;
-        }
-
+        ohdr = hfi1_get_rc_ohdr(opah);
         opcode = ib_bth_get_opcode(ohdr);
         if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
              opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
@@ -405,19 +405,33 @@ static void sdma_flush(struct sdma_engine *sde)
         struct sdma_txreq *txp, *txp_next;
         LIST_HEAD(flushlist);
         unsigned long flags;
+        uint seq;
 
         /* flush from head to tail */
         sdma_flush_descq(sde);
         spin_lock_irqsave(&sde->flushlist_lock, flags);
         /* copy flush list */
-        list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
-                list_del_init(&txp->list);
-                list_add_tail(&txp->list, &flushlist);
-        }
+        list_splice_init(&sde->flushlist, &flushlist);
         spin_unlock_irqrestore(&sde->flushlist_lock, flags);
         /* flush from flush list */
         list_for_each_entry_safe(txp, txp_next, &flushlist, list)
                 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
+        /* wakeup QPs orphaned on the dmawait list */
+        do {
+                struct iowait *w, *nw;
+
+                seq = read_seqbegin(&sde->waitlock);
+                if (!list_empty(&sde->dmawait)) {
+                        write_seqlock(&sde->waitlock);
+                        list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
+                                if (w->wakeup) {
+                                        w->wakeup(w, SDMA_AVAIL_REASON);
+                                        list_del_init(&w->list);
+                                }
+                        }
+                        write_sequnlock(&sde->waitlock);
+                }
+        } while (read_seqretry(&sde->waitlock, seq));
 }
 
 /*
@@ -2413,7 +2427,7 @@ unlock_noconn:
         list_add_tail(&tx->list, &sde->flushlist);
         spin_unlock(&sde->flushlist_lock);
         iowait_inc_wait_count(wait, tx->num_desc);
-        schedule_work(&sde->flush_worker);
+        queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
         ret = -ECOMM;
         goto unlock;
 nodesc:
@@ -2511,7 +2525,7 @@ unlock_noconn:
                 iowait_inc_wait_count(wait, tx->num_desc);
         }
         spin_unlock(&sde->flushlist_lock);
-        schedule_work(&sde->flush_worker);
+        queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
         ret = -ECOMM;
         goto update_tail;
 nodesc:
@@ -312,9 +312,7 @@ static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
         if (qp->ibqp.qp_num == 0)
                 ctxt = 0;
         else
-                ctxt = ((qp->ibqp.qp_num >> dd->qos_shift) %
-                        (dd->n_krcv_queues - 1)) + 1;
-
+                ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
         return dd->rcd[ctxt];
 }
 
@@ -683,7 +683,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
         pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
         if (ctxt) {
                 pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
-                if (pbuf) {
+                if (!IS_ERR_OR_NULL(pbuf)) {
                         trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
                         ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
                                                  &hdr, hwords);
@@ -738,7 +738,7 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
         pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
         if (ctxt) {
                 pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
-                if (pbuf) {
+                if (!IS_ERR_OR_NULL(pbuf)) {
                         trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
                         ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
                                                  &hdr, hwords);
@@ -130,20 +130,16 @@ static int defer_packet_queue(
 {
         struct hfi1_user_sdma_pkt_q *pq =
                 container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
-        struct user_sdma_txreq *tx =
-                container_of(txreq, struct user_sdma_txreq, txreq);
 
-        if (sdma_progress(sde, seq, txreq)) {
-                if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
-                        goto eagain;
-        }
+        write_seqlock(&sde->waitlock);
+        if (sdma_progress(sde, seq, txreq))
+                goto eagain;
         /*
          * We are assuming that if the list is enqueued somewhere, it
          * is to the dmawait list since that is the only place where
          * it is supposed to be enqueued.
          */
         xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-        write_seqlock(&sde->waitlock);
         if (list_empty(&pq->busy.list)) {
                 iowait_get_priority(&pq->busy);
                 iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
@@ -151,6 +147,7 @@ static int defer_packet_queue(
         write_sequnlock(&sde->waitlock);
         return -EBUSY;
 eagain:
+        write_sequnlock(&sde->waitlock);
         return -EAGAIN;
 }
 
@@ -804,7 +801,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 
                 tx->flags = 0;
                 tx->req = req;
-                tx->busycount = 0;
                 INIT_LIST_HEAD(&tx->list);
 
                 /*
@@ -245,7 +245,6 @@ struct user_sdma_txreq {
         struct list_head list;
         struct user_sdma_request *req;
         u16 flags;
-        unsigned int busycount;
         u16 seqnum;
 };
 
@@ -638,6 +638,8 @@ static void verbs_sdma_complete(
                 struct hfi1_opa_header *hdr;
 
                 hdr = &tx->phdr.hdr;
+                if (unlikely(status == SDMA_TXREQ_S_ABORTED))
+                        hfi1_rc_verbs_aborted(qp, hdr);
                 hfi1_rc_send_complete(qp, hdr);
         }
         spin_unlock(&qp->s_lock);
@@ -1037,10 +1039,10 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
         if (cb)
                 iowait_pio_inc(&priv->s_iowait);
         pbuf = sc_buffer_alloc(sc, plen, cb, qp);
-        if (unlikely(!pbuf)) {
+        if (unlikely(IS_ERR_OR_NULL(pbuf))) {
                 if (cb)
                         verbs_pio_complete(qp, 0);
-                if (ppd->host_link_state != HLS_UP_ACTIVE) {
+                if (IS_ERR(pbuf)) {
                         /*
                          * If we have filled the PIO buffers to capacity and are
                          * not in an active state this request is not going to
|
||||
&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
|
||||
|
||||
pio_bail:
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
if (qp->s_wqe) {
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
rvt_send_complete(qp, qp->s_wqe, wc_status);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
if (unlikely(wc_status == IB_WC_GENERAL_ERR))
|
||||
hfi1_rc_verbs_aborted(qp, &ps->s_txreq->phdr.hdr);
|
||||
hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
|
||||
ret = 0;
|
||||
|
||||
|
@@ -416,6 +416,7 @@ void hfi1_rc_hdrerr(
 
 u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
 
+void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah);
 void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah);
 
 void hfi1_ud_rcv(struct hfi1_packet *packet);
@@ -100,7 +100,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
         if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                 struct hfi1_qp_priv *priv;
 
-                tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+                tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
                 if (tx)
                         goto out;
                 priv = qp->priv;
@@ -72,6 +72,7 @@ struct hfi1_ibdev;
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                 struct rvt_qp *qp);
 
+#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN)
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
                                             struct rvt_qp *qp)
         __must_hold(&qp->slock)
@@ -79,7 +80,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
         struct verbs_txreq *tx;
         struct hfi1_qp_priv *priv = qp->priv;
 
-        tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+        tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
         if (unlikely(!tx)) {
                 /* call slow path to get the lock */
                 tx = __get_txreq(dev, qp);