@@ -330,6 +330,22 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
 	dump_cqe(dev, cqe);
 }
 
+static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
+			   u16 tail, u16 head)
+{
+	u16 idx;
+
+	do {
+		idx = tail & (qp->sq.wqe_cnt - 1);
+		if (idx == head)
+			break;
+
+		tail = qp->sq.w_list[idx].next;
+	} while (1);
+	tail = qp->sq.w_list[idx].next;
+	qp->sq.last_poll = tail;
+}
+
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
 	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
@@ -368,7 +384,7 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
 }
 
 static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
-		    int *npolled, int is_send)
+		    int *npolled, bool is_send)
 {
 	struct mlx5_ib_wq *wq;
 	unsigned int cur;
@@ -383,10 +399,16 @@ static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
 		return;
 
 	for (i = 0; i < cur && np < num_entries; i++) {
-		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+		unsigned int idx;
+
+		idx = (is_send) ? wq->last_poll : wq->tail;
+		idx &= (wq->wqe_cnt - 1);
+		wc->wr_id = wq->wrid[idx];
 		wc->status = IB_WC_WR_FLUSH_ERR;
 		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
 		wq->tail++;
+		if (is_send)
+			wq->last_poll = wq->w_list[idx].next;
 		np++;
 		wc->qp = &qp->ibqp;
 		wc++;
@@ -473,6 +495,7 @@ repoll:
 		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
 		idx = wqe_ctr & (wq->wqe_cnt - 1);
 		handle_good_req(wc, cqe64, wq, idx);
+		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
 		wc->wr_id = wq->wrid[idx];
 		wq->tail = wq->wqe_head[idx] + 1;
 		wc->status = IB_WC_SUCCESS;