net/mlx5: Refactor fragmented buffer struct fields and init flow

Take struct mlx5_frag_buf out of mlx5_frag_buf_ctrl, as it is not
needed to manage and control the datapath of the fragmented buffers API.

struct mlx5_frag_buf contains control info used to manage the allocation
and de-allocation of the fragmented buffer.
Its fields are not relevant for the datapath, so here I take them out of
struct mlx5_frag_buf_ctrl, except for the fragments array itself, as
sketched below.
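
For reference, a minimal sketch of the resulting split, based on this
patch's companion change to include/linux/mlx5/driver.h (one of the four
changed files, not shown in the diff below; field layout abridged):

	/* Allocation/free bookkeeping only -- kept off the datapath. */
	struct mlx5_frag_buf {
		struct mlx5_buf_list	*frags;
		int			npages;
		int			size;
		u8			page_shift;
	};

	/* Datapath control: only the fragments array survives the move. */
	struct mlx5_frag_buf_ctrl {
		struct mlx5_buf_list	*frags;
		u32			sz_m1;
		u16			frag_sz_m1;
		u16			strides_offset;
		u8			log_sz;
		u8			log_stride;
		u8			log_frag_strides;
	};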

In addition, mlx5_fill_fbc() is modified to initialise the frags
pointers as well, becoming mlx5_init_fbc() / mlx5_init_fbc_offset() in
the diff below. This implies that the buffer must be allocated before
the function is called; a sketch of the new helpers follows.
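
Roughly (a sketch of the driver.h side of this patch, which is not part
of the diff shown here; the in-tree version is authoritative):

	static inline void
	mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
			     u8 log_stride, u8 log_sz, u16 strides_offset,
			     struct mlx5_frag_buf_ctrl *fbc)
	{
		/* The caller must have allocated the buffer already. */
		fbc->frags            = frags;
		fbc->log_stride       = log_stride;
		fbc->log_sz           = log_sz;
		fbc->sz_m1            = (1 << fbc->log_sz) - 1;
		fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
		fbc->frag_sz_m1       = (1 << fbc->log_frag_strides) - 1;
		fbc->strides_offset   = strides_offset;
	}

	static inline void
	mlx5_init_fbc(struct mlx5_buf_list *frags, u8 log_stride, u8 log_sz,
		      struct mlx5_frag_buf_ctrl *fbc)
	{
		mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
	}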

The set of type-specific *_get_byte_size() functions is replaced by a
single generic helper, wq_get_byte_sz(), which takes the log size and
log stride directly.
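
For example (illustrative numbers, not taken from the patch), a cyclic
WQ of 2^10 entries with 2^6-byte strides:

	wq_get_byte_sz(10, 6) == ((u32)1 << 10) << 6 == 65536	/* 64 KB */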

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit 4972e6fa3a
parent 3a3295bfa6
Author:    Tariq Toukan
Date:      2018-09-12 15:36:41 +03:00
Committer: Saeed Mahameed

4 changed files with 69 additions and 105 deletions

drivers/net/ethernet/mellanox/mlx5/core/wq.c

@@ -54,54 +54,37 @@ u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
 	return (u32)wq->fbc.sz_m1 + 1;
 }
 
-static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
 {
-	return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
-}
-
-static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
-{
-	return mlx5_wq_cyc_get_byte_size(&wq->rq) +
-	       mlx5_wq_cyc_get_byte_size(&wq->sq);
-}
-
-static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
-{
-	return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
-}
-
-static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
-{
-	return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
+	return ((u32)1 << log_sz) << log_stride;
 }
 
 int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		       void *wqc, struct mlx5_wq_cyc *wq,
 		       struct mlx5_wq_ctrl *wq_ctrl)
 {
+	u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+	u8 log_wq_sz     = MLX5_GET(wq, wqc, log_wq_sz);
 	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
 	int err;
 
-	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
-		      MLX5_GET(wq, wqc, log_wq_sz),
-		      fbc);
-	wq->sz    = wq->fbc.sz_m1 + 1;
-
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+	wq->db  = wq_ctrl->db.db;
+
+	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
 				       &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 		goto err_db_free;
 	}
 
-	fbc->frag_buf = wq_ctrl->buf;
-	wq->db  = wq_ctrl->db.db;
+	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
+	wq->sz = mlx5_wq_cyc_get_size(wq);
 
 	wq_ctrl->mdev = mdev;
@@ -113,46 +96,19 @@ err_db_free:
 	return err;
 }
 
-static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
-				 struct mlx5_wq_qp *qp)
-{
-	struct mlx5_frag_buf_ctrl *sq_fbc;
-	struct mlx5_frag_buf *rqb, *sqb;
-
-	rqb  = &qp->rq.fbc.frag_buf;
-	*rqb = *buf;
-	rqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
-	rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
-
-	sq_fbc = &qp->sq.fbc;
-	sqb    = &sq_fbc->frag_buf;
-	*sqb   = *buf;
-	sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->sq);
-	sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
-	sqb->frags += rqb->npages; /* first part is for the rq */
-	if (sq_fbc->strides_offset)
-		sqb->frags--;
-}
-
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
-	u16 sq_strides_offset;
-	u32 rq_pg_remainder;
+	u8 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
+	u8 log_rq_sz     = MLX5_GET(qpc, qpc, log_rq_size);
+	u8 log_sq_stride = ilog2(MLX5_SEND_WQE_BB);
+	u8 log_sq_sz     = MLX5_GET(qpc, qpc, log_sq_size);
+
+	u32 rq_byte_size;
 	int err;
 
-	mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
-		      MLX5_GET(qpc, qpc, log_rq_size),
-		      &wq->rq.fbc);
-
-	rq_pg_remainder   = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
-	sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
-
-	mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
-			     MLX5_GET(qpc, qpc, log_sq_size),
-			     sq_strides_offset,
-			     &wq->sq.fbc);
-
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
@@ -160,14 +116,32 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		return err;
 	}
 
-	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
+	err = mlx5_frag_buf_alloc_node(mdev,
+				       wq_get_byte_sz(log_rq_sz, log_rq_stride) +
+				       wq_get_byte_sz(log_sq_sz, log_sq_stride),
 				       &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 		goto err_db_free;
 	}
 
-	mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
+	mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);
+
+	rq_byte_size = wq_get_byte_sz(log_rq_sz, log_rq_stride);
+
+	if (rq_byte_size < PAGE_SIZE) {
+		/* SQ starts within the same page of the RQ */
+		u16 sq_strides_offset = rq_byte_size / MLX5_SEND_WQE_BB;
+
+		mlx5_init_fbc_offset(wq_ctrl->buf.frags,
+				     log_sq_stride, log_sq_sz, sq_strides_offset,
+				     &wq->sq.fbc);
+	} else {
+		u16 rq_npages = rq_byte_size >> PAGE_SHIFT;
+
+		mlx5_init_fbc(wq_ctrl->buf.frags + rq_npages,
+			      log_sq_stride, log_sq_sz,
+			      &wq->sq.fbc);
+	}
 
 	wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
 	wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -186,17 +160,19 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *cqc, struct mlx5_cqwq *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
+	u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6;
+	u8 log_wq_sz     = MLX5_GET(cqc, cqc, log_cq_size);
 	int err;
 
-	mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);
-
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+	wq->db  = wq_ctrl->db.db;
+
+	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
 				       &wq_ctrl->buf,
 				       param->buf_numa_node);
 	if (err) {
@@ -205,8 +181,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		goto err_db_free;
 	}
 
-	wq->fbc.frag_buf = wq_ctrl->buf;
-	wq->db  = wq_ctrl->db.db;
+	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc);
 
 	wq_ctrl->mdev = mdev;
@@ -222,30 +197,29 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *wqc, struct mlx5_wq_ll *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
+	u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+	u8 log_wq_sz     = MLX5_GET(wq, wqc, log_wq_sz);
 	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
 	struct mlx5_wqe_srq_next_seg *next_seg;
 	int err;
 	int i;
 
-	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
-		      MLX5_GET(wq, wqc, log_wq_sz),
-		      fbc);
-
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+	wq->db  = wq_ctrl->db.db;
+
+	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
 				       &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 		goto err_db_free;
 	}
 
-	wq->fbc.frag_buf = wq_ctrl->buf;
-	wq->db  = wq_ctrl->db.db;
+	mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
 
 	for (i = 0; i < fbc->sz_m1; i++) {
 		next_seg = mlx5_wq_ll_get_wqe(wq, i);
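
A note on the new SQ placement logic in mlx5_wq_qp_create() above
(illustrative numbers, not from the patch): with log_rq_stride = 6 and
log_rq_sz = 4, rq_byte_size = wq_get_byte_sz(4, 6) = 1024; since this is
smaller than PAGE_SIZE, the SQ shares the RQ's first page and starts at
stride offset 1024 / MLX5_SEND_WQE_BB = 1024 / 64 = 16. When the RQ
fills whole pages instead, the SQ control structure simply starts
rq_npages fragments into the frags array.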