mlx5: Move pci device handling from mlx5_ib to mlx5_core
In preparation for a new mlx5 device which is VPI (i.e., ports can be
either IB or ETH), move the pci device functionality from mlx5_ib to
mlx5_core. This involves the following changes:

1. Move the mlx5_core_dev struct out of mlx5_ib_dev. mlx5_core_dev is now
   an independent structure maintained by mlx5_core, and mlx5_ib_dev holds
   a pointer to it. This requires changing the many places where the core
   device struct was accessed via mlx5_ib_dev (each such access now goes
   through a pointer dereference).

2. All PCI initialization is now done in mlx5_core. Thus, it is now
   mlx5_core which calls pci_register_driver() (and not mlx5_ib, as was
   previously the case).

3. mlx5_ib now registers itself with mlx5_core as an "interface" driver.
   This is very similar to the mechanism employed for the mlx4 (ConnectX)
   driver. Once the HCA is initialized (by mlx5_core), it invokes the
   interface drivers to do their initializations.

4. There is a new event handler which the core registers,
   mlx5_core_event(). This handler invokes the event handlers registered
   by the interfaces.

Based on a patch by Eli Cohen <eli@mellanox.com>

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9603b61de1 (parent 4ada97abe9), committed by David S. Miller
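Most of the mechanical churn in the diff below follows from point 1: the
core device embedded in mlx5_ib_dev becomes a pointer, so every
&dev->mdev turns into dev->mdev and every dev->mdev.priv into
dev->mdev->priv. An abridged sketch of the ownership change (field lists
trimmed to the relevant member; the real structs carry many more fields):

/* Before: mlx5_ib owned the core device by embedding it. */
struct mlx5_ib_dev {
        struct ib_device        ib_dev;
        struct mlx5_core_dev    mdev;   /* embedded: callers take &dev->mdev */
        /* ... */
};

/* After: mlx5_core allocates and owns the core device; mlx5_ib points at it. */
struct mlx5_ib_dev {
        struct ib_device        ib_dev;
        struct mlx5_core_dev   *mdev;   /* owned and initialized by mlx5_core */
        /* ... */
};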
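Points 3 and 4 describe the interface mechanism on the mlx5_core side. A
minimal sketch of its shape, assuming callback signatures along these
lines (the exact parameter types here are an assumption, not a quote from
the patch): an interface supplies add/remove/event callbacks, mlx5_core
calls add() once the HCA is up, and mlx5_core_event() fans events out to
every registered interface's event() callback.

/* Sketch of the mlx5_core "interface" registration API (assumed signatures). */
struct mlx5_interface {
        void *  (*add)(struct mlx5_core_dev *dev);      /* HCA ready: init this interface */
        void    (*remove)(struct mlx5_core_dev *dev, void *context);
        void    (*event)(struct mlx5_core_dev *dev, void *context,
                         enum mlx5_dev_event event, unsigned long param);
        struct list_head        list;                   /* linked into the core's interface list */
};

int  mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);

Under this scheme, mlx5_ib's module init registers a struct mlx5_interface
instead of a pci_driver, and the device bring-up that used to live in its
PCI probe path moves into the add() callback.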
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -180,7 +180,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
         struct mlx5_core_srq *msrq = NULL;
 
         if (qp->ibqp.xrcd) {
-                msrq = mlx5_core_get_srq(&dev->mdev,
+                msrq = mlx5_core_get_srq(dev->mdev,
                                          be32_to_cpu(cqe->srqn));
                 srq = to_mibsrq(msrq);
         } else {
@@ -364,7 +364,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-        mlx5_buf_free(&dev->mdev, &buf->buf);
+        mlx5_buf_free(dev->mdev, &buf->buf);
 }
 
 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -450,7 +450,7 @@ repoll:
          * because CQs will be locked while QPs are removed
          * from the table.
          */
-        mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
+        mqp = __mlx5_qp_lookup(dev->mdev, qpn);
         if (unlikely(!mqp)) {
                 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
                              cq->mcq.cqn, qpn);
@@ -514,11 +514,11 @@ repoll:
         case MLX5_CQE_SIG_ERR:
                 sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
 
-                read_lock(&dev->mdev.priv.mr_table.lock);
-                mmr = __mlx5_mr_lookup(&dev->mdev,
+                read_lock(&dev->mdev->priv.mr_table.lock);
+                mmr = __mlx5_mr_lookup(dev->mdev,
                                        mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
                 if (unlikely(!mmr)) {
-                        read_unlock(&dev->mdev.priv.mr_table.lock);
+                        read_unlock(&dev->mdev->priv.mr_table.lock);
                         mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
                                      cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
                         return -EINVAL;
@@ -536,7 +536,7 @@ repoll:
                              mr->sig->err_item.expected,
                              mr->sig->err_item.actual);
 
-                read_unlock(&dev->mdev.priv.mr_table.lock);
+                read_unlock(&dev->mdev->priv.mr_table.lock);
                 goto repoll;
         }
 
@@ -575,8 +575,8 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
         mlx5_cq_arm(&to_mcq(ibcq)->mcq,
                     (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                     MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-                    to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
-                    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
+                    to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
+                    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));
 
         return 0;
 }
@@ -586,7 +586,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 {
         int err;
 
-        err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
+        err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
                              PAGE_SIZE * 2, &buf->buf);
         if (err)
                 return err;
@@ -691,7 +691,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 {
         int err;
 
-        err = mlx5_db_alloc(&dev->mdev, &cq->db);
+        err = mlx5_db_alloc(dev->mdev, &cq->db);
         if (err)
                 return err;
 
@@ -716,7 +716,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
         mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
 
         (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-        *index = dev->mdev.priv.uuari.uars[0].index;
+        *index = dev->mdev->priv.uuari.uars[0].index;
 
         return 0;
 
@@ -724,14 +724,14 @@ err_buf:
         free_cq_buf(dev, &cq->buf);
 
 err_db:
-        mlx5_db_free(&dev->mdev, &cq->db);
+        mlx5_db_free(dev->mdev, &cq->db);
         return err;
 }
 
 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 {
         free_cq_buf(dev, &cq->buf);
-        mlx5_db_free(&dev->mdev, &cq->db);
+        mlx5_db_free(dev->mdev, &cq->db);
 }
 
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
                 return ERR_PTR(-EINVAL);
 
         entries = roundup_pow_of_two(entries + 1);
-        if (entries > dev->mdev.caps.max_cqes)
+        if (entries > dev->mdev->caps.max_cqes)
                 return ERR_PTR(-EINVAL);
 
         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -789,7 +789,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
         cqb->ctx.c_eqn = cpu_to_be16(eqn);
         cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
 
-        err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
+        err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
         if (err)
                 goto err_cqb;
 
@@ -809,7 +809,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
         return &cq->ibcq;
 
 err_cmd:
-        mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
+        mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
 
 err_cqb:
         mlx5_vfree(cqb);
@@ -834,7 +834,7 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
         if (cq->uobject)
                 context = cq->uobject->context;
 
-        mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
+        mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
         if (context)
                 destroy_cq_user(mcq, context);
         else
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
         int err;
         u32 fsel;
 
-        if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+        if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
                 return -ENOSYS;
 
         in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -931,7 +931,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
         in->ctx.cq_period = cpu_to_be16(cq_period);
         in->ctx.cq_max_count = cpu_to_be16(cq_count);
         in->field_select = cpu_to_be32(fsel);
-        err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+        err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
         kfree(in);
 
         if (err)
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
         int uninitialized_var(cqe_size);
         unsigned long flags;
 
-        if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+        if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
                 pr_info("Firmware does not support resize CQ\n");
                 return -ENOSYS;
         }
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                 return -EINVAL;
 
         entries = roundup_pow_of_two(entries + 1);
-        if (entries > dev->mdev.caps.max_cqes + 1)
+        if (entries > dev->mdev->caps.max_cqes + 1)
                 return -EINVAL;
 
         if (entries == ibcq->cqe + 1)
@@ -1128,7 +1128,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
         in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
         in->cqn = cpu_to_be32(cq->mcq.cqn);
 
-        err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+        err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
         if (err)
                 goto ex_alloc;