Merge branch 'mlx5-next' into rdma.git for-next
From git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Required for dependencies on the next series

* branch 'mlx5-next':
  net/mlx5: E-Switch, add a new prio to be used by the RDMA side
  net/mlx5: E-Switch, don't use hardcoded values for FDB prios
  net/mlx5: Fix false compilation warning
  net/mlx5: Expose MPEIN (Management PCIE INfo) register layout
  net/mlx5: Add rate limit print macros
  net/mlx5: Add explicit bar address field
  net/mlx5: Replace dev_err/warn/info by mlx5_core_err/warn/info
  net/mlx5: Use dev->priv.name instead of dev_name
  net/mlx5: Make mlx5_core messages independent from mdev->pdev
  net/mlx5: Break load_one into three stages
  net/mlx5: Function setup/teardown procedures
  net/mlx5: Move health and page alloc init to mdev_init
  net/mlx5: Split mdev init and pci init
  net/mlx5: Remove redundant init functions parameter
  net/mlx5: Remove spinlock support from mlx5_write64
  net/mlx5: Remove unused MLX5_*_DOORBELL_LOCK macros

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
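The mlx5_ib hunks below all follow from "net/mlx5: Add explicit bar address field": instead of dereferencing dev->pdev just to learn the BAR 0 base, callers read the cached dev->bar_addr. A minimal sketch of how mlx5_core presumably fills that field during PCI init follows; the function name and error handling here are illustrative, only pci_resource_start() and the bar_addr field come from the series itself.

/* Illustrative sketch (not the actual mlx5_core patch): cache the BAR 0
 * base once at PCI-init time so later code reads dev->bar_addr instead of
 * calling pci_resource_start(dev->pdev, 0) at every use site.
 */
static int example_mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        dev->pdev = pdev;
        dev->bar_addr = pci_resource_start(pdev, 0);    /* physical base of BAR 0 */
        return 0;
}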
@@ -148,7 +148,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
 		return ret;
 	}
 
-	*addr = pci_resource_start(dev->pdev, 0) +
+	*addr = dev->bar_addr +
 		MLX5_GET64(alloc_memic_out, out, memic_start_addr);
 
 	return 0;
@@ -167,7 +167,7 @@ int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
 	u64 start_page_idx;
 	int err;
 
-	addr -= pci_resource_start(dev->pdev, 0);
+	addr -= dev->bar_addr;
 	start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
 
 	MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
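The two MEMIC hunks are the same translation in opposite directions: firmware reports a device-memory offset (memic_start_addr) relative to BAR 0, alloc adds the BAR base to produce an absolute address, and dealloc subtracts the base again before computing the page index. A worked example with made-up values (bar_addr, hw_start_addr and the firmware offset are hypothetical):

/* Hypothetical numbers, PAGE_SHIFT == 12:
 *   dev->bar_addr              = 0xf0000000
 *   memic_start_addr (from FW) = 0x00105000
 *   hw_start_addr              = 0x00100000
 *
 * alloc:   *addr = 0xf0000000 + 0x00105000              = 0xf0105000
 * dealloc: addr  = 0xf0105000 - 0xf0000000              = 0x00105000
 *          start_page_idx = (0x105000 - 0x100000) >> 12 = 5
 */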
@@ -2009,7 +2009,7 @@ static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
 
 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
 
-	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
+	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
 }
 
 static int get_command(unsigned long offset)
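Same substitution in uar_index2pfn(): because bar_addr is just the cached value of pci_resource_start(pdev, 0), the computed PFN is unchanged; only the lookup is simplified. A small worked example with illustrative numbers:

/* e.g. bar_addr = 0xf0000000, PAGE_SHIFT = 12, uar_idx = 9,
 * fw_uars_per_page = 4 (values are hypothetical):
 *
 *   pfn = (0xf0000000 >> 12) + 9 / 4 = 0xf0000 + 2 = 0xf0002
 *
 * Identical to the old expression, since bar_addr == pci_resource_start(pdev, 0).
 */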
@@ -2199,7 +2199,7 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 		    page_idx + npages)
 		return -EINVAL;
 
-	pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
+	pfn = ((dev->mdev->bar_addr +
 	       MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
 	       PAGE_SHIFT) +
 	      page_idx;
@@ -2283,7 +2283,7 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
 		goto err_free;
 
 	start_offset = memic_addr & ~PAGE_MASK;
-	page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) -
+	page_idx = (memic_addr - memic->dev->bar_addr -
 		    MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
 		   PAGE_SHIFT;
 
@@ -2326,7 +2326,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
 	if (ret)
 		return ret;
 
-	page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
+	page_idx = (dm->dev_addr - memic->dev->bar_addr -
 		    MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
 		   PAGE_SHIFT;
 	bitmap_clear(rdma_udata_to_drv_context(
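alloc_dm and dealloc_dm derive the same bitmap index from the device-memory address, so whatever alloc marked in the per-context page bitmap, dealloc clears again. Reusing the hypothetical numbers from the MEMIC example above:

/* memic_addr (or dm->dev_addr) = 0xf0105000
 * bar_addr                     = 0xf0000000
 * memic_bar_start_addr         = 0x00100000
 *
 * page_idx     = (0xf0105000 - 0xf0000000 - 0x100000) >> 12 = 0x5000 >> 12 = 5
 * start_offset = 0xf0105000 & ~PAGE_MASK                    = 0  (page aligned)
 */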
@@ -1194,8 +1194,7 @@ static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
 	MLX5_SET64(mkc, mkc, len, length);
 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
-	MLX5_SET64(mkc, mkc, start_addr,
-		   memic_addr - pci_resource_start(dev->mdev->pdev, 0));
+	MLX5_SET64(mkc, mkc, start_addr, memic_addr - dev->mdev->bar_addr);
 
 	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
 	if (err)
@@ -5126,7 +5126,7 @@ out:
 		wmb();
 
 		/* currently we support only regular doorbells */
-		mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
+		mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
 		/* Make sure doorbells don't leak out of SQ spinlock
 		 * and reach the HCA out of order.
 		 */
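The dropped third argument is the fallout of "net/mlx5: Remove spinlock support from mlx5_write64": with only regular (64-bit) doorbells supported, callers no longer pass a lock, so the parameter disappears. A hedged sketch of what the lock-free helper plausibly looks like (the real definition lives in include/linux/mlx5/doorbell.h and may differ in detail):

/* Sketch of a spinlock-free 64-bit doorbell write.  On 64-bit kernels the
 * two big-endian words are issued as one 8-byte MMIO store; on 32-bit
 * kernels they are written back-to-back without taking any lock.
 */
static inline void mlx5_write64_sketch(__be32 val[2], void __iomem *dest)
{
#if BITS_PER_LONG == 64
        __raw_writeq(*(u64 *)val, dest);
#else
        __raw_writel((__force u32)val[0], dest);
        __raw_writel((__force u32)val[1], dest + 4);
#endif
}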