Merge branch 'mlx5-next' into rdma.git for-next
From git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

To resolve conflicts with net-next and pick up the first patch.

* branch 'mlx5-next':
  net/mlx5: Factor out HCA capabilities functions
  IB/mlx5: Add support for 50Gbps per lane link modes
  net/mlx5: Add support to ext_* fields introduced in Port Type and Speed register
  net/mlx5: Add new fields to Port Type and Speed register
  net/mlx5: Refactor queries to speed fields in Port Type and Speed register
  net/mlx5: E-Switch, Avoid magic numbers when initializing offloads mode
  net/mlx5: Relocate vport macros to the vport header file
  net/mlx5: E-Switch, Normalize the name of uplink vport number
  net/mlx5: Provide an alternative VF upper bound for ECPF
  net/mlx5: Add host params change event
  net/mlx5: Add query host params command
  net/mlx5: Update enable HCA dependency
  net/mlx5: Introduce Mellanox SmartNIC and modify page management logic
  IB/mlx5: Use unified register/load function for uplink and VF vports
  net/mlx5: Use consistent vport num argument type
  net/mlx5: Use void pointer as the type in address_of macro
  net/mlx5: Align ODP capability function with netdev coding style
  mlx5: use RCU lock in mlx5_eq_cq_get()

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
@@ -3,10 +3,11 @@
  * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
  */
 
+#include <linux/mlx5/vport.h>
 #include "ib_rep.h"
 #include "srq.h"
 
-static const struct mlx5_ib_profile rep_profile = {
+static const struct mlx5_ib_profile vf_rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
 		     mlx5_ib_stage_init_init,
 		     mlx5_ib_stage_init_cleanup),
@@ -45,31 +46,17 @@ static const struct mlx5_ib_profile rep_profile = {
 		     NULL),
 };
 
-static int
-mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
-{
-	struct mlx5_ib_dev *ibdev;
-
-	ibdev = mlx5_ib_rep_to_dev(rep);
-	if (!__mlx5_ib_add(ibdev, ibdev->profile))
-		return -EINVAL;
-	return 0;
-}
-
-static void
-mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
-{
-	struct mlx5_ib_dev *ibdev;
-
-	ibdev = mlx5_ib_rep_to_dev(rep);
-	__mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX);
-}
-
 static int
 mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+	const struct mlx5_ib_profile *profile;
 	struct mlx5_ib_dev *ibdev;
 
+	if (rep->vport == MLX5_VPORT_UPLINK)
+		profile = &uplink_rep_profile;
+	else
+		profile = &vf_rep_profile;
+
 	ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
 	if (!ibdev)
 		return -ENOMEM;
@@ -78,7 +65,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	ibdev->mdev = dev;
 	ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
 			       MLX5_CAP_GEN(dev, num_vhca_ports));
-	if (!__mlx5_ib_add(ibdev, &rep_profile)) {
+	if (!__mlx5_ib_add(ibdev, profile)) {
 		ib_dealloc_device(&ibdev->ib_dev);
 		return -EINVAL;
 	}
@@ -107,15 +94,14 @@ static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
 	return mlx5_ib_rep_to_dev(rep);
 }
 
-static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
+void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vports = MLX5_TOTAL_VPORTS(mdev);
+	struct mlx5_eswitch_rep_if rep_if = {};
 	int vport;
 
-	for (vport = 1; vport < total_vfs; vport++) {
-		struct mlx5_eswitch_rep_if rep_if = {};
-
+	for (vport = 0; vport < total_vports; vport++) {
 		rep_if.load = mlx5_ib_vport_rep_load;
 		rep_if.unload = mlx5_ib_vport_rep_unload;
 		rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
@@ -123,39 +109,16 @@ static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
 	}
 }
 
-static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
+void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vports = MLX5_TOTAL_VPORTS(mdev);
 	int vport;
 
-	for (vport = 1; vport < total_vfs; vport++)
+	for (vport = total_vports - 1; vport >= 0; vport--)
 		mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
 }
 
-void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
-{
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-	struct mlx5_eswitch_rep_if rep_if = {};
-
-	rep_if.load = mlx5_ib_nic_rep_load;
-	rep_if.unload = mlx5_ib_nic_rep_unload;
-	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
-	rep_if.priv = dev;
-
-	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);
-
-	mlx5_ib_rep_register_vf_vports(dev);
-}
-
-void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
-{
-	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
-
-	mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */
-	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/
-}
-
 u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
 {
 	return mlx5_eswitch_mode(esw);
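The resolved hunks above fold the old separate uplink (NIC) and VF paths into one registration loop and tear reps down in reverse order. The following standalone sketch only illustrates that pattern; the types and names in it (struct rep_if, reps[], NUM_VPORTS, UPLINK_VPORT, register_vport_reps) are made up for the example and are not the kernel's mlx5 API, which uses mlx5_eswitch_register_vport_rep()/mlx5_eswitch_unregister_vport_rep() as shown in the diff.

/* Illustration only: register every vport, uplink included, in one forward
 * loop and unregister in reverse registration order. */
#include <stdio.h>

#define NUM_VPORTS   4        /* stand-in for MLX5_TOTAL_VPORTS(mdev) */
#define UPLINK_VPORT 0        /* stand-in for MLX5_VPORT_UPLINK */

struct rep_if {               /* stand-in for struct mlx5_eswitch_rep_if */
	int (*load)(int vport);
	void (*unload)(int vport);
};

static int vport_rep_load(int vport)
{
	/* In the driver this is where uplink_rep_profile vs. vf_rep_profile
	 * is chosen, based on the vport number. */
	printf("load vport %d (%s profile)\n", vport,
	       vport == UPLINK_VPORT ? "uplink" : "vf");
	return 0;
}

static void vport_rep_unload(int vport)
{
	printf("unload vport %d\n", vport);
}

static struct rep_if reps[NUM_VPORTS];

static void register_vport_reps(void)
{
	int vport;

	/* One unified loop: vport 0 (uplink) and the VF vports alike. */
	for (vport = 0; vport < NUM_VPORTS; vport++) {
		reps[vport].load = vport_rep_load;
		reps[vport].unload = vport_rep_unload;
		reps[vport].load(vport);
	}
}

static void unregister_vport_reps(void)
{
	int vport;

	/* Tear down in reverse registration order. */
	for (vport = NUM_VPORTS - 1; vport >= 0; vport--)
		reps[vport].unload(vport);
}

int main(void)
{
	register_vport_reps();
	unregister_vport_reps();
	return 0;
}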