Merge tag 'mlx5-updates-2019-07-04-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-07-04

This series adds mlx5 support for querying firmware versions via devlink.

1) Implement the required low-level firmware commands.
2) Implement the devlink knobs and callbacks for the firmware version query.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller
2019-07-05 16:24:27 -07:00
39 changed files with 1067 additions and 381 deletions
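For reference, the raw firmware version reported by the new query is a single 32-bit word. Below is a minimal user-space sketch of the decoding this series performs, mirroring the mlx5_fw_ver_major/minor/subminor helpers and the "%d.%d.%04d" format used by mlx5_devlink_info_get in the diff further down; the sample value is hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t  fw_ver_major(uint32_t v)    { return (v >> 24) & 0xff; }
    static uint8_t  fw_ver_minor(uint32_t v)    { return (v >> 16) & 0xff; }
    static uint16_t fw_ver_subminor(uint32_t v) { return v & 0xffff; }

    int main(void)
    {
            uint32_t running = 0x10190fa0;  /* hypothetical raw MCQI version word */
            char str[32];

            snprintf(str, sizeof(str), "%d.%d.%04d",
                     fw_ver_major(running), fw_ver_minor(running),
                     fw_ver_subminor(running));
            printf("fw.version: %s\n", str);  /* prints 16.25.4000 */
            return 0;
    }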

View File

@@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
list_for_each_entry_safe(mcq, temp, &ctx->process_list,
tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
mcq->tasklet_ctx.comp(mcq, NULL);
mlx5_cq_put(mcq);
if (time_after(jiffies, end))
break;
@@ -68,7 +68,8 @@ void mlx5_cq_tasklet_cb(unsigned long data)
tasklet_schedule(&ctx->task);
}
static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
struct mlx5_eqe *eqe)
{
unsigned long flags;
struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
@@ -87,11 +88,10 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
}
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen)
u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
struct mlx5_eq_comp *eq;
int err;
@@ -100,9 +100,9 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
if (IS_ERR(eq))
return PTR_ERR(eq);
memset(out, 0, sizeof(out));
memset(out, 0, outlen);
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
return err;
@@ -158,13 +158,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
int err;
err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
if (err)
return err;
err = mlx5_eq_del_cq(&cq->eq->core, cq);
if (err)
return err;
mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
mlx5_eq_del_cq(&cq->eq->core, cq);
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
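The hunks above change the CQ completion callback to also carry the triggering EQE, which may be NULL when the completion comes from the tasklet path (mlx5_cq_tasklet_cb passes NULL). A hedged sketch of a handler with the new signature; my_cq and its napi field are hypothetical and not part of the mlx5 driver:

    static void my_cq_complete(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
    {
            struct my_cq *cq = container_of(mcq, struct my_cq, mcq);

            /* eqe is the event queue entry that triggered this completion;
             * it can be NULL on the tasklet path, so don't rely on it. */
            napi_schedule(&cq->napi);
    }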

View File

@@ -311,13 +311,20 @@ static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
u32 pci_id = mlx5_gen_pci_id(dev);
struct mlx5_core_dev *res = NULL;
struct mlx5_core_dev *tmp_dev;
struct mlx5_priv *priv;
u32 pci_id;
if (!mlx5_core_is_pf(dev))
return NULL;
pci_id = mlx5_gen_pci_id(dev);
list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
if (!mlx5_core_is_pf(tmp_dev))
continue;
if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
res = tmp_dev;
break;

View File

@@ -25,6 +25,65 @@ static int mlx5_devlink_flash_update(struct devlink *devlink,
return mlx5_firmware_flash(dev, fw, extack);
}
static u8 mlx5_fw_ver_major(u32 version)
{
return (version >> 24) & 0xff;
}
static u8 mlx5_fw_ver_minor(u32 version)
{
return (version >> 16) & 0xff;
}
static u16 mlx5_fw_ver_subminor(u32 version)
{
return version & 0xffff;
}
#define DEVLINK_FW_STRING_LEN 32
static int
mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
char version_str[DEVLINK_FW_STRING_LEN];
u32 running_fw, stored_fw;
int err;
err = devlink_info_driver_name_put(req, DRIVER_NAME);
if (err)
return err;
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
err = mlx5_fw_version_query(dev, &running_fw, &stored_fw);
if (err)
return err;
snprintf(version_str, sizeof(version_str), "%d.%d.%04d",
mlx5_fw_ver_major(running_fw), mlx5_fw_ver_minor(running_fw),
mlx5_fw_ver_subminor(running_fw));
err = devlink_info_version_running_put(req, "fw.version", version_str);
if (err)
return err;
/* no pending version, return running (stored) version */
if (stored_fw == 0)
stored_fw = running_fw;
snprintf(version_str, sizeof(version_str), "%d.%d.%04d",
mlx5_fw_ver_major(stored_fw), mlx5_fw_ver_minor(stored_fw),
mlx5_fw_ver_subminor(stored_fw));
err = devlink_info_version_stored_put(req, "fw.version", version_str);
if (err)
return err;
return 0;
}
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -35,6 +94,7 @@ static const struct devlink_ops mlx5_devlink_ops = {
.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
#endif
.flash_update = mlx5_devlink_flash_update,
.info_get = mlx5_devlink_info_get,
};
struct devlink *mlx5_devlink_alloc(void)

View File

@@ -880,7 +880,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);

View File

@@ -680,7 +680,7 @@ static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
memset(perm_addr, 0xff, MAX_ADDR_LEN);
mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
mlx5_query_mac_address(priv->mdev, perm_addr);
}
static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,

View File

@@ -1551,6 +1551,7 @@ static void mlx5e_free_cq(struct mlx5e_cq *cq)
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_core_dev *mdev = cq->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
@@ -1585,7 +1586,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
err = mlx5_core_create_cq(mdev, mcq, in, inlen);
err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
kvfree(in);
@@ -4790,7 +4791,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
if (is_zero_ether_addr(netdev->dev_addr) &&
!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
eth_hw_addr_random(netdev);
@@ -5356,7 +5357,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
#ifdef CONFIG_MLX5_ESWITCH
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
mlx5e_rep_register_vport_reps(mdev);
return mdev;
}

View File

@@ -398,7 +398,7 @@ static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
priv = netdev_priv(dev);
esw = priv->mdev->priv.eswitch;
if (esw->mode == SRIOV_NONE)
if (esw->mode == MLX5_ESWITCH_NONE)
return -EOPNOTSUPP;
parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
@@ -414,7 +414,7 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
struct mlx5e_rep_sq *rep_sq, *tmp;
struct mlx5e_rep_priv *rpriv;
if (esw->mode != SRIOV_OFFLOADS)
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
rpriv = mlx5e_rep_to_rep_priv(rep);
@@ -435,7 +435,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
int err;
int i;
if (esw->mode != SRIOV_OFFLOADS)
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
rpriv = mlx5e_rep_to_rep_priv(rep);
@@ -1392,7 +1392,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
mlx5_query_mac_address(mdev, netdev->dev_addr);
netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (MLX5_CAP_GEN(mdev, qos))

View File

@@ -3372,7 +3372,7 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
if (!tc_can_offload_extack(priv->netdev, f->common.extack))
return -EOPNOTSUPP;
if (esw && esw->mode == SRIOV_OFFLOADS)
if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
err = mlx5e_add_fdb_flow(priv, f, flow_flags,
filter_dev, flow);
else

View File

@@ -168,7 +168,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
return work_done;
}
void mlx5e_completion_event(struct mlx5_core_cq *mcq)
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

View File

@@ -153,7 +153,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
cq = mlx5_eq_cq_get(eq, cqn);
if (likely(cq)) {
++cq->arm_sn;
cq->comp(cq);
cq->comp(cq, eqe);
mlx5_cq_put(cq);
} else {
mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
@@ -256,6 +256,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int inlen;
u32 *in;
int err;
int i;
/* Init CQ table */
memset(cq_table, 0, sizeof(*cq_table));
@@ -283,10 +284,12 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
mlx5_fill_page_array(&eq->buf, pas);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
if (!param->mask && MLX5_CAP_GEN(dev, log_max_uctx))
if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);
MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);
for (i = 0; i < 4; i++)
MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
param->mask[i]);
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
@@ -389,7 +392,7 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
return err;
}
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *tmp;
@@ -399,16 +402,14 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
spin_unlock(&table->lock);
if (!tmp) {
mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
return -ENOENT;
mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
eq->eqn, cq->cqn);
return;
}
if (tmp != cq) {
mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn);
return -EINVAL;
}
return 0;
if (tmp != cq)
mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
eq->eqn, cq->cqn);
}
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
@@ -502,14 +503,31 @@ static int cq_err_event_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
cq->event(cq, type);
if (cq->event)
cq->event(cq, type);
mlx5_cq_put(cq);
return NOTIFY_OK;
}
static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
__be64 *user_unaffiliated_events;
__be64 *user_affiliated_events;
int i;
user_affiliated_events =
MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
user_unaffiliated_events =
MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);
for (i = 0; i < 4; i++)
mask[i] |= be64_to_cpu(user_affiliated_events[i] |
user_unaffiliated_events[i]);
}
static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
@@ -546,7 +564,10 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
return async_event_mask;
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
gather_user_async_events(dev, mask);
}
static int create_async_eqs(struct mlx5_core_dev *dev)
@@ -561,9 +582,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.mask = 1ull << MLX5_EVENT_TYPE_CMD,
.nent = MLX5_NUM_CMD_EQE,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
err = create_async_eq(dev, &table->cmd_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
@@ -579,9 +601,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.mask = gather_async_events_mask(dev),
.nent = MLX5_NUM_ASYNC_EQE,
};
gather_async_events_mask(dev, param.mask);
err = create_async_eq(dev, &table->async_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
@@ -597,9 +620,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
.mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
.nent = /* TODO: sriov max_vf + */ 1,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
err = create_async_eq(dev, &table->pages_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
@@ -791,7 +815,6 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
eq->irq_nb.notifier_call = mlx5_eq_comp_int;
param = (struct mlx5_eq_param) {
.irq_index = vecidx,
.mask = 0,
.nent = nent,
};
err = create_map_eq(dev, &eq->core, &param);
@@ -927,6 +950,7 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
@@ -937,3 +961,4 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
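With this series the EQ creation parameter carries a 256-bit event mask (u64 mask[4]) instead of a single u64, and create_map_eq() programs it word by word with MLX5_ARRAY_SET64 as shown above. A hedged sketch of the caller side, following the cmd-EQ setup in create_async_eqs(); the hunks above only populate word 0 plus the user event capability words, so placing higher-numbered events in later words is an assumption:

    struct mlx5_eq_param param = {
            .irq_index = 0,
            .nent = MLX5_NUM_CMD_EQE,
    };

    /* Event types below 64 live in word 0 of the bitmask. */
    param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
    err = create_async_eq(dev, &table->cmd_eq.core, &param);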

View File

@@ -497,7 +497,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
fdb_add:
/* SRIOV is enabled: Forward UC MAC to vport */
if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
@@ -897,7 +897,7 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
struct mlx5_eswitch *esw = dev->priv.eswitch;
u8 mac[ETH_ALEN];
mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac);
@@ -1553,6 +1553,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
u16 vport_num = vport->vport;
int flags;
if (esw->manager_vport == vport_num)
return;
@@ -1570,11 +1571,13 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
vport->info.node_guid);
}
flags = (vport->info.vlan || vport->info.qos) ?
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
(vport->info.vlan || vport->info.qos));
flags);
/* Only legacy mode needs ACLs */
if (esw->mode == SRIOV_LEGACY) {
if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw_vport_ingress_config(esw, vport);
esw_vport_egress_config(esw, vport);
}
@@ -1626,7 +1629,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
/* Create steering drop counters for ingress and egress ACLs */
if (vport_num && esw->mode == SRIOV_LEGACY)
if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
esw_vport_create_drop_counters(vport);
/* Restore old vport configuration */
@@ -1680,7 +1683,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
vport->enabled_events = 0;
esw_vport_disable_qos(esw, vport);
if (esw->manager_vport != vport_num &&
esw->mode == SRIOV_LEGACY) {
esw->mode == MLX5_ESWITCH_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
@@ -1712,59 +1715,91 @@ static int eswitch_vport_event(struct notifier_block *nb,
return NOTIFY_OK;
}
int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
/**
* mlx5_esw_query_functions - Returns raw output about functions state
* @dev: Pointer to device to query
*
* mlx5_esw_query_functions() allocates and returns functions changed
* raw output memory pointer from device on success. Otherwise returns ERR_PTR.
* Caller must free the memory using kvfree() when valid pointer is returned.
*/
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
u32 *out;
int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return ERR_PTR(-ENOMEM);
MLX5_SET(query_esw_functions_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
if (!err)
return out;
kvfree(out);
return ERR_PTR(err);
}
static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
{
MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
mlx5_eq_notifier_register(esw->dev, &esw->nb);
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
ESW_FUNCTIONS_CHANGED);
mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
}
}
static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
{
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
flush_workqueue(esw->work_queue);
}
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
{
struct mlx5_vport *vport;
int total_nvports = 0;
int err;
int i, enabled_events;
if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
return -EOPNOTSUPP;
}
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
if (mode == SRIOV_OFFLOADS) {
if (mlx5_core_is_ecpf_esw_manager(esw->dev))
total_nvports = esw->total_vports;
else
total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
}
esw_warn(esw->dev, "engress ACL is not supported by FW\n");
esw->mode = mode;
mlx5_lag_update(esw->dev);
if (mode == SRIOV_LEGACY) {
if (mode == MLX5_ESWITCH_LEGACY) {
err = esw_create_legacy_table(esw);
if (err)
goto abort;
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
err = esw_offloads_init(esw, nvfs, total_nvports);
err = esw_offloads_init(esw);
}
if (err)
@@ -1774,11 +1809,8 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
if (err)
esw_warn(esw->dev, "Failed to create eswitch TSAR");
/* Don't enable vport events when in SRIOV_OFFLOADS mode, since:
* 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode
* 2. FDB/Eswitch is programmed by user space tools
*/
enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
UC_ADDR_CHANGE;
/* Enable PF vport */
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
@@ -1791,22 +1823,21 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
}
/* Enable VF vports */
mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
esw_enable_vport(esw, vport, enabled_events);
if (mode == SRIOV_LEGACY) {
MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
mlx5_eq_notifier_register(esw->dev, &esw->nb);
}
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
esw->enabled_vports);
return 0;
abort:
esw->mode = SRIOV_NONE;
esw->mode = MLX5_ESWITCH_NONE;
if (mode == SRIOV_OFFLOADS) {
if (mode == MLX5_ESWITCH_OFFLOADS) {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
@@ -1814,23 +1845,22 @@ abort:
return err;
}
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
struct esw_mc_addr *mc_promisc;
struct mlx5_vport *vport;
int old_mode;
int i;
if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
return;
esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
esw->enabled_vports, esw->mode);
esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
mc_promisc = &esw->mc_promisc;
if (esw->mode == SRIOV_LEGACY)
mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
mlx5_eswitch_event_handlers_unregister(esw);
mlx5_esw_for_all_vports(esw, i, vport)
esw_disable_vport(esw, vport);
@@ -1840,17 +1870,17 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_destroy_tsar(esw);
if (esw->mode == SRIOV_LEGACY)
if (esw->mode == MLX5_ESWITCH_LEGACY)
esw_destroy_legacy_table(esw);
else if (esw->mode == SRIOV_OFFLOADS)
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw_offloads_cleanup(esw);
old_mode = esw->mode;
esw->mode = SRIOV_NONE;
esw->mode = MLX5_ESWITCH_NONE;
mlx5_lag_update(esw->dev);
if (old_mode == SRIOV_OFFLOADS) {
if (old_mode == MLX5_ESWITCH_OFFLOADS) {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
@@ -1858,14 +1888,16 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int total_vports = MLX5_TOTAL_VPORTS(dev);
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
int total_vports;
int err, i;
if (!MLX5_VPORT_MANAGER(dev))
return 0;
total_vports = mlx5_eswitch_get_total_vports(dev);
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
total_vports,
@@ -1878,6 +1910,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->dev = dev;
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
@@ -1911,7 +1944,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
}
esw->enabled_vports = 0;
esw->mode = SRIOV_NONE;
esw->mode = MLX5_ESWITCH_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
@@ -1981,7 +2014,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
ether_addr_copy(evport->info.mac, mac);
evport->info.node_guid = node_guid;
if (evport->enabled && esw->mode == SRIOV_LEGACY)
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
err = esw_vport_ingress_config(esw, evport);
unlock:
@@ -2065,7 +2098,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
evport->info.vlan = vlan;
evport->info.qos = qos;
if (evport->enabled && esw->mode == SRIOV_LEGACY) {
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
goto unlock;
@@ -2107,7 +2140,7 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
mlx5_core_warn(esw->dev,
"Spoofchk in set while MAC is invalid, vport(%d)\n",
evport->vport);
if (evport->enabled && esw->mode == SRIOV_LEGACY)
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
err = esw_vport_ingress_config(esw, evport);
if (err)
evport->info.spoofchk = pschk;
@@ -2203,7 +2236,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
return -EPERM;
mutex_lock(&esw->state_lock);
if (esw->mode != SRIOV_LEGACY) {
if (esw->mode != MLX5_ESWITCH_LEGACY) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2226,7 +2259,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
return -EPERM;
mutex_lock(&esw->state_lock);
if (esw->mode != SRIOV_LEGACY) {
if (esw->mode != MLX5_ESWITCH_LEGACY) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2369,7 +2402,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
u64 bytes = 0;
int err = 0;
if (!vport->enabled || esw->mode != SRIOV_LEGACY)
if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
return 0;
if (vport->egress.drop_counter)
@@ -2479,7 +2512,7 @@ free_out:
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
{
return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
@@ -2496,10 +2529,10 @@ EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
{
if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
dev1->priv.eswitch->mode == SRIOV_NONE) ||
(dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
dev1->priv.eswitch->mode == SRIOV_OFFLOADS))
if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
(dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
return true;
return false;
@@ -2508,6 +2541,26 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1)
{
return (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
dev1->priv.eswitch->mode == SRIOV_OFFLOADS);
return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
{
const u32 *out;
WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
esw->esw_funcs.num_vfs = num_vfs;
return;
}
out = mlx5_esw_query_functions(esw->dev);
if (IS_ERR(out))
return;
esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_num_of_vfs);
kvfree(out);
}
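The reworked mlx5_esw_query_functions() above allocates the output buffer itself and returns it (or an ERR_PTR), so callers check with IS_ERR() and must kvfree() the buffer when done, as mlx5_eswitch_update_num_of_vfs() does. A minimal caller sketch following that pattern:

    const u32 *out;
    u16 num_vfs;

    out = mlx5_esw_query_functions(esw->dev);
    if (IS_ERR(out))
            return PTR_ERR(out);

    num_vfs = MLX5_GET(query_esw_functions_out, out,
                       host_params_context.host_num_of_vfs);
    kvfree(out);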

View File

@@ -229,12 +229,12 @@ struct mlx5_eswitch {
int mode;
int nvports;
u16 manager_vport;
u16 first_host_vport;
struct mlx5_esw_functions esw_funcs;
};
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
int total_nvports);
int esw_offloads_init(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -255,8 +255,8 @@ void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, u8 mac[ETH_ALEN]);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -392,7 +392,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack);
@@ -425,7 +425,7 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
struct mlx5_core_dev *dev1);
int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen);
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
@@ -445,6 +445,12 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}
static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev) ?
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
{
/* Ideally device should have the functions changed supported
@@ -541,25 +547,48 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
/* Includes host PF (vport 0) if it's not esw manager. */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \
for ((i) = (esw)->first_host_vport; \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) <= (nvfs); (i)++)
#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \
for ((i) = (nvfs); \
(rep) = &(esw)->offloads.vport_reps[i], \
(i) >= (esw)->first_host_vport; (i)--)
#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \
for ((vport) = (esw)->first_host_vport; \
(vport) <= (nvfs); (vport)++)
#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \
for ((vport) = (nvfs); \
(vport) >= (esw)->first_host_vport; (vport)--)
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline int
mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
return -EOPNOTSUPP;
return ERR_PTR(-EOPNOTSUPP);
}
static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
#define FDB_MAX_CHAIN 1
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
#define FDB_MAX_PRIO 1
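The new mlx5_esw_for_each_host_func_vport() iterators above walk from esw->first_host_vport (the host PF when the ECPF manages the eswitch, otherwise the first VF) up to the given VF count. A hedged sketch of the usage pattern the offloads code in the next file adopts for the inline-mode loop:

    int vport, err = 0;

    mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
            err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
            if (err)
                    break;
    }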

View File

@@ -147,7 +147,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_table *fdb;
int j, i = 0;
if (esw->mode != SRIOV_OFFLOADS)
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
flow_act.action = attr->action;
@@ -357,11 +357,10 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
struct mlx5_eswitch_rep *rep;
int vf_vport, err = 0;
int i, err = 0;
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport];
mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
@@ -1370,21 +1369,22 @@ out:
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
int err, err1;
if (esw->mode != SRIOV_LEGACY &&
if (esw->mode != MLX5_ESWITCH_LEGACY &&
!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set offloads mode, SRIOV legacy not enabled");
return -EINVAL;
}
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
mlx5_eswitch_disable(esw);
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to legacy");
@@ -1392,7 +1392,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
num_vfs,
&esw->offloads.inline_mode)) {
esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
NL_SET_ERR_MSG_MOD(extack,
@@ -1409,11 +1408,11 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
int total_vports = esw->total_vports;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_eswitch_rep *rep;
u8 hw_id[ETH_ALEN], rep_type;
int vport;
int vport_index;
esw->offloads.vport_reps = kcalloc(total_vports,
sizeof(struct mlx5_eswitch_rep),
@@ -1421,10 +1420,11 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
if (!esw->offloads.vport_reps)
return -ENOMEM;
mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
mlx5_query_mac_address(dev, hw_id);
mlx5_esw_for_all_reps(esw, vport, rep) {
rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
mlx5_esw_for_all_reps(esw, vport_index, rep) {
rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
rep->vport_index = vport_index;
ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
@@ -1479,21 +1479,20 @@ static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
__unload_reps_vf_vport(esw, nvports, rep_type);
}
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
u8 rep_type)
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
__unload_reps_vf_vport(esw, nvports, rep_type);
__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
/* Special vports must be the last to unload. */
__unload_reps_special_vport(esw, rep_type);
}
static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
u8 rep_type = NUM_REP_TYPES;
while (rep_type-- > 0)
__unload_reps_all_vport(esw, nvports, rep_type);
__unload_reps_all_vport(esw, rep_type);
}
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
@@ -1569,6 +1568,26 @@ err_vf:
return err;
}
static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
int err;
/* Special vports must be loaded first, uplink rep creates mdev resource. */
err = __load_reps_special_vport(esw, rep_type);
if (err)
return err;
err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
if (err)
goto err_vfs;
return 0;
err_vfs:
__unload_reps_special_vport(esw, rep_type);
return err;
}
static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
u8 rep_type = 0;
@@ -1588,13 +1607,13 @@ err_reps:
return err;
}
static int esw_offloads_load_special_vport(struct mlx5_eswitch *esw)
static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
{
u8 rep_type = 0;
int err;
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
err = __load_reps_special_vport(esw, rep_type);
err = __load_reps_all_vport(esw, rep_type);
if (err)
goto err_reps;
}
@@ -1603,7 +1622,7 @@ static int esw_offloads_load_special_vport(struct mlx5_eswitch *esw)
err_reps:
while (rep_type-- > 0)
__unload_reps_special_vport(esw, rep_type);
__unload_reps_all_vport(esw, rep_type);
return err;
}
@@ -1989,11 +2008,17 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int vf_nvports,
int nvports)
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
int num_vfs = esw->esw_funcs.num_vfs;
int total_vports;
int err;
if (mlx5_core_is_ecpf_esw_manager(esw->dev))
total_vports = esw->total_vports;
else
total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
@@ -2001,15 +2026,15 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int vf_nvports,
if (err)
return err;
err = esw_create_offloads_fdb_tables(esw, nvports);
err = esw_create_offloads_fdb_tables(esw, total_vports);
if (err)
goto create_fdb_err;
err = esw_create_offloads_table(esw, nvports);
err = esw_create_offloads_table(esw, total_vports);
if (err)
goto create_ft_err;
err = esw_create_vport_rx_group(esw, nvports);
err = esw_create_vport_rx_group(esw, total_vports);
if (err)
goto create_fg_err;
@@ -2035,56 +2060,53 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_offloads_acl_tables(esw);
}
static void esw_functions_changed_event_handler(struct work_struct *work)
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
struct mlx5_host_work *host_work;
struct mlx5_eswitch *esw;
u16 num_vfs = 0;
int err;
bool host_pf_disabled;
u16 new_num_vfs;
host_work = container_of(work, struct mlx5_host_work, work);
esw = host_work->esw;
new_num_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_num_of_vfs);
host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_pf_disabled);
err = mlx5_esw_query_functions(esw->dev, out, sizeof(out));
num_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_num_of_vfs);
if (err || num_vfs == esw->esw_funcs.num_vfs)
goto out;
if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
return;
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
} else {
err = esw_offloads_load_vf_reps(esw, num_vfs);
int err;
err = esw_offloads_load_vf_reps(esw, new_num_vfs);
if (err)
goto out;
return;
}
esw->esw_funcs.num_vfs = new_num_vfs;
}
esw->esw_funcs.num_vfs = num_vfs;
static void esw_functions_changed_event_handler(struct work_struct *work)
{
struct mlx5_host_work *host_work;
struct mlx5_eswitch *esw;
const u32 *out;
host_work = container_of(work, struct mlx5_host_work, work);
esw = host_work->esw;
out = mlx5_esw_query_functions(esw->dev);
if (IS_ERR(out))
goto out;
esw_vfs_changed_event_handler(esw, out);
kvfree(out);
out:
kfree(host_work);
}
static void esw_emulate_event_handler(struct work_struct *work)
{
struct mlx5_host_work *host_work =
container_of(work, struct mlx5_host_work, work);
struct mlx5_eswitch *esw = host_work->esw;
int err;
if (esw->esw_funcs.num_vfs) {
err = esw_offloads_load_vf_reps(esw, esw->esw_funcs.num_vfs);
if (err)
esw_warn(esw->dev, "Load vf reps err=%d\n", err);
}
kfree(host_work);
}
static int esw_functions_changed_event(struct notifier_block *nb,
unsigned long type, void *data)
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
struct mlx5_esw_functions *esw_funcs;
struct mlx5_host_work *host_work;
@@ -2099,44 +2121,17 @@ static int esw_functions_changed_event(struct notifier_block *nb,
host_work->esw = esw;
if (mlx5_eswitch_is_funcs_handler(esw->dev))
INIT_WORK(&host_work->work,
esw_functions_changed_event_handler);
else
INIT_WORK(&host_work->work, esw_emulate_event_handler);
INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
queue_work(esw->work_queue, &host_work->work);
return NOTIFY_OK;
}
static void esw_functions_changed_event_init(struct mlx5_eswitch *esw,
u16 vf_nvports)
{
if (mlx5_eswitch_is_funcs_handler(esw->dev)) {
esw->esw_funcs.num_vfs = 0;
MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
ESW_FUNCTIONS_CHANGED);
mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
} else {
esw->esw_funcs.num_vfs = vf_nvports;
}
}
static void esw_functions_changed_event_cleanup(struct mlx5_eswitch *esw)
{
if (!mlx5_eswitch_is_funcs_handler(esw->dev))
return;
mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
flush_workqueue(esw->work_queue);
}
int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
int total_nvports)
int esw_offloads_init(struct mlx5_eswitch *esw)
{
int err;
err = esw_offloads_steering_init(esw, vf_nvports, total_nvports);
err = esw_offloads_steering_init(esw);
if (err)
return err;
@@ -2146,31 +2141,15 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
goto err_vport_metadata;
}
/* Only load special vports reps. VF reps will be loaded in
* context of functions_changed event handler through real
* or emulated event.
*/
err = esw_offloads_load_special_vport(esw);
err = esw_offloads_load_all_reps(esw);
if (err)
goto err_reps;
esw_offloads_devcom_init(esw);
mutex_init(&esw->offloads.termtbl_mutex);
esw_functions_changed_event_init(esw, vf_nvports);
mlx5_rdma_enable_roce(esw->dev);
/* Call esw_functions_changed event to load VF reps:
* 1. HW does not support the event then emulate it
* Or
* 2. The event was already notified when num_vfs changed
* and eswitch was in legacy mode
*/
esw_functions_changed_event(&esw->esw_funcs.nb.nb,
MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED,
NULL);
return 0;
err_reps:
@@ -2184,13 +2163,13 @@ err_vport_metadata:
static int esw_offloads_stop(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
int err, err1;
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
mlx5_eswitch_disable(esw);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to offloads");
@@ -2202,10 +2181,9 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
esw_functions_changed_event_cleanup(esw);
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw, esw->esw_funcs.num_vfs);
esw_offloads_unload_all_reps(esw);
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
mlx5_eswitch_disable_passing_vport_metadata(esw);
esw_offloads_steering_cleanup(esw);
@@ -2215,10 +2193,10 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
*mlx5_mode = SRIOV_LEGACY;
*mlx5_mode = MLX5_ESWITCH_LEGACY;
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
*mlx5_mode = SRIOV_OFFLOADS;
*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
break;
default:
return -EINVAL;
@@ -2230,10 +2208,10 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
switch (mlx5_mode) {
case SRIOV_LEGACY:
case MLX5_ESWITCH_LEGACY:
*mode = DEVLINK_ESWITCH_MODE_LEGACY;
break;
case SRIOV_OFFLOADS:
case MLX5_ESWITCH_OFFLOADS:
*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
break;
default:
@@ -2297,7 +2275,7 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
if (!MLX5_ESWITCH_MANAGER(dev))
return -EPERM;
if (dev->priv.eswitch->mode == SRIOV_NONE &&
if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
!mlx5_core_is_ecpf_esw_manager(dev))
return -EOPNOTSUPP;
@@ -2348,7 +2326,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err, vport;
int err, vport, num_vport;
u8 mlx5_mode;
err = mlx5_devlink_eswitch_check(devlink);
@@ -2377,7 +2355,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (err)
goto out;
for (vport = 1; vport < esw->enabled_vports; vport++) {
mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
@@ -2390,7 +2368,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
return 0;
revert_inline_mode:
while (--vport > 0)
num_vport = --vport;
mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
mlx5_modify_nic_vport_min_inline(dev,
vport,
esw->offloads.inline_mode);
@@ -2411,7 +2390,7 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
struct mlx5_core_dev *dev = esw->dev;
@@ -2420,7 +2399,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
if (esw->mode == SRIOV_NONE)
if (esw->mode == MLX5_ESWITCH_NONE)
return -EOPNOTSUPP;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
@@ -2435,9 +2414,10 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
}
query_vports:
for (vport = 1; vport <= nvfs; vport++) {
mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
if (vport > 1 && prev_mlx5_mode != mlx5_mode)
if (prev_mlx5_mode != mlx5_mode)
return -EINVAL;
prev_mlx5_mode = mlx5_mode;
}
@@ -2467,7 +2447,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
return -EOPNOTSUPP;
if (esw->mode == SRIOV_LEGACY) {
if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw->offloads.encap = encap;
return 0;
}
@@ -2530,12 +2510,11 @@ EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
u16 max_vf = mlx5_core_max_vfs(esw->dev);
struct mlx5_eswitch_rep *rep;
int i;
if (esw->mode == SRIOV_OFFLOADS)
__unload_reps_all_vport(esw, max_vf, rep_type);
if (esw->mode == MLX5_ESWITCH_OFFLOADS)
__unload_reps_all_vport(esw, rep_type);
mlx5_esw_for_all_reps(esw, i, rep)
atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);

View File

@@ -414,7 +414,8 @@ static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
}
static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)
static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq,
struct mlx5_eqe *eqe)
{
struct mlx5_fpga_conn *conn;
@@ -429,6 +430,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
struct mlx5_fpga_device *fdev = conn->fdev;
struct mlx5_core_dev *mdev = fdev->mdev;
u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_wq_param wqp;
struct mlx5_cqe64 *cqe;
int inlen, err, eqn;
@@ -476,7 +478,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);
err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out));
kvfree(in);
if (err)
@@ -867,7 +869,7 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
conn->cb_arg = attr->cb_arg;
remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac);
err = mlx5_query_mac_address(fdev->mdev, remote_mac);
if (err) {
mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
ret = ERR_PTR(err);

View File

@@ -2092,7 +2092,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
{
struct mlx5_flow_steering *steering = dev->priv.steering;
if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
return NULL;
switch (type) {
@@ -2423,7 +2423,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
if (!steering->esw_egress_root_ns)
return;
for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
cleanup_root_ns(steering->esw_egress_root_ns[i]);
kfree(steering->esw_egress_root_ns);
@@ -2438,7 +2438,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
if (!steering->esw_ingress_root_ns)
return;
for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
cleanup_root_ns(steering->esw_ingress_root_ns[i]);
kfree(steering->esw_ingress_root_ns);
@@ -2606,16 +2606,18 @@ static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vpo
static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
int total_vports = mlx5_eswitch_get_total_vports(dev);
int err;
int i;
steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
sizeof(*steering->esw_egress_root_ns),
GFP_KERNEL);
steering->esw_egress_root_ns =
kcalloc(total_vports,
sizeof(*steering->esw_egress_root_ns),
GFP_KERNEL);
if (!steering->esw_egress_root_ns)
return -ENOMEM;
for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
for (i = 0; i < total_vports; i++) {
err = init_egress_acl_root_ns(steering, i);
if (err)
goto cleanup_root_ns;
@@ -2634,16 +2636,18 @@ cleanup_root_ns:
static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
int total_vports = mlx5_eswitch_get_total_vports(dev);
int err;
int i;
steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
sizeof(*steering->esw_ingress_root_ns),
GFP_KERNEL);
steering->esw_ingress_root_ns =
kcalloc(total_vports,
sizeof(*steering->esw_ingress_root_ns),
GFP_KERNEL);
if (!steering->esw_ingress_root_ns)
return -ENOMEM;
for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
for (i = 0; i < total_vports; i++) {
err = init_ingress_acl_root_ns(steering, i);
if (err)
goto cleanup_root_ns;

View File

@@ -37,6 +37,37 @@
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
enum {
MCQS_IDENTIFIER_BOOT_IMG = 0x1,
MCQS_IDENTIFIER_OEM_NVCONFIG = 0x4,
MCQS_IDENTIFIER_MLNX_NVCONFIG = 0x5,
MCQS_IDENTIFIER_CS_TOKEN = 0x6,
MCQS_IDENTIFIER_DBG_TOKEN = 0x7,
MCQS_IDENTIFIER_GEARBOX = 0xA,
};
enum {
MCQS_UPDATE_STATE_IDLE,
MCQS_UPDATE_STATE_IN_PROGRESS,
MCQS_UPDATE_STATE_APPLIED,
MCQS_UPDATE_STATE_ACTIVE,
MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET,
MCQS_UPDATE_STATE_FAILED,
MCQS_UPDATE_STATE_CANCELED,
MCQS_UPDATE_STATE_BUSY,
};
enum {
MCQI_INFO_TYPE_CAPABILITIES = 0x0,
MCQI_INFO_TYPE_VERSION = 0x1,
MCQI_INFO_TYPE_ACTIVATION_METHOD = 0x5,
};
enum {
MCQI_FW_RUNNING_VERSION = 0,
MCQI_FW_STORED_VERSION = 1,
};
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
int outlen)
{
@@ -202,6 +233,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
if (MLX5_CAP_GEN(dev, event_cap)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
if (err)
return err;
}
return 0;
}
@@ -392,33 +429,49 @@ static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
}
static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
u16 component_index,
u32 *max_component_size,
u8 *log_mcda_word_size,
u16 *mcda_max_write_size)
u16 component_index, bool read_pending,
u8 info_type, u16 data_size, void *mcqi_data)
{
u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)];
int offset = MLX5_ST_SZ_DW(mcqi_reg);
u32 in[MLX5_ST_SZ_DW(mcqi_reg)];
u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_UN_SZ_DW(mcqi_reg_data)] = {};
u32 in[MLX5_ST_SZ_DW(mcqi_reg)] = {};
void *data;
int err;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(mcqi_reg, in, component_index, component_index);
MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap));
MLX5_SET(mcqi_reg, in, read_pending_component, read_pending);
MLX5_SET(mcqi_reg, in, info_type, info_type);
MLX5_SET(mcqi_reg, in, data_size, data_size);
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_MCQI, 0, 0);
MLX5_ST_SZ_BYTES(mcqi_reg) + data_size,
MLX5_REG_MCQI, 0, 0);
if (err)
goto out;
return err;
*max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size);
*log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size);
*mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size);
data = MLX5_ADDR_OF(mcqi_reg, out, data);
memcpy(mcqi_data, data, data_size);
out:
return err;
return 0;
}
static int mlx5_reg_mcqi_caps_query(struct mlx5_core_dev *dev, u16 component_index,
u32 *max_component_size, u8 *log_mcda_word_size,
u16 *mcda_max_write_size)
{
u32 mcqi_reg[MLX5_ST_SZ_DW(mcqi_cap)] = {};
int err;
err = mlx5_reg_mcqi_query(dev, component_index, 0,
MCQI_INFO_TYPE_CAPABILITIES,
MLX5_ST_SZ_BYTES(mcqi_cap), mcqi_reg);
if (err)
return err;
*max_component_size = MLX5_GET(mcqi_cap, mcqi_reg, max_component_size);
*log_mcda_word_size = MLX5_GET(mcqi_cap, mcqi_reg, log_mcda_word_size);
*mcda_max_write_size = MLX5_GET(mcqi_cap, mcqi_reg, mcda_max_write_size);
return 0;
}
struct mlx5_mlxfw_dev {
@@ -434,8 +487,13 @@ static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
return mlx5_reg_mcqi_query(dev, component_index, p_max_size,
p_align_bits, p_max_write_size);
if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi)) {
mlx5_core_warn(dev, "caps query isn't supported by running FW\n");
return -EOPNOTSUPP;
}
return mlx5_reg_mcqi_caps_query(dev, component_index, p_max_size,
p_align_bits, p_max_write_size);
}
static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
@@ -575,3 +633,130 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev,
return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev,
firmware, extack);
}
static int mlx5_reg_mcqi_version_query(struct mlx5_core_dev *dev,
u16 component_index, bool read_pending,
u32 *mcqi_version_out)
{
return mlx5_reg_mcqi_query(dev, component_index, read_pending,
MCQI_INFO_TYPE_VERSION,
MLX5_ST_SZ_BYTES(mcqi_version),
mcqi_version_out);
}
static int mlx5_reg_mcqs_query(struct mlx5_core_dev *dev, u32 *out,
u16 component_index)
{
u8 out_sz = MLX5_ST_SZ_BYTES(mcqs_reg);
u32 in[MLX5_ST_SZ_DW(mcqs_reg)] = {};
int err;
memset(out, 0, out_sz);
MLX5_SET(mcqs_reg, in, component_index, component_index);
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
out_sz, MLX5_REG_MCQS, 0, 0);
return err;
}
/* scans component index sequentially, to find the boot img index */
static int mlx5_get_boot_img_component_index(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(mcqs_reg)] = {};
u16 identifier, component_idx = 0;
bool quit;
int err;
do {
err = mlx5_reg_mcqs_query(dev, out, component_idx);
if (err)
return err;
identifier = MLX5_GET(mcqs_reg, out, identifier);
quit = !!MLX5_GET(mcqs_reg, out, last_index_flag);
quit |= identifier == MCQS_IDENTIFIER_BOOT_IMG;
} while (!quit && ++component_idx);
if (identifier != MCQS_IDENTIFIER_BOOT_IMG) {
mlx5_core_warn(dev, "mcqs: can't find boot_img component ix, last scanned idx %d\n",
component_idx);
return -EOPNOTSUPP;
}
return component_idx;
}
static int
mlx5_fw_image_pending(struct mlx5_core_dev *dev,
int component_index,
bool *pending_version_exists)
{
u32 out[MLX5_ST_SZ_DW(mcqs_reg)];
u8 component_update_state;
int err;
err = mlx5_reg_mcqs_query(dev, out, component_index);
if (err)
return err;
component_update_state = MLX5_GET(mcqs_reg, out, component_update_state);
if (component_update_state == MCQS_UPDATE_STATE_IDLE) {
*pending_version_exists = false;
} else if (component_update_state == MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET) {
*pending_version_exists = true;
} else {
mlx5_core_warn(dev,
"mcqs: can't read pending fw version while fw state is %d\n",
component_update_state);
return -ENODATA;
}
return 0;
}
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
u32 *running_ver, u32 *pending_ver)
{
u32 reg_mcqi_version[MLX5_ST_SZ_DW(mcqi_version)] = {};
bool pending_version_exists;
int component_index;
int err;
if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi) ||
!MLX5_CAP_MCAM_REG(dev, mcqs)) {
mlx5_core_warn(dev, "fw query isn't supported by the FW\n");
return -EOPNOTSUPP;
}
component_index = mlx5_get_boot_img_component_index(dev);
if (component_index < 0)
return component_index;
err = mlx5_reg_mcqi_version_query(dev, component_index,
MCQI_FW_RUNNING_VERSION,
reg_mcqi_version);
if (err)
return err;
*running_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);
err = mlx5_fw_image_pending(dev, component_index, &pending_version_exists);
if (err)
return err;
if (!pending_version_exists) {
*pending_ver = 0;
return 0;
}
err = mlx5_reg_mcqi_version_query(dev, component_index,
MCQI_FW_STORED_VERSION,
reg_mcqi_version);
if (err)
return err;
*pending_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);
return 0;
}
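mlx5_fw_version_query() returns the running and pending boot-image versions as raw 32-bit MCQI version words. The sketch below is illustrative only and is not the driver's devlink callback: it assumes the generic devlink info API (devlink_info_version_running_put() / devlink_info_version_stored_put()), and the "fw.mcqi" version name and hex formatting are placeholders, not what the driver actually reports.

/* Illustrative consumer sketch only -- not the driver's devlink callback.
 * Assumes <net/devlink.h> and <linux/mlx5/driver.h> are already included.
 * The running version is always reported; the stored (pending) version is
 * reported only when a non-zero word comes back.
 */
static int example_report_fw_versions(struct mlx5_core_dev *dev,
				      struct devlink_info_req *req)
{
	u32 running, stored;
	char buf[32];
	int err;

	err = mlx5_fw_version_query(dev, &running, &stored);
	if (err)
		return err;

	snprintf(buf, sizeof(buf), "%08x", running);
	err = devlink_info_version_running_put(req, "fw.mcqi", buf);
	if (err)
		return err;

	if (!stored)	/* no pending image awaiting activation */
		return 0;

	snprintf(buf, sizeof(buf), "%08x", stored);
	return devlink_info_version_stored_put(req, "fw.mcqi", buf);
}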


@@ -305,8 +305,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
!mlx5_sriov_is_enabled(dev1);
#ifdef CONFIG_MLX5_ESWITCH
roce_lag &= dev0->priv.eswitch->mode == SRIOV_NONE &&
dev1->priv.eswitch->mode == SRIOV_NONE;
roce_lag &= dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif
if (roce_lag)


@@ -75,7 +75,7 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev);
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
void mlx5_cq_tasklet_cb(unsigned long data);
@@ -97,7 +97,4 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
#endif


@@ -734,8 +734,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
struct mlx5_priv *priv = &dev->priv;
int err = 0;
priv->pci_dev_data = id->driver_data;
mutex_init(&dev->pci_status_mutex);
pci_set_drvdata(dev->pdev, dev);
dev->bar_addr = pci_resource_start(pdev, 0);
@@ -1255,7 +1254,6 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
mutex_init(&dev->pci_status_mutex);
mutex_init(&dev->intf_state_mutex);
mutex_init(&priv->bfregs.reg_head.lock);
@@ -1317,6 +1315,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev->device = &pdev->dev;
dev->pdev = pdev;
dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
MLX5_COREDEV_VF : MLX5_COREDEV_PF;
err = mlx5_mdev_init(dev, prof_sel);
if (err)
goto mdev_init_err;


@@ -205,6 +205,8 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
struct netlink_ext_ack *extack);
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
u32 *running_ver, u32 *stored_ver);
void mlx5e_init(void);
void mlx5e_cleanup(void);


@@ -126,7 +126,7 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
{
u8 hw_id[ETH_ALEN];
mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
mlx5_query_mac_address(dev, hw_id);
gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
addrconf_addr_eui48(&gid->raw[8], hw_id);
}


@@ -74,17 +74,11 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
int err;
int vf;
if (sriov->enabled_vfs) {
mlx5_core_warn(dev,
"failed to enable SRIOV on device, already enabled with %d vfs\n",
sriov->enabled_vfs);
return -EBUSY;
}
if (!MLX5_ESWITCH_MANAGER(dev))
goto enable_vfs_hca;
err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);
err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
if (err) {
mlx5_core_warn(dev,
"failed to enable eswitch SRIOV (%d)\n", err);
@@ -99,7 +93,6 @@ enable_vfs_hca:
continue;
}
sriov->vfs_ctx[vf].enabled = 1;
sriov->enabled_vfs++;
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
err = sriov_restore_guids(dev, vf);
if (err) {
@@ -118,13 +111,11 @@ enable_vfs_hca:
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int num_vfs = pci_num_vf(dev->pdev);
int err;
int vf;
if (!sriov->enabled_vfs)
goto out;
for (vf = 0; vf < sriov->num_vfs; vf++) {
for (vf = num_vfs - 1; vf >= 0; vf--) {
if (!sriov->vfs_ctx[vf].enabled)
continue;
err = mlx5_core_disable_hca(dev, vf + 1);
@@ -133,12 +124,10 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
continue;
}
sriov->vfs_ctx[vf].enabled = 0;
sriov->enabled_vfs--;
}
out:
if (MLX5_ESWITCH_MANAGER(dev))
mlx5_eswitch_disable_sriov(dev->priv.eswitch);
mlx5_eswitch_disable(dev->priv.eswitch);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -191,13 +180,11 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
return 0;
/* If sriov VFs exist in PCI level, enable them in device level */
return mlx5_device_enable_sriov(dev, sriov->num_vfs);
return mlx5_device_enable_sriov(dev, pci_num_vf(dev->pdev));
}
void mlx5_sriov_detach(struct mlx5_core_dev *dev)
@@ -210,22 +197,25 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
u16 host_total_vfs;
int err;
const u32 *out;
if (mlx5_core_is_ecpf_esw_manager(dev)) {
err = mlx5_esw_query_functions(dev, out, sizeof(out));
host_total_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_total_vfs);
out = mlx5_esw_query_functions(dev);
/* Old FW doesn't support getting total_vfs from esw func
* but supports getting it from pci_sriov.
*/
if (!err && host_total_vfs)
if (IS_ERR(out))
goto done;
host_total_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_total_vfs);
kvfree(out);
if (host_total_vfs)
return host_total_vfs;
}
done:
return pci_sriov_get_totalvfs(dev->pdev);
}


@@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
/* Mutex to hold while enabling or disabling RoCE */
@@ -155,11 +156,12 @@ int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
}
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
u16 vport, bool other, u8 *addr)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
u8 *out_addr;
u32 *out;
int err;
out = kvzalloc(outlen, GFP_KERNEL);
@@ -169,7 +171,12 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
nic_vport_context.permanent_address);
err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(query_nic_vport_context_in, in, other_vport, other);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
if (!err)
ether_addr_copy(addr, &out_addr[2]);
@@ -178,6 +185,12 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
}
EXPORT_SYMBOL_GPL(mlx5_query_mac_address);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
{
@@ -194,9 +207,7 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
MLX5_SET(modify_nic_vport_context_in, in,
field_select.permanent_address, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -291,9 +302,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
@@ -483,7 +492,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
MLX5_SET(modify_nic_vport_context_in, in,
field_select.node_guid, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -1157,3 +1166,17 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
return tmp;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
/**
* mlx5_eswitch_get_total_vports - Get total vports of the eswitch
*
* @dev: Pointer to core device
*
* mlx5_eswitch_get_total_vports returns total number of vports for
* the eswitch.
*/
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
}
EXPORT_SYMBOL(mlx5_eswitch_get_total_vports);
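mlx5_eswitch_get_total_vports() is exported so eswitch users can size per-vport state without duplicating the count. A minimal consumer sketch follows; the struct and function names are hypothetical, not mlx5 API, and the only assumption is the count computed above (special vports plus max VFs).

/* Hypothetical consumer sketch -- names are illustrative, not mlx5 API. */
struct example_rep {
	u16 vport;
	bool loaded;
};

static struct example_rep *example_alloc_rep_table(struct mlx5_core_dev *dev)
{
	u16 i, total = mlx5_eswitch_get_total_vports(dev);
	struct example_rep *reps;

	/* one entry per vport: special vports + max VFs, as computed above */
	reps = kcalloc(total, sizeof(*reps), GFP_KERNEL);
	if (!reps)
		return NULL;

	for (i = 0; i < total; i++)
		reps[i].vport = i;

	return reps;
}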