Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

All conflicts were simple overlapping changes except perhaps
for the Thunder driver.

That driver implements a change_mtu method specifically so it can send
a message to the hardware; if that message fails, it returns an error.

Normally a driver doesn't need an ndo_change_mtu method because most
MTU changes are just range checks, which are now handled generically.
But since this extra operation is needed in the Thunder driver, it has
to stay.
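
For drivers that only need the range check, the core now does the
validation itself.  A minimal sketch of that, assuming the new generic
min_mtu/max_mtu fields and ETH_MIN_MTU macro (the helper name below is
made up for illustration):

#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Sketch: advertise the legal MTU range at probe time and omit
 * .ndo_change_mtu entirely; dev_set_mtu() rejects values outside
 * [min_mtu, max_mtu] before the driver is ever called.
 */
static void example_set_mtu_range(struct net_device *netdev)
{
        netdev->min_mtu = ETH_MIN_MTU;  /* 68, the minimum Ethernet MTU */
        netdev->max_mtu = 9000;         /* hypothetical jumbo-frame limit */
}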

However, if the message send fails we have to restore the MTU to its
original value, because the entire call chain expects that if
ndo_change_mtu returns an error then the MTU did not change.
Therefore code is added to nicvf_change_mtu to remember the original
MTU, and to restore it if nicvf_update_hw_max_frs() fails.
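
Concretely, the fixed-up nicvf_change_mtu looks roughly like the
sketch below (illustrative, not necessarily the exact upstream code;
nicvf_update_hw_max_frs() and struct nicvf are the Thunder driver's
own):

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct nicvf *nic = netdev_priv(netdev);
        int orig_mtu = netdev->mtu;     /* value to roll back to on failure */

        netdev->mtu = new_mtu;

        /* Nothing to tell the hardware if the interface is down. */
        if (!netif_running(netdev))
                return 0;

        /* Send the new max frame size to the hardware.  If that fails,
         * restore the old MTU so an error from ndo_change_mtu really
         * means "the MTU did not change".
         */
        if (nicvf_update_hw_max_frs(nic, new_mtu)) {
                netdev->mtu = orig_mtu;
                return -EINVAL;
        }

        return 0;
}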

Signed-off-by: David S. Miller <davem@davemloft.net>

@@ -932,8 +932,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
                 if (err)
                         goto err_create;
         } else {
-                /* for now choose 64 bytes till we have a proper interface */
-                cqe_size = 64;
+                cqe_size = cache_line_size() == 128 ? 128 : 64;
                 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
                                        &index, &inlen);
                 if (err)


@@ -2311,14 +2311,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 {
         struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
         struct ib_event ibev;
-
+        bool fatal = false;
         u8 port = 0;
 
         switch (event) {
         case MLX5_DEV_EVENT_SYS_ERROR:
-                ibdev->ib_active = false;
                 ibev.event = IB_EVENT_DEVICE_FATAL;
                 mlx5_ib_handle_internal_error(ibdev);
+                fatal = true;
                 break;
 
         case MLX5_DEV_EVENT_PORT_UP:
@@ -2372,6 +2372,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 
         if (ibdev->ib_active)
                 ib_dispatch_event(&ibev);
+
+        if (fatal)
+                ibdev->ib_active = false;
 }
 
 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
@@ -3117,7 +3120,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
         }
         err = init_node_data(dev);
         if (err)
-                goto err_dealloc;
+                goto err_free_port;
 
         mutex_init(&dev->flow_db.lock);
         mutex_init(&dev->cap_mask_mutex);
@@ -3127,7 +3130,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
         if (ll == IB_LINK_LAYER_ETHERNET) {
                 err = mlx5_enable_roce(dev);
                 if (err)
-                        goto err_dealloc;
+                        goto err_free_port;
         }
 
         err = create_dev_resources(&dev->devr);


@@ -626,6 +626,8 @@ struct mlx5_ib_dev {
         struct mlx5_ib_resources devr;
         struct mlx5_mr_cache cache;
         struct timer_list delay_timer;
+        /* Prevents soft lock on massive reg MRs */
+        struct mutex slow_path_mutex;
         int fill_delay;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
         struct ib_odp_caps odp_caps;


@@ -610,6 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
         int err;
         int i;
 
+        mutex_init(&dev->slow_path_mutex);
         cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
         if (!cache->wq) {
                 mlx5_ib_warn(dev, "failed to create work queue\n");
@@ -1182,9 +1183,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                 goto error;
         }
 
-        if (!mr)
+        if (!mr) {
+                mutex_lock(&dev->slow_path_mutex);
                 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
                                 page_shift, access_flags);
+                mutex_unlock(&dev->slow_path_mutex);
+        }
 
         if (IS_ERR(mr)) {
                 err = PTR_ERR(mr);


@@ -2051,8 +2051,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
         mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
                     qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
-                    to_mcq(init_attr->recv_cq)->mcq.cqn,
-                    to_mcq(init_attr->send_cq)->mcq.cqn);
+                    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
+                    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
 
         qp->trans_qp.xrcdn = xrcdn;
@@ -4814,6 +4814,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
                                  udata->inlen))
                 return ERR_PTR(-EOPNOTSUPP);
 
+        if (init_attr->log_ind_tbl_size >
+            MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
+                mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
+                            init_attr->log_ind_tbl_size,
+                            MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
+                return ERR_PTR(-EINVAL);
+        }
+
         min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
         if (udata->outlen && udata->outlen < min_resp_len)
                 return ERR_PTR(-EINVAL);