Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma fixes from Doug Ledford:
 "Round three of 4.8 rc fixes. This is likely the last rdma pull
  request this cycle. The new rxe driver had a few issues (you probably
  saw the boot bot bug report) and they should be addressed now. There
  are a couple of other fixes here, mainly mlx4. There are still two
  outstanding issues that need to be resolved but I don't think their
  fix will make this kernel cycle.

  Summary:

   - Various fixes to rdmavt, ipoib, mlx5, mlx4, rxe"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/rdmavt: Don't vfree a kzalloc'ed memory region
  IB/rxe: Fix kmem_cache leak
  IB/rxe: Fix race condition between requester and completer
  IB/rxe: Fix duplicate atomic request handling
  IB/rxe: Fix kernel panic in udp_setup_tunnel
  IB/mlx5: Set source mac address in FTE
  IB/mlx5: Enable MAD_IFC commands for IB ports only
  IB/mlx4: Diagnostic HW counters are not supported in slave mode
  IB/mlx4: Use correct subnet-prefix in QP1 mads under SR-IOV
  IB/mlx4: Fix code indentation in QP1 MAD flow
  IB/mlx4: Fix incorrect MC join state bit-masking on SR-IOV
  IB/ipoib: Don't allow MC joins during light MC flush
  IB/rxe: fix GFP_KERNEL in spinlock context
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 
 		/* Generate GUID changed event */
 		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+			if (mlx4_is_master(dev->dev)) {
+				union ib_gid gid;
+				int err = 0;
+
+				if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+					err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+				else
+					gid.global.subnet_prefix =
+						eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+				if (err) {
+					pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+						port, err);
+				} else {
+					pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+						 port,
+						 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+						 be64_to_cpu(gid.global.subnet_prefix));
+					atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+						     be64_to_cpu(gid.global.subnet_prefix));
+				}
+			}
 			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
 			/*if master, notify all slaves*/
 			if (mlx4_is_master(dev->dev))
@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
 		if (err)
 			goto demux_err;
 		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+		atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+			     be64_to_cpu(gid.global.subnet_prefix));
 		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
 				      &dev->sriov.sqps[i]);
 		if (err)
@@ -2202,6 +2202,9 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
 	bool per_port = !!(ibdev->dev->caps.flags2 &
 			   MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
 
+	if (mlx4_is_slave(ibdev->dev))
+		return 0;
+
 	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
 		/* i == 1 means we are building port counters */
 		if (i && !per_port)
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
 		if (!group->members[i])
 			leave_state |= (1 << i);
 
-	return leave_state & (group->rec.scope_join_state & 7);
+	return leave_state & (group->rec.scope_join_state & 0xf);
 }
 
 static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
 		} else
 			mcg_warn_group(group, "DRIVER BUG\n");
 	} else if (group->state == MCAST_LEAVE_SENT) {
-		if (group->rec.scope_join_state & 7)
-			group->rec.scope_join_state &= 0xf8;
+		if (group->rec.scope_join_state & 0xf)
+			group->rec.scope_join_state &= 0xf0;
 		group->state = MCAST_IDLE;
 		mutex_unlock(&group->lock);
 		if (release_group(group, 1))
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
 static int handle_join_req(struct mcast_group *group, u8 join_mask,
 			   struct mcast_req *req)
 {
-	u8 group_join_state = group->rec.scope_join_state & 7;
+	u8 group_join_state = group->rec.scope_join_state & 0xf;
 	int ref = 0;
 	u16 status;
 	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
 		u8 cur_join_state;
 
 		resp_join_state = ((struct ib_sa_mcmember_data *)
-				group->response_sa_mad.data)->scope_join_state & 7;
-		cur_join_state = group->rec.scope_join_state & 7;
+				group->response_sa_mad.data)->scope_join_state & 0xf;
+		cur_join_state = group->rec.scope_join_state & 0xf;
 
 		if (method == IB_MGMT_METHOD_GET_RESP) {
 			/* successfull join */
@@ -710,7 +710,7 @@ process_requests:
 		req = list_first_entry(&group->pending_list, struct mcast_req,
 				       group_list);
 		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
-		req_join_state = sa_data->scope_join_state & 0x7;
+		req_join_state = sa_data->scope_join_state & 0xf;
 
 		/* For a leave request, we will immediately answer the VF, and
 		 * update our internal counters. The actual leave will be sent
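The mcg.c hunks above all make the same correction: widening the join-state mask from 0x7 to 0xf. In an MCMemberRecord the scope_join_state byte packs the scope into the high nibble and the join state into the low nibble, and the join state includes a fourth bit (send-only full member) that the old 0x7 mask silently discarded. A minimal userspace sketch of the masking, assuming the standard MCMemberRecord join-state bit layout (illustration only, not driver code):

/* Sketch: why masking scope_join_state with 0x7 loses information.
 * The low nibble is the join state; bit 3 is the send-only full
 * member flag that the old 0x7 mask dropped.
 */
#include <stdio.h>
#include <stdint.h>

#define JOIN_FULL_MEMBER       (1u << 0)
#define JOIN_NON_MEMBER        (1u << 1)
#define JOIN_SENDONLY_NON_MBR  (1u << 2)
#define JOIN_SENDONLY_FULL     (1u << 3)   /* lost with the old mask */

int main(void)
{
	uint8_t scope_join_state = 0x20 | JOIN_SENDONLY_FULL; /* scope 2, join state 0x8 */

	printf("old mask 0x7 -> 0x%x\n", (unsigned)(scope_join_state & 0x7)); /* 0x0: state lost */
	printf("new mask 0xf -> 0x%x\n", (unsigned)(scope_join_state & 0xf)); /* 0x8: preserved */
	return 0;
}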
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
 	struct workqueue_struct *wq;
 	struct workqueue_struct *ud_wq;
 	spinlock_t ud_lock;
-	__be64 subnet_prefix;
+	atomic64_t subnet_prefix;
 	__be64 guid_cache[128];
 	struct mlx4_ib_dev *dev;
 	/* the following lock protects both mcg_table and mcg_mgid0_list */
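This header change is what ties the earlier hunks together: subnet_prefix becomes an atomic64_t kept in host byte order, so the port-event handler can publish a new prefix with atomic64_set() while the QP1 header builder reads it with atomic64_read(), with no shared lock between the two paths. A minimal userspace analogue of that publish/read pattern using C11 atomics (illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdint.h>

/* Host-byte-order prefix, standing in for the atomic64_t field. */
static _Atomic uint64_t subnet_prefix;

/* Writer side: the port_mgmt_change event path stores the new prefix. */
void publish_prefix(uint64_t prefix_host_order)
{
	atomic_store(&subnet_prefix, prefix_host_order);
}

/* Reader side: the QP1 MAD build path loads it for each header built. */
uint64_t read_prefix(void)
{
	return atomic_load(&subnet_prefix);
}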
@@ -2493,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
 		sqp->ud_header.grh.flow_label    =
 			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
 		sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
-		if (is_eth)
+		if (is_eth) {
 			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
-		else {
-			if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
-				/* When multi-function is enabled, the ib_core gid
-				 * indexes don't necessarily match the hw ones, so
-				 * we must use our own cache */
-				sqp->ud_header.grh.source_gid.global.subnet_prefix =
-					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-						       subnet_prefix;
-				sqp->ud_header.grh.source_gid.global.interface_id =
-					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-					       guid_cache[ah->av.ib.gid_index];
-			} else
-				ib_get_cached_gid(ib_dev,
-						  be32_to_cpu(ah->av.ib.port_pd) >> 24,
-						  ah->av.ib.gid_index,
-						  &sqp->ud_header.grh.source_gid, NULL);
+		} else {
+			if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+				/* When multi-function is enabled, the ib_core gid
+				 * indexes don't necessarily match the hw ones, so
+				 * we must use our own cache
+				 */
+				sqp->ud_header.grh.source_gid.global.subnet_prefix =
+					cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
+								    demux[sqp->qp.port - 1].
+								    subnet_prefix)));
+				sqp->ud_header.grh.source_gid.global.interface_id =
+					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+					       guid_cache[ah->av.ib.gid_index];
+			} else {
+				ib_get_cached_gid(ib_dev,
+						  be32_to_cpu(ah->av.ib.port_pd) >> 24,
+						  ah->av.ib.gid_index,
+						  &sqp->ud_header.grh.source_gid, NULL);
+			}
 		}
 		memcpy(sqp->ud_header.grh.destination_gid.raw,
 		       ah->av.ib.dgid, 16);
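For reference, the source GID assembled above is the 128-bit concatenation of the subnet prefix (upper 64 bits, converted back to big-endian from the host-order cache) and the interface ID (lower 64 bits, taken as-is from the per-port guid_cache). A userspace sketch of that assembly; gid128, make_source_gid, and cpu_to_be64_sketch are illustrative names, not driver API:

#include <stdint.h>
#include <arpa/inet.h>  /* htonl */

/* A 128-bit GID split into its two 64-bit halves, both big-endian. */
struct gid128 {
	uint64_t subnet_prefix;
	uint64_t interface_id;
};

/* Portable host-to-big-endian conversion for the cached prefix. */
static uint64_t cpu_to_be64_sketch(uint64_t x)
{
	return ((uint64_t)htonl((uint32_t)(x & 0xffffffffu)) << 32) |
	       htonl((uint32_t)(x >> 32));
}

/* Build the GID from the host-order cached prefix and the cached
 * (already big-endian) interface ID, mirroring the two assignments
 * in the hunk above.
 */
static struct gid128 make_source_gid(uint64_t cached_prefix_host,
				     uint64_t cached_iid_be)
{
	struct gid128 gid = {
		.subnet_prefix = cpu_to_be64_sketch(cached_prefix_host),
		.interface_id  = cached_iid_be,
	};
	return gid;
}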
@@ -288,7 +288,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
 
 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
 {
-	return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+	return 0;
 }
 
 enum {
@@ -1428,6 +1430,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
 					     dmac_47_16),
 				ib_spec->eth.val.dst_mac);
 
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+					     smac_47_16),
+				ib_spec->eth.mask.src_mac);
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+					     smac_47_16),
+				ib_spec->eth.val.src_mac);
+
 		if (ib_spec->eth.mask.vlan_tag) {
 			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
 				 vlan_tag, 1);
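The fix above mirrors what was already done for the destination MAC: an mlx5 flow-table match is a mask/value pair, where the "_c" (criteria) block selects which bits are compared and the "_v" (value) block holds the expected bits, so a rule's source MAC must be copied into both. A small standalone sketch of that mask/value model (struct and function names are illustrative, not the mlx5 API):

#include <stdbool.h>
#include <stdint.h>

/* A MAC match rule: mask plays the "_c" role, value the "_v" role. */
struct mac_match {
	uint8_t mask[6];
	uint8_t value[6];
};

/* A packet field matches when every bit selected by the mask agrees. */
static bool mac_matches(const struct mac_match *m, const uint8_t pkt[6])
{
	for (int i = 0; i < 6; i++)
		if ((pkt[i] & m->mask[i]) != (m->value[i] & m->mask[i]))
			return false;
	return true;
}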