Merge remote-tracking branch 'ovl/rename2' into for-linus
@@ -333,6 +333,8 @@ static void remove_ep_tid(struct c4iw_ep *ep)
 
 	spin_lock_irqsave(&ep->com.dev->lock, flags);
 	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+	if (idr_is_empty(&ep->com.dev->hwtid_idr))
+		wake_up(&ep->com.dev->wait);
 	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
 }
 
@@ -2117,8 +2119,10 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 		}
 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
-		if (!ep->l2t)
+		if (!ep->l2t) {
+			dev_put(pdev);
 			goto out;
+		}
 		ep->mtu = pdev->mtu;
 		ep->tx_chan = cxgb4_port_chan(pdev);
 		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
@@ -872,9 +872,13 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 static void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);
+	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
 	idr_destroy(&ctx->dev->cqidr);
+	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
 	idr_destroy(&ctx->dev->qpidr);
+	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
 	idr_destroy(&ctx->dev->mmidr);
+	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
 	idr_destroy(&ctx->dev->hwtid_idr);
 	idr_destroy(&ctx->dev->stid_idr);
 	idr_destroy(&ctx->dev->atid_idr);
@@ -992,6 +996,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	mutex_init(&devp->rdev.stats.lock);
 	mutex_init(&devp->db_mutex);
 	INIT_LIST_HEAD(&devp->db_fc_list);
+	init_waitqueue_head(&devp->wait);
 	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
 
 	if (c4iw_debugfs_root) {
@@ -263,6 +263,7 @@ struct c4iw_dev {
 	struct idr stid_idr;
 	struct list_head db_fc_list;
 	u32 avail_ird;
+	wait_queue_head_t wait;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
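Taken together, the four cxgb4 hunks above add a wait-for-idle handshake to module teardown: each endpoint removal wakes a waitqueue once hwtid_idr drains, and c4iw_dealloc() blocks on that waitqueue before destroying the idr. Below is a minimal illustrative sketch of the same pattern using standard kernel primitives; the names (my_dev, obj_idr, my_obj_remove, my_dev_drain) are hypothetical and not from the driver.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_dev {
	spinlock_t lock;
	struct idr obj_idr;		/* outstanding objects */
	wait_queue_head_t wait;		/* woken when obj_idr drains */
};

static void my_dev_init(struct my_dev *dev)
{
	spin_lock_init(&dev->lock);
	idr_init(&dev->obj_idr);
	init_waitqueue_head(&dev->wait);	/* mirrors init_waitqueue_head(&devp->wait) */
}

/* Called as each object is released. */
static void my_obj_remove(struct my_dev *dev, int id)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	idr_remove(&dev->obj_idr, id);
	if (idr_is_empty(&dev->obj_idr))
		wake_up(&dev->wait);		/* mirrors remove_ep_tid() */
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* Called on teardown: block until every object is gone. */
static void my_dev_drain(struct my_dev *dev)
{
	wait_event(dev->wait, idr_is_empty(&dev->obj_idr));	/* mirrors c4iw_dealloc() */
	idr_destroy(&dev->obj_idr);
}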
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 
 		/* Generate GUID changed event */
 		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+			if (mlx4_is_master(dev->dev)) {
+				union ib_gid gid;
+				int err = 0;
+
+				if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+					err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+				else
+					gid.global.subnet_prefix =
+						eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+				if (err) {
+					pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+						port, err);
+				} else {
+					pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+						 port,
+						 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+						 be64_to_cpu(gid.global.subnet_prefix));
+					atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+						     be64_to_cpu(gid.global.subnet_prefix));
+				}
+			}
 			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
 			/*if master, notify all slaves*/
 			if (mlx4_is_master(dev->dev))
@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
 		if (err)
 			goto demux_err;
 		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+		atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+			     be64_to_cpu(gid.global.subnet_prefix));
 		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
 				      &dev->sriov.sqps[i]);
 		if (err)
@@ -2202,6 +2202,9 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
 	bool per_port = !!(ibdev->dev->caps.flags2 &
			   MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
 
+	if (mlx4_is_slave(ibdev->dev))
+		return 0;
+
 	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
 		/* i == 1 means we are building port counters */
 		if (i && !per_port)
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
 		if (!group->members[i])
 			leave_state |= (1 << i);
 
-	return leave_state & (group->rec.scope_join_state & 7);
+	return leave_state & (group->rec.scope_join_state & 0xf);
 }
 
 static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
 		} else
 			mcg_warn_group(group, "DRIVER BUG\n");
 	} else if (group->state == MCAST_LEAVE_SENT) {
-		if (group->rec.scope_join_state & 7)
-			group->rec.scope_join_state &= 0xf8;
+		if (group->rec.scope_join_state & 0xf)
+			group->rec.scope_join_state &= 0xf0;
 		group->state = MCAST_IDLE;
 		mutex_unlock(&group->lock);
 		if (release_group(group, 1))
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
 static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
 {
-	u8 group_join_state = group->rec.scope_join_state & 7;
+	u8 group_join_state = group->rec.scope_join_state & 0xf;
 	int ref = 0;
 	u16 status;
 	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
 		u8 cur_join_state;
 
 		resp_join_state = ((struct ib_sa_mcmember_data *)
-			group->response_sa_mad.data)->scope_join_state & 7;
-		cur_join_state = group->rec.scope_join_state & 7;
+			group->response_sa_mad.data)->scope_join_state & 0xf;
+		cur_join_state = group->rec.scope_join_state & 0xf;
 
 		if (method == IB_MGMT_METHOD_GET_RESP) {
 			/* successfull join */
@@ -710,7 +710,7 @@ process_requests:
 	req = list_first_entry(&group->pending_list, struct mcast_req,
			       group_list);
 	sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
-	req_join_state = sa_data->scope_join_state & 0x7;
+	req_join_state = sa_data->scope_join_state & 0xf;
 
 	/* For a leave request, we will immediately answer the VF, and
 	 * update our internal counters. The actual leave will be sent
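The mcg.c hunks above all widen the join-state mask from 3 bits to 4. In the SA MCMemberRecord, the scope_join_state byte packs a 4-bit scope in the high nibble and a 4-bit join state in the low nibble, so masking with 0x7 silently drops the fourth join-state bit (send-only full member). The following is a compilable userspace sketch of the nibble split, not kernel code; the constant names and example value are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define JOIN_STATE_MASK	0xf	/* low nibble: join state is 4 bits, not 3 */
#define SCOPE_SHIFT	4	/* high nibble: scope */

int main(void)
{
	uint8_t scope_join_state = 0x58;	/* example: scope 5, join state 0x8 */
	uint8_t join_state = scope_join_state & JOIN_STATE_MASK;
	uint8_t scope = scope_join_state >> SCOPE_SHIFT;

	/* masking with 0x7 instead of 0xf would report join_state 0 here */
	printf("scope=%u join_state=0x%x\n", scope, join_state);
	return 0;
}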
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
 	struct workqueue_struct *wq;
 	struct workqueue_struct *ud_wq;
 	spinlock_t ud_lock;
-	__be64 subnet_prefix;
+	atomic64_t subnet_prefix;
 	__be64 guid_cache[128];
 	struct mlx4_ib_dev *dev;
 	/* the following lock protects both mcg_table and mcg_mgid0_list */
@@ -2493,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
 		sqp->ud_header.grh.flow_label    =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
 		sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
-		if (is_eth)
+		if (is_eth) {
 			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
-		else {
-			if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
-				/* When multi-function is enabled, the ib_core gid
-				 * indexes don't necessarily match the hw ones, so
-				 * we must use our own cache */
-				sqp->ud_header.grh.source_gid.global.subnet_prefix =
-					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-								       subnet_prefix;
-				sqp->ud_header.grh.source_gid.global.interface_id =
-					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-						       guid_cache[ah->av.ib.gid_index];
-			} else
-				ib_get_cached_gid(ib_dev,
-						  be32_to_cpu(ah->av.ib.port_pd) >> 24,
-						  ah->av.ib.gid_index,
-						  &sqp->ud_header.grh.source_gid, NULL);
+		} else {
+			if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+				/* When multi-function is enabled, the ib_core gid
+				 * indexes don't necessarily match the hw ones, so
+				 * we must use our own cache
+				 */
+				sqp->ud_header.grh.source_gid.global.subnet_prefix =
+					cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
+								    demux[sqp->qp.port - 1].
+								    subnet_prefix)));
+				sqp->ud_header.grh.source_gid.global.interface_id =
+					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+						       guid_cache[ah->av.ib.gid_index];
+			} else {
+				ib_get_cached_gid(ib_dev,
+						  be32_to_cpu(ah->av.ib.port_pd) >> 24,
+						  ah->av.ib.gid_index,
+						  &sqp->ud_header.grh.source_gid, NULL);
+			}
 		}
 		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
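The mlx4 hunks above switch the per-port subnet_prefix from a plain __be64 to an atomic64_t, so the QP1 transmit path can read it while the port-management event handler updates it, without a lock: the value is kept in host order inside the atomic and converted with be64_to_cpu()/cpu_to_be64() at the boundaries. A minimal sketch of that storage pattern follows; the names (demux_sketch, sketch_update_prefix, sketch_read_prefix) are hypothetical, not from the driver.

#include <linux/atomic.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct demux_sketch {
	atomic64_t subnet_prefix;	/* host-order value inside the atomic */
};

/* Writer side, e.g. a port-management change event. */
static void sketch_update_prefix(struct demux_sketch *ctx, __be64 wire_prefix)
{
	atomic64_set(&ctx->subnet_prefix, be64_to_cpu(wire_prefix));
}

/* Reader side, e.g. building the GRH of a QP1 MAD. */
static __be64 sketch_read_prefix(struct demux_sketch *ctx)
{
	return cpu_to_be64(atomic64_read(&ctx->subnet_prefix));
}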
@@ -288,7 +288,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
 
 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
 {
-	return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+	return 0;
 }
 
 enum {
@@ -1428,6 +1430,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
					     dmac_47_16),
				ib_spec->eth.val.dst_mac);
 
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+					     smac_47_16),
+				ib_spec->eth.mask.src_mac);
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+					     smac_47_16),
+				ib_spec->eth.val.src_mac);
+
 		if (ib_spec->eth.mask.vlan_tag) {
 			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
				 vlan_tag, 1);