Merge branches 'mlx4', 'mlx5' and 'ocrdma' into k.o/for-4.6

@@ -310,7 +310,7 @@ static void aliasguid_query_handler(int status,
 	if (status) {
 		pr_debug("(port: %d) failed: status = %d\n",
 			 cb_ctx->port, status);
-		rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
+		rec->time_to_run = ktime_get_boot_ns() + 1 * NSEC_PER_SEC;
 		goto out;
 	}

@@ -416,7 +416,7 @@ next_entry:
 			 be64_to_cpu((__force __be64)rec->guid_indexes),
 			 be64_to_cpu((__force __be64)applied_guid_indexes),
 			 be64_to_cpu((__force __be64)declined_guid_indexes));
-		rec->time_to_run = ktime_get_real_ns() +
+		rec->time_to_run = ktime_get_boot_ns() +
 			resched_delay_sec * NSEC_PER_SEC;
 	} else {
 		rec->status = MLX4_GUID_INFO_STATUS_SET;
@@ -708,7 +708,7 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
 		}
 	}
 	if (resched_delay_sec) {
-		u64 curr_time = ktime_get_real_ns();
+		u64 curr_time = ktime_get_boot_ns();

 		*resched_delay_sec = (low_record_time < curr_time) ? 0 :
 			div_u64((low_record_time - curr_time), NSEC_PER_SEC);

@@ -1643,6 +1643,56 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
 	return err;
 }

+static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
+				      struct ib_flow_attr *flow_attr,
+				      enum mlx4_net_trans_promisc_mode *type)
+{
+	int err = 0;
+
+	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
+	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
+	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
+		return -EOPNOTSUPP;
+	}
+
+	if (flow_attr->num_of_specs == 0) {
+		type[0] = MLX4_FS_MC_SNIFFER;
+		type[1] = MLX4_FS_UC_SNIFFER;
+	} else {
+		union ib_flow_spec *ib_spec;
+
+		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
+		if (ib_spec->type != IB_FLOW_SPEC_ETH)
+			return -EINVAL;
+
+		/* if all is zero than MC and UC */
+		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
+			type[0] = MLX4_FS_MC_SNIFFER;
+			type[1] = MLX4_FS_UC_SNIFFER;
+		} else {
+			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
+					    ib_spec->eth.mask.dst_mac[1],
+					    ib_spec->eth.mask.dst_mac[2],
+					    ib_spec->eth.mask.dst_mac[3],
+					    ib_spec->eth.mask.dst_mac[4],
+					    ib_spec->eth.mask.dst_mac[5]};
+
+			/* Above xor was only on MC bit, non empty mask is valid
+			 * only if this bit is set and rest are zero.
+			 */
+			if (!is_zero_ether_addr(&mac[0]))
+				return -EINVAL;
+
+			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
+				type[0] = MLX4_FS_MC_SNIFFER;
+			else
+				type[0] = MLX4_FS_UC_SNIFFER;
+		}
+	}
+
+	return err;
+}
+
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 				    struct ib_flow_attr *flow_attr,
 				    int domain)
@@ -1653,6 +1703,10 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
 	int is_bonded = mlx4_is_bonded(dev);

+	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	memset(type, 0, sizeof(type));

 	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
@@ -1663,7 +1717,19 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,

 	switch (flow_attr->type) {
 	case IB_FLOW_ATTR_NORMAL:
-		type[0] = MLX4_FS_REGULAR;
+		/* If dont trap flag (continue match) is set, under specific
+		 * condition traffic be replicated to given qp,
+		 * without stealing it
+		 */
+		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
+			err = mlx4_ib_add_dont_trap_rule(dev,
+							 flow_attr,
+							 type);
+			if (err)
+				goto err_free;
+		} else {
+			type[0] = MLX4_FS_REGULAR;
+		}
 		break;

 	case IB_FLOW_ATTR_ALL_DEFAULT:
@@ -1675,8 +1741,8 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 		break;

 	case IB_FLOW_ATTR_SNIFFER:
-		type[0] = MLX4_FS_UC_SNIFFER;
-		type[1] = MLX4_FS_MC_SNIFFER;
+		type[0] = MLX4_FS_MIRROR_RX_PORT;
+		type[1] = MLX4_FS_MIRROR_SX_PORT;
 		break;

 	default:
@@ -711,7 +711,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+			       struct ib_udata *udata);
 int mlx4_ib_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
 			       enum ib_mr_type mr_type,
|
@@ -32,6 +32,7 @@
  */

 #include <linux/slab.h>
+#include <rdma/ib_user_verbs.h>

 #include "mlx4_ib.h"

@@ -334,7 +335,8 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
 	return 0;
 }

-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+			       struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(pd->device);
 	struct mlx4_ib_mw *mw;