Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull Mellanox rdma updates from Doug Ledford:
"Mellanox specific updates for 4.11 merge window
Because the Mellanox code required being based on a net-next tree, I
keept it separate from the remainder of the RDMA stack submission that
is based on 4.10-rc3.
This branch contains:
- Various mlx4 and mlx5 fixes and minor changes
- Support for adding a tag match rule to flow specs
- Support for cvlan offload operation for raw ethernet QPs
- A change to the core IB code to recognize raw eth capabilities and
enumerate them (touches non-Mellanox code)
- Implicit On-Demand Paging memory registration support"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (40 commits)
IB/mlx5: Fix configuration of port capabilities
IB/mlx4: Take source GID by index from HW GID table
IB/mlx5: Fix blue flame buffer size calculation
IB/mlx4: Remove unused variable from function declaration
IB: Query ports via the core instead of direct into the driver
IB: Add protocol for USNIC
IB/mlx4: Support raw packet protocol
IB/mlx5: Support raw packet protocol
IB/core: Add raw packet protocol
IB/mlx5: Add implicit MR support
IB/mlx5: Expose MR cache for mlx5_ib
IB/mlx5: Add null_mkey access
IB/umem: Indicate that process is being terminated
IB/umem: Update on demand page (ODP) support
IB/core: Add implicit MR flag
IB/mlx5: Support creation of a WQ with scatter FCS offload
IB/mlx5: Enable QP creation with cvlan offload
IB/mlx5: Enable WQ creation and modification with cvlan offload
IB/mlx5: Expose vlan offloads capabilities
IB/uverbs: Enable QP creation with cvlan offload
...
@@ -79,11 +79,15 @@ struct ib_umem_odp {
	struct completion	notifier_completion;
	int			dying;
	struct work_struct	work;
};

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
				  unsigned long addr,
				  size_t size);

void ib_umem_odp_release(struct ib_umem *umem);
@@ -117,10 +121,12 @@ typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end,
				  umem_call_back cb, void *cookie);

struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root,
					     u64 start, u64 last);
struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node,
					    u64 start, u64 last);

/*
 * Find first region intersecting with address range.
 * Return NULL if not found
 */
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root *root,
				       u64 addr, u64 length);

static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
					     unsigned long mmu_seq)
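For illustration, a consumer of this interval-tree API supplies a
umem_call_back and walks every umem overlapping an address range. A minimal
sketch, where 'umem_tree' stands in for the caller's rb_root and the callback
name is invented for the example:

	/* Count the ODP umems intersecting [start, last). */
	static int count_overlapping_cb(struct ib_umem *item, u64 start,
					u64 end, void *cookie)
	{
		int *count = cookie;

		(*count)++;
		return 0;
	}

	/* At the call site: */
	int count = 0;

	rbt_ib_umem_for_each_in_range(&umem_tree, start, last,
				      count_overlapping_cb, &count);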
@@ -153,6 +159,13 @@ static inline int ib_umem_odp_get(struct ib_ucontext *context,
	return -EINVAL;
}

static inline struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
						unsigned long addr,
						size_t size)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem *umem) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
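The stubs above keep call sites buildable when ODP is compiled out; callers
need not guard each call with the config option. A minimal usage sketch, where
ucontext, addr, and size are assumed to come from the surrounding driver
context:

	struct ib_umem *odp_umem;

	/* Back [addr, addr + size) with an ODP umem; pages fault in on demand. */
	odp_umem = ib_alloc_odp_umem(ucontext, addr, size);
	if (IS_ERR(odp_umem))
		return PTR_ERR(odp_umem);	/* -EINVAL when ODP is compiled out */

	/* ... register MRs over the range, service page faults ... */

	ib_umem_odp_release(odp_umem);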
@@ -207,6 +207,7 @@ enum ib_device_cap_flags {
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1 << 24),
	IB_DEVICE_RC_IP_CSUM		= (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM		= (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
@@ -220,6 +221,7 @@ enum ib_device_cap_flags {
	IB_DEVICE_ON_DEMAND_PAGING	= (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG		= (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION	= (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS	= (1ULL << 34),
};
@@ -241,7 +243,8 @@ enum ib_atomic_cap {
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT	= 1 << 1,
};

enum ib_odp_transport_cap_bits {
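A consumer can test the new bit in the cached device attributes before
attempting an implicit registration; a minimal sketch, where 'device' stands
in for a valid struct ib_device pointer:

	if (device->attrs.odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
		/* The device accepts MRs covering the whole address space,
		 * with pages faulted in on demand (implicit ODP).
		 */
		use_implicit_odp = true;
	}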
@@ -330,6 +333,7 @@ struct ib_device_attr {
	uint64_t		hca_core_clock; /* in KHZ */
	struct ib_rss_caps	rss_caps;
	u32			max_wq_type_rq;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
};

enum ib_mtu {
@@ -499,6 +503,8 @@ static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IBA_IB	(RDMA_CORE_CAP_PROT_IB	\
					| RDMA_CORE_CAP_IB_MAD \
@@ -522,6 +528,10 @@ static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
#define RDMA_CORE_PORT_INTEL_OPA	(RDMA_CORE_PORT_IBA_IB	\
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
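Providers advertise these protocol bits from their get_port_immutable hook.
A hedged sketch of what an Ethernet-port driver might do; the function name
and the RoCE bit are assumptions modeled on existing drivers, not taken from
this diff:

	static int my_get_port_immutable(struct ib_device *ibdev, u8 port_num,
					 struct ib_port_immutable *immutable)
	{
		struct ib_port_attr attr;
		int err;

		err = ib_query_port(ibdev, port_num, &attr);
		if (err)
			return err;

		immutable->pkey_tbl_len = attr.pkey_tbl_len;
		immutable->gid_tbl_len = attr.gid_tbl_len;
		/* Report raw packet support alongside the port's RoCE caps. */
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
					    RDMA_CORE_PORT_RAW_PACKET;
		return 0;
	}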
@@ -1019,6 +1029,7 @@ enum ib_qp_create_flags {
	IB_QP_CREATE_SIGNATURE_EN	= 1 << 6,
	IB_QP_CREATE_USE_GFP_NOIO	= 1 << 7,
	IB_QP_CREATE_SCATTER_FCS	= 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING	= 1 << 9,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START	= 1 << 26,
	IB_QP_CREATE_RESERVED_END	= 1 << 31,
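Requesting the new offload at QP creation time looks like the following
sketch; the PD and CQs are placeholders, and a real caller would first
confirm IB_RAW_PACKET_CAP_CVLAN_STRIPPING (defined further down) in the
device attributes:

	struct ib_qp_init_attr init_attr = {
		.qp_type	= IB_QPT_RAW_PACKET,
		.send_cq	= send_cq,	/* placeholder CQs */
		.recv_cq	= recv_cq,
		.create_flags	= IB_QP_CREATE_CVLAN_STRIPPING,
		.cap		= {
			.max_send_wr	= 64,
			.max_recv_wr	= 64,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};
	struct ib_qp *qp = ib_create_qp(pd, &init_attr);

	if (IS_ERR(qp))
		return PTR_ERR(qp);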
@@ -1470,6 +1481,18 @@ struct ib_srq {
	} ext;
};

enum ib_raw_packet_caps {
	/* Strip cvlan from incoming packet and report it in the matching work
	 * completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scatter FCS field of an incoming packet to host memory is supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
};

enum ib_wq_type {
	IB_WQT_RQ
};
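Since the queried device attributes are cached in struct ib_device, a
consumer can test these capability bits directly; a minimal sketch, with
'device' an assumed struct ib_device pointer:

	u32 caps = device->attrs.raw_packet_caps;
	bool can_strip_cvlan = caps & IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
	bool can_scatter_fcs = caps & IB_RAW_PACKET_CAP_SCATTER_FCS;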
@@ -1493,6 +1516,11 @@ struct ib_wq {
	atomic_t		usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
};

struct ib_wq_init_attr {
	void		       *wq_context;
	enum ib_wq_type		wq_type;
@@ -1500,16 +1528,20 @@ struct ib_wq_init_attr {
	u32			max_sge;
	struct ib_cq	       *cq;
	void		      (*event_handler)(struct ib_event *, void *);
	u32			create_flags; /* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE	= 1 << 0,
	IB_WQ_CUR_STATE	= 1 << 1,
	IB_WQ_FLAGS	= 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state	wq_state;
	enum ib_wq_state	curr_wq_state;
	u32			flags; /* Use enum ib_wq_flags */
	u32			flags_mask; /* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
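Toggling an offload on an existing WQ goes through ib_modify_wq with
IB_WQ_FLAGS set in the mask; a minimal sketch, where 'wq' is an assumed
previously created ib_wq:

	struct ib_wq_attr wq_attr = {
		.flags		= IB_WQ_FLAGS_CVLAN_STRIPPING,
		.flags_mask	= IB_WQ_FLAGS_CVLAN_STRIPPING,
	};
	int err;

	/* Only IB_WQ_FLAGS is masked in, so the WQ state is left untouched. */
	err = ib_modify_wq(wq, &wq_attr, IB_WQ_FLAGS);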
@@ -1618,6 +1650,8 @@ enum ib_flow_spec_type {
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG		= 0x1000,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
@@ -1740,6 +1774,12 @@ struct ib_flow_spec_tunnel {
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	type;
	u16			size;
	u32			tag_id;
};

union ib_flow_spec {
	struct {
		u32			type;
@@ -1751,6 +1791,7 @@ union ib_flow_spec {
	struct ib_flow_spec_tcp_udp	tcp_udp;
	struct ib_flow_spec_ipv6	ipv6;
	struct ib_flow_spec_tunnel	tunnel;
	struct ib_flow_spec_action_tag	flow_tag;
};

struct ib_flow_attr {
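A flow tag is appended to a rule's specification list like any other spec;
when a received packet matches the rule, the provider can report tag_id with
the completion so the consumer knows which rule fired. A minimal sketch of
filling the spec (the tag value is an arbitrary example):

	union ib_flow_spec spec = {
		.flow_tag = {
			.type	= IB_FLOW_SPEC_ACTION_TAG,
			.size	= sizeof(struct ib_flow_spec_action_tag),
			.tag_id	= 0x1234,	/* example tag */
		},
	};

The filled spec is then copied after the match specs in the buffer handed to
ib_create_flow().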
@@ -2333,6 +2374,16 @@ static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
	       rdma_protocol_roce(device, port_num);
}

static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
}

static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
}

/**
 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
 * Management Datagrams.
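With the two new helpers, core code can branch on a port's protocol without
driver-specific knowledge; a minimal sketch iterating an assumed 'device':

	u8 port;

	for (port = rdma_start_port(device); port <= rdma_end_port(device);
	     port++) {
		if (rdma_protocol_raw_packet(device, port))
			pr_debug("port %u exposes raw packet QPs\n", port);
		else if (rdma_protocol_usnic(device, port))
			pr_debug("port %u is a usNIC port\n", port);
	}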