Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "For the most part this is just a minor -rc cycle for the rdma
  subsystem. Even given that this is all of the -rc patches since the
  merge window closed, it's still only about 25 patches:

   - Multiple i40iw, nes, iw_cxgb4, hfi1, qib, mlx4, mlx5 fixes

   - A few upper layer protocol fixes (IPoIB, iSER, SRP)

   - A modest number of core fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (26 commits)
  RDMA/SA: Fix kernel panic in CMA request handler flow
  RDMA/umem: Fix missing mmap_sem in get umem ODP call
  RDMA/core: not to set page dirty bit if it's already set.
  RDMA/uverbs: Declare local function static and add brackets to sizeof
  RDMA/netlink: Reduce exposure of RDMA netlink functions
  RDMA/srp: Fix NULL deref at srp_destroy_qp()
  RDMA/IPoIB: Limit the ipoib_dev_uninit_default scope
  RDMA/IPoIB: Replace netdev_priv with ipoib_priv for ipoib_get_link_ksettings
  RDMA/qedr: add null check before pointer dereference
  RDMA/mlx5: set UMR wqe fence according to HCA cap
  net/mlx5: Define interface bits for fencing UMR wqe
  RDMA/mlx4: Fix MAD tunneling when SRIOV is enabled
  RDMA/qib,hfi1: Fix MR reference count leak on write with immediate
  RDMA/hfi1: Defer setting VL15 credits to link-up interrupt
  RDMA/hfi1: change PCI bar addr assignments to Linux API functions
  RDMA/hfi1: fix array termination by appending NULL to attr array
  RDMA/iw_cxgb4: fix the calculation of ipv6 header size
  RDMA/iw_cxgb4: calculate t4_eq_status_entries properly
  RDMA/iw_cxgb4: Avoid touch after free error in ARP failure handlers
  RDMA/nes: ACK MPA Reply frame
  ...
Linus Torvalds, 2017-06-04 10:41:32 -07:00
37 changed files with 194 additions and 170 deletions

include/linux/mlx5/mlx5_ifc.h:

@@ -766,6 +766,12 @@ enum {
	MLX5_CAP_PORT_TYPE_ETH = 0x1,
};

+enum {
+	MLX5_CAP_UMR_FENCE_STRONG = 0x0,
+	MLX5_CAP_UMR_FENCE_SMALL  = 0x1,
+	MLX5_CAP_UMR_FENCE_NONE   = 0x2,
+};

struct mlx5_ifc_cmd_hca_cap_bits {
	u8 reserved_at_0[0x80];

@@ -875,7 +881,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
	u8 reserved_at_202[0x1];
	u8 ipoib_enhanced_offloads[0x1];
	u8 ipoib_basic_offloads[0x1];
-	u8 reserved_at_205[0xa];
+	u8 reserved_at_205[0x5];
+	u8 umr_fence[0x2];
+	u8 reserved_at_20c[0x3];
	u8 drain_sigerr[0x1];
	u8 cmdif_checksum[0x2];
	u8 sigerr_cqe[0x1];
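
The hunks above carve a 2-bit umr_fence field out of a previously reserved range
of the HCA capability layout, with its legal values named by the new
MLX5_CAP_UMR_FENCE_* enum. As a rough sketch of how a consumer such as the
mlx5_ib driver can use it, the capability is read once and translated into the
fence mode later placed in UMR work-queue entries. The helper name below is made
up for illustration; MLX5_CAP_GEN() and the MLX5_FENCE_MODE_* constants are
assumed from the existing mlx5 headers:

	/* Illustrative only: translate the reported umr_fence capability
	 * into the fence mode used when posting UMR WQEs.
	 */
	static u8 pick_umr_fence(struct mlx5_core_dev *mdev)
	{
		switch (MLX5_CAP_GEN(mdev, umr_fence)) {
		case MLX5_CAP_UMR_FENCE_NONE:
			return MLX5_FENCE_MODE_NONE;
		case MLX5_CAP_UMR_FENCE_SMALL:
			return MLX5_FENCE_MODE_INITIATOR_SMALL;
		default: /* MLX5_CAP_UMR_FENCE_STRONG or unknown */
			return MLX5_FENCE_MODE_STRONG_ORDERING;
		}
	}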

include/rdma/ib_sa.h:

@@ -158,7 +158,6 @@ enum sa_path_rec_type {
};

struct sa_path_rec_ib {
-	__be64 service_id;
	__be16 dlid;
	__be16 slid;
	u8 raw_traffic;

@@ -174,7 +173,6 @@ struct sa_path_rec_roce {
};

struct sa_path_rec_opa {
-	__be64 service_id;
	__be32 dlid;
	__be32 slid;
	u8 raw_traffic;

@@ -189,6 +187,7 @@ struct sa_path_rec_opa {
struct sa_path_rec {
	union ib_gid dgid;
	union ib_gid sgid;
+	__be64 service_id;
	/* reserved */
	__be32 flow_label;
	u8 hop_limit;

@@ -262,7 +261,7 @@ static inline void path_conv_opa_to_ib(struct sa_path_rec *ib,
		ib->ib.dlid = htons(ntohl(opa->opa.dlid));
		ib->ib.slid = htons(ntohl(opa->opa.slid));
	}
-	ib->ib.service_id = opa->opa.service_id;
+	ib->service_id = opa->service_id;
	ib->ib.raw_traffic = opa->opa.raw_traffic;
}

@@ -281,7 +280,7 @@ static inline void path_conv_ib_to_opa(struct sa_path_rec *opa,
	}
	opa->opa.slid = slid;
	opa->opa.dlid = dlid;
-	opa->opa.service_id = ib->ib.service_id;
+	opa->service_id = ib->service_id;
	opa->opa.raw_traffic = ib->ib.raw_traffic;
}

@@ -591,15 +590,6 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
		(rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
}

-static inline void sa_path_set_service_id(struct sa_path_rec *rec,
-					   __be64 service_id)
-{
-	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-		rec->ib.service_id = service_id;
-	else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-		rec->opa.service_id = service_id;
-}

static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
{
	if (rec->rec_type == SA_PATH_REC_TYPE_IB)

@@ -625,15 +615,6 @@ static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec,
		rec->opa.raw_traffic = raw_traffic;
}

-static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec)
-{
-	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-		return rec->ib.service_id;
-	else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-		return rec->opa.service_id;
-	return 0;
-}

static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
{
	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
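
Taken together, these ib_sa.h hunks hoist service_id out of the per-type
sa_path_rec_ib and sa_path_rec_opa sub-structures into the common struct
sa_path_rec, which is what lets the type-dispatching sa_path_set_service_id()
and sa_path_get_service_id() helpers be deleted; this is the header side of the
"RDMA/SA: Fix kernel panic in CMA request handler flow" commit at the top of the
list. Note that sa_path_rec_roce never carried a service_id, so the old getter
silently returned 0 for RoCE records; keeping the field in the common part
closes that hole. A minimal caller-side sketch, with a hypothetical
struct sa_path_rec *rec:

	/* Before this change a caller went through the type-dispatching
	 * helper, which returned 0 for anything but IB and OPA records:
	 *
	 *	__be64 sid = sa_path_get_service_id(rec);
	 *
	 * After it, service_id lives in struct sa_path_rec itself and is
	 * valid for IB, OPA and RoCE path records alike:
	 */
	__be64 sid = rec->service_id;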

include/rdma/rdma_netlink.h:

@@ -10,9 +10,6 @@ struct ibnl_client_cbs {
	struct module *module;
};

-int ibnl_init(void);
-void ibnl_cleanup(void);

/**
 * Add a a client to the list of IB netlink exporters.
 * @index: Index of the added client

@@ -77,11 +74,4 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
		   unsigned int group, gfp_t flags);

-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);

#endif /* _RDMA_NETLINK_H */
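
The rdma_netlink.h hunks drop ibnl_init()/ibnl_cleanup() and
ibnl_chk_listeners() from the public header, leaving only the client
registration and message helpers visible outside the RDMA core (hence the
"Reduce exposure of RDMA netlink functions" subject above). A hedged sketch of
the surviving usage pattern, loosely modelled on how in-tree consumers register
a client table; the callback, table and function names are placeholders, and
the RDMA_NL_RDMA_CM / RDMA_NL_RDMA_CM_ID_STATS indices from the uapi header are
reused here purely for illustration:

	/* Placeholder dump callback for the example client. */
	static int example_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		return skb->len;	/* nothing to report in this sketch */
	}

	static const struct ibnl_client_cbs example_cb_table[] = {
		[RDMA_NL_RDMA_CM_ID_STATS] = { .dump   = example_nl_dump,
					       .module = THIS_MODULE },
	};

	static int example_register_client(void)
	{
		/* ibnl_add_client() remains exported by the public header. */
		return ibnl_add_client(RDMA_NL_RDMA_CM,
				       ARRAY_SIZE(example_cb_table),
				       example_cb_table);
	}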