Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull main rdma updates from Doug Ledford:
 "This is the main pull request for the rdma stack this release.  The
  code has been through 0day and I had it tagged for linux-next testing
  for a couple days.

  Summary:

   - updates to mlx5

   - updates to mlx4 (two conflicts, both minor and easily resolved)

   - updates to iw_cxgb4 (one conflict, not so obvious to resolve,
     proper resolution is to keep the code in cxgb4_main.c as it is in
     Linus' tree as attach_uld was refactored and moved into
     cxgb4_uld.c)

   - improvements to uAPI (moved vendor specific API elements to uAPI
     area)

   - add hns-roce driver and hns and hns-roce ACPI reset support

   - conversion of all rdma code away from deprecated
     create_singlethread_workqueue

   - security improvement: remove unsafe ib_get_dma_mr (breaks lustre in
     staging)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (75 commits)
  staging/lustre: Disable InfiniBand support
  iw_cxgb4: add fast-path for small REG_MR operations
  cxgb4: advertise support for FR_NSMR_TPTE_WR
  IB/core: correctly handle rdma_rw_init_mrs() failure
  IB/srp: Fix infinite loop when FMR sg[0].offset != 0
  IB/srp: Remove an unused argument
  IB/core: Improve ib_map_mr_sg() documentation
  IB/mlx4: Fix possible vl/sl field mismatch in LRH header in QP1 packets
  IB/mthca: Move user vendor structures
  IB/nes: Move user vendor structures
  IB/ocrdma: Move user vendor structures
  IB/mlx4: Move user vendor structures
  IB/cxgb4: Move user vendor structures
  IB/cxgb3: Move user vendor structures
  IB/mlx5: Move and decouple user vendor structures
  IB/{core,hw}: Add constant for node_desc
  ipoib: Make ipoib_warn ratelimited
  IB/mlx4/alias_GUID: Remove deprecated create_singlethread_workqueue
  IB/ipoib_verbs: Remove deprecated create_singlethread_workqueue
  IB/ipoib: Remove deprecated create_singlethread_workqueue
  ...
Author: Linus Torvalds
Date:   2016-10-09 17:04:33 -07:00
114 changed files with 12091 additions and 485 deletions

View File

@@ -800,7 +800,7 @@ static struct notifier_block nb = {
 int addr_init(void)
 {
-	addr_wq = create_singlethread_workqueue("ib_addr");
+	addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
 	if (!addr_wq)
 		return -ENOMEM;

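The workqueue hunks in this pull all follow the same pattern: create_singlethread_workqueue() is replaced with alloc_ordered_workqueue() where the code relies on work items running one at a time and in order, or with plain alloc_workqueue() (as in the addr.c hunk above) where it does not, while WQ_MEM_RECLAIM keeps the rescuer thread the old API implied. The sketch below is a minimal, self-contained illustration of that conversion in a hypothetical module; the foo_* names are not part of this series.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;
static struct work_struct foo_work;

static void foo_work_fn(struct work_struct *work)
{
	pr_info("foo: work item ran\n");
}

static int __init foo_init(void)
{
	/*
	 * Was: foo_wq = create_singlethread_workqueue("foo");
	 * alloc_ordered_workqueue() preserves one-at-a-time, in-order
	 * execution; WQ_MEM_RECLAIM preserves forward progress under
	 * memory pressure.
	 */
	foo_wq = alloc_ordered_workqueue("foo", WQ_MEM_RECLAIM);
	if (!foo_wq)
		return -ENOMEM;

	INIT_WORK(&foo_work, foo_work_fn);
	queue_work(foo_wq, &foo_work);
	return 0;
}

static void __exit foo_exit(void)
{
	/* Flushes pending work, then releases the workqueue. */
	destroy_workqueue(foo_wq);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");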
View File

@@ -4369,7 +4369,7 @@ static int __init cma_init(void)
 {
 	int ret;
-	cma_wq = create_singlethread_workqueue("rdma_cm");
+	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
 	if (!cma_wq)
 		return -ENOMEM;

View File

@@ -1160,7 +1160,7 @@ static int __init iw_cm_init(void)
 	if (ret)
 		pr_err("iw_cm: couldn't register netlink callbacks\n");
-	iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
+	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
 	if (!iwcm_wq)
 		return -ENOMEM;

View File

@@ -3160,7 +3160,7 @@ static int ib_mad_port_open(struct ib_device *device,
 		goto error3;
 	}
-	port_priv->pd = ib_alloc_pd(device);
+	port_priv->pd = ib_alloc_pd(device, 0);
 	if (IS_ERR(port_priv->pd)) {
 		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
 		ret = PTR_ERR(port_priv->pd);
@@ -3177,7 +3177,7 @@ static int ib_mad_port_open(struct ib_device *device,
 		goto error7;
 	snprintf(name, sizeof name, "ib_mad%d", port_num);
-	port_priv->wq = create_singlethread_workqueue(name);
+	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
 	if (!port_priv->wq) {
 		ret = -ENOMEM;
 		goto error8;

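The ib_alloc_pd() change above is the caller-side half of the new PD allocation interface: every kernel ULP now passes a flags word, and 0 is the safe default for code that never needs a global rkey. A hedged sketch of a caller, where my_ulp_create_pd() and its error handling are hypothetical and only the ib_alloc_pd(device, flags) form comes from this series:

#include <rdma/ib_verbs.h>

static int my_ulp_create_pd(struct ib_device *device, struct ib_pd **pd_out)
{
	struct ib_pd *pd;

	/* 0 = no IB_PD_UNSAFE_GLOBAL_RKEY; the common, safe case. */
	pd = ib_alloc_pd(device, 0);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	*pd_out = pd;
	return 0;
}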
View File

@@ -873,7 +873,7 @@ int mcast_init(void)
 {
 	int ret;
-	mcast_wq = create_singlethread_workqueue("ib_mcast");
+	mcast_wq = alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
 	if (!mcast_wq)
 		return -ENOMEM;

View File

@@ -2015,7 +2015,7 @@ int ib_sa_init(void)
 		goto err2;
 	}
-	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
+	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
 	if (!ib_nl_wq) {
 		ret = -ENOMEM;
 		goto err3;

View File

@@ -1193,7 +1193,7 @@ static ssize_t set_node_desc(struct device *device,
 	if (!dev->modify_device)
 		return -EIO;
-	memcpy(desc.node_desc, buf, min_t(int, count, 64));
+	memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
 	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
 	if (ret)
 		return ret;

View File

@@ -1638,7 +1638,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
 	if (!file)
 		return -ENOMEM;
-	file->close_wq = create_singlethread_workqueue("ucma_close_id");
+	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
+						 WQ_MEM_RECLAIM);
 	if (!file->close_wq) {
 		kfree(file);
 		return -ENOMEM;

View File

@@ -571,7 +571,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
 	pd->device  = ib_dev;
 	pd->uobject = uobj;
-	pd->local_mr = NULL;
+	pd->__internal_mr = NULL;
 	atomic_set(&pd->usecnt, 0);
 	uobj->object = pd;
@@ -3078,51 +3078,102 @@ out_put:
 	return ret ? ret : in_len;
 }
+static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
+{
+	/* Returns user space filter size, includes padding */
+	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
+}
+static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
+				u16 ib_real_filter_sz)
+{
+	/*
+	 * User space filter structures must be 64 bit aligned, otherwise this
+	 * may pass, but we won't handle additional new attributes.
+	 */
+	if (kern_filter_size > ib_real_filter_sz) {
+		if (memchr_inv(kern_spec_filter +
+			       ib_real_filter_sz, 0,
+			       kern_filter_size - ib_real_filter_sz))
+			return -EINVAL;
+		return ib_real_filter_sz;
+	}
+	return kern_filter_size;
+}
 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 				union ib_flow_spec *ib_spec)
 {
+	ssize_t actual_filter_sz;
+	ssize_t kern_filter_sz;
+	ssize_t ib_filter_sz;
+	void *kern_spec_mask;
+	void *kern_spec_val;
 	if (kern_spec->reserved)
 		return -EINVAL;
 	ib_spec->type = kern_spec->type;
+	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
+	/* User flow spec size must be aligned to 4 bytes */
+	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
+		return -EINVAL;
+	kern_spec_val = (void *)kern_spec +
+		sizeof(struct ib_uverbs_flow_spec_hdr);
+	kern_spec_mask = kern_spec_val + kern_filter_sz;
 	switch (ib_spec->type) {
 	case IB_FLOW_SPEC_ETH:
-		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
-		if (ib_spec->eth.size != kern_spec->eth.size)
+		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
 			return -EINVAL;
-		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
-		       sizeof(struct ib_flow_eth_filter));
-		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
-		       sizeof(struct ib_flow_eth_filter));
+		ib_spec->size = sizeof(struct ib_flow_spec_eth);
+		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
 		break;
 	case IB_FLOW_SPEC_IPV4:
-		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
-		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
+		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
 			return -EINVAL;
-		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
-		       sizeof(struct ib_flow_ipv4_filter));
-		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
-		       sizeof(struct ib_flow_ipv4_filter));
+		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
+		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
 		break;
 	case IB_FLOW_SPEC_IPV6:
-		ib_spec->ipv6.size = sizeof(struct ib_flow_spec_ipv6);
-		if (ib_spec->ipv6.size != kern_spec->ipv6.size)
+		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
 			return -EINVAL;
+		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
+		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
+		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
+		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
+			return -EINVAL;
-		memcpy(&ib_spec->ipv6.val, &kern_spec->ipv6.val,
-		       sizeof(struct ib_flow_ipv6_filter));
-		memcpy(&ib_spec->ipv6.mask, &kern_spec->ipv6.mask,
-		       sizeof(struct ib_flow_ipv6_filter));
 		break;
 	case IB_FLOW_SPEC_TCP:
 	case IB_FLOW_SPEC_UDP:
-		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
-		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
+		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
 			return -EINVAL;
-		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
-		       sizeof(struct ib_flow_tcp_udp_filter));
-		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
-		       sizeof(struct ib_flow_tcp_udp_filter));
+		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
+		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
 		break;
 	default:
 		return -EINVAL;
@@ -3654,7 +3705,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 		goto err_uobj;
 	}
-	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
+	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
+			    sizeof(union ib_flow_spec), GFP_KERNEL);
 	if (!flow_attr) {
 		err = -ENOMEM;
 		goto err_put;
@@ -4173,6 +4225,23 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
 	resp.device_cap_flags_ex = attr.device_cap_flags;
 	resp.response_length += sizeof(resp.device_cap_flags_ex);
+	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
+		goto end;
+	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
+	resp.rss_caps.max_rwq_indirection_tables =
+		attr.rss_caps.max_rwq_indirection_tables;
+	resp.rss_caps.max_rwq_indirection_table_size =
+		attr.rss_caps.max_rwq_indirection_table_size;
+	resp.response_length += sizeof(resp.rss_caps);
+	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
+		goto end;
+	resp.max_wq_type_rq = attr.max_wq_type_rq;
+	resp.response_length += sizeof(resp.max_wq_type_rq);
 end:
 	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
 	return err;

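The spec_filter_size() helper added above encodes the uAPI compatibility rule for flow-spec filters: a user-space filter that is larger than the structure the kernel knows about is accepted only if every trailing, unrecognized byte is zero, and the copied size is then clamped to the kernel's structure size; a shorter filter is copied as-is. The stand-alone model below restates that rule for illustration only; model_spec_filter_size() is hypothetical, and the kernel version uses memchr_inv() rather than an explicit loop.

#include <stddef.h>
#include <sys/types.h>

static ssize_t model_spec_filter_size(const unsigned char *user_filter,
				      size_t user_sz, size_t kernel_sz)
{
	size_t i;

	if (user_sz > kernel_sz) {
		/* Any non-zero byte beyond what the kernel understands is an
		 * attribute it cannot honor, so the flow spec is rejected. */
		for (i = kernel_sz; i < user_sz; i++)
			if (user_filter[i] != 0)
				return -1;
		return kernel_sz;
	}
	return user_sz;
}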
View File

@@ -227,9 +227,11 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
  * Every PD has a local_dma_lkey which can be used as the lkey value for local
  * memory operations.
  */
-struct ib_pd *ib_alloc_pd(struct ib_device *device)
+struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
+			    const char *caller)
 {
 	struct ib_pd *pd;
+	int mr_access_flags = 0;
 	pd = device->alloc_pd(device, NULL, NULL);
 	if (IS_ERR(pd))
@@ -237,26 +239,46 @@ struct ib_pd *ib_alloc_pd(struct ib_device *device)
 	pd->device = device;
 	pd->uobject = NULL;
-	pd->local_mr = NULL;
+	pd->__internal_mr = NULL;
 	atomic_set(&pd->usecnt, 0);
+	pd->flags = flags;
 	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
 		pd->local_dma_lkey = device->local_dma_lkey;
-	else {
+	else
+		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
+	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+		pr_warn("%s: enabling unsafe global rkey\n", caller);
+		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
+	}
+	if (mr_access_flags) {
 		struct ib_mr *mr;
-		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+		mr = pd->device->get_dma_mr(pd, mr_access_flags);
 		if (IS_ERR(mr)) {
 			ib_dealloc_pd(pd);
-			return (struct ib_pd *)mr;
+			return ERR_CAST(mr);
 		}
-		pd->local_mr = mr;
-		pd->local_dma_lkey = pd->local_mr->lkey;
+		mr->device = pd->device;
+		mr->pd = pd;
+		mr->uobject = NULL;
+		mr->need_inval = false;
+		pd->__internal_mr = mr;
+		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
+			pd->local_dma_lkey = pd->__internal_mr->lkey;
+		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
 	}
 	return pd;
 }
-EXPORT_SYMBOL(ib_alloc_pd);
+EXPORT_SYMBOL(__ib_alloc_pd);
/**
* ib_dealloc_pd - Deallocates a protection domain.
@@ -270,10 +292,10 @@ void ib_dealloc_pd(struct ib_pd *pd)
 {
 	int ret;
-	if (pd->local_mr) {
-		ret = ib_dereg_mr(pd->local_mr);
+	if (pd->__internal_mr) {
+		ret = pd->device->dereg_mr(pd->__internal_mr);
 		WARN_ON(ret);
-		pd->local_mr = NULL;
+		pd->__internal_mr = NULL;
 	}
 	/* uverbs manipulates usecnt with proper locking, while the kabi
@@ -821,7 +843,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 		if (ret) {
 			pr_err("failed to init MR pool ret= %d\n", ret);
 			ib_destroy_qp(qp);
-			qp = ERR_PTR(ret);
+			return ERR_PTR(ret);
 		}
 	}
@@ -1391,29 +1413,6 @@ EXPORT_SYMBOL(ib_resize_cq);
 /* Memory regions */
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
-{
-	struct ib_mr *mr;
-	int err;
-	err = ib_check_mr_access(mr_access_flags);
-	if (err)
-		return ERR_PTR(err);
-	mr = pd->device->get_dma_mr(pd, mr_access_flags);
-	if (!IS_ERR(mr)) {
-		mr->device  = pd->device;
-		mr->pd      = pd;
-		mr->uobject = NULL;
-		atomic_inc(&pd->usecnt);
-		mr->need_inval = false;
-	}
-	return mr;
-}
-EXPORT_SYMBOL(ib_get_dma_mr);
 int ib_dereg_mr(struct ib_mr *mr)
 {
 	struct ib_pd *pd = mr->pd;
@@ -1812,13 +1811,13 @@ EXPORT_SYMBOL(ib_set_vf_guid);
  *
  * Constraints:
  * - The first sg element is allowed to have an offset.
- * - Each sg element must be aligned to page_size (or physically
- *   contiguous to the previous element). In case an sg element has a
- *   non contiguous offset, the mapping prefix will not include it.
+ * - Each sg element must either be aligned to page_size or virtually
+ *   contiguous to the previous element. In case an sg element has a
+ *   non-contiguous offset, the mapping prefix will not include it.
  * - The last sg element is allowed to have length less than page_size.
  * - If sg_nents total byte length exceeds the mr max_num_sge * page_size
  *   then only max_num_sg entries will be mapped.
- * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, non of these
+ * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
  *   constraints holds and the page_size argument is ignored.
  *
  * Returns the number of sg elements that were mapped to the memory region.
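With ib_get_dma_mr() removed above, a kernel ULP can no longer ask for an all-physical-memory MR with remote access after the fact; it must opt in at PD allocation time with IB_PD_UNSAFE_GLOBAL_RKEY and use pd->unsafe_global_rkey. staging/lustre was not converted to this scheme, which is why it is disabled in the first commit of the shortlog. The sketch below shows that opt-in path under the assumption of a hypothetical legacy_ulp_alloc_pd() helper; only ib_alloc_pd(), IB_PD_UNSAFE_GLOBAL_RKEY and pd->unsafe_global_rkey come from the patches above.

#include <rdma/ib_verbs.h>

static struct ib_pd *legacy_ulp_alloc_pd(struct ib_device *device, u32 *rkey)
{
	struct ib_pd *pd;

	/* Triggers the "enabling unsafe global rkey" warning by design. */
	pd = ib_alloc_pd(device, IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(pd))
		return pd;

	/* The rkey covering all of memory now lives on the PD itself. */
	*rkey = pd->unsafe_global_rkey;
	return pd;
}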