Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband/rdma updates from Roland Dreier:

 - Re-enable flow steering verbs with new improved userspace ABI
 - Fixes for slow connection due to GID lookup scalability
 - IPoIB fixes
 - Many fixes to HW drivers including mlx4, mlx5, ocrdma and qib
 - Further improvements to SRP error handling
 - Add new transport type for Cisco usNIC

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (66 commits)
  IB/core: Re-enable create_flow/destroy_flow uverbs
  IB/core: extended command: an improved infrastructure for uverbs commands
  IB/core: Remove ib_uverbs_flow_spec structure from userspace
  IB/core: Use a common header for uverbs flow_specs
  IB/core: Make uverbs flow structure use names like verbs ones
  IB/core: Rename 'flow' structs to match other uverbs structs
  IB/core: clarify overflow/underflow checks on ib_create/destroy_flow
  IB/ucma: Convert use of typedef ctl_table to struct ctl_table
  IB/cm: Convert to using idr_alloc_cyclic()
  IB/mlx5: Fix page shift in create CQ for userspace
  IB/mlx4: Fix device max capabilities check
  IB/mlx5: Fix list_del of empty list
  IB/mlx5: Remove dead code
  IB/core: Encorce MR access rights rules on kernel consumers
  IB/mlx4: Fix endless loop in resize CQ
  RDMA/cma: Remove unused argument and minor dead code
  RDMA/ucma: Discard events for IDs not yet claimed by user space
  IB/core: Add Cisco usNIC rdma node and transport types
  RDMA/nes: Remove self-assignment from nes_query_qp()
  IB/srp: Report receive errors correctly
  ...
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -383,14 +383,11 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 {
 	unsigned long flags;
 	int id;
-	static int next_id;
 
 	idr_preload(GFP_KERNEL);
 	spin_lock_irqsave(&cm.lock, flags);
 
-	id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
-	if (id >= 0)
-		next_id = max(id + 1, 0);
+	id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);
 
 	spin_unlock_irqrestore(&cm.lock, flags);
 	idr_preload_end();
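The cm.c hunk above replaces the hand-rolled cyclic ID allocation (a static next_id plus a max() clamp) with idr_alloc_cyclic(), which has handled the resume-after-last-ID and wrap-to-zero cases inside the IDR core since 3.9. A minimal sketch of the same pattern; the IDR, lock, and function names here are illustrative, not from the CM code:

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(example_idr);
	static DEFINE_SPINLOCK(example_lock);

	static int example_alloc_id(void *entry)
	{
		unsigned long flags;
		int id;

		idr_preload(GFP_KERNEL);	/* preallocate outside the lock */
		spin_lock_irqsave(&example_lock, flags);
		/* Cyclic allocation continues after the last ID handed out,
		 * so recently freed IDs are not reused immediately -- exactly
		 * what the removed next_id bookkeeping did by hand. */
		id = idr_alloc_cyclic(&example_idr, entry, 0, 0, GFP_NOWAIT);
		spin_unlock_irqrestore(&example_lock, flags);
		idr_preload_end();

		return id;	/* >= 0 on success, negative errno on failure */
	}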
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -328,28 +328,6 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
 	return ret;
 }
 
-static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
-{
-	int i;
-	int err;
-	struct ib_port_attr props;
-	union ib_gid tmp;
-
-	err = ib_query_port(device, port_num, &props);
-	if (err)
-		return err;
-
-	for (i = 0; i < props.gid_tbl_len; ++i) {
-		err = ib_query_gid(device, port_num, i, &tmp);
-		if (err)
-			return err;
-		if (!memcmp(&tmp, gid, sizeof tmp))
-			return 0;
-	}
-
-	return -EADDRNOTAVAIL;
-}
-
 static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
 {
 	dev_addr->dev_type = ARPHRD_INFINIBAND;
@@ -371,13 +349,14 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 	return ret;
 }
 
-static int cma_acquire_dev(struct rdma_id_private *id_priv)
+static int cma_acquire_dev(struct rdma_id_private *id_priv,
+			   struct rdma_id_private *listen_id_priv)
 {
 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 	struct cma_device *cma_dev;
 	union ib_gid gid, iboe_gid;
 	int ret = -ENODEV;
-	u8 port;
+	u8 port, found_port;
 	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
 		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 
@@ -389,17 +368,39 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 		iboe_addr_get_sgid(dev_addr, &iboe_gid);
 	memcpy(&gid, dev_addr->src_dev_addr +
 	       rdma_addr_gid_offset(dev_addr), sizeof gid);
+	if (listen_id_priv &&
+	    rdma_port_get_link_layer(listen_id_priv->id.device,
+				     listen_id_priv->id.port_num) == dev_ll) {
+		cma_dev = listen_id_priv->cma_dev;
+		port = listen_id_priv->id.port_num;
+		if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
+		    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
+			ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
+						 &found_port, NULL);
+		else
+			ret = ib_find_cached_gid(cma_dev->device, &gid,
						 &found_port, NULL);
+
+		if (!ret && (port == found_port)) {
+			id_priv->id.port_num = found_port;
+			goto out;
+		}
+	}
 	list_for_each_entry(cma_dev, &dev_list, list) {
 		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+			if (listen_id_priv &&
+			    listen_id_priv->cma_dev == cma_dev &&
+			    listen_id_priv->id.port_num == port)
+				continue;
 			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
 				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
 				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
-					ret = find_gid_port(cma_dev->device, &iboe_gid, port);
+					ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
 				else
-					ret = find_gid_port(cma_dev->device, &gid, port);
+					ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
 
-				if (!ret) {
-					id_priv->id.port_num = port;
+				if (!ret && (port == found_port)) {
+					id_priv->id.port_num = found_port;
 					goto out;
 				}
 			}
@@ -1292,7 +1293,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	}
 
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-	ret = cma_acquire_dev(conn_id);
+	ret = cma_acquire_dev(conn_id, listen_id);
 	if (ret)
 		goto err2;
 
@@ -1451,7 +1452,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 {
 	struct rdma_cm_id *new_cm_id;
 	struct rdma_id_private *listen_id, *conn_id;
-	struct net_device *dev = NULL;
 	struct rdma_cm_event event;
 	int ret;
 	struct ib_device_attr attr;
@@ -1481,7 +1481,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = cma_acquire_dev(conn_id);
+	ret = cma_acquire_dev(conn_id, listen_id);
 	if (ret) {
 		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
@@ -1529,8 +1529,6 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	cma_deref_id(conn_id);
 
 out:
-	if (dev)
-		dev_put(dev);
 	mutex_unlock(&listen_id->handler_mutex);
 	return ret;
 }
@@ -2066,7 +2064,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 		goto out;
 
 	if (!status && !id_priv->cma_dev)
-		status = cma_acquire_dev(id_priv);
+		status = cma_acquire_dev(id_priv, NULL);
 
 	if (status) {
 		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
@@ -2563,7 +2561,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		if (ret)
 			goto err1;
 
-		ret = cma_acquire_dev(id_priv);
+		ret = cma_acquire_dev(id_priv, NULL);
 		if (ret)
 			goto err1;
 	}
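The cma.c changes implement the "GID lookup scalability" item from the pull summary: the removed find_gid_port() issued one ib_query_gid() call per GID table slot on every connection request, while ib_find_cached_gid() consults the GID cache ib_core already maintains, and the new listen_id_priv fast path tries the listener's own device and port before scanning the global device list at all. A hedged sketch contrasting the two lookup styles (the wrapper names are illustrative):

	#include <linux/string.h>
	#include <rdma/ib_verbs.h>
	#include <rdma/ib_cache.h>

	/* Old style: one query per table slot, potentially a mailbox
	 * command to the HCA for every index. */
	static int lookup_gid_slow(struct ib_device *device, u8 port,
				   int gid_tbl_len, union ib_gid *gid)
	{
		union ib_gid tmp;
		int i, err;

		for (i = 0; i < gid_tbl_len; ++i) {
			err = ib_query_gid(device, port, i, &tmp);
			if (err)
				return err;
			if (!memcmp(&tmp, gid, sizeof(tmp)))
				return 0;
		}
		return -EADDRNOTAVAIL;
	}

	/* New style: a single search of ib_core's GID cache, which also
	 * reports the port (and optionally the index) that matched. */
	static int lookup_gid_fast(struct ib_device *device, union ib_gid *gid,
				   u8 *found_port)
	{
		return ib_find_cached_gid(device, gid, found_port, NULL);
	}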
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -148,7 +148,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	list_for_each_entry(client, &client_list, list) {
 		if (client->index == index) {
 			if (op < 0 || op >= client->nops ||
-			    !client->cb_table[RDMA_NL_GET_OP(op)].dump)
+			    !client->cb_table[op].dump)
 				return -EINVAL;
 
 			{
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -612,6 +612,7 @@ static ssize_t show_node_type(struct device *device,
 	switch (dev->node_type) {
 	case RDMA_NODE_IB_CA:	  return sprintf(buf, "%d: CA\n", dev->node_type);
 	case RDMA_NODE_RNIC:	  return sprintf(buf, "%d: RNIC\n", dev->node_type);
+	case RDMA_NODE_USNIC:	  return sprintf(buf, "%d: usNIC\n", dev->node_type);
 	case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
 	case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
 	default:		  return sprintf(buf, "%d: <unknown>\n", dev->node_type);
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -57,7 +57,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 static unsigned int max_backlog = 1024;
 
 static struct ctl_table_header *ucma_ctl_table_hdr;
-static ctl_table ucma_ctl_table[] = {
+static struct ctl_table ucma_ctl_table[] = {
 	{
 		.procname	= "max_backlog",
 		.data		= &max_backlog,
@@ -271,7 +271,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 			goto out;
 		}
 		ctx->backlog--;
-	} else if (!ctx->uid) {
+	} else if (!ctx->uid || ctx->cm_id != cm_id) {
 		/*
 		 * We ignore events for new connections until userspace has set
 		 * their context.  This can only happen if an error occurs on a
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -47,6 +47,14 @@
 #include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
 
+#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
+	do {								\
+		(udata)->inbuf  = (void __user *) (ibuf);		\
+		(udata)->outbuf = (void __user *) (obuf);		\
+		(udata)->inlen  = (ilen);				\
+		(udata)->outlen = (olen);				\
+	} while (0)
+
 /*
  * Our lifetime rules for these structs are the following:
 *
@@ -178,6 +186,22 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler,
 			     struct ib_event *event);
 void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd);
 
+struct ib_uverbs_flow_spec {
+	union {
+		union {
+			struct ib_uverbs_flow_spec_hdr hdr;
+			struct {
+				__u32 type;
+				__u16 size;
+				__u16 reserved;
+			};
+		};
+		struct ib_uverbs_flow_spec_eth     eth;
+		struct ib_uverbs_flow_spec_ipv4    ipv4;
+		struct ib_uverbs_flow_spec_tcp_udp tcp_udp;
+	};
+};
+
 #define IB_UVERBS_DECLARE_CMD(name)					\
 	ssize_t ib_uverbs_##name(struct ib_uverbs_file *file,		\
 				 const char __user *buf, int in_len,	\
@@ -217,9 +241,13 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
 IB_UVERBS_DECLARE_CMD(create_xsrq);
 IB_UVERBS_DECLARE_CMD(open_xrcd);
 IB_UVERBS_DECLARE_CMD(close_xrcd);
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
-IB_UVERBS_DECLARE_CMD(create_flow);
-IB_UVERBS_DECLARE_CMD(destroy_flow);
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
+
+#define IB_UVERBS_DECLARE_EX_CMD(name)				\
+	int ib_uverbs_ex_##name(struct ib_uverbs_file *file,	\
+				struct ib_udata *ucore,		\
+				struct ib_udata *uhw)
+
+IB_UVERBS_DECLARE_EX_CMD(create_flow);
+IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
 
 #endif /* UVERBS_H */
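For reference, the IB_UVERBS_DECLARE_EX_CMD() uses above expand to prototypes for the new-style handlers, which take two ib_udata blobs in place of the raw buffer/length arguments of the legacy ssize_t commands:

	/* IB_UVERBS_DECLARE_EX_CMD(create_flow) expands to: */
	int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
				     struct ib_udata *ucore, /* core command/response data */
				     struct ib_udata *uhw);  /* provider-specific data */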
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -54,17 +54,7 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
 static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
-
-#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
-	do {								\
-		(udata)->inbuf  = (void __user *) (ibuf);		\
-		(udata)->outbuf = (void __user *) (obuf);		\
-		(udata)->inlen  = (ilen);				\
-		(udata)->outlen = (olen);				\
-	} while (0)
 
 /*
  * The ib_uobject locking scheme is as follows:
@@ -939,13 +929,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
 		return -EINVAL;
 
-	/*
-	 * Local write permission is required if remote write or
-	 * remote atomic permission is also requested.
-	 */
-	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
-	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
-		return -EINVAL;
+	ret = ib_check_mr_access(cmd.access_flags);
+	if (ret)
+		return ret;
 
 	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
 	if (!uobj)
@@ -2128,6 +2114,9 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			}
 			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
 			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
+			if (next->opcode == IB_WR_SEND_WITH_IMM)
+				next->ex.imm_data =
+					(__be32 __force) user_wr->ex.imm_data;
 		} else {
 			switch (next->opcode) {
 			case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -2601,8 +2590,7 @@ out_put:
 	return ret ? ret : in_len;
 }
 
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
-static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
+static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 				union ib_flow_spec *ib_spec)
 {
 	ib_spec->type = kern_spec->type;
@@ -2642,28 +2630,31 @@ static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
 	return 0;
 }
 
-ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
-			      const char __user *buf, int in_len,
-			      int out_len)
+int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
+			     struct ib_udata *ucore,
+			     struct ib_udata *uhw)
 {
 	struct ib_uverbs_create_flow	  cmd;
 	struct ib_uverbs_create_flow_resp resp;
 	struct ib_uobject		  *uobj;
 	struct ib_flow			  *flow_id;
-	struct ib_kern_flow_attr	  *kern_flow_attr;
+	struct ib_uverbs_flow_attr	  *kern_flow_attr;
 	struct ib_flow_attr		  *flow_attr;
 	struct ib_qp			  *qp;
 	int err = 0;
 	void *kern_spec;
 	void *ib_spec;
 	int i;
-	int kern_attr_size;
 
-	if (out_len < sizeof(resp))
+	if (ucore->outlen < sizeof(resp))
 		return -ENOSPC;
 
-	if (copy_from_user(&cmd, buf, sizeof(cmd)))
-		return -EFAULT;
+	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+	if (err)
+		return err;
+
+	ucore->inbuf += sizeof(cmd);
+	ucore->inlen -= sizeof(cmd);
 
 	if (cmd.comp_mask)
 		return -EINVAL;
@@ -2672,32 +2663,27 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
 	    !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
 		return -EPERM;
 
-	if (cmd.flow_attr.num_of_specs < 0 ||
-	    cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
+	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
 		return -EINVAL;
 
-	kern_attr_size = cmd.flow_attr.size - sizeof(cmd) -
-			 sizeof(struct ib_uverbs_cmd_hdr_ex);
-
-	if (cmd.flow_attr.size < 0 || cmd.flow_attr.size > in_len ||
-	    kern_attr_size < 0 || kern_attr_size >
-	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_kern_spec)))
+	if (cmd.flow_attr.size > ucore->inlen ||
+	    cmd.flow_attr.size >
+	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
 		return -EINVAL;
 
 	if (cmd.flow_attr.num_of_specs) {
-		kern_flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
+		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
+					 GFP_KERNEL);
 		if (!kern_flow_attr)
 			return -ENOMEM;
 
 		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
-		if (copy_from_user(kern_flow_attr + 1, buf + sizeof(cmd),
-				   kern_attr_size)) {
-			err = -EFAULT;
+		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
+					 cmd.flow_attr.size);
+		if (err)
 			goto err_free_attr;
-		}
 	} else {
 		kern_flow_attr = &cmd.flow_attr;
-		kern_attr_size = sizeof(cmd.flow_attr);
 	}
 
 	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
@@ -2714,7 +2700,7 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
 		goto err_uobj;
 	}
 
-	flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
+	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
 	if (!flow_attr) {
 		err = -ENOMEM;
 		goto err_put;
@@ -2729,19 +2715,22 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
 
 	kern_spec = kern_flow_attr + 1;
 	ib_spec = flow_attr + 1;
-	for (i = 0; i < flow_attr->num_of_specs && kern_attr_size > 0; i++) {
+	for (i = 0; i < flow_attr->num_of_specs &&
+	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
+	     cmd.flow_attr.size >=
+	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
 		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
 		if (err)
 			goto err_free;
 		flow_attr->size +=
 			((union ib_flow_spec *) ib_spec)->size;
-		kern_attr_size -= ((struct ib_kern_spec *) kern_spec)->size;
-		kern_spec += ((struct ib_kern_spec *) kern_spec)->size;
+		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
+		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
 		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
 	}
-	if (kern_attr_size) {
-		pr_warn("create flow failed, %d bytes left from uverb cmd\n",
-			kern_attr_size);
+	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
+		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
+			i, cmd.flow_attr.size);
 		goto err_free;
 	}
 	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2760,11 +2749,10 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
 	memset(&resp, 0, sizeof(resp));
 	resp.flow_handle = uobj->id;
 
-	if (copy_to_user((void __user *)(unsigned long) cmd.response,
-			 &resp, sizeof(resp))) {
-		err = -EFAULT;
+	err = ib_copy_to_udata(ucore,
+			       &resp, sizeof(resp));
+	if (err)
 		goto err_copy;
-	}
 
 	put_qp_read(qp);
 	mutex_lock(&file->mutex);
@@ -2777,7 +2765,7 @@ ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
 	kfree(flow_attr);
 	if (cmd.flow_attr.num_of_specs)
 		kfree(kern_flow_attr);
-	return in_len;
+	return 0;
 err_copy:
 	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
 destroy_flow:
@@ -2794,16 +2782,18 @@ err_free_attr:
 	return err;
 }
 
-ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
-			       const char __user *buf, int in_len,
-			       int out_len) {
+int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
+			      struct ib_udata *ucore,
+			      struct ib_udata *uhw)
+{
 	struct ib_uverbs_destroy_flow	cmd;
 	struct ib_flow			*flow_id;
 	struct ib_uobject		*uobj;
 	int				ret;
 
-	if (copy_from_user(&cmd, buf, sizeof(cmd)))
-		return -EFAULT;
+	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+	if (ret)
+		return ret;
 
 	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
 			      file->ucontext);
@@ -2825,9 +2815,8 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
 
 	put_uobj(uobj);
 
-	return ret ? ret : in_len;
+	return ret;
 }
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
 				struct ib_uverbs_create_xsrq *cmd,
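The rewritten spec walk in ib_uverbs_ex_create_flow() is a bounded traversal of variable-length records: each ib_uverbs_flow_spec starts with its own size, and the loop only advances while the remaining user-supplied bytes can cover both the record header and the size the record claims. A self-contained sketch of that validation pattern, with illustrative types and function names:

	#include <linux/types.h>
	#include <linux/errno.h>

	/* Illustrative record header; mirrors the type/size/reserved
	 * prefix every ib_uverbs_flow_spec carries. */
	struct var_rec {
		__u32 type;
		__u16 size;	/* total bytes in this record, header included */
		__u16 reserved;
	};

	static int walk_records(void *buf, size_t len, int expected)
	{
		struct var_rec *rec;
		int i;

		for (i = 0; i < expected; i++) {
			if (len < sizeof(*rec))
				break;		/* no room left for a header */
			rec = buf;
			if (rec->size < sizeof(*rec) || rec->size > len)
				break;		/* record overruns the input */
			/* ... translate/consume the record here ... */
			len -= rec->size;
			buf += rec->size;
		}
		/* Leftover bytes or too few records mean a malformed request. */
		return (len || i != expected) ? -EINVAL : 0;
	}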
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -115,10 +115,13 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
 	[IB_USER_VERBS_CMD_CLOSE_XRCD]		= ib_uverbs_close_xrcd,
 	[IB_USER_VERBS_CMD_CREATE_XSRQ]		= ib_uverbs_create_xsrq,
 	[IB_USER_VERBS_CMD_OPEN_QP]		= ib_uverbs_open_qp,
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
-	[IB_USER_VERBS_CMD_CREATE_FLOW]		= ib_uverbs_create_flow,
-	[IB_USER_VERBS_CMD_DESTROY_FLOW]	= ib_uverbs_destroy_flow
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
+};
+
+static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
+				    struct ib_udata *ucore,
+				    struct ib_udata *uhw) = {
+	[IB_USER_VERBS_EX_CMD_CREATE_FLOW]	= ib_uverbs_ex_create_flow,
+	[IB_USER_VERBS_EX_CMD_DESTROY_FLOW]	= ib_uverbs_ex_destroy_flow
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
@@ -589,6 +592,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 {
 	struct ib_uverbs_file *file = filp->private_data;
 	struct ib_uverbs_cmd_hdr hdr;
+	__u32 flags;
 
 	if (count < sizeof hdr)
 		return -EINVAL;
@@ -596,45 +600,105 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(&hdr, buf, sizeof hdr))
 		return -EFAULT;
 
-	if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
-	    !uverbs_cmd_table[hdr.command])
-		return -EINVAL;
+	flags = (hdr.command &
+		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
 
-	if (!file->ucontext &&
-	    hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
-		return -EINVAL;
+	if (!flags) {
+		__u32 command;
 
-	if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
-		return -ENOSYS;
+		if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
+					   IB_USER_VERBS_CMD_COMMAND_MASK))
+			return -EINVAL;
 
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
-	if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
-		struct ib_uverbs_cmd_hdr_ex hdr_ex;
+		command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
 
-		if (copy_from_user(&hdr_ex, buf, sizeof(hdr_ex)))
-			return -EFAULT;
+		if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
+		    !uverbs_cmd_table[command])
+			return -EINVAL;
 
-		if (((hdr_ex.in_words + hdr_ex.provider_in_words) * 4) != count)
-			return -EINVAL;
+		if (!file->ucontext &&
+		    command != IB_USER_VERBS_CMD_GET_CONTEXT)
+			return -EINVAL;
 
-		return uverbs_cmd_table[hdr.command](file,
-						     buf + sizeof(hdr_ex),
-						     (hdr_ex.in_words +
-						      hdr_ex.provider_in_words) * 4,
-						     (hdr_ex.out_words +
-						      hdr_ex.provider_out_words) * 4);
-	} else {
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
-		if (hdr.in_words * 4 != count)
-			return -EINVAL;
+		if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << command)))
+			return -ENOSYS;
 
-		return uverbs_cmd_table[hdr.command](file,
-						     buf + sizeof(hdr),
-						     hdr.in_words * 4,
-						     hdr.out_words * 4);
-#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
-	}
-#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
+		if (hdr.in_words * 4 != count)
+			return -EINVAL;
+
+		return uverbs_cmd_table[command](file,
+						 buf + sizeof(hdr),
+						 hdr.in_words * 4,
+						 hdr.out_words * 4);
+
+	} else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
+		__u32 command;
+
+		struct ib_uverbs_ex_cmd_hdr ex_hdr;
+		struct ib_udata ucore;
+		struct ib_udata uhw;
+		int err;
+		size_t written_count = count;
+
+		if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
+					   IB_USER_VERBS_CMD_COMMAND_MASK))
+			return -EINVAL;
+
+		command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+
+		if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
+		    !uverbs_ex_cmd_table[command])
+			return -ENOSYS;
+
+		if (!file->ucontext)
+			return -EINVAL;
+
+		if (!(file->device->ib_dev->uverbs_ex_cmd_mask & (1ull << command)))
+			return -ENOSYS;
+
+		if (count < (sizeof(hdr) + sizeof(ex_hdr)))
+			return -EINVAL;
+
+		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
+			return -EFAULT;
+
+		count -= sizeof(hdr) + sizeof(ex_hdr);
+		buf += sizeof(hdr) + sizeof(ex_hdr);
+
+		if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
+			return -EINVAL;
+
+		if (ex_hdr.response) {
+			if (!hdr.out_words && !ex_hdr.provider_out_words)
+				return -EINVAL;
+		} else {
+			if (hdr.out_words || ex_hdr.provider_out_words)
+				return -EINVAL;
+		}
+
+		INIT_UDATA(&ucore,
+			   (hdr.in_words) ? buf : 0,
+			   (unsigned long)ex_hdr.response,
+			   hdr.in_words * 8,
+			   hdr.out_words * 8);
+
+		INIT_UDATA(&uhw,
+			   (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
+			   (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
+			   ex_hdr.provider_in_words * 8,
+			   ex_hdr.provider_out_words * 8);
+
+		err = uverbs_ex_cmd_table[command](file,
+						   &ucore,
+						   &uhw);
+
+		if (err)
+			return err;
+
+		return written_count;
+	}
+
+	return -ENOSYS;
 }
 
 static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
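On the wire, an extended command is the old 8-byte ib_uverbs_cmd_hdr (with the extended flag set in command and the word counts now in 8-byte units) followed by the new 16-byte ib_uverbs_ex_cmd_hdr, then the core payload. A hedged userspace sketch of issuing one; the struct layouts and flag values below are transcribed from the 3.13-era <rdma/ib_user_verbs.h> and real code should take them from that header rather than redefining them:

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	#define IB_USER_VERBS_CMD_FLAGS_SHIFT	24
	#define IB_USER_VERBS_CMD_FLAG_EXTENDED	0x80

	struct uverbs_cmd_hdr {		/* mirrors struct ib_uverbs_cmd_hdr */
		uint32_t command;
		uint16_t in_words;	/* 8-byte words for extended commands */
		uint16_t out_words;
	};

	struct uverbs_ex_cmd_hdr {	/* mirrors struct ib_uverbs_ex_cmd_hdr */
		uint64_t response;	/* user pointer for the response, or 0 */
		uint16_t provider_in_words;
		uint16_t provider_out_words;
		uint32_t cmd_hdr_reserved;
	};

	/* Issue an extended command with no provider-specific data.
	 * in_len/out_len must be multiples of 8, as the kernel checks. */
	static int issue_ex_cmd(int uverbs_fd, uint32_t opcode,
				const void *in, size_t in_len,
				void *out, size_t out_len)
	{
		struct {
			struct uverbs_cmd_hdr hdr;
			struct uverbs_ex_cmd_hdr ex_hdr;
			char payload[512];
		} msg;
		size_t count = sizeof(msg.hdr) + sizeof(msg.ex_hdr) + in_len;

		if (in_len > sizeof(msg.payload) || (in_len | out_len) % 8)
			return -1;

		msg.hdr.command   = opcode |
			(IB_USER_VERBS_CMD_FLAG_EXTENDED << IB_USER_VERBS_CMD_FLAGS_SHIFT);
		msg.hdr.in_words  = in_len / 8;
		msg.hdr.out_words = out_len / 8;
		msg.ex_hdr.response = out_len ? (uintptr_t)out : 0;
		msg.ex_hdr.provider_in_words  = 0;
		msg.ex_hdr.provider_out_words = 0;
		msg.ex_hdr.cmd_hdr_reserved   = 0;
		if (in_len)
			memcpy(msg.payload, in, in_len);

		return write(uverbs_fd, &msg, count) == (ssize_t)count ? 0 : -1;
	}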
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -114,6 +114,8 @@ rdma_node_get_transport(enum rdma_node_type node_type)
 		return RDMA_TRANSPORT_IB;
 	case RDMA_NODE_RNIC:
 		return RDMA_TRANSPORT_IWARP;
+	case RDMA_NODE_USNIC:
+		return RDMA_TRANSPORT_USNIC;
 	default:
 		BUG();
 		return 0;
@@ -130,6 +132,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
 	case RDMA_TRANSPORT_IB:
 		return IB_LINK_LAYER_INFINIBAND;
 	case RDMA_TRANSPORT_IWARP:
+	case RDMA_TRANSPORT_USNIC:
 		return IB_LINK_LAYER_ETHERNET;
 	default:
 		return IB_LINK_LAYER_UNSPECIFIED;
@@ -958,6 +961,11 @@ EXPORT_SYMBOL(ib_resize_cq);
 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
 {
 	struct ib_mr *mr;
+	int err;
+
+	err = ib_check_mr_access(mr_access_flags);
+	if (err)
+		return ERR_PTR(err);
 
 	mr = pd->device->get_dma_mr(pd, mr_access_flags);
 
@@ -980,6 +988,11 @@ struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
 			     u64 *iova_start)
 {
 	struct ib_mr *mr;
+	int err;
+
+	err = ib_check_mr_access(mr_access_flags);
+	if (err)
+		return ERR_PTR(err);
 
 	if (!pd->device->reg_phys_mr)
 		return ERR_PTR(-ENOSYS);
@@ -1010,6 +1023,10 @@ int ib_rereg_phys_mr(struct ib_mr *mr,
 	struct ib_pd *old_pd;
 	int ret;
 
+	ret = ib_check_mr_access(mr_access_flags);
+	if (ret)
+		return ret;
+
 	if (!mr->device->rereg_phys_mr)
 		return -ENOSYS;
 
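ib_check_mr_access() itself is not part of these hunks; judging from the open-coded test it replaces in ib_uverbs_reg_mr(), the helper (added to include/rdma/ib_verbs.h in this series) is presumably along these lines:

	/* Local write permission is required if remote write or remote
	 * atomic permission is also requested, since the HCA must be
	 * able to write the memory locally on behalf of the peer. */
	static inline int ib_check_mr_access(int flags)
	{
		if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
		    !(flags & IB_ACCESS_LOCAL_WRITE))
			return -EINVAL;

		return 0;
	}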