Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull RDMA subsystem updates from Jason Gunthorpe:
 "This cycle mainly saw lots of bug fixes and clean up code across the
  core code and several drivers, few new functional changes were made.

   - Many cleanup and bug fixes for hns

   - Various small bug fixes and cleanups in hfi1, mlx5, usnic, qed,
     bnxt_re, efa

   - Share the query_port code between all the iWarp drivers

   - General rework and cleanup of the ODP MR umem code to fit better
     with the mmu notifier get/put scheme

   - Support rdma netlink in non init_net name spaces

   - mlx5 support for XRC devx and DC ODP"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (99 commits)
  RDMA: Fix double-free in srq creation error flow
  RDMA/efa: Fix incorrect error print
  IB/mlx5: Free mpi in mp_slave mode
  IB/mlx5: Use the original address for the page during free_pages
  RDMA/bnxt_re: Fix spelling mistake "missin_resp" -> "missing_resp"
  RDMA/hns: Package operations of rq inline buffer into separate functions
  RDMA/hns: Optimize cmd init and mode selection for hip08
  IB/hfi1: Define variables as unsigned long to fix KASAN warning
  IB/{rdmavt, hfi1, qib}: Add a counter for credit waits
  IB/hfi1: Add traces for TID RDMA READ
  RDMA/siw: Relax from kmap_atomic() use in TX path
  IB/iser: Support up to 16MB data transfer in a single command
  RDMA/siw: Fix page address mapping in TX path
  RDMA: Fix goto target to release the allocated memory
  RDMA/usnic: Avoid overly large buffers on stack
  RDMA/odp: Add missing cast for 32 bit
  RDMA/hns: Use devm_platform_ioremap_resource() to simplify code
  Documentation/infiniband: update name of some functions
  RDMA/cma: Fix false error message
  RDMA/hns: Fix wrong assignment of qp_access_flags
  ...
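Note: the recurring change across the core hunks below is that the rdma_nl_unicast(), rdma_nl_unicast_wait() and rdma_nl_multicast() senders now take an explicit struct net *, so a reply goes out on the netlink socket of the namespace the request arrived on. A minimal sketch of the new calling convention (my_nldev_doit is a hypothetical handler; the shape matches the nldev.c hunks further down):

static int my_nldev_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);

	if (!msg)
		return -ENOMEM;
	/* ... fill msg with nla_put() etc. ... */

	/* Answer on the requester's namespace, not unconditionally init_net. */
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}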
@@ -183,7 +183,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
 	/* Repair the nlmsg header length */
 	nlmsg_end(skb, nlh);
-	rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
+	rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
 
 	/* Make the request retry, so when we get the response from userspace
 	 * we will have something.
@@ -810,6 +810,7 @@ static void release_gid_table(struct ib_device *device,
 	if (leak)
 		return;
 
+	mutex_destroy(&table->lock);
 	kfree(table->data_vec);
 	kfree(table);
 }
@@ -3046,7 +3046,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 		if (status)
 			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
 					     status);
-	} else {
+	} else if (status) {
 		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
 	}
@@ -342,12 +342,18 @@ static struct configfs_subsystem cma_subsys = {
 
 int __init cma_configfs_init(void)
 {
+	int ret;
+
 	config_group_init(&cma_subsys.su_group);
 	mutex_init(&cma_subsys.su_mutex);
-	return configfs_register_subsystem(&cma_subsys);
+	ret = configfs_register_subsystem(&cma_subsys);
+	if (ret)
+		mutex_destroy(&cma_subsys.su_mutex);
+	return ret;
 }
 
 void __exit cma_configfs_exit(void)
 {
 	configfs_unregister_subsystem(&cma_subsys);
+	mutex_destroy(&cma_subsys.su_mutex);
 }
@@ -36,6 +36,8 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/cgroup_rdma.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/opa_addr.h>

@@ -54,8 +56,26 @@ struct pkey_index_qp_list {
 	struct list_head qp_list;
 };
 
+/**
+ * struct rdma_dev_net - rdma net namespace metadata for a net
+ * @nl_sock:	Pointer to netlink socket
+ * @net:	Pointer to owner net namespace
+ * @id:		xarray id to identify the net namespace.
+ */
+struct rdma_dev_net {
+	struct sock *nl_sock;
+	possible_net_t net;
+	u32 id;
+};
+
 extern const struct attribute_group ib_dev_attr_group;
 extern bool ib_devices_shared_netns;
+extern unsigned int rdma_dev_net_id;
+
+static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net)
+{
+	return net_generic(net, rdma_dev_net_id);
+}
 
 int ib_device_register_sysfs(struct ib_device *device);
 void ib_device_unregister_sysfs(struct ib_device *device);

@@ -179,7 +199,6 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
-int rdma_nl_init(void);
 void rdma_nl_exit(void);
 
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,

@@ -365,4 +384,7 @@ void ib_port_unregister_module_stat(struct kobject *kobj);
 
 int ib_device_set_netns_put(struct sk_buff *skb,
 			    struct ib_device *dev, u32 ns_fd);
+
+int rdma_nl_net_init(struct rdma_dev_net *rnet);
+void rdma_nl_net_exit(struct rdma_dev_net *rnet);
 #endif /* _CORE_PRIV_H */
@@ -599,7 +599,7 @@ int rdma_counter_get_mode(struct ib_device *dev, u8 port,
 void rdma_counter_init(struct ib_device *dev)
 {
 	struct rdma_port_counter *port_counter;
-	u32 port;
+	u32 port, i;
 
 	if (!dev->port_data)
 		return;

@@ -620,13 +620,12 @@ void rdma_counter_init(struct ib_device *dev)
 	return;
 
 fail:
-	rdma_for_each_port(dev, port) {
+	for (i = port; i >= rdma_start_port(dev); i--) {
 		port_counter = &dev->port_data[port].port_counter;
 		kfree(port_counter->hstats);
 		port_counter->hstats = NULL;
+		mutex_destroy(&port_counter->lock);
 	}
-
 	return;
 }
 
 void rdma_counter_release(struct ib_device *dev)

@@ -637,5 +636,6 @@ void rdma_counter_release(struct ib_device *dev)
 	rdma_for_each_port(dev, port) {
 		port_counter = &dev->port_data[port].port_counter;
 		kfree(port_counter->hstats);
+		mutex_destroy(&port_counter->lock);
 	}
 }
@@ -252,6 +252,34 @@ out_free_cq:
 }
 EXPORT_SYMBOL(__ib_alloc_cq_user);
 
+/**
+ * __ib_alloc_cq_any - allocate a completion queue
+ * @dev:	device to allocate the CQ for
+ * @private:	driver private data, accessible from cq->cq_context
+ * @nr_cqe:	number of CQEs to allocate
+ * @poll_ctx:	context to poll the CQ from
+ * @caller:	module owner name
+ *
+ * Attempt to spread ULP Completion Queues over each device's interrupt
+ * vectors. A simple best-effort mechanism is used.
+ */
+struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+				int nr_cqe, enum ib_poll_context poll_ctx,
+				const char *caller)
+{
+	static atomic_t counter;
+	int comp_vector = 0;
+
+	if (dev->num_comp_vectors > 1)
+		comp_vector =
+			atomic_inc_return(&counter) %
+			min_t(int, dev->num_comp_vectors, num_online_cpus());
+
+	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+				  caller, NULL);
+}
+EXPORT_SYMBOL(__ib_alloc_cq_any);
+
 /**
  * ib_free_cq_user - free a completion queue
  * @cq: completion queue to free.
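Note: callers normally reach __ib_alloc_cq_any() through the ib_alloc_cq_any() convenience wrapper, which supplies KBUILD_MODNAME as @caller. A minimal usage sketch for a ULP (ibdev, drv_ctx and the CQE count are placeholders, not from this series):

	struct ib_cq *cq;

	/* The core picks the completion vector, spreading CQs across
	 * the device's vectors instead of piling them onto vector 0. */
	cq = ib_alloc_cq_any(ibdev, drv_ctx, 128, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);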
@@ -39,7 +39,6 @@
 #include <linux/init.h>
 #include <linux/netdevice.h>
 #include <net/net_namespace.h>
-#include <net/netns/generic.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/hashtable.h>

@@ -111,17 +110,7 @@ static void ib_client_put(struct ib_client *client)
  */
 #define CLIENT_DATA_REGISTERED XA_MARK_1

-/**
- * struct rdma_dev_net - rdma net namespace metadata for a net
- * @net:	Pointer to owner net namespace
- * @id:		xarray id to identify the net namespace.
- */
-struct rdma_dev_net {
-	possible_net_t net;
-	u32 id;
-};
-
-static unsigned int rdma_dev_net_id;
+unsigned int rdma_dev_net_id;
 
 /*
  * A list of net namespaces is maintained in an xarray. This is necessary

@@ -514,6 +503,9 @@ static void ib_device_release(struct device *device)
 			  rcu_head);
 	}
 
+	mutex_destroy(&dev->unregistration_lock);
+	mutex_destroy(&dev->compat_devs_mutex);
+
 	xa_destroy(&dev->compat_devs);
 	xa_destroy(&dev->client_data);
 	kfree_rcu(dev, rcu_head);

@@ -1060,7 +1052,7 @@ int rdma_compatdev_set(u8 enable)
 
 static void rdma_dev_exit_net(struct net *net)
 {
-	struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
+	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
 	struct ib_device *dev;
 	unsigned long index;
 	int ret;

@@ -1094,25 +1086,32 @@ static void rdma_dev_exit_net(struct net *net)
 	}
 	up_read(&devices_rwsem);
 
+	rdma_nl_net_exit(rnet);
 	xa_erase(&rdma_nets, rnet->id);
 }
 
 static __net_init int rdma_dev_init_net(struct net *net)
 {
-	struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
+	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
 	unsigned long index;
 	struct ib_device *dev;
 	int ret;
 
+	write_pnet(&rnet->net, net);
+
+	ret = rdma_nl_net_init(rnet);
+	if (ret)
+		return ret;
+
 	/* No need to create any compat devices in default init_net. */
 	if (net_eq(net, &init_net))
 		return 0;
 
-	write_pnet(&rnet->net, net);
-
 	ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
-	if (ret)
+	if (ret) {
+		rdma_nl_net_exit(rnet);
 		return ret;
+	}
 
 	down_read(&devices_rwsem);
 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {

@@ -1974,6 +1973,74 @@ void ib_dispatch_event(struct ib_event *event)
 }
 EXPORT_SYMBOL(ib_dispatch_event);
 
+static int iw_query_port(struct ib_device *device,
+			 u8 port_num,
+			 struct ib_port_attr *port_attr)
+{
+	struct in_device *inetdev;
+	struct net_device *netdev;
+	int err;
+
+	memset(port_attr, 0, sizeof(*port_attr));
+
+	netdev = ib_device_get_netdev(device, port_num);
+	if (!netdev)
+		return -ENODEV;
+
+	dev_put(netdev);
+
+	port_attr->max_mtu = IB_MTU_4096;
+	port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
+
+	if (!netif_carrier_ok(netdev)) {
+		port_attr->state = IB_PORT_DOWN;
+		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+	} else {
+		inetdev = in_dev_get(netdev);
+
+		if (inetdev && inetdev->ifa_list) {
+			port_attr->state = IB_PORT_ACTIVE;
+			port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+			in_dev_put(inetdev);
+		} else {
+			port_attr->state = IB_PORT_INIT;
+			port_attr->phys_state =
+				IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
+		}
+	}
+
+	err = device->ops.query_port(device, port_num, port_attr);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int __ib_query_port(struct ib_device *device,
+			   u8 port_num,
+			   struct ib_port_attr *port_attr)
+{
+	union ib_gid gid = {};
+	int err;
+
+	memset(port_attr, 0, sizeof(*port_attr));
+
+	err = device->ops.query_port(device, port_num, port_attr);
+	if (err || port_attr->subnet_prefix)
+		return err;
+
+	if (rdma_port_get_link_layer(device, port_num) !=
+	    IB_LINK_LAYER_INFINIBAND)
+		return 0;
+
+	err = device->ops.query_gid(device, port_num, 0, &gid);
+	if (err)
+		return err;
+
+	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
+	return 0;
+}
+
 /**
  * ib_query_port - Query IB port attributes
  * @device:Device to query

@@ -1987,26 +2054,13 @@ int ib_query_port(struct ib_device *device,
 		  u8 port_num,
 		  struct ib_port_attr *port_attr)
 {
-	union ib_gid gid;
-	int err;
-
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	memset(port_attr, 0, sizeof(*port_attr));
-	err = device->ops.query_port(device, port_num, port_attr);
-	if (err || port_attr->subnet_prefix)
-		return err;
-
-	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
-		return 0;
-
-	err = device->ops.query_gid(device, port_num, 0, &gid);
-	if (err)
-		return err;
-
-	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
-	return 0;
+	if (rdma_protocol_iwarp(device, port_num))
+		return iw_query_port(device, port_num, port_attr);
+	else
+		return __ib_query_port(device, port_num, port_attr);
 }
 EXPORT_SYMBOL(ib_query_port);
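Note: with ib_query_port() dispatching to the shared iw_query_port() above, an iWarp driver's query_port callback only needs to fill in what the core cannot derive from the attached netdev. A hedged sketch with a hypothetical driver (field values illustrative only, not taken from any real driver):

static int mydrv_query_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *attr)
{
	/* state, MTU and phys_state were already set from the netdev
	 * by iw_query_port(); add device-specific limits only. */
	attr->gid_tbl_len = 1;
	attr->max_msg_sz = (u32)-1;
	return 0;
}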
@@ -2661,12 +2715,6 @@ static int __init ib_core_init(void)
 		goto err_comp_unbound;
 	}
 
-	ret = rdma_nl_init();
-	if (ret) {
-		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
-		goto err_sysfs;
-	}
-
 	ret = addr_init();
 	if (ret) {
 		pr_warn("Could't init IB address resolution\n");

@@ -2712,8 +2760,6 @@ err_mad:
 err_addr:
 	addr_cleanup();
-err_ibnl:
-	rdma_nl_exit();
 err_sysfs:
 	class_unregister(&ib_class);
 err_comp_unbound:
 	destroy_workqueue(ib_comp_unbound_wq);
@@ -148,13 +148,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 			hlist_del_init(&fmr->cache_node);
 			fmr->remap_count = 0;
 			list_add_tail(&fmr->fmr->list, &fmr_list);
-
-#ifdef DEBUG
-			if (fmr->ref_count !=0) {
-				pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
-					fmr, fmr->ref_count);
-			}
-#endif
 		}
 
 		list_splice_init(&pool->dirty_list, &unmap_list);

@@ -496,12 +489,6 @@ void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 		}
 	}
 
-#ifdef DEBUG
-	if (fmr->ref_count < 0)
-		pr_warn(PFX "FMR %p has ref count %d < 0\n",
-			fmr, fmr->ref_count);
-#endif
-
 	spin_unlock_irqrestore(&pool->pool_lock, flags);
 }
 EXPORT_SYMBOL(ib_fmr_pool_unmap);
@@ -112,7 +112,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
 	pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
 		__func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
 
-	ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
+	ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
 	if (ret) {
 		skb = NULL; /* skb is freed in the netlink send-op handling */
 		iwpm_user_pid = IWPM_PID_UNAVAILABLE;

@@ -124,8 +124,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
 	return ret;
 pid_query_error:
 	pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
-	if (skb)
-		dev_kfree_skb(skb);
+	dev_kfree_skb(skb);
 	if (nlmsg_request)
 		iwpm_free_nlmsg_request(&nlmsg_request->kref);
 	return ret;

@@ -202,7 +201,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 	nlmsg_end(skb, nlh);
 	nlmsg_request->req_buffer = pm_msg;
 
-	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+	ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
 	if (ret) {
 		skb = NULL; /* skb is freed in the netlink send-op handling */
 		iwpm_user_pid = IWPM_PID_UNDEFINED;

@@ -214,8 +213,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 add_mapping_error:
 	pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
 add_mapping_error_nowarn:
-	if (skb)
-		dev_kfree_skb(skb);
+	dev_kfree_skb(skb);
 	if (nlmsg_request)
 		iwpm_free_nlmsg_request(&nlmsg_request->kref);
 	return ret;

@@ -297,7 +295,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 	nlmsg_end(skb, nlh);
 	nlmsg_request->req_buffer = pm_msg;
 
-	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+	ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
 	if (ret) {
 		skb = NULL; /* skb is freed in the netlink send-op handling */
 		err_str = "Unable to send a nlmsg";

@@ -308,8 +306,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 query_mapping_error:
 	pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
 query_mapping_error_nowarn:
-	if (skb)
-		dev_kfree_skb(skb);
+	dev_kfree_skb(skb);
 	if (nlmsg_request)
 		iwpm_free_nlmsg_request(&nlmsg_request->kref);
 	return ret;

@@ -364,7 +361,7 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
 
 	nlmsg_end(skb, nlh);
 
-	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+	ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
 	if (ret) {
 		skb = NULL; /* skb is freed in the netlink send-op handling */
 		iwpm_user_pid = IWPM_PID_UNDEFINED;
@@ -645,7 +645,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
 
 	nlmsg_end(skb, nlh);
 
-	ret = rdma_nl_unicast(skb, iwpm_pid);
+	ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
 	if (ret) {
 		skb = NULL;
 		err_str = "Unable to send a nlmsg";

@@ -655,8 +655,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
 	return 0;
 mapinfo_num_error:
 	pr_info("%s: %s\n", __func__, err_str);
-	if (skb)
-		dev_kfree_skb(skb);
+	dev_kfree_skb(skb);
 	return ret;
 }

@@ -674,7 +673,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
 		return -ENOMEM;
 	}
 	nlh->nlmsg_type = NLMSG_DONE;
-	ret = rdma_nl_unicast(skb, iwpm_pid);
+	ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
 	if (ret)
 		pr_warn("%s Unable to send a nlmsg\n", __func__);
 	return ret;

@@ -778,8 +777,7 @@ send_mapping_info_unlock:
 send_mapping_info_exit:
 	if (ret) {
 		pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret);
-		if (skb)
-			dev_kfree_skb(skb);
+		dev_kfree_skb(skb);
 		return ret;
 	}
 	send_nlmsg_done(skb, nl_client, iwpm_pid);

@@ -824,7 +822,7 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
 		goto hello_num_error;
 	nlmsg_end(skb, nlh);
 
-	ret = rdma_nl_unicast(skb, iwpm_pid);
+	ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
 	if (ret) {
 		skb = NULL;
 		err_str = "Unable to send a nlmsg";

@@ -834,7 +832,6 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
 	return 0;
 hello_num_error:
 	pr_info("%s: %s\n", __func__, err_str);
-	if (skb)
-		dev_kfree_skb(skb);
+	dev_kfree_skb(skb);
 	return ret;
 }
@@ -36,20 +36,22 @@
 #include <linux/export.h>
 #include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/sock.h>
 #include <rdma/rdma_netlink.h>
 #include <linux/module.h>
 #include "core_priv.h"
 
 static DEFINE_MUTEX(rdma_nl_mutex);
-static struct sock *nls;
 static struct {
 	const struct rdma_nl_cbs *cb_table;
 } rdma_nl_types[RDMA_NL_NUM_CLIENTS];
 
 bool rdma_nl_chk_listeners(unsigned int group)
 {
-	return netlink_has_listeners(nls, group);
+	struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
+
+	return netlink_has_listeners(rnet->nl_sock, group);
 }
 EXPORT_SYMBOL(rdma_nl_chk_listeners);

@@ -73,13 +75,21 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
 	return (op < max_num_ops[type]) ? true : false;
 }
 
-static bool is_nl_valid(unsigned int type, unsigned int op)
+static bool
+is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
 {
 	const struct rdma_nl_cbs *cb_table;
 
 	if (!is_nl_msg_valid(type, op))
 		return false;
 
+	/*
+	 * Currently only NLDEV client is supporting netlink commands in
+	 * non init_net net namespace.
+	 */
+	if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
+		return false;
+
 	if (!rdma_nl_types[type].cb_table) {
 		mutex_unlock(&rdma_nl_mutex);
 		request_module("rdma-netlink-subsys-%d", type);

@@ -161,7 +171,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	unsigned int op = RDMA_NL_GET_OP(type);
 	const struct rdma_nl_cbs *cb_table;
 
-	if (!is_nl_valid(index, op))
+	if (!is_nl_valid(skb, index, op))
 		return -EINVAL;
 
 	cb_table = rdma_nl_types[index].cb_table;

@@ -185,7 +195,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 			.dump = cb_table[op].dump,
 		};
 		if (c.dump)
-			return netlink_dump_start(nls, skb, nlh, &c);
+			return netlink_dump_start(skb->sk, skb, nlh, &c);
 		return -EINVAL;
 	}

@@ -258,52 +268,65 @@ static void rdma_nl_rcv(struct sk_buff *skb)
 	mutex_unlock(&rdma_nl_mutex);
 }
 
-int rdma_nl_unicast(struct sk_buff *skb, u32 pid)
+int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
 {
+	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
 	int err;
 
-	err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
+	err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
 	return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(rdma_nl_unicast);
 
-int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid)
+int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
 {
+	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
 	int err;
 
-	err = netlink_unicast(nls, skb, pid, 0);
+	err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
 	return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(rdma_nl_unicast_wait);
 
-int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags)
+int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
+		      unsigned int group, gfp_t flags)
 {
-	return nlmsg_multicast(nls, skb, 0, group, flags);
+	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
+
+	return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
 }
 EXPORT_SYMBOL(rdma_nl_multicast);
 
-int __init rdma_nl_init(void)
-{
-	struct netlink_kernel_cfg cfg = {
-		.input = rdma_nl_rcv,
-	};
-
-	nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
-	if (!nls)
-		return -ENOMEM;
-
-	nls->sk_sndtimeo = 10 * HZ;
-	return 0;
-}
-
 void rdma_nl_exit(void)
 {
 	int idx;
 
 	for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
-		rdma_nl_unregister(idx);
+		WARN(rdma_nl_types[idx].cb_table,
+		     "Netlink client %d wasn't released prior to unloading %s\n",
+		     idx, KBUILD_MODNAME);
+}
 
-	netlink_kernel_release(nls);
+int rdma_nl_net_init(struct rdma_dev_net *rnet)
+{
+	struct net *net = read_pnet(&rnet->net);
+	struct netlink_kernel_cfg cfg = {
+		.input = rdma_nl_rcv,
+	};
+	struct sock *nls;
+
+	nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
+	if (!nls)
+		return -ENOMEM;
+
+	nls->sk_sndtimeo = 10 * HZ;
+	rnet->nl_sock = nls;
+	return 0;
+}
+
+void rdma_nl_net_exit(struct rdma_dev_net *rnet)
+{
+	netlink_kernel_release(rnet->nl_sock);
 }
 
 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
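Note: rdma_nl_net_init()/rdma_nl_net_exit() are driven from the per-namespace init/exit path shown in the device.c hunks above; the hookup follows the usual pernet_operations pattern. A sketch of its shape (the real structure lives in device.c as rdma_dev_net_ops; treat the exact field set here as an assumption):

static struct pernet_operations rdma_dev_net_ops = {
	.init = rdma_dev_init_net,
	.exit = rdma_dev_exit_net,
	.id = &rdma_dev_net_id,
	.size = sizeof(struct rdma_dev_net),
};

/* Registered once at module load; each namespace then gets its own
 * struct rdma_dev_net and its own NETLINK_RDMA kernel socket. */
ret = register_pernet_device(&rdma_dev_net_ops);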
@@ -831,7 +831,7 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 	nlmsg_end(msg, nlh);
 
 	ib_device_put(device);
-	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_free:
 	nlmsg_free(msg);

@@ -971,7 +971,7 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 	nlmsg_end(msg, nlh);
 	ib_device_put(device);
 
-	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_free:
 	nlmsg_free(msg);

@@ -1073,7 +1073,7 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	nlmsg_end(msg, nlh);
 	ib_device_put(device);
-	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_free:
 	nlmsg_free(msg);

@@ -1250,7 +1250,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	nlmsg_end(msg, nlh);
 	ib_device_put(device);
-	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_free:
 	nlmsg_free(msg);

@@ -1595,7 +1595,7 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
 	put_device(data.cdev);
 	if (ibdev)
 		ib_device_put(ibdev);
-	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 out_data:
 	put_device(data.cdev);

@@ -1635,7 +1635,7 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 		return err;
 	}
 	nlmsg_end(msg, nlh);
-	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 }
 
 static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,

@@ -1733,7 +1733,7 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	nlmsg_end(msg, nlh);
 	ib_device_put(device);
-	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_fill:
 	rdma_counter_unbind_qpn(device, port, qpn, cntn);

@@ -1801,7 +1801,7 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	nlmsg_end(msg, nlh);
 	ib_device_put(device);
-	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_fill:
 	rdma_counter_bind_qpn(device, port, qpn, cntn);

@@ -1892,7 +1892,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
 	mutex_unlock(&stats->lock);
 	nlmsg_end(msg, nlh);
 	ib_device_put(device);
-	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_table:
 	nla_nest_cancel(msg, table_attr);

@@ -1964,7 +1964,7 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	nlmsg_end(msg, nlh);
 	ib_device_put(device);
-	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_msg:
 	nlmsg_free(msg);
@@ -860,7 +860,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 	/* Repair the nlmsg header length */
 	nlmsg_end(skb, nlh);
 
-	return rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
+	return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
 }
 
 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
@@ -289,6 +289,24 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 		       ib_width_enum_to_int(attr.active_width), speed);
 }
 
+static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
+{
+	static const char * phys_state_str[] = {
+		"<unknown>",
+		"Sleep",
+		"Polling",
+		"Disabled",
+		"PortConfigurationTraining",
+		"LinkUp",
+		"LinkErrorRecovery",
+		"Phy Test",
+	};
+
+	if (phys_state < ARRAY_SIZE(phys_state_str))
+		return phys_state_str[phys_state];
+	return "<unknown>";
+}
+
 static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
 			       char *buf)
 {

@@ -300,16 +318,8 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
 	if (ret)
 		return ret;
 
-	switch (attr.phys_state) {
-	case 1: return sprintf(buf, "1: Sleep\n");
-	case 2: return sprintf(buf, "2: Polling\n");
-	case 3: return sprintf(buf, "3: Disabled\n");
-	case 4: return sprintf(buf, "4: PortConfigurationTraining\n");
-	case 5: return sprintf(buf, "5: LinkUp\n");
-	case 6: return sprintf(buf, "6: LinkErrorRecovery\n");
-	case 7: return sprintf(buf, "7: Phy Test\n");
-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
-	}
+	return sprintf(buf, "%d: %s\n", attr.phys_state,
+		       phys_state_to_str(attr.phys_state));
 }
 
 static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
@@ -218,7 +218,7 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp)
 	umem_odp->interval_tree.start =
 		ALIGN_DOWN(umem_odp->umem.address, page_size);
 	if (check_add_overflow(umem_odp->umem.address,
-			       umem_odp->umem.length,
+			       (unsigned long)umem_odp->umem.length,
 			       &umem_odp->interval_tree.last))
 		return -EOVERFLOW;
 	umem_odp->interval_tree.last =
@@ -1042,7 +1042,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
 		ib_unregister_mad_agent(file->agent[i]);
 
 	mutex_unlock(&file->port->file_mutex);
 
+	mutex_destroy(&file->mutex);
 	kfree(file);
 	return 0;
 }
@@ -3479,7 +3479,8 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 
 err_copy:
 	ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
-
+	/* It was released in ib_destroy_srq_user */
+	srq = NULL;
 err_free:
 	kfree(srq);
 err_put:
@@ -120,6 +120,8 @@ static void ib_uverbs_release_dev(struct device *device)
 
 	uverbs_destroy_api(dev->uapi);
 	cleanup_srcu_struct(&dev->disassociate_srcu);
+	mutex_destroy(&dev->lists_mutex);
+	mutex_destroy(&dev->xrcd_tree_mutex);
 	kfree(dev);
 }

@@ -212,6 +214,8 @@ void ib_uverbs_release_file(struct kref *ref)
 
 	if (file->disassociate_page)
 		__free_pages(file->disassociate_page, 0);
+	mutex_destroy(&file->umap_lock);
+	mutex_destroy(&file->ucontext_lock);
 	kfree(file);
 }
@@ -2259,6 +2259,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 		if (ret)
 			return ret;
 	}
+	mutex_destroy(&xrcd->tgt_qp_mutex);
 
 	return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
 }
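Note: many of the small fixes in this pull (cache.c, cma_configfs.c, counters.c, device.c, user_mad.c, uverbs_main.c and verbs.c above) apply one rule: a mutex_init() on a dynamically allocated object is paired with mutex_destroy() before the memory is freed, so mutex debugging (CONFIG_DEBUG_MUTEXES) can flag a lock that is still held or used late. A generic sketch of the pattern (struct foo and its helpers are hypothetical):

#include <linux/mutex.h>
#include <linux/slab.h>

struct foo {
	struct mutex lock;
};

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	mutex_init(&f->lock);
	return f;
}

static void foo_destroy(struct foo *f)
{
	mutex_destroy(&f->lock);	/* pairs with mutex_init() */
	kfree(f);
}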