Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma updates from Doug Ledford:

 - a large cleanup of how device capabilities are checked for various
   features

 - additional cleanups in the MAD processing

 - update to the srp driver

 - creation and use of centralized log message helpers

 - add const to a number of args to calls and clean up call chain

 - add support for extended cq create verb

 - add support for timestamps on cq completion

 - add support for processing OPA MAD packets

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (92 commits)
  IB/mad: Add final OPA MAD processing
  IB/mad: Add partial Intel OPA MAD support
  IB/mad: Add partial Intel OPA MAD support
  IB/core: Add OPA MAD core capability flag
  IB/mad: Add support for additional MAD info to/from drivers
  IB/mad: Convert allocations from kmem_cache to kzalloc
  IB/core: Add ability for drivers to report an alternate MAD size.
  IB/mad: Support alternate Base Versions when creating MADs
  IB/mad: Create a generic helper for DR forwarding checks
  IB/mad: Create a generic helper for DR SMP Recv processing
  IB/mad: Create a generic helper for DR SMP Send processing
  IB/mad: Split IB SMI handling from MAD Recv handler
  IB/mad cleanup: Generalize processing of MAD data
  IB/mad cleanup: Clean up function params -- find_mad_agent
  IB/mlx4: Add support for CQ time-stamping
  IB/mlx4: Add mmap call to map the hardware clock
  IB/core: Pass hardware specific data in query_device
  IB/core: Add timestamp_mask and hca_core_clock to query_device
  IB/core: Extend ib_uverbs_create_cq
  IB/core: Add CQ creation time-stamping flag
  ...
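Two API changes recur throughout the diffs below: ib_create_send_mad() grows a trailing base_version argument so callers can request OPA-sized MADs, and open-coded transport/link-layer tests are replaced by per-port rdma_cap_*() predicates. The following is a minimal sketch of the new calling conventions, not code from this series; agent, wc, device, and port_num stand in for a caller's own context.

/*
 * Sketch only: the extended ib_create_send_mad() signature used throughout
 * this series.  Plain IB callers pass IB_MGMT_BASE_VERSION; OPA-aware
 * callers echo the base version of the MAD they are responding to.
 */
struct ib_mad_send_buf *msg;

msg = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index,
			 0,			/* rmpp_active */
			 IB_MGMT_MAD_HDR,	/* hdr_len */
			 IB_MGMT_MAD_DATA,	/* data_len */
			 GFP_KERNEL,
			 IB_MGMT_BASE_VERSION);	/* new argument */
if (IS_ERR(msg))
	return PTR_ERR(msg);

/*
 * Capability checks: instead of switching on rdma_node_get_transport()
 * and rdma_port_get_link_layer(), code now asks a per-port question,
 * e.g. rdma_cap_ib_sa(), rdma_cap_ib_cm(), rdma_cap_ib_mcast().
 */
if (rdma_cap_ib_mad(device, port_num))
	/* set up MAD agents for this port ... */;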
@@ -457,8 +457,8 @@ static void resolve_cb(int status, struct sockaddr *src_addr,
complete(&((struct resolve_cb_context *)context)->comp);
}

int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
u16 *vlan_id)
int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid,
u8 *dmac, u16 *vlan_id)
{
int ret = 0;
struct rdma_dev_addr dev_addr;

@@ -54,7 +54,7 @@ static DEFINE_SPINLOCK(ib_agent_port_list_lock);
static LIST_HEAD(ib_agent_port_list);

static struct ib_agent_port_private *
__ib_get_agent_port(struct ib_device *device, int port_num)
__ib_get_agent_port(const struct ib_device *device, int port_num)
{
struct ib_agent_port_private *entry;

@@ -67,7 +67,7 @@ __ib_get_agent_port(struct ib_device *device, int port_num)
}

static struct ib_agent_port_private *
ib_get_agent_port(struct ib_device *device, int port_num)
ib_get_agent_port(const struct ib_device *device, int port_num)
{
struct ib_agent_port_private *entry;
unsigned long flags;
@@ -78,9 +78,9 @@ ib_get_agent_port(struct ib_device *device, int port_num)
return entry;
}

void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
struct ib_wc *wc, struct ib_device *device,
int port_num, int qpn)
void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,
const struct ib_wc *wc, const struct ib_device *device,
int port_num, int qpn, size_t resp_mad_len, bool opa)
{
struct ib_agent_port_private *port_priv;
struct ib_mad_agent *agent;
@@ -106,15 +106,20 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
return;
}

if (opa && mad_hdr->base_version != OPA_MGMT_BASE_VERSION)
resp_mad_len = IB_MGMT_MAD_SIZE;

send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_KERNEL);
IB_MGMT_MAD_HDR,
resp_mad_len - IB_MGMT_MAD_HDR,
GFP_KERNEL,
mad_hdr->base_version);
if (IS_ERR(send_buf)) {
dev_err(&device->dev, "ib_create_send_mad error\n");
goto err1;
}

memcpy(send_buf->mad, mad, sizeof *mad);
memcpy(send_buf->mad, mad_hdr, resp_mad_len);
send_buf->ah = ah;

if (device->node_type == RDMA_NODE_IB_SWITCH) {
@@ -156,7 +161,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
goto error1;
}

if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
if (rdma_cap_ib_smi(device, port_num)) {
/* Obtain send only MAD agent for SMI QP */
port_priv->agent[0] = ib_register_mad_agent(device, port_num,
IB_QPT_SMI, NULL, 0,

@@ -44,8 +44,8 @@ extern int ib_agent_port_open(struct ib_device *device, int port_num);

extern int ib_agent_port_close(struct ib_device *device, int port_num);

extern void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
struct ib_wc *wc, struct ib_device *device,
int port_num, int qpn);
extern void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,
const struct ib_wc *wc, const struct ib_device *device,
int port_num, int qpn, size_t resp_mad_len, bool opa);

#endif /* __AGENT_H_ */
@@ -58,17 +58,6 @@ struct ib_update_work {
u8 port_num;
};

static inline int start_port(struct ib_device *device)
{
return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static inline int end_port(struct ib_device *device)
{
return (device->node_type == RDMA_NODE_IB_SWITCH) ?
0 : device->phys_port_cnt;
}

int ib_get_cached_gid(struct ib_device *device,
u8 port_num,
int index,
@@ -78,12 +67,12 @@ int ib_get_cached_gid(struct ib_device *device,
unsigned long flags;
int ret = 0;

if (port_num < start_port(device) || port_num > end_port(device))
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;

read_lock_irqsave(&device->cache.lock, flags);

cache = device->cache.gid_cache[port_num - start_port(device)];
cache = device->cache.gid_cache[port_num - rdma_start_port(device)];

if (index < 0 || index >= cache->table_len)
ret = -EINVAL;
@@ -96,10 +85,10 @@ int ib_get_cached_gid(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
union ib_gid *gid,
u8 *port_num,
u16 *index)
int ib_find_cached_gid(struct ib_device *device,
const union ib_gid *gid,
u8 *port_num,
u16 *index)
{
struct ib_gid_cache *cache;
unsigned long flags;
@@ -112,11 +101,11 @@ int ib_find_cached_gid(struct ib_device *device,

read_lock_irqsave(&device->cache.lock, flags);

for (p = 0; p <= end_port(device) - start_port(device); ++p) {
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
cache = device->cache.gid_cache[p];
for (i = 0; i < cache->table_len; ++i) {
if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
*port_num = p + start_port(device);
*port_num = p + rdma_start_port(device);
if (index)
*index = i;
ret = 0;
@@ -140,12 +129,12 @@ int ib_get_cached_pkey(struct ib_device *device,
unsigned long flags;
int ret = 0;

if (port_num < start_port(device) || port_num > end_port(device))
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;

read_lock_irqsave(&device->cache.lock, flags);

cache = device->cache.pkey_cache[port_num - start_port(device)];
cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

if (index < 0 || index >= cache->table_len)
ret = -EINVAL;
@@ -169,12 +158,12 @@ int ib_find_cached_pkey(struct ib_device *device,
int ret = -ENOENT;
int partial_ix = -1;

if (port_num < start_port(device) || port_num > end_port(device))
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;

read_lock_irqsave(&device->cache.lock, flags);

cache = device->cache.pkey_cache[port_num - start_port(device)];
cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

*index = -1;

@@ -209,12 +198,12 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
int i;
int ret = -ENOENT;

if (port_num < start_port(device) || port_num > end_port(device))
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;

read_lock_irqsave(&device->cache.lock, flags);

cache = device->cache.pkey_cache[port_num - start_port(device)];
cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

*index = -1;

@@ -238,11 +227,11 @@ int ib_get_cached_lmc(struct ib_device *device,
unsigned long flags;
int ret = 0;

if (port_num < start_port(device) || port_num > end_port(device))
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;

read_lock_irqsave(&device->cache.lock, flags);
*lmc = device->cache.lmc_cache[port_num - start_port(device)];
*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
read_unlock_irqrestore(&device->cache.lock, flags);

return ret;
@@ -303,13 +292,13 @@ static void ib_cache_update(struct ib_device *device,

write_lock_irq(&device->cache.lock);

old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
old_gid_cache = device->cache.gid_cache [port - start_port(device)];
old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
old_gid_cache = device->cache.gid_cache [port - rdma_start_port(device)];

device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
device->cache.gid_cache [port - start_port(device)] = gid_cache;
device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;

device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;
device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

write_unlock_irq(&device->cache.lock);

@@ -363,14 +352,14 @@ static void ib_cache_setup_one(struct ib_device *device)

device->cache.pkey_cache =
kmalloc(sizeof *device->cache.pkey_cache *
(end_port(device) - start_port(device) + 1), GFP_KERNEL);
(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
device->cache.gid_cache =
kmalloc(sizeof *device->cache.gid_cache *
(end_port(device) - start_port(device) + 1), GFP_KERNEL);
(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);

device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
(end_port(device) -
start_port(device) + 1),
(rdma_end_port(device) -
rdma_start_port(device) + 1),
GFP_KERNEL);

if (!device->cache.pkey_cache || !device->cache.gid_cache ||
@@ -380,10 +369,10 @@ static void ib_cache_setup_one(struct ib_device *device)
goto err;
}

for (p = 0; p <= end_port(device) - start_port(device); ++p) {
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
device->cache.pkey_cache[p] = NULL;
device->cache.gid_cache [p] = NULL;
ib_cache_update(device, p + start_port(device));
ib_cache_update(device, p + rdma_start_port(device));
}

INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
@@ -394,7 +383,7 @@ static void ib_cache_setup_one(struct ib_device *device)
return;

err_cache:
for (p = 0; p <= end_port(device) - start_port(device); ++p) {
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
kfree(device->cache.pkey_cache[p]);
kfree(device->cache.gid_cache[p]);
}

@@ -412,7 +401,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
ib_unregister_event_handler(&device->cache.event_handler);
flush_workqueue(ib_wq);

for (p = 0; p <= end_port(device) - start_port(device); ++p) {
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
kfree(device->cache.pkey_cache[p]);
kfree(device->cache.gid_cache[p]);
}
@@ -267,7 +267,8 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
cm_id_priv->av.pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC);
GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
return PTR_ERR(m);
@@ -297,7 +298,8 @@ static int cm_alloc_response_msg(struct cm_port *port,

m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC);
GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
return PTR_ERR(m);
@@ -3759,11 +3761,9 @@ static void cm_add_one(struct ib_device *ib_device)
};
unsigned long flags;
int ret;
int count = 0;
u8 i;

if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
return;

cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
ib_device->phys_port_cnt, GFP_KERNEL);
if (!cm_dev)
@@ -3782,6 +3782,9 @@ static void cm_add_one(struct ib_device *ib_device)

set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;

port = kzalloc(sizeof *port, GFP_KERNEL);
if (!port)
goto error1;
@@ -3808,7 +3811,13 @@ static void cm_add_one(struct ib_device *ib_device)
ret = ib_modify_port(ib_device, i, 0, &port_modify);
if (ret)
goto error3;

count++;
}

if (!count)
goto free;

ib_set_client_data(ib_device, &cm_client, cm_dev);

write_lock_irqsave(&cm.device_lock, flags);
@@ -3824,11 +3833,15 @@ error1:
port_modify.set_port_cap_mask = 0;
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
while (--i) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;

port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
free:
device_unregister(cm_dev->device);
kfree(cm_dev);
}
@@ -3852,6 +3865,9 @@ static void cm_remove_one(struct ib_device *ib_device)
write_unlock_irqrestore(&cm.device_lock, flags);

for (i = 1; i <= ib_device->phys_port_cnt; i++) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;

port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
@@ -65,6 +65,34 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

static const char * const cma_events[] = {
[RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
[RDMA_CM_EVENT_ADDR_ERROR] = "address error",
[RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ",
[RDMA_CM_EVENT_ROUTE_ERROR] = "route error",
[RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request",
[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
[RDMA_CM_EVENT_CONNECT_ERROR] = "connect error",
[RDMA_CM_EVENT_UNREACHABLE] = "unreachable",
[RDMA_CM_EVENT_REJECTED] = "rejected",
[RDMA_CM_EVENT_ESTABLISHED] = "established",
[RDMA_CM_EVENT_DISCONNECTED] = "disconnected",
[RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
[RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join",
[RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error",
[RDMA_CM_EVENT_ADDR_CHANGE] = "address change",
[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};

const char *rdma_event_msg(enum rdma_cm_event_type event)
{
size_t index = event;

return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

@@ -349,18 +377,35 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
return ret;
}

static inline int cma_validate_port(struct ib_device *device, u8 port,
union ib_gid *gid, int dev_type)
{
u8 found_port;
int ret = -ENODEV;

if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
return ret;

if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
return ret;

ret = ib_find_cached_gid(device, gid, &found_port, NULL);
if (port != found_port)
return -ENODEV;

return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv,
struct rdma_id_private *listen_id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct cma_device *cma_dev;
union ib_gid gid, iboe_gid;
union ib_gid gid, iboe_gid, *gidp;
int ret = -ENODEV;
u8 port, found_port;
enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
u8 port;

if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB)
return -EINVAL;
@@ -370,41 +415,36 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,

memcpy(&gid, dev_addr->src_dev_addr +
rdma_addr_gid_offset(dev_addr), sizeof gid);
if (listen_id_priv &&
rdma_port_get_link_layer(listen_id_priv->id.device,
listen_id_priv->id.port_num) == dev_ll) {

if (listen_id_priv) {
cma_dev = listen_id_priv->cma_dev;
port = listen_id_priv->id.port_num;
if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
&found_port, NULL);
else
ret = ib_find_cached_gid(cma_dev->device, &gid,
&found_port, NULL);
gidp = rdma_protocol_roce(cma_dev->device, port) ?
&iboe_gid : &gid;

if (!ret && (port == found_port)) {
id_priv->id.port_num = found_port;
ret = cma_validate_port(cma_dev->device, port, gidp,
dev_addr->dev_type);
if (!ret) {
id_priv->id.port_num = port;
goto out;
}
}

list_for_each_entry(cma_dev, &dev_list, list) {
for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
if (listen_id_priv &&
listen_id_priv->cma_dev == cma_dev &&
listen_id_priv->id.port_num == port)
continue;
if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
else
ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);

if (!ret && (port == found_port)) {
id_priv->id.port_num = found_port;
goto out;
}
gidp = rdma_protocol_roce(cma_dev->device, port) ?
&iboe_gid : &gid;

ret = cma_validate_port(cma_dev->device, port, gidp,
dev_addr->dev_type);
if (!ret) {
id_priv->id.port_num = port;
goto out;
}
}
}
@@ -435,10 +475,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
pkey = ntohs(addr->sib_pkey);

list_for_each_entry(cur_dev, &dev_list, list) {
if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
continue;

for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
if (!rdma_cap_af_ib(cur_dev->device, p))
continue;

if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
continue;

@@ -633,10 +673,9 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
if (ret)
goto out;

if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
== RDMA_TRANSPORT_IB &&
rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
== IB_LINK_LAYER_ETHERNET) {
BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);

if (ret)
@@ -700,11 +739,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
int ret;
u16 pkey;

if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
IB_LINK_LAYER_INFINIBAND)
pkey = ib_addr_get_pkey(dev_addr);
else
if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
pkey = 0xffff;
else
pkey = ib_addr_get_pkey(dev_addr);

ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
pkey, &qp_attr->pkey_index);
@@ -735,8 +773,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
int ret = 0;

id_priv = container_of(id, struct rdma_id_private, id);
switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB:
if (rdma_cap_ib_cm(id->device, id->port_num)) {
if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
else
@@ -745,19 +782,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,

if (qp_attr->qp_state == IB_QPS_RTR)
qp_attr->rq_psn = id_priv->seq_num;
break;
case RDMA_TRANSPORT_IWARP:
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
if (!id_priv->cm_id.iw) {
qp_attr->qp_access_flags = 0;
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
qp_attr_mask);
break;
default:
} else
ret = -ENOSYS;
break;
}

return ret;
}
@@ -945,13 +978,9 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv)

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
case IB_LINK_LAYER_INFINIBAND:
if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
if (id_priv->query)
ib_sa_cancel_query(id_priv->query_id, id_priv->query);
break;
default:
break;
}
}

@@ -1023,17 +1052,12 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
mc = container_of(id_priv->mc_list.next,
struct cma_multicast, list);
list_del(&mc->list);
switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
case IB_LINK_LAYER_INFINIBAND:
if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
id_priv->id.port_num)) {
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
break;
case IB_LINK_LAYER_ETHERNET:
} else
kref_put(&mc->mcref, release_mc);
break;
default:
break;
}
}
}

@@ -1054,17 +1078,12 @@ void rdma_destroy_id(struct rdma_cm_id *id)
mutex_unlock(&id_priv->handler_mutex);

if (id_priv->cma_dev) {
switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB:
if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
ib_destroy_cm_id(id_priv->cm_id.ib);
break;
case RDMA_TRANSPORT_IWARP:
} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.iw)
iw_destroy_cm_id(id_priv->cm_id.iw);
break;
default:
break;
}
cma_leave_mc_groups(id_priv);
cma_release_dev(id_priv);
@@ -1610,6 +1629,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
if (IS_ERR(id))
return PTR_ERR(id);

id->tos = id_priv->tos;
id_priv->cm_id.iw = id;

memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
@@ -1642,8 +1662,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
struct rdma_cm_id *id;
int ret;

if (cma_family(id_priv) == AF_IB &&
rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
return;

id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
@@ -1984,26 +2003,15 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
return -EINVAL;

atomic_inc(&id_priv->refcount);
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
switch (rdma_port_get_link_layer(id->device, id->port_num)) {
case IB_LINK_LAYER_INFINIBAND:
ret = cma_resolve_ib_route(id_priv, timeout_ms);
break;
case IB_LINK_LAYER_ETHERNET:
ret = cma_resolve_iboe_route(id_priv);
break;
default:
ret = -ENOSYS;
}
break;
case RDMA_TRANSPORT_IWARP:
if (rdma_cap_ib_sa(id->device, id->port_num))
ret = cma_resolve_ib_route(id_priv, timeout_ms);
else if (rdma_protocol_roce(id->device, id->port_num))
ret = cma_resolve_iboe_route(id_priv);
else if (rdma_protocol_iwarp(id->device, id->port_num))
ret = cma_resolve_iw_route(id_priv, timeout_ms);
break;
default:
else
ret = -ENOSYS;
break;
}

if (ret)
goto err;
@@ -2045,7 +2053,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
mutex_lock(&lock);
list_for_each_entry(cur_dev, &dev_list, list) {
if (cma_family(id_priv) == AF_IB &&
rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
!rdma_cap_ib_cm(cur_dev->device, 1))
continue;

if (!cma_dev)
@@ -2077,7 +2085,7 @@ port_found:
goto out;

id_priv->id.route.addr.dev_addr.dev_type =
(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
(rdma_protocol_ib(cma_dev->device, p)) ?
ARPHRD_INFINIBAND : ARPHRD_ETHER;

rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2554,18 +2562,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)

id_priv->backlog = backlog;
if (id->device) {
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
if (rdma_cap_ib_cm(id->device, 1)) {
ret = cma_ib_listen(id_priv);
if (ret)
goto err;
break;
case RDMA_TRANSPORT_IWARP:
} else if (rdma_cap_iw_cm(id->device, 1)) {
ret = cma_iw_listen(id_priv, backlog);
if (ret)
goto err;
break;
default:
} else {
ret = -ENOSYS;
goto err;
}
@@ -2857,6 +2862,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);

cm_id->tos = id_priv->tos;
id_priv->cm_id.iw = cm_id;

memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
@@ -2901,20 +2907,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
id_priv->srq = conn_param->srq;
}

switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
if (rdma_cap_ib_cm(id->device, id->port_num)) {
if (id->qp_type == IB_QPT_UD)
ret = cma_resolve_ib_udp(id_priv, conn_param);
else
ret = cma_connect_ib(id_priv, conn_param);
break;
case RDMA_TRANSPORT_IWARP:
} else if (rdma_cap_iw_cm(id->device, id->port_num))
ret = cma_connect_iw(id_priv, conn_param);
break;
default:
else
ret = -ENOSYS;
break;
}
if (ret)
goto err;

@@ -3017,8 +3018,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
id_priv->srq = conn_param->srq;
}

switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
if (rdma_cap_ib_cm(id->device, id->port_num)) {
if (id->qp_type == IB_QPT_UD) {
if (conn_param)
ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
@@ -3034,14 +3034,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
else
ret = cma_rep_recv(id_priv);
}
break;
case RDMA_TRANSPORT_IWARP:
} else if (rdma_cap_iw_cm(id->device, id->port_num))
ret = cma_accept_iw(id_priv, conn_param);
break;
default:
else
ret = -ENOSYS;
break;
}

if (ret)
goto reject;
@@ -3085,8 +3081,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
if (!id_priv->cm_id.ib)
return -EINVAL;

switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
if (rdma_cap_ib_cm(id->device, id->port_num)) {
if (id->qp_type == IB_QPT_UD)
ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
private_data, private_data_len);
@@ -3094,15 +3089,12 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
ret = ib_send_cm_rej(id_priv->cm_id.ib,
IB_CM_REJ_CONSUMER_DEFINED, NULL,
0, private_data, private_data_len);
break;
case RDMA_TRANSPORT_IWARP:
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_reject(id_priv->cm_id.iw,
private_data, private_data_len);
break;
default:
} else
ret = -ENOSYS;
break;
}

return ret;
}
EXPORT_SYMBOL(rdma_reject);
@@ -3116,22 +3108,18 @@ int rdma_disconnect(struct rdma_cm_id *id)
if (!id_priv->cm_id.ib)
return -EINVAL;

switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
if (rdma_cap_ib_cm(id->device, id->port_num)) {
ret = cma_modify_qp_err(id_priv);
if (ret)
goto out;
/* Initiate or respond to a disconnect. */
if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
break;
case RDMA_TRANSPORT_IWARP:
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
break;
default:
} else
ret = -EINVAL;
break;
}

out:
return ret;
}
@@ -3377,24 +3365,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
list_add(&mc->list, &id_priv->mc_list);
spin_unlock(&id_priv->lock);

switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
switch (rdma_port_get_link_layer(id->device, id->port_num)) {
case IB_LINK_LAYER_INFINIBAND:
ret = cma_join_ib_multicast(id_priv, mc);
break;
case IB_LINK_LAYER_ETHERNET:
kref_init(&mc->mcref);
ret = cma_iboe_join_multicast(id_priv, mc);
break;
default:
ret = -EINVAL;
}
break;
default:
if (rdma_protocol_roce(id->device, id->port_num)) {
kref_init(&mc->mcref);
ret = cma_iboe_join_multicast(id_priv, mc);
} else if (rdma_cap_ib_mcast(id->device, id->port_num))
ret = cma_join_ib_multicast(id_priv, mc);
else
ret = -ENOSYS;
break;
}

if (ret) {
spin_lock_irq(&id_priv->lock);
@@ -3422,19 +3399,15 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
ib_detach_mcast(id->qp,
&mc->multicast.ib->rec.mgid,
be16_to_cpu(mc->multicast.ib->rec.mlid));
if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
switch (rdma_port_get_link_layer(id->device, id->port_num)) {
case IB_LINK_LAYER_INFINIBAND:
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
break;
case IB_LINK_LAYER_ETHERNET:
kref_put(&mc->mcref, release_mc);
break;
default:
break;
}
}

BUG_ON(id_priv->cma_dev->device != id->device);

if (rdma_cap_ib_mcast(id->device, id->port_num)) {
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
} else if (rdma_protocol_roce(id->device, id->port_num))
kref_put(&mc->mcref, release_mc);

return;
}
}
@@ -92,7 +92,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
IB_MANDATORY_FUNC(poll_cq),
IB_MANDATORY_FUNC(req_notify_cq),
IB_MANDATORY_FUNC(get_dma_mr),
IB_MANDATORY_FUNC(dereg_mr)
IB_MANDATORY_FUNC(dereg_mr),
IB_MANDATORY_FUNC(get_port_immutable)
};
int i;

@@ -151,18 +152,6 @@ static int alloc_name(char *name)
return 0;
}

static int start_port(struct ib_device *device)
{
return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static int end_port(struct ib_device *device)
{
return (device->node_type == RDMA_NODE_IB_SWITCH) ?
0 : device->phys_port_cnt;
}

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
@@ -222,42 +211,49 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
return 0;
}

static int read_port_table_lengths(struct ib_device *device)
static int verify_immutable(const struct ib_device *dev, u8 port)
{
struct ib_port_attr *tprops = NULL;
int num_ports, ret = -ENOMEM;
u8 port_index;
return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
rdma_max_mad_size(dev, port) != 0);
}

tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
if (!tprops)
goto out;
static int read_port_immutable(struct ib_device *device)
{
int ret = -ENOMEM;
u8 start_port = rdma_start_port(device);
u8 end_port = rdma_end_port(device);
u8 port;

num_ports = end_port(device) - start_port(device) + 1;

device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports,
GFP_KERNEL);
device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports,
GFP_KERNEL);
if (!device->pkey_tbl_len || !device->gid_tbl_len)
/**
 * device->port_immutable is indexed directly by the port number to make
 * access to this data as efficient as possible.
 *
 * Therefore port_immutable is declared as a 1 based array with
 * potential empty slots at the beginning.
 */
device->port_immutable = kzalloc(sizeof(*device->port_immutable)
* (end_port + 1),
GFP_KERNEL);
if (!device->port_immutable)
goto err;

for (port_index = 0; port_index < num_ports; ++port_index) {
ret = ib_query_port(device, port_index + start_port(device),
tprops);
for (port = start_port; port <= end_port; ++port) {
ret = device->get_port_immutable(device, port,
&device->port_immutable[port]);
if (ret)
goto err;
device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len;
device->gid_tbl_len[port_index] = tprops->gid_tbl_len;

if (verify_immutable(device, port)) {
ret = -EINVAL;
goto err;
}
}

ret = 0;
goto out;

err:
kfree(device->gid_tbl_len);
kfree(device->pkey_tbl_len);
kfree(device->port_immutable);
out:
kfree(tprops);
return ret;
}

@@ -294,9 +290,9 @@ int ib_register_device(struct ib_device *device,
spin_lock_init(&device->event_handler_lock);
spin_lock_init(&device->client_data_lock);

ret = read_port_table_lengths(device);
ret = read_port_immutable(device);
if (ret) {
printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n",
printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
device->name);
goto out;
}
@@ -305,8 +301,7 @@ int ib_register_device(struct ib_device *device,
if (ret) {
printk(KERN_WARNING "Couldn't register device %s with driver model\n",
device->name);
kfree(device->gid_tbl_len);
kfree(device->pkey_tbl_len);
kfree(device->port_immutable);
goto out;
}

@@ -348,9 +343,6 @@ void ib_unregister_device(struct ib_device *device)

list_del(&device->core_list);

kfree(device->gid_tbl_len);
kfree(device->pkey_tbl_len);

mutex_unlock(&device_mutex);

ib_device_unregister_sysfs(device);
@@ -558,7 +550,11 @@ EXPORT_SYMBOL(ib_dispatch_event);
int ib_query_device(struct ib_device *device,
struct ib_device_attr *device_attr)
{
return device->query_device(device, device_attr);
struct ib_udata uhw = {.outlen = 0, .inlen = 0};

memset(device_attr, 0, sizeof(*device_attr));

return device->query_device(device, device_attr, &uhw);
}
EXPORT_SYMBOL(ib_query_device);

@@ -575,7 +571,7 @@ int ib_query_port(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr)
{
if (port_num < start_port(device) || port_num > end_port(device))
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;

return device->query_port(device, port_num, port_attr);
@@ -653,7 +649,7 @@ int ib_modify_port(struct ib_device *device,
if (!device->modify_port)
return -ENOSYS;

if (port_num < start_port(device) || port_num > end_port(device))
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;

return device->modify_port(device, port_num, port_modify_mask,
@@ -676,8 +672,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
union ib_gid tmp_gid;
int ret, port, i;

for (port = start_port(device); port <= end_port(device); ++port) {
for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) {
for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
ret = ib_query_gid(device, port, i, &tmp_gid);
if (ret)
return ret;
@@ -709,7 +705,7 @@ int ib_find_pkey(struct ib_device *device,
u16 tmp_pkey;
int partial_ix = -1;

for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) {
for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
if (ret)
return ret;
(File diff suppressed because it is too large.)
@@ -41,6 +41,7 @@
#include <linux/workqueue.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/opa_smi.h>

#define IB_MAD_QPS_CORE 2 /* Always QP0 and QP1 as a minimum */

@@ -56,7 +57,7 @@

/* Registration table sizes */
#define MAX_MGMT_CLASS 80
#define MAX_MGMT_VERSION 8
#define MAX_MGMT_VERSION 0x83
#define MAX_MGMT_OUI 8
#define MAX_MGMT_VENDOR_RANGE2 (IB_MGMT_CLASS_VENDOR_RANGE2_END - \
IB_MGMT_CLASS_VENDOR_RANGE2_START + 1)
@@ -75,12 +76,9 @@ struct ib_mad_private_header {

struct ib_mad_private {
struct ib_mad_private_header header;
size_t mad_size;
struct ib_grh grh;
union {
struct ib_mad mad;
struct ib_rmpp_mad rmpp_mad;
struct ib_smp smp;
} mad;
u8 mad[0];
} __attribute__ ((packed));

struct ib_rmpp_segment {
@@ -150,6 +148,7 @@ struct ib_mad_local_private {
struct ib_mad_private *mad_priv;
struct ib_mad_agent_private *recv_mad_agent;
struct ib_mad_send_wr_private *mad_send_wr;
size_t return_wc_byte_len;
};

struct ib_mad_mgmt_method_table {
@@ -213,8 +212,8 @@ struct ib_mad_port_private {
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);

struct ib_mad_send_wr_private *
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_recv_wc *mad_recv_wc);
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
const struct ib_mad_recv_wc *mad_recv_wc);

void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
struct ib_mad_send_wc *mad_send_wc);
@@ -1,6 +1,7 @@
/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
@@ -67,6 +68,7 @@ struct mad_rmpp_recv {
u8 mgmt_class;
u8 class_version;
u8 method;
u8 base_version;
};

static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
@@ -139,7 +141,8 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
recv_wc->wc->pkey_index, 1, hdr_len,
0, GFP_KERNEL);
0, GFP_KERNEL,
IB_MGMT_BASE_VERSION);
if (IS_ERR(msg))
return;

@@ -165,7 +168,8 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
recv_wc->wc->pkey_index, 1,
hdr_len, 0, GFP_KERNEL);
hdr_len, 0, GFP_KERNEL,
IB_MGMT_BASE_VERSION);
if (IS_ERR(msg))
ib_destroy_ah(ah);
else {
@@ -316,6 +320,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
rmpp_recv->class_version = mad_hdr->class_version;
rmpp_recv->method = mad_hdr->method;
rmpp_recv->base_version = mad_hdr->base_version;
return rmpp_recv;

error: kfree(rmpp_recv);
@@ -431,14 +436,23 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
struct ib_rmpp_mad *rmpp_mad;
int hdr_size, data_size, pad;
bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,
rmpp_recv->agent->qp_info->port_priv->port_num);

rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
if (pad > IB_MGMT_RMPP_DATA || pad < 0)
pad = 0;
if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) {
data_size = sizeof(struct opa_rmpp_mad) - hdr_size;
pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
if (pad > OPA_MGMT_RMPP_DATA || pad < 0)
pad = 0;
} else {
data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
if (pad > IB_MGMT_RMPP_DATA || pad < 0)
pad = 0;
}

return hdr_size + rmpp_recv->seg_num * data_size - pad;
}
@@ -570,13 +584,14 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)

if (mad_send_wr->seg_num == 1) {
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA -
mad_send_wr->pad;
paylen = (mad_send_wr->send_buf.seg_count *
mad_send_wr->send_buf.seg_rmpp_size) -
mad_send_wr->pad;
}

if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad;
}
rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
@@ -780,8 +780,7 @@ static void mcast_event_handler(struct ib_event_handler *handler,
int index;

dev = container_of(handler, struct mcast_device, event_handler);
if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
IB_LINK_LAYER_INFINIBAND)
if (!rdma_cap_ib_mcast(dev->device, event->element.port_num))
return;

index = event->element.port_num - dev->start_port;
@@ -808,9 +807,6 @@ static void mcast_add_one(struct ib_device *device)
int i;
int count = 0;

if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;

dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
GFP_KERNEL);
if (!dev)
@@ -824,8 +820,7 @@ static void mcast_add_one(struct ib_device *device)
}

for (i = 0; i <= dev->end_port - dev->start_port; i++) {
if (rdma_port_get_link_layer(device, dev->start_port + i) !=
IB_LINK_LAYER_INFINIBAND)
if (!rdma_cap_ib_mcast(device, dev->start_port + i))
continue;
port = &dev->port[i];
port->dev = dev;
@@ -863,8 +858,7 @@ static void mcast_remove_one(struct ib_device *device)
flush_workqueue(mcast_wq);

for (i = 0; i <= dev->end_port - dev->start_port; i++) {
if (rdma_port_get_link_layer(device, dev->start_port + i) ==
IB_LINK_LAYER_INFINIBAND) {
if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
port = &dev->port[i];
deref_port(port);
wait_for_completion(&port->comp);
drivers/infiniband/core/opa_smi.h (new file, 78 lines)
@@ -0,0 +1,78 @@
/*
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef __OPA_SMI_H_
#define __OPA_SMI_H_

#include <rdma/ib_smi.h>
#include <rdma/opa_smi.h>

#include "smi.h"

enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
int port_num, int phys_port_cnt);
int opa_smi_get_fwd_port(struct opa_smp *smp);
extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
u8 node_type, int port_num);

/*
 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
 * via process_mad
 */
static inline enum smi_action opa_smi_check_local_smp(struct opa_smp *smp,
struct ib_device *device)
{
/* C14-9:3 -- We're at the end of the DR segment of path */
/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
return (device->process_mad &&
!opa_get_smp_direction(smp) &&
(smp->hop_ptr == smp->hop_cnt + 1)) ?
IB_SMI_HANDLE : IB_SMI_DISCARD;
}

/*
 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
 * via process_mad
 */
static inline enum smi_action opa_smi_check_local_returning_smp(struct opa_smp *smp,
struct ib_device *device)
{
/* C14-13:3 -- We're at the end of the DR segment of path */
/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
return (device->process_mad &&
opa_get_smp_direction(smp) &&
!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD;
}

#endif /* __OPA_SMI_H_ */
@@ -450,7 +450,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
struct ib_sa_port *port =
&sa_dev->port[event->element.port_num - sa_dev->start_port];

if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
if (!rdma_cap_ib_sa(handler->device, port->port_num))
return;

spin_lock_irqsave(&port->ah_lock, flags);
@@ -540,7 +540,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
ah_attr->port_num = port_num;
ah_attr->static_rate = rec->rate;

force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;
force_grh = rdma_cap_eth_ah(device, port_num);

if (rec->hop_limit > 1 || force_grh) {
ah_attr->ah_flags = IB_AH_GRH;
@@ -583,7 +583,8 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
query->mad_buf = ib_create_send_mad(query->port->agent, 1,
query->sm_ah->pkey_index,
0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
gfp_mask);
gfp_mask,
IB_MGMT_BASE_VERSION);
if (IS_ERR(query->mad_buf)) {
kref_put(&query->sm_ah->ref, free_sm_ah);
return -ENOMEM;
@@ -1153,9 +1154,7 @@ static void ib_sa_add_one(struct ib_device *device)
{
struct ib_sa_device *sa_dev;
int s, e, i;

if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
int count = 0;

if (device->node_type == RDMA_NODE_IB_SWITCH)
s = e = 0;
@@ -1175,7 +1174,7 @@ static void ib_sa_add_one(struct ib_device *device)

for (i = 0; i <= e - s; ++i) {
spin_lock_init(&sa_dev->port[i].ah_lock);
if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
if (!rdma_cap_ib_sa(device, i + 1))
continue;

sa_dev->port[i].sm_ah = NULL;
@@ -1189,8 +1188,13 @@ static void ib_sa_add_one(struct ib_device *device)
goto err;

INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

count++;
}

if (!count)
goto free;

ib_set_client_data(device, &sa_client, sa_dev);

/*
@@ -1204,19 +1208,20 @@ static void ib_sa_add_one(struct ib_device *device)
if (ib_register_event_handler(&sa_dev->event_handler))
goto err;

for (i = 0; i <= e - s; ++i)
if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
for (i = 0; i <= e - s; ++i) {
if (rdma_cap_ib_sa(device, i + 1))
update_sm_ah(&sa_dev->port[i].update_task);
}

return;

err:
while (--i >= 0)
if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
while (--i >= 0) {
if (rdma_cap_ib_sa(device, i + 1))
ib_unregister_mad_agent(sa_dev->port[i].agent);

}
free:
kfree(sa_dev);

return;
}

@@ -1233,7 +1238,7 @@ static void ib_sa_remove_one(struct ib_device *device)
flush_workqueue(ib_wq);

for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
if (rdma_cap_ib_sa(device, i + 1)) {
ib_unregister_mad_agent(sa_dev->port[i].agent);
if (sa_dev->port[i].sm_ah)
kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
@@ -5,6 +5,7 @@
 * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
@@ -38,85 +39,82 @@

#include <rdma/ib_smi.h>
#include "smi.h"
#include "opa_smi.h"

/*
 * Fixup a directed route SMP for sending
 * Return 0 if the SMP should be discarded
 */
enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
				       u8 node_type, int port_num)
static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
						u8 *hop_ptr, u8 hop_cnt,
						const u8 *initial_path,
						const u8 *return_path,
						u8 direction,
						bool dr_dlid_is_permissive,
						bool dr_slid_is_permissive)
{
	u8 hop_ptr, hop_cnt;

	hop_ptr = smp->hop_ptr;
	hop_cnt = smp->hop_cnt;

	/* See section 14.2.2.2, Vol 1 IB spec */
	/* C14-6 -- valid hop_cnt values are from 0 to 63 */
	if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
		return IB_SMI_DISCARD;

	if (!ib_get_smp_direction(smp)) {
	if (!direction) {
		/* C14-9:1 */
		if (hop_cnt && hop_ptr == 0) {
			smp->hop_ptr++;
			return (smp->initial_path[smp->hop_ptr] ==
		if (hop_cnt && *hop_ptr == 0) {
			(*hop_ptr)++;
			return (initial_path[*hop_ptr] ==
				port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-9:2 */
		if (hop_ptr && hop_ptr < hop_cnt) {
		if (*hop_ptr && *hop_ptr < hop_cnt) {
			if (node_type != RDMA_NODE_IB_SWITCH)
				return IB_SMI_DISCARD;

			/* smp->return_path set when received */
			smp->hop_ptr++;
			return (smp->initial_path[smp->hop_ptr] ==
			/* return_path set when received */
			(*hop_ptr)++;
			return (initial_path[*hop_ptr] ==
				port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-9:3 -- We're at the end of the DR segment of path */
		if (hop_ptr == hop_cnt) {
			/* smp->return_path set when received */
			smp->hop_ptr++;
		if (*hop_ptr == hop_cnt) {
			/* return_path set when received */
			(*hop_ptr)++;
			return (node_type == RDMA_NODE_IB_SWITCH ||
				smp->dr_dlid == IB_LID_PERMISSIVE ?
				dr_dlid_is_permissive ?
				IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
		/* C14-9:5 -- Fail unreasonable hop pointer */
		return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
		return (*hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);

	} else {
		/* C14-13:1 */
		if (hop_cnt && hop_ptr == hop_cnt + 1) {
			smp->hop_ptr--;
			return (smp->return_path[smp->hop_ptr] ==
		if (hop_cnt && *hop_ptr == hop_cnt + 1) {
			(*hop_ptr)--;
			return (return_path[*hop_ptr] ==
				port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-13:2 */
		if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
		if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
			if (node_type != RDMA_NODE_IB_SWITCH)
				return IB_SMI_DISCARD;

			smp->hop_ptr--;
			return (smp->return_path[smp->hop_ptr] ==
			(*hop_ptr)--;
			return (return_path[*hop_ptr] ==
				port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-13:3 -- at the end of the DR segment of path */
		if (hop_ptr == 1) {
			smp->hop_ptr--;
		if (*hop_ptr == 1) {
			(*hop_ptr)--;
			/* C14-13:3 -- SMPs destined for SM shouldn't be here */
			return (node_type == RDMA_NODE_IB_SWITCH ||
				smp->dr_slid == IB_LID_PERMISSIVE ?
				dr_slid_is_permissive ?
				IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-13:4 -- hop_ptr = 0 -> should have gone to SM */
		if (hop_ptr == 0)
		if (*hop_ptr == 0)
			return IB_SMI_HANDLE;

		/* C14-13:5 -- Check for unreasonable hop pointer */
@@ -125,105 +123,164 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
}

/*
 * Adjust information for a received SMP
 * Return 0 if the SMP should be dropped
 * Fixup a directed route SMP for sending
 * Return IB_SMI_DISCARD if the SMP should be discarded
 */
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
				       int port_num, int phys_port_cnt)
enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
				       u8 node_type, int port_num)
{
	u8 hop_ptr, hop_cnt;
	return __smi_handle_dr_smp_send(node_type, port_num,
					&smp->hop_ptr, smp->hop_cnt,
					smp->initial_path,
					smp->return_path,
					ib_get_smp_direction(smp),
					smp->dr_dlid == IB_LID_PERMISSIVE,
					smp->dr_slid == IB_LID_PERMISSIVE);
}

	hop_ptr = smp->hop_ptr;
	hop_cnt = smp->hop_cnt;
enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
					   u8 node_type, int port_num)
{
	return __smi_handle_dr_smp_send(node_type, port_num,
					&smp->hop_ptr, smp->hop_cnt,
					smp->route.dr.initial_path,
					smp->route.dr.return_path,
					opa_get_smp_direction(smp),
					smp->route.dr.dr_dlid ==
					OPA_LID_PERMISSIVE,
					smp->route.dr.dr_slid ==
					OPA_LID_PERMISSIVE);
}
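Both send wrappers above reduce their wire format to the same scalar view of a directed route: the hop pointer by reference, the two path arrays, the direction bit, and whether dr_dlid/dr_slid are the permissive LID. A caller only has to act on the verdict; a sketch of the typical shape (the surrounding names are illustrative, not quoted from mad.c):

	if (smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
	    IB_SMI_DISCARD) {
		ret = -EINVAL;	/* SMP failed the C14-9/C14-13 checks: drop it */
		goto out;
	}
	/* hop_ptr has been advanced; the SMP can go out on the wire */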
static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
						int phys_port_cnt,
						u8 *hop_ptr, u8 hop_cnt,
						const u8 *initial_path,
						u8 *return_path,
						u8 direction,
						bool dr_dlid_is_permissive,
						bool dr_slid_is_permissive)
{
	/* See section 14.2.2.2, Vol 1 IB spec */
	/* C14-6 -- valid hop_cnt values are from 0 to 63 */
	if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
		return IB_SMI_DISCARD;

	if (!ib_get_smp_direction(smp)) {
	if (!direction) {
		/* C14-9:1 -- sender should have incremented hop_ptr */
		if (hop_cnt && hop_ptr == 0)
		if (hop_cnt && *hop_ptr == 0)
			return IB_SMI_DISCARD;

		/* C14-9:2 -- intermediate hop */
		if (hop_ptr && hop_ptr < hop_cnt) {
		if (*hop_ptr && *hop_ptr < hop_cnt) {
			if (node_type != RDMA_NODE_IB_SWITCH)
				return IB_SMI_DISCARD;

			smp->return_path[hop_ptr] = port_num;
			/* smp->hop_ptr updated when sending */
			return (smp->initial_path[hop_ptr+1] <= phys_port_cnt ?
			return_path[*hop_ptr] = port_num;
			/* hop_ptr updated when sending */
			return (initial_path[*hop_ptr+1] <= phys_port_cnt ?
				IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-9:3 -- We're at the end of the DR segment of path */
		if (hop_ptr == hop_cnt) {
		if (*hop_ptr == hop_cnt) {
			if (hop_cnt)
				smp->return_path[hop_ptr] = port_num;
			/* smp->hop_ptr updated when sending */
				return_path[*hop_ptr] = port_num;
			/* hop_ptr updated when sending */

			return (node_type == RDMA_NODE_IB_SWITCH ||
				smp->dr_dlid == IB_LID_PERMISSIVE ?
				dr_dlid_is_permissive ?
				IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
		/* C14-9:5 -- fail unreasonable hop pointer */
		return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
		return (*hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);

	} else {

		/* C14-13:1 */
		if (hop_cnt && hop_ptr == hop_cnt + 1) {
			smp->hop_ptr--;
			return (smp->return_path[smp->hop_ptr] ==
		if (hop_cnt && *hop_ptr == hop_cnt + 1) {
			(*hop_ptr)--;
			return (return_path[*hop_ptr] ==
				port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-13:2 */
		if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
		if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
			if (node_type != RDMA_NODE_IB_SWITCH)
				return IB_SMI_DISCARD;

			/* smp->hop_ptr updated when sending */
			return (smp->return_path[hop_ptr-1] <= phys_port_cnt ?
			/* hop_ptr updated when sending */
			return (return_path[*hop_ptr-1] <= phys_port_cnt ?
				IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-13:3 -- We're at the end of the DR segment of path */
		if (hop_ptr == 1) {
			if (smp->dr_slid == IB_LID_PERMISSIVE) {
		if (*hop_ptr == 1) {
			if (dr_slid_is_permissive) {
				/* giving SMP to SM - update hop_ptr */
				smp->hop_ptr--;
				(*hop_ptr)--;
				return IB_SMI_HANDLE;
			}
			/* smp->hop_ptr updated when sending */
			/* hop_ptr updated when sending */
			return (node_type == RDMA_NODE_IB_SWITCH ?
				IB_SMI_HANDLE : IB_SMI_DISCARD);
		}

		/* C14-13:4 -- hop_ptr = 0 -> give to SM */
		/* C14-13:5 -- Check for unreasonable hop pointer */
		return (hop_ptr == 0 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
		return (*hop_ptr == 0 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
	}
}

enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
/*
 * Adjust information for a received SMP
 * Return IB_SMI_DISCARD if the SMP should be dropped
 */
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
				       int port_num, int phys_port_cnt)
{
	u8 hop_ptr, hop_cnt;
	return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
					&smp->hop_ptr, smp->hop_cnt,
					smp->initial_path,
					smp->return_path,
					ib_get_smp_direction(smp),
					smp->dr_dlid == IB_LID_PERMISSIVE,
					smp->dr_slid == IB_LID_PERMISSIVE);
}

	hop_ptr = smp->hop_ptr;
	hop_cnt = smp->hop_cnt;
/*
 * Adjust information for a received SMP
 * Return IB_SMI_DISCARD if the SMP should be dropped
 */
enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
					   int port_num, int phys_port_cnt)
{
	return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
					&smp->hop_ptr, smp->hop_cnt,
					smp->route.dr.initial_path,
					smp->route.dr.return_path,
					opa_get_smp_direction(smp),
					smp->route.dr.dr_dlid ==
					OPA_LID_PERMISSIVE,
					smp->route.dr.dr_slid ==
					OPA_LID_PERMISSIVE);
}

	if (!ib_get_smp_direction(smp)) {
static enum smi_forward_action __smi_check_forward_dr_smp(u8 hop_ptr, u8 hop_cnt,
							   u8 direction,
							   bool dr_dlid_is_permissive,
							   bool dr_slid_is_permissive)
{
	if (!direction) {
		/* C14-9:2 -- intermediate hop */
		if (hop_ptr && hop_ptr < hop_cnt)
			return IB_SMI_FORWARD;

		/* C14-9:3 -- at the end of the DR segment of path */
		if (hop_ptr == hop_cnt)
			return (smp->dr_dlid == IB_LID_PERMISSIVE ?
			return (dr_dlid_is_permissive ?
				IB_SMI_SEND : IB_SMI_LOCAL);

		/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
@@ -236,10 +293,29 @@ enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)

	/* C14-13:3 -- at the end of the DR segment of path */
	if (hop_ptr == 1)
		return (smp->dr_slid != IB_LID_PERMISSIVE ?
		return (!dr_slid_is_permissive ?
			IB_SMI_SEND : IB_SMI_LOCAL);
	}
	return IB_SMI_LOCAL;

}

enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
{
	return __smi_check_forward_dr_smp(smp->hop_ptr, smp->hop_cnt,
					  ib_get_smp_direction(smp),
					  smp->dr_dlid == IB_LID_PERMISSIVE,
					  smp->dr_slid == IB_LID_PERMISSIVE);
}

enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp)
{
	return __smi_check_forward_dr_smp(smp->hop_ptr, smp->hop_cnt,
					  opa_get_smp_direction(smp),
					  smp->route.dr.dr_dlid ==
					  OPA_LID_PERMISSIVE,
					  smp->route.dr.dr_slid ==
					  OPA_LID_PERMISSIVE);
}

/*
@@ -251,3 +327,13 @@ int smi_get_fwd_port(struct ib_smp *smp)
	return (!ib_get_smp_direction(smp) ? smp->initial_path[smp->hop_ptr+1] :
		smp->return_path[smp->hop_ptr-1]);
}

/*
 * Return the forwarding port number from initial_path for outgoing SMP and
 * from return_path for returning SMP
 */
int opa_smi_get_fwd_port(struct opa_smp *smp)
{
	return !opa_get_smp_direction(smp) ? smp->route.dr.initial_path[smp->hop_ptr+1] :
		smp->route.dr.return_path[smp->hop_ptr-1];
}
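Taken together, the check/forward pair gives a switch a complete routing decision for an OPA SMP. A sketch of how a consumer might act on it (the variable names are illustrative):

	switch (opa_smi_check_forward_dr_smp(smp)) {
	case IB_SMI_FORWARD:
		/* still inside the DR segment: relay out the next hop's port */
		port = opa_smi_get_fwd_port(smp);
		break;
	case IB_SMI_SEND:
		/* end of the DR segment: LID-route the SMP from here */
		break;
	case IB_SMI_LOCAL:
		/* destined for the local SMA/SM */
		break;
	}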
@@ -326,6 +326,8 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
	int width = (tab_attr->index >> 16) & 0xff;
	struct ib_mad *in_mad = NULL;
	struct ib_mad *out_mad = NULL;
	size_t mad_size = sizeof(*out_mad);
	u16 out_mad_pkey_index = 0;
	ssize_t ret;

	if (!p->ibdev->process_mad)
@@ -347,7 +349,10 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
	in_mad->data[41] = p->port_num;	/* PortSelect field */

	if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY,
		 p->port_num, NULL, NULL, in_mad, out_mad) &
		 p->port_num, NULL, NULL,
		 (const struct ib_mad_hdr *)in_mad, mad_size,
		 (struct ib_mad_hdr *)out_mad, &mad_size,
		 &out_mad_pkey_index) &
	     (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
	    (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
		ret = -EINVAL;
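The call above reflects the widened process_mad hook, which now passes the MAD as a bare ib_mad_hdr plus an explicit size in each direction so OPA-sized MADs can share the path. A stub with the shape a driver hook now takes (example_process_mad is a hypothetical name; the prototype is inferred from the call site above):

	static int example_process_mad(struct ib_device *ibdev, int mad_flags,
				       u8 port_num, const struct ib_wc *in_wc,
				       const struct ib_grh *in_grh,
				       const struct ib_mad_hdr *in, size_t in_mad_size,
				       struct ib_mad_hdr *out, size_t *out_mad_size,
				       u16 *out_mad_pkey_index)
	{
		/* consume *in, build the reply in *out, shrink *out_mad_size if needed */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
	}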
@@ -456,6 +461,7 @@ static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	kfree(dev->port_immutable);
	kfree(dev);
}
@@ -1253,8 +1253,7 @@ static void ib_ucm_add_one(struct ib_device *device)
	dev_t base;
	struct ib_ucm_device *ucm_dev;

	if (!device->alloc_ucontext ||
	    rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
	if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1))
		return;

	ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
@@ -722,26 +722,13 @@ static ssize_t ucma_query_route(struct ucma_file *file,

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(ctx->cm_id->device,
			ctx->cm_id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
			break;
		default:
			break;
		}
		break;
	case RDMA_TRANSPORT_IWARP:

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
@@ -99,7 +99,6 @@ struct ib_umad_port {
};

struct ib_umad_device {
	int start_port, end_port;
	struct kobject kobj;
	struct ib_umad_port port[0];
};
@@ -263,20 +262,23 @@ static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;
	size_t seg_size;

	recv_buf = &packet->recv_wc->recv_buf;
	seg_size = packet->recv_wc->mad_seg_size;

	/* We need enough room to copy the first (or only) MAD segment. */
	recv_buf = &packet->recv_wc->recv_buf;
	if ((packet->length <= sizeof (*recv_buf->mad) &&
	if ((packet->length <= seg_size &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + sizeof (*recv_buf->mad)))
	    (packet->length > seg_size &&
	     count < hdr_size(file) + seg_size))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);
	seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
	seg_payload = min_t(int, packet->length, seg_size);
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

@@ -293,7 +295,7 @@ static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			return -ENOSPC;
		}
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = sizeof (struct ib_mad) - offset;
		max_seg_payload = seg_size - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
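Sizing now follows the receive buffer's mad_seg_size instead of the fixed sizeof(struct ib_mad), so OPA's larger MADs segment correctly. As a purely illustrative calculation (the 2048-byte segment size and 64-byte class data offset are assumed numbers, not values from the patch): a 5000-byte RMPP MAD copies 2048 bytes for the first segment, then the remaining 2952 bytes in chunks of max_seg_payload = 2048 - 64 = 1984, i.e. two further iterations (1984 + 968).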
@@ -426,11 +428,11 @@ static int is_duplicate(struct ib_umad_file *file,
	 * the same TID, reject the second as a duplicate. This is more
	 * restrictive than required by the spec.
	 */
	if (!ib_response_mad((struct ib_mad *) hdr)) {
		if (!ib_response_mad((struct ib_mad *) sent_hdr))
	if (!ib_response_mad(hdr)) {
		if (!ib_response_mad(sent_hdr))
			return 1;
		continue;
	} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
	} else if (!ib_response_mad(sent_hdr))
		continue;

	if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
@@ -451,6 +453,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;
	u8 base_version;

	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;
@@ -517,11 +520,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
		rmpp_active = 0;
	}

	base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version;
	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL);
					 hdr_len, data_len, GFP_KERNEL,
					 base_version);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
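ib_create_send_mad() now takes the base version as its final argument, so one agent can emit both IB and OPA MADs; user_mad simply forwards whatever base version userspace wrote into the header. A sketch of an in-kernel caller pinned to the classic IB base version (the surrounding variables are illustrative):

	msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
				 0 /* no RMPP */, IB_MGMT_MAD_HDR,
				 IB_MGMT_MAD_DATA, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		return PTR_ERR(msg);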
@@ -1273,16 +1278,10 @@ static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;
	int count = 0;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}
	s = rdma_start_port(device);
	e = rdma_end_port(device);

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
@@ -1292,25 +1291,34 @@ static void ib_umad_add_one(struct ib_device *device)

	kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);

	umad_dev->start_port = s;
	umad_dev->end_port = e;

	for (i = s; i <= e; ++i) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, umad_dev,
				      &umad_dev->port[i - s]))
			goto err;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s)
		ib_umad_kill_port(&umad_dev->port[i - s]);
	while (--i >= s) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		ib_umad_kill_port(&umad_dev->port[i - s]);
	}
free:
	kobject_put(&umad_dev->kobj);
}

@@ -1322,8 +1330,10 @@ static void ib_umad_remove_one(struct ib_device *device)
	if (!umad_dev)
		return;

	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
		ib_umad_kill_port(&umad_dev->port[i]);
	for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
		if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
			ib_umad_kill_port(&umad_dev->port[i]);
	}

	kobject_put(&umad_dev->kobj);
}
@@ -259,5 +259,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
IB_UVERBS_DECLARE_EX_CMD(create_flow);
IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
IB_UVERBS_DECLARE_EX_CMD(query_device);
IB_UVERBS_DECLARE_EX_CMD(create_cq);

#endif /* UVERBS_H */
@@ -1330,40 +1330,37 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
	return in_len;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata udata;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;
	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
@@ -1376,9 +1373,14 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, &attr,
					     file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
@@ -1397,14 +1399,15 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe = cq->cqe;
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}
	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
@@ -1414,9 +1417,9 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,

	up_write(&obj->uobject.mutex);

	return in_len;
	return obj;

err_copy:
err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
@@ -1428,7 +1431,106 @@ err_file:

err:
	put_uobj_write(&obj->uobject);
	return ret;

	return ERR_PTR(ret);
}

static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
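The cmd_sz guard is what makes the shared create_cq() safe for both entry points: an optional field is read only when the caller's command structure is provably large enough to contain it. Future optional fields would follow the same stanza; a sketch (the field 'foo' is hypothetical):

	/* read cmd->foo only if userspace actually sent it */
	if (cmd_sz > offsetof(typeof(*cmd), foo) + sizeof(cmd->foo))
		attr.foo = cmd->foo;	/* 'foo' is a hypothetical extension */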
@@ -3324,7 +3426,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = device->query_device(device, &attr);
	memset(&attr, 0, sizeof(attr));

	err = device->query_device(device, &attr, uhw);
	if (err)
		return err;

@@ -3348,6 +3452,18 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	if (err)
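On the response side the same forward-compatibility idea runs in reverse: each optional attribute is appended only when the caller's output buffer can hold it, and response_length tells userspace how much was actually filled in. A future attribute would repeat the identical stanza (resp.new_field and attr.new_field are hypothetical):

	if (ucore->outlen < resp.response_length + sizeof(resp.new_field))
		goto end;

	resp.new_field = attr.new_field;	/* hypothetical next extension */
	resp.response_length += sizeof(resp.new_field);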
@@ -124,6 +124,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
	[IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow,
	[IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow,
	[IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device,
	[IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq,
};

static void ib_uverbs_add_one(struct ib_device *device);
@@ -48,6 +48,71 @@

#include "core_priv.h"

static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR] = "CQ error",
	[IB_EVENT_QP_FATAL] = "QP fatal error",
	[IB_EVENT_QP_REQ_ERR] = "QP request error",
	[IB_EVENT_QP_ACCESS_ERR] = "QP access error",
	[IB_EVENT_COMM_EST] = "communication established",
	[IB_EVENT_SQ_DRAINED] = "send queue drained",
	[IB_EVENT_PATH_MIG] = "path migration successful",
	[IB_EVENT_PATH_MIG_ERR] = "path migration error",
	[IB_EVENT_DEVICE_FATAL] = "device fatal error",
	[IB_EVENT_PORT_ACTIVE] = "port active",
	[IB_EVENT_PORT_ERR] = "port error",
	[IB_EVENT_LID_CHANGE] = "LID change",
	[IB_EVENT_PKEY_CHANGE] = "P_key change",
	[IB_EVENT_SM_CHANGE] = "SM change",
	[IB_EVENT_SRQ_ERR] = "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER] = "client reregister",
	[IB_EVENT_GID_CHANGE] = "GID changed",
};

const char *ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);

static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS] = "success",
	[IB_WC_LOC_LEN_ERR] = "local length error",
	[IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
	[IB_WC_LOC_PROT_ERR] = "local protection error",
	[IB_WC_WR_FLUSH_ERR] = "WR flushed",
	[IB_WC_MW_BIND_ERR] = "memory management operation error",
	[IB_WC_BAD_RESP_ERR] = "bad response error",
	[IB_WC_LOC_ACCESS_ERR] = "local access error",
	[IB_WC_REM_INV_REQ_ERR] = "invalid request error",
	[IB_WC_REM_ACCESS_ERR] = "remote access error",
	[IB_WC_REM_OP_ERR] = "remote operation error",
	[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR] = "operation aborted",
	[IB_WC_INV_EECN_ERR] = "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
	[IB_WC_FATAL_ERR] = "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
	[IB_WC_GENERAL_ERR] = "general error",
};

const char *ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
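These lookup tables back the new centralized log message helpers, so drivers and ULPs can print one consistent string per event or completion status instead of rolling their own. Typical use, sketched (example_qp_event is a hypothetical handler):

	static void example_qp_event(struct ib_event *event, void *ctx)
	{
		pr_err("QP event %s (%d)\n",
		       ib_event_msg(event->event), event->event);
	}

	/* ...and on the completion path: */
	if (wc->status != IB_WC_SUCCESS)
		pr_err("send failed: %s (%d)\n",
		       ib_wc_status_msg(wc->status), wc->status);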
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
@@ -192,17 +257,16 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	int is_eth = (rdma_port_get_link_layer(device, port_num) ==
			IB_LINK_LAYER_ETHERNET);

	memset(ah_attr, 0, sizeof *ah_attr);
	if (is_eth) {
	if (rdma_cap_eth_ah(device, port_num)) {
		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

@@ -244,8 +308,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;
@@ -871,7 +935,7 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
	union ib_gid sgid;

	if ((*qp_attr_mask & IB_QP_AV) &&
	    (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
	    (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) {
		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
				   qp_attr->ah_attr.grh.sgid_index, &sgid);
		if (ret)
@@ -1012,11 +1076,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);
	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device = device;
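With the attributes bundled into ib_cq_init_attr, kernel callers now request CQ features declaratively, and the completion time-stamping flag added by this series rides in the same struct. A sketch of a consumer (the sizes, handlers, and context are illustrative):

	struct ib_cq_init_attr cq_attr = {
		.cqe = 256,				/* minimum number of CQ entries */
		.comp_vector = 0,
		.flags = IB_CQ_FLAGS_TIMESTAMP_COMPLETION,	/* ask for completion timestamps */
	};
	struct ib_cq *cq;

	cq = ib_create_cq(device, my_comp_handler, my_event_handler, ctx, &cq_attr);
	if (IS_ERR(cq))
		return PTR_ERR(cq);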