Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband/rdma updates from Roland Dreier:

 - Re-enable flow steering verbs with new improved userspace ABI

 - Fixes for slow connection due to GID lookup scalability

 - IPoIB fixes

 - Many fixes to HW drivers including mlx4, mlx5, ocrdma and qib

 - Further improvements to SRP error handling

 - Add new transport type for Cisco usNIC

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (66 commits)
  IB/core: Re-enable create_flow/destroy_flow uverbs
  IB/core: extended command: an improved infrastructure for uverbs commands
  IB/core: Remove ib_uverbs_flow_spec structure from userspace
  IB/core: Use a common header for uverbs flow_specs
  IB/core: Make uverbs flow structure use names like verbs ones
  IB/core: Rename 'flow' structs to match other uverbs structs
  IB/core: clarify overflow/underflow checks on ib_create/destroy_flow
  IB/ucma: Convert use of typedef ctl_table to struct ctl_table
  IB/cm: Convert to using idr_alloc_cyclic()
  IB/mlx5: Fix page shift in create CQ for userspace
  IB/mlx4: Fix device max capabilities check
  IB/mlx5: Fix list_del of empty list
  IB/mlx5: Remove dead code
  IB/core: Encorce MR access rights rules on kernel consumers
  IB/mlx4: Fix endless loop in resize CQ
  RDMA/cma: Remove unused argument and minor dead code
  RDMA/ucma: Discard events for IDs not yet claimed by user space
  IB/core: Add Cisco usNIC rdma node and transport types
  RDMA/nes: Remove self-assignment from nes_query_qp()
  IB/srp: Report receive errors correctly
  ...
@@ -101,6 +101,7 @@ enum {
IPOIB_MCAST_FLAG_SENDONLY = 1,
IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
IPOIB_MCAST_FLAG_ATTACHED = 3,
IPOIB_MCAST_JOIN_STARTED = 4,

MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
@@ -151,6 +152,7 @@ struct ipoib_mcast {
struct sk_buff_head pkt_queue;

struct net_device *dev;
struct completion done;
};

struct ipoib_rx_buf {
@@ -299,7 +301,7 @@ struct ipoib_dev_priv {

unsigned long flags;

struct mutex vlan_mutex;
struct rw_semaphore vlan_rwsem;

struct rb_root path_tree;
struct list_head path_list;
@@ -140,7 +140,8 @@ static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring,
int id, int frags,
u64 mapping[IPOIB_CM_RX_SG])
u64 mapping[IPOIB_CM_RX_SG],
gfp_t gfp)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
@@ -164,7 +165,7 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
}

for (i = 0; i < frags; i++) {
struct page *page = alloc_page(GFP_ATOMIC);
struct page *page = alloc_page(gfp);

if (!page)
goto partial_error;
@@ -382,7 +383,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i

for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
rx->rx_ring[i].mapping)) {
rx->rx_ring[i].mapping,
GFP_KERNEL)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
ret = -ENOMEM;
goto err_count;
@@ -639,7 +641,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
(unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
mapping, GFP_ATOMIC);
if (unlikely(!newskb)) {
/*
* If we can't allocate a new RX buffer, dump
@@ -1556,7 +1559,8 @@ int ipoib_cm_dev_init(struct net_device *dev)
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
priv->cm.num_frags - 1,
priv->cm.srq_ring[i].mapping)) {
priv->cm.srq_ring[i].mapping,
GFP_KERNEL)) {
ipoib_warn(priv, "failed to allocate "
"receive buffer %d\n", i);
ipoib_cm_dev_cleanup(dev);
@@ -685,15 +685,13 @@ int ipoib_ib_dev_open(struct net_device *dev)
ret = ipoib_ib_post_receives(dev);
if (ret) {
ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
ipoib_ib_dev_stop(dev, 1);
return -1;
goto dev_stop;
}

ret = ipoib_cm_dev_open(dev);
if (ret) {
ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
ipoib_ib_dev_stop(dev, 1);
return -1;
goto dev_stop;
}

clear_bit(IPOIB_STOP_REAPER, &priv->flags);
@@ -704,6 +702,11 @@ int ipoib_ib_dev_open(struct net_device *dev)
napi_enable(&priv->napi);

return 0;
dev_stop:
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
napi_enable(&priv->napi);
ipoib_ib_dev_stop(dev, 1);
return -1;
}

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
@@ -746,10 +749,8 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
mutex_lock(&pkey_mutex);
set_bit(IPOIB_PKEY_STOP, &priv->flags);
cancel_delayed_work(&priv->pkey_poll_task);
cancel_delayed_work_sync(&priv->pkey_poll_task);
mutex_unlock(&pkey_mutex);
if (flush)
flush_workqueue(ipoib_workqueue);
}

ipoib_mcast_stop_thread(dev, flush);
@@ -974,7 +975,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
u16 new_index;
int result;

mutex_lock(&priv->vlan_mutex);
down_read(&priv->vlan_rwsem);

/*
* Flush any child interfaces too -- they might be up even if
@@ -983,7 +984,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
list_for_each_entry(cpriv, &priv->child_intfs, list)
__ipoib_ib_dev_flush(cpriv, level);

mutex_unlock(&priv->vlan_mutex);
up_read(&priv->vlan_rwsem);

if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
/* for non-child devices must check/update the pkey value here */
@@ -1081,6 +1082,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);

ipoib_dbg(priv, "cleaning up ib_dev\n");
/*
* We must make sure there are no more (path) completions
* that may wish to touch priv fields that are no longer valid
*/
ipoib_flush_paths(dev);

ipoib_mcast_stop_thread(dev, 1);
ipoib_mcast_dev_flush(dev);
@@ -119,7 +119,7 @@ int ipoib_open(struct net_device *dev)
struct ipoib_dev_priv *cpriv;

/* Bring up any child interfaces too */
mutex_lock(&priv->vlan_mutex);
down_read(&priv->vlan_rwsem);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;

@@ -129,7 +129,7 @@ int ipoib_open(struct net_device *dev)

dev_change_flags(cpriv->dev, flags | IFF_UP);
}
mutex_unlock(&priv->vlan_mutex);
up_read(&priv->vlan_rwsem);
}

netif_start_queue(dev);
@@ -162,7 +162,7 @@ static int ipoib_stop(struct net_device *dev)
struct ipoib_dev_priv *cpriv;

/* Bring down any child interfaces too */
mutex_lock(&priv->vlan_mutex);
down_read(&priv->vlan_rwsem);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;

@@ -172,7 +172,7 @@ static int ipoib_stop(struct net_device *dev)

dev_change_flags(cpriv->dev, flags & ~IFF_UP);
}
mutex_unlock(&priv->vlan_mutex);
up_read(&priv->vlan_rwsem);
}

return 0;
@@ -1350,7 +1350,7 @@ void ipoib_setup(struct net_device *dev)

ipoib_set_ethtool_ops(dev);

netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);

dev->watchdog_timeo = HZ;

@@ -1372,7 +1372,7 @@ void ipoib_setup(struct net_device *dev)

spin_lock_init(&priv->lock);

mutex_init(&priv->vlan_mutex);
init_rwsem(&priv->vlan_rwsem);

INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
@@ -386,8 +386,10 @@ static int ipoib_mcast_join_complete(int status,
mcast->mcmember.mgid.raw, status);

/* We trap for port events ourselves. */
if (status == -ENETRESET)
return 0;
if (status == -ENETRESET) {
status = 0;
goto out;
}

if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
@@ -407,7 +409,8 @@ static int ipoib_mcast_join_complete(int status,
if (mcast == priv->broadcast)
queue_work(ipoib_workqueue, &priv->carrier_on_task);

return 0;
status = 0;
goto out;
}

if (mcast->logcount++ < 20) {
@@ -434,7 +437,8 @@ static int ipoib_mcast_join_complete(int status,
mcast->backoff * HZ);
spin_unlock_irq(&priv->lock);
mutex_unlock(&mcast_mutex);

out:
complete(&mcast->done);
return status;
}

@@ -484,11 +488,15 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
}

set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);

mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
if (IS_ERR(mcast->mc)) {
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
complete(&mcast->done);
ret = PTR_ERR(mcast->mc);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

@@ -510,10 +518,18 @@ void ipoib_mcast_join_task(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, mcast_task.work);
struct net_device *dev = priv->dev;
struct ib_port_attr port_attr;

if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
return;

if (ib_query_port(priv->ca, priv->port, &port_attr) ||
port_attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
port_attr.state);
return;
}

if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
ipoib_warn(priv, "ib_query_gid() failed\n");
else
@@ -751,6 +767,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev)

spin_unlock_irqrestore(&priv->lock, flags);

/* seperate between the wait to the leave*/
list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
wait_for_completion(&mcast->done);

list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(dev, mcast);
ipoib_mcast_free(mcast);
@@ -142,10 +142,10 @@ static void ipoib_unregister_child_dev(struct net_device *dev, struct list_head
priv = netdev_priv(dev);
ppriv = netdev_priv(priv->parent);

mutex_lock(&ppriv->vlan_mutex);
down_write(&ppriv->vlan_rwsem);
unregister_netdevice_queue(dev, head);
list_del(&priv->list);
mutex_unlock(&ppriv->vlan_mutex);
up_write(&ppriv->vlan_rwsem);
}

static size_t ipoib_get_size(const struct net_device *dev)
@@ -140,7 +140,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
if (!rtnl_trylock())
return restart_syscall();

mutex_lock(&ppriv->vlan_mutex);
down_write(&ppriv->vlan_rwsem);

/*
* First ensure this isn't a duplicate. We check the parent device and
@@ -163,7 +163,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);

out:
mutex_unlock(&ppriv->vlan_mutex);
up_write(&ppriv->vlan_rwsem);

if (result)
free_netdev(priv->dev);
@@ -185,7 +185,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)

if (!rtnl_trylock())
return restart_syscall();
mutex_lock(&ppriv->vlan_mutex);

down_write(&ppriv->vlan_rwsem);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -195,7 +196,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
break;
}
}
mutex_unlock(&ppriv->vlan_mutex);
up_write(&ppriv->vlan_rwsem);

rtnl_unlock();

if (dev) {
@@ -46,6 +46,7 @@
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

@@ -86,6 +87,32 @@ module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
"Number of seconds between the observation of a transport"
" layer error and failing all I/O. \"off\" means that this"
" functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
"Maximum number of seconds that the SRP transport should"
" insulate transport layer errors. After this time has been"
" exceeded the SCSI host is removed. Should be"
" between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
" if fast_io_fail_tmo has not been set. \"off\" means that"
" this functionality is disabled.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
@@ -102,6 +129,48 @@ static struct ib_client srp_client = {

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
int tmo = *(int *)kp->arg;

if (tmo >= 0)
return sprintf(buffer, "%d", tmo);
else
return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
int tmo, res;

if (strncmp(val, "off", 3) != 0) {
res = kstrtoint(val, 0, &tmo);
if (res)
goto out;
} else {
tmo = -1;
}
if (kp->arg == &srp_reconnect_delay)
res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
srp_dev_loss_tmo);
else if (kp->arg == &srp_fast_io_fail_tmo)
res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
else
res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
tmo);
if (res)
goto out;
*(int *)kp->arg = tmo;

out:
return res;
}

static struct kernel_param_ops srp_tmo_ops = {
.get = srp_tmo_get,
.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
return (struct srp_target_port *) host->hostdata;
@@ -231,16 +300,16 @@ static int srp_create_target_ib(struct srp_target_port *target)
return -ENOMEM;

recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
srp_recv_completion, NULL, target, SRP_RQ_SIZE,
target->comp_vector);
srp_recv_completion, NULL, target,
target->queue_size, target->comp_vector);
if (IS_ERR(recv_cq)) {
ret = PTR_ERR(recv_cq);
goto err;
}

send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
srp_send_completion, NULL, target, SRP_SQ_SIZE,
target->comp_vector);
srp_send_completion, NULL, target,
target->queue_size, target->comp_vector);
if (IS_ERR(send_cq)) {
ret = PTR_ERR(send_cq);
goto err_recv_cq;
@@ -249,8 +318,8 @@ static int srp_create_target_ib(struct srp_target_port *target)
ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

init_attr->event_handler = srp_qp_event;
init_attr->cap.max_send_wr = SRP_SQ_SIZE;
init_attr->cap.max_recv_wr = SRP_RQ_SIZE;
init_attr->cap.max_send_wr = target->queue_size;
init_attr->cap.max_recv_wr = target->queue_size;
init_attr->cap.max_recv_sge = 1;
init_attr->cap.max_send_sge = 1;
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
@@ -296,6 +365,10 @@ err:
return ret;
}

/*
* Note: this function may be called without srp_alloc_iu_bufs() having been
* invoked. Hence the target->[rt]x_ring checks.
*/
static void srp_free_target_ib(struct srp_target_port *target)
{
int i;
@@ -307,10 +380,18 @@ static void srp_free_target_ib(struct srp_target_port *target)
target->qp = NULL;
target->send_cq = target->recv_cq = NULL;

for (i = 0; i < SRP_RQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->rx_ring[i]);
for (i = 0; i < SRP_SQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->tx_ring[i]);
if (target->rx_ring) {
for (i = 0; i < target->queue_size; ++i)
srp_free_iu(target->srp_host, target->rx_ring[i]);
kfree(target->rx_ring);
target->rx_ring = NULL;
}
if (target->tx_ring) {
for (i = 0; i < target->queue_size; ++i)
srp_free_iu(target->srp_host, target->tx_ring[i]);
kfree(target->tx_ring);
target->tx_ring = NULL;
}
}

static void srp_path_rec_completion(int status,
@@ -390,7 +471,7 @@ static int srp_send_req(struct srp_target_port *target)
req->param.responder_resources = 4;
req->param.remote_cm_response_timeout = 20;
req->param.local_cm_response_timeout = 20;
req->param.retry_count = 7;
req->param.retry_count = target->tl_retry_count;
req->param.rnr_retry_count = 7;
req->param.max_cm_retries = 15;

@@ -496,7 +577,11 @@ static void srp_free_req_data(struct srp_target_port *target)
struct srp_request *req;
int i;

for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
if (!target->req_ring)
return;

for (i = 0; i < target->req_ring_size; ++i) {
req = &target->req_ring[i];
kfree(req->fmr_list);
kfree(req->map_page);
if (req->indirect_dma_addr) {
@@ -506,6 +591,50 @@ static void srp_free_req_data(struct srp_target_port *target)
}
kfree(req->indirect_desc);
}

kfree(target->req_ring);
target->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_target_port *target)
{
struct srp_device *srp_dev = target->srp_host->srp_dev;
struct ib_device *ibdev = srp_dev->dev;
struct srp_request *req;
dma_addr_t dma_addr;
int i, ret = -ENOMEM;

INIT_LIST_HEAD(&target->free_reqs);

target->req_ring = kzalloc(target->req_ring_size *
sizeof(*target->req_ring), GFP_KERNEL);
if (!target->req_ring)
goto out;

for (i = 0; i < target->req_ring_size; ++i) {
req = &target->req_ring[i];
req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
GFP_KERNEL);
req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
GFP_KERNEL);
req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
if (!req->fmr_list || !req->map_page || !req->indirect_desc)
goto out;

dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
target->indirect_size,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(ibdev, dma_addr))
goto out;

req->indirect_dma_addr = dma_addr;
req->index = i;
list_add_tail(&req->list, &target->free_reqs);
}
ret = 0;

out:
return ret;
}

/**
@@ -528,12 +657,20 @@ static void srp_remove_target(struct srp_target_port *target)
WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

srp_del_scsi_host_attr(target->scsi_host);
srp_rport_get(target->rport);
srp_remove_host(target->scsi_host);
scsi_remove_host(target->scsi_host);
srp_disconnect_target(target);
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
cancel_work_sync(&target->tl_err_work);
srp_rport_put(target->rport);
srp_free_req_data(target);

spin_lock(&target->srp_host->target_lock);
list_del(&target->list);
spin_unlock(&target->srp_host->target_lock);

scsi_host_put(target->scsi_host);
}

@@ -545,10 +682,6 @@ static void srp_remove_work(struct work_struct *work)
WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

srp_remove_target(target);

spin_lock(&target->srp_host->target_lock);
list_del(&target->list);
spin_unlock(&target->srp_host->target_lock);
}

static void srp_rport_delete(struct srp_rport *rport)
@@ -686,23 +819,42 @@ static void srp_free_req(struct srp_target_port *target,
spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
static void srp_finish_req(struct srp_target_port *target,
struct srp_request *req, int result)
{
struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

if (scmnd) {
srp_free_req(target, req, scmnd, 0);
scmnd->result = DID_RESET << 16;
scmnd->result = result;
scmnd->scsi_done(scmnd);
}
}

static int srp_reconnect_target(struct srp_target_port *target)
static void srp_terminate_io(struct srp_rport *rport)
{
struct Scsi_Host *shost = target->scsi_host;
int i, ret;
struct srp_target_port *target = rport->lld_data;
int i;

scsi_target_block(&shost->shost_gendev);
for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
}
}

/*
* It is up to the caller to ensure that srp_rport_reconnect() calls are
* serialized and that no concurrent srp_queuecommand(), srp_abort(),
* srp_reset_device() or srp_reset_host() calls will occur while this function
* is in progress. One way to realize that is not to call this function
* directly but to call srp_reconnect_rport() instead since that last function
* serializes calls of this function via rport->mutex and also blocks
* srp_queuecommand() calls before invoking this function.
*/
static int srp_rport_reconnect(struct srp_rport *rport)
{
struct srp_target_port *target = rport->lld_data;
int i, ret;

srp_disconnect_target(target);
/*
@@ -721,41 +873,21 @@ static int srp_reconnect_target(struct srp_target_port *target)
else
srp_create_target_ib(target);

for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
if (req->scmnd)
srp_reset_req(target, req);
srp_finish_req(target, req, DID_RESET << 16);
}

INIT_LIST_HEAD(&target->free_tx);
for (i = 0; i < SRP_SQ_SIZE; ++i)
for (i = 0; i < target->queue_size; ++i)
list_add(&target->tx_ring[i]->list, &target->free_tx);

if (ret == 0)
ret = srp_connect_target(target);

scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
SDEV_TRANSPORT_OFFLINE);
target->transport_offline = !!ret;

if (ret)
goto err;

shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");

return ret;

err:
shost_printk(KERN_ERR, target->scsi_host,
PFX "reconnect failed (%d), removing target port.\n", ret);

/*
* We couldn't reconnect, so kill our target port off.
* However, we have to defer the real removal because we
* are in the context of the SCSI error handler now, which
* will deadlock if we call scsi_remove_host().
*/
srp_queue_remove_work(target);
if (ret == 0)
shost_printk(KERN_INFO, target->scsi_host,
PFX "reconnect succeeded\n");

return ret;
}
@@ -1302,15 +1434,30 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
PFX "Recv failed with error code %d\n", res);
}

static void srp_handle_qp_err(enum ib_wc_status wc_status,
enum ib_wc_opcode wc_opcode,
/**
* srp_tl_err_work() - handle a transport layer error
*
* Note: This function may get invoked before the rport has been created,
* hence the target->rport test.
*/
static void srp_tl_err_work(struct work_struct *work)
{
struct srp_target_port *target;

target = container_of(work, struct srp_target_port, tl_err_work);
if (target->rport)
srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
struct srp_target_port *target)
{
if (target->connected && !target->qp_in_error) {
shost_printk(KERN_ERR, target->scsi_host,
PFX "failed %s status %d\n",
wc_opcode & IB_WC_RECV ? "receive" : "send",
send_err ? "send" : "receive",
wc_status);
queue_work(system_long_wq, &target->tl_err_work);
}
target->qp_in_error = true;
}
@@ -1325,7 +1472,7 @@ static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
if (likely(wc.status == IB_WC_SUCCESS)) {
srp_handle_recv(target, &wc);
} else {
srp_handle_qp_err(wc.status, wc.opcode, target);
srp_handle_qp_err(wc.status, false, target);
}
}
}
@@ -1341,7 +1488,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
list_add(&iu->list, &target->free_tx);
} else {
srp_handle_qp_err(wc.status, wc.opcode, target);
srp_handle_qp_err(wc.status, true, target);
}
}
}
@@ -1349,17 +1496,29 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(shost);
struct srp_rport *rport = target->rport;
struct srp_request *req;
struct srp_iu *iu;
struct srp_cmd *cmd;
struct ib_device *dev;
unsigned long flags;
int len;
int len, result;
const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

if (unlikely(target->transport_offline)) {
scmnd->result = DID_NO_CONNECT << 16;
/*
* The SCSI EH thread is the only context from which srp_queuecommand()
* can get invoked for blocked devices (SDEV_BLOCK /
* SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
* locking the rport mutex if invoked from inside the SCSI EH.
*/
if (in_scsi_eh)
mutex_lock(&rport->mutex);

result = srp_chkready(target->rport);
if (unlikely(result)) {
scmnd->result = result;
scmnd->scsi_done(scmnd);
return 0;
goto unlock_rport;
}

spin_lock_irqsave(&target->lock, flags);
@@ -1404,6 +1563,10 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
goto err_unmap;
}

unlock_rport:
if (in_scsi_eh)
mutex_unlock(&rport->mutex);

return 0;

err_unmap:
@@ -1418,14 +1581,30 @@ err_iu:
err_unlock:
spin_unlock_irqrestore(&target->lock, flags);

if (in_scsi_eh)
mutex_unlock(&rport->mutex);

return SCSI_MLQUEUE_HOST_BUSY;
}

/*
* Note: the resources allocated in this function are freed in
* srp_free_target_ib().
*/
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
int i;

for (i = 0; i < SRP_RQ_SIZE; ++i) {
target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
GFP_KERNEL);
if (!target->rx_ring)
goto err_no_ring;
target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
GFP_KERNEL);
if (!target->tx_ring)
goto err_no_ring;

for (i = 0; i < target->queue_size; ++i) {
target->rx_ring[i] = srp_alloc_iu(target->srp_host,
target->max_ti_iu_len,
GFP_KERNEL, DMA_FROM_DEVICE);
@@ -1433,7 +1612,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
goto err;
}

for (i = 0; i < SRP_SQ_SIZE; ++i) {
for (i = 0; i < target->queue_size; ++i) {
target->tx_ring[i] = srp_alloc_iu(target->srp_host,
target->max_iu_len,
GFP_KERNEL, DMA_TO_DEVICE);
@@ -1446,15 +1625,17 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
return 0;

err:
for (i = 0; i < SRP_RQ_SIZE; ++i) {
for (i = 0; i < target->queue_size; ++i) {
srp_free_iu(target->srp_host, target->rx_ring[i]);
target->rx_ring[i] = NULL;
srp_free_iu(target->srp_host, target->tx_ring[i]);
}

for (i = 0; i < SRP_SQ_SIZE; ++i) {
srp_free_iu(target->srp_host, target->tx_ring[i]);
target->tx_ring[i] = NULL;
}

err_no_ring:
kfree(target->tx_ring);
target->tx_ring = NULL;
kfree(target->rx_ring);
target->rx_ring = NULL;

return -ENOMEM;
}
@@ -1506,6 +1687,9 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
target->scsi_host->can_queue
= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
target->scsi_host->can_queue);
target->scsi_host->cmd_per_lun
= min_t(int, target->scsi_host->can_queue,
target->scsi_host->cmd_per_lun);
} else {
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
@@ -1513,7 +1697,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
goto error;
}

if (!target->rx_ring[0]) {
if (!target->rx_ring) {
ret = srp_alloc_iu_bufs(target);
if (ret)
goto error;
@@ -1533,7 +1717,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
if (ret)
goto error_free;

for (i = 0; i < SRP_RQ_SIZE; i++) {
for (i = 0; i < target->queue_size; i++) {
struct srp_iu *iu = target->rx_ring[i];
ret = srp_post_recv(target, iu);
if (ret)
@@ -1672,6 +1856,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
if (ib_send_cm_drep(cm_id, NULL, 0))
shost_printk(KERN_ERR, target->scsi_host,
PFX "Sending CM DREP failed\n");
queue_work(system_long_wq, &target->tl_err_work);
break;

case IB_CM_TIMEWAIT_EXIT:
@@ -1698,9 +1883,61 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}

/**
* srp_change_queue_type - changing device queue tag type
* @sdev: scsi device struct
* @tag_type: requested tag type
*
* Returns queue tag type.
*/
static int
srp_change_queue_type(struct scsi_device *sdev, int tag_type)
{
if (sdev->tagged_supported) {
scsi_set_tag_type(sdev, tag_type);
if (tag_type)
scsi_activate_tcq(sdev, sdev->queue_depth);
else
scsi_deactivate_tcq(sdev, sdev->queue_depth);
} else
tag_type = 0;

return tag_type;
}

/**
* srp_change_queue_depth - setting device queue depth
* @sdev: scsi device struct
* @qdepth: requested queue depth
* @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
* (see include/scsi/scsi_host.h for definition)
*
* Returns queue depth.
*/
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
struct Scsi_Host *shost = sdev->host;
int max_depth;
if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
max_depth = shost->can_queue;
if (!sdev->tagged_supported)
max_depth = 1;
if (qdepth > max_depth)
qdepth = max_depth;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
} else if (reason == SCSI_QDEPTH_QFULL)
scsi_track_queue_full(sdev, qdepth);
else
return -EOPNOTSUPP;

return sdev->queue_depth;
}

static int srp_send_tsk_mgmt(struct srp_target_port *target,
u64 req_tag, unsigned int lun, u8 func)
{
struct srp_rport *rport = target->rport;
struct ib_device *dev = target->srp_host->srp_dev->dev;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
@@ -1710,12 +1947,20 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,

init_completion(&target->tsk_mgmt_done);

/*
* Lock the rport mutex to avoid that srp_create_target_ib() is
* invoked while a task management function is being sent.
*/
mutex_lock(&rport->mutex);
spin_lock_irq(&target->lock);
iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
spin_unlock_irq(&target->lock);

if (!iu)
if (!iu) {
mutex_unlock(&rport->mutex);

return -1;
}

ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
DMA_TO_DEVICE);
@@ -1732,8 +1977,11 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
DMA_TO_DEVICE);
if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
mutex_unlock(&rport->mutex);

return -1;
}
mutex_unlock(&rport->mutex);

if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
@@ -1751,11 +1999,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

if (!req || !srp_claim_req(target, req, scmnd))
return FAILED;
return SUCCESS;
if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
SRP_TSK_ABORT_TASK) == 0)
ret = SUCCESS;
else if (target->transport_offline)
else if (target->rport->state == SRP_RPORT_LOST)
ret = FAST_IO_FAIL;
else
ret = FAILED;
@@ -1779,10 +2027,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (target->tsk_mgmt_status)
return FAILED;

for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
for (i = 0; i < target->req_ring_size; ++i) {
struct srp_request *req = &target->req_ring[i];
if (req->scmnd && req->scmnd->device == scmnd->device)
srp_reset_req(target, req);
srp_finish_req(target, req, DID_RESET << 16);
}

return SUCCESS;
@@ -1791,14 +2039,10 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
int ret = FAILED;

shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

if (!srp_reconnect_target(target))
ret = SUCCESS;

return ret;
return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_slave_configure(struct scsi_device *sdev)
@@ -1851,6 +2095,14 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));

return sprintf(buf, "%pI6\n", target->path.sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1907,6 +2159,14 @@ static ssize_t show_comp_vector(struct device *dev,
return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));

return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1927,6 +2187,7 @@ static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
@@ -1934,6 +2195,7 @@ static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);

@@ -1942,6 +2204,7 @@ static struct device_attribute *srp_host_attrs[] = {
&dev_attr_ioc_guid,
&dev_attr_service_id,
&dev_attr_pkey,
&dev_attr_sgid,
&dev_attr_dgid,
&dev_attr_orig_dgid,
&dev_attr_req_lim,
@@ -1949,6 +2212,7 @@ static struct device_attribute *srp_host_attrs[] = {
&dev_attr_local_ib_port,
&dev_attr_local_ib_device,
&dev_attr_comp_vector,
&dev_attr_tl_retry_count,
&dev_attr_cmd_sg_entries,
&dev_attr_allow_ext_sg,
NULL
@@ -1961,14 +2225,16 @@ static struct scsi_host_template srp_template = {
.slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
.change_queue_depth = srp_change_queue_depth,
.change_queue_type = srp_change_queue_type,
.eh_abort_handler = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler = srp_reset_host,
.skip_settle_delay = true,
.sg_tablesize = SRP_DEF_SG_TABLESIZE,
.can_queue = SRP_CMD_SQ_SIZE,
.can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
.this_id = -1,
.cmd_per_lun = SRP_CMD_SQ_SIZE,
.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = srp_host_attrs
};
@@ -1994,6 +2260,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
}

rport->lld_data = target;
target->rport = rport;

spin_lock(&host->target_lock);
list_add_tail(&target->list, &host->target_list);
@@ -2073,6 +2340,8 @@ enum {
SRP_OPT_ALLOW_EXT_SG = 1 << 10,
SRP_OPT_SG_TABLESIZE = 1 << 11,
SRP_OPT_COMP_VECTOR = 1 << 12,
SRP_OPT_TL_RETRY_COUNT = 1 << 13,
SRP_OPT_QUEUE_SIZE = 1 << 14,
SRP_OPT_ALL = (SRP_OPT_ID_EXT |
SRP_OPT_IOC_GUID |
SRP_OPT_DGID |
@@ -2094,6 +2363,8 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
{ SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
{ SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
{ SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
{ SRP_OPT_ERR, NULL }
};

@@ -2188,13 +2459,25 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->scsi_host->max_sectors = token;
break;

case SRP_OPT_QUEUE_SIZE:
if (match_int(args, &token) || token < 1) {
pr_warn("bad queue_size parameter '%s'\n", p);
goto out;
}
target->scsi_host->can_queue = token;
target->queue_size = token + SRP_RSP_SQ_SIZE +
SRP_TSK_MGMT_SQ_SIZE;
if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
target->scsi_host->cmd_per_lun = token;
break;

case SRP_OPT_MAX_CMD_PER_LUN:
if (match_int(args, &token)) {
if (match_int(args, &token) || token < 1) {
pr_warn("bad max cmd_per_lun parameter '%s'\n",
p);
goto out;
}
target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
target->scsi_host->cmd_per_lun = token;
break;

case SRP_OPT_IO_CLASS:
@@ -2257,6 +2540,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->comp_vector = token;
break;

case SRP_OPT_TL_RETRY_COUNT:
if (match_int(args, &token) || token < 2 || token > 7) {
pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
p);
goto out;
}
target->tl_retry_count = token;
break;

default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -2273,6 +2565,12 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
pr_warn("target creation request is missing parameter '%s'\n",
srp_opt_tokens[i].pattern);

if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
&& (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
pr_warn("cmd_per_lun = %d > queue_size = %d\n",
target->scsi_host->cmd_per_lun,
target->scsi_host->can_queue);

out:
kfree(options);
return ret;
@@ -2287,8 +2585,7 @@ static ssize_t srp_create_target(struct device *dev,
struct Scsi_Host *target_host;
struct srp_target_port *target;
struct ib_device *ibdev = host->srp_dev->dev;
dma_addr_t dma_addr;
int i, ret;
int ret;

target_host = scsi_host_alloc(&srp_template,
sizeof (struct srp_target_port));
@@ -2311,11 +2608,15 @@ static ssize_t srp_create_target(struct device *dev,
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
target->tl_retry_count = 7;
target->queue_size = SRP_DEFAULT_QUEUE_SIZE;

ret = srp_parse_options(buf, target);
if (ret)
goto err;

target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

if (!srp_conn_unique(target->srp_host, target)) {
shost_printk(KERN_INFO, target->scsi_host,
PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
@@ -2339,31 +2640,13 @@ static ssize_t srp_create_target(struct device *dev,
sizeof (struct srp_indirect_buf) +
target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
INIT_LIST_HEAD(&target->free_tx);
INIT_LIST_HEAD(&target->free_reqs);
for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
struct srp_request *req = &target->req_ring[i];

req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
GFP_KERNEL);
req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
GFP_KERNEL);
req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
if (!req->fmr_list || !req->map_page || !req->indirect_desc)
goto err_free_mem;

dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
target->indirect_size,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(ibdev, dma_addr))
goto err_free_mem;

req->indirect_dma_addr = dma_addr;
req->index = i;
list_add_tail(&req->list, &target->free_reqs);
}
ret = srp_alloc_req_data(target);
if (ret)
goto err_free_mem;

ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

@@ -2612,7 +2895,14 @@ static void srp_remove_one(struct ib_device *device)
}

static struct srp_function_template ib_srp_transport_functions = {
.has_rport_state = true,
.reset_timer_if_blocked = true,
.reconnect_delay = &srp_reconnect_delay,
.fast_io_fail_tmo = &srp_fast_io_fail_tmo,
.dev_loss_tmo = &srp_dev_loss_tmo,
.reconnect = srp_rport_reconnect,
.rport_delete = srp_rport_delete,
.terminate_rport_io = srp_terminate_io,
};

static int __init srp_init_module(void)
@@ -57,14 +57,11 @@ enum {
SRP_MAX_LUN = 512,
SRP_DEF_SG_TABLESIZE = 12,

SRP_RQ_SHIFT = 6,
SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,

SRP_SQ_SIZE = SRP_RQ_SIZE,
SRP_DEFAULT_QUEUE_SIZE = 1 << 6,
SRP_RSP_SQ_SIZE = 1,
SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
SRP_TSK_MGMT_SQ_SIZE = 1,
SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
SRP_TSK_MGMT_SQ_SIZE,

SRP_TAG_NO_REQ = ~0U,
SRP_TAG_TSK_MGMT = 1U << 31,
@@ -140,7 +137,6 @@ struct srp_target_port {
unsigned int cmd_sg_cnt;
unsigned int indirect_size;
bool allow_ext_sg;
bool transport_offline;

/* Everything above this point is used in the hot path of
* command processing. Try to keep them packed into cachelines.
@@ -153,10 +149,14 @@ struct srp_target_port {
u16 io_class;
struct srp_host *srp_host;
struct Scsi_Host *scsi_host;
struct srp_rport *rport;
char target_name[32];
unsigned int scsi_id;
unsigned int sg_tablesize;
int queue_size;
int req_ring_size;
int comp_vector;
int tl_retry_count;

struct ib_sa_path_rec path;
__be16 orig_dgid[8];
@@ -172,10 +172,11 @@ struct srp_target_port {

int zero_req_lim;

struct srp_iu *tx_ring[SRP_SQ_SIZE];
struct srp_iu *rx_ring[SRP_RQ_SIZE];
struct srp_request req_ring[SRP_CMD_SQ_SIZE];
struct srp_iu **tx_ring;
struct srp_iu **rx_ring;
struct srp_request *req_ring;

struct work_struct tl_err_work;
struct work_struct remove_work;

struct list_head list;