Merge branch 'k.o/for-4.3-v1' into k.o/for-4.4
Pick up the late fixes from the 4.3 cycle so we have them in our next branch.
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -80,7 +80,7 @@ enum {
         IPOIB_NUM_WC = 4,

         IPOIB_MAX_PATH_REC_QUEUE = 3,
-        IPOIB_MAX_MCAST_QUEUE = 3,
+        IPOIB_MAX_MCAST_QUEUE = 64,

         IPOIB_FLAG_OPER_UP = 0,
         IPOIB_FLAG_INITIALIZED = 1,
@@ -495,6 +495,7 @@ void ipoib_dev_cleanup(struct net_device *dev);
 void ipoib_mcast_join_task(struct work_struct *work);
 void ipoib_mcast_carrier_on_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
+void ipoib_mcast_free(struct ipoib_mcast *mc);

 void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
@@ -548,6 +549,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,

 int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
                        union ib_gid *mgid, int set_qkey);
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast);
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid);

 int ipoib_init_qp(struct net_device *dev);
 int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
         unsigned long dt;
         unsigned long flags;
         int i;
+        LIST_HEAD(remove_list);
+        struct ipoib_mcast *mcast, *tmcast;
+        struct net_device *dev = priv->dev;

         if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
                 return;
@@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
                                                           lockdep_is_held(&priv->lock))) != NULL) {
                         /* was the neigh idle for two GC periods */
                         if (time_after(neigh_obsolete, neigh->alive)) {
+                                u8 *mgid = neigh->daddr + 4;
+
+                                /* Is this multicast ? */
+                                if (*mgid == 0xff) {
+                                        mcast = __ipoib_mcast_find(dev, mgid);
+
+                                        if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+                                                list_del(&mcast->list);
+                                                rb_erase(&mcast->rb_node, &priv->multicast_tree);
+                                                list_add_tail(&mcast->list, &remove_list);
+                                        }
+                                }
+
                                 rcu_assign_pointer(*np,
                                                    rcu_dereference_protected(neigh->hnext,
                                                                              lockdep_is_held(&priv->lock)));
@@ -1191,6 +1207,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)

 out_unlock:
         spin_unlock_irqrestore(&priv->lock, flags);
+        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
+                ipoib_mcast_leave(dev, mcast);
+                ipoib_mcast_free(mcast);
+        }
 }

 static void ipoib_reap_neigh(struct work_struct *work)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -106,7 +106,7 @@ static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
                 queue_delayed_work(priv->wq, &priv->mcast_task, 0);
 }

-static void ipoib_mcast_free(struct ipoib_mcast *mcast)
+void ipoib_mcast_free(struct ipoib_mcast *mcast)
 {
         struct net_device *dev = mcast->dev;
         int tx_dropped = 0;
@@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
         return mcast;
 }

-static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct rb_node *n = priv->multicast_tree.rb_node;
@@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
                 rec.hop_limit = priv->broadcast->mcmember.hop_limit;

-                /*
-                 * Historically Linux IPoIB has never properly supported SEND
-                 * ONLY join. It emulated it by not providing all the required
-                 * attributes, which is enough to prevent group creation and
-                 * detect if there are full members or not. A major problem
-                 * with supporting SEND ONLY is detecting when the group is
-                 * auto-destroyed as IPoIB will cache the MLID..
-                 */
-#if 1
-                if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-                        comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
-#else
+                /*
+                 * Send-only IB Multicast joins do not work at the core
+                 * IB layer yet, so we can't use them here. However,
+                 * we are emulating an Ethernet multicast send, which
+                 * does not require a multicast subscription and will
+                 * still send properly. The most appropriate thing to
+                 * do is to create the group if it doesn't exist as that
+                 * most closely emulates the behavior, from a user space
+                 * application perspecitive, of Ethernet multicast
+                 * operation. For now, we do a full join, maybe later
+                 * when the core IB layers support send only joins we
+                 * will use them.
+                 */
+#if 0
                 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
                         rec.join_state = 4;
 #endif
@@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
         return 0;
 }

-static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         int ret = 0;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
 module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024");

+bool iser_always_reg = true;
+module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
+MODULE_PARM_DESC(always_register,
+                 "Always register memory, even for continuous memory regions (default:true)");
+
 bool iser_pi_enable = false;
 module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
 MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -611,6 +611,7 @@ extern int iser_debug_level;
 extern bool iser_pi_enable;
 extern int iser_pi_guard;
 extern unsigned int iser_max_sectors;
+extern bool iser_always_reg;

 int iser_assign_reg_ops(struct iser_device *device);

diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -803,11 +803,12 @@ static int
 iser_reg_prot_sg(struct iscsi_iser_task *task,
                  struct iser_data_buf *mem,
                  struct iser_fr_desc *desc,
+                 bool use_dma_key,
                  struct iser_mem_reg *reg)
 {
         struct iser_device *device = task->iser_conn->ib_conn.device;

-        if (mem->dma_nents == 1)
+        if (use_dma_key)
                 return iser_reg_dma(device, mem, reg);

         return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
@@ -817,11 +818,12 @@ static int
 iser_reg_data_sg(struct iscsi_iser_task *task,
                  struct iser_data_buf *mem,
                  struct iser_fr_desc *desc,
+                 bool use_dma_key,
                  struct iser_mem_reg *reg)
 {
         struct iser_device *device = task->iser_conn->ib_conn.device;

-        if (mem->dma_nents == 1)
+        if (use_dma_key)
                 return iser_reg_dma(device, mem, reg);

         return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
@@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
         struct iser_mem_reg *reg = &task->rdma_reg[dir];
         struct iser_mem_reg *data_reg;
         struct iser_fr_desc *desc = NULL;
+        bool use_dma_key;
         int err;

         err = iser_handle_unaligned_buf(task, mem, dir);
         if (unlikely(err))
                 return err;

-        if (mem->dma_nents != 1 ||
-            scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
+        use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
+                       scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
+
+        if (!use_dma_key) {
                 desc = device->reg_ops->reg_desc_get(ib_conn);
                 reg->mem_h = desc;
         }
@@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
         else
                 data_reg = &task->desc.data_reg;

-        err = iser_reg_data_sg(task, mem, desc, data_reg);
+        err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
         if (unlikely(err))
                 goto err_reg;

@@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
                 if (unlikely(err))
                         goto err_reg;

-                err = iser_reg_prot_sg(task, mem, desc, prot_reg);
+                err = iser_reg_prot_sg(task, mem, desc,
+                                       use_dma_key, prot_reg);
                 if (unlikely(err))
                         goto err_reg;
         }
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
                              (unsigned long)comp);
         }

-        device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
-                                   IB_ACCESS_REMOTE_WRITE |
-                                   IB_ACCESS_REMOTE_READ);
-        if (IS_ERR(device->mr))
-                goto dma_mr_err;
+        if (!iser_always_reg) {
+                int access = IB_ACCESS_LOCAL_WRITE |
+                             IB_ACCESS_REMOTE_WRITE |
+                             IB_ACCESS_REMOTE_READ;
+
+                device->mr = ib_get_dma_mr(device->pd, access);
+                if (IS_ERR(device->mr))
+                        goto dma_mr_err;
+        }

         INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
                               iser_event_handler);
@@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device)
         return 0;

 handler_err:
-        ib_dereg_mr(device->mr);
+        if (device->mr)
+                ib_dereg_mr(device->mr);
 dma_mr_err:
         for (i = 0; i < device->comps_used; i++)
                 tasklet_kill(&device->comps[i].tasklet);
@@ -173,7 +178,6 @@ comps_err:
 static void iser_free_device_ib_res(struct iser_device *device)
 {
         int i;
-        BUG_ON(device->mr == NULL);

         for (i = 0; i < device->comps_used; i++) {
                 struct iser_comp *comp = &device->comps[i];
@@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device)
         }

         (void)ib_unregister_event_handler(&device->event_handler);
-        (void)ib_dereg_mr(device->mr);
+        if (device->mr)
+                (void)ib_dereg_mr(device->mr);
         ib_dealloc_pd(device->pd);

         kfree(device->comps);