IB/qib: Use RCU for qpn lookup

The heavyweight spinlock in qib_lookup_qpn() is replaced with RCU.
The hash buckets themselves are now selected via jhash instead of a
simple mod of the QPN.
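For illustration only (a sketch, not the driver source; the real helper
is the qpn_hash() added below, and qp_rnd is the per-device random seed
it hashes against), bucket selection changes roughly as follows:

	#include <linux/jhash.h>
	#include <linux/types.h>

	/* Before: a direct modulo, so sequentially allocated QPNs
	 * land in adjacent buckets. */
	static unsigned old_bucket(u32 qpn, u32 table_size)
	{
		return qpn % table_size;
	}

	/* After: jhash_1word() keyed with a per-device random value
	 * spreads QPNs across the table; table_size must be a power
	 * of two so the mask picks a bucket. */
	static unsigned new_bucket(u32 qpn, u32 qp_rnd, u32 table_size)
	{
		return jhash_1word(qpn, qp_rnd) & (table_size - 1);
	}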

The changes should benefit multiple receive contexts running on
different processors by not contending for the lock just to read the
hash structures.
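To make the concurrency claim concrete, here is a minimal,
self-contained sketch (generic names, not the driver's) of the read
side this patch moves to: readers walk a bucket under rcu_read_lock()
and take a reference with atomic_inc_not_zero(), never touching the
writers' spinlock.

	#include <linux/rcupdate.h>
	#include <linux/atomic.h>
	#include <linux/types.h>

	#define TABLE_SIZE 256			/* power of two */

	struct entry {
		u32 key;
		struct entry __rcu *next;
		atomic_t refcount;
	};

	static struct entry __rcu *table[TABLE_SIZE];

	/* Lock-free reader: safe because writers publish with
	 * rcu_assign_pointer() and wait out readers with
	 * synchronize_rcu() before reusing memory. */
	static struct entry *lookup(u32 key)
	{
		struct entry *e;

		rcu_read_lock();
		for (e = rcu_dereference(table[key & (TABLE_SIZE - 1)]);
		     e; e = rcu_dereference(e->next))
			if (e->key == key)
				break;
		/* Don't resurrect an entry whose last reference is gone. */
		if (e && !atomic_inc_not_zero(&e->refcount))
			e = NULL;
		rcu_read_unlock();
		return e;
	}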

The patch also adds a lookaside_qp pointer and a lookaside_qpn to the
receive context.  The interrupt handler tests the current packet's QPN
against lookaside_qpn when the lookaside_qp pointer is non-NULL, and
reuses the cached QP on a match instead of repeating the hash lookup.
The pointer is NULL'ed when the interrupt handler exits.
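The lookaside code itself lands in the receive path (one of the other
changed files, not shown in the hunks below).  A hedged sketch of the
idea; only the lookaside_qp/lookaside_qpn names and the qib_lookup_qpn()
signature come from this patch, while the context struct and
qib_qp_put() are hypothetical stand-ins:

	#include <linux/types.h>

	struct qib_qp;
	struct qib_ibport;

	extern struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
	extern void qib_qp_put(struct qib_qp *qp);	/* hypothetical ref drop */

	struct rcv_context {			/* hypothetical stand-in */
		struct qib_qp *lookaside_qp;	/* holds a QP reference, or NULL */
		u32 lookaside_qpn;
	};

	/* Per packet: reuse the cached QP when the QPN repeats. */
	static struct qib_qp *pkt_qp(struct rcv_context *rcd,
				     struct qib_ibport *ibp, u32 qpn)
	{
		if (rcd->lookaside_qp && rcd->lookaside_qpn == qpn)
			return rcd->lookaside_qp;	/* hit: no hash walk */

		if (rcd->lookaside_qp)
			qib_qp_put(rcd->lookaside_qp);	/* retire stale entry */

		rcd->lookaside_qp = qib_lookup_qpn(ibp, qpn);	/* takes a ref */
		rcd->lookaside_qpn = qpn;
		return rcd->lookaside_qp;
	}

	/* On interrupt-handler exit the pointer is dropped and NULL'ed. */
	static void pkt_done(struct rcv_context *rcd)
	{
		if (rcd->lookaside_qp) {
			qib_qp_put(rcd->lookaside_qp);
			rcd->lookaside_qp = NULL;
		}
	}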

Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Commit: af061a644a (parent 9e1c0e4325)
Author: Mike Marciniszyn, 2011-09-23 13:16:44 -04:00
Committed by: Roland Dreier
5 changed files with 92 additions and 46 deletions

drivers/infiniband/hw/qib/qib_qp.c

@@ -34,6 +34,7 @@
 #include <linux/err.h>
 #include <linux/vmalloc.h>
+#include <linux/jhash.h>

 #include "qib.h"
@@ -204,6 +205,13 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
 	clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
 }
+
+static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
+{
+	return jhash_1word(qpn, dev->qp_rnd) &
+		(dev->qp_table_size - 1);
+}
+
 /*
  * Put the QP into the hash table.
  * The hash table holds a reference to the QP.
@@ -211,22 +219,23 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
 static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 {
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
-	unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
 	unsigned long flags;
+	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

 	spin_lock_irqsave(&dev->qpt_lock, flags);

-	if (qp->ibqp.qp_num == 0)
-		ibp->qp0 = qp;
-	else if (qp->ibqp.qp_num == 1)
-		ibp->qp1 = qp;
-	else {
-		qp->next = dev->qp_table[n];
-		dev->qp_table[n] = qp;
-	}
 	atomic_inc(&qp->refcount);
+	if (qp->ibqp.qp_num == 0)
+		rcu_assign_pointer(ibp->qp0, qp);
+	else if (qp->ibqp.qp_num == 1)
+		rcu_assign_pointer(ibp->qp1, qp);
+	else {
+		qp->next = dev->qp_table[n];
+		rcu_assign_pointer(dev->qp_table[n], qp);
+	}

 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+	synchronize_rcu();
 }

 /*
@@ -236,29 +245,32 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 {
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
-	struct qib_qp *q, **qpp;
+	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
 	unsigned long flags;

-	qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
-
 	spin_lock_irqsave(&dev->qpt_lock, flags);

 	if (ibp->qp0 == qp) {
-		ibp->qp0 = NULL;
 		atomic_dec(&qp->refcount);
+		rcu_assign_pointer(ibp->qp0, NULL);
 	} else if (ibp->qp1 == qp) {
-		ibp->qp1 = NULL;
 		atomic_dec(&qp->refcount);
-	} else
+		rcu_assign_pointer(ibp->qp1, NULL);
+	} else {
+		struct qib_qp *q, **qpp;
+
+		qpp = &dev->qp_table[n];
 		for (; (q = *qpp) != NULL; qpp = &q->next)
 			if (q == qp) {
-				*qpp = qp->next;
-				qp->next = NULL;
 				atomic_dec(&qp->refcount);
+				rcu_assign_pointer(*qpp, qp->next);
+				qp->next = NULL;
 				break;
 			}
+	}

 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+	synchronize_rcu();
 }

 /**
@@ -280,21 +292,24 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
 		if (!qib_mcast_tree_empty(ibp))
 			qp_inuse++;
-		if (ibp->qp0)
+		rcu_read_lock();
+		if (rcu_dereference(ibp->qp0))
 			qp_inuse++;
-		if (ibp->qp1)
+		if (rcu_dereference(ibp->qp1))
 			qp_inuse++;
+		rcu_read_unlock();
 	}

 	spin_lock_irqsave(&dev->qpt_lock, flags);
 	for (n = 0; n < dev->qp_table_size; n++) {
 		qp = dev->qp_table[n];
-		dev->qp_table[n] = NULL;
+		rcu_assign_pointer(dev->qp_table[n], NULL);

 		for (; qp; qp = qp->next)
 			qp_inuse++;
 	}
 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+	synchronize_rcu();

 	return qp_inuse;
 }
@@ -309,25 +324,28 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
  */
 struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
 {
-	struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
-	unsigned long flags;
-	struct qib_qp *qp;
+	struct qib_qp *qp = NULL;

-	spin_lock_irqsave(&dev->qpt_lock, flags);
-
-	if (qpn == 0)
-		qp = ibp->qp0;
-	else if (qpn == 1)
-		qp = ibp->qp1;
-	else
-		for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
-		     qp = qp->next)
+	if (unlikely(qpn <= 1)) {
+		rcu_read_lock();
+		if (qpn == 0)
+			qp = rcu_dereference(ibp->qp0);
+		else
+			qp = rcu_dereference(ibp->qp1);
+	} else {
+		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+		unsigned n = qpn_hash(dev, qpn);
+
+		rcu_read_lock();
+		for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next)
 			if (qp->ibqp.qp_num == qpn)
 				break;
+	}

 	if (qp)
-		atomic_inc(&qp->refcount);
+		if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
+			qp = NULL;

-	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+	rcu_read_unlock();
 	return qp;
 }
@@ -1015,6 +1033,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
 		ret = ERR_PTR(-ENOMEM);
 		goto bail_swq;
 	}
+	RCU_INIT_POINTER(qp->next, NULL);
 	if (init_attr->srq)
 		sz = 0;
 	else {