IB/rxe: Replace spinlock with rwlock
Concurrent readers which read the rb tree are protected using a read lock.
Concurrent writers which add elements to the pool are protected using a write lock.

Signed-off-by: Parav Pandit <parav@mellanox.com>
Reviewed-by: Daniel Jurgens <danielj@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
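Below is a minimal, hypothetical sketch of the locking pattern the message describes: lookups take the pool lock as readers, while rb-tree insertions and removals take it as a writer. The names (demo_pool, demo_obj, demo_find, demo_remove) are illustrative only and are not part of the driver; the real changes are in the diff further down.

/*
 * Minimal sketch (not the driver's code) of the pattern this patch adopts:
 * readers search the rb tree under read_lock_irqsave(), writers insert or
 * erase under write_lock_irqsave(). All names here are hypothetical.
 */
#include <linux/spinlock.h>	/* rwlock_t and the read_/write_lock helpers */
#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_pool {
        rwlock_t        lock;   /* protects tree add/del/search */
        struct rb_root  tree;
};

struct demo_obj {
        struct rb_node  node;
        u32             index;
};

/* Reader side: several lookups may now run concurrently. */
static struct demo_obj *demo_find(struct demo_pool *pool, u32 index)
{
        struct rb_node *n;
        struct demo_obj *obj = NULL;
        unsigned long flags;

        read_lock_irqsave(&pool->lock, flags);
        for (n = pool->tree.rb_node; n; ) {
                obj = rb_entry(n, struct demo_obj, node);
                if (index < obj->index)
                        n = n->rb_left;
                else if (index > obj->index)
                        n = n->rb_right;
                else
                        break;
        }
        read_unlock_irqrestore(&pool->lock, flags);

        return n ? obj : NULL;
}

/* Writer side: insert/erase still run exclusively. */
static void demo_remove(struct demo_pool *pool, struct demo_obj *obj)
{
        unsigned long flags;

        write_lock_irqsave(&pool->lock, flags);
        rb_erase(&obj->node, &pool->tree);
        write_unlock_irqrestore(&pool->lock, flags);
}

With an rwlock, lookups can proceed in parallel with one another, while modifications of the rb tree remain exclusive.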
committed by Doug Ledford

parent 3db2bceb29
commit 66d0f207db
drivers/infiniband/sw/rxe/rxe_pool.c

@@ -207,7 +207,7 @@ int rxe_pool_init(
 
         kref_init(&pool->ref_cnt);
 
-        spin_lock_init(&pool->pool_lock);
+        rwlock_init(&pool->pool_lock);
 
         if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
                 err = rxe_pool_init_index(pool,
@@ -245,12 +245,12 @@ int rxe_pool_cleanup(struct rxe_pool *pool)
 {
         unsigned long flags;
 
-        spin_lock_irqsave(&pool->pool_lock, flags);
+        write_lock_irqsave(&pool->pool_lock, flags);
         pool->state = rxe_pool_invalid;
         if (atomic_read(&pool->num_elem) > 0)
                 pr_warn("%s pool destroyed with unfree'd elem\n",
                         pool_name(pool));
-        spin_unlock_irqrestore(&pool->pool_lock, flags);
+        write_unlock_irqrestore(&pool->pool_lock, flags);
 
         rxe_pool_put(pool);
 
@@ -336,10 +336,10 @@ void rxe_add_key(void *arg, void *key)
         struct rxe_pool *pool = elem->pool;
         unsigned long flags;
 
-        spin_lock_irqsave(&pool->pool_lock, flags);
+        write_lock_irqsave(&pool->pool_lock, flags);
         memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
         insert_key(pool, elem);
-        spin_unlock_irqrestore(&pool->pool_lock, flags);
+        write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
 void rxe_drop_key(void *arg)
@@ -348,9 +348,9 @@ void rxe_drop_key(void *arg)
         struct rxe_pool *pool = elem->pool;
         unsigned long flags;
 
-        spin_lock_irqsave(&pool->pool_lock, flags);
+        write_lock_irqsave(&pool->pool_lock, flags);
         rb_erase(&elem->node, &pool->tree);
-        spin_unlock_irqrestore(&pool->pool_lock, flags);
+        write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
 void rxe_add_index(void *arg)
@@ -359,10 +359,10 @@ void rxe_add_index(void *arg)
         struct rxe_pool *pool = elem->pool;
         unsigned long flags;
 
-        spin_lock_irqsave(&pool->pool_lock, flags);
+        write_lock_irqsave(&pool->pool_lock, flags);
         elem->index = alloc_index(pool);
         insert_index(pool, elem);
-        spin_unlock_irqrestore(&pool->pool_lock, flags);
+        write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
 void rxe_drop_index(void *arg)
@@ -371,10 +371,10 @@ void rxe_drop_index(void *arg)
         struct rxe_pool *pool = elem->pool;
         unsigned long flags;
 
-        spin_lock_irqsave(&pool->pool_lock, flags);
+        write_lock_irqsave(&pool->pool_lock, flags);
         clear_bit(elem->index - pool->min_index, pool->table);
         rb_erase(&elem->node, &pool->tree);
-        spin_unlock_irqrestore(&pool->pool_lock, flags);
+        write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
 void *rxe_alloc(struct rxe_pool *pool)
@@ -384,13 +384,13 @@ void *rxe_alloc(struct rxe_pool *pool)
 
         might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
 
-        spin_lock_irqsave(&pool->pool_lock, flags);
+        read_lock_irqsave(&pool->pool_lock, flags);
         if (pool->state != rxe_pool_valid) {
-                spin_unlock_irqrestore(&pool->pool_lock, flags);
+                read_unlock_irqrestore(&pool->pool_lock, flags);
                 return NULL;
         }
         kref_get(&pool->ref_cnt);
-        spin_unlock_irqrestore(&pool->pool_lock, flags);
+        read_unlock_irqrestore(&pool->pool_lock, flags);
 
         kref_get(&pool->rxe->ref_cnt);
 
@@ -436,7 +436,7 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
         struct rxe_pool_entry *elem = NULL;
         unsigned long flags;
 
-        spin_lock_irqsave(&pool->pool_lock, flags);
+        read_lock_irqsave(&pool->pool_lock, flags);
 
         if (pool->state != rxe_pool_valid)
                 goto out;
@@ -458,7 +458,7 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
         kref_get(&elem->ref_cnt);
 
 out:
-        spin_unlock_irqrestore(&pool->pool_lock, flags);
+        read_unlock_irqrestore(&pool->pool_lock, flags);
         return node ? elem : NULL;
 }
 
@@ -469,7 +469,7 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
         int cmp;
         unsigned long flags;
 
-        spin_lock_irqsave(&pool->pool_lock, flags);
+        read_lock_irqsave(&pool->pool_lock, flags);
 
         if (pool->state != rxe_pool_valid)
                 goto out;
@@ -494,6 +494,6 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
         kref_get(&elem->ref_cnt);
 
 out:
-        spin_unlock_irqrestore(&pool->pool_lock, flags);
+        read_unlock_irqrestore(&pool->pool_lock, flags);
         return node ? elem : NULL;
 }
drivers/infiniband/sw/rxe/rxe_pool.h

@@ -90,7 +90,7 @@ struct rxe_pool_entry {
 
 struct rxe_pool {
         struct rxe_dev          *rxe;
-        spinlock_t              pool_lock; /* pool spinlock */
+        rwlock_t                pool_lock; /* protects pool add/del/search */
         size_t                  elem_size;
         struct kref             ref_cnt;
         void                    (*cleanup)(struct rxe_pool_entry *obj);