ocfs2_dlm: fix cluster-wide refcounting of lock resources
This was previously broken, and migration of some locks had to be temporarily disabled. We use a new (and backward-incompatible) set of network messages to account for all references to a lock resource held across the cluster. Once these are all freed, the master node may free the lock resource's memory as soon as its own local references are dropped.

Signed-off-by: Kurt Hackel <kurt.hackel@oracle.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
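To make the new accounting concrete, here is a minimal userspace sketch of the scheme the message describes, not the kernel implementation: the master keeps a per-lockres bitmap with one bit per cluster node, sets a node's bit when that node reports a reference, clears it when the node sends a deref message, and treats the resource as freeable only when the map is empty and its own local references are gone. All names below (lockres_sketch, refmap_set, refmap_clear, can_free_lockres, MAX_NODES) are hypothetical stand-ins; the kernel's real refmap is a bitmap sized for O2NM_MAX_NODES, as the debug helper in the diff below shows.

/*
 * Minimal userspace sketch of the refmap accounting described above.
 * Names are illustrative, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 32            /* stands in for O2NM_MAX_NODES */

struct lockres_sketch {
	unsigned long refmap;   /* one bit per node holding a reference */
	int local_refs;         /* master's own references */
};

/* Master sets a node's bit when that node gains a reference
 * (the new "ref" network message). */
static void refmap_set(struct lockres_sketch *res, int node)
{
	res->refmap |= 1UL << node;
}

/* Master clears the bit when the node sends a "deref" message. */
static void refmap_clear(struct lockres_sketch *res, int node)
{
	res->refmap &= ~(1UL << node);
}

/* The lockres memory may be freed only when no node in the cluster
 * holds a reference and the master's local references are gone. */
static bool can_free_lockres(const struct lockres_sketch *res)
{
	return res->refmap == 0 && res->local_refs == 0;
}

int main(void)
{
	struct lockres_sketch res = { .refmap = 0, .local_refs = 1 };

	refmap_set(&res, 1);    /* node 1 takes a reference */
	refmap_set(&res, 3);    /* node 3 takes a reference */
	refmap_clear(&res, 1);  /* node 1 derefs */
	printf("freeable: %d\n", can_free_lockres(&res));   /* 0 */

	refmap_clear(&res, 3);  /* node 3 derefs */
	res.local_refs = 0;     /* master drops its local refs */
	printf("freeable: %d\n", can_free_lockres(&res));   /* 1 */
	return 0;
}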
@@ -53,6 +53,23 @@ void dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 	spin_unlock(&res->spinlock);
 }
 
+static void dlm_print_lockres_refmap(struct dlm_lock_resource *res)
+{
+	int bit;
+	assert_spin_locked(&res->spinlock);
+
+	mlog(ML_NOTICE, "  refmap nodes: [ ");
+	bit = 0;
+	while (1) {
+		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
+		if (bit >= O2NM_MAX_NODES)
+			break;
+		printk("%u ", bit);
+		bit++;
+	}
+	printk("], inflight=%u\n", res->inflight_locks);
+}
+
 void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 {
 	struct list_head *iter2;
@@ -65,6 +82,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 		 res->owner, res->state);
 	mlog(ML_NOTICE, "  last used: %lu, on purge list: %s\n",
 	     res->last_used, list_empty(&res->purge) ? "no" : "yes");
+	dlm_print_lockres_refmap(res);
 	mlog(ML_NOTICE, "  granted queue: \n");
 	list_for_each(iter2, &res->granted) {
 		lock = list_entry(iter2, struct dlm_lock, list);
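With dlm_print_lockres_refmap() hooked into __dlm_print_one_lock_resource(), the debug dump now walks the refmap with find_next_bit() and prints the number of every node still holding a reference; for a resource referenced by nodes 1 and 3, the format strings above would produce a line of the form "refmap nodes: [ 1 3 ], inflight=<n>", with the trailing count taken from res->inflight_locks.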