Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull misc VFS updates from Al Viro:
 "This cycle a lot of stuff sits on topical branches, so I'll be
  sending more or less one pull request per branch.

  This is the first pile; more to follow in a few. In this one are
  several misc commits from early in the cycle (before I went for
  separate branches), plus the rework of mntput/dput ordering on
  umount, switching to use of fs_pin instead of convoluted games in
  namespace_unlock()"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  switch the IO-triggering parts of umount to fs_pin
  new fs_pin killing logics
  allow attaching fs_pin to a group not associated with some superblock
  get rid of the second argument of acct_kill()
  take count and rcu_head out of fs_pin
  dcache: let the dentry count go down to zero without taking d_lock
  pull bumping refcount into ->kill()
  kill pin_put()
  mode_t whack-a-mole: chelsio
  file->f_path.dentry is pinned down for as long as the file is open...
  get rid of lustre_dump_dentry()
  gut proc_register() a bit
  kill d_validate()
  ncpfs: get rid of d_validate() nonsense
  selinuxfs: don't open-code d_genocide()
 fs/dcache.c | 149 ++++++++++++++++++++++++++++++++++++++++++++++----------
@@ -511,7 +511,7 @@ static void __dentry_kill(struct dentry *dentry)
 	 * dentry_iput drops the locks, at which point nobody (except
 	 * transient RCU lookups) can reach this dentry.
 	 */
-	BUG_ON((int)dentry->d_lockref.count > 0);
+	BUG_ON(dentry->d_lockref.count > 0);
 	this_cpu_dec(nr_dentry);
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
@@ -564,7 +564,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
 	struct dentry *parent = dentry->d_parent;
 	if (IS_ROOT(dentry))
 		return NULL;
-	if (unlikely((int)dentry->d_lockref.count < 0))
+	if (unlikely(dentry->d_lockref.count < 0))
 		return NULL;
 	if (likely(spin_trylock(&parent->d_lock)))
 		return parent;
@@ -593,6 +593,110 @@ again:
 	return parent;
 }
 
+/*
+ * Try to do a lockless dput(), and return whether that was successful.
+ *
+ * If unsuccessful, we return false, having already taken the dentry lock.
+ *
+ * The caller needs to hold the RCU read lock, so that the dentry is
+ * guaranteed to stay around even if the refcount goes down to zero!
+ */
+static inline bool fast_dput(struct dentry *dentry)
+{
+	int ret;
+	unsigned int d_flags;
+
+	/*
+	 * If we have a d_op->d_delete() operation, we should not
+	 * let the dentry count go to zero, so use "put_or_lock".
+	 */
+	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
+		return lockref_put_or_lock(&dentry->d_lockref);
+
+	/*
+	 * .. otherwise, we can try to just decrement the
+	 * lockref optimistically.
+	 */
+	ret = lockref_put_return(&dentry->d_lockref);
+
+	/*
+	 * If the lockref_put_return() failed due to the lock being held
+	 * by somebody else, the fast path has failed. We will need to
+	 * get the lock, and then check the count again.
+	 */
+	if (unlikely(ret < 0)) {
+		spin_lock(&dentry->d_lock);
+		if (dentry->d_lockref.count > 1) {
+			dentry->d_lockref.count--;
+			spin_unlock(&dentry->d_lock);
+			return 1;
+		}
+		return 0;
+	}
+
+	/*
+	 * If we weren't the last ref, we're done.
+	 */
+	if (ret)
+		return 1;
+
+	/*
+	 * Careful, careful. The reference count went down
+	 * to zero, but we don't hold the dentry lock, so
+	 * somebody else could get it again, and do another
+	 * dput(), and we need to not race with that.
+	 *
+	 * However, there is a very special and common case
+	 * where we don't care, because there is nothing to
+	 * do: the dentry is still hashed, it does not have
+	 * a 'delete' op, and it's referenced and already on
+	 * the LRU list.
+	 *
+	 * NOTE! Since we aren't locked, these values are
+	 * not "stable". However, it is sufficient that at
+	 * some point after we dropped the reference the
+	 * dentry was hashed and the flags had the proper
+	 * value. Other dentry users may have re-gotten
+	 * a reference to the dentry and change that, but
+	 * our work is done - we can leave the dentry
+	 * around with a zero refcount.
+	 */
+	smp_rmb();
+	d_flags = ACCESS_ONCE(dentry->d_flags);
+	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
+
+	/* Nothing to do? Dropping the reference was all we needed? */
+	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
+		return 1;
+
+	/*
+	 * Not the fast normal case? Get the lock. We've already decremented
+	 * the refcount, but we'll need to re-check the situation after
+	 * getting the lock.
+	 */
+	spin_lock(&dentry->d_lock);
+
+	/*
+	 * Did somebody else grab a reference to it in the meantime, and
+	 * we're no longer the last user after all? Alternatively, somebody
+	 * else could have killed it and marked it dead. Either way, we
+	 * don't need to do anything else.
+	 */
+	if (dentry->d_lockref.count) {
+		spin_unlock(&dentry->d_lock);
+		return 1;
+	}
+
+	/*
+	 * Re-get the reference we optimistically dropped. We hold the
+	 * lock, and we just tested that it was zero, so we can just
+	 * set it to 1.
+	 */
+	dentry->d_lockref.count = 1;
+	return 0;
+}
+
+
 /*
  * This is dput
  *
@@ -625,8 +729,14 @@ void dput(struct dentry *dentry)
 		return;
 
 repeat:
-	if (lockref_put_or_lock(&dentry->d_lockref))
-		return;
+	rcu_read_lock();
+	if (likely(fast_dput(dentry))) {
+		rcu_read_unlock();
+		return;
+	}
+
+	/* Slow case: now with the dentry lock held */
+	rcu_read_unlock();
 
 	/* Unreachable? Get rid of it */
 	if (unlikely(d_unhashed(dentry)))
@@ -813,7 +923,7 @@ static void shrink_dentry_list(struct list_head *list)
 		 * We found an inuse dentry which was not removed from
 		 * the LRU because of laziness during lookup. Do not free it.
 		 */
-		if ((int)dentry->d_lockref.count > 0) {
+		if (dentry->d_lockref.count > 0) {
 			spin_unlock(&dentry->d_lock);
 			if (parent)
 				spin_unlock(&parent->d_lock);
@@ -2191,37 +2301,6 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
 }
 EXPORT_SYMBOL(d_hash_and_lookup);
 
-/**
- * d_validate - verify dentry provided from insecure source (deprecated)
- * @dentry: The dentry alleged to be valid child of @dparent
- * @dparent: The parent dentry (known to be valid)
- *
- * An insecure source has sent us a dentry, here we verify it and dget() it.
- * This is used by ncpfs in its readdir implementation.
- * Zero is returned if the dentry is invalid.
- *
- * This function is slow for big directories, and deprecated, do not use it.
- */
-int d_validate(struct dentry *dentry, struct dentry *dparent)
-{
-	struct dentry *child;
-
-	spin_lock(&dparent->d_lock);
-	list_for_each_entry(child, &dparent->d_subdirs, d_child) {
-		if (dentry == child) {
-			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-			__dget_dlock(dentry);
-			spin_unlock(&dentry->d_lock);
-			spin_unlock(&dparent->d_lock);
-			return 1;
-		}
-	}
-	spin_unlock(&dparent->d_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(d_validate);
-
 /*
  * When a file is deleted, we have two options:
  * - turn this dentry into a negative dentry
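
The centerpiece of this pile is fast_dput() above: dput() now drops the
reference with a lockless lockref_put_return() and only takes d_lock when
the count actually reached zero and the dentry is not a plain hashed,
LRU-resident one. As a rough illustration of that decide-without-the-lock
shape, here is a compilable user-space sketch in C11; struct obj,
obj_put_fast() and OBJ_CACHED are invented names, and a plain atomic
counter next to a pthread mutex stands in for lockref. Note that the real
lockref additionally fails the lockless decrement while the spinlock is
held (via cmpxchg on the combined lock-and-count word), a property this
toy does not reproduce:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for struct lockref: a refcount living next to a lock. */
    struct obj {
            pthread_mutex_t lock;
            atomic_int count;   /* signed, so a "dead" marker could go negative */
            atomic_uint flags;  /* stands in for dentry->d_flags */
    };

    #define OBJ_CACHED 0x1u     /* plays the role of DCACHE_REFERENCED|DCACHE_LRU_LIST */

    /*
     * Optimistic put, modelled on fast_dput(): drop the reference without
     * the lock, and only lock when we were the last reference and the
     * object is not in the "nothing to do" cached state.
     *
     * Returns true if the caller is done; false with o->lock held, meaning
     * the caller must dispose of the object (the slow path of dput()).
     */
    static bool obj_put_fast(struct obj *o)
    {
            /* lockref_put_return() analogue: lockless decrement. */
            int ret = atomic_fetch_sub(&o->count, 1) - 1;

            if (ret > 0)
                    return true;    /* we were not the last reference: done */

            /*
             * Count hit zero. If the object is "cached", leave it around
             * at refcount zero, like a hashed, LRU-resident dentry with
             * no ->d_delete().
             */
            if (atomic_load(&o->flags) & OBJ_CACHED)
                    return true;

            /* Slow path: take the lock and re-validate, as dput() does. */
            pthread_mutex_lock(&o->lock);
            if (atomic_load(&o->count) > 0) {
                    /* Somebody re-acquired a reference meanwhile: not last. */
                    pthread_mutex_unlock(&o->lock);
                    return true;
            }
            /* Re-take the reference we dropped; caller kills the object. */
            atomic_store(&o->count, 1);
            return false;
    }

    int main(void)
    {
            struct obj o = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .count = 2,
                    .flags = 0,
            };

            (void)obj_put_fast(&o);         /* 2 -> 1: fast path, done */
            if (!obj_put_fast(&o)) {        /* 1 -> 0: slow path, lock held */
                    printf("last put: count re-set to %d, object must die\n",
                           atomic_load(&o.count));
                    pthread_mutex_unlock(&o.lock);
            }
            return 0;
    }

The kernel version additionally orders the d_flags read after the count
decrement with smp_rmb()/ACCESS_ONCE() before trusting the "cached,
nothing to do" test; the toy glosses over that by making flags atomic.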
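
A note on the three one-line hunks: they all drop an (int) cast on
d_lockref.count. Those casts date from when the lockref count was an
unsigned int while dead dentries are marked with a negative count
(lockref_mark_dead() stores -128); the lockref side of this series makes
the count a signed int, so the comparisons now do the right thing uncast.
The pitfall the casts papered over is ordinary C unsigned comparison,
shown in isolation below (a standalone demo, nothing kernel-specific):

    #include <stdio.h>

    int main(void)
    {
            /* A "dead" marker stored in an unsigned refcount field. */
            unsigned int count = (unsigned int)-128;

            /* Unsigned compare: the dead object looks busy (prints 1). */
            printf("count > 0      -> %d\n", count > 0);

            /* The old dcache code cast first, giving the intended 0. */
            printf("(int)count > 0 -> %d\n", (int)count > 0);

            return 0;
    }

Once the field itself is signed, count > 0 is false for a dead entry
without any cast, which is all these three hunks change.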