@@ -33,20 +33,58 @@
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
+#include <linux/bit_spinlock.h>
+#include <linux/rculist_bl.h>
#include "internal.h"

+/*
+ * Usage:
+ * dentry->d_inode->i_lock protects:
+ *   - i_dentry, d_alias, d_inode of aliases
+ * dcache_hash_bucket lock protects:
+ *   - the dcache hash table
+ * s_anon bl list spinlock protects:
+ *   - the s_anon list (see __d_drop)
+ * dcache_lru_lock protects:
+ *   - the dcache lru lists and counters
+ * d_lock protects:
+ *   - d_flags
+ *   - d_name
+ *   - d_lru
+ *   - d_count
+ *   - d_unhashed()
+ *   - d_parent and d_subdirs
+ *   - children's d_child and d_parent
+ *   - d_alias, d_inode
+ *
+ * Ordering:
+ * dentry->d_inode->i_lock
+ *   dentry->d_lock
+ *     dcache_lru_lock
+ *     dcache_hash_bucket lock
+ *     s_anon lock
+ *
+ * If there is an ancestor relationship:
+ * dentry->d_parent->...->d_parent->d_lock
+ *   ...
+ *     dentry->d_parent->d_lock
+ *       dentry->d_lock
+ *
+ * If no ancestor relationship:
+ * if (dentry1 < dentry2)
+ *   dentry1->d_lock
+ *     dentry2->d_lock
+ */
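
The ordering rules above can be made concrete with a small sketch (not part of
the patch; the helper name is hypothetical). With no ancestor relationship,
two dentries must be locked in address order to avoid ABBA deadlock:

	static void lock_two_unrelated_dentries(struct dentry *d1, struct dentry *d2)
	{
		/* lower address first, per the "no ancestor" rule above */
		if (d1 < d2) {
			spin_lock(&d1->d_lock);
			spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&d2->d_lock);
			spin_lock_nested(&d1->d_lock, DENTRY_D_LOCK_NESTED);
		}
	}
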
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

-__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

-EXPORT_SYMBOL(dcache_lock);
+EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
-
/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
@@ -60,22 +98,51 @@ static struct kmem_cache *dentry_cache __read_mostly;

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
-static struct hlist_head *dentry_hashtable __read_mostly;
+
+struct dcache_hash_bucket {
+	struct hlist_bl_head head;
+};
+static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
+
+static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
+					unsigned long hash)
+{
+	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
+	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
+	return dentry_hashtable + (hash & D_HASHMASK);
+}
+
+static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
+{
+	bit_spin_lock(0, (unsigned long *)&b->head.first);
+}
+
+static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
+{
+	__bit_spin_unlock(0, (unsigned long *)&b->head.first);
+}
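
A note on spin_lock_bucket()/spin_unlock_bucket(): hlist_bl ("bl" =
bit-locked) list heads keep their lock in bit 0 of the head pointer itself,
so every hash chain gets a private lock at no extra memory cost. Usage
mirrors __d_rehash() later in this patch (a sketch, assuming d_lock is
already held):

	struct dcache_hash_bucket *b = d_hash(dentry->d_parent, dentry->d_name.hash);

	spin_lock_bucket(b);		/* takes bit 0 of b->head.first */
	hlist_bl_add_head_rcu(&dentry->d_hash, &b->head);
	spin_unlock_bucket(b);		/* releases the bit lock */
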
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

-static struct percpu_counter nr_dentry __cacheline_aligned_in_smp;
-static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(unsigned int, nr_dentry);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+static int get_nr_dentry(void)
+{
+	int i;
+	int sum = 0;
+	for_each_possible_cpu(i)
+		sum += per_cpu(nr_dentry, i);
+	return sum < 0 ? 0 : sum;
+}
+
int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
-	dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry);
-	dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+	dentry_stat.nr_dentry = get_nr_dentry();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif
@@ -91,35 +158,50 @@ static void __d_free(struct rcu_head *head)
}

/*
- * no dcache_lock, please.
+ * no locks, please.
 */
static void d_free(struct dentry *dentry)
{
-	percpu_counter_dec(&nr_dentry);
+	BUG_ON(dentry->d_count);
+	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never inserted into hash, immediate free is OK */
-	if (hlist_unhashed(&dentry->d_hash))
+	if (hlist_bl_unhashed(&dentry->d_hash))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

+/**
+ * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
+ * After this call, in-progress rcu-walk path lookup will fail. This
+ * should be called after unhashing, and after changing d_inode (if
+ * the dentry has not already been unhashed).
+ */
+static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
+{
+	assert_spin_locked(&dentry->d_lock);
+	/* Go through a barrier */
+	write_seqcount_barrier(&dentry->d_seq);
+}
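
dentry_rcuwalk_barrier() pairs with read-side seqcount checks in rcu-walk
(the real reader lives in fs/namei.c). A schematic of the read side, to show
what the barrier invalidates (a sketch, not code from this patch; the label
is hypothetical):

	unsigned seq;

	seq = read_seqcount_begin(&dentry->d_seq);
	/* ... speculatively use d_inode, d_name, etc. ... */
	if (read_seqcount_retry(&dentry->d_seq, seq))
		goto fallback_to_refwalk;	/* dentry changed under us */
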
+
/*
 * Release the dentry's inode, using the filesystem
- * d_iput() operation if defined.
+ * d_iput() operation if defined. Dentry has no refcount
+ * and is unhashed.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
-	__releases(dcache_lock)
+	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
-		spin_unlock(&dcache_lock);
+		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
@@ -128,40 +210,72 @@ static void dentry_iput(struct dentry * dentry)
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
-		spin_unlock(&dcache_lock);
	}
}

/*
- * dentry_lru_(add|del|move_tail) must be called with dcache_lock held.
+ * Release the dentry's inode, using the filesystem
+ * d_iput() operation if defined. dentry remains in-use.
+ */
+static void dentry_unlink_inode(struct dentry * dentry)
+	__releases(dentry->d_lock)
+	__releases(dentry->d_inode->i_lock)
+{
+	struct inode *inode = dentry->d_inode;
+	dentry->d_inode = NULL;
+	list_del_init(&dentry->d_alias);
+	dentry_rcuwalk_barrier(dentry);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&inode->i_lock);
+	if (!inode->i_nlink)
+		fsnotify_inoderemove(inode);
+	if (dentry->d_op && dentry->d_op->d_iput)
+		dentry->d_op->d_iput(dentry, inode);
+	else
+		iput(inode);
+}
+
+/*
+ * dentry_lru_(add|del|move_tail) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (list_empty(&dentry->d_lru)) {
+		spin_lock(&dcache_lru_lock);
		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
-		percpu_counter_inc(&nr_dentry_unused);
+		dentry_stat.nr_unused++;
+		spin_unlock(&dcache_lru_lock);
	}
}

+static void __dentry_lru_del(struct dentry *dentry)
+{
+	list_del_init(&dentry->d_lru);
+	dentry->d_sb->s_nr_dentry_unused--;
+	dentry_stat.nr_unused--;
+}
+
static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
-		list_del_init(&dentry->d_lru);
-		dentry->d_sb->s_nr_dentry_unused--;
-		percpu_counter_dec(&nr_dentry_unused);
+		spin_lock(&dcache_lru_lock);
+		__dentry_lru_del(dentry);
+		spin_unlock(&dcache_lru_lock);
	}
}

static void dentry_lru_move_tail(struct dentry *dentry)
{
+	spin_lock(&dcache_lru_lock);
	if (list_empty(&dentry->d_lru)) {
		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
-		percpu_counter_inc(&nr_dentry_unused);
+		dentry_stat.nr_unused++;
	} else {
		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	}
+	spin_unlock(&dcache_lru_lock);
}

/**
@@ -171,22 +285,115 @@ static void dentry_lru_move_tail(struct dentry *dentry)
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
+ *
+ * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
+ * d_kill.
 */
-static struct dentry *d_kill(struct dentry *dentry)
+static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
-	__releases(dcache_lock)
+	__releases(parent->d_lock)
+	__releases(dentry->d_inode->i_lock)
{
-	struct dentry *parent;
-
+	dentry->d_parent = NULL;
	list_del(&dentry->d_u.d_child);
-	/*drops the locks, at that point nobody can reach this dentry */
+	if (parent)
+		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
+	/*
+	 * dentry_iput drops the locks, at which point nobody (except
+	 * transient RCU lookups) can reach this dentry.
+	 */
+	d_free(dentry);
+	return parent;
+}
+
+/**
+ * d_drop - drop a dentry
+ * @dentry: dentry to drop
+ *
+ * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
+ * be found through a VFS lookup any more. Note that this is different from
+ * deleting the dentry - d_delete will try to mark the dentry negative if
+ * possible, giving a successful _negative_ lookup, while d_drop will
+ * just make the cache lookup fail.
+ *
+ * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
+ * reason (NFS timeouts or autofs deletes).
+ *
+ * __d_drop requires dentry->d_lock.
+ */
+void __d_drop(struct dentry *dentry)
+{
+	if (!(dentry->d_flags & DCACHE_UNHASHED)) {
+		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
+			bit_spin_lock(0,
+				(unsigned long *)&dentry->d_sb->s_anon.first);
+			dentry->d_flags |= DCACHE_UNHASHED;
+			hlist_bl_del_init(&dentry->d_hash);
+			__bit_spin_unlock(0,
+				(unsigned long *)&dentry->d_sb->s_anon.first);
+		} else {
+			struct dcache_hash_bucket *b;
+			b = d_hash(dentry->d_parent, dentry->d_name.hash);
+			spin_lock_bucket(b);
+			/*
+			 * We may not actually need to put DCACHE_UNHASHED
+			 * manipulations under the hash lock, but follow
+			 * the principle of least surprise.
+			 */
+			dentry->d_flags |= DCACHE_UNHASHED;
+			hlist_bl_del_rcu(&dentry->d_hash);
+			spin_unlock_bucket(b);
+			dentry_rcuwalk_barrier(dentry);
+		}
+	}
+}
+EXPORT_SYMBOL(__d_drop);
+
+void d_drop(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__d_drop(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(d_drop);
+
+/*
+ * Finish off a dentry we've decided to kill.
+ * dentry->d_lock must be held, returns with it unlocked.
+ * If ref is non-zero, then decrement the refcount too.
+ * Returns dentry requiring refcount drop, or NULL if we're done.
+ */
+static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
+	__releases(dentry->d_lock)
+{
+	struct inode *inode;
+	struct dentry *parent;
+
+	inode = dentry->d_inode;
+	if (inode && !spin_trylock(&inode->i_lock)) {
+relock:
+		spin_unlock(&dentry->d_lock);
+		cpu_relax();
+		return dentry; /* try again with same dentry */
+	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
-	d_free(dentry);
-	return parent;
+	if (parent && !spin_trylock(&parent->d_lock)) {
+		if (inode)
+			spin_unlock(&inode->i_lock);
+		goto relock;
+	}
+
+	if (ref)
+		dentry->d_count--;
+	/* if dentry was on the d_lru list delete it from there */
+	dentry_lru_del(dentry);
+	/* if it was on the hash then remove it */
+	__d_drop(dentry);
+	return d_kill(dentry, parent);
}
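
dentry_kill() uses trylocks because the documented lock order is i_lock
before d_lock (and parent->d_lock before child->d_lock), yet it enters
holding only the child's d_lock. On trylock failure it unlocks and returns
the same dentry, so callers simply loop. A sketch of the calling pattern
(dput() below does the equivalent with a goto; try_prune_one_dentry() later
in the patch uses exactly this loop):

	while (dentry) {
		spin_lock(&dentry->d_lock);
		/* ... decide to keep or drop the reference ... */
		dentry = dentry_kill(dentry, 1);
		/* NULL: done; same dentry back: a trylock failed, retry;
		 * otherwise: an ancestor whose ref is dropped next time */
	}
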
/*
@@ -214,34 +421,26 @@ static struct dentry *d_kill(struct dentry *dentry)
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
- *
- * no dcache lock, please.
 */
-
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
-	if (atomic_read(&dentry->d_count) == 1)
+	if (dentry->d_count == 1)
		might_sleep();
-	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
-		return;
-
	spin_lock(&dentry->d_lock);
-	if (atomic_read(&dentry->d_count)) {
+	BUG_ON(!dentry->d_count);
+	if (dentry->d_count > 1) {
+		dentry->d_count--;
		spin_unlock(&dentry->d_lock);
-		spin_unlock(&dcache_lock);
		return;
	}

-	/*
-	 * AV: ->d_delete() is _NOT_ allowed to block now.
-	 */
-	if (dentry->d_op && dentry->d_op->d_delete) {
+	if (dentry->d_flags & DCACHE_OP_DELETE) {
		if (dentry->d_op->d_delete(dentry))
-			goto unhash_it;
+			goto kill_it;
	}

	/* Unreachable? Get rid of it */
@@ -252,16 +451,12 @@ repeat:
	dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

-	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);
+	dentry->d_count--;
+	spin_unlock(&dentry->d_lock);
	return;

-unhash_it:
-	__d_drop(dentry);
kill_it:
-	/* if dentry was on the d_lru list delete it from there */
-	dentry_lru_del(dentry);
-	dentry = d_kill(dentry);
+	dentry = dentry_kill(dentry, 1);
	if (dentry)
		goto repeat;
}
@@ -284,9 +479,9 @@ int d_invalidate(struct dentry * dentry)
	/*
	 * If it's already been dropped, return OK.
	 */
-	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
@@ -294,9 +489,9 @@ int d_invalidate(struct dentry * dentry)
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
-		spin_lock(&dcache_lock);
+		spin_lock(&dentry->d_lock);
	}

	/*
@@ -309,35 +504,61 @@ int d_invalidate(struct dentry * dentry)
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
-	spin_lock(&dentry->d_lock);
-	if (atomic_read(&dentry->d_count) > 1) {
+	if (dentry->d_count > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
-			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);

-/* This should be called _only_ with dcache_lock held */
-static inline struct dentry * __dget_locked(struct dentry *dentry)
+/* This must be called with d_lock held */
+static inline void __dget_dlock(struct dentry *dentry)
{
-	atomic_inc(&dentry->d_count);
-	dentry_lru_del(dentry);
-	return dentry;
+	dentry->d_count++;
}

-struct dentry * dget_locked(struct dentry *dentry)
+static inline void __dget(struct dentry *dentry)
{
-	return __dget_locked(dentry);
+	spin_lock(&dentry->d_lock);
+	__dget_dlock(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+
+struct dentry *dget_parent(struct dentry *dentry)
+{
+	struct dentry *ret;
+
+repeat:
+	/*
+	 * Don't need rcu_dereference because we re-check it was correct under
+	 * the lock.
+	 */
+	rcu_read_lock();
+	ret = dentry->d_parent;
+	if (!ret) {
+		rcu_read_unlock();
+		goto out;
+	}
+	spin_lock(&ret->d_lock);
+	if (unlikely(ret != dentry->d_parent)) {
+		spin_unlock(&ret->d_lock);
+		rcu_read_unlock();
+		goto repeat;
+	}
+	rcu_read_unlock();
+	BUG_ON(!ret->d_count);
+	ret->d_count++;
+	spin_unlock(&ret->d_lock);
+out:
+	return ret;
}
-EXPORT_SYMBOL(dget_locked);
+EXPORT_SYMBOL(dget_parent);
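
dget_parent() above is the template for the lockless "peek, lock, re-check"
idiom used throughout this patch: read d_parent under rcu_read_lock (so the
parent's memory cannot be freed), take its d_lock, then verify d_parent
still matches before bumping d_count. Typical use (a sketch):

	struct dentry *parent;

	parent = dget_parent(dentry);	/* pinned even against concurrent rename */
	/* ... use parent ... */
	dput(parent);
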
/**
 * d_find_alias - grab a hashed alias of inode
@@ -355,42 +576,51 @@ EXPORT_SYMBOL(dget_locked);
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */
-
-static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
+static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
-	struct list_head *head, *next, *tmp;
-	struct dentry *alias, *discon_alias=NULL;
+	struct dentry *alias, *discon_alias;

-	head = &inode->i_dentry;
-	next = inode->i_dentry.next;
-	while (next != head) {
-		tmp = next;
-		next = tmp->next;
-		prefetch(next);
-		alias = list_entry(tmp, struct dentry, d_alias);
+again:
+	discon_alias = NULL;
+	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
-			    (alias->d_flags & DCACHE_DISCONNECTED))
+			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
-			else if (!want_discon) {
-				__dget_locked(alias);
+			} else if (!want_discon) {
+				__dget_dlock(alias);
+				spin_unlock(&alias->d_lock);
+				return alias;
+			}
+		}
+		spin_unlock(&alias->d_lock);
+	}
+	if (discon_alias) {
+		alias = discon_alias;
+		spin_lock(&alias->d_lock);
+		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
+			if (IS_ROOT(alias) &&
+			    (alias->d_flags & DCACHE_DISCONNECTED)) {
+				__dget_dlock(alias);
+				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
+		spin_unlock(&alias->d_lock);
+		goto again;
	}
-	if (discon_alias)
-		__dget_locked(discon_alias);
-	return discon_alias;
+	return NULL;
}

-struct dentry * d_find_alias(struct inode *inode)
+struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
-		spin_lock(&dcache_lock);
+		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode, 0);
-		spin_unlock(&dcache_lock);
+		spin_unlock(&inode->i_lock);
	}
	return de;
}
@@ -404,54 +634,61 @@ void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
-	spin_lock(&dcache_lock);
+	spin_lock(&inode->i_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
-		if (!atomic_read(&dentry->d_count)) {
-			__dget_locked(dentry);
+		if (!dentry->d_count) {
+			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
-			spin_unlock(&dcache_lock);
+			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

/*
- * Throw away a dentry - free the inode, dput the parent. This requires that
- * the LRU list has already been removed.
+ * Try to throw away a dentry - free the inode, dput the parent.
+ * Requires dentry->d_lock is held, and dentry->d_count == 0.
+ * Releases dentry->d_lock.
 *
- * Try to prune ancestors as well. This is necessary to prevent
- * quadratic behavior of shrink_dcache_parent(), but is also expected
- * to be beneficial in reducing dentry cache fragmentation.
+ * This may fail if locks cannot be acquired; no problem, just try again.
 */
-static void prune_one_dentry(struct dentry * dentry)
+static void try_prune_one_dentry(struct dentry *dentry)
	__releases(dentry->d_lock)
-	__releases(dcache_lock)
-	__acquires(dcache_lock)
{
-	__d_drop(dentry);
-	dentry = d_kill(dentry);
+	struct dentry *parent;

+	parent = dentry_kill(dentry, 0);
	/*
-	 * Prune ancestors. Locking is simpler than in dput(),
-	 * because dcache_lock needs to be taken anyway.
+	 * If dentry_kill returns NULL, we have nothing more to do.
+	 * If it returns the same dentry, trylocks failed. In either
+	 * case, just loop again.
+	 *
+	 * Otherwise, we need to prune ancestors too. This is necessary
+	 * to prevent quadratic behavior of shrink_dcache_parent(), but
+	 * is also expected to be beneficial in reducing dentry cache
+	 * fragmentation.
	 */
-	spin_lock(&dcache_lock);
+	if (!parent)
+		return;
+	if (parent == dentry)
+		return;
+
+	/* Prune ancestors. */
+	dentry = parent;
	while (dentry) {
-		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
+		spin_lock(&dentry->d_lock);
+		if (dentry->d_count > 1) {
+			dentry->d_count--;
+			spin_unlock(&dentry->d_lock);
			return;
-
-		if (dentry->d_op && dentry->d_op->d_delete)
-			dentry->d_op->d_delete(dentry);
-		dentry_lru_del(dentry);
-		__d_drop(dentry);
-		dentry = d_kill(dentry);
-		spin_lock(&dcache_lock);
+		}
+		dentry = dentry_kill(dentry, 1);
	}
}
@@ -459,24 +696,35 @@ static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

-	while (!list_empty(list)) {
-		dentry = list_entry(list->prev, struct dentry, d_lru);
-		dentry_lru_del(dentry);
+	rcu_read_lock();
+	for (;;) {
+		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
+		if (&dentry->d_lru == list)
+			break; /* empty */
+		spin_lock(&dentry->d_lock);
+		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
+			spin_unlock(&dentry->d_lock);
+			continue;
+		}

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
-		spin_lock(&dentry->d_lock);
-		if (atomic_read(&dentry->d_count)) {
+		if (dentry->d_count) {
+			dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;
		}

-		prune_one_dentry(dentry);
-		/* dentry->d_lock was dropped in prune_one_dentry() */
-		cond_resched_lock(&dcache_lock);
+
+		rcu_read_unlock();
+
+		try_prune_one_dentry(dentry);
+
+		rcu_read_lock();
	}
+	rcu_read_unlock();
}
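
Why shrink_dentry_list() walks under rcu_read_lock rather than a list lock:
try_prune_one_dentry() must drop every lock while killing a dentry, so the
list can change under the walker. RCU keeps the dentry memory valid across
that window, and re-reading list->prev after taking d_lock detects entries
removed meanwhile. Schematically (a sketch of the loop above, not additional
patch code):

	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break;				/* list drained */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			spin_unlock(&dentry->d_lock);	/* lost a race, re-read */
			continue;
		}
		/* ... dentry is pinned by d_lock; safe to prune ... */
	}
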
/**
@@ -495,42 +743,44 @@ static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
	LIST_HEAD(tmp);
	int cnt = *count;

-	spin_lock(&dcache_lock);
+relock:
+	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				struct dentry, d_lru);
		BUG_ON(dentry->d_sb != sb);

+		if (!spin_trylock(&dentry->d_lock)) {
+			spin_unlock(&dcache_lru_lock);
+			cpu_relax();
+			goto relock;
+		}
+
		/*
		 * If we are honouring the DCACHE_REFERENCED flag and the
		 * dentry has this flag set, don't free it. Clear the flag
		 * and put it back on the LRU.
		 */
-		if (flags & DCACHE_REFERENCED) {
-			spin_lock(&dentry->d_lock);
-			if (dentry->d_flags & DCACHE_REFERENCED) {
-				dentry->d_flags &= ~DCACHE_REFERENCED;
-				list_move(&dentry->d_lru, &referenced);
-				spin_unlock(&dentry->d_lock);
-				cond_resched_lock(&dcache_lock);
-				continue;
-			}
+		if (flags & DCACHE_REFERENCED &&
+		    dentry->d_flags & DCACHE_REFERENCED) {
+			dentry->d_flags &= ~DCACHE_REFERENCED;
+			list_move(&dentry->d_lru, &referenced);
			spin_unlock(&dentry->d_lock);
+		} else {
+			list_move_tail(&dentry->d_lru, &tmp);
+			spin_unlock(&dentry->d_lock);
+			if (!--cnt)
+				break;
		}
-
-		list_move_tail(&dentry->d_lru, &tmp);
-		if (!--cnt)
-			break;
-		cond_resched_lock(&dcache_lock);
+		cond_resched_lock(&dcache_lru_lock);
	}
-
-	*count = cnt;
-	shrink_dentry_list(&tmp);
-
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dcache_lru_lock);

+	shrink_dentry_list(&tmp);
+
+	*count = cnt;
}

/**
@@ -546,13 +796,12 @@ static void prune_dcache(int count)
{
	struct super_block *sb, *p = NULL;
	int w_count;
-	int unused = percpu_counter_sum_positive(&nr_dentry_unused);
+	int unused = dentry_stat.nr_unused;
	int prune_ratio;
	int pruned;

	if (unused == 0 || count == 0)
		return;
-	spin_lock(&dcache_lock);
	if (count >= unused)
		prune_ratio = 1;
	else
@@ -589,11 +838,9 @@ static void prune_dcache(int count)
		if (down_read_trylock(&sb->s_umount)) {
			if ((sb->s_root != NULL) &&
			    (!list_empty(&sb->s_dentry_lru))) {
-				spin_unlock(&dcache_lock);
				__shrink_dcache_sb(sb, &w_count,
						DCACHE_REFERENCED);
				pruned -= w_count;
-				spin_lock(&dcache_lock);
			}
			up_read(&sb->s_umount);
		}
@@ -609,7 +856,6 @@ static void prune_dcache(int count)
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
-	spin_unlock(&dcache_lock);
}

/**
@@ -623,12 +869,14 @@ void shrink_dcache_sb(struct super_block *sb)
{
	LIST_HEAD(tmp);

-	spin_lock(&dcache_lock);
+	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		list_splice_init(&sb->s_dentry_lru, &tmp);
+		spin_unlock(&dcache_lru_lock);
		shrink_dentry_list(&tmp);
+		spin_lock(&dcache_lru_lock);
	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dcache_lru_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);

@@ -645,10 +893,10 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
-	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
	dentry_lru_del(dentry);
	__d_drop(dentry);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
@@ -657,14 +905,16 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)

		/* this is a branch with children - detach all of them
		 * from the system in one go */
-		spin_lock(&dcache_lock);
+		spin_lock(&dentry->d_lock);
		list_for_each_entry(loop, &dentry->d_subdirs,
				    d_u.d_child) {
+			spin_lock_nested(&loop->d_lock,
+					DENTRY_D_LOCK_NESTED);
			dentry_lru_del(loop);
			__d_drop(loop);
-			cond_resched_lock(&dcache_lock);
+			spin_unlock(&loop->d_lock);
		}
-		spin_unlock(&dcache_lock);
+		spin_unlock(&dentry->d_lock);

		/* move to the first child */
		dentry = list_entry(dentry->d_subdirs.next,
@@ -676,7 +926,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
		do {
			struct inode *inode;

-			if (atomic_read(&dentry->d_count) != 0) {
+			if (dentry->d_count != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
@@ -685,20 +935,23 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
-				       atomic_read(&dentry->d_count),
+				       dentry->d_count,
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

-			if (IS_ROOT(dentry))
+			if (IS_ROOT(dentry)) {
				parent = NULL;
-			else {
+				list_del(&dentry->d_u.d_child);
+			} else {
				parent = dentry->d_parent;
-				atomic_dec(&parent->d_count);
+				spin_lock(&parent->d_lock);
+				parent->d_count--;
+				list_del(&dentry->d_u.d_child);
+				spin_unlock(&parent->d_lock);
			}

-			list_del(&dentry->d_u.d_child);
			detached++;

			inode = dentry->d_inode;
@@ -728,8 +981,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)

/*
 * destroy the dentries attached to a superblock on unmounting
- * - we don't need to use dentry->d_lock, and only need dcache_lock when
- *   removing the dentry from the system lists and hashes because:
+ * - we don't need to use dentry->d_lock because:
 * - the superblock is detached from all mountings and open files, so the
 *   dentry trees will not be rearranged by the VFS
 * - s_umount is write-locked, so the memory pressure shrinker will ignore
@@ -746,11 +998,13 @@ void shrink_dcache_for_umount(struct super_block *sb)

	dentry = sb->s_root;
	sb->s_root = NULL;
-	atomic_dec(&dentry->d_count);
+	spin_lock(&dentry->d_lock);
+	dentry->d_count--;
+	spin_unlock(&dentry->d_lock);
	shrink_dcache_for_umount_subtree(dentry);

-	while (!hlist_empty(&sb->s_anon)) {
-		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
+	while (!hlist_bl_empty(&sb->s_anon)) {
+		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}
@@ -768,15 +1022,20 @@ void shrink_dcache_for_umount(struct super_block *sb)
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
-
int have_submounts(struct dentry *parent)
{
-	struct dentry *this_parent = parent;
+	struct dentry *this_parent;
	struct list_head *next;
+	unsigned seq;
+	int locked = 0;
+
+	seq = read_seqbegin(&rename_lock);
+again:
+	this_parent = parent;

-	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
+	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
@@ -784,27 +1043,65 @@ resume:
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
+
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* Have we found a mount point ? */
-		if (d_mountpoint(dentry))
+		if (d_mountpoint(dentry)) {
+			spin_unlock(&dentry->d_lock);
+			spin_unlock(&this_parent->d_lock);
			goto positive;
+		}
		if (!list_empty(&dentry->d_subdirs)) {
+			spin_unlock(&this_parent->d_lock);
+			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
+			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
+		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
-		next = this_parent->d_u.d_child.next;
-		this_parent = this_parent->d_parent;
+		struct dentry *tmp;
+		struct dentry *child;
+
+		tmp = this_parent->d_parent;
+		rcu_read_lock();
+		spin_unlock(&this_parent->d_lock);
+		child = this_parent;
+		this_parent = tmp;
+		spin_lock(&this_parent->d_lock);
+		/* might go back up the wrong parent if we have had a rename
+		 * or deletion */
+		if (this_parent != child->d_parent ||
+		    (!locked && read_seqretry(&rename_lock, seq))) {
+			spin_unlock(&this_parent->d_lock);
+			rcu_read_unlock();
+			goto rename_retry;
+		}
+		rcu_read_unlock();
+		next = child->d_u.d_child.next;
		goto resume;
	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&this_parent->d_lock);
+	if (!locked && read_seqretry(&rename_lock, seq))
+		goto rename_retry;
+	if (locked)
+		write_sequnlock(&rename_lock);
	return 0; /* No mount points found in tree */
positive:
-	spin_unlock(&dcache_lock);
+	if (!locked && read_seqretry(&rename_lock, seq))
+		goto rename_retry;
+	if (locked)
+		write_sequnlock(&rename_lock);
	return 1;
+
+rename_retry:
+	locked = 1;
+	write_seqlock(&rename_lock);
+	goto again;
}
EXPORT_SYMBOL(have_submounts);

@@ -824,11 +1121,16 @@ EXPORT_SYMBOL(have_submounts);
 */
static int select_parent(struct dentry * parent)
{
-	struct dentry *this_parent = parent;
+	struct dentry *this_parent;
	struct list_head *next;
+	unsigned seq;
	int found = 0;
+	int locked = 0;

-	spin_lock(&dcache_lock);
+	seq = read_seqbegin(&rename_lock);
+again:
+	this_parent = parent;
+	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
@@ -837,11 +1139,13 @@ resume:
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
-		if (!atomic_read(&dentry->d_count)) {
+		if (!dentry->d_count) {
			dentry_lru_move_tail(dentry);
			found++;
		} else {
@@ -853,28 +1157,63 @@ resume:
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
-		if (found && need_resched())
+		if (found && need_resched()) {
+			spin_unlock(&dentry->d_lock);
			goto out;
+		}

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
+			spin_unlock(&this_parent->d_lock);
+			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
+			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
+
+		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
-		next = this_parent->d_u.d_child.next;
-		this_parent = this_parent->d_parent;
+		struct dentry *tmp;
+		struct dentry *child;
+
+		tmp = this_parent->d_parent;
+		rcu_read_lock();
+		spin_unlock(&this_parent->d_lock);
+		child = this_parent;
+		this_parent = tmp;
+		spin_lock(&this_parent->d_lock);
+		/* might go back up the wrong parent if we have had a rename
+		 * or deletion */
+		if (this_parent != child->d_parent ||
+		    (!locked && read_seqretry(&rename_lock, seq))) {
+			spin_unlock(&this_parent->d_lock);
+			rcu_read_unlock();
+			goto rename_retry;
+		}
+		rcu_read_unlock();
+		next = child->d_u.d_child.next;
		goto resume;
	}
out:
-	spin_unlock(&dcache_lock);
+	spin_unlock(&this_parent->d_lock);
+	if (!locked && read_seqretry(&rename_lock, seq))
+		goto rename_retry;
+	if (locked)
+		write_sequnlock(&rename_lock);
	return found;
+
+rename_retry:
+	if (found)
+		return found;
+	locked = 1;
+	write_seqlock(&rename_lock);
+	goto again;
}

/**
@@ -908,16 +1247,13 @@ EXPORT_SYMBOL(shrink_dcache_parent);
 */
static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
-	int nr_unused;
-
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr);
	}

-	nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
-	return (nr_unused / 100) * sysctl_vfs_cache_pressure;
+	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
@@ -960,38 +1296,52 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

-	atomic_set(&dentry->d_count, 1);
+	dentry->d_count = 1;
	dentry->d_flags = DCACHE_UNHASHED;
	spin_lock_init(&dentry->d_lock);
+	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
-	dentry->d_mounted = 0;
-	INIT_HLIST_NODE(&dentry->d_hash);
+	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);
+	INIT_LIST_HEAD(&dentry->d_u.d_child);

	if (parent) {
-		dentry->d_parent = dget(parent);
+		spin_lock(&parent->d_lock);
+		/*
+		 * don't need child lock because it is not subject
+		 * to concurrency here
+		 */
+		__dget_dlock(parent);
+		dentry->d_parent = parent;
		dentry->d_sb = parent->d_sb;
-	} else {
-		INIT_LIST_HEAD(&dentry->d_u.d_child);
-	}
-
-	spin_lock(&dcache_lock);
-	if (parent)
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
-	spin_unlock(&dcache_lock);
+		spin_unlock(&parent->d_lock);
+	}

-	percpu_counter_inc(&nr_dentry);
+	this_cpu_inc(nr_dentry);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

+struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
+{
+	struct dentry *dentry = d_alloc(NULL, name);
+	if (dentry) {
+		dentry->d_sb = sb;
+		dentry->d_parent = dentry;
+		dentry->d_flags |= DCACHE_DISCONNECTED;
+	}
+	return dentry;
+}
+EXPORT_SYMBOL(d_alloc_pseudo);
+
struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;
@@ -1003,12 +1353,36 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
}
EXPORT_SYMBOL(d_alloc_name);

-/* the caller must hold dcache_lock */
+void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
+{
+	BUG_ON(dentry->d_op);
+	BUG_ON(dentry->d_flags & (DCACHE_OP_HASH	|
+				DCACHE_OP_COMPARE	|
+				DCACHE_OP_REVALIDATE	|
+				DCACHE_OP_DELETE ));
+	dentry->d_op = op;
+	if (!op)
+		return;
+	if (op->d_hash)
+		dentry->d_flags |= DCACHE_OP_HASH;
+	if (op->d_compare)
+		dentry->d_flags |= DCACHE_OP_COMPARE;
+	if (op->d_revalidate)
+		dentry->d_flags |= DCACHE_OP_REVALIDATE;
+	if (op->d_delete)
+		dentry->d_flags |= DCACHE_OP_DELETE;
+
+}
+EXPORT_SYMBOL(d_set_d_op);
+
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
+	spin_lock(&dentry->d_lock);
	if (inode)
		list_add(&dentry->d_alias, &inode->i_dentry);
	dentry->d_inode = inode;
+	dentry_rcuwalk_barrier(dentry);
+	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

@@ -1030,9 +1404,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
-	spin_lock(&dcache_lock);
+	if (inode)
+		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
-	spin_unlock(&dcache_lock);
+	if (inode)
+		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);
@@ -1069,15 +1445,18 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

+		/*
+		 * Don't need alias->d_lock here, because aliases with
+		 * d_parent == entry->d_parent are not subject to name or
+		 * parent changes, because the parent inode i_mutex is held.
+		 */
		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
-		if (qstr->len != len)
+		if (dentry_cmp(qstr->name, qstr->len, name, len))
			continue;
-		if (memcmp(qstr->name, name, len))
-			continue;
-		dget_locked(alias);
+		__dget(alias);
		return alias;
	}

@@ -1091,9 +1470,11 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)

	BUG_ON(!list_empty(&entry->d_alias));

-	spin_lock(&dcache_lock);
+	if (inode)
+		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
-	spin_unlock(&dcache_lock);
+	if (inode)
+		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
@@ -1134,14 +1515,6 @@ struct dentry * d_alloc_root(struct inode * root_inode)
}
EXPORT_SYMBOL(d_alloc_root);

-static inline struct hlist_head *d_hash(struct dentry *parent,
-					unsigned long hash)
-{
-	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
-	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
-	return dentry_hashtable + (hash & D_HASHMASK);
-}
-
/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
@@ -1182,10 +1555,11 @@ struct dentry *d_obtain_alias(struct inode *inode)
	}
	tmp->d_parent = tmp; /* make sure dput doesn't croak */

-	spin_lock(&dcache_lock);
+
+	spin_lock(&inode->i_lock);
	res = __d_find_alias(inode, 0);
	if (res) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}
@@ -1195,12 +1569,14 @@ struct dentry *d_obtain_alias(struct inode *inode)
	tmp->d_sb = inode->i_sb;
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
-	tmp->d_flags &= ~DCACHE_UNHASHED;
	list_add(&tmp->d_alias, &inode->i_dentry);
-	hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon);
+	bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
+	tmp->d_flags &= ~DCACHE_UNHASHED;
+	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
+	__bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
	spin_unlock(&tmp->d_lock);
+	spin_unlock(&inode->i_lock);

-	spin_unlock(&dcache_lock);
	return tmp;

out_iput:
@@ -1230,18 +1606,18 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
-		spin_lock(&dcache_lock);
+		spin_lock(&inode->i_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
-			spin_unlock(&dcache_lock);
+			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
-			/* already taking dcache_lock, so d_add() by hand */
+			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
-			spin_unlock(&dcache_lock);
+			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
@@ -1314,10 +1690,10 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
-	spin_lock(&dcache_lock);
+	spin_lock(&inode->i_lock);
	if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
		__d_instantiate(found, inode);
-		spin_unlock(&dcache_lock);
+		spin_unlock(&inode->i_lock);
		security_d_instantiate(found, inode);
		return found;
	}
@@ -1327,8 +1703,8 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
	 * reference to it, move it in place and use it.
	 */
	new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
-	dget_locked(new);
-	spin_unlock(&dcache_lock);
+	__dget(new);
+	spin_unlock(&inode->i_lock);
	security_d_instantiate(found, inode);
	d_move(new, found);
	iput(inode);
@@ -1341,6 +1717,112 @@ err_out:
}
EXPORT_SYMBOL(d_add_ci);

+/**
+ * __d_lookup_rcu - search for a dentry (racy, store-free)
+ * @parent: parent dentry
+ * @name: qstr of name we wish to find
+ * @seq: returns d_seq value at the point where the dentry was found
+ * @inode: returns dentry->d_inode when the inode was found valid.
+ * Returns: dentry, or NULL
+ *
+ * __d_lookup_rcu is the dcache lookup function for rcu-walk name
+ * resolution (store-free path walking) design described in
+ * Documentation/filesystems/path-lookup.txt.
+ *
+ * This is not to be used outside core vfs.
+ *
+ * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
+ * held, and rcu_read_lock held. The returned dentry must not be stored
+ * without taking d_lock and checking d_seq sequence count against @seq
+ * returned here.
+ *
+ * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
+ * function.
+ *
+ * Alternatively, __d_lookup_rcu may be called again to look up the child of
+ * the returned dentry, so long as its parent's seqlock is checked after the
+ * child is looked up. Thus, an interlocking stepping of sequence lock checks
+ * is formed, giving integrity down the path walk.
+ */
+struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
+				unsigned *seq, struct inode **inode)
+{
+	unsigned int len = name->len;
+	unsigned int hash = name->hash;
+	const unsigned char *str = name->name;
+	struct dcache_hash_bucket *b = d_hash(parent, hash);
+	struct hlist_bl_node *node;
+	struct dentry *dentry;
+
+	/*
+	 * Note: There is significant duplication with __d_lookup which is
+	 * required to prevent single threaded performance regressions
+	 * especially on architectures where smp_rmb (in seqcounts) are costly.
+	 * Keep the two functions in sync.
+	 */
+
+	/*
+	 * The hash list is protected using RCU.
+	 *
+	 * Carefully use d_seq when comparing a candidate dentry, to avoid
+	 * races with d_move().
+	 *
+	 * It is possible that concurrent renames can mess up our list
+	 * walk here and result in missing our dentry, resulting in the
+	 * false-negative result. d_lookup() protects against concurrent
+	 * renames using rename_lock seqlock.
+	 *
+	 * See Documentation/vfs/dcache-locking.txt for more details.
+	 */
+	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+		struct inode *i;
+		const char *tname;
+		int tlen;
+
+		if (dentry->d_name.hash != hash)
+			continue;
+
+seqretry:
+		*seq = read_seqcount_begin(&dentry->d_seq);
+		if (dentry->d_parent != parent)
+			continue;
+		if (d_unhashed(dentry))
+			continue;
+		tlen = dentry->d_name.len;
+		tname = dentry->d_name.name;
+		i = dentry->d_inode;
+		prefetch(tname);
+		if (i)
+			prefetch(i);
+		/*
+		 * This seqcount check is required to ensure name and
+		 * len are loaded atomically, so as not to walk off the
+		 * edge of memory when walking. If we could load this
+		 * atomically some other way, we could drop this check.
+		 */
+		if (read_seqcount_retry(&dentry->d_seq, *seq))
+			goto seqretry;
+		if (parent->d_flags & DCACHE_OP_COMPARE) {
+			if (parent->d_op->d_compare(parent, *inode,
+						dentry, i,
+						tlen, tname, name))
+				continue;
+		} else {
+			if (dentry_cmp(tname, tlen, str, len))
+				continue;
+		}
+		/*
+		 * No extra seqcount check is required after the name
+		 * compare. The caller must perform a seqcount check in
+		 * order to do anything useful with the returned dentry
+		 * anyway.
+		 */
+		*inode = i;
+		return dentry;
+	}
+	return NULL;
+}
+
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
@@ -1352,10 +1834,10 @@ EXPORT_SYMBOL(d_add_ci);
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
-struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
+struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
{
-	struct dentry * dentry = NULL;
-	unsigned long seq;
+	struct dentry *dentry;
+	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
@@ -1367,7 +1849,7 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
}
EXPORT_SYMBOL(d_lookup);

-/*
+/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
@@ -1382,16 +1864,23 @@ EXPORT_SYMBOL(d_lookup);
 *
 * __d_lookup callers must be commented.
 */
-struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
+struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
-	struct hlist_head *head = d_hash(parent,hash);
+	struct dcache_hash_bucket *b = d_hash(parent, hash);
+	struct hlist_bl_node *node;
	struct dentry *found = NULL;
-	struct hlist_node *node;
	struct dentry *dentry;

+	/*
+	 * Note: There is significant duplication with __d_lookup_rcu which is
+	 * required to prevent single threaded performance regressions
+	 * especially on architectures where smp_rmb (in seqcounts) are costly.
+	 * Keep the two functions in sync.
+	 */
+
	/*
	 * The hash list is protected using RCU.
	 *
@@ -1407,25 +1896,16 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
	 */
	rcu_read_lock();

-	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
-		struct qstr *qstr;
+	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+		const char *tname;
+		int tlen;

		if (dentry->d_name.hash != hash)
			continue;
-		if (dentry->d_parent != parent)
-			continue;

		spin_lock(&dentry->d_lock);
-
-		/*
-		 * Recheck the dentry after taking the lock - d_move may have
-		 * changed things. Don't bother checking the hash because
-		 * we're about to compare the whole name anyway.
-		 */
		if (dentry->d_parent != parent)
			goto next;
-
-		/* non-existing due to RCU? */
		if (d_unhashed(dentry))
			goto next;

@@ -1433,18 +1913,19 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
-		qstr = &dentry->d_name;
-		if (parent->d_op && parent->d_op->d_compare) {
-			if (parent->d_op->d_compare(parent, qstr, name))
+		tlen = dentry->d_name.len;
+		tname = dentry->d_name.name;
+		if (parent->d_flags & DCACHE_OP_COMPARE) {
+			if (parent->d_op->d_compare(parent, parent->d_inode,
						dentry, dentry->d_inode,
						tlen, tname, name))
				goto next;
		} else {
-			if (qstr->len != len)
-				goto next;
-			if (memcmp(qstr->name, str, len))
+			if (dentry_cmp(tname, tlen, str, len))
				goto next;
		}

-		atomic_inc(&dentry->d_count);
+		dentry->d_count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
@@ -1473,8 +1954,8 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
-	if (dir->d_op && dir->d_op->d_hash) {
-		if (dir->d_op->d_hash(dir, name) < 0)
+	if (dir->d_flags & DCACHE_OP_HASH) {
+		if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
@@ -1483,34 +1964,32 @@ out:
}

/**
- * d_validate - verify dentry provided from insecure source
+ * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
+ *
|
|
|
+ * This function is slow for big directories, and deprecated, do not use it.
|
|
|
*/
|
|
|
-int d_validate(struct dentry *dentry, struct dentry *parent)
|
|
|
+int d_validate(struct dentry *dentry, struct dentry *dparent)
|
|
|
{
|
|
|
- struct hlist_head *head = d_hash(parent, dentry->d_name.hash);
|
|
|
- struct hlist_node *node;
|
|
|
- struct dentry *d;
|
|
|
-
|
|
|
- /* Check whether the ptr might be valid at all.. */
|
|
|
- if (!kmem_ptr_validate(dentry_cache, dentry))
|
|
|
- return 0;
|
|
|
- if (dentry->d_parent != parent)
|
|
|
- return 0;
|
|
|
+ struct dentry *child;
|
|
|
|
|
|
- rcu_read_lock();
|
|
|
- hlist_for_each_entry_rcu(d, node, head, d_hash) {
|
|
|
- if (d == dentry) {
|
|
|
- dget(dentry);
|
|
|
+ spin_lock(&dparent->d_lock);
|
|
|
+ list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
|
|
|
+ if (dentry == child) {
|
|
|
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
|
|
|
+ __dget_dlock(dentry);
|
|
|
+ spin_unlock(&dentry->d_lock);
|
|
|
+ spin_unlock(&dparent->d_lock);
|
|
|
return 1;
|
|
|
}
|
|
|
}
|
|
|
- rcu_read_unlock();
|
|
|
+ spin_unlock(&dparent->d_lock);
|
|
|
+
|
|
|
return 0;
|
|
|
}
|
|
|
EXPORT_SYMBOL(d_validate);
|
|
@@ -1538,16 +2017,23 @@ EXPORT_SYMBOL(d_validate);

 void d_delete(struct dentry * dentry)
 {
+	struct inode *inode;
 	int isdir = 0;
 	/*
 	 * Are we the only user?
 	 */
-	spin_lock(&dcache_lock);
+again:
 	spin_lock(&dentry->d_lock);
-	isdir = S_ISDIR(dentry->d_inode->i_mode);
-	if (atomic_read(&dentry->d_count) == 1) {
+	inode = dentry->d_inode;
+	isdir = S_ISDIR(inode->i_mode);
+	if (dentry->d_count == 1) {
+		if (inode && !spin_trylock(&inode->i_lock)) {
+			spin_unlock(&dentry->d_lock);
+			cpu_relax();
+			goto again;
+		}
 		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
-		dentry_iput(dentry);
+		dentry_unlink_inode(dentry);
 		fsnotify_nameremove(dentry, isdir);
 		return;
 	}
@@ -1556,17 +2042,18 @@ void d_delete(struct dentry * dentry)
 	__d_drop(dentry);

 	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);

 	fsnotify_nameremove(dentry, isdir);
 }
 EXPORT_SYMBOL(d_delete);

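The `again:` loop above is the standard answer to a lock-order problem: the documented order takes i_lock before d_lock, but d_delete() reaches the inode from the dentry, so it must trylock and back off rather than block. A generic sketch of the idiom (lock names are illustrative, not from this patch):

/*
 * Sketch of the trylock-and-retry idiom: 'a' is documented to rank
 * above 'b', but we arrive holding neither and must take 'b' first.
 */
static void lock_out_of_order(spinlock_t *a, spinlock_t *b)
{
	for (;;) {
		spin_lock(b);
		if (spin_trylock(a))
			return;		/* both held, no deadlock risk */
		spin_unlock(b);
		cpu_relax();		/* let the other CPU finish with 'a' */
	}
}
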
-static void __d_rehash(struct dentry * entry, struct hlist_head *list)
+static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
 {
-
+	BUG_ON(!d_unhashed(entry));
+	spin_lock_bucket(b);
 	entry->d_flags &= ~DCACHE_UNHASHED;
-	hlist_add_head_rcu(&entry->d_hash, list);
+	hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
+	spin_unlock_bucket(b);
 }

 static void _d_rehash(struct dentry * entry)
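
__d_rehash() now serializes each hash chain with its bucket's bit spinlock, while lookups traverse the chain under RCU only. A sketch of that lock-free reader side (the function name is an assumption; a real lookup must re-check and pin the candidate under d_lock before leaving the RCU section):

/*
 * Sketch (assumed example): RCU traversal of one hash chain. The
 * returned pointer is only a candidate; a real caller must verify
 * it and take a reference under d_lock before rcu_read_unlock().
 */
static struct dentry *example_scan_bucket(struct hlist_bl_head *head,
					  unsigned int hash)
{
	struct hlist_bl_node *node;
	struct dentry *dentry, *found = NULL;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(dentry, node, head, d_hash) {
		if (dentry->d_name.hash == hash) {
			found = dentry;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
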
@@ -1583,25 +2070,39 @@ static void _d_rehash(struct dentry * entry)

 void d_rehash(struct dentry * entry)
 {
-	spin_lock(&dcache_lock);
 	spin_lock(&entry->d_lock);
 	_d_rehash(entry);
 	spin_unlock(&entry->d_lock);
-	spin_unlock(&dcache_lock);
 }
 EXPORT_SYMBOL(d_rehash);

-/*
- * When switching names, the actual string doesn't strictly have to
- * be preserved in the target - because we're dropping the target
- * anyway. As such, we can just do a simple memcpy() to copy over
- * the new name before we switch.
+/**
+ * dentry_update_name_case - update case-insensitive dentry with a new name
+ * @dentry: dentry to be updated
+ * @name: new name
  *
- * Note that we have to be a lot more careful about getting the hash
- * switched - we have to switch the hash value properly even if it
- * then no longer matches the actual (corrupted) string of the target.
- * The hash value has to match the hash queue that the dentry is on..
+ * Update a case-insensitive dentry with the new case of the name.
+ *
+ * dentry must have been returned by d_lookup with name @name. Old and new
+ * name lengths must match (i.e. no d_compare which allows mismatched name
+ * lengths).
+ *
+ * Parent inode i_mutex must be held over d_lookup and into this call (to
+ * keep renames and concurrent inserts, and readdir(2) away).
  */
+void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
+{
+	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
+	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
+
+	spin_lock(&dentry->d_lock);
+	write_seqcount_begin(&dentry->d_seq);
+	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
+	write_seqcount_end(&dentry->d_seq);
+	spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(dentry_update_name_case);
+
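(The BUG_ON now checks the parent's i_mutex, which is what the kerneldoc above requires; the earlier text tested the dentry's own inode.) A sketch of how a case-insensitive filesystem might drive dentry_update_name_case() from its lookup path, honouring that locking rule (the function is a hypothetical caller, not part of the patch):

/*
 * Sketch (hypothetical caller): fold a cached dentry's name to the
 * on-disk case. The parent's i_mutex covers both the d_lookup() and
 * the update, as the kerneldoc above requires.
 */
static void example_fixup_name_case(struct dentry *parent,
				    struct qstr *disk_name)
{
	struct dentry *dentry;

	mutex_lock(&parent->d_inode->i_mutex);
	dentry = d_lookup(parent, disk_name);
	if (dentry) {
		dentry_update_name_case(dentry, disk_name);
		dput(dentry);
	}
	mutex_unlock(&parent->d_inode->i_mutex);
}
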
 static void switch_names(struct dentry *dentry, struct dentry *target)
 {
 	if (dname_external(target)) {
@@ -1643,54 +2144,84 @@ static void switch_names(struct dentry *dentry, struct dentry *target)
 	swap(dentry->d_name.len, target->d_name.len);
 }

+static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
+{
+	/*
+	 * XXXX: do we really need to take target->d_lock?
+	 */
+	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
+		spin_lock(&target->d_parent->d_lock);
+	else {
+		if (d_ancestor(dentry->d_parent, target->d_parent)) {
+			spin_lock(&dentry->d_parent->d_lock);
+			spin_lock_nested(&target->d_parent->d_lock,
+						DENTRY_D_LOCK_NESTED);
+		} else {
+			spin_lock(&target->d_parent->d_lock);
+			spin_lock_nested(&dentry->d_parent->d_lock,
+						DENTRY_D_LOCK_NESTED);
+		}
+	}
+	if (target < dentry) {
+		spin_lock_nested(&target->d_lock, 2);
+		spin_lock_nested(&dentry->d_lock, 3);
+	} else {
+		spin_lock_nested(&dentry->d_lock, 2);
+		spin_lock_nested(&target->d_lock, 3);
+	}
+}
+
+static void dentry_unlock_parents_for_move(struct dentry *dentry,
+					struct dentry *target)
+{
+	if (target->d_parent != dentry->d_parent)
+		spin_unlock(&dentry->d_parent->d_lock);
+	if (target->d_parent != target)
+		spin_unlock(&target->d_parent->d_lock);
+}
+
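dentry_lock_for_move() encodes the two ordering rules from the locking comment at the top of the file: a parent's d_lock is taken before a child's, and unrelated dentries are ordered by address; the explicit nesting levels 2 and 3 exist because up to four d_locks can be held at once here. The address-order tie-break is the classic idiom sketched below (generic names, not from the patch):

/*
 * Generic sketch of address-ordered double locking, the tie breaker
 * used above when neither dentry is an ancestor of the other.
 * Assumes l1 != l2.
 */
static void lock_pair_by_address(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);	/* lower address always locked first */
	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
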
 /*
- * We cannibalize "target" when moving dentry on top of it,
- * because it's going to be thrown away anyway. We could be more
- * polite about it, though.
- *
- * This forceful removal will result in ugly /proc output if
- * somebody holds a file open that got deleted due to a rename.
- * We could be nicer about the deleted file, and let it show
- * up under the name it had before it was deleted rather than
- * under the original name of the file that was moved on top of it.
+ * When switching names, the actual string doesn't strictly have to
+ * be preserved in the target - because we're dropping the target
+ * anyway. As such, we can just do a simple memcpy() to copy over
+ * the new name before we switch.
+ *
+ * Note that we have to be a lot more careful about getting the hash
+ * switched - we have to switch the hash value properly even if it
+ * then no longer matches the actual (corrupted) string of the target.
+ * The hash value has to match the hash queue that the dentry is on..
  */
-
 /*
- * d_move_locked - move a dentry
+ * d_move - move a dentry
  * @dentry: entry to move
  * @target: new dentry
  *
  * Update the dcache to reflect the move of a file name. Negative
  * dcache entries should not be moved in this way.
  */
-static void d_move_locked(struct dentry * dentry, struct dentry * target)
+void d_move(struct dentry * dentry, struct dentry * target)
 {
-	struct hlist_head *list;
-
 	if (!dentry->d_inode)
 		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

+	BUG_ON(d_ancestor(dentry, target));
+	BUG_ON(d_ancestor(target, dentry));
+
 	write_seqlock(&rename_lock);
-	/*
-	 * XXXX: do we really need to take target->d_lock?
-	 */
-	if (target < dentry) {
-		spin_lock(&target->d_lock);
-		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-	} else {
-		spin_lock(&dentry->d_lock);
-		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
-	}

-	/* Move the dentry to the target hash queue, if on different bucket */
-	if (d_unhashed(dentry))
-		goto already_unhashed;
+	dentry_lock_for_move(dentry, target);

-	hlist_del_rcu(&dentry->d_hash);
+	write_seqcount_begin(&dentry->d_seq);
+	write_seqcount_begin(&target->d_seq);

-already_unhashed:
-	list = d_hash(target->d_parent, target->d_name.hash);
-	__d_rehash(dentry, list);
+	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
+
+	/*
+	 * Move the dentry to the target hash queue. Don't bother checking
+	 * for the same hash queue because of how unlikely it is.
+	 */
+	__d_drop(dentry);
+	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));

 	/* Unhash the target: dput() will then get rid of it */
 	__d_drop(target);
@@ -1715,27 +2246,16 @@ already_unhashed:
 	}

 	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
+
+	write_seqcount_end(&target->d_seq);
+	write_seqcount_end(&dentry->d_seq);
+
+	dentry_unlock_parents_for_move(dentry, target);
 	spin_unlock(&target->d_lock);
 	fsnotify_d_move(dentry);
 	spin_unlock(&dentry->d_lock);
 	write_sequnlock(&rename_lock);
 }
-
-/**
- * d_move - move a dentry
- * @dentry: entry to move
- * @target: new dentry
- *
- * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
- */
-
-void d_move(struct dentry * dentry, struct dentry * target)
-{
-	spin_lock(&dcache_lock);
-	d_move_locked(dentry, target);
-	spin_unlock(&dcache_lock);
-}
 EXPORT_SYMBOL(d_move);

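With dcache_lock gone, rename_lock is what lockless path walkers synchronize against: d_move() is the writer, and readers sample the seqlock and retry if a rename raced with them. A sketch of that reader pattern (the function is illustrative):

/*
 * Sketch (assumed example) of the reader side paired with the
 * write_seqlock(&rename_lock) in d_move(): retry the lockless
 * computation if a rename ran concurrently.
 */
static int example_check_under_rename_lock(struct dentry *dentry)
{
	unsigned seq;
	int hashed;

	do {
		seq = read_seqbegin(&rename_lock);
		hashed = !d_unhashed(dentry);	/* any d_parent-derived state */
	} while (read_seqretry(&rename_lock, seq));

	return hashed;
}
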
 /**
@@ -1761,13 +2281,13 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
  * This helper attempts to cope with remotely renamed directories
  *
  * It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the dcache_lock
+ * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
  *
  * Note: If ever the locking in lock_rename() changes, then please
  * remember to update this too...
  */
-static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
-	__releases(dcache_lock)
+static struct dentry *__d_unalias(struct inode *inode,
+		struct dentry *dentry, struct dentry *alias)
 {
 	struct mutex *m1 = NULL, *m2 = NULL;
 	struct dentry *ret;
@@ -1790,10 +2310,10 @@ static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
 		goto out_err;
 	m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-	d_move_locked(alias, dentry);
+	d_move(alias, dentry);
 	ret = alias;
 out_err:
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
 	if (m2)
 		mutex_unlock(m2);
 	if (m1)
@@ -1804,17 +2324,23 @@ out_err:
 /*
  * Prepare an anonymous dentry for life in the superblock's dentry tree as a
  * named dentry in place of the dentry to be replaced.
+ * returns with anon->d_lock held!
  */
 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
 {
 	struct dentry *dparent, *aparent;

-	switch_names(dentry, anon);
-	swap(dentry->d_name.hash, anon->d_name.hash);
+	dentry_lock_for_move(anon, dentry);
+
+	write_seqcount_begin(&dentry->d_seq);
+	write_seqcount_begin(&anon->d_seq);

 	dparent = dentry->d_parent;
 	aparent = anon->d_parent;

+	switch_names(dentry, anon);
+	swap(dentry->d_name.hash, anon->d_name.hash);
+
 	dentry->d_parent = (aparent == anon) ? dentry : aparent;
 	list_del(&dentry->d_u.d_child);
 	if (!IS_ROOT(dentry))
@@ -1829,6 +2355,13 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
 	else
 		INIT_LIST_HEAD(&anon->d_u.d_child);

+	write_seqcount_end(&dentry->d_seq);
+	write_seqcount_end(&anon->d_seq);
+
+	dentry_unlock_parents_for_move(anon, dentry);
+	spin_unlock(&dentry->d_lock);
+
+	/* anon->d_lock still locked, returns locked */
 	anon->d_flags &= ~DCACHE_DISCONNECTED;
 }

@@ -1846,14 +2379,15 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)

 	BUG_ON(!d_unhashed(dentry));

-	spin_lock(&dcache_lock);
-
 	if (!inode) {
 		actual = dentry;
 		__d_instantiate(dentry, NULL);
-		goto found_lock;
+		d_rehash(actual);
+		goto out_nolock;
 	}

+	spin_lock(&inode->i_lock);
+
 	if (S_ISDIR(inode->i_mode)) {
 		struct dentry *alias;

@@ -1864,13 +2398,12 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
 		/* Is this an anonymous mountpoint that we could splice
 		 * into our tree? */
 		if (IS_ROOT(alias)) {
-			spin_lock(&alias->d_lock);
 			__d_materialise_dentry(dentry, alias);
 			__d_drop(alias);
 			goto found;
 		}
 		/* Nope, but we must(!) avoid directory aliasing */
-		actual = __d_unalias(dentry, alias);
+		actual = __d_unalias(inode, dentry, alias);
 		if (IS_ERR(actual))
 			dput(alias);
 		goto out_nolock;
@@ -1881,15 +2414,14 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
 	actual = __d_instantiate_unique(dentry, inode);
 	if (!actual)
 		actual = dentry;
-	else if (unlikely(!d_unhashed(actual)))
-		goto shouldnt_be_hashed;
+	else
+		BUG_ON(!d_unhashed(actual));

-found_lock:
 	spin_lock(&actual->d_lock);
 found:
 	_d_rehash(actual);
 	spin_unlock(&actual->d_lock);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
 out_nolock:
 	if (actual == dentry) {
 		security_d_instantiate(dentry, inode);
@@ -1898,10 +2430,6 @@ out_nolock:

 	iput(inode);
 	return actual;
-
-shouldnt_be_hashed:
-	spin_unlock(&dcache_lock);
-	BUG();
 }
 EXPORT_SYMBOL_GPL(d_materialise_unique);

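For context, the usual consumer of d_materialise_unique() is a network filesystem's ->lookup, which may discover an inode that already has a disconnected alias. A hedged sketch of that calling pattern; example_iget() is a hypothetical stand-in for the filesystem's inode lookup:

/*
 * Sketch (assumed caller): hand a fresh inode reference to
 * d_materialise_unique(), which consumes it and returns the dentry
 * to use - possibly a spliced-in alias, NULL (meaning "use the
 * passed-in dentry"), or an ERR_PTR.
 */
static struct dentry *example_fs_lookup(struct inode *dir,
					struct dentry *dentry)
{
	struct inode *inode = example_iget(dir);	/* hypothetical */

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	return d_materialise_unique(dentry, inode);
}
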
@@ -1928,7 +2456,7 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
  * @buffer: pointer to the end of the buffer
  * @buflen: pointer to buffer length
  *
- * Caller holds the dcache_lock.
+ * Caller holds the rename_lock.
  *
  * If path is not reachable from the supplied root, then the value of
  * root is changed (without modifying refcounts).
@@ -1956,7 +2484,9 @@ static int prepend_path(const struct path *path, struct path *root,
 		}
 		parent = dentry->d_parent;
 		prefetch(parent);
+		spin_lock(&dentry->d_lock);
 		error = prepend_name(buffer, buflen, &dentry->d_name);
+		spin_unlock(&dentry->d_lock);
 		if (!error)
 			error = prepend(buffer, buflen, "/", 1);
 		if (error)
@@ -2012,9 +2542,9 @@ char *__d_path(const struct path *path, struct path *root,
 	int error;

 	prepend(&res, &buflen, "\0", 1);
-	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
 	error = prepend_path(path, root, &res, &buflen);
-	spin_unlock(&dcache_lock);
+	write_sequnlock(&rename_lock);

 	if (error)
 		return ERR_PTR(error);
@@ -2076,12 +2606,12 @@ char *d_path(const struct path *path, char *buf, int buflen)
 		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

 	get_fs_root(current->fs, &root);
-	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
 	tmp = root;
 	error = path_with_deleted(path, &tmp, &res, &buflen);
 	if (error)
 		res = ERR_PTR(error);
-	spin_unlock(&dcache_lock);
+	write_sequnlock(&rename_lock);
 	path_put(&root);
 	return res;
 }
@@ -2107,12 +2637,12 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

 	get_fs_root(current->fs, &root);
-	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
 	tmp = root;
 	error = path_with_deleted(path, &tmp, &res, &buflen);
 	if (!error && !path_equal(&tmp, &root))
 		error = prepend_unreachable(&res, &buflen);
-	spin_unlock(&dcache_lock);
+	write_sequnlock(&rename_lock);
 	path_put(&root);
 	if (error)
 		res = ERR_PTR(error);
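The three path printers above now take rename_lock for writing instead of dcache_lock; their external interface is unchanged. For reference, a typical d_path() caller looks like this sketch (the function name is illustrative):

/*
 * Sketch (assumed example): render a struct path for logging.
 * d_path() builds the string from the end of the buffer and
 * returns a pointer into it, or an ERR_PTR on failure.
 */
static void example_log_path(const struct path *path)
{
	char *buf = (char *)__get_free_page(GFP_KERNEL);
	char *name;

	if (!buf)
		return;
	name = d_path(path, buf, PAGE_SIZE);
	if (!IS_ERR(name))
		printk(KERN_DEBUG "path: %s\n", name);
	free_page((unsigned long)buf);
}
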
@@ -2144,7 +2674,7 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
 /*
  * Write full pathname from the root of the filesystem into the buffer.
  */
-char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
+static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
 {
 	char *end = buf + buflen;
 	char *retval;
@@ -2158,10 +2688,13 @@ char *__dentry_path(struct dentry *dentry, char *buf, int buflen)

 	while (!IS_ROOT(dentry)) {
 		struct dentry *parent = dentry->d_parent;
+		int error;

 		prefetch(parent);
-		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
-		    (prepend(&end, &buflen, "/", 1) != 0))
+		spin_lock(&dentry->d_lock);
+		error = prepend_name(&end, &buflen, &dentry->d_name);
+		spin_unlock(&dentry->d_lock);
+		if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
 			goto Elong;

 		retval = end;
@@ -2171,14 +2704,25 @@ char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
 Elong:
 	return ERR_PTR(-ENAMETOOLONG);
 }
-EXPORT_SYMBOL(__dentry_path);
+
+char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
+{
+	char *retval;
+
+	write_seqlock(&rename_lock);
+	retval = __dentry_path(dentry, buf, buflen);
+	write_sequnlock(&rename_lock);
+
+	return retval;
+}
+EXPORT_SYMBOL(dentry_path_raw);

 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
 {
 	char *p = NULL;
 	char *retval;

-	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
 	if (d_unlinked(dentry)) {
 		p = buf + buflen;
 		if (prepend(&p, &buflen, "//deleted", 10) != 0)
@@ -2186,12 +2730,11 @@ char *dentry_path(struct dentry *dentry, char *buf, int buflen)
 		buflen++;
 	}
 	retval = __dentry_path(dentry, buf, buflen);
-	spin_unlock(&dcache_lock);
+	write_sequnlock(&rename_lock);
 	if (!IS_ERR(retval) && p)
 		*p = '/';	/* restore '/' overridden with '\0' */
 	return retval;
 Elong:
-	spin_unlock(&dcache_lock);
 	return ERR_PTR(-ENAMETOOLONG);
 }

@@ -2225,7 +2768,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 	get_fs_root_and_pwd(current->fs, &root, &pwd);

 	error = -ENOENT;
-	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
 	if (!d_unlinked(pwd.dentry)) {
 		unsigned long len;
 		struct path tmp = root;
@@ -2234,7 +2777,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)

 		prepend(&cwd, &buflen, "\0", 1);
 		error = prepend_path(&pwd, &tmp, &cwd, &buflen);
-		spin_unlock(&dcache_lock);
+		write_sequnlock(&rename_lock);

 		if (error)
 			goto out;
@@ -2253,8 +2796,9 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 			if (copy_to_user(buf, cwd, len))
 				error = -EFAULT;
 		}
-	} else
-		spin_unlock(&dcache_lock);
+	} else {
+		write_sequnlock(&rename_lock);
+	}

 out:
 	path_put(&pwd);
@@ -2282,25 +2826,25 @@ out:
 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
 {
 	int result;
-	unsigned long seq;
+	unsigned seq;

 	if (new_dentry == old_dentry)
 		return 1;

-	/*
-	 * Need rcu_readlock to protect against the d_parent trashing
-	 * due to d_move
-	 */
-	rcu_read_lock();
 	do {
 		/* for restarting inner loop in case of seq retry */
 		seq = read_seqbegin(&rename_lock);
+		/*
+		 * Need rcu_read_lock() to protect against the d_parent
+		 * trashing due to d_move
+		 */
+		rcu_read_lock();
 		if (d_ancestor(old_dentry, new_dentry))
 			result = 1;
 		else
 			result = 0;
+		rcu_read_unlock();
 	} while (read_seqretry(&rename_lock, seq));
-	rcu_read_unlock();

 	return result;
 }
@@ -2332,10 +2876,15 @@ EXPORT_SYMBOL(path_is_under);

 void d_genocide(struct dentry *root)
 {
-	struct dentry *this_parent = root;
+	struct dentry *this_parent;
 	struct list_head *next;
+	unsigned seq;
+	int locked = 0;

-	spin_lock(&dcache_lock);
+	seq = read_seqbegin(&rename_lock);
+again:
+	this_parent = root;
+	spin_lock(&this_parent->d_lock);
 repeat:
 	next = this_parent->d_subdirs.next;
 resume:
@@ -2343,21 +2892,62 @@ resume:
 		struct list_head *tmp = next;
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
-		if (d_unhashed(dentry)||!dentry->d_inode)
+
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+		if (d_unhashed(dentry) || !dentry->d_inode) {
+			spin_unlock(&dentry->d_lock);
 			continue;
+		}
 		if (!list_empty(&dentry->d_subdirs)) {
+			spin_unlock(&this_parent->d_lock);
+			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
 			this_parent = dentry;
+			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
 			goto repeat;
 		}
-		atomic_dec(&dentry->d_count);
+		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
+			dentry->d_flags |= DCACHE_GENOCIDE;
+			dentry->d_count--;
+		}
+		spin_unlock(&dentry->d_lock);
 	}
 	if (this_parent != root) {
-		next = this_parent->d_u.d_child.next;
-		atomic_dec(&this_parent->d_count);
-		this_parent = this_parent->d_parent;
+		struct dentry *tmp;
+		struct dentry *child;
+
+		tmp = this_parent->d_parent;
+		if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
+			this_parent->d_flags |= DCACHE_GENOCIDE;
+			this_parent->d_count--;
+		}
+		rcu_read_lock();
+		spin_unlock(&this_parent->d_lock);
+		child = this_parent;
+		this_parent = tmp;
+		spin_lock(&this_parent->d_lock);
+		/* might go back up the wrong parent if we have had a rename
+		 * or deletion */
+		if (this_parent != child->d_parent ||
+				(!locked && read_seqretry(&rename_lock, seq))) {
+			spin_unlock(&this_parent->d_lock);
+			rcu_read_unlock();
+			goto rename_retry;
+		}
+		rcu_read_unlock();
+		next = child->d_u.d_child.next;
 		goto resume;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&this_parent->d_lock);
+	if (!locked && read_seqretry(&rename_lock, seq))
+		goto rename_retry;
+	if (locked)
+		write_sequnlock(&rename_lock);
+	return;
+
+rename_retry:
+	locked = 1;
+	write_seqlock(&rename_lock);
+	goto again;
 }

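d_genocide() now walks the tree with per-dentry locks and validates itself against rename_lock, escalating from an optimistic read-side pass to the write side only if it keeps losing races with renames. The control skeleton, stripped of the tree walk, is worth seeing on its own (illustrative function name, not from the patch):

/*
 * Skeleton (assumed example) of the retry-then-escalate pattern in
 * d_genocide(): one optimistic pass; if a rename invalidated it,
 * redo the walk holding rename_lock for writing, which guarantees
 * forward progress.
 */
static void example_tree_pass(struct dentry *root)
{
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	/* ... per-dentry-locked walk of the subtree goes here ... */

	if (!locked && read_seqretry(&rename_lock, seq)) {
		locked = 1;
		write_seqlock(&rename_lock);
		goto again;
	}
	if (locked)
		write_sequnlock(&rename_lock);
}
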
 /**
@@ -2411,7 +3001,7 @@ static void __init dcache_init_early(void)

 	dentry_hashtable =
 		alloc_large_system_hash("Dentry cache",
-					sizeof(struct hlist_head),
+					sizeof(struct dcache_hash_bucket),
 					dhash_entries,
 					13,
 					HASH_EARLY,
@@ -2420,16 +3010,13 @@ static void __init dcache_init_early(void)
 					0);

 	for (loop = 0; loop < (1 << d_hash_shift); loop++)
-		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
+		INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
 }

 static void __init dcache_init(void)
 {
 	int loop;

-	percpu_counter_init(&nr_dentry, 0);
-	percpu_counter_init(&nr_dentry_unused, 0);
-
 	/*
 	 * A constructor could be added for stable state like the lists,
 	 * but it is probably not worth it because of the cache nature
@@ -2446,7 +3033,7 @@ static void __init dcache_init(void)

 	dentry_hashtable =
 		alloc_large_system_hash("Dentry cache",
-					sizeof(struct hlist_head),
+					sizeof(struct dcache_hash_bucket),
 					dhash_entries,
 					13,
 					0,
@@ -2455,7 +3042,7 @@ static void __init dcache_init(void)
 					0);

 	for (loop = 0; loop < (1 << d_hash_shift); loop++)
-		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
+		INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
 }

 /* SLAB cache for __getname() consumers */