@@ -235,18 +235,49 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 	return dentry_string_cmp(cs, ct, tcount);
 }
 
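+/*
+ * u.count is the number of dentries sharing this name while it is
+ * live; u.head is used to free the buffer via RCU once the last
+ * reference is dropped.
+ */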
+struct external_name {
+	union {
+		atomic_t count;
+		struct rcu_head head;
+	} u;
+	unsigned char name[];
+};
+
+static inline struct external_name *external_name(struct dentry *dentry)
+{
+	return container_of(dentry->d_name.name, struct external_name, name[0]);
+}
+
 static void __d_free(struct rcu_head *head)
 {
 	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
 
 	WARN_ON(!hlist_unhashed(&dentry->d_alias));
-	if (dname_external(dentry))
-		kfree(dentry->d_name.name);
 	kmem_cache_free(dentry_cache, dentry);
 }
 
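+/* as __d_free(), but also releases the no-longer-shared external name */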
+static void __d_free_external(struct rcu_head *head)
+{
+	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
+	WARN_ON(!hlist_unhashed(&dentry->d_alias));
+	kfree(external_name(dentry));
+	kmem_cache_free(dentry_cache, dentry);
+}
+
+static inline int dname_external(const struct dentry *dentry)
+{
+	return dentry->d_name.name != dentry->d_iname;
+}
+
 static void dentry_free(struct dentry *dentry)
 {
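+	/* a shared external name is freed only when its last user is gone */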
+	if (unlikely(dname_external(dentry))) {
+		struct external_name *p = external_name(dentry);
+		if (likely(atomic_dec_and_test(&p->u.count))) {
+			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
+			return;
+		}
+	}
 	/* if dentry was never visible to RCU, immediate free is OK */
 	if (!(dentry->d_flags & DCACHE_RCUACCESS))
 		__d_free(&dentry->d_u.d_rcu);
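
For readers following the refcounting outside the kernel tree, a minimal
userspace sketch of the same scheme (C11 atomics stand in for atomic_t,
and a plain free() for the RCU-deferred kfree; all names here are
illustrative, not kernel API):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* refcounted name buffer, modeled on struct external_name */
	struct xname {
		atomic_int count;
		char name[];            /* flexible array, NUL-terminated */
	};

	static struct xname *xname_new(const char *s)
	{
		size_t len = strlen(s);
		struct xname *p = malloc(sizeof(*p) + len + 1);
		if (!p)
			return NULL;
		atomic_init(&p->count, 1);
		memcpy(p->name, s, len + 1);
		return p;
	}

	/* share: one more user of the same buffer (copy_name's fast path) */
	static struct xname *xname_get(struct xname *p)
	{
		atomic_fetch_add(&p->count, 1);
		return p;
	}

	/* release: the last user frees; the kernel defers this via RCU */
	static void xname_put(struct xname *p)
	{
		if (atomic_fetch_sub(&p->count, 1) == 1)
			free(p);
	}

	int main(void)
	{
		struct xname *a = xname_new("a_rather_long_directory_entry_name");
		struct xname *b = xname_get(a); /* two dentries, one buffer */

		xname_put(a);            /* first user gone: buffer survives */
		printf("%s\n", b->name);
		xname_put(b);            /* last user gone: buffer freed */
		return 0;
	}
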
@@ -456,7 +487,7 @@ static void __dentry_kill(struct dentry *dentry)
 	 * inform the fs via d_prune that this dentry is about to be
 	 * unhashed and destroyed.
 	 */
-	if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
+	if (dentry->d_flags & DCACHE_OP_PRUNE)
 		dentry->d_op->d_prune(dentry);
 
 	if (dentry->d_flags & DCACHE_LRU_LIST) {
@@ -619,62 +650,6 @@ kill_it:
 }
 EXPORT_SYMBOL(dput);
 
-/**
- * d_invalidate - invalidate a dentry
- * @dentry: dentry to invalidate
- *
- * Try to invalidate the dentry if it turns out to be
- * possible. If there are other dentries that can be
- * reached through this one we can't delete it and we
- * return -EBUSY. On success we return 0.
- *
- * no dcache lock.
- */
-
-int d_invalidate(struct dentry * dentry)
-{
-	/*
-	 * If it's already been dropped, return OK.
-	 */
-	spin_lock(&dentry->d_lock);
-	if (d_unhashed(dentry)) {
-		spin_unlock(&dentry->d_lock);
-		return 0;
-	}
-	/*
-	 * Check whether to do a partial shrink_dcache
-	 * to get rid of unused child entries.
-	 */
-	if (!list_empty(&dentry->d_subdirs)) {
-		spin_unlock(&dentry->d_lock);
-		shrink_dcache_parent(dentry);
-		spin_lock(&dentry->d_lock);
-	}
-
-	/*
-	 * Somebody else still using it?
-	 *
-	 * If it's a directory, we can't drop it
-	 * for fear of somebody re-populating it
-	 * with children (even though dropping it
-	 * would make it unreachable from the root,
-	 * we might still populate it if it was a
-	 * working directory or similar).
-	 * We also need to leave mountpoints alone,
-	 * directory or not.
-	 */
-	if (dentry->d_lockref.count > 1 && dentry->d_inode) {
-		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
-			spin_unlock(&dentry->d_lock);
-			return -EBUSY;
-		}
-	}
-
-	__d_drop(dentry);
-	spin_unlock(&dentry->d_lock);
-	return 0;
-}
-EXPORT_SYMBOL(d_invalidate);
-
 /* This must be called with d_lock held */
 static inline void __dget_dlock(struct dentry *dentry)
@@ -735,7 +710,8 @@ EXPORT_SYMBOL(dget_parent);
  * acquire the reference to alias and return it. Otherwise return NULL.
  * Notice that if inode is a directory there can be only one alias and
  * it can be unhashed only if it has no children, or if it is the root
- * of a filesystem.
+ * of a filesystem, or if the directory was renamed and d_revalidate
+ * was the first vfs operation to notice.
  *
  * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
  * any other hashed alias over that one.
@@ -799,20 +775,13 @@ restart:
 	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
 		if (!dentry->d_lockref.count) {
-			/*
-			 * inform the fs via d_prune that this dentry
-			 * is about to be unhashed and destroyed.
-			 */
-			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
-			    !d_unhashed(dentry))
-				dentry->d_op->d_prune(dentry);
-
-			__dget_dlock(dentry);
-			__d_drop(dentry);
-			spin_unlock(&dentry->d_lock);
-			spin_unlock(&inode->i_lock);
-			dput(dentry);
-			goto restart;
+			struct dentry *parent = lock_parent(dentry);
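+			/* lock_parent() may drop d_lock, so recheck the count */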
+			if (likely(!dentry->d_lockref.count)) {
+				__dentry_kill(dentry);
+				goto restart;
+			}
+			if (parent)
+				spin_unlock(&parent->d_lock);
 		}
 		spin_unlock(&dentry->d_lock);
 	}
@@ -1193,7 +1162,7 @@ EXPORT_SYMBOL(have_submounts);
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
- * Only one of check_submounts_and_drop() and d_set_mounted() must succeed. For
+ * Only one of d_invalidate() and d_set_mounted() must succeed. For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
 int d_set_mounted(struct dentry *dentry)
@@ -1202,7 +1171,7 @@ int d_set_mounted(struct dentry *dentry)
 	int ret = -ENOENT;
 	write_seqlock(&rename_lock);
 	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
-		/* Need exclusion wrt. check_submounts_and_drop() */
+		/* Need exclusion wrt. d_invalidate() */
 		spin_lock(&p->d_lock);
 		if (unlikely(d_unhashed(p))) {
 			spin_unlock(&p->d_lock);
@@ -1346,70 +1315,84 @@ void shrink_dcache_for_umount(struct super_block *sb)
 	}
 }
 
-static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
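+/*
+ * d_walk() state for d_invalidate(): collect unused dentries into
+ * select, and pin the first mountpoint met so it can be detached.
+ */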
+struct detach_data {
+	struct select_data select;
+	struct dentry *mountpoint;
+};
+static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
 {
-	struct select_data *data = _data;
+	struct detach_data *data = _data;
 
 	if (d_mountpoint(dentry)) {
-		data->found = -EBUSY;
+		__dget_dlock(dentry);
+		data->mountpoint = dentry;
 		return D_WALK_QUIT;
 	}
 
-	return select_collect(_data, dentry);
+	return select_collect(&data->select, dentry);
 }
 
 static void check_and_drop(void *_data)
 {
-	struct select_data *data = _data;
+	struct detach_data *data = _data;
 
-	if (d_mountpoint(data->start))
-		data->found = -EBUSY;
-	if (!data->found)
-		__d_drop(data->start);
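+	/* unhash the starting dentry only once the subtree is clean */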
+	if (!data->mountpoint && !data->select.found)
+		__d_drop(data->select.start);
 }
 
 /**
- * check_submounts_and_drop - prune dcache, check for submounts and drop
+ * d_invalidate - detach submounts, prune dcache, and drop
+ * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
- * All done as a single atomic operation relative to has_unlinked_ancestor().
- * Returns 0 if successfully unhashed @parent. If there were submounts then
- * return -EBUSY.
+ * no dcache lock.
 *
- * @dentry: dentry to prune and drop
+ * The final d_drop is done as an atomic operation relative to
+ * rename_lock ensuring there are no races with d_set_mounted. This
+ * ensures there are no unhashed dentries on the path to a mountpoint.
 */
-int check_submounts_and_drop(struct dentry *dentry)
+void d_invalidate(struct dentry *dentry)
 {
-	int ret = 0;
+	/*
+	 * If it's already been dropped, return OK.
+	 */
+	spin_lock(&dentry->d_lock);
+	if (d_unhashed(dentry)) {
+		spin_unlock(&dentry->d_lock);
+		return;
+	}
+	spin_unlock(&dentry->d_lock);
 
 	/* Negative dentries can be dropped without further checks */
 	if (!dentry->d_inode) {
 		d_drop(dentry);
-		goto out;
+		return;
 	}
 
 	for (;;) {
-		struct select_data data;
+		struct detach_data data;
 
-		INIT_LIST_HEAD(&data.dispose);
-		data.start = dentry;
-		data.found = 0;
+		data.mountpoint = NULL;
+		INIT_LIST_HEAD(&data.select.dispose);
+		data.select.start = dentry;
+		data.select.found = 0;
+
+		d_walk(dentry, &data, detach_and_collect, check_and_drop);
 
-		d_walk(dentry, &data, check_and_collect, check_and_drop);
-		ret = data.found;
+		if (data.select.found)
+			shrink_dentry_list(&data.select.dispose);
 
-		if (!list_empty(&data.dispose))
-			shrink_dentry_list(&data.dispose);
+		if (data.mountpoint) {
+			detach_mounts(data.mountpoint);
+			dput(data.mountpoint);
+		}
 
-		if (ret <= 0)
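+		/* neither mounts to detach nor dentries to shrink: done */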
+		if (!data.mountpoint && !data.select.found)
 			break;
 
 		cond_resched();
 	}
-
-out:
-	return ret;
 }
-EXPORT_SYMBOL(check_submounts_and_drop);
+EXPORT_SYMBOL(d_invalidate);
 
 /**
 * __d_alloc - allocate a dcache entry
@@ -1438,11 +1421,14 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
 	 */
 	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
 	if (name->len > DNAME_INLINE_LEN-1) {
-		dname = kmalloc(name->len + 1, GFP_KERNEL);
-		if (!dname) {
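+		/* name[1] in the offsetof leaves room for the trailing NUL */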
+		size_t size = offsetof(struct external_name, name[1]);
+		struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
+		if (!p) {
 			kmem_cache_free(dentry_cache, dentry);
 			return NULL;
 		}
+		atomic_set(&p->u.count, 1);
+		dname = p->name;
 	} else {
 		dname = dentry->d_iname;
 	}
@@ -2112,10 +2098,10 @@ struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
 	struct dentry *dentry;
 	unsigned seq;
 
-        do {
-                seq = read_seqbegin(&rename_lock);
-                dentry = __d_lookup(parent, name);
-                if (dentry)
+	do {
+		seq = read_seqbegin(&rename_lock);
+		dentry = __d_lookup(parent, name);
+		if (dentry)
 			break;
 	} while (read_seqretry(&rename_lock, seq));
 	return dentry;
@@ -2372,11 +2358,10 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
 }
 EXPORT_SYMBOL(dentry_update_name_case);
 
-static void switch_names(struct dentry *dentry, struct dentry *target,
-			 bool exchange)
+static void swap_names(struct dentry *dentry, struct dentry *target)
 {
-	if (dname_external(target)) {
-		if (dname_external(dentry)) {
+	if (unlikely(dname_external(target))) {
+		if (unlikely(dname_external(dentry))) {
 			/*
 			 * Both external: swap the pointers
 			 */
@@ -2392,7 +2377,7 @@ static void switch_names(struct dentry *dentry, struct dentry *target,
 			target->d_name.name = target->d_iname;
 		}
 	} else {
-		if (dname_external(dentry)) {
+		if (unlikely(dname_external(dentry))) {
 			/*
 			 * dentry:external, target:internal. Give dentry's
 			 * storage to target and make dentry internal
@@ -2407,12 +2392,6 @@ static void switch_names(struct dentry *dentry, struct dentry *target,
 			 */
 			unsigned int i;
 			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
-			if (!exchange) {
-				memcpy(dentry->d_iname, target->d_name.name,
-						target->d_name.len + 1);
-				dentry->d_name.hash_len = target->d_name.hash_len;
-				return;
-			}
 			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
 				swap(((long *) &dentry->d_iname)[i],
 				     ((long *) &target->d_iname)[i]);
@@ -2422,6 +2401,24 @@ static void switch_names(struct dentry *dentry, struct dentry *target,
 	swap(dentry->d_name.hash_len, target->d_name.hash_len);
 }
 
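+/*
+ * Non-exchange rename: dentry takes over target's name.  An external
+ * target name is shared by bumping its refcount instead of copied;
+ * dentry's old external name, if any, drops a reference and is
+ * RCU-freed once its last user is gone.
+ */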
+static void copy_name(struct dentry *dentry, struct dentry *target)
+{
+	struct external_name *old_name = NULL;
+	if (unlikely(dname_external(dentry)))
+		old_name = external_name(dentry);
+	if (unlikely(dname_external(target))) {
+		atomic_inc(&external_name(target)->u.count);
+		dentry->d_name = target->d_name;
+	} else {
+		memcpy(dentry->d_iname, target->d_name.name,
+				target->d_name.len + 1);
+		dentry->d_name.name = dentry->d_iname;
+		dentry->d_name.hash_len = target->d_name.hash_len;
+	}
+	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
+		kfree_rcu(old_name, u.head);
+}
+
 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
 {
 	/*
@@ -2518,7 +2515,10 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
 	}
 
 	/* Switch the names.. */
-	switch_names(dentry, target, exchange);
+	if (exchange)
+		swap_names(dentry, target);
+	else
+		copy_name(dentry, target);
 
 	/* ... and switch them in the tree */
 	if (IS_ROOT(dentry)) {
@@ -2625,10 +2625,8 @@ static struct dentry *__d_unalias(struct inode *inode,
 		goto out_err;
 	m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-	if (likely(!d_mountpoint(alias))) {
-		__d_move(alias, dentry, false);
-		ret = alias;
-	}
+	__d_move(alias, dentry, false);
+	ret = alias;
 out_err:
 	spin_unlock(&inode->i_lock);
 	if (m2)
@@ -2810,6 +2808,9 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
 * the beginning of the name. The sequence number check at the caller will
 * retry it again when a d_move() does happen. So any garbage in the buffer
 * due to mismatched pointer and length will be discarded.
+ *
+ * Data dependency barrier is needed to make sure that we see that terminating
+ * NUL. Alpha strikes again, film at 11...
 */
 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
 {
@@ -2817,6 +2818,8 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
 	u32 dlen = ACCESS_ONCE(name->len);
 	char *p;
 
+	smp_read_barrier_depends();
+
 	*buflen -= dlen + 1;
 	if (*buflen < 0)
 		return -ENAMETOOLONG;
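
As an aside on that barrier: a minimal userspace sketch of the
publish/consume pairing it relies on, with C11 release/consume standing
in for the kernel's store ordering and smp_read_barrier_depends(); the
writer and reader here are hypothetical, not dcache code:

	#include <stdatomic.h>
	#include <string.h>

	struct name { unsigned len; char str[64]; };

	static struct name storage;
	static _Atomic(struct name *) published;

	/* writer: fill the buffer, then publish the pointer with release
	 * semantics so the bytes (including the NUL) are visible first */
	void publish_name(const char *s)
	{
		storage.len = (unsigned)strlen(s);
		memcpy(storage.str, s, storage.len + 1);
		atomic_store_explicit(&published, &storage, memory_order_release);
	}

	/* reader: a consume (dependency-ordered) load guarantees reads
	 * through the pointer see the fully written name, even on Alpha */
	unsigned read_name_len(void)
	{
		struct name *p = atomic_load_explicit(&published, memory_order_consume);
		return p ? p->len : 0;
	}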