path: root/fs/namespace.c
author     Linus Torvalds <torvalds@linux-foundation.org>   2019-07-20 19:15:51 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-07-20 19:15:51 +0300
commit     18253e034d2aeee140f82fc9fe89c4bce5c81799
tree       0a01610fbe16b24800977d2c7a3b93721ca5639c   /fs/namespace.c
parent     abdfd52a295fb5731ab07b5c9013e2e39f4d1cbe
parent     56cbb429d911991170fe867b4bba14f0efed5829
Merge branch 'work.dcache2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull dcache and mountpoint updates from Al Viro:
 "Saner handling of refcounts to mountpoints.

  Transfer the counting reference from struct mount ->mnt_mountpoint
  over to struct mountpoint ->m_dentry. That allows us to get rid of
  the convoluted games with ordering of mount shutdowns.

  The cost is in teaching shrink_dcache_{parent,for_umount} to cope
  with mixed-filesystem shrink lists, which we'll also need for the
  Slab Movable Objects patchset"

* 'work.dcache2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  switch the remnants of releasing the mountpoint away from fs_pin
  get rid of detach_mnt()
  make struct mountpoint bear the dentry reference to mountpoint, not struct mount
  Teach shrink_dcache_parent() to cope with mixed-filesystem shrink lists
  fs/namespace.c: shift put_mountpoint() to callers of unhash_mnt()
  __detach_mounts(): lookup_mountpoint() can't return ERR_PTR() anymore
  nfs: dget_parent() never returns NULL
  ceph: don't open-code the check for dead lockref
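For orientation before the diff: the core of the series is that the counted dentry reference moves from each struct mount (held in ->mnt_mountpoint and dropped through fs_pin callbacks) to the shared struct mountpoint ->m_dentry, taken with dget() in get_mountpoint() and released via a disposal list that namespace_unlock() shrinks after the locks are dropped. Below is a minimal userspace C sketch of that ownership model; it is not kernel code, and the types and helpers are toy stand-ins that merely borrow the kernel names (dget, dput_to_list, get_mountpoint, __put_mountpoint, shrink_dentry_list).

/*
 * Toy model of the ownership rule introduced by this series (NOT kernel
 * code): the mountpoint owns the one counted reference to its dentry,
 * taken when the mountpoint is created; the final release is deferred
 * onto a disposal list and performed only after the lock equivalents
 * are dropped, mirroring __put_mountpoint()/namespace_unlock().
 */
#include <stdio.h>
#include <stdlib.h>

struct dentry {
	int d_count;             /* stand-in for the dentry lockref count */
	const char *name;
	struct dentry *next;     /* stand-in for the disposal-list linkage */
};

struct mountpoint {
	struct dentry *m_dentry; /* the one counted reference now lives here */
	int m_count;             /* how many mounts hang off this mountpoint */
};

static struct dentry *dget(struct dentry *d)
{
	d->d_count++;
	return d;
}

/* model of dput_to_list(): defer the real release onto a disposal list */
static void dput_to_list(struct dentry *d, struct dentry **list)
{
	d->next = *list;
	*list = d;
}

/* model of get_mountpoint(): the mountpoint itself takes the reference */
static struct mountpoint *get_mountpoint(struct dentry *d)
{
	struct mountpoint *mp = malloc(sizeof(*mp));

	if (!mp)
		abort();
	mp->m_dentry = dget(d);  /* before this series: a plain assignment */
	mp->m_count = 1;
	return mp;
}

/* model of __put_mountpoint(): the last user queues the dentry, no dput yet */
static void put_mountpoint(struct mountpoint *mp, struct dentry **list)
{
	if (!--mp->m_count) {
		dput_to_list(mp->m_dentry, list);
		free(mp);
	}
}

/* model of the tail of namespace_unlock(): drop the refs with no locks held */
static void shrink_dentry_list(struct dentry **list)
{
	while (*list) {
		struct dentry *d = *list;

		*list = d->next;
		d->d_count--;
		printf("dput(%s) -> count %d, done outside the locked region\n",
		       d->name, d->d_count);
	}
}

int main(void)
{
	struct dentry mnt_dir = { .d_count = 1, .name = "/mnt" };
	struct dentry *disposal = NULL;
	struct mountpoint *mp = get_mountpoint(&mnt_dir);

	mp->m_count++;                   /* a second mount stacked on /mnt */
	put_mountpoint(mp, &disposal);   /* first umount: mountpoint survives */
	put_mountpoint(mp, &disposal);   /* last umount: dentry only queued */
	shrink_dentry_list(&disposal);   /* the namespace_unlock() equivalent */
	return 0;
}

The deferral is the point: the final dput() can kick off filesystem work that must not run under mount_lock or namespace_sem, so __put_mountpoint() only queues the dentry on the caller's list and the list is drained once it is safe, as the diff below does in namespace_unlock() and mntput_no_expire().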
Diffstat (limited to 'fs/namespace.c')
-rw-r--r--   fs/namespace.c   159
1 file changed, 77 insertions(+), 82 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index f0d664adb9ba..6464ea4acba9 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -70,6 +70,8 @@ static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
+static HLIST_HEAD(unmounted); /* protected by namespace_sem */
+static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
/* /sys/fs */
struct kobject *fs_kobj;
@@ -170,14 +172,6 @@ unsigned int mnt_get_count(struct mount *mnt)
#endif
}
-static void drop_mountpoint(struct fs_pin *p)
-{
- struct mount *m = container_of(p, struct mount, mnt_umount);
- dput(m->mnt_ex_mountpoint);
- pin_remove(p);
- mntput(&m->mnt);
-}
-
static struct mount *alloc_vfsmnt(const char *name)
{
struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
@@ -215,7 +209,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
INIT_LIST_HEAD(&mnt->mnt_umounting);
- init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
+ INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
}
return mnt;
@@ -740,7 +734,7 @@ mountpoint:
/* Add the new mountpoint to the hash table */
read_seqlock_excl(&mount_lock);
- new->m_dentry = dentry;
+ new->m_dentry = dget(dentry);
new->m_count = 1;
hlist_add_head(&new->m_hash, mp_hash(dentry));
INIT_HLIST_HEAD(&new->m_list);
@@ -753,7 +747,11 @@ done:
return mp;
}
-static void put_mountpoint(struct mountpoint *mp)
+/*
+ * vfsmount lock must be held. Additionally, the caller is responsible
+ * for serializing calls for given disposal list.
+ */
+static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
if (!--mp->m_count) {
struct dentry *dentry = mp->m_dentry;
@@ -761,11 +759,18 @@ static void put_mountpoint(struct mountpoint *mp)
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_MOUNTED;
spin_unlock(&dentry->d_lock);
+ dput_to_list(dentry, list);
hlist_del(&mp->m_hash);
kfree(mp);
}
}
+/* called with namespace_lock and vfsmount lock */
+static void put_mountpoint(struct mountpoint *mp)
+{
+ __put_mountpoint(mp, &ex_mountpoints);
+}
+
static inline int check_mnt(struct mount *mnt)
{
return mnt->mnt_ns == current->nsproxy->mnt_ns;
@@ -796,25 +801,17 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
/*
* vfsmount lock must be held for write
*/
-static void unhash_mnt(struct mount *mnt)
+static struct mountpoint *unhash_mnt(struct mount *mnt)
{
+ struct mountpoint *mp;
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
hlist_del_init_rcu(&mnt->mnt_hash);
hlist_del_init(&mnt->mnt_mp_list);
- put_mountpoint(mnt->mnt_mp);
+ mp = mnt->mnt_mp;
mnt->mnt_mp = NULL;
-}
-
-/*
- * vfsmount lock must be held for write
- */
-static void detach_mnt(struct mount *mnt, struct path *old_path)
-{
- old_path->dentry = mnt->mnt_mountpoint;
- old_path->mnt = &mnt->mnt_parent->mnt;
- unhash_mnt(mnt);
+ return mp;
}
/*
@@ -822,9 +819,7 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
*/
static void umount_mnt(struct mount *mnt)
{
- /* old mountpoint will be dropped when we can do that */
- mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
- unhash_mnt(mnt);
+ put_mountpoint(unhash_mnt(mnt));
}
/*
@@ -836,7 +831,7 @@ void mnt_set_mountpoint(struct mount *mnt,
{
mp->m_count++;
mnt_add_count(mnt, 1); /* essentially, that's mntget */
- child_mnt->mnt_mountpoint = dget(mp->m_dentry);
+ child_mnt->mnt_mountpoint = mp->m_dentry;
child_mnt->mnt_parent = mnt;
child_mnt->mnt_mp = mp;
hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
@@ -863,7 +858,6 @@ static void attach_mnt(struct mount *mnt,
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
struct mountpoint *old_mp = mnt->mnt_mp;
- struct dentry *old_mountpoint = mnt->mnt_mountpoint;
struct mount *old_parent = mnt->mnt_parent;
list_del_init(&mnt->mnt_child);
@@ -873,22 +867,6 @@ void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct m
attach_mnt(mnt, parent, mp);
put_mountpoint(old_mp);
-
- /*
- * Safely avoid even the suggestion this code might sleep or
- * lock the mount hash by taking advantage of the knowledge that
- * mnt_change_mountpoint will not release the final reference
- * to a mountpoint.
- *
- * During mounting, the mount passed in as the parent mount will
- * continue to use the old mountpoint and during unmounting, the
- * old mountpoint will continue to exist until namespace_unlock,
- * which happens well after mnt_change_mountpoint.
- */
- spin_lock(&old_mountpoint->d_lock);
- old_mountpoint->d_lockref.count--;
- spin_unlock(&old_mountpoint->d_lock);
-
mnt_add_count(old_parent, -1);
}
@@ -1103,19 +1081,22 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
static void cleanup_mnt(struct mount *mnt)
{
+ struct hlist_node *p;
+ struct mount *m;
/*
- * This probably indicates that somebody messed
- * up a mnt_want/drop_write() pair. If this
- * happens, the filesystem was probably unable
- * to make r/w->r/o transitions.
- */
- /*
+ * The warning here probably indicates that somebody messed
+ * up a mnt_want/drop_write() pair. If this happens, the
+ * filesystem was probably unable to make r/w->r/o transitions.
* The locking used to deal with mnt_count decrement provides barriers,
* so mnt_get_writers() below is safe.
*/
WARN_ON(mnt_get_writers(mnt));
if (unlikely(mnt->mnt_pins.first))
mnt_pin_kill(mnt);
+ hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
+ hlist_del(&m->mnt_umount);
+ mntput(&m->mnt);
+ }
fsnotify_vfsmount_delete(&mnt->mnt);
dput(mnt->mnt.mnt_root);
deactivate_super(mnt->mnt.mnt_sb);
@@ -1141,6 +1122,8 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
+ LIST_HEAD(list);
+
rcu_read_lock();
if (likely(READ_ONCE(mnt->mnt_ns))) {
/*
@@ -1181,10 +1164,12 @@ static void mntput_no_expire(struct mount *mnt)
if (unlikely(!list_empty(&mnt->mnt_mounts))) {
struct mount *p, *tmp;
list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
- umount_mnt(p);
+ __put_mountpoint(unhash_mnt(p), &list);
+ hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
}
}
unlock_mount_hash();
+ shrink_dentry_list(&list);
if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
struct task_struct *task = current;
@@ -1370,22 +1355,29 @@ int may_umount(struct vfsmount *mnt)
EXPORT_SYMBOL(may_umount);
-static HLIST_HEAD(unmounted); /* protected by namespace_sem */
-
static void namespace_unlock(void)
{
struct hlist_head head;
+ struct hlist_node *p;
+ struct mount *m;
+ LIST_HEAD(list);
hlist_move_list(&unmounted, &head);
+ list_splice_init(&ex_mountpoints, &list);
up_write(&namespace_sem);
+ shrink_dentry_list(&list);
+
if (likely(hlist_empty(&head)))
return;
synchronize_rcu_expedited();
- group_pin_kill(&head);
+ hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
+ hlist_del(&m->mnt_umount);
+ mntput(&m->mnt);
+ }
}
static inline void namespace_lock(void)
@@ -1472,8 +1464,6 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
disconnect = disconnect_mount(p, how);
- pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
- disconnect ? &unmounted : NULL);
if (mnt_has_parent(p)) {
mnt_add_count(p->mnt_parent, -1);
if (!disconnect) {
@@ -1481,6 +1471,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
} else {
umount_mnt(p);
+ hlist_add_head(&p->mnt_umount, &unmounted);
}
}
change_mnt_propagation(p, MS_PRIVATE);
@@ -1626,15 +1617,15 @@ void __detach_mounts(struct dentry *dentry)
namespace_lock();
lock_mount_hash();
mp = lookup_mountpoint(dentry);
- if (IS_ERR_OR_NULL(mp))
+ if (!mp)
goto out_unlock;
event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
- hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
umount_mnt(mnt);
+ hlist_add_head(&mnt->mnt_umount, &unmounted);
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
@@ -2046,7 +2037,7 @@ int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
static int attach_recursive_mnt(struct mount *source_mnt,
struct mount *dest_mnt,
struct mountpoint *dest_mp,
- struct path *parent_path)
+ bool moving)
{
struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
HLIST_HEAD(tree_list);
@@ -2064,7 +2055,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
return PTR_ERR(smp);
/* Is there space to add these mounts to the mount namespace? */
- if (!parent_path) {
+ if (!moving) {
err = count_mounts(ns, source_mnt);
if (err)
goto out;
@@ -2083,8 +2074,8 @@ static int attach_recursive_mnt(struct mount *source_mnt,
} else {
lock_mount_hash();
}
- if (parent_path) {
- detach_mnt(source_mnt, parent_path);
+ if (moving) {
+ unhash_mnt(source_mnt);
attach_mnt(source_mnt, dest_mnt, dest_mp);
touch_mnt_namespace(source_mnt->mnt_ns);
} else {
@@ -2182,7 +2173,7 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
d_is_dir(mnt->mnt.mnt_root))
return -ENOTDIR;
- return attach_recursive_mnt(mnt, p, mp, NULL);
+ return attach_recursive_mnt(mnt, p, mp, false);
}
/*
@@ -2575,11 +2566,11 @@ out:
static int do_move_mount(struct path *old_path, struct path *new_path)
{
- struct path parent_path = {.mnt = NULL, .dentry = NULL};
struct mnt_namespace *ns;
struct mount *p;
struct mount *old;
- struct mountpoint *mp;
+ struct mount *parent;
+ struct mountpoint *mp, *old_mp;
int err;
bool attached;
@@ -2589,7 +2580,9 @@ static int do_move_mount(struct path *old_path, struct path *new_path)
old = real_mount(old_path->mnt);
p = real_mount(new_path->mnt);
+ parent = old->mnt_parent;
attached = mnt_has_parent(old);
+ old_mp = old->mnt_mp;
ns = old->mnt_ns;
err = -EINVAL;
@@ -2617,7 +2610,7 @@ static int do_move_mount(struct path *old_path, struct path *new_path)
/*
* Don't move a mount residing in a shared parent.
*/
- if (attached && IS_MNT_SHARED(old->mnt_parent))
+ if (attached && IS_MNT_SHARED(parent))
goto out;
/*
* Don't move a mount tree containing unbindable mounts to a destination
@@ -2633,18 +2626,21 @@ static int do_move_mount(struct path *old_path, struct path *new_path)
goto out;
err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp,
- attached ? &parent_path : NULL);
+ attached);
if (err)
goto out;
/* if the mount is moved, it should no longer be expire
* automatically */
list_del_init(&old->mnt_expire);
+ if (attached)
+ put_mountpoint(old_mp);
out:
unlock_mount(mp);
if (!err) {
- path_put(&parent_path);
- if (!attached)
+ if (attached)
+ mntput_no_expire(parent);
+ else
free_mnt_ns(ns);
}
return err;
@@ -3589,8 +3585,8 @@ EXPORT_SYMBOL(path_is_under);
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
const char __user *, put_old)
{
- struct path new, old, parent_path, root_parent, root;
- struct mount *new_mnt, *root_mnt, *old_mnt;
+ struct path new, old, root;
+ struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
struct mountpoint *old_mp, *root_mp;
int error;
@@ -3619,9 +3615,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
new_mnt = real_mount(new.mnt);
root_mnt = real_mount(root.mnt);
old_mnt = real_mount(old.mnt);
+ ex_parent = new_mnt->mnt_parent;
+ root_parent = root_mnt->mnt_parent;
if (IS_MNT_SHARED(old_mnt) ||
- IS_MNT_SHARED(new_mnt->mnt_parent) ||
- IS_MNT_SHARED(root_mnt->mnt_parent))
+ IS_MNT_SHARED(ex_parent) ||
+ IS_MNT_SHARED(root_parent))
goto out4;
if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
goto out4;
@@ -3638,7 +3636,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
goto out4; /* not a mountpoint */
if (!mnt_has_parent(root_mnt))
goto out4; /* not attached */
- root_mp = root_mnt->mnt_mp;
if (new.mnt->mnt_root != new.dentry)
goto out4; /* not a mountpoint */
if (!mnt_has_parent(new_mnt))
@@ -3649,10 +3646,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* make certain new is below the root */
if (!is_path_reachable(new_mnt, new.dentry, &root))
goto out4;
- root_mp->m_count++; /* pin it so it won't go away */
lock_mount_hash();
- detach_mnt(new_mnt, &parent_path);
- detach_mnt(root_mnt, &root_parent);
+ umount_mnt(new_mnt);
+ root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
new_mnt->mnt.mnt_flags |= MNT_LOCKED;
root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
@@ -3660,7 +3656,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* mount old root on put_old */
attach_mnt(root_mnt, old_mnt, old_mp);
/* mount new_root on / */
- attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
+ attach_mnt(new_mnt, root_parent, root_mp);
+ mnt_add_count(root_parent, -1);
touch_mnt_namespace(current->nsproxy->mnt_ns);
/* A moved mount should not expire automatically */
list_del_init(&new_mnt->mnt_expire);
@@ -3670,10 +3667,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
error = 0;
out4:
unlock_mount(old_mp);
- if (!error) {
- path_put(&root_parent);
- path_put(&parent_path);
- }
+ if (!error)
+ mntput_no_expire(ex_parent);
out3:
path_put(&root);
out2: