Diffstat (limited to 'fs/super.c')
-rw-r--r--	fs/super.c	46
1 file changed, 41 insertions(+), 5 deletions(-)
diff --git a/fs/super.c b/fs/super.c
index 12c08cb20405..84332d5cb817 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -291,7 +291,6 @@ static void __put_super(struct super_block *s)
 		WARN_ON(s->s_inode_lru.node);
 		WARN_ON(!list_empty(&s->s_mounts));
 		security_sb_free(s);
-		fscrypt_destroy_keyring(s);
 		put_user_ns(s->s_user_ns);
 		kfree(s->s_subtype);
 		call_rcu(&s->rcu, destroy_super_rcu);
@@ -491,10 +490,23 @@ void generic_shutdown_super(struct super_block *sb)
 		if (sop->put_super)
 			sop->put_super(sb);
 
-		if (!list_empty(&sb->s_inodes)) {
-			printk("VFS: Busy inodes after unmount of %s. "
-			       "Self-destruct in 5 seconds. Have a nice day...\n",
-			       sb->s_id);
+		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
+				"VFS: Busy inodes after unmount of %s (%s)",
+				sb->s_id, sb->s_type->name)) {
+			/*
+			 * Adding a proper bailout path here would be hard, but
+			 * we can at least make it more likely that a later
+			 * iput_final() or such crashes cleanly.
+			 */
+			struct inode *inode;
+
+			spin_lock(&sb->s_inode_list_lock);
+			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+				inode->i_op = VFS_PTR_POISON;
+				inode->i_sb = VFS_PTR_POISON;
+				inode->i_mapping = VFS_PTR_POISON;
+			}
+			spin_unlock(&sb->s_inode_list_lock);
 		}
 	}
 	spin_lock(&sb_lock);
@@ -1764,3 +1776,27 @@ int thaw_super(struct super_block *sb)
 	return thaw_super_locked(sb);
 }
 EXPORT_SYMBOL(thaw_super);
+
+/*
+ * Create workqueue for deferred direct IO completions. We allocate the
+ * workqueue when it's first needed. This avoids creating workqueue for
+ * filesystems that don't need it and also allows us to create the workqueue
+ * late enough so that we can include s_id in the name of the workqueue.
+ */
+int sb_init_dio_done_wq(struct super_block *sb)
+{
+	struct workqueue_struct *old;
+	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
+						      WQ_MEM_RECLAIM, 0,
+						      sb->s_id);
+	if (!wq)
+		return -ENOMEM;
+	/*
+	 * This has to be atomic as more DIOs can race to create the workqueue
+	 */
+	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
+	/* Someone created workqueue before us? Free ours... */
+	if (old)
+		destroy_workqueue(wq);
+	return 0;
+}
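
For context on the busy-inodes hunk above: VFS_PTR_POISON is a distinctive non-NULL "poison" address, so a stale inode that somehow gets used after generic_shutdown_super() dereferences an obviously bogus pointer and oopses in a recognizable way, instead of quietly touching freed superblock memory. Below is a minimal userspace sketch of the same fail-fast idea; the POISON value, struct handle, and handle_close()/handle_use() helpers are invented for illustration and are not kernel API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define POISON ((void *)(uintptr_t)0xdead4eadUL)	/* recognizable bad address */

struct handle {
	void *backend;			/* whatever the handle points at */
};

static void handle_close(struct handle *h)
{
	/*
	 * Poison instead of leaving a dangling pointer: a later buggy
	 * use trips over an obviously invalid value right away.
	 */
	h->backend = POISON;
}

static void handle_use(struct handle *h)
{
	assert(h->backend != POISON && "use after close");
	printf("using %p\n", h->backend);
}

int main(void)
{
	int dummy;
	struct handle h = { .backend = &dummy };

	handle_use(&h);
	handle_close(&h);
	/* handle_use(&h); would now fail loudly instead of misbehaving */
	return 0;
}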
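The cmpxchg() in sb_init_dio_done_wq() is a common lock-free "initialize once" idiom: every racing caller allocates its own workqueue, exactly one pointer wins the atomic compare-and-exchange into sb->s_dio_done_wq, and the losers destroy the workqueue they just built. As a rough illustration only (plain C11 userspace code, not the kernel implementation; struct resource and init_once() are names made up for the example), the same pattern looks like this:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct resource {
	int id;
};

static _Atomic(struct resource *) shared;	/* starts out NULL */

static int init_once(int id)
{
	struct resource *mine = malloc(sizeof(*mine));
	struct resource *expected = NULL;

	if (!mine)
		return -1;
	mine->id = id;

	/* Atomically install our object only if nobody beat us to it. */
	if (!atomic_compare_exchange_strong(&shared, &expected, mine)) {
		/* Someone else won the race; throw away our copy. */
		free(mine);
	}
	return 0;
}

int main(void)
{
	init_once(1);
	init_once(2);	/* loses the race, frees its own object */
	printf("winner: %d\n", atomic_load(&shared)->id);
	free(atomic_load(&shared));
	return 0;
}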