Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c  17
1 file changed, 5 insertions, 12 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index bb0c4d0038db..52e6d5fdab6b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2597,15 +2597,7 @@ EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{
- /*
- * The caller holds a spinlock (dentry::d_lock). On !PREEMPT_RT
- * kernels spin_lock() implicitly disables preemption, but not on
- * PREEMPT_RT. So for RT it has to be done explicitly to protect
- * the sequence count write side critical section against a reader
- * or another writer preempting, which would result in a live lock.
- */
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- preempt_disable();
+ preempt_disable_nested();
for (;;) {
unsigned n = dir->i_dir_seq;
if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
@@ -2618,8 +2610,7 @@ static inline void end_dir_add(struct inode *dir, unsigned int n,
wait_queue_head_t *d_wait)
{
smp_store_release(&dir->i_dir_seq, n + 2);
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- preempt_enable();
+ preempt_enable_nested();
wake_up_all(d_wait);
}
@@ -3258,8 +3249,10 @@ void d_genocide(struct dentry *parent)
EXPORT_SYMBOL(d_genocide);
-void d_tmpfile(struct dentry *dentry, struct inode *inode)
+void d_tmpfile(struct file *file, struct inode *inode)
{
+ struct dentry *dentry = file->f_path.dentry;
+
inode_dec_link_count(inode);
BUG_ON(dentry->d_name.name != dentry->d_iname ||
!hlist_unhashed(&dentry->d_u.d_alias) ||