author     Martin Brandenburg <martin@omnibond.com>  2019-02-12 23:19:06 +0300
committer  Mike Marshall <hubcap@omnibond.com>       2019-05-03 21:32:39 +0300
commit     8f04e1be784858ba0288c7c09b9de06627a800c9
tree       bf777bf20b50e8904de6be0f63f452b4246a9909  /fs/orangefs/file.c
parent     c472ebc25555e634d89e1ed508d37c9102bff017
download   linux-8f04e1be784858ba0288c7c09b9de06627a800c9.tar.xz
orangefs: add orangefs_revalidate_mapping
This is modeled after NFS, except our method is different.  We use a
simple timer to determine whether to invalidate the page cache.  This
is bound to perform.

This adds a sysfs parameter, cache_timeout_msecs, which controls the
time between page cache invalidations.

Signed-off-by: Martin Brandenburg <martin@omnibond.com>
Signed-off-by: Mike Marshall <hubcap@omnibond.com>
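For orientation before the diff: the whole scheme reduces to a per-inode
deadline expressed in jiffies.  Below is a minimal sketch of that idea;
only orangefs_cache_timeout_msecs and mapping_time come from the patch,
while the helper names and the msecs_to_jiffies() call are illustrative
(the patch open-codes the conversion as cache_timeout_msecs*HZ/1000).

    #include <linux/jiffies.h>

    /* Sketch only: is the cached mapping still trusted?  mapping_time is
     * the jiffies value until which the page cache is considered fresh. */
    static inline bool mapping_still_fresh(unsigned long mapping_time)
    {
    	return time_before(jiffies, mapping_time);
    }

    /* Sketch only: push the deadline out after an invalidation.  The
     * patch open-codes the conversion; msecs_to_jiffies() is the stock
     * helper that performs essentially the same computation. */
    static inline unsigned long mapping_next_deadline(unsigned int timeout_msecs)
    {
    	return jiffies + msecs_to_jiffies(timeout_msecs);
    }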
Diffstat (limited to 'fs/orangefs/file.c')
-rw-r--r--	fs/orangefs/file.c	70
1 file changed, 68 insertions(+), 2 deletions(-)
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 405449ce4b02..faa5b61cdfd6 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -241,18 +241,78 @@ out:
return ret;
}
+int orangefs_revalidate_mapping(struct inode *inode)
+{
+ struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+ struct address_space *mapping = inode->i_mapping;
+ unsigned long *bitlock = &orangefs_inode->bitlock;
+ int ret;
+
+ while (1) {
+ ret = wait_on_bit(bitlock, 1, TASK_KILLABLE);
+ if (ret)
+ return ret;
+ spin_lock(&inode->i_lock);
+ if (test_bit(1, bitlock)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+ if (!time_before(jiffies, orangefs_inode->mapping_time))
+ break;
+ spin_unlock(&inode->i_lock);
+ return 0;
+ }
+
+ set_bit(1, bitlock);
+ smp_wmb();
+ spin_unlock(&inode->i_lock);
+
+ unmap_mapping_range(mapping, 0, 0, 0);
+ ret = filemap_write_and_wait(mapping);
+ if (!ret)
+ ret = invalidate_inode_pages2(mapping);
+
+ orangefs_inode->mapping_time = jiffies +
+ orangefs_cache_timeout_msecs*HZ/1000;
+
+ clear_bit(1, bitlock);
+ smp_mb__after_atomic();
+ wake_up_bit(bitlock, 1);
+
+ return ret;
+}
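Editorial note on the synchronization above (commentary, not part of the
commit): bit 1 of orangefs_inode->bitlock serves as a hand-rolled sleeping
lock, so only one task performs the invalidation while latecomers sleep in
wait_on_bit() and re-check the deadline when woken.  A skeleton of the
pattern, with the deadline re-check loop omitted; names follow the patch:

    #include <linux/fs.h>
    #include <linux/sched.h>
    #include <linux/wait_bit.h>

    static int revalidate_once(struct inode *inode, unsigned long *bitlock)
    {
    	int ret;

    	/* Sleep while another task owns bit 1 and is revalidating. */
    	ret = wait_on_bit(bitlock, 1, TASK_KILLABLE);
    	if (ret)
    		return ret;		/* woken by a fatal signal */

    	spin_lock(&inode->i_lock);	/* bit and deadline change together */
    	set_bit(1, bitlock);		/* claim the invalidation work */
    	smp_wmb();			/* publish the claim before unlocking */
    	spin_unlock(&inode->i_lock);

    	/* ... unmap, write back, invalidate, advance mapping_time ... */

    	clear_bit(1, bitlock);
    	smp_mb__after_atomic();		/* order clear_bit before the wakeup */
    	wake_up_bit(bitlock, 1);	/* wake tasks sleeping in wait_on_bit() */
    	return 0;
    }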
+
static ssize_t orangefs_file_read_iter(struct kiocb *iocb,
struct iov_iter *iter)
{
+ int ret;
orangefs_stats.reads++;
- return generic_file_read_iter(iocb, iter);
+
+ down_read(&file_inode(iocb->ki_filp)->i_rwsem);
+ ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp));
+ if (ret)
+ goto out;
+
+ ret = generic_file_read_iter(iocb, iter);
+out:
+ up_read(&file_inode(iocb->ki_filp)->i_rwsem);
+ return ret;
}
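Worth noting (commentary, not part of the commit): the read path holds
i_rwsem shared across both the revalidation and generic_file_read_iter(),
so a buffered writer, which takes i_rwsem exclusive via
generic_file_write_iter(), cannot slip in between the freshness check and
the read that depends on it.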
static ssize_t orangefs_file_write_iter(struct kiocb *iocb,
struct iov_iter *iter)
{
+ int ret;
orangefs_stats.writes++;
- return generic_file_write_iter(iocb, iter);
+
+ if (iocb->ki_pos > i_size_read(file_inode(iocb->ki_filp))) {
+ ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp));
+ if (ret)
+ return ret;
+ }
+
+ ret = generic_file_write_iter(iocb, iter);
+ return ret;
}
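Also commentary rather than part of the commit: the write path revalidates
only when the write begins past the cached EOF, presumably because such a
write depends on the cached file size being current, while an in-place
overwrite replaces whatever the page cache holds for those pages anyway.
The condition, as a hypothetical standalone predicate (this helper is not
in the patch):

    #include <linux/fs.h>

    /* Hypothetical helper, not in the patch: true when the write would
     * start beyond the size the kernel currently believes the file has. */
    static inline bool write_starts_past_cached_eof(struct kiocb *iocb)
    {
    	return iocb->ki_pos > i_size_read(file_inode(iocb->ki_filp));
    }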
/*
@@ -341,6 +401,12 @@ static const struct vm_operations_struct orangefs_file_vm_ops = {
*/
static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
+ int ret;
+
+ ret = orangefs_revalidate_mapping(file_inode(file));
+ if (ret)
+ return ret;
+
gossip_debug(GOSSIP_FILE_DEBUG,
"orangefs_file_mmap: called on %s\n",
(file ?