Diffstat (limited to 'fs/afs')
-rw-r--r--  fs/afs/file.c      71
-rw-r--r--  fs/afs/internal.h  16
-rw-r--r--  fs/afs/write.c      1
3 files changed, 76 insertions, 12 deletions
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 322973d12614..85f5adf21aa0 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -602,6 +602,63 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
}
/*
+ * Adjust the dirty region of the page on truncation or full invalidation,
+ * getting rid of the markers altogether if the region is entirely invalidated.
+ */
+static void afs_invalidate_dirty(struct page *page, unsigned int offset,
+ unsigned int length)
+{
+ struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+ unsigned long priv;
+ unsigned int f, t, end = offset + length;
+
+ priv = page_private(page);
+
+ /* we clean up only if the entire page is being invalidated */
+ if (offset == 0 && length == thp_size(page))
+ goto full_invalidate;
+
+ /* If the page was dirtied by page_mkwrite(), the PTE stays writable
+ * and we don't get another notification to tell us to expand it
+ * again.
+ */
+ if (afs_is_page_dirty_mmapped(priv))
+ return;
+
+ /* We may need to shorten the dirty region */
+ f = afs_page_dirty_from(priv);
+ t = afs_page_dirty_to(priv);
+
+ if (t <= offset || f >= end)
+ return; /* Doesn't overlap */
+
+ if (f < offset && t > end)
+ return; /* Splits the dirty region - just absorb it */
+
+ if (f >= offset && t <= end)
+ goto undirty;
+
+ if (f < offset)
+ t = offset;
+ else
+ f = end;
+ if (f == t)
+ goto undirty;
+
+ priv = afs_page_dirty(f, t);
+ set_page_private(page, priv);
+ trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page->index, priv);
+ return;
+
+undirty:
+ trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page->index, priv);
+ clear_page_dirty_for_io(page);
+full_invalidate:
+ priv = (unsigned long)detach_page_private(page);
+ trace_afs_page_dirty(vnode, tracepoint_string("inval"), page->index, priv);
+}
+
+/*
* invalidate part or all of a page
* - release a page and clean up its private data if offset is 0 (indicating
* the entire page)
@@ -609,29 +666,23 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
static void afs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
- struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
- unsigned long priv;
-
_enter("{%lu},%u,%u", page->index, offset, length);
BUG_ON(!PageLocked(page));
+#ifdef CONFIG_AFS_FSCACHE
/* we clean up only if the entire page is being invalidated */
if (offset == 0 && length == PAGE_SIZE) {
-#ifdef CONFIG_AFS_FSCACHE
if (PageFsCache(page)) {
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
fscache_wait_on_page_write(vnode->cache, page);
fscache_uncache_page(vnode->cache, page);
}
+ }
#endif
- if (PagePrivate(page)) {
- priv = (unsigned long)detach_page_private(page);
- trace_afs_page_dirty(vnode, tracepoint_string("inval"),
- page->index, priv);
- }
- }
+ if (PagePrivate(page))
+ afs_invalidate_dirty(page, offset, length);
_leave("");
}
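
The trimming logic added above is easiest to check outside the kernel. Below is a minimal userspace sketch of the same interval arithmetic; struct dirty_region and trim_dirty() are hypothetical stand-ins for the packed page->private word and afs_invalidate_dirty(), written only to illustrate the overlap cases and the mmapped early-return, not to reproduce the kernel code.

/*
 * Userspace sketch of the dirty-region trimming performed by
 * afs_invalidate_dirty().  struct dirty_region and its fields are
 * hypothetical stand-ins for the packed page->private word; only the
 * overlap arithmetic and the mmapped early-return mirror the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct dirty_region {
	unsigned int from;	/* first dirty byte */
	unsigned int to;	/* one past the last dirty byte */
	bool mmapped;		/* set if dirtied through page_mkwrite() */
	bool valid;		/* false once the marker has been discarded */
};

static void trim_dirty(struct dirty_region *r,
		       unsigned int offset, unsigned int length,
		       unsigned int page_size)
{
	unsigned int f = r->from, t = r->to, end = offset + length;

	if (offset == 0 && length == page_size) {
		r->valid = false;	/* full invalidation: drop the marker */
		return;
	}
	if (r->mmapped)
		return;			/* PTE stays writable: don't shrink */
	if (t <= offset || f >= end)
		return;			/* doesn't overlap */
	if (f < offset && t > end)
		return;			/* splits the region - just absorb it */
	if (f >= offset && t <= end) {
		r->valid = false;	/* dirty region entirely invalidated */
		return;
	}
	if (f < offset)
		t = offset;		/* keep the part before the hole */
	else
		f = end;		/* keep the part after the hole */
	if (f == t) {
		r->valid = false;
		return;
	}
	r->from = f;
	r->to = t;
}

int main(void)
{
	struct dirty_region r = {
		.from = 1024, .to = 3072, .mmapped = false, .valid = true,
	};

	/* Invalidate the second half of a 4K page: the region is trimmed. */
	trim_dirty(&r, 2048, 2048, 4096);
	if (r.valid)
		printf("dirty region now [%u, %u)\n", r.from, r.to);
	else
		printf("dirty marker dropped\n");
	return 0;
}

With the example in main(), a [1024, 3072) dirty region is trimmed to [1024, 2048) when the second half of a 4K page is invalidated.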
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 344c545f934c..b0fce1f75397 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -864,11 +864,13 @@ struct afs_vnode_cache_aux {
* 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
*/
#ifdef CONFIG_64BIT
-#define __AFS_PAGE_PRIV_MASK 0xffffffffUL
+#define __AFS_PAGE_PRIV_MASK 0x7fffffffUL
#define __AFS_PAGE_PRIV_SHIFT 32
+#define __AFS_PAGE_PRIV_MMAPPED 0x80000000UL
#else
-#define __AFS_PAGE_PRIV_MASK 0xffffUL
+#define __AFS_PAGE_PRIV_MASK 0x7fffUL
#define __AFS_PAGE_PRIV_SHIFT 16
+#define __AFS_PAGE_PRIV_MMAPPED 0x8000UL
#endif
static inline size_t afs_page_dirty_from(unsigned long priv)
@@ -886,6 +888,16 @@ static inline unsigned long afs_page_dirty(size_t from, size_t to)
return ((unsigned long)(to - 1) << __AFS_PAGE_PRIV_SHIFT) | from;
}
+static inline unsigned long afs_page_dirty_mmapped(unsigned long priv)
+{
+ return priv | __AFS_PAGE_PRIV_MMAPPED;
+}
+
+static inline bool afs_is_page_dirty_mmapped(unsigned long priv)
+{
+ return priv & __AFS_PAGE_PRIV_MMAPPED;
+}
+
#include <trace/events/afs.h>
/*****************************************************************************/
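
The new mask and flag values reserve one bit of the packed page->private word for the mmapped flag, leaving "from" in the low bits and "to - 1" above __AFS_PAGE_PRIV_SHIFT. The userspace sketch below reproduces the 64-bit layout; the decode helpers are assumptions modelled on the encode helper afs_page_dirty() visible in the hunk (the kernel's own from/to accessors are not shown here), and it assumes an LP64 unsigned long.

/*
 * Userspace sketch of the 64-bit page->private layout implied by the new
 * definitions: "from" in the low bits, "to - 1" shifted up by 32, and
 * bit 31 reserved for the mmapped flag.  The decode helpers below are
 * assumptions based on the encode helper shown above, not copies of the
 * kernel ones.  Assumes unsigned long is 64 bits (LP64).
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PRIV_MASK	0x7fffffffUL	/* __AFS_PAGE_PRIV_MASK */
#define PRIV_SHIFT	32		/* __AFS_PAGE_PRIV_SHIFT */
#define PRIV_MMAPPED	0x80000000UL	/* __AFS_PAGE_PRIV_MMAPPED */

static unsigned long page_dirty(size_t from, size_t to)
{
	return ((unsigned long)(to - 1) << PRIV_SHIFT) | from;
}

static size_t page_dirty_from(unsigned long priv)
{
	return priv & PRIV_MASK;
}

static size_t page_dirty_to(unsigned long priv)
{
	return ((priv >> PRIV_SHIFT) & PRIV_MASK) + 1;
}

static unsigned long page_dirty_mmapped(unsigned long priv)
{
	return priv | PRIV_MMAPPED;
}

static int is_page_dirty_mmapped(unsigned long priv)
{
	return (priv & PRIV_MMAPPED) != 0;
}

int main(void)
{
	unsigned long priv = page_dirty(0, 4096);	/* whole 4K page dirty */

	priv = page_dirty_mmapped(priv);		/* as page_mkwrite() would */

	assert(page_dirty_from(priv) == 0);
	assert(page_dirty_to(priv) == 4096);
	assert(is_page_dirty_mmapped(priv));

	printf("priv=%#lx from=%zu to=%zu mmapped=%d\n",
	       priv, page_dirty_from(priv), page_dirty_to(priv),
	       is_page_dirty_mmapped(priv));
	return 0;
}

Compiled with a C99 compiler, the demo prints priv=0xfff80000000 from=0 to=4096 mmapped=1, showing that the flag does not disturb the decoded range.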
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 1a49f5c89342..a2511e3ad2cc 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -867,6 +867,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
wait_on_page_writeback(vmf->page);
priv = afs_page_dirty(0, PAGE_SIZE);
+ priv = afs_page_dirty_mmapped(priv);
trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
vmf->page->index, priv);
if (PagePrivate(vmf->page))
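
With the flag set at mkwrite time, a later partial invalidation takes the afs_is_page_dirty_mmapped() early return added to afs_invalidate_dirty() above and leaves the recorded full-page dirty region untouched: the PTE stays writable after page_mkwrite(), so there would be no further notification to re-expand a region that had been shrunk here.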