author    Tyler Hicks <tyhicks@canonical.com>    2012-07-04 03:50:57 +0400
committer Tyler Hicks <tyhicks@canonical.com>    2012-07-14 03:46:06 +0400
commit    821f7494a77627fb1ab539591c57b22cdca702d6 (patch)
tree      561ee1768e62c120766c40c0b8fc8aa30b4f4a95 /fs/ecryptfs/mmap.c
parent    e3ccaa9761200952cc269b1f4b7d7bb77a5e071b (diff)
download  linux-821f7494a77627fb1ab539591c57b22cdca702d6.tar.xz
eCryptfs: Revert to a writethrough cache model
A change was made about a year ago to get eCryptfs to better utilize its page cache during writes. The idea was to do the page encryption operations during page writeback, rather than when initially writing into the page cache, to reduce the number of page encryption operations during sequential writes. This meant that the encrypted page would only be written to the lower filesystem during page writeback, which was a change from how eCryptfs had previously written to the lower filesystem in ecryptfs_write_end().

The change caused a few eCryptfs-internal bugs that were shaken out. Unfortunately, more grave side effects have been identified that will force changes outside of eCryptfs. Because the lower filesystem isn't consulted until page writeback, eCryptfs has no way to pass lower write errors (ENOSPC, mainly) back to userspace. Additionally, it was reported that quotas could be bypassed because of the way eCryptfs may sometimes open the lower filesystem using a privileged kthread.

It would be nice to resolve the latest issues, but it is best if the eCryptfs commits are reverted to restore the old behavior in the meantime.

This reverts:
32001d6f "eCryptfs: Flush file in vma close"
5be79de2 "eCryptfs: Flush dirty pages in setattr"
57db4e8d "ecryptfs: modify write path to encrypt page in writepage"

Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Tested-by: Colin King <colin.king@canonical.com>
Cc: Colin King <colin.king@canonical.com>
Cc: Thieu Le <thieule@google.com>
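To make the error-propagation point concrete, here is a minimal userspace sketch (not eCryptfs code; the caching helpers and buffer names are hypothetical) showing why a writethrough path can return ENOSPC to the caller at write time, while a deferred writeback path only discovers the error at flush time:

/*
 * Minimal userspace sketch, not eCryptfs code: contrasts a writethrough
 * write path, which pushes data to the backing file immediately and can
 * return ENOSPC to the caller, with a writeback-style path that only
 * caches the data and defers the backing write (and its errors) to a
 * later flush. All helper and variable names here are hypothetical.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static char cache_buf[4096];	/* stand-in for a page-cache page */
static size_t cache_len;

/* Writethrough: cache the data and push it to the backing fd right away. */
static ssize_t write_through(int backing_fd, const char *buf, size_t len)
{
	memcpy(cache_buf, buf, len);
	cache_len = len;
	if (write(backing_fd, cache_buf, cache_len) < 0)
		return -errno;		/* e.g. -ENOSPC reaches the caller now */
	return (ssize_t)len;
}

/* Writeback: only cache the data; the backing write happens at flush time. */
static ssize_t write_back(const char *buf, size_t len)
{
	memcpy(cache_buf, buf, len);
	cache_len = len;
	return (ssize_t)len;		/* caller sees success even if the flush fails */
}

static ssize_t flush_cache(int backing_fd)
{
	if (write(backing_fd, cache_buf, cache_len) < 0)
		return -errno;		/* too late to report through the original write() */
	return 0;
}

int main(void)
{
	/* Writes to /dev/full always fail with ENOSPC, which makes the
	 * difference between the two models easy to observe. */
	int fd = open("/dev/full", O_WRONLY);
	if (fd < 0)
		return 1;

	printf("writethrough returned %zd\n", write_through(fd, "data", 4));
	printf("writeback    returned %zd\n", write_back("data", 4));
	printf("late flush   returned %zd\n", flush_cache(fd));

	close(fd);
	return 0;
}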
Diffstat (limited to 'fs/ecryptfs/mmap.c')
-rw-r--r--  fs/ecryptfs/mmap.c  39
1 file changed, 13 insertions, 26 deletions
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index a46b3a8fee1e..bd1d57f98f74 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -66,18 +66,6 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
int rc;
- /*
- * Refuse to write the page out if we are called from reclaim context
- * since our writepage() path may potentially allocate memory when
- * calling into the lower fs vfs_write() which may in turn invoke
- * us again.
- */
- if (current->flags & PF_MEMALLOC) {
- redirty_page_for_writepage(wbc, page);
- rc = 0;
- goto out;
- }
-
rc = ecryptfs_encrypt_page(page);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error encrypting "
@@ -498,7 +486,6 @@ static int ecryptfs_write_end(struct file *file,
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
int rc;
- int need_unlock_page = 1;
ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
"(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
@@ -519,26 +506,26 @@ static int ecryptfs_write_end(struct file *file,
"zeros in page with index = [0x%.16lx]\n", index);
goto out;
}
- set_page_dirty(page);
- unlock_page(page);
- need_unlock_page = 0;
+ rc = ecryptfs_encrypt_page(page);
+ if (rc) {
+ ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
+ "index [0x%.16lx])\n", index);
+ goto out;
+ }
if (pos + copied > i_size_read(ecryptfs_inode)) {
i_size_write(ecryptfs_inode, pos + copied);
ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
"[0x%.16llx]\n",
(unsigned long long)i_size_read(ecryptfs_inode));
- balance_dirty_pages_ratelimited(mapping);
- rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
- if (rc) {
- printk(KERN_ERR "Error writing inode size to metadata; "
- "rc = [%d]\n", rc);
- goto out;
- }
}
- rc = copied;
+ rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
+ if (rc)
+ printk(KERN_ERR "Error writing inode size to metadata; "
+ "rc = [%d]\n", rc);
+ else
+ rc = copied;
out:
- if (need_unlock_page)
- unlock_page(page);
+ unlock_page(page);
page_cache_release(page);
return rc;
}