author		Nick Piggin <npiggin@suse.de>	2007-05-07 01:49:04 +0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 23:12:51 +0400
commit		6fe6900e1e5b6fa9e5c59aa5061f244fe3f467e2 (patch)
tree		8bbfe5072279227cc50a941ad4813908082426a1 /fs/ntfs
parent		714b8171af9c930a59a0da8f6fe50518e70ab035 (diff)
download	linux-6fe6900e1e5b6fa9e5c59aa5061f244fe3f467e2.tar.xz
mm: make read_cache_page synchronous
Ensure pages are uptodate after returning from read_cache_page, which allows us to cut out most of the filesystem-internal PageUptodate calls.

I didn't have a great look down the call chains, but this appears to fix 7 possible use-before-uptodate cases in hfs, 2 in hfsplus, 1 in jfs, a few in ecryptfs, 1 in jffs2, and one possible case in block2mtd of cleared data being overwritten by readpage. All of these depend on whether the filler is async and/or can return with a !uptodate page.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
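For context, a minimal caller sketch of what the change buys (not taken from this patch; the function name and variables are hypothetical): once read_cache_page()/read_mapping_page() only return uptodate pages or an ERR_PTR, the wait_on_page_locked()/PageUptodate() dance in callers becomes unnecessary.

#include <linux/err.h>
#include <linux/pagemap.h>

static int example_read_block(struct address_space *mapping, pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);	/* read errors now surface here */
	/*
	 * Old pattern, no longer required:
	 *	wait_on_page_locked(page);
	 *	if (!PageUptodate(page)) {
	 *		page_cache_release(page);
	 *		return -EIO;
	 *	}
	 */
	page_cache_release(page);
	return 0;
}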
Diffstat (limited to 'fs/ntfs')
-rw-r--r--	fs/ntfs/aops.h	3
-rw-r--r--	fs/ntfs/attrib.c	18
-rw-r--r--	fs/ntfs/file.c	3
-rw-r--r--	fs/ntfs/super.c	30
4 files changed, 8 insertions, 46 deletions
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index 9393f4b1e298..caecc58f529c 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -89,9 +89,8 @@ static inline struct page *ntfs_map_page(struct address_space *mapping,
struct page *page = read_mapping_page(mapping, index, NULL);
if (!IS_ERR(page)) {
- wait_on_page_locked(page);
kmap(page);
- if (PageUptodate(page) && !PageError(page))
+ if (!PageError(page))
return page;
ntfs_unmap_page(page);
return ERR_PTR(-EIO);
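For readability, the resulting helper reconstructed from the hunk above (the index parameter and the trailing error-propagation return are not visible in the hunk and are assumed from context):

static inline struct page *ntfs_map_page(struct address_space *mapping,
		unsigned long index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (!IS_ERR(page)) {
		kmap(page);
		if (!PageError(page))
			return page;
		ntfs_unmap_page(page);
		return ERR_PTR(-EIO);
	}
	return page;	/* propagate the ERR_PTR from read_mapping_page() */
}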
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 7659cc192995..1c08fefe487a 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -2532,14 +2532,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read first partial "
- "page (sync error, index 0x%lx).", idx);
- return PTR_ERR(page);
- }
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page))) {
- ntfs_error(vol->sb, "Failed to read first partial page "
- "(async error, index 0x%lx).", idx);
- page_cache_release(page);
+ "page (error, index 0x%lx).", idx);
return PTR_ERR(page);
}
/*
@@ -2602,14 +2595,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read last partial page "
- "(sync error, index 0x%lx).", idx);
- return PTR_ERR(page);
- }
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page))) {
- ntfs_error(vol->sb, "Failed to read last partial page "
- "(async error, index 0x%lx).", idx);
- page_cache_release(page);
+ "(error, index 0x%lx).", idx);
return PTR_ERR(page);
}
kaddr = kmap_atomic(page, KM_USER0);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index d69c4595ccd0..dbbac5593106 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -236,8 +236,7 @@ do_non_resident_extend:
err = PTR_ERR(page);
goto init_err_out;
}
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page) || PageError(page))) {
+ if (unlikely(PageError(page))) {
page_cache_release(page);
err = -EIO;
goto init_err_out;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1594c90b7164..2ddde534db0a 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2471,7 +2471,6 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
s64 nr_free = vol->nr_clusters;
u32 *kaddr;
struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
- filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
struct page *page;
pgoff_t index, max_index;
@@ -2494,24 +2493,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
* Read the page from page cache, getting it from backing store
* if necessary, and increment the use count.
*/
- page = read_cache_page(mapping, index, (filler_t*)readpage,
- NULL);
+ page = read_mapping_page(mapping, index, NULL);
/* Ignore pages which errored synchronously. */
if (IS_ERR(page)) {
- ntfs_debug("Sync read_cache_page() error. Skipping "
+ ntfs_debug("read_mapping_page() error. Skipping "
"page (index 0x%lx).", index);
nr_free -= PAGE_CACHE_SIZE * 8;
continue;
}
- wait_on_page_locked(page);
- /* Ignore pages which errored asynchronously. */
- if (!PageUptodate(page)) {
- ntfs_debug("Async read_cache_page() error. Skipping "
- "page (index 0x%lx).", index);
- page_cache_release(page);
- nr_free -= PAGE_CACHE_SIZE * 8;
- continue;
- }
kaddr = (u32*)kmap_atomic(page, KM_USER0);
/*
* For each 4 bytes, subtract the number of set bits. If this
@@ -2562,7 +2551,6 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
{
u32 *kaddr;
struct address_space *mapping = vol->mftbmp_ino->i_mapping;
- filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
struct page *page;
pgoff_t index;
@@ -2576,21 +2564,11 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
* Read the page from page cache, getting it from backing store
* if necessary, and increment the use count.
*/
- page = read_cache_page(mapping, index, (filler_t*)readpage,
- NULL);
+ page = read_mapping_page(mapping, index, NULL);
/* Ignore pages which errored synchronously. */
if (IS_ERR(page)) {
- ntfs_debug("Sync read_cache_page() error. Skipping "
- "page (index 0x%lx).", index);
- nr_free -= PAGE_CACHE_SIZE * 8;
- continue;
- }
- wait_on_page_locked(page);
- /* Ignore pages which errored asynchronously. */
- if (!PageUptodate(page)) {
- ntfs_debug("Async read_cache_page() error. Skipping "
+ ntfs_debug("read_mapping_page() error. Skipping "
"page (index 0x%lx).", index);
- page_cache_release(page);
nr_free -= PAGE_CACHE_SIZE * 8;
continue;
}
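Beyond the error-path deletions shown above, the bitmap counting itself is unchanged. A rough sketch of the simplified loop in get_nr_free_clusters() (not code from this patch; names, includes, and handling of the final partial page are simplified):

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

static s64 count_free_clusters_sketch(struct address_space *mapping,
		pgoff_t max_index, s64 nr_free)
{
	pgoff_t index;

	for (index = 0; index < max_index; index++) {
		struct page *page = read_mapping_page(mapping, index, NULL);
		u32 *kaddr;
		unsigned int i;

		if (IS_ERR(page)) {
			/* Treat an unreadable bitmap page as fully allocated. */
			nr_free -= PAGE_CACHE_SIZE * 8;
			continue;
		}
		kaddr = (u32 *)kmap_atomic(page, KM_USER0);
		/* Subtract one free cluster for every set bit in the bitmap. */
		for (i = 0; i < PAGE_CACHE_SIZE / sizeof(u32); i++)
			nr_free -= (s64)hweight32(kaddr[i]);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	return nr_free;
}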