author     Matthew Wilcox (Oracle) <willy@infradead.org>  2022-02-13 23:22:28 +0300
committer  Matthew Wilcox (Oracle) <willy@infradead.org>  2022-03-21 19:59:02 +0300
commit     d6c75dc22c755c567838f12f12a16f2a323ebd4e (patch)
tree       78954666fd026403b0ee17150063a927b5ff7f86
parent     5100da38ef3c33d9ad8b60b29c2b671249bf7e1d (diff)
download   linux-d6c75dc22c755c567838f12f12a16f2a323ebd4e.tar.xz
mm/truncate: Split invalidate_inode_page() into mapping_evict_folio()
Some of the callers already have the address_space and can avoid calling
folio_mapping() and checking if the folio was already truncated.  Also
add kernel-doc and fix the return type (in case we ever support folios
larger than 4TB).

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
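[Editor's note] The shape of the split is easy to model outside the kernel:
the old entry point bundled the mapping lookup with the eviction checks,
while the new static helper takes the mapping that some callers already
hold. Below is a minimal, self-contained C sketch of that pattern; the
struct fields and the dirty check are simplified stand-ins, not the real
kernel types or logic (the real invalidate_inode_page() takes a struct
page and converts it with page_folio()).

	#include <stdio.h>

	/* Simplified stand-ins for the kernel's address_space and folio. */
	struct address_space { int unused; };

	struct folio {
		struct address_space *mapping;	/* NULL once the folio is truncated */
		int dirty;
		long nr_pages;
	};

	/*
	 * Models mapping_evict_folio(): the caller supplies the mapping, so
	 * no lookup or truncation re-check is needed here.  Returns the
	 * number of pages removed, hence long (mirroring the return-type
	 * fix for very large folios).
	 */
	static long mapping_evict_folio(struct address_space *mapping,
			struct folio *folio)
	{
		(void)mapping;
		if (folio->dirty)
			return 0;
		/* ... the real helper also checks writeback and refcounts ... */
		return folio->nr_pages;
	}

	/*
	 * Models the reworked invalidate_inode_page(): look up the mapping,
	 * bail out if the folio was already truncated, then delegate.
	 */
	static long invalidate_inode_page(struct folio *folio)
	{
		struct address_space *mapping = folio->mapping;

		/* The folio may have been truncated before it was locked. */
		if (!mapping)
			return 0;
		return mapping_evict_folio(mapping, folio);
	}

	int main(void)
	{
		struct address_space as = { 0 };
		struct folio f = { .mapping = &as, .dirty = 0, .nr_pages = 1 };

		/* A caller already holding the mapping skips the lookup. */
		printf("evict: %ld\n", mapping_evict_folio(&as, &f));

		f.mapping = NULL;	/* simulate truncation */
		printf("invalidate: %ld\n", invalidate_inode_page(&f));
		return 0;
	}

As the commit message notes, callers that already have the address_space
can call the helper directly and skip both the folio_mapping() lookup and
the truncation re-check.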
-rw-r--r--  include/linux/mm.h    1
-rw-r--r--  mm/internal.h         1
-rw-r--r--  mm/memory-failure.c   4
-rw-r--r--  mm/truncate.c        34
4 files changed, 26 insertions(+), 14 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a583b7375445..dede2eda4d7f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1825,7 +1825,6 @@ extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
-int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
diff --git a/mm/internal.h b/mm/internal.h
index ade30a1e6682..9c1959fff477 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -95,6 +95,7 @@ void filemap_free_folio(struct address_space *mapping, struct folio *folio);
 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
 		loff_t end);
+long invalidate_inode_page(struct page *page);
 
 /**
  * folio_evictable - Test whether a folio is evictable.
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 97a9ed8f87a9..0b72a936b8dd 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2139,7 +2139,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
  */
 static int __soft_offline_page(struct page *page)
 {
-	int ret = 0;
+	long ret = 0;
 	unsigned long pfn = page_to_pfn(page);
 	struct page *hpage = compound_head(page);
 	char const *msg_page[] = {"page", "hugepage"};
@@ -2196,7 +2196,7 @@ static int __soft_offline_page(struct page *page)
 		if (!list_empty(&pagelist))
 			putback_movable_pages(&pagelist);
 
-		pr_info("soft offline: %#lx: %s migration failed %d, type %pGp\n",
+		pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
 			pfn, msg_page[huge], ret, &page->flags);
 		if (ret > 0)
 			ret = -EBUSY;
diff --git a/mm/truncate.c b/mm/truncate.c
index 1d97c4cae6a0..2fb10735aab4 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -273,18 +273,9 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
 }
 EXPORT_SYMBOL(generic_error_remove_page);
 
-/*
- * Safely invalidate one page from its pagecache mapping.
- * It only drops clean, unused pages. The page must be locked.
- *
- * Returns 1 if the page is successfully invalidated, otherwise 0.
- */
-int invalidate_inode_page(struct page *page)
+static long mapping_evict_folio(struct address_space *mapping,
+		struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-	struct address_space *mapping = folio_mapping(folio);
-	if (!mapping)
-		return 0;
 	if (folio_test_dirty(folio) || folio_test_writeback(folio))
 		return 0;
 	/* The refcount will be elevated if any page in the folio is mapped */
@@ -298,6 +289,27 @@ int invalidate_inode_page(struct page *page)
 }
 
 /**
+ * invalidate_inode_page() - Remove an unused page from the pagecache.
+ * @page: The page to remove.
+ *
+ * Safely invalidate one page from its pagecache mapping.
+ * It only drops clean, unused pages.
+ *
+ * Context: Page must be locked.
+ * Return: The number of pages successfully removed.
+ */
+long invalidate_inode_page(struct page *page)
+{
+	struct folio *folio = page_folio(page);
+	struct address_space *mapping = folio_mapping(folio);
+
+	/* The page may have been truncated before it was locked */
+	if (!mapping)
+		return 0;
+	return mapping_evict_folio(mapping, folio);
+}
+
+/**
  * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
  * @lstart: offset from which to truncate