author    Matthew Wilcox (Oracle) <willy@infradead.org>  2021-03-04 20:02:54 +0300
committer Matthew Wilcox (Oracle) <willy@infradead.org>  2021-09-27 16:27:30 +0300
commit    101c0bf67f50ca0e8b9da97b26f8dc7cb232b4d3 (patch)
tree      da83e363252d5d54335c5ab52d552c06d715f99c /include
parent    a49d0c507759214a7cfd26555382c314db486792 (diff)
mm/filemap: Add folio_wait_bit()
Rename wait_on_page_bit() to folio_wait_bit().  We must always wait on
the folio, otherwise we won't be woken up due to the tail page hashing
to a different bucket from the head page.

This commit shrinks the kernel by 770 bytes, mostly due to moving the
page waitqueue lookup into folio_wait_bit_common().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
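For context on the "different bucket" point: the page/folio wait queues are not
per-page but live in a small hash table, and the bucket is chosen by hashing the
pointer that is passed in.  The waker (e.g. folio_unlock()) hashes the folio,
i.e. the head page, so a waiter that hashed a tail page would sleep on a different
bucket and never be woken.  Below is a minimal sketch of that bucket selection,
modelled on mm/filemap.c around this series; the exact names and table size are
illustrative and not part of this diff:

	/* Illustrative sketch; mirrors the waitqueue table in mm/filemap.c. */
	#define PAGE_WAIT_TABLE_BITS	8
	#define PAGE_WAIT_TABLE_SIZE	(1 << PAGE_WAIT_TABLE_BITS)
	static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

	static wait_queue_head_t *folio_waitqueue(struct folio *folio)
	{
		/* Waiters and wakers must hash the same (folio/head) pointer. */
		return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
	}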
Diffstat (limited to 'include')
-rw-r--r--  include/linux/pagemap.h  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ee39ad7b42f1..2f481327dee8 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -728,11 +728,11 @@ static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
}
/*
- * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
+ * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
* and should not be used directly.
*/
-extern void wait_on_page_bit(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+void folio_wait_bit(struct folio *folio, int bit_nr);
+int folio_wait_bit_killable(struct folio *folio, int bit_nr);
/*
* Wait for a folio to be unlocked.
@@ -744,14 +744,14 @@ extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
static inline void folio_wait_locked(struct folio *folio)
{
if (folio_test_locked(folio))
- wait_on_page_bit(&folio->page, PG_locked);
+ folio_wait_bit(folio, PG_locked);
}
static inline int folio_wait_locked_killable(struct folio *folio)
{
if (!folio_test_locked(folio))
return 0;
- return wait_on_page_bit_killable(&folio->page, PG_locked);
+ return folio_wait_bit_killable(folio, PG_locked);
}
static inline void wait_on_page_locked(struct page *page)
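
For illustration only (this example is not part of the patch): a hypothetical
caller that still holds a struct page converts it with page_folio() before
waiting, so the sleep and the wake-up hash the same head pointer:

	/* Hypothetical caller, not from this patch. */
	static void example_wait_for_unlock(struct page *page)
	{
		struct folio *folio = page_folio(page);	/* always the head page */

		/* Sleeps on PG_locked via folio_wait_bit(); folio_unlock() wakes it. */
		folio_wait_locked(folio);
	}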