path: root/mm/filemap.c
author:    Yin Fengwei <fengwei.yin@intel.com>  2023-08-02 18:14:02 +0300
committer: Andrew Morton <akpm@linux-foundation.org>  2023-08-25 02:20:26 +0300
commit:    de74976eb65151a2f568e477fc2e0032df5b22b4 (patch)
tree:      fa36b6e03679eeaad213a391dde09527a9d665bf /mm/filemap.c
parent:    9f1f5b60e76d44fa85fef6970b7477f72d3999eb (diff)
download:  linux-de74976eb65151a2f568e477fc2e0032df5b22b4.tar.xz
filemap: add filemap_map_folio_range()
filemap_map_folio_range() maps a partial or full folio. Compared to the original filemap_map_pages(), it updates the refcount once per folio instead of once per page, which gives a minor performance improvement for large folios.

With a will-it-scale.page_fault3-like app (the file write fault testing changed to read fault testing; there is an attempt to upstream it to will-it-scale at [1]), we measured a 2% performance gain on a 48C/96T Cascade Lake test box with 96 processes running against xfs.

[1]: https://github.com/antonblanchard/will-it-scale/pull/37

Link: https://lkml.kernel.org/r/20230802151406.3735276-35-willy@infradead.org
Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
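[Editor's note: as background for the refcount change described above, here is a minimal userspace sketch of the batching idea. This is not kernel code; struct toy_folio, map_one_page() and the counts are invented for illustration. The old path takes one atomic reference per page mapped, while the new path counts references locally and issues a single folio_ref_add()-style atomic add per folio.]

	#include <stdatomic.h>
	#include <stdio.h>

	struct toy_folio {
		atomic_uint refcount;
		unsigned int nr_pages;
	};

	/* Stand-in for do_set_pte(): per-page work, no atomics involved. */
	static void map_one_page(struct toy_folio *folio, unsigned int idx)
	{
		(void)folio;
		(void)idx;
	}

	/* Old scheme: one atomic increment per page mapped. */
	static void map_per_page(struct toy_folio *folio)
	{
		for (unsigned int i = 0; i < folio->nr_pages; i++) {
			atomic_fetch_add(&folio->refcount, 1);
			map_one_page(folio, i);
		}
	}

	/* New scheme: count locally, then one atomic add per folio. */
	static void map_per_folio(struct toy_folio *folio)
	{
		unsigned int ref_count = 0;

		for (unsigned int i = 0; i < folio->nr_pages; i++) {
			ref_count++;
			map_one_page(folio, i);
		}
		atomic_fetch_add(&folio->refcount, ref_count);
	}

	int main(void)
	{
		struct toy_folio folio = { .nr_pages = 16 };

		atomic_init(&folio.refcount, 1);
		map_per_page(&folio);	/* 16 atomic ops on the refcount */
		map_per_folio(&folio);	/* 1 atomic op on the refcount */
		printf("final refcount: %u\n", atomic_load(&folio.refcount));
		return 0;
	}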
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c | 109
1 file changed, 55 insertions(+), 54 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 8040545954bc..bdc1e0b811bf 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2168,16 +2168,6 @@ out:
}
EXPORT_SYMBOL(filemap_get_folios);
-static inline
-bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
-{
- if (!folio_test_large(folio) || folio_test_hugetlb(folio))
- return false;
- if (index >= max)
- return false;
- return index < folio_next_index(folio) - 1;
-}
-
/**
* filemap_get_folios_contig - Get a batch of contiguous folios
* @mapping: The address_space to search
@@ -3436,10 +3426,10 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
return false;
}
-static struct folio *next_uptodate_page(struct folio *folio,
- struct address_space *mapping,
- struct xa_state *xas, pgoff_t end_pgoff)
+static struct folio *next_uptodate_folio(struct xa_state *xas,
+ struct address_space *mapping, pgoff_t end_pgoff)
{
+ struct folio *folio = xas_next_entry(xas, end_pgoff);
unsigned long max_idx;
do {
@@ -3477,20 +3467,51 @@ skip:
return NULL;
}
-static inline struct folio *first_map_page(struct address_space *mapping,
- struct xa_state *xas,
- pgoff_t end_pgoff)
+/*
+ * Map page range [start_page, start_page + nr_pages) of folio.
+ * start_page is gotten from start by folio_page(folio, start)
+ */
+static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
+ struct folio *folio, unsigned long start,
+ unsigned long addr, unsigned int nr_pages)
{
- return next_uptodate_page(xas_find(xas, end_pgoff),
- mapping, xas, end_pgoff);
-}
+ vm_fault_t ret = 0;
+ struct vm_area_struct *vma = vmf->vma;
+ struct file *file = vma->vm_file;
+ struct page *page = folio_page(folio, start);
+ unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
+ unsigned int ref_count = 0, count = 0;
-static inline struct folio *next_map_page(struct address_space *mapping,
- struct xa_state *xas,
- pgoff_t end_pgoff)
-{
- return next_uptodate_page(xas_next_entry(xas, end_pgoff),
- mapping, xas, end_pgoff);
+ do {
+ if (PageHWPoison(page))
+ continue;
+
+ if (mmap_miss > 0)
+ mmap_miss--;
+
+ /*
+ * NOTE: If there're PTE markers, we'll leave them to be
+ * handled in the specific fault path, and it'll prohibit the
+ * fault-around logic.
+ */
+ if (!pte_none(*vmf->pte))
+ continue;
+
+ if (vmf->address == addr)
+ ret = VM_FAULT_NOPAGE;
+
+ ref_count++;
+ do_set_pte(vmf, page, addr);
+ update_mmu_cache(vma, addr, vmf->pte);
+ } while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
+
+ /* Restore the vmf->pte */
+ vmf->pte -= nr_pages;
+
+ folio_ref_add(folio, ref_count);
+ WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+ return ret;
}
vm_fault_t filemap_map_pages(struct vm_fault *vmf,
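[Editor's note: an aside on the loop shape in the new function above. The comma operator in the do/while condition advances vmf->pte, page and addr on every iteration, including the final one, so the function walks vmf->pte back by nr_pages before returning, leaving the caller's PTE cursor where it found it. A standalone sketch of that pattern, with an int array standing in for the PTE table and all names invented:]

	#include <assert.h>
	#include <stddef.h>

	static void walk_and_restore(int *pte, size_t nr)
	{
		int *start = pte;
		size_t count = 0;

		do {
			*pte = 1;		/* stand-in for do_set_pte() */
		} while (pte++, ++count < nr);	/* cursor advances even on exit */

		pte -= nr;			/* mirrors "vmf->pte -= nr_pages" */
		assert(pte == start);
	}

	int main(void)
	{
		int table[8] = { 0 };

		walk_and_restore(table, 8);
		return 0;
	}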
@@ -3503,12 +3524,11 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
unsigned long addr;
XA_STATE(xas, &mapping->i_pages, start_pgoff);
struct folio *folio;
- struct page *page;
- unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
vm_fault_t ret = 0;
+ int nr_pages = 0;
rcu_read_lock();
- folio = first_map_page(mapping, &xas, end_pgoff);
+ folio = next_uptodate_folio(&xas, mapping, end_pgoff);
if (!folio)
goto out;
@@ -3525,17 +3545,13 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
goto out;
}
do {
-again:
- page = folio_file_page(folio, xas.xa_index);
- if (PageHWPoison(page))
- goto unlock;
-
- if (mmap_miss > 0)
- mmap_miss--;
+ unsigned long end;
addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
vmf->pte += xas.xa_index - last_pgoff;
last_pgoff = xas.xa_index;
+ end = folio->index + folio_nr_pages(folio) - 1;
+ nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
/*
* NOTE: If there're PTE markers, we'll leave them to be
@@ -3545,32 +3561,17 @@ again:
if (!pte_none(ptep_get(vmf->pte)))
goto unlock;
- /* We're about to handle the fault */
- if (vmf->address == addr)
- ret = VM_FAULT_NOPAGE;
+ ret |= filemap_map_folio_range(vmf, folio,
+ xas.xa_index - folio->index, addr, nr_pages);
- do_set_pte(vmf, page, addr);
- /* no need to invalidate: a not-present page won't be cached */
- update_mmu_cache(vma, addr, vmf->pte);
- if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
- xas.xa_index++;
- folio_ref_inc(folio);
- goto again;
- }
- folio_unlock(folio);
- continue;
unlock:
- if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
- xas.xa_index++;
- goto again;
- }
folio_unlock(folio);
folio_put(folio);
- } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
+ folio = next_uptodate_folio(&xas, mapping, end_pgoff);
+ } while (folio);
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
rcu_read_unlock();
- WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
return ret;
}
EXPORT_SYMBOL(filemap_map_pages);
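[Editor's note: the nr_pages computation added to filemap_map_pages() clamps the number of pages mapped from one folio to whichever ends first, the folio or the fault-around window [start_pgoff, end_pgoff]. A standalone check of that arithmetic, with made-up index values:]

	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned long folio_index = 64;	/* first page index of the folio */
		unsigned long folio_nr = 16;	/* a 16-page folio: indices 64..79 */
		unsigned long xa_index = 68;	/* fault-around starts mid-folio */
		unsigned long end_pgoff = 75;	/* fault-around window ends at 75 */

		unsigned long end = folio_index + folio_nr - 1;		/* 79 */
		unsigned long nr_pages = MIN(end, end_pgoff) - xa_index + 1;

		printf("map %lu pages starting at index %lu\n", nr_pages, xa_index);
		/* prints: map 8 pages starting at index 68 */
		return 0;
	}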