path: root/mm/shmem.c
author		Matthew Wilcox (Oracle) <willy@infradead.org>	2020-06-28 05:19:08 +0300
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-01-08 08:28:41 +0300
commit		6b24ca4a1a8d4ee3221d6d44ddbb99f542e4bda3 (patch)
tree		19f30971709b643688cada0032b9b33806c633e1 /mm/shmem.c
parent		25a8de7f8d970ffa7263bd9d32a08138cd949f17 (diff)
download	linux-6b24ca4a1a8d4ee3221d6d44ddbb99f542e4bda3.tar.xz
mm: Use multi-index entries in the page cache
We currently store large folios as 2^N consecutive entries. While this
consumes rather more memory than necessary, it also turns out to be buggy.
A writeback operation which starts within a tail page of a dirty folio will
not write back the folio as the xarray's dirty bit is only set on the head
index. With multi-index entries, the dirty bit will be found no matter
where in the folio the operation starts.

This does end up simplifying the page cache slightly, although not as
much as I had hoped.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
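As an illustration of the behaviour described above (a sketch, not code from
this patch): with the XArray's multi-index support (CONFIG_XARRAY_MULTI),
xa_store_range() stores a single entry covering a whole index range, and a
mark set on that entry is visible from any index in the range. The array
name `pages`, the helper `demo_multi_index()`, and the use of XA_MARK_0
(the mark the page cache aliases as PAGECACHE_TAG_DIRTY) are assumptions
made for the example:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(pages);	/* illustrative stand-in for i_pages */

	static void demo_multi_index(struct page *head)
	{
		/* One multi-index entry covering indices 0-15 (an order-4 folio). */
		xa_store_range(&pages, 0, 15, head, GFP_KERNEL);

		/* The mark lives on the entry itself, not on one slot per index. */
		xa_set_mark(&pages, 0, XA_MARK_0);

		/* A lookup starting at a "tail" index still finds the entry... */
		WARN_ON(xa_load(&pages, 9) != head);

		/* ...and sees the mark -- the case the old 2^N-consecutive-entries
		 * scheme got wrong for writeback's dirty bit. */
		WARN_ON(!xa_get_mark(&pages, 9, XA_MARK_0));
	}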
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	19
1 file changed, 8 insertions(+), 11 deletions(-)
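The patched loop in the diff below leans on the same machinery: once the
xa_state is declared with XA_STATE_ORDER(), a single xas_store() covers all
2^order indices, replacing the deleted per-index "goto next" loop. A minimal
sketch of that pattern (the helper `store_folio()` and its exact arguments
are invented for illustration, not code from this patch):

	#include <linux/xarray.h>

	static int store_folio(struct xarray *xa, unsigned long index,
			       struct page *page, unsigned int order)
	{
		/* xa_state addressing 2^order slots as one multi-index entry */
		XA_STATE_ORDER(xas, xa, index, order);

		do {
			xas_lock_irq(&xas);
			xas_store(&xas, page);	/* one store, no per-index loop */
			xas_unlock_irq(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		return xas_error(&xas);
	}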
diff --git a/mm/shmem.c b/mm/shmem.c
index e4c9e5c7081f..28d627444a24 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -694,7 +694,6 @@ static int shmem_add_to_page_cache(struct page *page,
 				   struct mm_struct *charge_mm)
 {
 	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
-	unsigned long i = 0;
 	unsigned long nr = compound_nr(page);
 	int error;
 
@@ -721,20 +720,18 @@ static int shmem_add_to_page_cache(struct page *page,
 	cgroup_throttle_swaprate(page, gfp);
 
 	do {
-		void *entry;
 		xas_lock_irq(&xas);
-		entry = xas_find_conflict(&xas);
-		if (entry != expected)
+		if (expected != xas_find_conflict(&xas)) {
+			xas_set_err(&xas, -EEXIST);
+			goto unlock;
+		}
+		if (expected && xas_find_conflict(&xas)) {
 			xas_set_err(&xas, -EEXIST);
-		xas_create_range(&xas);
-		if (xas_error(&xas))
 			goto unlock;
-next:
-		xas_store(&xas, page);
-		if (++i < nr) {
-			xas_next(&xas);
-			goto next;
 		}
+		xas_store(&xas, page);
+		if (xas_error(&xas))
+			goto unlock;
 		if (PageTransHuge(page)) {
 			count_vm_event(THP_FILE_ALLOC);
 			__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);