author    Yuanyuan Zhong <yzhong@purestorage.com>    2024-05-23 21:35:31 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2024-06-16 14:51:07 +0300
commit    51cf740e24fb1f57be2816468a620d0cd0b78d55 (patch)
tree      74a19df0fac857bec2bc6801b0ac8780c575250f /fs
parent    a68ebd3dcd44e26e8e13b181a80628dacc16b8b7 (diff)
download  linux-51cf740e24fb1f57be2816468a620d0cd0b78d55.tar.xz
mm: /proc/pid/smaps_rollup: avoid skipping vma after getting mmap_lock again
commit 6d065f507d82307d6161ac75c025111fb8b08a46 upstream.

After switching smaps_rollup to use the VMA iterator, searching for the
next entry is part of the condition expression of the do-while loop, so
the current VMA needs to be accounted before the continue statement.
Otherwise, with some VMAs skipped, the userspace-observed memory
consumption from /proc/pid/smaps_rollup will be smaller than the sum of
the corresponding fields from /proc/pid/smaps.

Link: https://lkml.kernel.org/r/20240523183531.2535436-1-yzhong@purestorage.com
Fixes: c4c84f06285e ("fs/proc/task_mmu: stop using linked list and highest_vm_end")
Signed-off-by: Yuanyuan Zhong <yzhong@purestorage.com>
Reviewed-by: Mohamed Khalfella <mkhalfella@purestorage.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
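To make the failure mode described above concrete, the following is a minimal, self-contained userspace analogue, not the kernel source: the table of fake VMAs and the names next_vma() and rollup() are invented for illustration, and the "contended" path is taken on every pass. As in the for_each_vma() do-while loop, the advance to the next entry happens in the loop condition, so a bare continue after the simulated lock drop moves past a VMA that was never accounted.

/* rollup_demo.c -- illustrative sketch only; cc rollup_demo.c && ./a.out */
#include <stdio.h>
#include <stddef.h>

struct vma { long start, end; };

/* A made-up address space; the gaps make every re-lookup hit Case 1/2. */
static struct vma vmas[] = {
	{ 0x1000, 0x3000 }, { 0x4000, 0x9000 },
	{ 0xa000, 0xb000 }, { 0xc000, 0xd000 },
};
#define NR_VMAS (sizeof(vmas) / sizeof(vmas[0]))

/* Stand-in for vma_next(): return the next entry or NULL at the end. */
static struct vma *next_vma(size_t *pos)
{
	return *pos < NR_VMAS ? &vmas[(*pos)++] : NULL;
}

static long rollup(int fixed)
{
	size_t pos = 0;
	long total = 0, last_end = 0;
	struct vma *vma = next_vma(&pos);

	do {
		/* Normal accounting, like smap_gather_stats(vma, &mss, 0). */
		total += vma->end - vma->start;
		last_end = vma->end;

		/* Pretend mmap_lock was contended and we had to re-find our place. */
		vma = next_vma(&pos);
		if (!vma)
			break;				/* Case 3: nothing left */

		if (vma->start >= last_end) {		/* Case 1 and 2 */
			if (fixed) {
				/* The patch: account this VMA before skipping ahead. */
				total += vma->end - vma->start;
				last_end = vma->end;
			}
			/*
			 * continue jumps to the loop condition, which (like
			 * for_each_vma()) advances again -- without the fix
			 * this VMA is silently dropped from the total.
			 */
			continue;
		}
		/* Case 4 (a VMA that grew past last_end) is omitted here. */
	} while ((vma = next_vma(&pos)) != NULL);

	return total;
}

int main(void)
{
	printf("buggy rollup: %ld bytes\n", rollup(0));
	printf("fixed rollup: %ld bytes\n", rollup(1));
	return 0;
}

Built with any C compiler, the buggy variant prints a smaller total than the fixed one, mirroring the /proc/pid/smaps_rollup versus /proc/pid/smaps discrepancy the patch addresses.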
Diffstat (limited to 'fs')
-rw-r--r--  fs/proc/task_mmu.c  |  9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 102f48668c35..a097c9a65234 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -965,12 +965,17 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 				break;
 
 			/* Case 1 and 2 above */
-			if (vma->vm_start >= last_vma_end)
+			if (vma->vm_start >= last_vma_end) {
+				smap_gather_stats(vma, &mss, 0);
+				last_vma_end = vma->vm_end;
 				continue;
+			}
 
 			/* Case 4 above */
-			if (vma->vm_end > last_vma_end)
+			if (vma->vm_end > last_vma_end) {
 				smap_gather_stats(vma, &mss, last_vma_end);
+				last_vma_end = vma->vm_end;
+			}
 		}
 	} for_each_vma(vmi, vma);