author		Peter Xu <peterx@redhat.com>	2020-08-12 04:38:15 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-12 20:58:03 +0300
commit		2558fd7f5c3eda31d4474c7cdc8dc4b3bb6526f5 (patch)
tree		f81def9f19623a67c3e5e99a632e664450d7c464 /arch/mips
parent		aeb6aefc3129307858e4d5a4b73ae28f1871ae05 (diff)
mm/mips: use general page fault accounting
Use the general page fault accounting by passing regs into handle_mm_fault().
It naturally solves the issue of accounting a single page fault multiple
times when the fault is retried.

Fix up the PERF_COUNT_SW_PAGE_FAULTS perf event manually for page fault
retries by moving it before taking mmap_sem.

Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Link: http://lkml.kernel.org/r/20200707225021.200906-12-peterx@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
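For context, the "general" accounting referred to above happens inside handle_mm_fault() once a non-NULL regs is passed; the helper doing it, mm_account_fault() in mm/memory.c, was added earlier in this series. A condensed sketch of its logic (comments abridged, details simplified from the upstream helper) looks roughly like this:

	static inline void mm_account_fault(struct pt_regs *regs,
					    unsigned long address, unsigned int flags,
					    vm_fault_t ret)
	{
		bool major;

		/*
		 * Incomplete (VM_FAULT_RETRY) and failed faults are not
		 * accounted; a retried fault is counted only when it finally
		 * completes, which is what removes the double counting.
		 */
		if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
			return;

		/*
		 * The fault is major if the final attempt was major, or if
		 * it had to be retried at least once.
		 */
		major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);

		if (major)
			current->maj_flt++;
		else
			current->min_flt++;

		/*
		 * regs == NULL means a kernel-initiated fault (e.g. GUP):
		 * update the per-task counters but skip the perf events.
		 */
		if (!regs)
			return;

		if (major)
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
		else
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}

Because the MAJ/MIN perf events and the maj_flt/min_flt counters are handled there, the architecture handler below keeps only the overall PERF_COUNT_SW_PAGE_FAULTS event, moved up before mmap_read_lock() so a retried fault is still counted exactly once.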
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/mm/fault.c	14
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index b1db39784db9..7c871b14e74a 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -96,6 +96,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
 	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
@@ -152,12 +154,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags, NULL);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
 	if (fault_signal_pending(fault, regs))
 		return;
 
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -168,15 +169,6 @@ good_area:
 		BUG();
 	}
 
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR) {
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-						  regs, address);
-			tsk->maj_flt++;
-		} else {
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-						  regs, address);
-			tsk->min_flt++;
-		}
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;