author		SeongJae Park <sj@kernel.org>	2023-09-15 05:52:49 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2023-10-04 20:32:30 +0300
commit		ace30fb21af5f1be1605db72c16040b95b1557ef (patch)
tree		354827f4ddb7ae02ee231fd953c83752b3236eb0 /mm/damon/paddr.c
parent		80333828ea7728ebe85d079bb5c1467eb9fc6c8c (diff)
download	linux-ace30fb21af5f1be1605db72c16040b95b1557ef.tar.xz
mm/damon/core: use pseudo-moving sum for nr_accesses_bp
Let nr_accesses_bp be calculated as a pseudo-moving sum that is updated for every sampling interval, using damon_moving_sum(). This is assumed to be useful for cases where the aggregation interval is set quite large, but the monitoring results need to be collected before the next aggregation interval passes.

Link: https://lkml.kernel.org/r/20230915025251.72816-7-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Cc: Brendan Higgins <brendanhiggins@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
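The point of the pseudo-moving sum is that nr_accesses_bp can be kept up to date at every sampling interval without remembering individual samples: only the running sum and the plain sum of the last completed window are needed. The stand-alone sketch below illustrates the idea; the helper name pseudo_moving_sum(), the toy access pattern, and the exact decay formula are illustrative assumptions here, not the kernel code. Refer to damon_moving_sum() in mm/damon/core.c for the real implementation.

#include <stdio.h>

/*
 * Illustrative sketch (not the kernel code): approximate a moving sum by
 * dropping an average-sized share of the last completed window and adding
 * the newly observed value.
 */
static unsigned int pseudo_moving_sum(unsigned int mvsum, unsigned int nomvsum,
		unsigned int len_window, unsigned int new_value)
{
	return mvsum - nomvsum / len_window + new_value;
}

int main(void)
{
	unsigned int mvsum = 0;		/* running pseudo-moving sum */
	unsigned int nomvsum = 0;	/* plain sum of the last completed window */
	unsigned int window_sum = 0;	/* plain sum for the current window */
	unsigned int len_window = 10;	/* samples per window, e.g. aggregation / sampling interval */

	for (unsigned int sample = 0; sample < 30; sample++) {
		/* Pretend every third sample observed an access. */
		unsigned int accessed = (sample % 3 == 0) ? 1 : 0;

		mvsum = pseudo_moving_sum(mvsum, nomvsum, len_window, accessed);
		window_sum += accessed;

		/* At a window boundary, snapshot the plain sum and start over. */
		if ((sample + 1) % len_window == 0) {
			nomvsum = window_sum;
			window_sum = 0;
		}

		printf("sample %2u: pseudo-moving sum = %u\n", sample, mvsum);
	}
	return 0;
}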
Diffstat (limited to 'mm/damon/paddr.c')
-rw-r--r--	mm/damon/paddr.c	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 44f21860b555..081e2a325778 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -148,7 +148,8 @@ out:
return accessed;
}
-static void __damon_pa_check_access(struct damon_region *r)
+static void __damon_pa_check_access(struct damon_region *r,
+ struct damon_attrs *attrs)
{
static unsigned long last_addr;
static unsigned long last_folio_sz = PAGE_SIZE;
@@ -157,12 +158,12 @@ static void __damon_pa_check_access(struct damon_region *r)
/* If the region is in the last checked page, reuse the result */
if (ALIGN_DOWN(last_addr, last_folio_sz) ==
ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
- damon_update_region_access_rate(r, last_accessed);
+ damon_update_region_access_rate(r, last_accessed, attrs);
return;
}
last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
- damon_update_region_access_rate(r, last_accessed);
+ damon_update_region_access_rate(r, last_accessed, attrs);
last_addr = r->sampling_addr;
}
@@ -175,7 +176,7 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t) {
- __damon_pa_check_access(r);
+ __damon_pa_check_access(r, &ctx->attrs);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
}
}