Diffstat (limited to 'mm/damon/vaddr.c')
-rw-r--r--	mm/damon/vaddr.c	21	+++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 9d92c5eb3a1f..d6cb1fca1769 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -422,7 +422,8 @@ static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
}
struct damon_young_walk_private {
- unsigned long *page_sz;
+ /* size of the folio for the access checked virtual memory address */
+ unsigned long *folio_sz;
bool young;
};
@@ -452,7 +453,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
if (pmd_young(*pmd) || !folio_test_idle(folio) ||
			mmu_notifier_test_young(walk->mm, addr)) {
- *priv->page_sz = HPAGE_PMD_SIZE;
+ *priv->folio_sz = HPAGE_PMD_SIZE;
priv->young = true;
}
folio_put(folio);
@@ -474,7 +475,7 @@ regular_page:
goto out;
if (pte_young(*pte) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm, addr)) {
- *priv->page_sz = PAGE_SIZE;
+ *priv->folio_sz = PAGE_SIZE;
priv->young = true;
}
folio_put(folio);
@@ -504,7 +505,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
if (pte_young(entry) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm, addr)) {
- *priv->page_sz = huge_page_size(h);
+ *priv->folio_sz = huge_page_size(h);
priv->young = true;
}
@@ -524,10 +525,10 @@ static const struct mm_walk_ops damon_young_ops = {
};
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
- unsigned long *page_sz)
+ unsigned long *folio_sz)
{
struct damon_young_walk_private arg = {
- .page_sz = page_sz,
+ .folio_sz = folio_sz,
.young = false,
};
@@ -547,18 +548,18 @@ static void __damon_va_check_access(struct mm_struct *mm,
struct damon_region *r, bool same_target)
{
static unsigned long last_addr;
- static unsigned long last_page_sz = PAGE_SIZE;
+ static unsigned long last_folio_sz = PAGE_SIZE;
static bool last_accessed;
/* If the region is in the last checked page, reuse the result */
- if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
- ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
+ if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
+ ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
if (last_accessed)
r->nr_accesses++;
return;
}
- last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
+ last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
if (last_accessed)
r->nr_accesses++;
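
The folio-granular result reuse in the last hunk can be illustrated outside the kernel. Below is a minimal userspace C sketch, not the actual DAMON code: fake_check_young(), the have_last flag, and the fixed 2 MiB folio size are hypothetical stand-ins for damon_va_young(), the same_target check, and the folio size it reports; only the ALIGN_DOWN()-based caching mirrors the logic of __damon_va_check_access() above.

/*
 * Userspace sketch of the result-reuse idea: if two sampling addresses
 * fall in the same folio, the second access check reuses the first result
 * instead of walking the page table again.
 */
#include <stdbool.h>
#include <stdio.h>

/* power-of-two alignment, as the kernel's ALIGN_DOWN() is used here */
#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))

/* hypothetical stand-in for damon_va_young(); pretends every mapping is a 2 MiB folio */
static bool fake_check_young(unsigned long addr, unsigned long *folio_sz)
{
	*folio_sz = 2UL << 20;
	return (addr >> 21) & 1;	/* arbitrary "accessed" pattern */
}

static unsigned int check_access(unsigned long sampling_addr)
{
	static unsigned long last_addr;
	static unsigned long last_folio_sz = 4096;
	static bool last_accessed;
	static bool have_last;		/* stands in for the same_target check */
	unsigned int nr_accesses = 0;

	/* same folio as the last check: reuse the cached result */
	if (have_last && ALIGN_DOWN(last_addr, last_folio_sz) ==
			ALIGN_DOWN(sampling_addr, last_folio_sz)) {
		if (last_accessed)
			nr_accesses++;
		return nr_accesses;
	}

	/* different folio: do the (simulated) access check and cache it */
	last_accessed = fake_check_young(sampling_addr, &last_folio_sz);
	if (last_accessed)
		nr_accesses++;
	last_addr = sampling_addr;
	have_last = true;
	return nr_accesses;
}

int main(void)
{
	printf("%u\n", check_access(0x200000));	/* fresh check */
	printf("%u\n", check_access(0x2ff000));	/* same 2 MiB folio: cached */
	printf("%u\n", check_access(0x400000));	/* different folio: fresh check */
	return 0;
}

With a 4 KiB last_folio_sz the cache would almost never hit for distinct sampling addresses; recording the actual folio size, as the rename above emphasizes, is what lets neighbouring regions that land in the same large folio share one check.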