Diffstat (limited to 'mm/mlock.c')
 mm/mlock.c | 38 ++++++++++++++++++--------------------
 1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index f8e8d30ab08a..9e9c8be58277 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -210,7 +210,7 @@ static void mlock_folio_batch(struct folio_batch *fbatch)
folio_batch_reinit(fbatch);
}
-void mlock_page_drain_local(void)
+void mlock_drain_local(void)
{
struct folio_batch *fbatch;
@@ -221,7 +221,7 @@ void mlock_page_drain_local(void)
local_unlock(&mlock_fbatch.lock);
}
-void mlock_page_drain_remote(int cpu)
+void mlock_drain_remote(int cpu)
{
struct folio_batch *fbatch;
@@ -231,7 +231,7 @@ void mlock_page_drain_remote(int cpu)
mlock_folio_batch(fbatch);
}
-bool need_mlock_page_drain(int cpu)
+bool need_mlock_drain(int cpu)
{
return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
}
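The three drain helpers above lose the "page" in their names, since the per-CPU batch they flush already holds folios. A hedged sketch of how a user of the renamed helpers might look; the example_* functions are illustrative, not the actual callers (which live in mm/swap.c):

/*
 * Illustrative sketch only: exercises the renamed drain API.
 * "internal.h" is mm/internal.h, which declares these helpers.
 */
#include "internal.h"

static void example_drain_self(void)
{
	/* Flush the current CPU's pending mlock folio batch. */
	mlock_drain_local();
}

static void example_drain_offline(int cpu)
{
	/* An offlined CPU cannot drain itself; do it remotely. */
	if (need_mlock_drain(cpu))
		mlock_drain_remote(cpu);
}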
@@ -262,13 +262,12 @@ void mlock_folio(struct folio *folio)
}
/**
- * mlock_new_page - mlock a newly allocated page not yet on LRU
- * @page: page to be mlocked, either a normal page or a THP head.
+ * mlock_new_folio - mlock a newly allocated folio not yet on LRU
+ * @folio: folio to be mlocked, either normal or a THP head.
*/
-void mlock_new_page(struct page *page)
+void mlock_new_folio(struct folio *folio)
{
struct folio_batch *fbatch;
- struct folio *folio = page_folio(page);
int nr_pages = folio_nr_pages(folio);
local_lock(&mlock_fbatch.lock);
@@ -286,13 +285,12 @@ void mlock_new_page(struct page *page)
}
/**
- * munlock_page - munlock a page
- * @page: page to be munlocked, either a normal page or a THP head.
+ * munlock_folio - munlock a folio
+ * @folio: folio to be munlocked, either normal or a THP head.
*/
-void munlock_page(struct page *page)
+void munlock_folio(struct folio *folio)
{
struct folio_batch *fbatch;
- struct folio *folio = page_folio(page);
local_lock(&mlock_fbatch.lock);
fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
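In both conversions above, the struct page argument becomes a struct folio and the page_folio() lookup moves out to the call sites. A hedged sketch of what that asks of a caller; example_set_page_mlock() is hypothetical:

#include <linux/mm.h>
#include "internal.h"	/* mlock_new_folio(), munlock_folio() */

/* Hypothetical caller updated for the folio-taking interface. */
static void example_set_page_mlock(struct page *page, bool lock)
{
	/*
	 * The conversion that mlock_new_page()/munlock_page()
	 * performed internally now happens at the call site.
	 */
	struct folio *folio = page_folio(page);

	if (lock)
		mlock_new_folio(folio);
	else
		munlock_folio(folio);
}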
@@ -314,7 +312,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
spinlock_t *ptl;
pte_t *start_pte, *pte;
- struct page *page;
+ struct folio *folio;
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
@@ -322,11 +320,11 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
goto out;
if (is_huge_zero_pmd(*pmd))
goto out;
- page = pmd_page(*pmd);
+ folio = page_folio(pmd_page(*pmd));
if (vma->vm_flags & VM_LOCKED)
- mlock_folio(page_folio(page));
+ mlock_folio(folio);
else
- munlock_page(page);
+ munlock_folio(folio);
goto out;
}
@@ -334,15 +332,15 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
- page = vm_normal_page(vma, addr, *pte);
- if (!page || is_zone_device_page(page))
+ folio = vm_normal_folio(vma, addr, *pte);
+ if (!folio || folio_is_zone_device(folio))
continue;
- if (PageTransCompound(page))
+ if (folio_test_large(folio))
continue;
if (vma->vm_flags & VM_LOCKED)
- mlock_folio(page_folio(page));
+ mlock_folio(folio);
else
- munlock_page(page);
+ munlock_folio(folio);
}
pte_unmap(start_pte);
out:
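With these hunks, mlock_pte_range() works in folio terms end to end: the PMD path resolves its folio once via page_folio(pmd_page(*pmd)), and the PTE loop uses vm_normal_folio() plus the folio_* predicates in place of the old page helpers. A hedged distillation of the per-PTE decision, assuming only helpers visible in the diff; example_mlock_vma_folio() is hypothetical:

#include <linux/mm.h>
#include "internal.h"	/* mlock_folio(), munlock_folio() */

/* Hypothetical distillation of the per-PTE filter above. */
static void example_mlock_vma_folio(struct folio *folio,
				    struct vm_area_struct *vma)
{
	/* Zone-device folios are never mlocked. */
	if (!folio || folio_is_zone_device(folio))
		return;
	/* Large folios are skipped here, as in the PTE loop above. */
	if (folio_test_large(folio))
		return;
	if (vma->vm_flags & VM_LOCKED)
		mlock_folio(folio);
	else
		munlock_folio(folio);
}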