Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  10
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 6e568f190e7a..d671ad367d67 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3618,11 +3618,11 @@ EXPORT_SYMBOL(unmap_mapping_range);
*/
static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct vm_area_struct *vma = vmf->vma;
struct mmu_notifier_range range;

- if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+ if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
return VM_FAULT_RETRY;
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
vma->vm_mm, vmf->address & PAGE_MASK,
@@ -3632,10 +3632,10 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
- restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
+ restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);

pte_unmap_unlock(vmf->pte, vmf->ptl);
- unlock_page(page);
+ folio_unlock(folio);

mmu_notifier_invalidate_range_end(&range);
return 0;
@@ -3835,7 +3835,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out_release;
}

- locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
+ locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags);

if (!locked) {
ret |= VM_FAULT_RETRY;
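
Both hunks follow the same page-to-folio conversion pattern: resolve the folio backing vmf->page once with page_folio(), then use the folio-based lock helpers in place of the page-based wrappers. A minimal sketch of that pattern, assuming an arbitrary fault-handling caller (the function name is hypothetical; the helpers are exactly the ones used in the diff above):

/* Hypothetical fault handler illustrating the page -> folio locking pattern. */
static vm_fault_t example_handle_fault(struct vm_fault *vmf)
{
	/* Derive the folio that contains the faulting page (as in the first hunk). */
	struct folio *folio = page_folio(vmf->page);

	/* Lock the folio, or ask for the fault to be retried if we cannot. */
	if (!folio_lock_or_retry(folio, vmf->vma->vm_mm, vmf->flags))
		return VM_FAULT_RETRY;

	/* ... operate on vmf->page under the folio lock ... */

	folio_unlock(folio);
	return 0;
}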