author     Christoph Hellwig <hch@lst.de>                 2024-03-25 02:45:41 +0300
committer  Andrew Morton <akpm@linux-foundation.org>      2024-04-26 06:56:12 +0300
commit     cb10c28ac82c9b7a5e9b3b1dc7157036c20c36dd (patch)
tree       bb9aa68386b8f292f11f134c6447076a807556bb
parent     1b265da7ea1e1ae997fa119c2846bb389eb39c6b (diff)
download   linux-cb10c28ac82c9b7a5e9b3b1dc7157036c20c36dd.tar.xz
mm: remove follow_pfn
Remove follow_pfn now that the last user is gone.

Link: https://lkml.kernel.org/r/20240324234542.2038726-3-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fei Li <fei1.li@intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  include/linux/mm.h    2
-rw-r--r--  mm/memory.c          36
-rw-r--r--  mm/nommu.c           21
3 files changed, 2 insertions, 57 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index dd47ba7a1c04..cb8e6158b577 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2424,8 +2424,6 @@ int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
int follow_pte(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp);
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
- unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index bcd29104f5ea..652370703e41 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5884,8 +5884,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
* Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
* should be taken for read.
*
- * KVM uses this function. While it is arguably less bad than ``follow_pfn``,
- * it is not a good general-purpose API.
+ * KVM uses this function. While it is arguably less bad than the historic
+ * ``follow_pfn``, it is not a good general-purpose API.
*
* Return: zero on success, -ve otherwise.
*/
@@ -5927,38 +5927,6 @@ out:
}
EXPORT_SYMBOL_GPL(follow_pte);
-/**
- * follow_pfn - look up PFN at a user virtual address
- * @vma: memory mapping
- * @address: user virtual address
- * @pfn: location to store found PFN
- *
- * Only IO mappings and raw PFN mappings are allowed.
- *
- * This function does not allow the caller to read the permissions
- * of the PTE. Do not use it.
- *
- * Return: zero and the pfn at @pfn on success, -ve otherwise.
- */
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
- unsigned long *pfn)
-{
- int ret = -EINVAL;
- spinlock_t *ptl;
- pte_t *ptep;
-
- if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
- return ret;
-
- ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
- if (ret)
- return ret;
- *pfn = pte_pfn(ptep_get(ptep));
- pte_unmap_unlock(ptep, ptl);
- return 0;
-}
-EXPORT_SYMBOL(follow_pfn);
-
#ifdef CONFIG_HAVE_IOREMAP_PROT
int follow_phys(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
diff --git a/mm/nommu.c b/mm/nommu.c
index 88b646ea6d30..331d2f778695 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -110,27 +110,6 @@ unsigned int kobjsize(const void *objp)
return page_size(page);
}
-/**
- * follow_pfn - look up PFN at a user virtual address
- * @vma: memory mapping
- * @address: user virtual address
- * @pfn: location to store found PFN
- *
- * Only IO mappings and raw PFN mappings are allowed.
- *
- * Returns zero and the pfn at @pfn on success, -ve otherwise.
- */
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
- unsigned long *pfn)
-{
- if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
- return -EINVAL;
-
- *pfn = address >> PAGE_SHIFT;
- return 0;
-}
-EXPORT_SYMBOL(follow_pfn);
-
void vfree(const void *addr)
{
kfree(addr);
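
A caller that previously relied on follow_pfn() can open-code the same lookup with follow_pte(), which is exactly what the removed mm/memory.c implementation above did internally. The sketch below is a minimal, hypothetical helper (the name lookup_pfn and its placement are assumptions, not part of this commit); per the follow_pte() kerneldoc, the mmap lock must be held for read around the call, and only VM_IO / VM_PFNMAP mappings are accepted.

#include <linux/mm.h>

/*
 * Hypothetical replacement for a former follow_pfn() caller: open-code
 * the lookup via follow_pte(), mirroring the body of the removed
 * function. The caller must hold the mmap lock for read.
 */
static int lookup_pfn(struct vm_area_struct *vma, unsigned long address,
		      unsigned long *pfn)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	/* Only IO mappings and raw PFN mappings are allowed. */
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
	if (ret)
		return ret;

	/* The PTE and its lock stay valid until pte_unmap_unlock(). */
	*pfn = pte_pfn(ptep_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}

Unlike the removed follow_pfn(), a caller written this way can also inspect the PTE (for example its permissions) while the page table lock is held, which is the reason the kerneldoc steered users toward follow_pte() in the first place.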