path: root/mm/fremap.c
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-17 02:20:36 +0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-17 02:20:36 +0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /mm/fremap.c
download  linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.xz
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'mm/fremap.c')
-rw-r--r--  mm/fremap.c  256
1 file changed, 256 insertions, 0 deletions
diff --git a/mm/fremap.c b/mm/fremap.c
new file mode 100644
index 000000000000..3235fb77c133
--- /dev/null
+++ b/mm/fremap.c
@@ -0,0 +1,256 @@
+/*
+ * linux/mm/fremap.c
+ *
+ * Explicit pagetable population and nonlinear (random) mappings support.
+ *
+ * started by Ingo Molnar, Copyright (C) 2002, 2003
+ */
+
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/file.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/swapops.h>
+#include <linux/rmap.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+
+ if (pte_none(pte))
+ return;
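+ /* Present mapping: flush and clear it; drop rmap/refcount on a normal page. */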
+ if (pte_present(pte)) {
+ unsigned long pfn = pte_pfn(pte);
+
+ flush_cache_page(vma, addr, pfn);
+ pte = ptep_clear_flush(vma, addr, ptep);
+ if (pfn_valid(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+ if (!PageReserved(page)) {
+ if (pte_dirty(pte))
+ set_page_dirty(page);
+ page_remove_rmap(page);
+ page_cache_release(page);
+ dec_mm_counter(mm, rss);
+ }
+ }
+ } else {
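+ /* Non-present entry: release swap state unless it's a file pte. */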
+ if (!pte_file(pte))
+ free_swap_and_cache(pte_to_swp_entry(pte));
+ pte_clear(mm, addr, ptep);
+ }
+}
+
+/*
+ * Install a file page at a given virtual memory address, releasing any
+ * previously existing mapping.
+ */
+int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, struct page *page, pgprot_t prot)
+{
+ struct inode *inode;
+ pgoff_t size;
+ int err = -ENOMEM;
+ pte_t *pte;
+ pmd_t *pmd;
+ pud_t *pud;
+ pgd_t *pgd;
+ pte_t pte_val;
+
+ pgd = pgd_offset(mm, addr);
+ spin_lock(&mm->page_table_lock);
+
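+ /* Walk the pagetable, allocating pud/pmd/pte levels as needed. */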
+ pud = pud_alloc(mm, pgd, addr);
+ if (!pud)
+ goto err_unlock;
+
+ pmd = pmd_alloc(mm, pud, addr);
+ if (!pmd)
+ goto err_unlock;
+
+ pte = pte_alloc_map(mm, pmd, addr);
+ if (!pte)
+ goto err_unlock;
+
+ /*
+ * This page may have been truncated. Tell the
+ * caller about it.
+ */
+ err = -EINVAL;
+ inode = vma->vm_file->f_mapping->host;
+ size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if (!page->mapping || page->index >= size)
+ goto err_unlock;
+
+ zap_pte(mm, vma, addr, pte);
+
+ inc_mm_counter(mm, rss);
+ flush_icache_page(vma, page);
+ set_pte_at(mm, addr, pte, mk_pte(page, prot));
+ page_add_file_rmap(page);
+ pte_val = *pte;
+ pte_unmap(pte);
+ update_mmu_cache(vma, addr, pte_val);
+
+ err = 0;
+err_unlock:
+ spin_unlock(&mm->page_table_lock);
+ return err;
+}
+EXPORT_SYMBOL(install_page);
+
+
+/*
+ * Install a file pte at a given virtual memory address, releasing any
+ * previously existing mapping.
+ */
+int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pgoff, pgprot_t prot)
+{
+ int err = -ENOMEM;
+ pte_t *pte;
+ pmd_t *pmd;
+ pud_t *pud;
+ pgd_t *pgd;
+ pte_t pte_val;
+
+ pgd = pgd_offset(mm, addr);
+ spin_lock(&mm->page_table_lock);
+
+ pud = pud_alloc(mm, pgd, addr);
+ if (!pud)
+ goto err_unlock;
+
+ pmd = pmd_alloc(mm, pud, addr);
+ if (!pmd)
+ goto err_unlock;
+
+ pte = pte_alloc_map(mm, pmd, addr);
+ if (!pte)
+ goto err_unlock;
+
+ zap_pte(mm, vma, addr, pte);
+
+ set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+ pte_val = *pte;
+ pte_unmap(pte);
+ update_mmu_cache(vma, addr, pte_val);
+ spin_unlock(&mm->page_table_lock);
+ return 0;
+
+err_unlock:
+ spin_unlock(&mm->page_table_lock);
+ return err;
+}
+
+
+/***
+ * sys_remap_file_pages - remap arbitrary pages of a shared backing store
+ * file within an existing vma.
+ * @start: start of the remapped virtual memory range
+ * @size: size of the remapped virtual memory range
+ * @prot: new protection bits of the range
+ * @pgoff: page of the backing store file to be mapped
+ * @flags: 0 or MAP_NONBLOCK - the latter causes no I/O.
+ *
+ * This syscall works purely via pagetables, so it's the most efficient
+ * way to map the same (large) file into a given virtual window. Unlike
+ * mmap()/mremap() it does not create any new vmas. The new mappings are
+ * also safe across swapout.
+ *
+ * NOTE: the 'prot' parameter right now is ignored, and the vma's default
+ * protection is used. Arbitrary protections might be implemented in the
+ * future.
+ */
+asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
+ unsigned long __prot, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct address_space *mapping;
+ unsigned long end = start + size;
+ struct vm_area_struct *vma;
+ int err = -EINVAL;
+ int has_write_lock = 0;
+
+ if (__prot)
+ return err;
+ /*
+ * Sanitize the syscall parameters:
+ */
+ start = start & PAGE_MASK;
+ size = size & PAGE_MASK;
+
+ /* Does the address range wrap, or is the span zero-sized? */
+ if (start + size <= start)
+ return err;
+
+ /* Can we represent this offset inside this architecture's pte's? */
+#if PTE_FILE_MAX_BITS < BITS_PER_LONG
+ if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
+ return err;
+#endif
+
+ /* We need down_write() to change vma->vm_flags. */
+ down_read(&mm->mmap_sem);
+ retry:
+ vma = find_vma(mm, start);
+
+ /*
+ * Make sure the vma is shared, that it supports prefaulting,
+ * and that the remapped range is valid and fully within
+ * the single existing vma. vm_private_data is used as a
+ * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
+ * or VM_LOCKED, but VM_LOCKED could be revoked later on).
+ */
+ if (vma && (vma->vm_flags & VM_SHARED) &&
+ (!vma->vm_private_data ||
+ (vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
+ vma->vm_ops && vma->vm_ops->populate &&
+ end > start && start >= vma->vm_start &&
+ end <= vma->vm_end) {
+
+ /* Must set VM_NONLINEAR before any pages are populated. */
+ if (pgoff != linear_page_index(vma, start) &&
+ !(vma->vm_flags & VM_NONLINEAR)) {
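+ /*
+ * mmap_sem can't be upgraded in place: drop the read lock,
+ * take it for writing, then revalidate the vma from scratch
+ * via the retry label.
+ */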
+ if (!has_write_lock) {
+ up_read(&mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ has_write_lock = 1;
+ goto retry;
+ }
+ mapping = vma->vm_file->f_mapping;
+ spin_lock(&mapping->i_mmap_lock);
+ flush_dcache_mmap_lock(mapping);
+ vma->vm_flags |= VM_NONLINEAR;
+ vma_prio_tree_remove(vma, &mapping->i_mmap);
+ vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
+ flush_dcache_mmap_unlock(mapping);
+ spin_unlock(&mapping->i_mmap_lock);
+ }
+
+ err = vma->vm_ops->populate(vma, start, size,
+ vma->vm_page_prot,
+ pgoff, flags & MAP_NONBLOCK);
+
+ /*
+ * We can't clear VM_NONLINEAR because we'd have to do
+ * it after ->populate completes, and that would prevent
+ * downgrading the lock. (Locks can't be upgraded).
+ */
+ }
+ if (likely(!has_write_lock))
+ up_read(&mm->mmap_sem);
+ else
+ up_write(&mm->mmap_sem);
+
+ return err;
+}
+
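The syscall above is reachable from userspace through glibc's remap_file_pages(2) wrapper. Below is a minimal usage sketch, not part of this commit: the file "data.bin" is hypothetical and assumed to be at least four pages long. As the __prot check above requires, prot must be 0, and pgoff is given in units of pages.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long psz = sysconf(_SC_PAGESIZE);
        int fd = open("data.bin", O_RDWR);   /* hypothetical test file */

        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* One shared vma covering four file pages, linear to begin with. */
        char *win = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, 0);
        if (win == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /*
         * Rewire the first window page to file page 3 without creating
         * a new vma.  prot must be 0 and pgoff is in pages; since
         * 3 != the linear index 0, this flips the vma to VM_NONLINEAR.
         */
        if (remap_file_pages(win, psz, 0, 3, 0) < 0) {
            perror("remap_file_pages");
            return 1;
        }

        printf("window[0] now shows file page 3: 0x%02x\n",
               (unsigned char)win[0]);
        munmap(win, 4 * psz);
        close(fd);
        return 0;
    }

Because the requested pgoff (3) differs from the linear index (0), the call takes the VM_NONLINEAR branch in sys_remap_file_pages() above, moving the vma from the prio tree to the mapping's i_mmap_nonlinear list.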