author		Ingo Molnar <mingo@elte.hu>	2009-02-24 23:52:45 +0300
committer	Ingo Molnar <mingo@elte.hu>	2009-02-24 23:52:45 +0300
commit		0edcf8d6926f4038443dbc24e319530177ca0353 (patch)
tree		6010af62f73d01ab673d5106f310eaf4f4228e32 /mm/vmalloc.c
parent		87b203079ed949de52f0d92aeae20e5e0116c12f (diff)
parent		40150d37be7f7949b2ec07d511244da856647d84 (diff)
download	linux-0edcf8d6926f4038443dbc24e319530177ca0353.tar.xz
Merge branch 'tj-percpu' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/percpu

Conflicts:
	arch/x86/include/asm/pgtable.h
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	94
1 file changed, 91 insertions(+), 3 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 903cad46e796..fb6f59935fb2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,7 @@
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/bootmem.h>
+#include <linux/pfn.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
@@ -152,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
*
* Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
*/
-static int vmap_page_range(unsigned long start, unsigned long end,
- pgprot_t prot, struct page **pages)
+static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages)
{
pgd_t *pgd;
unsigned long next;
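The comment in this hunk states the mapping invariant. Purely for illustration (not part of the patch), the invariant could be spelled out as a check using vmalloc_to_page(), the page-table walker already present in this file:

	/* Hypothetical debug helper, not part of this patch: after a
	 * successful map of nr pages at addr, each pte points at the
	 * matching entry of pages[]. */
	static void check_vmap_invariant(unsigned long addr, struct page **pages,
					 unsigned int nr)
	{
		unsigned int i;

		for (i = 0; i < nr; i++)
			BUG_ON(vmalloc_to_page((void *)(addr + i * PAGE_SIZE))
			       != pages[i]);
	}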
@@ -169,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
if (err)
break;
} while (pgd++, addr = next, addr != end);
- flush_cache_vmap(start, end);
if (unlikely(err))
return err;
return nr;
}
+static int vmap_page_range(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages)
+{
+ int ret;
+
+ ret = vmap_page_range_noflush(start, end, prot, pages);
+ flush_cache_vmap(start, end);
+ return ret;
+}
+
static inline int is_vmalloc_or_module_addr(const void *x)
{
/*
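Splitting the cache flush out of vmap_page_range() lets a caller that maps several adjacent sub-ranges pay for a single flush at the end. A minimal sketch of that pattern, as a hypothetical helper inside this file (the name and the two-range split are assumptions, not from the patch):

	/* Hypothetical helper: map two adjacent ranges, flush once. */
	static int vmap_two_ranges(unsigned long start, unsigned long mid,
				   unsigned long end, pgprot_t prot,
				   struct page **lo_pages, struct page **hi_pages)
	{
		int err;

		err = vmap_page_range_noflush(start, mid, prot, lo_pages);
		if (err < 0)
			return err;
		err = vmap_page_range_noflush(mid, end, prot, hi_pages);
		if (err < 0)
			return err;
		flush_cache_vmap(start, end);	/* one flush covers both ranges */
		return 0;
	}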
@@ -982,6 +992,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
}
EXPORT_SYMBOL(vm_map_ram);
+/**
+ * vm_area_register_early - register vmap area early during boot
+ * @vm: vm_struct to register
+ * @align: requested alignment
+ *
+ * This function is used to register a kernel vm area before
+ * vmalloc_init() is called. @vm->size and @vm->flags should contain
+ * proper values on entry and other fields should be zero. On return,
+ * @vm->addr contains the allocated address.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_register_early(struct vm_struct *vm, size_t align)
+{
+ static size_t vm_init_off __initdata;
+ unsigned long addr;
+
+ addr = ALIGN(VMALLOC_START + vm_init_off, align);
+ vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
+
+ vm->addr = (void *)addr;
+
+ vm->next = vmlist;
+ vmlist = vm;
+}
+
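A hedged sketch of the intended call pattern (the helper name and size are hypothetical; within this merge the real consumer is the percpu first-chunk setup):

	/* Hypothetical early-boot caller, assuming a page-aligned size. */
	static struct vm_struct example_vm __initdata;

	void __init example_register_vm_area(size_t size)
	{
		example_vm.flags = VM_ALLOC;
		example_vm.size = size;	/* size and flags set, rest stays zero */
		vm_area_register_early(&example_vm, PAGE_SIZE);
		/* example_vm.addr now holds the reserved vmalloc-space address */
	}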
void __init vmalloc_init(void)
{
struct vmap_area *va;
@@ -1009,6 +1045,58 @@ void __init vmalloc_init(void)
vmap_initialized = true;
}
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
+ *
+ * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing. The caller is
+ * responsible for calling flush_cache_vmap() on the mapped areas
+ * after this function returns.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
+ */
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+ pgprot_t prot, struct page **pages)
+{
+ return vmap_page_range_noflush(addr, addr + size, prot, pages);
+}
+
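A hedged usage sketch for the mapper (hypothetical caller; assumes @pages covers the whole area, that area->size is exactly the size to map, and follows the flush ordering from the NOTE above):

	/* Hypothetical sketch, not part of this patch. */
	static int example_map_area(struct vm_struct *area, struct page **pages)
	{
		unsigned long addr = (unsigned long)area->addr;
		int nr;

		nr = map_kernel_range_noflush(addr, area->size, PAGE_KERNEL, pages);
		if (nr < 0)
			return nr;
		flush_cache_vmap(addr, addr + area->size);	/* caller flushes */
		return 0;
	}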
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing. The caller is
+ * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
+ * before calling this function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+ vunmap_page_range(addr, addr + size);
+}
+
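Open-coding the flushes around the noflush unmapper gives the ordering the NOTE describes; a minimal sketch, equivalent in effect to unmap_kernel_range() below (the helper name is hypothetical):

	/* Hypothetical sketch: the documented flush ordering, spelled out. */
	static void example_unmap_area(unsigned long addr, unsigned long size)
	{
		flush_cache_vunmap(addr, addr + size);		/* before unmapping */
		unmap_kernel_range_noflush(addr, size);
		flush_tlb_kernel_range(addr, addr + size);	/* after unmapping */
	}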
+/**
+ * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Similar to unmap_kernel_range_noflush() but flushes the cache before
+ * the unmapping and the TLB after.
+ */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
unsigned long end = addr + size;