author		Al Viro <viro@zeniv.linux.org.uk>	2013-03-11 04:14:08 +0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-03-11 05:18:21 +0400
commit		32fcfd40715ed13f7a80cbde49d097ddae20c8e2 (patch)
tree		e4c211c1135a48ee853b3ec4d00623686317293a /mm/vmalloc.c
parent		6dbe51c251a327e012439c4772097a13df43c5b8 (diff)
download	linux-32fcfd40715ed13f7a80cbde49d097ddae20c8e2.tar.xz
make vfree() safe to call from interrupt contexts
A bunch of RCU callbacks want to be able to do vfree() and end up with
rather kludgy schemes.  Just let vfree() do the right thing - put the
victim on llist and schedule actual __vunmap() via schedule_work(), so
that it runs from non-interrupt context.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
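For illustration, a minimal hypothetical caller of the new behaviour (struct foo, foo_free_rcu and vbuf below are made-up names, not part of this commit): an RCU callback runs from softirq context, so until now it could not call vfree() directly and had to bounce the buffer through its own deferral scheme; with this change it can simply call vfree().

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical example, not part of this commit. */
struct foo {
	struct rcu_head rcu;
	void *vbuf;		/* allocated with vmalloc() */
};

static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	/*
	 * RCU callbacks run in softirq context; with this patch vfree()
	 * sees in_interrupt(), pushes the region onto a per-cpu llist and
	 * lets a workqueue perform the real __vunmap() later.
	 */
	vfree(f->vbuf);
	kfree(f);
}

A caller would hand the object off with call_rcu(&f->rcu, foo_free_rcu) and no longer needs a private workqueue just to free the vmalloc'd buffer.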
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	45
1 file changed, 40 insertions(+), 5 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f751f2068c3..ef9bdf742273 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -27,10 +27,30 @@
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
+#include <linux/llist.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
+struct vfree_deferred {
+	struct llist_head list;
+	struct work_struct wq;
+};
+static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+
+static void __vunmap(const void *, int);
+
+static void free_work(struct work_struct *w)
+{
+	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+	struct llist_node *llnode = llist_del_all(&p->list);
+	while (llnode) {
+		void *p = llnode;
+		llnode = llist_next(llnode);
+		__vunmap(p, 1);
+	}
+}
+
/*** Page table manipulation functions ***/
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -1184,10 +1204,14 @@ void __init vmalloc_init(void)
for_each_possible_cpu(i) {
	struct vmap_block_queue *vbq;
+	struct vfree_deferred *p;
	vbq = &per_cpu(vmap_block_queue, i);
	spin_lock_init(&vbq->lock);
	INIT_LIST_HEAD(&vbq->free);
+	p = &per_cpu(vfree_deferred, i);
+	init_llist_head(&p->list);
+	INIT_WORK(&p->wq, free_work);
}
/* Import existing vmlist entries. */
@@ -1511,7 +1535,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
	kfree(area);
	return;
}
-
+
/**
* vfree - release memory allocated by vmalloc()
* @addr: memory base address
@@ -1520,15 +1544,25 @@ static void __vunmap(const void *addr, int deallocate_pages)
* obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
* NULL, no operation is performed.
*
- * Must not be called in interrupt context.
+ * Must not be called in NMI context (strictly speaking, only if we don't
+ * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
+ * conventions for vfree() arch-dependent would be a really bad idea)
+ *
*/
void vfree(const void *addr)
{
-	BUG_ON(in_interrupt());
+	BUG_ON(in_nmi());
	kmemleak_free(addr);
-	__vunmap(addr, 1);
+	if (!addr)
+		return;
+	if (unlikely(in_interrupt())) {
+		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
+		llist_add((struct llist_node *)addr, &p->list);
+		schedule_work(&p->wq);
+	} else
+		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
@@ -1545,7 +1579,8 @@ void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
-	__vunmap(addr, 0);
+	if (addr)
+		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);