author     Oleg Nesterov <oleg@redhat.com>                       2016-05-21 02:58:36 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>       2016-05-21 03:58:30 +0300
commit     d2005e3f41d4f9299e2df6a967c8beb5086967a9 (patch)
tree       075dc5ba6de68cfe96acdb91478595e7d3366f2e /include
parent     cd33a76b0f2805fb0d6a05a2d53933f3817ccc9b (diff)
userfaultfd: don't pin the user memory in userfaultfd_file_create()
userfaultfd_file_create() increments mm->mm_users; this means that the
memory won't be unmapped/freed if mm owner exits/execs, and UFFDIO_COPY
after that can populate the orphaned mm more.

Change userfaultfd_file_create() and userfaultfd_ctx_put() to use
mm->mm_count to pin mm_struct.  This means that
atomic_inc_not_zero(mm->mm_users) is needed when we are going to
actually play with this memory.

Except handle_userfault() path doesn't need this, the caller must
already have a reference.

The patch adds the new trivial helper, mmget_not_zero(), it can have
more users.

Link: http://lkml.kernel.org/r/20160516172254.GA8595@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
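For illustration, below is a minimal sketch of the refcounting pattern the
message describes.  The context type and function names (some_ctx,
ctx_do_work, ctx_release) are hypothetical and not part of the patch; only
mmget_not_zero(), mmput() and mmdrop() are the real helpers involved.  The
idea: hold a long-lived mm_count reference so the mm_struct itself stays
allocated, and take a short-lived mm_users reference only while actually
touching the user memory.

	#include <linux/sched.h>

	/* Hypothetical context that pinned mm with mm_count at create time. */
	struct some_ctx {
		struct mm_struct *mm;	/* mm_count reference held since create */
	};

	static void ctx_do_work(struct some_ctx *ctx)
	{
		struct mm_struct *mm = ctx->mm;

		/* The owner may have exited or execed; only proceed if
		 * there are still users of the address space. */
		if (!mmget_not_zero(mm))
			return;

		/* ... safely operate on the user memory here ... */

		mmput(mm);	/* drop the temporary mm_users reference */
	}

	static void ctx_release(struct some_ctx *ctx)
	{
		mmdrop(ctx->mm);	/* release the long-lived mm_count pin */
	}

With mm_count-only pinning, an exiting owner can still unmap and free the
memory via mmput(); the mm_struct merely remains valid to inspect until the
last mmdrop().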
Diffstat (limited to 'include')
-rw-r--r--	include/linux/sched.h	7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 01fe1bb68754..6b3213d96da6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2723,12 +2723,17 @@ extern struct mm_struct * mm_alloc(void);
 
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
-static inline void mmdrop(struct mm_struct * mm)
+static inline void mmdrop(struct mm_struct *mm)
 {
 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
 		__mmdrop(mm);
 }
 
+static inline bool mmget_not_zero(struct mm_struct *mm)
+{
+	return atomic_inc_not_zero(&mm->mm_users);
+}
+
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
 /* same as above but performs the slow path from the async context. Can