author		Minchan Kim <minchan@kernel.org>	2018-05-07 17:15:37 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-05-14 17:06:48 +0300
commit		720c241924046aff83f5f2323232f34a30a4c281 (patch)
tree		deea4bba50854f89995df7f934a37f5eaeed8e01 /drivers/android/binder_alloc.c
parent		838d5565669aa5bb7deb605684a5970d51d5eaf6 (diff)
ANDROID: binder: change down_write to down_read
binder_update_page_range() needs mmap_sem held for write because vm_insert_page() needs to change vma->vm_flags to VM_MIXEDMAP unless it is already set. However, profiling binder shows that every binder buffer is mapped in advance by binder_mmap(), which means VM_MIXEDMAP can be set at binder_mmap() time, when mmap_sem is already held for write. binder_update_page_range() therefore no longer needs mmap_sem for write, so use the proper API, down_read(). This helps with mmap_sem contention as well as fixing the down_write() abuse.

Ganesh Mahendran ran app-launching and binder throughput tests and could not find any problem, and I ran a binder latency test per Greg KH's request (thanks to Martijn for showing me how) and could not find any problem either.

Cc: Ganesh Mahendran <opensource.ganesh@gmail.com>
Cc: Joe Perches <joe@perches.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Todd Kjos <tkjos@google.com>
Reviewed-by: Martijn Coenen <maco@android.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
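For context, a minimal sketch (abbreviated and not lifted from this patch; function names follow the mainline binder sources of that era, and the diffstat below is filtered to binder_alloc.c, so the binder_mmap() side of the change is not part of the diff shown here) of setting VM_MIXEDMAP once at binder_mmap() time, where mmap_sem is already held for write, so that binder_update_page_range() only ever needs the semaphore for read:

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	/* ->mmap() is invoked with mmap_sem held for write, so modifying
	 * vm_flags here is safe.  With VM_MIXEDMAP set up front,
	 * vm_insert_page() never has to change vm_flags later from
	 * binder_update_page_range(), which can therefore take mmap_sem
	 * with down_read() instead of down_write().
	 */
	vma->vm_flags |= VM_MIXEDMAP;

	/* ... other flag handling, validation and setup elided ... */
	return binder_alloc_mmap_handler(&proc->alloc, vma);
}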
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--	drivers/android/binder_alloc.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 5a426c877dfb..4f382d51def1 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -219,7 +219,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	mm = alloc->vma_vm_mm;
 	if (mm) {
-		down_write(&mm->mmap_sem);
+		down_read(&mm->mmap_sem);
 		vma = alloc->vma;
 	}
@@ -288,7 +288,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		/* vm_insert_page does not seem to increment the refcount */
 	}
 	if (mm) {
-		up_write(&mm->mmap_sem);
+		up_read(&mm->mmap_sem);
 		mmput(mm);
 	}
 	return 0;
@@ -321,7 +321,7 @@ err_page_ptr_cleared:
 	}
 err_no_vma:
 	if (mm) {
-		up_write(&mm->mmap_sem);
+		up_read(&mm->mmap_sem);
 		mmput(mm);
 	}
 	return vma ? -ENOMEM : -ESRCH;