author		Jan Kara <jack@suse.cz>	2016-12-15 02:07:42 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-15 03:04:09 +0300
commit		a19e25536ed3a20845f642ce531e10c27fb2add5 (patch)
tree		7bcc1c969a3138ef3c31c2fb8ae815c96afe4382 /mm/memory.c
parent		66a6197c118540d454913eef24d68d7491ab5d5f (diff)
mm: change return values of finish_mkwrite_fault()
Currently finish_mkwrite_fault() returns 0 when the PTE got changed before we acquired the PTE lock, and VM_FAULT_WRITE when we succeeded in modifying the PTE. This is somewhat confusing since 0 generally means success; it is also inconsistent with finish_fault(), which returns 0 on success. Change finish_mkwrite_fault() to return 0 on success and VM_FAULT_NOPAGE when the PTE changed. Practically, there should be no behavioral difference since we bail out from the fault the same way regardless of whether we return 0, VM_FAULT_NOPAGE, or VM_FAULT_WRITE. Also note that VM_FAULT_WRITE has no effect for shared mappings since the only two places that check it - KSM and GUP - care about private mappings only. Generally, the meaning of VM_FAULT_WRITE for shared mappings is not well defined, and we should probably clean that up.

Link: http://lkml.kernel.org/r/1479460644-25076-17-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
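For illustration only, here is a minimal sketch (not part of the patch) of how a caller is expected to treat the new return convention. The wrapper handle_mkwrite() is hypothetical; only finish_mkwrite_fault(), struct vm_fault, VM_FAULT_ERROR, and VM_FAULT_NOPAGE come from the code being changed:

/*
 * Hypothetical caller sketch: after this patch, finish_mkwrite_fault()
 * returns 0 when the PTE was successfully made writable, and
 * VM_FAULT_NOPAGE when the PTE changed under us and we just bail out
 * of the fault.  A caller therefore only checks the error/NOPAGE bits
 * and otherwise treats the result as success.
 */
static int handle_mkwrite(struct vm_fault *vmf)
{
	int ret = finish_mkwrite_fault(vmf);

	if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
		return ret;	/* bail out; nothing more to do for this fault */

	/* success: the PTE is now writable, continue completing the fault */
	return 0;
}

This mirrors the check that wp_page_shared() uses in the diff below, where the old special-case test for a 0 return is dropped because 0 now simply means success.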
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index bbc25da48a18..8b7f0656a921 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2295,10 +2295,10 @@ int finish_mkwrite_fault(struct vm_fault *vmf)
 	 */
 	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		return 0;
+		return VM_FAULT_NOPAGE;
 	}
 	wp_page_reuse(vmf);
-	return VM_FAULT_WRITE;
+	return 0;
 }
 
 /*
@@ -2341,8 +2341,7 @@ static int wp_page_shared(struct vm_fault *vmf)
 			return tmp;
 		}
 		tmp = finish_mkwrite_fault(vmf);
-		if (unlikely(!tmp || (tmp &
-				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
+		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
 			unlock_page(vmf->page);
 			put_page(vmf->page);
 			return tmp;