From a750036f35cda160ef77408ec92c3dc41f8feebb Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 19 Jul 2011 13:00:45 +0100 Subject: x86: Fix write lock scalability 64-bit issue With the write lock path simply subtracting RW_LOCK_BIAS there is, on large systems, the theoretical possibility of overflowing the 32-bit value that was used so far (namely if 128 or more CPUs manage to do the subtraction, but don't get to do the inverse addition in the failure path quickly enough). A first measure is to modify RW_LOCK_BIAS itself - with the new value chosen, it is good for up to 2048 CPUs each allowed to nest over 2048 times on the read path without causing an issue. Quite possibly it would even be sufficient to adjust the bias a little further, assuming that allowing for significantly less nesting would suffice. However, as the original value chosen allowed for even more nesting levels, to support more than 2048 CPUs (possible currently only for 64-bit kernels) the lock itself gets widened to 64 bits. 
Signed-off-by: Jan Beulich Cc: Linus Torvalds Cc: Andrew Morton Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/4E258E0D020000780004E3F0@nat28.tlf.novell.com Signed-off-by: Ingo Molnar --- arch/x86/lib/rwlock.S | 12 ++++++------ arch/x86/lib/thunk_64.S | 1 - 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'arch/x86/lib') diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S index fca17829caa8..1cad22139c88 100644 --- a/arch/x86/lib/rwlock.S +++ b/arch/x86/lib/rwlock.S @@ -15,12 +15,12 @@ ENTRY(__write_lock_failed) CFI_STARTPROC FRAME 0: LOCK_PREFIX - addl $RW_LOCK_BIAS, (%__lock_ptr) + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr) 1: rep; nop - cmpl $RW_LOCK_BIAS, (%__lock_ptr) + cmpl $WRITE_LOCK_CMP, (%__lock_ptr) jne 1b LOCK_PREFIX - subl $RW_LOCK_BIAS, (%__lock_ptr) + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr) jnz 0b ENDFRAME ret @@ -31,12 +31,12 @@ ENTRY(__read_lock_failed) CFI_STARTPROC FRAME 0: LOCK_PREFIX - incl (%__lock_ptr) + READ_LOCK_SIZE(inc) (%__lock_ptr) 1: rep; nop - cmpl $1, (%__lock_ptr) + READ_LOCK_SIZE(cmp) $1, (%__lock_ptr) js 1b LOCK_PREFIX - decl (%__lock_ptr) + READ_LOCK_SIZE(dec) (%__lock_ptr) js 0b ENDFRAME ret diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S index d5b088b3ab81..a63efd6bb6a5 100644 --- a/arch/x86/lib/thunk_64.S +++ b/arch/x86/lib/thunk_64.S @@ -8,7 +8,6 @@ #include <linux/linkage.h> #include <asm/dwarf2.h> #include <asm/calling.h> -#include <asm/rwlock.h> /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ .macro THUNK name, func, put_ret_addr_in_rdi=0 -- cgit v1.2.3