From 8f4c344696b9f9f8471d7f342076ef10ed7f66a5 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 19 Sep 2013 19:06:46 +0100
Subject: lockref: use cmpxchg64 explicitly for lockless updates

The cmpxchg() function tends not to support 64-bit arguments on 32-bit
architectures. This could be either due to use of unsigned long
arguments (like on ARM) or lack of instruction support (cmpxchgq on
x86). However, these architectures may implement a specific cmpxchg64()
function to provide 64-bit cmpxchg support instead.

Since the lockref code requires a 64-bit cmpxchg and relies on the
architecture selecting ARCH_USE_CMPXCHG_LOCKREF, move to using
cmpxchg64 instead of cmpxchg and allow 32-bit architectures to make use
of the lockless lockref implementation.

Cc: Waiman Long
Signed-off-by: Will Deacon
Signed-off-by: Linus Torvalds
---
 lib/lockref.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/lockref.c b/lib/lockref.c
index e2cd2c0a8821..677d036cf3c7 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -14,8 +14,8 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg(&lockref->lock_count,			\
-					 old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64(&lockref->lock_count,		\
+					   old.lock_count, new.lock_count);	\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-- cgit v1.2.3

From c26d436cbf7a9549ec1073480a2e3f0d3f64e02d Mon Sep 17 00:00:00 2001
From: Andre Naujoks
Date: Fri, 13 Sep 2013 19:37:12 +0200
Subject: lib: introduce upper case hex ascii helpers

To be able to use the hex ASCII functions in case-sensitive
environments, the array hex_asc_upper[] and the functions needed for
hex_byte_pack_upper() are introduced.

Signed-off-by: Andre Naujoks
Signed-off-by: David S. Miller
---
 include/linux/kernel.h | 11 +++++++++++
 lib/hexdump.c          |  2 ++
 2 files changed, 13 insertions(+)

(limited to 'lib')

diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 482ad2d84a32..672ddc4de4af 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -439,6 +439,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte)
 	return buf;
 }
 
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x)	hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x)	hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+	*buf++ = hex_asc_upper_hi(byte);
+	*buf++ = hex_asc_upper_lo(byte);
+	return buf;
+}
+
 static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
 {
 	return hex_byte_pack(buf, byte);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3f0494c9d57a..8499c810909a 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -14,6 +14,8 @@
 
 const char hex_asc[] = "0123456789abcdef";
 EXPORT_SYMBOL(hex_asc);
+const char hex_asc_upper[] = "0123456789ABCDEF";
+EXPORT_SYMBOL(hex_asc_upper);
 
 /**
  * hex_to_bin - convert a hex digit to its real value
-- cgit v1.2.3

From d2212b4dce596fee83e5c523400bf084f4cc816c Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 26 Sep 2013 17:27:00 +0100
Subject: lockref: allow relaxed cmpxchg64 variant for lockless updates

The 64-bit cmpxchg operation on the lockref is ordered by virtue of
hazarding between the cmpxchg operation and the reference count
manipulation. On weakly ordered memory architectures (such as ARM), it
can be of great benefit to omit the barrier instructions where they are
not needed.

This patch moves the lockless lockref code over to a cmpxchg64_relaxed
operation, which doesn't provide barrier semantics. If the operation
isn't defined, we simply #define it as the usual 64-bit cmpxchg macro.

Cc: Waiman Long
Signed-off-by: Will Deacon
Signed-off-by: Linus Torvalds
---
 lib/lockref.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c7..e294ae445c9a 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -3,6 +3,14 @@
 
 #ifdef CONFIG_CMPXCHG_LOCKREF
 
+/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
@@ -14,8 +22,9 @@
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\
-		old.lock_count = cmpxchg64(&lockref->lock_count,		\
-					   old.lock_count, new.lock_count);	\
+		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
+						   old.lock_count,		\
+						   new.lock_count);		\
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-- cgit v1.2.3

From 730d7d339884f20da28b59bf6f0a16af6400a113 Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman"
Date: Mon, 23 Sep 2013 14:41:17 -0700
Subject: sysfs: Allow mounting without CONFIG_NET

In kobj_ns_current_may_mount the default should be to allow the mount.
The test is only for a single kobj_ns_type at a time, and unless there
is a reason to prevent it, mounting sysfs should be allowed. Subsystems
that are not registered are not involved, so they can't have a reason
to prevent mounting sysfs.

This is a bug-fix to commit 7dc5dbc879bd ("sysfs: Restrict mounting
sysfs") that came in via the userns tree during the 3.12 merge window.

Reported-and-tested-by: James Hogan
Signed-off-by: "Eric W. Biederman"
Signed-off-by: Linus Torvalds
---
 lib/kobject.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

(limited to 'lib')

diff --git a/lib/kobject.c b/lib/kobject.c
index 962175134702..669bf190d4fb 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -933,10 +933,7 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
 
 bool kobj_ns_current_may_mount(enum kobj_ns_type type)
 {
-	bool may_mount = false;
-
-	if (type == KOBJ_NS_TYPE_NONE)
-		return true;
+	bool may_mount = true;
 
 	spin_lock(&kobj_ns_type_lock);
 	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
-- cgit v1.2.3

From 491f6f8e5fd9a57aaf03b6d6e3e153f1c27d8a46 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 23 Sep 2013 12:59:56 +0200
Subject: lockref: use arch_mutex_cpu_relax() in CMPXCHG_LOOP()

Make use of arch_mutex_cpu_relax() so architectures can override the
default cpu_relax() semantics. This is especially useful for s390,
where cpu_relax() yield()s the current (virtual) cpu and is therefore
very expensive; that would contradict the whole purpose of the lockless
cmpxchg loop.

Signed-off-by: Heiko Carstens
---
 lib/lockref.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/lockref.c b/lib/lockref.c
index e294ae445c9a..6f9d434c1521 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -11,6 +11,14 @@
 # define cmpxchg64_relaxed cmpxchg64
 #endif
 
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
 /*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
@@ -28,7 +36,7 @@
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-		cpu_relax();							\
+		arch_mutex_cpu_relax();						\
 	}									\
 } while (0)
 
-- cgit v1.2.3
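
As a quick, standalone illustration of the upper-case hex helpers added in
commit c26d436cbf7a above, here is a minimal sketch that copies the
hex_asc_upper[] table and the hex_byte_pack_upper() definition from that
patch into a plain userspace C program. The u8 typedef, the example address
bytes, and main() are illustrative assumptions for the demo and are not part
of any of the commits above.

#include <stdio.h>
#include <stddef.h>

typedef unsigned char u8;

/* Local copies of the table and macros introduced by the patch above. */
static const char hex_asc_upper[] = "0123456789ABCDEF";
#define hex_asc_upper_lo(x)	hex_asc_upper[((x) & 0x0f)]
#define hex_asc_upper_hi(x)	hex_asc_upper[((x) & 0xf0) >> 4]

/* Pack one byte as two upper-case hex digits and return the advanced cursor. */
static char *hex_byte_pack_upper(char *buf, u8 byte)
{
	*buf++ = hex_asc_upper_hi(byte);
	*buf++ = hex_asc_upper_lo(byte);
	return buf;
}

int main(void)
{
	/* Example bytes (illustrative only), formatted like a MAC address. */
	const u8 addr[] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
	char out[sizeof(addr) * 3];	/* two hex digits plus ':' or '\0' per byte */
	char *p = out;
	size_t i;

	for (i = 0; i < sizeof(addr); i++) {
		p = hex_byte_pack_upper(p, addr[i]);
		*p++ = (i + 1 < sizeof(addr)) ? ':' : '\0';
	}
	printf("%s\n", out);	/* prints 00:1A:2B:3C:4D:5E */
	return 0;
}

Built with a stock C compiler, this prints 00:1A:2B:3C:4D:5E, which is the
same two-character, upper-case output the in-kernel helper produces per byte.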