path: root/include/linux/unaligned
author     Denys Vlasenko <dvlasenk@redhat.com>  2016-03-18 00:22:47 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-18 01:09:34 +0300
commit     e3bde9568d992c5f985e6e30731a5f9f9bef7b13 (patch)
tree       861438397ca7cbedeab03b3c2ba995b1d437a728 /include/linux/unaligned
parent     bc27fb68aaad44dd8f5c34924f05721f0abaeec1 (diff)
download   linux-e3bde9568d992c5f985e6e30731a5f9f9bef7b13.tar.xz
include/linux/unaligned: force inlining of byteswap operations
Sometimes gcc mysteriously doesn't inline very small functions we expect
to be inlined.  See

    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66122

With this .config:
http://busybox.net/~vda/kernel_config_OPTIMIZE_INLINING_and_Os,
the following functions get deinlined many times.

Examples of disassembly:

<get_unaligned_be16> (24 copies, 108 calls):
       66 8b 07                mov    (%rdi),%ax
       55                      push   %rbp
       48 89 e5                mov    %rsp,%rbp
       86 e0                   xchg   %ah,%al
       5d                      pop    %rbp
       c3                      retq

<get_unaligned_be32> (25 copies, 181 calls):
       8b 07                   mov    (%rdi),%eax
       55                      push   %rbp
       48 89 e5                mov    %rsp,%rbp
       0f c8                   bswap  %eax
       5d                      pop    %rbp
       c3                      retq

<get_unaligned_be64> (23 copies, 94 calls):
       48 8b 07                mov    (%rdi),%rax
       55                      push   %rbp
       48 89 e5                mov    %rsp,%rbp
       48 0f c8                bswap  %rax
       5d                      pop    %rbp
       c3                      retq

<put_unaligned_be16> (2 copies, 11 calls):
       89 f8                   mov    %edi,%eax
       55                      push   %rbp
       c1 ef 08                shr    $0x8,%edi
       c1 e0 08                shl    $0x8,%eax
       09 c7                   or     %eax,%edi
       48 89 e5                mov    %rsp,%rbp
       66 89 3e                mov    %di,(%rsi)

<put_unaligned_be32> (8 copies, 43 calls):
       55                      push   %rbp
       0f cf                   bswap  %edi
       89 3e                   mov    %edi,(%rsi)
       48 89 e5                mov    %rsp,%rbp
       5d                      pop    %rbp
       c3                      retq

<put_unaligned_be64> (26 copies, 157 calls):
       55                      push   %rbp
       48 0f cf                bswap  %rdi
       48 89 3e                mov    %rdi,(%rsi)
       48 89 e5                mov    %rsp,%rbp
       5d                      pop    %rbp
       c3                      retq

This patch fixes this via s/inline/__always_inline/.  It only affects
arches with efficient unaligned access insns, such as x86 (arches which
lack such ops do not include linux/unaligned/access_ok.h).

Code size decrease after the patch is ~8.5k:

    text     data      bss       dec     hex filename
92197848 20826112 36417536 149441496 8e84bd8 vmlinux
92189231 20826144 36417536 149432911 8e82a4f vmlinux6_unaligned_be_after

Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Graf <tgraf@suug.ch>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
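[Editorial note, not part of the commit: in the kernel, __always_inline is
essentially "inline" plus GCC's always_inline function attribute, which
bypasses the inlining cost model that a plain "inline" hint is subject to
under -Os/CONFIG_OPTIMIZE_INLINING. A minimal userspace sketch of the same
idea, assuming a GCC-compatible compiler and a little-endian host such as
the x86 examples above; the force_inline, bswap16, and read_be16 names are
illustrative only, not kernel API:]

/* Sketch: force inlining even under -Os, where a plain "inline" hint
 * may be ignored and an out-of-line copy emitted, as in the
 * disassembly above. */
#include <stdint.h>
#include <string.h>

#define force_inline inline __attribute__((__always_inline__))

static force_inline uint16_t bswap16(uint16_t x)
{
        return (uint16_t)((x << 8) | (x >> 8));
}

/* Read a big-endian u16 from a possibly unaligned pointer. */
uint16_t read_be16(const void *p)
{
        uint16_t v;
        memcpy(&v, p, sizeof(v));  /* compilers lower this to a plain load */
        return bswap16(v);         /* guaranteed inline: no call emitted */
}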
Diffstat (limited to 'include/linux/unaligned')
-rw-r--r--  include/linux/unaligned/access_ok.h  24
1 file changed, 12 insertions, 12 deletions
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
index 99c1b4d20b0f..33383ca23837 100644
--- a/include/linux/unaligned/access_ok.h
+++ b/include/linux/unaligned/access_ok.h
@@ -4,62 +4,62 @@
 #include <linux/kernel.h>
 #include <asm/byteorder.h>

-static inline u16 get_unaligned_le16(const void *p)
+static __always_inline u16 get_unaligned_le16(const void *p)
 {
 	return le16_to_cpup((__le16 *)p);
 }

-static inline u32 get_unaligned_le32(const void *p)
+static __always_inline u32 get_unaligned_le32(const void *p)
 {
 	return le32_to_cpup((__le32 *)p);
 }

-static inline u64 get_unaligned_le64(const void *p)
+static __always_inline u64 get_unaligned_le64(const void *p)
 {
 	return le64_to_cpup((__le64 *)p);
 }

-static inline u16 get_unaligned_be16(const void *p)
+static __always_inline u16 get_unaligned_be16(const void *p)
 {
 	return be16_to_cpup((__be16 *)p);
 }

-static inline u32 get_unaligned_be32(const void *p)
+static __always_inline u32 get_unaligned_be32(const void *p)
 {
 	return be32_to_cpup((__be32 *)p);
 }

-static inline u64 get_unaligned_be64(const void *p)
+static __always_inline u64 get_unaligned_be64(const void *p)
 {
 	return be64_to_cpup((__be64 *)p);
 }

-static inline void put_unaligned_le16(u16 val, void *p)
+static __always_inline void put_unaligned_le16(u16 val, void *p)
 {
 	*((__le16 *)p) = cpu_to_le16(val);
 }

-static inline void put_unaligned_le32(u32 val, void *p)
+static __always_inline void put_unaligned_le32(u32 val, void *p)
 {
 	*((__le32 *)p) = cpu_to_le32(val);
 }

-static inline void put_unaligned_le64(u64 val, void *p)
+static __always_inline void put_unaligned_le64(u64 val, void *p)
 {
 	*((__le64 *)p) = cpu_to_le64(val);
 }

-static inline void put_unaligned_be16(u16 val, void *p)
+static __always_inline void put_unaligned_be16(u16 val, void *p)
 {
 	*((__be16 *)p) = cpu_to_be16(val);
 }

-static inline void put_unaligned_be32(u32 val, void *p)
+static __always_inline void put_unaligned_be32(u32 val, void *p)
 {
 	*((__be32 *)p) = cpu_to_be32(val);
 }

-static inline void put_unaligned_be64(u64 val, void *p)
+static __always_inline void put_unaligned_be64(u64 val, void *p)
 {
 	*((__be64 *)p) = cpu_to_be64(val);
 }
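[Editorial note: for illustration, a hypothetical caller, not part of this
patch. Typical usage of the accessors looks like the following; with
__always_inline, each call on x86 now compiles down to a bare load plus
byteswap instead of an out-of-line function call:]

/* Hypothetical example: parse a big-endian wire header from a buffer
 * at an arbitrary, possibly unaligned offset. */
struct wire_hdr {
	u16 type;
	u32 len;
};

static void parse_wire_hdr(const u8 *buf, struct wire_hdr *h)
{
	h->type = get_unaligned_be16(buf);      /* bytes 0..1, big-endian */
	h->len  = get_unaligned_be32(buf + 2);  /* bytes 2..5, big-endian */
}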