author    Linus Torvalds <torvalds@linux-foundation.org>  2021-07-02 22:43:40 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-07-02 22:43:40 +0300
commit    4cad67197989c81417810b89f09a3549b75a2441 (patch)
tree      33cefbddefbdcf95b3ee385a79cbc22ba77101dd /include/asm-generic
parent    406254918b232db198ed60f5bf1f8b84d96bca00 (diff)
parent    803f4e1eab7a8938ba3a3c30dd4eb5e9eeef5e63 (diff)
download  linux-4cad67197989c81417810b89f09a3549b75a2441.tar.xz
Merge tag 'asm-generic-unaligned-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull asm/unaligned.h unification from Arnd Bergmann:
 "Unify asm/unaligned.h around struct helper

  The get_unaligned()/put_unaligned() helpers are traditionally
  architecture specific, with the two main variants being the
  "access-ok.h" version that assumes unaligned pointer accesses always
  work on a particular architecture, and the "le-struct.h" version that
  casts the data to a byte aligned type before dereferencing, for
  architectures that cannot always do unaligned accesses in hardware.

  Based on the discussion linked below, it appears that the access-ok
  version is not reliable on any architecture, but the struct version
  probably has no downsides. This series changes the code to use the
  same implementation on all architectures, addressing the few
  exceptions separately"

Link: https://lore.kernel.org/lkml/75d07691-1e4f-741f-9852-38c0b4f520bc@synopsys.com/
Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100363
Link: https://lore.kernel.org/lkml/20210507220813.365382-14-arnd@kernel.org/
Link: git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic.git unaligned-rework-v2
Link: https://lore.kernel.org/lkml/CAHk-=whGObOKruA_bU3aPGZfoDqZM1_9wBkwREp0H0FgR-90uQ@mail.gmail.com/

* tag 'asm-generic-unaligned-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  asm-generic: simplify asm/unaligned.h
  asm-generic: uaccess: 1-byte access is always aligned
  netpoll: avoid put_unaligned() on single character
  mwifiex: re-fix for unaligned accesses
  apparmor: use get_unaligned() only for multi-byte words
  partitions: msdos: fix one-byte get_unaligned()
  asm-generic: unaligned always use struct helpers
  asm-generic: unaligned: remove byteshift helpers
  powerpc: use linux/unaligned/le_struct.h on LE power7
  m68k: select CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  sh: remove unaligned access for sh4a
  openrisc: always use unaligned-struct header
  asm-generic: use asm-generic/unaligned.h for most architectures
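The two traditional variants the message describes can be sketched roughly as
follows. This is an illustrative reduction, not the exact code from the removed
linux/unaligned/ headers; the "_accessok" and "_struct" suffixes are invented
here purely for side-by-side comparison:

    /* Kernel types (u32, __le32) come from <linux/types.h>,
     * byte swapping from <asm/byteorder.h>.
     */
    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* "access-ok" style: a plain cast-and-dereference, assuming the
     * CPU tolerates any unaligned load. Per the GCC bug linked above,
     * the compiler is entitled to assume the pointer is aligned here,
     * so this variant is not reliable even on unaligned-capable CPUs.
     */
    static inline u32 get_unaligned_le32_accessok(const void *p)
    {
    	return le32_to_cpup((const __le32 *)p);
    }

    /* "le-struct" style: wrap the value in a packed struct so the
     * compiler knows the access may be misaligned and emits safe
     * (byte-wise, where required) loads. The new asm-generic header
     * below generalizes exactly this form via __get_unaligned_t().
     */
    static inline u32 get_unaligned_le32_struct(const void *p)
    {
    	const struct { __le32 x; } __packed *pp = p;

    	return le32_to_cpu(pp->x);
    }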
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/uaccess.h     4
-rw-r--r--  include/asm-generic/unaligned.h 141
2 files changed, 119 insertions, 26 deletions
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 4973328f3c6e..a0b2f270dddc 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -19,7 +19,7 @@ __get_user_fn(size_t size, const void __user *from, void *to)
switch (size) {
case 1:
- *(u8 *)to = get_unaligned((u8 __force *)from);
+ *(u8 *)to = *((u8 __force *)from);
return 0;
case 2:
*(u16 *)to = get_unaligned((u16 __force *)from);
@@ -45,7 +45,7 @@ __put_user_fn(size_t size, void __user *to, void *from)
switch (size) {
case 1:
- put_unaligned(*(u8 *)from, (u8 __force *)to);
+ *(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
put_unaligned(*(u16 *)from, (u16 __force *)to);
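
The two uaccess.h hunks above carry the "1-byte access is always aligned"
patch from the series: alignment of a one-byte object is 1, so every address
is correctly aligned for it and the get_unaligned()/put_unaligned() wrappers
reduce to plain dereferences. A minimal standalone check of that premise
(hypothetical, not kernel code):

    #include <stdint.h>

    /* Any pointer value is correctly aligned for a one-byte object,
     * so an unaligned-access helper adds nothing in the size-1 case.
     */
    _Static_assert(_Alignof(uint8_t) == 1,
    	       "one-byte accesses are always aligned");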
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 374c940e9be1..1c4242416c9f 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -6,31 +6,124 @@
* This is the most generic implementation of unaligned accesses
* and should work almost anywhere.
*/
+#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>
-/* Set by the arch if it can handle unaligned accesses in hardware. */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/access_ok.h>
-#endif
-
-#if defined(__LITTLE_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return le16_to_cpu(__get_unaligned_t(__le16, p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return le32_to_cpu(__get_unaligned_t(__le32, p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return le64_to_cpu(__get_unaligned_t(__le64, p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_t(__le16, cpu_to_le16(val), p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_t(__le32, cpu_to_le32(val), p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_t(__le64, cpu_to_le64(val), p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return be16_to_cpu(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return be32_to_cpu(__get_unaligned_t(__be32, p));
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return be64_to_cpu(__get_unaligned_t(__be64, p));
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_t(__be16, cpu_to_be16(val), p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_t(__be32, cpu_to_be32(val), p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_t(__be64, cpu_to_be64(val), p);
+}
+
+static inline u32 __get_unaligned_be24(const u8 *p)
+{
+ return p[0] << 16 | p[1] << 8 | p[2];
+}
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+ return __get_unaligned_be24(p);
+}
+
+static inline u32 __get_unaligned_le24(const u8 *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16;
+}
+
+static inline u32 get_unaligned_le24(const void *p)
+{
+ return __get_unaligned_le24(p);
+}
+
+static inline void __put_unaligned_be24(const u32 val, u8 *p)
+{
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be24(const u32 val, void *p)
+{
+ __put_unaligned_be24(val, p);
+}
+
+static inline void __put_unaligned_le24(const u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline void put_unaligned_le24(const u32 val, void *p)
+{
+ __put_unaligned_le24(val, p);
+}
#endif /* __ASM_GENERIC_UNALIGNED_H */
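
For reference, a hypothetical caller of the unified helpers (not part of this
diff), showing the common pattern of decoding fixed-endian fields at arbitrary
byte offsets; after this series, <asm/unaligned.h> resolves to the header above
on most architectures:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Hypothetical on-wire layout: a 3-byte big-endian length,
     * followed at offset 3 by a 4-byte little-endian sequence
     * number, which is misaligned for a u32. The struct-based
     * helpers make both reads safe on every architecture.
     */
    static inline u32 example_len(const u8 *buf)
    {
    	return get_unaligned_be24(buf);
    }

    static inline u32 example_seq(const u8 *buf)
    {
    	return get_unaligned_le32(buf + 3);
    }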