summaryrefslogtreecommitdiff
path: root/fs/btrfs/struct-funcs.c
diff options
context:
space:
mode:
authorDavid Miller <davem@davemloft.net>2008-02-15 18:40:52 +0300
committerChris Mason <chris.mason@oracle.com>2008-09-25 19:04:00 +0400
commitdf68b8a7ad4a18c9e63f1c12015a59c3b7031adb (patch)
treef2e6569b5f4843a01f23068fdfd3b450c8258459 /fs/btrfs/struct-funcs.c
parent39b5637f6f195852259004bb27b58e2dcf9fb378 (diff)
downloadlinux-df68b8a7ad4a18c9e63f1c12015a59c3b7031adb.tar.xz
Btrfs: unaligned access fixes
Btrfs set/get macros lose type information needed to avoid unaligned accesses on sparc64. Here is a patch for the kernel bits which fixes most of the unaligned accesses on sparc64. btrfs_name_hash is modified to return the hash value instead of getting a return location via a (potentially unaligned) pointer. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/struct-funcs.c')
-rw-r--r--fs/btrfs/struct-funcs.c30
1 file changed, 14 insertions, 16 deletions
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index c5715a60554c..ad03a32d1116 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -21,16 +21,15 @@
u##bits btrfs_##name(struct extent_buffer *eb, \
type *s) \
{ \
- unsigned long offset = (unsigned long)s + \
- offsetof(type, member); \
- __le##bits *tmp; \
+ unsigned long part_offset = (unsigned long)s; \
+ unsigned long offset = part_offset + offsetof(type, member); \
+ type *p; \
/* ugly, but we want the fast path here */ \
if (eb->map_token && offset >= eb->map_start && \
offset + sizeof(((type *)0)->member) <= eb->map_start + \
eb->map_len) { \
- tmp = (__le##bits *)(eb->kaddr + offset - \
- eb->map_start); \
- return le##bits##_to_cpu(*tmp); \
+ p = (type *)(eb->kaddr + part_offset - eb->map_start); \
+ return le##bits##_to_cpu(p->member); \
} \
{ \
int err; \
@@ -48,8 +47,8 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
read_eb_member(eb, s, type, member, &res); \
return le##bits##_to_cpu(res); \
} \
- tmp = (__le##bits *)(kaddr + offset - map_start); \
- res = le##bits##_to_cpu(*tmp); \
+ p = (type *)(kaddr + part_offset - map_start); \
+ res = le##bits##_to_cpu(p->member); \
if (unmap_on_exit) \
unmap_extent_buffer(eb, map_token, KM_USER1); \
return res; \
@@ -58,16 +57,15 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
void btrfs_set_##name(struct extent_buffer *eb, \
type *s, u##bits val) \
{ \
- unsigned long offset = (unsigned long)s + \
- offsetof(type, member); \
- __le##bits *tmp; \
+ unsigned long part_offset = (unsigned long)s; \
+ unsigned long offset = part_offset + offsetof(type, member); \
+ type *p; \
/* ugly, but we want the fast path here */ \
if (eb->map_token && offset >= eb->map_start && \
offset + sizeof(((type *)0)->member) <= eb->map_start + \
eb->map_len) { \
- tmp = (__le##bits *)(eb->kaddr + offset - \
- eb->map_start); \
- *tmp = cpu_to_le##bits(val); \
+ p = (type *)(eb->kaddr + part_offset - eb->map_start); \
+ p->member = cpu_to_le##bits(val); \
return; \
} \
{ \
@@ -86,8 +84,8 @@ void btrfs_set_##name(struct extent_buffer *eb, \
write_eb_member(eb, s, type, member, &val); \
return; \
} \
- tmp = (__le##bits *)(kaddr + offset - map_start); \
- *tmp = cpu_to_le##bits(val); \
+ p = (type *)(kaddr + part_offset - map_start); \
+ p->member = cpu_to_le##bits(val); \
if (unmap_on_exit) \
unmap_extent_buffer(eb, map_token, KM_USER1); \
} \