Btrfs: unaligned access fixes

Btrfs set/get macros lose type information needed to avoid
unaligned accesses on sparc64.
Here is a patch for the kernel bits which fixes most of the
unaligned accesses on sparc64.

btrfs_name_hash is modified to return the hash value instead
of getting a return location via a (potentially unaligned)
pointer.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
This commit is contained in:
David Miller
2008-02-15 10:40:52 -05:00
committed by Chris Mason
parent 39b5637f6f
commit df68b8a7ad
5 changed files with 33 additions and 45 deletions

View File

@@ -495,22 +495,17 @@ void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
/*
 * NOTE(review): this is a unified-diff hunk whose +/- markers were lost in
 * extraction, so the pre-patch and post-patch bodies appear interleaved
 * below (e.g. `res` is declared twice in the getter).  The "removed" lines
 * kmap the page as char *, add offsetof(type, member) and dereference a
 * cast __le##bits pointer -- a potentially unaligned access on sparc64.
 * The "added" lines kmap the page as type * and access p->member directly,
 * preserving the member's type/alignment information for the compiler.
 */
#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
static inline u##bits btrfs_##name(struct extent_buffer *eb) \
{ \
char *kaddr = kmap_atomic(eb->first_page, KM_USER0); /* removed: byte pointer */ \
unsigned long offset = offsetof(type, member); /* removed */ \
u##bits res; /* removed */ \
__le##bits *tmp = (__le##bits *)(kaddr + offset); /* removed: unaligned cast */ \
res = le##bits##_to_cpu(*tmp); /* removed */ \
kunmap_atomic(kaddr, KM_USER0); /* removed */ \
type *p = kmap_atomic(eb->first_page, KM_USER0); /* added: typed pointer */ \
u##bits res = le##bits##_to_cpu(p->member); /* added: aligned member access */ \
kunmap_atomic(p, KM_USER0); /* added */ \
return res; \
} \
static inline void btrfs_set_##name(struct extent_buffer *eb, \
u##bits val) \
{ \
char *kaddr = kmap_atomic(eb->first_page, KM_USER0); /* removed: byte pointer */ \
unsigned long offset = offsetof(type, member); /* removed */ \
__le##bits *tmp = (__le##bits *)(kaddr + offset); /* removed: unaligned cast */ \
*tmp = cpu_to_le##bits(val); /* removed */ \
kunmap_atomic(kaddr, KM_USER0); /* removed */ \
type *p = kmap_atomic(eb->first_page, KM_USER0); /* added: typed pointer */ \
p->member = cpu_to_le##bits(val); /* added: aligned member store */ \
kunmap_atomic(p, KM_USER0); /* added */ \
}
#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \