Btrfs: unaligned access fixes

The Btrfs set/get macros lose the type information needed to avoid
unaligned accesses on sparc64. Here is a patch for the kernel bits
which fixes most of those accesses.
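
For context, a minimal sketch (not btrfs code; the names are made up)
of why the type matters: the on-disk structures are declared packed,
so member access through the struct type lets the compiler emit
alignment-safe loads, while casting the raw address to __le64 *
claims natural alignment and faults on sparc64.

	struct item {
		u8 pad;
		__le64 gen;	/* can land at any byte offset */
	} __attribute__((packed));

	static u64 get_gen_bad(struct item *s)
	{
		/* The cast discards the packed attribute; the compiler
		 * emits an aligned 8-byte load, which faults on sparc64
		 * whenever 'gen' is not 8-byte aligned. */
		__le64 *tmp = (__le64 *)((char *)s +
					 offsetof(struct item, gen));
		return le64_to_cpu(*tmp);
	}

	static u64 get_gen_good(struct item *s)
	{
		/* Member access keeps the type; the compiler knows the
		 * struct is packed and emits byte-wise loads that are
		 * safe at any alignment. */
		return le64_to_cpu(s->gen);
	}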

btrfs_name_hash is modified to return the hash value directly
instead of storing it through a (potentially unaligned) result
pointer.
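
That interface change is not part of the hunks below; as a rough
sketch only (the exact prototypes may differ), it has this shape:

	/* Before: the hash was written through a caller-supplied
	 * pointer, which could point at an unaligned on-disk
	 * location. */
	int btrfs_name_hash(const char *name, int len, u64 *hash_result);

	/* After: the hash is returned by value; callers store it with
	 * a type-aware assignment rather than a raw 8-byte write. */
	u64 btrfs_name_hash(const char *name, int len);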

Signed-off-by: Chris Mason <chris.mason@oracle.com>
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index c5715a6..ad03a32 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -21,16 +21,15 @@
 u##bits btrfs_##name(struct extent_buffer *eb,				\
 				   type *s)				\
 {									\
-	unsigned long offset = (unsigned long)s +			\
-				offsetof(type, member);			\
-	__le##bits *tmp;						\
+	unsigned long part_offset = (unsigned long)s;			\
+	unsigned long offset = part_offset + offsetof(type, member);	\
+	type *p;							\
 	/* ugly, but we want the fast path here */			\
 	if (eb->map_token && offset >= eb->map_start &&			\
 	    offset + sizeof(((type *)0)->member) <= eb->map_start +	\
 	    eb->map_len) {						\
-		tmp = (__le##bits *)(eb->kaddr + offset -		\
-				     eb->map_start);			\
-		return le##bits##_to_cpu(*tmp);				\
+		p = (type *)(eb->kaddr + part_offset - eb->map_start);	\
+		return le##bits##_to_cpu(p->member);			\
 	}								\
 	{								\
 		int err;						\
@@ -48,8 +47,8 @@
 			read_eb_member(eb, s, type, member, &res);	\
 			return le##bits##_to_cpu(res);			\
 		}							\
-		tmp = (__le##bits *)(kaddr + offset - map_start);	\
-		res = le##bits##_to_cpu(*tmp);				\
+		p = (type *)(kaddr + part_offset - map_start);		\
+		res = le##bits##_to_cpu(p->member);			\
 		if (unmap_on_exit)					\
 			unmap_extent_buffer(eb, map_token, KM_USER1);	\
 		return res;						\
@@ -58,16 +57,15 @@
 void btrfs_set_##name(struct extent_buffer *eb,				\
 				    type *s, u##bits val)		\
 {									\
-	unsigned long offset = (unsigned long)s +			\
-				offsetof(type, member);			\
-	__le##bits *tmp;						\
+	unsigned long part_offset = (unsigned long)s;			\
+	unsigned long offset = part_offset + offsetof(type, member);	\
+	type *p;							\
 	/* ugly, but we want the fast path here */			\
 	if (eb->map_token && offset >= eb->map_start &&			\
 	    offset + sizeof(((type *)0)->member) <= eb->map_start +	\
 	    eb->map_len) {						\
-		tmp = (__le##bits *)(eb->kaddr + offset -		\
-				     eb->map_start);			\
-		*tmp = cpu_to_le##bits(val);				\
+		p = (type *)(eb->kaddr + part_offset - eb->map_start);	\
+		p->member = cpu_to_le##bits(val);			\
 		return;							\
 	}								\
 	{								\
@@ -86,8 +84,8 @@
 			write_eb_member(eb, s, type, member, &val);	\
 			return;						\
 		}							\
-		tmp = (__le##bits *)(kaddr + offset - map_start);	\
-		*tmp = cpu_to_le##bits(val);				\
+		p = (type *)(kaddr + part_offset - map_start);		\
+		p->member = cpu_to_le##bits(val);			\
 		if (unmap_on_exit)					\
 			unmap_extent_buffer(eb, map_token, KM_USER1);	\
 	}								\
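
For reference, the accessors patched above are generated by
instantiations in ctree.h along these lines (one example, shown as a
sketch):

	/* Generates btrfs_inode_generation() and
	 * btrfs_set_inode_generation(), which read and write the
	 * 'generation' member of a (possibly unaligned)
	 * struct btrfs_inode_item inside an extent buffer. */
	BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item,
			   generation, 64);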