diff options
author | Ben Peddell <klightspeed@killerwolves.net> | 2013-01-27 15:45:43 +1000 |
---|---|---|
committer | David Sterba <dsterba@suse.cz> | 2013-01-28 18:06:43 +0100 |
commit | 7b668965f0cf3fb8632c505a7a011189ee1a5a8e (patch) | |
tree | ed4755aba4bd2c26a232545dc2d636ac83835251 /kerncompat.h | |
parent | 272c04915252c497c64fd4036b601b82c3368bbd (diff) |
btrfs-progs: fix unaligned accesses v2
gcc optimizes out the memcpy calls at -O2 and -Os.
Replacing memcpy with memmove doesn't work - gcc treats memmove
the same way it treats memcpy.
This patch brings in {get|put}_unaligned_le{16|32|64} (using the
packed struct method), and uses them in the failing get/set calls.
On architectures where unaligned accesses are cheap, these unaligned
macros should be optimized out by the compiler.
Signed-off-by: Ben Peddell <klightspeed@killerwolves.net>
Diffstat (limited to 'kerncompat.h')
-rw-r--r-- | kerncompat.h | 13 |
1 file changed, 13 insertions, 0 deletions
diff --git a/kerncompat.h b/kerncompat.h index d60f7222..a38a9b06 100644 --- a/kerncompat.h +++ b/kerncompat.h @@ -267,6 +267,19 @@ typedef u64 __bitwise __be64; #define cpu_to_le16(x) ((__force __le16)(u16)(x)) #define le16_to_cpu(x) ((__force u16)(__le16)(x)) #endif + +struct __una_u16 { u16 x; } __attribute__((__packed__)); +struct __una_u32 { u32 x; } __attribute__((__packed__)); +struct __una_u64 { u64 x; } __attribute__((__packed__)); + +#define get_unaligned_le8(p) (*((u8 *)(p))) +#define put_unaligned_le8(val,p) ((*((u8 *)(p))) = (val)) +#define get_unaligned_le16(p) le16_to_cpu(((const struct __una_u16 *)(p))->x) +#define put_unaligned_le16(val,p) (((struct __una_u16 *)(p))->x = cpu_to_le16(val)) +#define get_unaligned_le32(p) le32_to_cpu(((const struct __una_u32 *)(p))->x) +#define put_unaligned_le32(val,p) (((struct __una_u32 *)(p))->x = cpu_to_le32(val)) +#define get_unaligned_le64(p) le64_to_cpu(((const struct __una_u64 *)(p))->x) +#define put_unaligned_le64(val,p) (((struct __una_u64 *)(p))->x = cpu_to_le64(val)) #endif #ifndef noinline |