summary refs log tree commit diff
path: root/kernel-lib
diff options
context:
space:
mode:
author	Nikolay Borisov <nborisov@suse.com>	2018-10-01 17:46:15 +0300
committer	David Sterba <dsterba@suse.com>	2018-10-23 15:51:17 +0200
commit	a9ce9286f24b299ea2a8465d89cee659c3f5dcf1 (patch)
tree	e1666469469bb4b821981e71c9b2e284f04e32d1 /kernel-lib
parent	aa3088632a05361bc7e5dd16055f6c01f8ecbd2e (diff)
btrfs-progs: Implement find_*_bit_le operations
This commit introduces explicit little-endian bit operations. The only
difference from the existing bitops implementation is that bswap(32|64)
is called when the _le versions are invoked on a big-endian machine.
This is in preparation for adding free space tree conversion support.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'kernel-lib')
-rw-r--r--kernel-lib/bitops.h82
1 file changed, 82 insertions(+), 0 deletions(-)
diff --git a/kernel-lib/bitops.h b/kernel-lib/bitops.h
index a9b22f24..b1fd6f53 100644
--- a/kernel-lib/bitops.h
+++ b/kernel-lib/bitops.h
@@ -2,6 +2,7 @@
#define _PERF_LINUX_BITOPS_H_
#include <linux/kernel.h>
+#include <endian.h>
#include "internal.h"
#ifndef DIV_ROUND_UP
@@ -170,5 +171,86 @@ static inline unsigned long find_next_zero_bit(const unsigned long *addr,
}
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+
+/*
+ * Byte-swap one bitmap word so that little-endian bit numbering can be
+ * applied to it on a big-endian host.  Picks bswap64/bswap32 to match
+ * the word size; any other BITS_PER_LONG is a build error.
+ */
+static inline unsigned long ext2_swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+ return (unsigned long) bswap64((u64) y);
+#elif BITS_PER_LONG == 32
+ return (unsigned long) bswap32((u32) y);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+/*
+ * Core worker for the little-endian bit searches below.
+ *
+ * addr1:  bitmap to search
+ * addr2:  optional second bitmap AND-ed with addr1 (pass NULL to ignore)
+ * nbits:  total bitmap size in bits
+ * start:  bit offset to begin the search at
+ * invert: 0UL to search for set bits, ~0UL to search for zero bits
+ *         (each word is XOR-ed with this, turning a zero-bit search
+ *         into a set-bit search)
+ *
+ * Returns the index of the first matching bit at or after 'start', or
+ * 'nbits' if there is none.  Words are byte-swapped via ext2_swab()
+ * only where bit-position masks are applied, so memory is interpreted
+ * with little-endian bit numbering on a big-endian host.
+ */
+static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long nbits,
+ unsigned long start, unsigned long invert)
+{
+ unsigned long tmp;
+
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr1[start / BITS_PER_LONG];
+ if (addr2)
+ tmp &= addr2[start / BITS_PER_LONG];
+ tmp ^= invert;
+
+ /* Handle 1st word: mask off bits below 'start' (mask swabbed to LE). */
+ tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
+ start = round_down(start, BITS_PER_LONG);
+
+ /* Skip whole words with no candidate bit. */
+ while (!tmp) {
+ start += BITS_PER_LONG;
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr1[start / BITS_PER_LONG];
+ if (addr2)
+ tmp &= addr2[start / BITS_PER_LONG];
+ tmp ^= invert;
+ }
+
+ /* min() clamps a hit in the (partial) last word back to nbits. */
+ return min(start + __ffs(ext2_swab(tmp)), nbits);
+}
+
+/*
+ * Find the next zero bit in a little-endian bitmap, starting at bit
+ * 'offset'; returns 'size' if none.  Must be 'static inline': these are
+ * defined in a header, and the little-endian wrappers in the #else
+ * branch already are — without it, including this header from more than
+ * one translation unit causes multiple-definition link errors.
+ */
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
+}
+
+/* Find the next set bit in a little-endian bitmap; 'size' if none. */
+static inline unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return _find_next_bit_le(addr, NULL, size, offset, 0UL);
+}
+
+/*
+ * Provided for symmetry with the little-endian #else branch, which
+ * defines find_first_zero_bit_le(); without this, big-endian builds
+ * lack the symbol.
+ */
+static inline unsigned long find_first_zero_bit_le(const void *addr,
+ unsigned long size)
+{
+ return find_next_zero_bit_le(addr, size, 0);
+}
+
+#else
+
+/* Native bit order is already little endian: forward to the plain op. */
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
+
+/* Native bit order is already little endian: forward to the plain op. */
+static inline unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+
+/* First zero bit == next zero bit from offset 0 (native order is LE). */
+static inline unsigned long find_first_zero_bit_le(const void *addr,
+ unsigned long size)
+{
+ return find_first_zero_bit(addr, size);
+}
+
+#endif
#endif