author     Kent Overstreet <kent.overstreet@linux.dev>    2022-11-24 03:12:22 -0500
committer  Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:09:47 -0400
commit     e88a75ebe86c1df42f0ca9ab6e8fa50db26e7cef
tree       3b8608b0ae6e06d405bf6ef63e098416c68830db /fs/bcachefs/bkey.h
parent     e15382125948523cd5c887c5fe4fa4303e9a9dc1
bcachefs: New bpos_cmp(), bkey_cmp() replacements
This patch introduces
 - bpos_eq()
 - bpos_lt()
 - bpos_le()
 - bpos_gt()
 - bpos_ge()
and equivalent replacements for bkey_cmp().

Looking at the generated assembly these could probably be improved
further, but we already see a significant code size improvement with
this patch.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
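[Editor's note, not part of the commit: a minimal standalone sketch of the
call-site conversion the patch performs. It uses a simplified stand-in for
struct bpos (field widths illustrative) and assumes the kernel's cmp_int()
macro, ((l > r) - (l < r)); the bpos_min()/bpos_max() hunk in the diff below
is converted exactly this way.]

/* Standalone sketch, not kernel code: old three-way comparison vs the
 * new dedicated predicate, side by side.  Builds with GCC/Clang (the
 * a ?: b "elvis" operator is a GNU extension, as used in the kernel). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define cmp_int(l, r)	(((l) > (r)) - ((l) < (r)))	/* as in the kernel */

struct bpos { uint64_t inode, offset; uint32_t snapshot; };

/* old style: materialize a -1/0/1 result, then test its sign */
static int bpos_cmp(struct bpos l, struct bpos r)
{
	return cmp_int(l.inode,    r.inode) ?:
	       cmp_int(l.offset,   r.offset) ?:
	       cmp_int(l.snapshot, r.snapshot);
}

/* new style: answer the one question the caller actually asks */
static bool bpos_lt(struct bpos l, struct bpos r)
{
	return l.inode != r.inode ? l.inode < r.inode :
	       l.offset != r.offset ? l.offset < r.offset :
	       l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
}

static struct bpos bpos_min(struct bpos l, struct bpos r)
{
	return bpos_lt(l, r) ? l : r;	/* was: bpos_cmp(l, r) < 0 ? l : r */
}

int main(void)
{
	struct bpos a = { .inode = 1, .offset = 10 };
	struct bpos b = { .inode = 1, .offset = 20 };

	printf("cmp < 0: %d, lt: %d, min.offset: %llu\n",
	       bpos_cmp(a, b) < 0, bpos_lt(a, b),
	       (unsigned long long)bpos_min(a, b).offset);
	return 0;
}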
Diffstat (limited to 'fs/bcachefs/bkey.h')
-rw-r--r--   fs/bcachefs/bkey.h   65
1 file changed, 63 insertions(+), 2 deletions(-)
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index df8189476016..dc2b91bc67f3 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -144,6 +144,37 @@ static inline int bkey_cmp_left_packed_byval(const struct btree *b,
return bkey_cmp_left_packed(b, l, &r);
}
+static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
+{
+ return !((l.inode ^ r.inode) |
+ (l.offset ^ r.offset) |
+ (l.snapshot ^ r.snapshot));
+}
+
+static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
+{
+ return l.inode != r.inode ? l.inode < r.inode :
+ l.offset != r.offset ? l.offset < r.offset :
+ l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
+}
+
+static __always_inline bool bpos_le(struct bpos l, struct bpos r)
+{
+ return l.inode != r.inode ? l.inode < r.inode :
+ l.offset != r.offset ? l.offset < r.offset :
+ l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
+}
+
+static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
+{
+ return bpos_lt(r, l);
+}
+
+static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
+{
+ return bpos_le(r, l);
+}
+
static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
{
return cmp_int(l.inode, r.inode) ?:
@@ -151,6 +182,36 @@ static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
cmp_int(l.snapshot, r.snapshot);
}
+static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
+{
+ return !((l.inode ^ r.inode) |
+ (l.offset ^ r.offset));
+}
+
+static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
+{
+ return l.inode != r.inode
+ ? l.inode < r.inode
+ : l.offset < r.offset;
+}
+
+static __always_inline bool bkey_le(struct bpos l, struct bpos r)
+{
+ return l.inode != r.inode
+ ? l.inode < r.inode
+ : l.offset <= r.offset;
+}
+
+static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
+{
+ return bkey_lt(r, l);
+}
+
+static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
+{
+ return bkey_le(r, l);
+}
+
static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
{
return cmp_int(l.inode, r.inode) ?:
@@ -159,12 +220,12 @@ static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
static inline struct bpos bpos_min(struct bpos l, struct bpos r)
{
- return bpos_cmp(l, r) < 0 ? l : r;
+ return bpos_lt(l, r) ? l : r;
}
static inline struct bpos bpos_max(struct bpos l, struct bpos r)
{
- return bpos_cmp(l, r) > 0 ? l : r;
+ return bpos_gt(l, r) ? l : r;
}
void bch2_bpos_swab(struct bpos *);
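
[Editor's note, not part of the commit: the new bkey_* predicates compare
only inode and offset, matching the existing bkey_cmp() semantics, while the
bpos_* predicates also compare snapshot; the eq variants use a branchless
XOR/OR reduction tested against zero rather than field-by-field branches.
A standalone sketch of both points, with a simplified struct bpos as above:]

/* Standalone sketch, not kernel code: bkey_eq() deliberately ignores
 * the snapshot field; bpos_eq() compares all three fields. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct bpos { uint64_t inode, offset; uint32_t snapshot; };

static bool bpos_eq(struct bpos l, struct bpos r)
{
	/* branchless: XOR each field, OR the differences, test for zero */
	return !((l.inode    ^ r.inode) |
		 (l.offset   ^ r.offset) |
		 (l.snapshot ^ r.snapshot));
}

static bool bkey_eq(struct bpos l, struct bpos r)
{
	return !((l.inode  ^ r.inode) |
		 (l.offset ^ r.offset));
}

int main(void)
{
	struct bpos a = { .inode = 1, .offset = 8, .snapshot = 1 };
	struct bpos b = { .inode = 1, .offset = 8, .snapshot = 2 };

	assert(bkey_eq(a, b));		/* same key: snapshot not compared */
	assert(!bpos_eq(a, b));		/* different position: snapshot differs */
	return 0;
}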