Diffstat (limited to 'libbcache/btree_locking.h')
-rw-r--r--  libbcache/btree_locking.h | 119
1 file changed, 119 insertions(+), 0 deletions(-)
diff --git a/libbcache/btree_locking.h b/libbcache/btree_locking.h
new file mode 100644
index 0000000..76f85c0
--- /dev/null
+++ b/libbcache/btree_locking.h
@@ -0,0 +1,119 @@
+#ifndef _BCACHE_BTREE_LOCKING_H
+#define _BCACHE_BTREE_LOCKING_H
+
+/*
+ * Only for internal btree use:
+ *
+ * The btree iterator tracks which locks it wants to take and which locks it
+ * currently holds; here we have wrappers for locking/unlocking btree nodes
+ * and updating the iterator state.
+ */
+
+#include "btree_iter.h"
+#include "six.h"
+
+/* matches six lock types */
+enum btree_node_locked_type {
+ BTREE_NODE_UNLOCKED = -1,
+ BTREE_NODE_READ_LOCKED = SIX_LOCK_read,
+ BTREE_NODE_INTENT_LOCKED = SIX_LOCK_intent,
+};
+
+static inline int btree_node_locked_type(struct btree_iter *iter,
+ unsigned level)
+{
+	/*
+	 * We're relying on the fact that if nodes_intent_locked is set,
+	 * nodes_locked must be set as well, so that we can compute the lock
+	 * type without branches:
+	 */
+ return BTREE_NODE_UNLOCKED +
+ ((iter->nodes_locked >> level) & 1) +
+ ((iter->nodes_intent_locked >> level) & 1);
+}
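+
+/*
+ * A worked example of the branchless computation above: with neither bit
+ * set, -1 + 0 + 0 == BTREE_NODE_UNLOCKED; a read lock sets only the
+ * nodes_locked bit, -1 + 1 + 0 == 0 == BTREE_NODE_READ_LOCKED; an intent
+ * lock sets both bits, -1 + 1 + 1 == 1 == BTREE_NODE_INTENT_LOCKED.
+ */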
+
+static inline bool btree_node_intent_locked(struct btree_iter *iter,
+ unsigned level)
+{
+ return btree_node_locked_type(iter, level) == BTREE_NODE_INTENT_LOCKED;
+}
+
+static inline bool btree_node_read_locked(struct btree_iter *iter,
+ unsigned level)
+{
+ return btree_node_locked_type(iter, level) == BTREE_NODE_READ_LOCKED;
+}
+
+static inline bool btree_node_locked(struct btree_iter *iter, unsigned level)
+{
+ return iter->nodes_locked & (1 << level);
+}
+
+static inline void mark_btree_node_unlocked(struct btree_iter *iter,
+ unsigned level)
+{
+ iter->nodes_locked &= ~(1 << level);
+ iter->nodes_intent_locked &= ~(1 << level);
+}
+
+static inline void mark_btree_node_locked(struct btree_iter *iter,
+ unsigned level,
+ enum six_lock_type type)
+{
+ /* relying on this to avoid a branch */
+ BUILD_BUG_ON(SIX_LOCK_read != 0);
+ BUILD_BUG_ON(SIX_LOCK_intent != 1);
+
+ iter->nodes_locked |= 1 << level;
+ iter->nodes_intent_locked |= type << level;
+}
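+
+/*
+ * For example, intent locking level 2 sets bit 2 in both bitmasks
+ * (1 << 2, and SIX_LOCK_intent << 2 with SIX_LOCK_intent == 1), while
+ * read locking sets bit 2 only in nodes_locked (SIX_LOCK_read << 2 == 0) -
+ * exactly the invariant btree_node_locked_type() depends on.
+ */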
+
+static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
+ unsigned level)
+{
+ mark_btree_node_locked(iter, level, SIX_LOCK_intent);
+}
+
+static inline enum six_lock_type
+btree_lock_want(struct btree_iter *iter, int level)
+{
+ return level < iter->locks_want
+ ? SIX_LOCK_intent
+ : SIX_LOCK_read;
+}
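+
+/*
+ * e.g. with locks_want == 1, level 0 (the leaf) is taken with an intent
+ * lock and all interior levels with read locks.
+ */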
+
+static inline bool btree_want_intent(struct btree_iter *iter, int level)
+{
+ return btree_lock_want(iter, level) == SIX_LOCK_intent;
+}
+
+static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
+{
+ int lock_type = btree_node_locked_type(iter, level);
+
+ if (lock_type != BTREE_NODE_UNLOCKED)
+ six_unlock_type(&iter->nodes[level]->lock, lock_type);
+ mark_btree_node_unlocked(iter, level);
+}
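+
+/*
+ * Note that this is safe to call on a level that isn't locked: the
+ * six_unlock is guarded, and clearing already-clear bits is a no-op.
+ */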
+
+bool __bch_btree_node_lock(struct btree *, struct bpos, unsigned,
+ struct btree_iter *, enum six_lock_type);
+
+static inline bool btree_node_lock(struct btree *b, struct bpos pos,
+ unsigned level,
+ struct btree_iter *iter,
+ enum six_lock_type type)
+{
+ return likely(six_trylock_type(&b->lock, type)) ||
+ __bch_btree_node_lock(b, pos, level, iter, type);
+}
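+
+/*
+ * six_trylock_type() is the uncontended fast path; on failure we fall
+ * back to the out-of-line __bch_btree_node_lock(), which (judging from
+ * its bpos/level arguments) is where blocking and lock ordering for
+ * deadlock avoidance are handled.
+ */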
+
+bool btree_node_relock(struct btree_iter *, unsigned);
+
+void btree_node_unlock_write(struct btree *, struct btree_iter *);
+void btree_node_lock_write(struct btree *, struct btree_iter *);
+
+void __btree_node_unlock_write(struct btree *, struct btree_iter *);
+void __btree_node_lock_write(struct btree *, struct btree_iter *);
+
+#endif /* _BCACHE_BTREE_LOCKING_H */
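
For context, a minimal sketch (hypothetical, not part of the patch) of how
these wrappers are intended to compose during traversal: pick the wanted lock
type, take the lock via the trylock fast path, and record it in the iterator
so btree_node_unlock() can later release it. The function name
traverse_lock_node is invented for illustration.

static bool traverse_lock_node(struct btree_iter *iter, struct btree *b,
			       struct bpos pos, unsigned level)
{
	enum six_lock_type type = btree_lock_want(iter, level);

	/* Trylock fast path, blocking slow path on contention: */
	if (!btree_node_lock(b, pos, level, iter, type))
		return false;	/* assumed: lock ordering failure, caller restarts */

	/* Record the node and what we hold, for btree_node_unlock(): */
	iter->nodes[level] = b;
	mark_btree_node_locked(iter, level, type);
	return true;
}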