Diffstat (limited to 'include')
 include/linux/six.h             |  58
 include/trace/events/bcachefs.h | 175
 2 files changed, 159 insertions(+), 74 deletions(-)
diff --git a/include/linux/six.h b/include/linux/six.h
index 40e213f2fb40..0fb1b2f49345 100644
--- a/include/linux/six.h
+++ b/include/linux/six.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SIX_H
#define _LINUX_SIX_H
@@ -50,12 +50,12 @@
* six_trylock_convert(lock, from, to)
*
 * A lock may be held multiple times by the same thread (for read or intent,
- * not write) - up to SIX_LOCK_MAX_RECURSE. However, the six locks code does
- * _not_ implement the actual recursive checks itself though - rather, if your
- * code (e.g. btree iterator code) knows that the current thread already has a
- * lock held, and for the correct type, six_lock_increment() may be used to
- * bump up the counter for that type - the only effect is that one more call to
- * unlock will be required before the lock is unlocked.
+ * not write). However, the six locks code does _not_ implement the actual
+ * recursive checks itself - rather, if your code (e.g. btree iterator
+ * code) knows that the current thread already has a lock held, and for the
+ * correct type, six_lock_increment() may be used to bump up the counter for
+ * that type - the only effect is that one more call to unlock will be required
+ * before the lock is unlocked.
*/
#include <linux/lockdep.h>
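
As a usage illustration (not part of the patch): the pattern the comment
describes, where the caller's own bookkeeping proves the lock is already
held. btree_node_locked(), iter and node are hypothetical stand-ins; the
six_* calls are the ones declared in this header.

	if (btree_node_locked(iter, node))
		/* already held for read: just bump the count */
		six_lock_increment(&node->lock, SIX_LOCK_read);
	else
		six_lock_read(&node->lock);

	/* every acquisition or increment needs its own unlock: */
	six_unlock_read(&node->lock);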
@@ -80,8 +80,8 @@ union six_lock_state {
};
struct {
- unsigned read_lock:26;
- unsigned intent_lock:3;
+ unsigned read_lock:28;
+ unsigned intent_lock:1;
unsigned waiters:3;
/*
* seq works much like in seqlocks: it's incremented every time
@@ -96,8 +96,6 @@ union six_lock_state {
};
};
-#define SIX_LOCK_MAX_RECURSE ((1 << 3) - 1)
-
enum six_lock_type {
SIX_LOCK_read,
SIX_LOCK_intent,
@@ -106,6 +104,7 @@ enum six_lock_type {
struct six_lock {
union six_lock_state state;
+ unsigned intent_lock_recurse;
struct task_struct *owner;
struct optimistic_spin_queue osq;
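
The repacking above is the point of this change: intent locks exclude one
another, so the atomic state word only needs a single bit to record "intent
held", and the recursion count moves to the new intent_lock_recurse field,
which only the lock owner touches. A standalone sketch of that layout (an
illustration, not the kernel definition):

	#include <stdint.h>
	#include <stdio.h>

	struct demo_lock {
		union {
			uint64_t v;	/* updated atomically in the kernel */
			struct {
				uint64_t read_lock:28;	/* reader count */
				uint64_t intent_lock:1;	/* held/not held */
				uint64_t waiters:3;
				/* write_locking, seq, ... fill the rest */
			};
		} state;
		unsigned intent_lock_recurse;	/* owner-only, no atomics */
	};

	int main(void)
	{
		struct demo_lock l = { .state.v = 0 };

		l.state.intent_lock = 1;	/* first intent acquire */
		l.intent_lock_recurse++;	/* recursive acquire: plain add */

		printf("state %#llx recurse %u\n",
		       (unsigned long long)l.state.v, l.intent_lock_recurse);
		return 0;
	}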
@@ -139,8 +138,6 @@ do { \
#define __SIX_VAL(field, _v) (((union six_lock_state) { .field = _v }).v)
-#ifdef SIX_LOCK_SEPARATE_LOCKFNS
-
#define __SIX_LOCK(type) \
bool six_trylock_##type(struct six_lock *); \
bool six_relock_##type(struct six_lock *, u32); \
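
The u32 argument to six_relock_*() is the sequence number from the lock's
state word: it lets a caller drop the lock, do unrelated work, and retake
it only if nothing changed in between. A hedged sketch of that pattern
('node' and its ->lock member are hypothetical callers of this API):

	u32 seq = node->lock.state.seq;

	six_unlock_read(&node->lock);
	/* ... work that doesn't need the lock ... */

	if (!six_relock_read(&node->lock, seq)) {
		/* seq changed: node was modified, take the slow path */
		six_lock_read(&node->lock);
		/* caller must revalidate anything derived from node */
	}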
@@ -185,41 +182,6 @@ static inline void six_unlock_type(struct six_lock *lock, enum six_lock_type typ
SIX_LOCK_DISPATCH(type, six_unlock, lock);
}
-#else
-
-bool six_trylock_type(struct six_lock *, enum six_lock_type);
-bool six_relock_type(struct six_lock *, enum six_lock_type, unsigned);
-void six_lock_type(struct six_lock *, enum six_lock_type);
-void six_unlock_type(struct six_lock *, enum six_lock_type);
-
-#define __SIX_LOCK(type) \
-static __always_inline bool six_trylock_##type(struct six_lock *lock) \
-{ \
- return six_trylock_type(lock, SIX_LOCK_##type); \
-} \
- \
-static __always_inline bool six_relock_##type(struct six_lock *lock, u32 seq)\
-{ \
- return six_relock_type(lock, SIX_LOCK_##type, seq); \
-} \
- \
-static __always_inline void six_lock_##type(struct six_lock *lock) \
-{ \
- six_lock_type(lock, SIX_LOCK_##type); \
-} \
- \
-static __always_inline void six_unlock_##type(struct six_lock *lock) \
-{ \
- six_unlock_type(lock, SIX_LOCK_##type); \
-}
-
-__SIX_LOCK(read)
-__SIX_LOCK(intent)
-__SIX_LOCK(write)
-#undef __SIX_LOCK
-
-#endif
-
void six_lock_downgrade(struct six_lock *);
bool six_lock_tryupgrade(struct six_lock *);
bool six_trylock_convert(struct six_lock *, enum six_lock_type,
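
Together these form the conversion API. A hedged sketch of the common
upgrade path, read to intent, with the fallback when the atomic upgrade
fails (the helper itself is hypothetical):

	static bool upgrade_or_retake(struct six_lock *lock)
	{
		if (six_lock_tryupgrade(lock))
			return true;	/* held continuously, state still valid */

		/* couldn't upgrade atomically: drop and retake (may block) */
		six_unlock_read(lock);
		six_lock_intent(lock);
		return false;		/* caller must revalidate */
	}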
diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h
index 026ad55bf80c..d7e898b02491 100644
--- a/include/trace/events/bcachefs.h
+++ b/include/trace/events/bcachefs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs
@@ -43,21 +44,6 @@ DECLARE_EVENT_CLASS(bkey,
__entry->offset, __entry->size)
);
-DECLARE_EVENT_CLASS(bch_dev,
- TP_PROTO(struct bch_dev *ca),
- TP_ARGS(ca),
-
- TP_STRUCT__entry(
- __array(char, uuid, 16 )
- ),
-
- TP_fast_assign(
- memcpy(__entry->uuid, ca->uuid.b, 16);
- ),
-
- TP_printk("%pU", __entry->uuid)
-);
-
DECLARE_EVENT_CLASS(bch_fs,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c),
@@ -179,7 +165,7 @@ TRACE_EVENT(btree_write,
TP_ARGS(b, bytes, sectors),
TP_STRUCT__entry(
- __field(enum bkey_type, type)
+ __field(enum btree_node_type, type)
__field(unsigned, bytes )
__field(unsigned, sectors )
),
@@ -296,6 +282,11 @@ DEFINE_EVENT(btree_node, btree_compact,
TP_ARGS(c, b)
);
+DEFINE_EVENT(btree_node, btree_merge,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b)
+);
+
DEFINE_EVENT(btree_node, btree_set_root,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
@@ -355,16 +346,6 @@ DEFINE_EVENT(bch_fs, gc_coalesce_end,
TP_ARGS(c)
);
-DEFINE_EVENT(bch_dev, sectors_saturated,
- TP_PROTO(struct bch_dev *ca),
- TP_ARGS(ca)
-);
-
-DEFINE_EVENT(bch_fs, gc_sectors_saturated,
- TP_PROTO(struct bch_fs *c),
- TP_ARGS(c)
-);
-
DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
@@ -518,6 +499,148 @@ TRACE_EVENT(copygc,
__entry->buckets_moved, __entry->buckets_not_moved)
);
+DECLARE_EVENT_CLASS(transaction_restart,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, ip )
+ ),
+
+ TP_fast_assign(
+ __entry->ip = ip;
+ ),
+
+ TP_printk("%pf", (void *) __entry->ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_btree_node_reused,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_would_deadlock,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+TRACE_EVENT(trans_restart_iters_realloced,
+ TP_PROTO(unsigned long ip, unsigned nr),
+ TP_ARGS(ip, nr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, ip )
+ __field(unsigned, nr )
+ ),
+
+ TP_fast_assign(
+ __entry->ip = ip;
+ __entry->nr = nr;
+ ),
+
+ TP_printk("%pf nr %u", (void *) __entry->ip, __entry->nr)
+);
+
+TRACE_EVENT(trans_restart_mem_realloced,
+ TP_PROTO(unsigned long ip, unsigned long bytes),
+ TP_ARGS(ip, bytes),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, ip )
+ __field(unsigned long, bytes )
+ ),
+
+ TP_fast_assign(
+ __entry->ip = ip;
+ __entry->bytes = bytes;
+ ),
+
+ TP_printk("%pf bytes %lu", (void *) __entry->ip, __entry->bytes)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_journal_res_get,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_journal_preres_get,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_mark_replicas,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_fault_inject,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_btree_node_split,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_mark,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_upgrade,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_iter_upgrade,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_traverse,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DEFINE_EVENT(transaction_restart, trans_restart_atomic,
+ TP_PROTO(unsigned long ip),
+ TP_ARGS(ip)
+);
+
+DECLARE_EVENT_CLASS(node_lock_fail,
+ TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
+ TP_ARGS(level, iter_seq, node, node_seq),
+
+ TP_STRUCT__entry(
+ __field(u32, level)
+ __field(u32, iter_seq)
+ __field(u32, node)
+ __field(u32, node_seq)
+ ),
+
+ TP_fast_assign(
+ __entry->level = level;
+ __entry->iter_seq = iter_seq;
+ __entry->node = node;
+ __entry->node_seq = node_seq;
+ ),
+
+ TP_printk("level %u iter seq %u node %u node seq %u",
+ __entry->level, __entry->iter_seq,
+ __entry->node, __entry->node_seq)
+);
+
+DEFINE_EVENT(node_lock_fail, node_upgrade_fail,
+ TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
+ TP_ARGS(level, iter_seq, node, node_seq)
+);
+
+DEFINE_EVENT(node_lock_fail, node_relock_fail,
+ TP_PROTO(unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
+ TP_ARGS(level, iter_seq, node, node_seq)
+);
+
#endif /* _TRACE_BCACHE_H */
/* This part must be outside protection */
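
For reference, DEFINE_EVENT() generates a trace_<name>() helper for each
event above. A sketch of how a call site in fs/bcachefs might record a
restart (the surrounding function is hypothetical; _RET_IP_ is the standard
kernel macro for the caller's return address):

	static int btree_trans_want_upgrade(struct btree_trans *trans)
	{
		/* record where the restart was triggered from */
		trace_trans_restart_upgrade(_RET_IP_);
		return -EINTR;	/* hypothetical "restart the transaction" */
	}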