author		Kent Overstreet <kent.overstreet@gmail.com>	2015-06-15 00:33:53 -0700
committer	Kent Overstreet <kent.overstreet@gmail.com>	2016-10-07 12:34:27 -0800
commit		3f2639eeb21c5bfd02fda2b7913aa83b75ca65a4 (patch)
tree		97448f7a8df2df59a58258f679b8592ea46f0888
parent		8832ebd3fed5a38c8a00d6af3dcb4c14e41e52c0 (diff)
bcache: Add six_lock_type() etc.
-rw-r--r--	drivers/md/bcache/btree.c	6
-rw-r--r--	drivers/md/bcache/six.c		147
-rw-r--r--	drivers/md/bcache/six.h		132
3 files changed, 165 insertions, 120 deletions
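
The patch replaces the macro-parameterized entry points (__six_trylock(), __six_relock(), __six_lock(), __six_unlock(), each taking raw lock_val/lock_fail words) with functions parameterized on enum six_lock_type. A minimal caller-side sketch of the resulting interface, based only on the declarations this patch adds to six.h (the demo function name is illustrative, not part of the patch):

/* Sketch: exercising the new type-parameterized interface. */
#include "six.h"

static void six_api_demo(struct six_lock *lock)
{
	/* Generic entry points, parameterized on the lock type: */
	six_lock_type(lock, SIX_LOCK_intent);
	six_unlock_type(lock, SIX_LOCK_intent);

	/* The per-type wrappers remain; __SIX_LOCK() now generates them
	 * as one-line calls into the generic helpers: */
	if (six_trylock_read(lock))
		six_unlock_read(lock);
}
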
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 4f982e164243..4768860b0139 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -151,7 +151,7 @@ static bool btree_lock_upgrade(struct btree_iter *iter, unsigned level)
return true;
if (btree_node_locked(iter, level)
- ? six_trylock_convert(&b->lock, read, intent)
+ ? six_trylock_convert(&b->lock, SIX_LOCK_read, SIX_LOCK_intent)
: six_relock_intent(&b->lock, iter->lock_seq[level])) {
mark_btree_node_intent_locked(iter, level);
trace_bcache_btree_upgrade_lock(b, iter);
@@ -1506,7 +1506,9 @@ static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter,
mark_btree_node_intent_locked(iter, level);
} else {
mark_btree_node_read_locked(iter, level);
- BUG_ON(!six_trylock_convert(&b->lock, intent, read));
+ BUG_ON(!six_trylock_convert(&b->lock,
+ SIX_LOCK_intent,
+ SIX_LOCK_read));
}
return b;
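
Both btree.c call sites above now pass enum values to six_trylock_convert(), which this patch turns from a macro into a real function. A hedged sketch of the read-to-intent upgrade pattern the first hunk relies on (the helper name and the fallback policy are illustrative only):

/* Sketch: upgrade a held read lock to an intent lock, as in
 * btree_lock_upgrade() above. */
#include "six.h"

static void upgrade_demo(struct six_lock *lock)
{
	if (six_trylock_convert(lock, SIX_LOCK_read, SIX_LOCK_intent)) {
		/* Atomically dropped the read lock and took intent. */
		six_unlock_intent(lock);
		return;
	}

	/* Conversion can fail if someone else holds an intent lock;
	 * fall back to dropping the read lock and blocking for intent: */
	six_unlock_read(lock);
	six_lock_intent(lock);
	six_unlock_intent(lock);
}
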
diff --git a/drivers/md/bcache/six.c b/drivers/md/bcache/six.c
index b4b2e0507a29..46e393d31933 100644
--- a/drivers/md/bcache/six.c
+++ b/drivers/md/bcache/six.c
@@ -1,48 +1,87 @@
#include "six.h"
-bool __six_trylock(struct six_lock *lock,
- unsigned long lock_val,
- unsigned long lock_fail)
+/* Number of times to trylock() before sleeping in six_lock(): */
+#define SIX_LOCK_SPIN_COUNT 1
+
+#define __SIX_LOCK_HELD_read __SIX_VAL(read_lock, ~0)
+#define __SIX_LOCK_HELD_intent __SIX_VAL(intent_lock, ~0)
+#define __SIX_LOCK_HELD_write __SIX_VAL(seq, 1)
+
+struct six_lock_vals {
+ /* Value we add to the lock in order to take the lock: */
+ unsigned long lock_val;
+
+ /* If the lock has this value (used as a mask), taking the lock fails: */
+ unsigned long lock_fail;
+
+ /* Value we add to the lock in order to release the lock: */
+ unsigned long unlock_val;
+
+ /* Waitlist we wakeup when releasing the lock: */
+ enum six_lock_type unlock_wakeup;
+};
+
+#define LOCK_VALS { \
+ [SIX_LOCK_read] = { \
+ .lock_val = __SIX_VAL(read_lock, 1), \
+ .lock_fail = __SIX_LOCK_HELD_write, \
+ .unlock_val = -__SIX_VAL(read_lock, 1), \
+ .unlock_wakeup = SIX_LOCK_write, \
+ }, \
+ [SIX_LOCK_intent] = { \
+ .lock_val = __SIX_VAL(intent_lock, 1), \
+ .lock_fail = __SIX_LOCK_HELD_intent, \
+ .unlock_val = -__SIX_VAL(intent_lock, 1), \
+ .unlock_wakeup = SIX_LOCK_intent, \
+ }, \
+ [SIX_LOCK_write] = { \
+ .lock_val = __SIX_VAL(seq, 1), \
+ .lock_fail = __SIX_LOCK_HELD_read, \
+ .unlock_val = __SIX_VAL(seq, 1), \
+ .unlock_wakeup = SIX_LOCK_read, \
+ }, \
+}
+
+bool __six_trylock_type(struct six_lock *lock, enum six_lock_type type)
{
+ const struct six_lock_vals l[] = LOCK_VALS;
union six_lock_state old;
unsigned long v = lock->state.v;
do {
old.v = v;
- EBUG_ON(lock_val == __SIX_LOCK_VAL_write &&
+ EBUG_ON(type == SIX_LOCK_write &&
((old.v & __SIX_LOCK_HELD_write) ||
!(old.v & __SIX_LOCK_HELD_intent)));
- if (old.v & lock_fail)
+ if (old.v & l[type].lock_fail)
return false;
} while ((v = cmpxchg(&lock->state.v,
old.v,
- old.v + lock_val)) != old.v);
+ old.v + l[type].lock_val)) != old.v);
return true;
}
-bool __six_relock(struct six_lock *lock,
- unsigned long lock_val,
- unsigned long lock_fail,
- unsigned seq)
+bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
+ unsigned seq)
{
- union six_lock_state old = lock->state;
- unsigned long v;
+ const struct six_lock_vals l[] = LOCK_VALS;
+ union six_lock_state old;
+ unsigned long v = lock->state.v;
- while (1) {
- if (old.seq != seq ||
- old.v & lock_fail)
- return false;
+ do {
+ old.v = v;
- v = cmpxchg(&lock->state.v, old.v, old.v + lock_val);
- if (v == old.v)
- return true;
+ if (old.seq != seq || old.v & l[type].lock_fail)
+ return false;
+ } while ((v = cmpxchg(&lock->state.v,
+ old.v,
+ old.v + l[type].lock_val)) != old.v);
- old.v = v;
- }
+ return true;
}
struct six_lock_waiter {
@@ -53,16 +92,13 @@ struct six_lock_waiter {
/* This is probably up there with the more evil things I've done */
#define waitlist_bitnr(id) ilog2(__SIX_VAL(waiters, 1 << (id)))
-void __six_lock(struct six_lock *lock,
- unsigned long lock_val,
- unsigned long lock_fail,
- unsigned waitlist_id)
+void __six_lock_type(struct six_lock *lock, enum six_lock_type type)
{
struct six_lock_waiter wait;
unsigned i;
for (i = 0; i < SIX_LOCK_SPIN_COUNT; i++) {
- if (__six_trylock(lock, lock_val, lock_fail))
+ if (__six_trylock_type(lock, type))
return;
cpu_relax();
}
@@ -74,15 +110,14 @@ void __six_lock(struct six_lock *lock,
set_current_state(TASK_UNINTERRUPTIBLE);
if (list_empty(&wait.list)) {
spin_lock(&lock->wait_lock);
- list_add_tail(&wait.list,
- &lock->wait_list[waitlist_id]);
+ list_add_tail(&wait.list, &lock->wait_list[type]);
spin_unlock(&lock->wait_lock);
}
- set_bit(waitlist_bitnr(waitlist_id),
+ set_bit(waitlist_bitnr(type),
(unsigned long *) &lock->state.v);
- if (__six_trylock(lock, lock_val, lock_fail))
+ if (__six_trylock_type(lock, type))
break;
schedule();
@@ -130,40 +165,56 @@ static inline void six_lock_wakeup(struct six_lock *lock,
spin_unlock(&lock->wait_lock);
}
-bool __six_trylock_convert(struct six_lock *lock,
- unsigned long unlock_val,
- unsigned long lock_val,
- unsigned long lock_fail,
- unsigned wakeup)
+void __six_unlock_type(struct six_lock *lock, enum six_lock_type type)
+{
+ const struct six_lock_vals l[] = LOCK_VALS;
+ union six_lock_state state;
+
+ /* unlock barrier */
+ smp_wmb();
+ state.v = atomic64_add_return(l[type].unlock_val,
+ &lock->state.counter);
+
+ six_lock_wakeup(lock, state, l[type].unlock_wakeup);
+}
+
+bool six_trylock_convert(struct six_lock *lock,
+ enum six_lock_type from,
+ enum six_lock_type to)
{
+ const struct six_lock_vals l[] = LOCK_VALS;
union six_lock_state old, new;
unsigned long v = lock->state.v;
do {
new.v = old.v = v;
- new.v += unlock_val;
+ new.v += l[from].unlock_val;
- if (new.v & lock_fail)
+ if (new.v & l[to].lock_fail)
return false;
} while ((v = cmpxchg(&lock->state.v,
old.v,
- new.v + lock_val)) != old.v);
+ new.v + l[to].lock_val)) != old.v);
- six_lock_wakeup(lock, new, wakeup);
+ six_lock_wakeup(lock, new, l[from].unlock_wakeup);
return true;
}
-void __six_unlock(struct six_lock *lock,
- unsigned long unlock_val,
- unsigned wakeup)
+/*
+ * Increment read/intent lock count, assuming we already have it read or intent
+ * locked:
+ */
+void __six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
- union six_lock_state state;
+ const struct six_lock_vals l[] = LOCK_VALS;
- /* unlock barrier */
- smp_wmb();
- state.v = atomic64_add_return(unlock_val,
- &lock->state.counter);
+ BUG_ON(type == SIX_LOCK_write);
+
+ /* XXX: assert already locked, and that we don't overflow: */
+
+ atomic64_add(l[type].lock_val, &lock->state.counter);
- six_lock_wakeup(lock, state, wakeup);
+ /* lock barrier: */
+ smp_mb__after_atomic();
}
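
The rewritten six.c drives every lock path from the single LOCK_VALS table: per type, a value added to take the lock, a held-mask that makes the attempt fail, a value added to release it, and the wait list to wake. A self-contained userspace illustration of that table-driven trylock loop, using C11 atomics and a simplified bit layout (the field widths below do not match the kernel's union six_lock_state):

/* Illustration only: the table-driven compare-and-swap trylock pattern. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum six_lock_type { SIX_LOCK_read, SIX_LOCK_intent, SIX_LOCK_write };

struct demo_lock { _Atomic uint64_t v; };

/* Simplified layout: bits 0-25 reader count, bits 26-28 intent count,
 * bits 32+ sequence number (odd sequence == write locked, as in the patch). */
#define READ_ONE	((uint64_t)1 << 0)
#define INTENT_ONE	((uint64_t)1 << 26)
#define SEQ_ONE		((uint64_t)1 << 32)
#define READ_HELD	(INTENT_ONE - READ_ONE)	/* any readers present */
#define INTENT_HELD	((uint64_t)7 << 26)	/* any intent holder */
#define WRITE_HELD	SEQ_ONE			/* low sequence bit set */

struct demo_vals { uint64_t lock_val, lock_fail; };

static const struct demo_vals vals[] = {
	[SIX_LOCK_read]   = { READ_ONE,   WRITE_HELD  },
	[SIX_LOCK_intent] = { INTENT_ONE, INTENT_HELD },
	[SIX_LOCK_write]  = { SEQ_ONE,    READ_HELD   },
};

static bool demo_trylock(struct demo_lock *lock, enum six_lock_type type)
{
	uint64_t old = atomic_load(&lock->v);

	do {
		if (old & vals[type].lock_fail)
			return false;
		/* On CAS failure 'old' is reloaded and the check reruns. */
	} while (!atomic_compare_exchange_weak(&lock->v, &old,
					       old + vals[type].lock_val));
	return true;
}

int main(void)
{
	struct demo_lock lock = { 0 };

	printf("read:  %d\n", demo_trylock(&lock, SIX_LOCK_read));	/* 1 */
	printf("write: %d\n", demo_trylock(&lock, SIX_LOCK_write));	/* 0: a reader holds it */
	return 0;
}
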
diff --git a/drivers/md/bcache/six.h b/drivers/md/bcache/six.h
index 3baa45687f6e..c98be80b9972 100644
--- a/drivers/md/bcache/six.h
+++ b/drivers/md/bcache/six.h
@@ -33,8 +33,8 @@ union six_lock_state {
};
struct {
- unsigned read_lock:28;
- unsigned intent_lock:1;
+ unsigned read_lock:26;
+ unsigned intent_lock:3;
unsigned waiters:3;
/*
* seq works much like in seqlocks: it's incremented every time
@@ -65,7 +65,7 @@ struct six_lock {
#endif
};
-static inline void __six_lock_init(struct six_lock *lock, const char *name,
+static __always_inline void __six_lock_init(struct six_lock *lock, const char *name,
struct lock_class_key *key)
{
atomic64_set(&lock->state.counter, 0);
@@ -86,12 +86,13 @@ do { \
__six_lock_init((lock), #lock, &__key); \
} while (0)
-bool __six_trylock_convert(struct six_lock *, unsigned long, unsigned long,
- unsigned long, unsigned);
-bool __six_trylock(struct six_lock *, unsigned long, unsigned long);
-bool __six_relock(struct six_lock *, unsigned long, unsigned long, unsigned);
-void __six_lock(struct six_lock *, unsigned long, unsigned long, unsigned);
-void __six_unlock(struct six_lock *, unsigned long, unsigned);
+bool __six_trylock_type(struct six_lock *, enum six_lock_type);
+bool __six_relock_type(struct six_lock *, enum six_lock_type, unsigned);
+void __six_lock_type(struct six_lock *, enum six_lock_type);
+void __six_unlock_type(struct six_lock *, enum six_lock_type);
+bool six_trylock_convert(struct six_lock *, enum six_lock_type,
+ enum six_lock_type);
+void __six_lock_increment(struct six_lock *, enum six_lock_type);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -107,80 +108,71 @@ void __six_unlock(struct six_lock *, unsigned long, unsigned);
#define __SIX_VAL(field, _v) (((union six_lock_state) { .field = _v }).v)
-#define __SIX_VAL_WAIT __SIX_VAL(waiters, 1)
+static __always_inline bool six_trylock_type(struct six_lock *lock,
+ enum six_lock_type type)
+{
+ if (!__six_trylock_type(lock, type))
+ return false;
-#define __SIX_LOCK_HELD_read __SIX_VAL(read_lock, ~0)
-#define __SIX_LOCK_HELD_intent __SIX_VAL(intent_lock, 1)
-#define __SIX_LOCK_HELD_write __SIX_VAL(seq, 1)
+ six_acquire(&lock->dep_map);
+ return true;
+}
-#define __SIX_LOCK_FAIL_read __SIX_LOCK_HELD_write
-#define __SIX_LOCK_VAL_read __SIX_VAL(read_lock, 1)
-#define __SIX_UNLOCK_VAL_read (-__SIX_VAL(read_lock, 1))
-#define __SIX_UNLOCK_WAKEUP_read SIX_LOCK_write
+static __always_inline bool six_relock_type(struct six_lock *lock,
+ enum six_lock_type type,
+ u32 seq)
+{
+ if (!__six_relock_type(lock, type, seq))
+ return false;
-#define __SIX_LOCK_FAIL_intent __SIX_LOCK_HELD_intent
-#define __SIX_LOCK_VAL_intent __SIX_VAL(intent_lock, 1)
-#define __SIX_UNLOCK_VAL_intent (-__SIX_VAL(intent_lock, 1))
-#define __SIX_UNLOCK_WAKEUP_intent SIX_LOCK_intent
+ six_acquire(&lock->dep_map);
+ return true;
+}
+
+static __always_inline void six_lock_type(struct six_lock *lock,
+ enum six_lock_type type)
+{
+ __six_lock_type(lock, type);
+ six_acquire(&lock->dep_map);
+}
-#define __SIX_LOCK_FAIL_write __SIX_LOCK_HELD_read
-#define __SIX_LOCK_VAL_write __SIX_VAL(seq, 1)
-#define __SIX_UNLOCK_VAL_write __SIX_VAL(seq, 1)
-#define __SIX_UNLOCK_WAKEUP_write SIX_LOCK_read
+static __always_inline void six_unlock_type(struct six_lock *lock,
+ enum six_lock_type type)
+{
+ six_release(&lock->dep_map);
+ __six_unlock_type(lock, type);
+}
-#define SIX_LOCK_SPIN_COUNT 1
+static __always_inline void six_lock_increment(struct six_lock *lock,
+ enum six_lock_type type)
+{
+ __six_lock_increment(lock, type);
+ six_acquire(&lock->dep_map);
+}
#define __SIX_LOCK(type) \
- static inline bool six_trylock_##type(struct six_lock *lock) \
- { \
- if (__six_trylock(lock, \
- __SIX_LOCK_VAL_##type, \
- __SIX_LOCK_FAIL_##type)) { \
- six_acquire(&lock->dep_map); \
- return true; \
- } \
- return false; \
- } \
+static __always_inline bool six_trylock_##type(struct six_lock *lock) \
+{ \
+ return six_trylock_type(lock, SIX_LOCK_##type); \
+} \
\
- static inline bool six_relock_##type(struct six_lock *lock, u32 seq)\
- { \
- if (__six_relock(lock, \
- __SIX_LOCK_VAL_##type, \
- __SIX_LOCK_FAIL_##type, \
- seq)) { \
- six_acquire(&lock->dep_map); \
- return true; \
- } \
- return false; \
- } \
+static __always_inline bool six_relock_##type(struct six_lock *lock, u32 seq)\
+{ \
+ return six_relock_type(lock, SIX_LOCK_##type, seq); \
+} \
\
- static inline void six_lock_##type(struct six_lock *lock) \
- { \
- __six_lock(lock, \
- __SIX_LOCK_VAL_##type, \
- __SIX_LOCK_FAIL_##type, \
- SIX_LOCK_##type); \
- six_acquire(&lock->dep_map); \
- } \
+static __always_inline void six_lock_##type(struct six_lock *lock) \
+{ \
+ six_lock_type(lock, SIX_LOCK_##type); \
+} \
\
- static inline void six_unlock_##type(struct six_lock *lock) \
- { \
- six_release(&lock->dep_map); \
- \
- __six_unlock(lock, \
- __SIX_UNLOCK_VAL_##type, \
- __SIX_UNLOCK_WAKEUP_##type); \
- }
+static __always_inline void six_unlock_##type(struct six_lock *lock) \
+{ \
+ six_unlock_type(lock, SIX_LOCK_##type); \
+}
__SIX_LOCK(read)
__SIX_LOCK(intent)
__SIX_LOCK(write)
-#define six_trylock_convert(lock, from, to) \
- __six_trylock_convert(lock, \
- __SIX_UNLOCK_VAL_##from, \
- __SIX_LOCK_VAL_##to, \
- __SIX_LOCK_FAIL_##to, \
- __SIX_UNLOCK_WAKEUP_##from)
-
#endif /* _BCACHE_SIX_H */
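
For completeness, a hedged sketch of the relock-by-sequence-number pattern the six_relock_*() helpers support (btree.c uses it via iter->lock_seq[]); the helper names and the direct read of state.seq are illustrative, assuming the caller records the sequence number while it still holds the lock:

/* Sketch: drop a read lock and later retake it only if nothing changed. */
#include "six.h"

static u32 save_and_drop(struct six_lock *lock)
{
	u32 seq = lock->state.seq;	/* sample while still locked */

	six_unlock_read(lock);
	return seq;
}

static bool try_reacquire(struct six_lock *lock, u32 seq)
{
	/* Fails if the lock is unavailable or the sequence number moved,
	 * i.e. a write lock was taken since we dropped the read lock: */
	return six_relock_read(lock, seq);
}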