author	Kent Overstreet <kent.overstreet@gmail.com>	2016-06-30 18:34:01 -0800
committer	Kent Overstreet <kent.overstreet@gmail.com>	2016-10-07 12:36:37 -0800
commit	f4e8f82990d6487221b44325226d53ce43dfe178 (patch)
tree	9330aae7a9d13a0ff20b1b1b44f2b3fec105b8cd
parent	d172e8a092d2d58bcfc2c69efc9009ae9c7dbac2 (diff)
bcache: make locks_want unsigned
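
locks_want was a signed byte where -1 meant "no intent locks" and 0 meant
"intent lock the leaf only"; the diff below shifts every value up by one,
so an unsigned 0 now means "no intent locks", 1 means "intent lock the
leaf only", and U8_MAX replaces BTREE_MAX_DEPTH where the old code meant
"intent locks all the way to the root".

Illustrative sketch only, not part of the patch: the lock-type choice
below mirrors the new btree_lock_want() in btree_locking.h; the enum and
the BTREE_MAX_DEPTH value are simplified stand-ins for the real kernel
definitions.

	#include <assert.h>

	enum six_lock_type { SIX_LOCK_read, SIX_LOCK_intent };

	#define BTREE_MAX_DEPTH		4U

	/* Levels below locks_want are intent-locked, the rest read-locked. */
	static enum six_lock_type lock_want(unsigned locks_want, unsigned level)
	{
		return level < locks_want ? SIX_LOCK_intent : SIX_LOCK_read;
	}

	int main(void)
	{
		/* locks_want == 0: read locks only (was -1 when signed) */
		assert(lock_want(0, 0) == SIX_LOCK_read);

		/* locks_want == 1: intent lock on the leaf only (was 0) */
		assert(lock_want(1, 0) == SIX_LOCK_intent);
		assert(lock_want(1, 1) == SIX_LOCK_read);

		/* locks_want == U8_MAX: intent locks on every level */
		assert(lock_want(255, BTREE_MAX_DEPTH - 1) == SIX_LOCK_intent);

		return 0;
	}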
-rw-r--r--	drivers/md/bcache/btree_cache.c	6
-rw-r--r--	drivers/md/bcache/btree_cache.h	2
-rw-r--r--	drivers/md/bcache/btree_gc.c	2
-rw-r--r--	drivers/md/bcache/btree_iter.c	10
-rw-r--r--	drivers/md/bcache/btree_iter.h	16
-rw-r--r--	drivers/md/bcache/btree_locking.h	6
-rw-r--r--	drivers/md/bcache/btree_update.c	12
-rw-r--r--	drivers/md/bcache/migrate.c	2
8 files changed, 28 insertions(+), 28 deletions(-)
diff --git a/drivers/md/bcache/btree_cache.c b/drivers/md/bcache/btree_cache.c
index 4132db6250c4..fe50ddd5d799 100644
--- a/drivers/md/bcache/btree_cache.c
+++ b/drivers/md/bcache/btree_cache.c
@@ -562,7 +562,7 @@ err:
/* Slowpath, don't want it inlined into btree_iter_traverse() */
static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter,
const struct bkey_i *k,
- int level,
+ unsigned level,
struct closure *cl)
{
struct cache_set *c = iter->c;
@@ -625,13 +625,13 @@ static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter,
* the @write parameter.
*/
struct btree *bch_btree_node_get(struct btree_iter *iter,
- const struct bkey_i *k, int level,
+ const struct bkey_i *k, unsigned level,
struct closure *cl)
{
int i = 0;
struct btree *b;
- BUG_ON(level < 0);
+ BUG_ON(level >= BTREE_MAX_DEPTH);
retry:
rcu_read_lock();
b = mca_find(iter->c, k);
diff --git a/drivers/md/bcache/btree_cache.h b/drivers/md/bcache/btree_cache.h
index 8ebe6d315196..e3950bf4cfb3 100644
--- a/drivers/md/bcache/btree_cache.h
+++ b/drivers/md/bcache/btree_cache.h
@@ -20,7 +20,7 @@ int mca_cannibalize_lock(struct cache_set *, struct closure *);
struct btree *mca_alloc(struct cache_set *, struct closure *);
struct btree *bch_btree_node_get(struct btree_iter *,
- const struct bkey_i *, int,
+ const struct bkey_i *, unsigned,
struct closure *);
void bch_btree_cache_free(struct cache_set *);
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index 9e8222d7f643..39355378bac4 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -677,7 +677,7 @@ static int bch_coalesce_btree(struct cache_set *c, enum btree_id btree_id)
*/
memset(merge, 0, sizeof(merge));
- __for_each_btree_node(&iter, c, btree_id, POS_MIN, b, BTREE_MAX_DEPTH) {
+ __for_each_btree_node(&iter, c, btree_id, POS_MIN, b, U8_MAX) {
memmove(merge + 1, merge,
sizeof(merge) - sizeof(merge[0]));
memmove(lock_seq + 1, lock_seq,
diff --git a/drivers/md/bcache/btree_iter.c b/drivers/md/bcache/btree_iter.c
index 989c6be55109..5a2e2b974339 100644
--- a/drivers/md/bcache/btree_iter.c
+++ b/drivers/md/bcache/btree_iter.c
@@ -115,9 +115,9 @@ bool bch_btree_iter_upgrade(struct btree_iter *iter)
{
int i;
- EBUG_ON(iter->locks_want > BTREE_MAX_DEPTH);
-
- for (i = iter->locks_want; i >= iter->level; --i)
+ for (i = iter->level;
+ i < min_t(int, iter->locks_want, BTREE_MAX_DEPTH);
+ i++)
if (iter->nodes[i] && !btree_lock_upgrade(iter, i)) {
do {
btree_node_unlock(iter, i);
@@ -440,7 +440,7 @@ static void btree_iter_verify_locking(struct btree_iter *iter)
*/
for_each_linked_btree_iter(iter, linked)
BUG_ON(btree_iter_cmp(linked, iter) <= 0 &&
- linked->locks_want >= 0 &&
+ linked->locks_want > 0 &&
linked->locks_want < iter->locks_want);
#endif
}
@@ -483,7 +483,7 @@ retry:
__btree_iter_advance(iter);
}
- if (iter->locks_want >= 0)
+ if (iter->locks_want > 0)
btree_iter_verify_locking(iter);
/*
diff --git a/drivers/md/bcache/btree_iter.h b/drivers/md/bcache/btree_iter.h
index afd8965fae57..032a720ecf60 100644
--- a/drivers/md/bcache/btree_iter.h
+++ b/drivers/md/bcache/btree_iter.h
@@ -18,7 +18,7 @@ struct btree_iter {
u8 nodes_intent_locked;
/* Btree level below which we start taking intent locks */
- s8 locks_want;
+ u8 locks_want;
enum btree_id btree_id:8;
@@ -140,7 +140,7 @@ static inline void bch_btree_iter_init(struct btree_iter *iter,
enum btree_id btree_id,
struct bpos pos)
{
- __bch_btree_iter_init(iter, c, btree_id, pos, -1);
+ __bch_btree_iter_init(iter, c, btree_id, pos, 0);
}
static inline void bch_btree_iter_init_intent(struct btree_iter *iter,
@@ -148,7 +148,7 @@ static inline void bch_btree_iter_init_intent(struct btree_iter *iter,
enum btree_id btree_id,
struct bpos pos)
{
- __bch_btree_iter_init(iter, c, btree_id, pos, 0);
+ __bch_btree_iter_init(iter, c, btree_id, pos, 1);
}
int bch_btree_iter_unlink(struct btree_iter *);
@@ -186,7 +186,7 @@ static inline int btree_iter_cmp(const struct btree_iter *l,
(_b) = bch_btree_iter_next_node(_iter))
#define for_each_btree_node(_iter, _c, _btree_id, _start, _b) \
- __for_each_btree_node(_iter, _c, _btree_id, _start, _b, -1)
+ __for_each_btree_node(_iter, _c, _btree_id, _start, _b, 0)
#define __for_each_btree_key(_iter, _c, _btree_id, _start, \
_k, _locks_want) \
@@ -196,10 +196,10 @@ static inline int btree_iter_cmp(const struct btree_iter *l,
bch_btree_iter_advance_pos(_iter))
#define for_each_btree_key(_iter, _c, _btree_id, _start, _k) \
- __for_each_btree_key(_iter, _c, _btree_id, _start, _k, -1)
+ __for_each_btree_key(_iter, _c, _btree_id, _start, _k, 0)
#define for_each_btree_key_intent(_iter, _c, _btree_id, _start, _k) \
- __for_each_btree_key(_iter, _c, _btree_id, _start, _k, 0)
+ __for_each_btree_key(_iter, _c, _btree_id, _start, _k, 1)
#define __for_each_btree_key_with_holes(_iter, _c, _btree_id, \
_start, _k, _locks_want) \
@@ -209,11 +209,11 @@ static inline int btree_iter_cmp(const struct btree_iter *l,
bch_btree_iter_advance_pos(_iter))
#define for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k) \
- __for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, -1)
+ __for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 0)
#define for_each_btree_key_with_holes_intent(_iter, _c, _btree_id, \
_start, _k) \
- __for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 0)
+ __for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 1)
/*
* Unlocks before scheduling
diff --git a/drivers/md/bcache/btree_locking.h b/drivers/md/bcache/btree_locking.h
index 7db4acbae67a..fb6ce606eea4 100644
--- a/drivers/md/bcache/btree_locking.h
+++ b/drivers/md/bcache/btree_locking.h
@@ -77,9 +77,9 @@ static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
static inline enum six_lock_type
btree_lock_want(struct btree_iter *iter, int level)
{
- return level > iter->locks_want
- ? SIX_LOCK_read
- : SIX_LOCK_intent;
+ return level < iter->locks_want
+ ? SIX_LOCK_intent
+ : SIX_LOCK_read;
}
static inline bool btree_want_intent(struct btree_iter *iter, int level)
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index 05e23e91949e..025a29d96138 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -1476,7 +1476,7 @@ static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags,
* XXX: figure out how far we might need to split,
* instead of locking/reserving all the way to the root:
*/
- iter->locks_want = BTREE_MAX_DEPTH;
+ iter->locks_want = U8_MAX;
if (!bch_btree_iter_upgrade(iter)) {
ret = -EINTR;
@@ -1495,7 +1495,7 @@ static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags,
btree_split(b, iter, NULL, reserve, as);
bch_btree_reserve_put(c, reserve);
- iter->locks_want = 0;
+ iter->locks_want = 1;
out:
up_read(&c->gc_lock);
return ret;
@@ -1503,7 +1503,7 @@ out_get_locks:
/* Lock ordering... */
for_each_linked_btree_iter(iter, linked)
if (btree_iter_cmp(linked, iter) <= 0)
- linked->locks_want = BTREE_MAX_DEPTH;
+ linked->locks_want = U8_MAX;
goto out;
}
@@ -1606,7 +1606,7 @@ int bch_btree_insert_trans(struct btree_insert_trans *trans,
return -EROFS;
trans_for_each_entry(trans, i) {
- i->iter->locks_want = max_t(int, i->iter->locks_want, 0);
+ i->iter->locks_want = max_t(int, i->iter->locks_want, 1);
if (unlikely(!bch_btree_iter_upgrade(i->iter))) {
ret = -EINTR;
goto err;
@@ -1681,7 +1681,7 @@ retry:
bch_btree_node_write_lazy(i->iter->nodes[0], i->iter);
trans_for_each_entry(trans, i)
- i->iter->locks_want = 0;
+ i->iter->locks_want = 1;
out:
percpu_ref_put(&c->writes);
@@ -2002,7 +2002,7 @@ int bch_btree_node_rewrite(struct btree_iter *iter, struct btree *b,
struct btree_reserve *reserve;
struct async_split *as;
- iter->locks_want = BTREE_MAX_DEPTH;
+ iter->locks_want = U8_MAX;
if (!bch_btree_iter_upgrade(iter))
return -EINTR;
diff --git a/drivers/md/bcache/migrate.c b/drivers/md/bcache/migrate.c
index 43d86a68fb10..91dc277cca3d 100644
--- a/drivers/md/bcache/migrate.c
+++ b/drivers/md/bcache/migrate.c
@@ -274,7 +274,7 @@ retry:
}
moved++;
- iter.locks_want = -1;
+ iter.locks_want = 0;
}
ret = bch_btree_iter_unlock(&iter);
if (ret)