author    Kent Overstreet <kent.overstreet@gmail.com>  2016-07-15 04:06:31 -0800
committer Kent Overstreet <kent.overstreet@gmail.com>  2016-10-07 12:36:35 -0800
commit    d55dc0693fc2cda91620edcc250bd2cdc9711004 (patch)
tree      b194f785818a8cf10ed269f0f98475c288ba36c1
parent    b41b4ad27582a1694a5e3cc60547b284b28a6758 (diff)
bcache: b->written in sectors, not blocks

Track b->written in sectors rather than blocks, so comparisons against
c->sb.btree_node_size (also in sectors) and offset arithmetic no longer
need a shift by (c->block_bits + 9) at every use site.

Random cleanup along the way: write_block() drops its now-unneeded
cache_set argument, the btree_cache.h helpers read c->sb.btree_node_size
directly instead of going through CACHE_BTREE_NODE_SIZE(&c->disk_sb),
a stale BUG_ON in mca_reap_notrace() goes away, and the dead #if/#else
branch in bch_btree_keys_u64s_remaining() is removed.
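
For reference, the unit relationships this patch relies on, as a minimal
sketch: the struct below is a stand-in with just the two fields the diff
touches, not the in-tree definition, and the two helpers are illustrative
only (the patch open-codes these shifts at each call site).

/*
 * Minimal stand-in for the real struct cache_set; the in-tree struct
 * carries many more fields.
 */
struct cache_set {
	unsigned block_bits;                      /* log2(block size in sectors) */
	struct { unsigned btree_node_size; } sb;  /* node size, in sectors */
};

/* 1 block = (1 << c->block_bits) sectors, so blocks -> sectors is a shift. */
static inline unsigned blocks_to_sectors(const struct cache_set *c,
					 unsigned blocks)
{
	return blocks << c->block_bits;
}

/* 1 sector = 512 bytes, hence the "<< 9" shifts throughout the patch. */
static inline unsigned long sectors_to_bytes(unsigned sectors)
{
	return (unsigned long) sectors << 9;
}

With 4k blocks on 512-byte sectors, block_bits is 3, which is why the
patch converts __set_blocks() results to sectors with "<< c->block_bits"
and bytes to sectors (or back) with a bare 9-bit shift.
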
-rw-r--r--  drivers/md/bcache/btree_cache.c   |  9
-rw-r--r--  drivers/md/bcache/btree_cache.h   |  6
-rw-r--r--  drivers/md/bcache/btree_io.c      | 63
-rw-r--r--  drivers/md/bcache/btree_update.c  |  6
-rw-r--r--  drivers/md/bcache/btree_update.h  | 48
5 files changed, 45 insertions, 87 deletions
diff --git a/drivers/md/bcache/btree_cache.c b/drivers/md/bcache/btree_cache.c
index c9584f8d9e2c..4132db6250c4 100644
--- a/drivers/md/bcache/btree_cache.c
+++ b/drivers/md/bcache/btree_cache.c
@@ -154,7 +154,6 @@ static inline struct btree *mca_find(struct cache_set *c,
static int mca_reap_notrace(struct cache_set *c, struct btree *b, bool flush)
{
struct closure cl;
- struct bset *i;
closure_init_stack(&cl);
lockdep_assert_held(&c->btree_cache_lock);
@@ -171,14 +170,6 @@ static int mca_reap_notrace(struct cache_set *c, struct btree *b, bool flush)
if (!list_empty(&b->write_blocked))
goto out_unlock;
- i = btree_bset_last(b);
- BUG_ON(!i && btree_node_dirty(b));
- BUG_ON(i && i->u64s &&
- b->io_mutex.count == 1 &&
- !btree_node_dirty(b) &&
- (((void *) i - (void *) b->data) >>
- (c->block_bits + 9) >= b->written));
-
/* XXX: we need a better solution for this, this will cause deadlocks */
if (!list_empty_careful(&b->journal_seq_blacklisted))
goto out_unlock;
diff --git a/drivers/md/bcache/btree_cache.h b/drivers/md/bcache/btree_cache.h
index e185df336778..8ebe6d315196 100644
--- a/drivers/md/bcache/btree_cache.h
+++ b/drivers/md/bcache/btree_cache.h
@@ -34,17 +34,17 @@ int bch_btree_cache_alloc(struct cache_set *);
static inline size_t btree_bytes(struct cache_set *c)
{
- return CACHE_BTREE_NODE_SIZE(&c->disk_sb) << 9;
+ return c->sb.btree_node_size << 9;
}
static inline size_t btree_pages(struct cache_set *c)
{
- return CACHE_BTREE_NODE_SIZE(&c->disk_sb) >> (PAGE_SHIFT - 9);
+ return c->sb.btree_node_size >> (PAGE_SHIFT - 9);
}
static inline unsigned btree_blocks(struct cache_set *c)
{
- return CACHE_BTREE_NODE_SIZE(&c->disk_sb) >> c->block_bits;
+ return c->sb.btree_node_size >> c->block_bits;
}
#define btree_node_root(_b) ((_b)->c->btree_roots[(_b)->btree_id].b)
diff --git a/drivers/md/bcache/btree_io.c b/drivers/md/bcache/btree_io.c
index 5cd85733ab80..d9f2a09e9725 100644
--- a/drivers/md/bcache/btree_io.c
+++ b/drivers/md/bcache/btree_io.c
@@ -152,9 +152,9 @@ void bch_btree_init_next(struct cache_set *c, struct btree *b,
if (did_sort && !b->keys.nsets)
bch_btree_verify(c, b);
- if (b->written < btree_blocks(c)) {
+ if (b->written < c->sb.btree_node_size) {
__btree_node_lock_write(b, iter);
- bch_bset_init_next(&b->keys, &write_block(c, b)->keys);
+ bch_bset_init_next(&b->keys, &write_block(b)->keys);
__btree_node_unlock_write(b, iter);
}
@@ -190,7 +190,7 @@ void bch_btree_init_next(struct cache_set *c, struct btree *b,
static const char *validate_bset(struct cache_set *c, struct btree *b,
struct cache *ca,
const struct bch_extent_ptr *ptr,
- struct bset *i, unsigned blocks)
+ struct bset *i, unsigned sectors)
{
struct bkey_format *f = &b->keys.format;
struct bkey_packed *k;
@@ -198,7 +198,7 @@ static const char *validate_bset(struct cache_set *c, struct btree *b,
if (le16_to_cpu(i->version) != BCACHE_BSET_VERSION)
return "unsupported bset version";
- if (b->written + blocks > btree_blocks(c))
+ if (b->written + sectors > c->sb.btree_node_size)
return "bset past end of btree node";
if (i != &b->data->keys && !i->u64s)
@@ -252,7 +252,7 @@ static const char *validate_bset(struct cache_set *c, struct btree *b,
SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
- b->written += blocks;
+ b->written += sectors;
return NULL;
}
@@ -273,8 +273,8 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
if (bch_meta_read_fault("btree"))
goto err;
- while (b->written < btree_blocks(c)) {
- unsigned blocks;
+ while (b->written < c->sb.btree_node_size) {
+ unsigned sectors;
if (!b->written) {
i = &b->data->keys;
@@ -290,9 +290,9 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
btree_csum_set(b, b->data))
goto err;
- blocks = __set_blocks(b->data,
- le16_to_cpu(b->data->keys.u64s),
- block_bytes(c));
+ sectors = __set_blocks(b->data,
+ le16_to_cpu(b->data->keys.u64s),
+ block_bytes(c)) << c->block_bits;
err = "bad magic";
if (le64_to_cpu(b->data->magic) != bset_magic(&c->disk_sb))
@@ -322,7 +322,7 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
b->keys.format = b->data->format;
b->keys.set->data = &b->data->keys;
} else {
- bne = write_block(c, b);
+ bne = write_block(b);
i = &bne->keys;
if (i->seq != b->data->keys.seq)
@@ -337,12 +337,12 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
btree_csum_set(b, bne))
goto err;
- blocks = __set_blocks(bne,
- le16_to_cpu(bne->keys.u64s),
- block_bytes(c));
+ sectors = __set_blocks(bne,
+ le16_to_cpu(bne->keys.u64s),
+ block_bytes(c)) << c->block_bits;
}
- err = validate_bset(c, b, ca, ptr, i, blocks);
+ err = validate_bset(c, b, ca, ptr, i, sectors);
if (err)
goto err;
@@ -359,7 +359,7 @@ void bch_btree_node_read_done(struct cache_set *c, struct btree *b,
}
err = "corrupted btree";
- for (bne = write_block(c, b);
+ for (bne = write_block(b);
bset_byte_offset(b, bne) < btree_bytes(c);
bne = (void *) bne + block_bytes(c))
if (bne->keys.seq == b->data->keys.seq)
@@ -537,12 +537,12 @@ static void do_btree_node_write(struct closure *cl)
struct bkey_s_extent e;
struct bch_extent_ptr *ptr;
struct cache *ca;
- size_t blocks_to_write;
+ size_t sectors_to_write;
void *data;
trace_bcache_btree_write(b);
- BUG_ON(b->written >= btree_blocks(c));
+ BUG_ON(b->written >= c->sb.btree_node_size);
BUG_ON(b->written && !i->u64s);
BUG_ON(btree_bset_first(b)->seq != i->seq);
BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
@@ -561,21 +561,21 @@ static void do_btree_node_write(struct closure *cl)
b->data->format = b->keys.format;
data = b->data;
b->data->csum = cpu_to_le64(btree_csum_set(b, b->data));
- blocks_to_write = __set_blocks(b->data,
- le16_to_cpu(b->data->keys.u64s),
- block_bytes(c));
+ sectors_to_write = __set_blocks(b->data,
+ le16_to_cpu(b->data->keys.u64s),
+ block_bytes(c)) << c->block_bits;
} else {
- struct btree_node_entry *bne = write_block(c, b);
+ struct btree_node_entry *bne = write_block(b);
data = bne;
bne->csum = cpu_to_le64(btree_csum_set(b, bne));
- blocks_to_write = __set_blocks(bne,
- le16_to_cpu(bne->keys.u64s),
- block_bytes(c));
+ sectors_to_write = __set_blocks(bne,
+ le16_to_cpu(bne->keys.u64s),
+ block_bytes(c)) << c->block_bits;
}
- BUG_ON(b->written + blocks_to_write > btree_blocks(c));
+ BUG_ON(b->written + sectors_to_write > c->sb.btree_node_size);
/*
* We handle btree write errors by immediately halting the journal -
@@ -594,7 +594,7 @@ static void do_btree_node_write(struct closure *cl)
struct btree_write *w = btree_prev_write(b);
set_btree_node_write_error(b);
- b->written += blocks_to_write;
+ b->written += sectors_to_write;
bch_btree_complete_write(c, b, w);
closure_return_with_destructor(cl, btree_node_write_unlock);
@@ -608,7 +608,7 @@ static void do_btree_node_write(struct closure *cl)
bio->bi_end_io = btree_node_write_endio;
bio->bi_private = cl;
- bio->bi_iter.bi_size = blocks_to_write << (c->block_bits + 9);
+ bio->bi_iter.bi_size = sectors_to_write << 9;
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
bch_bio_map(bio, data);
@@ -631,15 +631,14 @@ static void do_btree_node_write(struct closure *cl)
e = bkey_i_to_s_extent(&k.key);
extent_for_each_ptr(e, ptr)
- ptr->offset += b->written << c->block_bits;
+ ptr->offset += b->written;
rcu_read_lock();
extent_for_each_online_device(c, e, ptr, ca)
- atomic64_add(blocks_to_write << c->block_bits,
- &ca->btree_sectors_written);
+ atomic64_add(sectors_to_write, &ca->btree_sectors_written);
rcu_read_unlock();
- b->written += blocks_to_write;
+ b->written += sectors_to_write;
if (!bio_alloc_pages(bio, __GFP_NOWARN|GFP_NOWAIT)) {
int j;
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index a54cc5aa4ed1..05e23e91949e 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -781,15 +781,15 @@ static void btree_node_lock_for_insert(struct btree *b, struct btree_iter *iter)
relock:
btree_node_lock_write(b, iter);
- BUG_ON(&write_block(c, b)->keys < btree_bset_last(b));
+ BUG_ON(&write_block(b)->keys < btree_bset_last(b));
/*
* If the last bset has been written, initialize a new one - check after
* taking the write lock because it can be written with only a read
* lock:
*/
- if (b->written != btree_blocks(c) &&
- &write_block(c, b)->keys > btree_bset_last(b)) {
+ if (b->written != c->sb.btree_node_size &&
+ &write_block(b)->keys > btree_bset_last(b)) {
btree_node_unlock_write(b, iter);
bch_btree_init_next(c, b, iter);
goto relock;
diff --git a/drivers/md/bcache/btree_update.h b/drivers/md/bcache/btree_update.h
index 4418bcd5af97..f7ce263f9908 100644
--- a/drivers/md/bcache/btree_update.h
+++ b/drivers/md/bcache/btree_update.h
@@ -141,59 +141,27 @@ void bch_btree_bset_insert(struct btree_iter *, struct btree *,
void bch_btree_insert_and_journal(struct btree_iter *, struct bkey_i *,
struct journal_res *);
-static inline struct btree_node_entry *write_block(struct cache_set *c,
- struct btree *b)
+static inline struct btree_node_entry *write_block(struct btree *b)
{
EBUG_ON(!b->written);
- return (void *) b->data + (b->written << (c->block_bits + 9));
+ return (void *) b->data + (b->written << 9);
}
static inline size_t bch_btree_keys_u64s_remaining(struct cache_set *c,
struct btree *b)
{
struct bset *i = btree_bset_last(b);
+ size_t bytes_used = bset_byte_offset(b, i) +
+ __set_bytes(i, le16_to_cpu(i->u64s));
- BUG_ON((PAGE_SIZE << b->keys.page_order) <
- (bset_byte_offset(b, i) + __set_bytes(i, le16_to_cpu(i->u64s))));
-
- if (b->written == btree_blocks(c))
+ if (b->written == c->sb.btree_node_size)
return 0;
-#if 1
- EBUG_ON(i != (b->written
- ? &write_block(c, b)->keys
- : &b->data->keys));
-
- return ((PAGE_SIZE << b->keys.page_order) -
- (bset_byte_offset(b, i) + __set_bytes(i, le16_to_cpu(i->u64s)))) /
- sizeof(u64);
-#else
- /*
- * first bset is embedded in a struct btree_node, not a
- * btree_node_entry, so write_block() when b->written == 0 doesn't
- * work... ugh
- */
-
- if (!b->written ||
- &write_block(c, b)->keys == i)
- return ((PAGE_SIZE << b->keys.page_order) -
- (bset_byte_offset(b, i) + set_bytes(i))) /
- sizeof(u64);
-
- /* haven't initialized the next bset: */
-
- BUG_ON(&write_block(c, b)->keys < i);
-
- BUG_ON(!b->written);
-
- return ((((btree_blocks(c) - b->written) <<
- (c->block_bits + 9)) -
- sizeof(struct btree_node_entry)) /
- sizeof(u64));
+ EBUG_ON(bytes_used > btree_bytes(c));
+ EBUG_ON(i != (b->written ? &write_block(b)->keys : &b->data->keys));
- return b->written < btree_blocks(c);
-#endif
+ return (btree_bytes(c) - bytes_used) / sizeof(u64);
}
/*