author    | Kent Overstreet <kent.overstreet@gmail.com> | 2015-05-04 19:45:24 -0700
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2016-10-07 12:34:07 -0800
commit    | e461fa327712186992439a5ca1e30baef1ad2655 (patch)
tree      | 05e85a6e2273b4f0b3b378951c45f63ef784fe6e
parent    | 35e590ea1d212f92f42611a7f4190ab4e1e298ae (diff)
bcache: Kill bio_meta mempool, c->btree_pages
-rw-r--r-- | drivers/md/bcache/bcache.h |  7
-rw-r--r-- | drivers/md/bcache/btree.c  | 52
-rw-r--r-- | drivers/md/bcache/btree.h  |  8
-rw-r--r-- | drivers/md/bcache/debug.c  |  6
-rw-r--r-- | drivers/md/bcache/io.c     | 18
-rw-r--r-- | drivers/md/bcache/io.h     |  2
-rw-r--r-- | drivers/md/bcache/super.c  | 11
7 files changed, 38 insertions, 66 deletions
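For orientation on the btree.h hunks below: the replacement helpers derive node geometry directly from CACHE_BTREE_NODE_SIZE(&c->sb), which the shifts in the patch imply is stored in 512-byte sectors, instead of the cached c->btree_pages count. A minimal userspace sketch of that arithmetic, not part of the patch (the node size of 128 sectors, PAGE_SHIFT of 12 and block_bits of 3 are made-up example values):

```c
#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed 4k pages */

int main(void)
{
	unsigned node_size  = 128;	/* btree node size in 512-byte sectors (example) */
	unsigned block_bits = 3;	/* log2(block size in sectors), e.g. 4k blocks (example) */

	size_t   bytes  = (size_t) node_size << 9;		/* what btree_bytes() computes  */
	size_t   pages  = node_size >> (PAGE_SHIFT - 9);	/* what btree_pages() computes  */
	unsigned blocks = node_size >> block_bits;		/* what btree_blocks() computes */

	printf("node: %zu bytes, %zu pages, %u blocks\n", bytes, pages, blocks);
	return 0;
}
```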
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 80f245ab9a32..a334dfa428a9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -421,7 +421,6 @@ struct cache_set {
 	struct closure		sb_write;
 	struct semaphore	sb_write_mutex;
 
-	mempool_t		bio_meta;
 	struct bio_set		bio_split;
 
 	struct bio_list		bio_submit_list;
@@ -429,11 +428,7 @@ struct cache_set {
 	spinlock_t		bio_submit_lock;
 
 	/* BTREE CACHE */
-	/*
-	 * Default number of pages for a new btree node - may be less than a
-	 * full bucket
-	 */
-	unsigned		btree_pages;
+	struct bio_set		btree_bio;
 
 	spinlock_t		btree_root_lock;
 	struct btree		*btree_roots[BTREE_ID_NR];
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 918b73e71675..eee5c616de81 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -548,7 +548,7 @@ static void bch_btree_node_read(struct cache_set *c, struct btree *b)
 {
 	uint64_t start_time = local_clock();
 	struct closure cl;
-	struct bbio *bio;
+	struct bio *bio;
 	struct cache *ca;
 	const struct bch_extent_ptr *ptr;
 
@@ -564,24 +564,24 @@ static void bch_btree_node_read(struct cache_set *c, struct btree *b)
 	percpu_ref_get(&ca->ref);
 
-	bio = to_bbio(bch_bbio_alloc(c));
-	bio->bio.bi_iter.bi_size = btree_bytes(c);
-	bio->bio.bi_end_io = btree_node_read_endio;
-	bio->bio.bi_private = &cl;
-	bio_set_op_attrs(&bio->bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_bio);
+	bio->bi_iter.bi_size = btree_bytes(c);
+	bio->bi_end_io = btree_node_read_endio;
+	bio->bi_private = &cl;
+	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
 
-	bch_bio_map(&bio->bio, b->data);
+	bch_bio_map(bio, b->data);
 
-	bio_get(&bio->bio);
-	bch_submit_bbio(bio, ca, &b->key, ptr, true);
+	bio_get(bio);
+	bch_submit_bbio(to_bbio(bio), ca, &b->key, ptr, true);
 
 	closure_sync(&cl);
 
-	if (bio->bio.bi_error ||
+	if (bio->bi_error ||
 	    bch_meta_read_fault("btree"))
 		set_btree_node_io_error(b);
 
-	bch_bbio_free(&bio->bio, c);
+	bio_put(bio);
 
 	if (btree_node_io_error(b))
 		goto err;
 
@@ -624,7 +624,7 @@ static void __btree_node_write_done(struct closure *cl)
 	struct btree_write *w = btree_prev_write(b);
 	struct cache_set *c = b->c;
 
-	bch_bbio_free(b->bio, c);
+	bio_put(b->bio);
 	b->bio = NULL;
 
 	btree_complete_write(c, b, w);
@@ -707,10 +707,12 @@ static void do_btree_node_write(struct closure *cl)
 	BUG_ON(b->written + blocks_to_write > btree_blocks(c));
 	BUG_ON(b->bio);
 
-	b->bio = bch_bbio_alloc(c);
+	b->bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_bio);
 
-	/* Take an extra reference so that the bio_put() in
-	 * btree_node_write_endio() doesn't call bio_free() */
+	/*
+	 * Take an extra reference so that the bio_put() in
+	 * btree_node_write_endio() doesn't call bio_free()
+	 */
 	bio_get(b->bio);
 
 	b->bio->bi_end_io = btree_node_write_endio;
@@ -958,7 +960,7 @@ static void mca_bucket_free(struct cache_set *c, struct btree *b)
 
 static void mca_data_alloc(struct cache_set *c, struct btree *b, gfp_t gfp)
 {
-	unsigned order = ilog2(c->btree_pages);
+	unsigned order = ilog2(btree_pages(c));
 
 	b->data = (void *) __get_free_pages(gfp, order);
 	if (!b->data)
@@ -1091,7 +1093,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
 	 * succeed, so that inserting keys into the btree can always succeed and
 	 * IO can always make forward progress:
 	 */
-	nr /= c->btree_pages;
+	nr /= btree_pages(c);
 	can_free = mca_can_free(c);
 	nr = min_t(unsigned long, nr, can_free);
 
@@ -1137,12 +1139,12 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
 	bch_time_stats_update(&c->mca_scan_time, start_time);
 
 	trace_bcache_mca_scan(c,
-			      touched * c->btree_pages,
-			      freed * c->btree_pages,
-			      can_free * c->btree_pages,
+			      touched * btree_pages(c),
+			      freed * btree_pages(c),
+			      can_free * btree_pages(c),
 			      sc->nr_to_scan);
 
-	return (unsigned long) freed * c->btree_pages;
+	return (unsigned long) freed * btree_pages(c);
 }
 
 static unsigned long bch_mca_count(struct shrinker *shrink,
@@ -1157,7 +1159,7 @@ static unsigned long bch_mca_count(struct shrinker *shrink,
 	if (c->btree_cache_alloc_lock)
 		return 0;
 
-	return mca_can_free(c) * c->btree_pages;
+	return mca_can_free(c) * btree_pages(c);
 }
 
 void bch_btree_cache_free(struct cache_set *c)
@@ -1177,7 +1179,7 @@ void bch_btree_cache_free(struct cache_set *c)
 	if (c->verify_data)
 		list_move(&c->verify_data->list, &c->btree_cache);
 
-	free_pages((unsigned long) c->verify_ondisk, ilog2(c->btree_pages));
+	free_pages((unsigned long) c->verify_ondisk, ilog2(btree_pages(c)));
 #endif
 
 	for (i = 0; i < BTREE_ID_NR; i++)
@@ -1231,7 +1233,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
 	mutex_init(&c->verify_lock);
 
 	c->verify_ondisk = (void *)
-		__get_free_pages(GFP_KERNEL, ilog2(c->btree_pages));
+		__get_free_pages(GFP_KERNEL, ilog2(btree_pages(c)));
 
 	c->verify_data = mca_bucket_alloc(c, GFP_KERNEL);
 	if (c->verify_data)
@@ -1241,7 +1243,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
 	c->btree_cache_shrink.count_objects = bch_mca_count;
 	c->btree_cache_shrink.scan_objects = bch_mca_scan;
 	c->btree_cache_shrink.seeks = 4;
-	c->btree_cache_shrink.batch = c->btree_pages * 2;
+	c->btree_cache_shrink.batch = btree_pages(c) * 2;
 	register_shrinker(&c->btree_cache_shrink);
 
 	return 0;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 829a080f8f47..88423bc7738b 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -183,17 +183,17 @@ static inline unsigned bset_byte_offset(struct btree *b, void *i)
 
 static inline size_t btree_bytes(struct cache_set *c)
 {
-	return c->btree_pages * PAGE_SIZE;
+	return CACHE_BTREE_NODE_SIZE(&c->sb) << 9;
 }
 
-static inline unsigned btree_sectors(struct cache_set *c)
+static inline size_t btree_pages(struct cache_set *c)
 {
-	return c->btree_pages << (PAGE_SHIFT - 9);
+	return CACHE_BTREE_NODE_SIZE(&c->sb) >> (PAGE_SHIFT - 9);
 }
 
 static inline unsigned btree_blocks(struct cache_set *c)
 {
-	return btree_sectors(c) >> c->block_bits;
+	return CACHE_BTREE_NODE_SIZE(&c->sb) >> c->block_bits;
 }
 
 static inline size_t bch_btree_keys_u64s_remaining(struct btree *b)
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index f53aa1d78c42..b0d22579ea0b 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -64,7 +64,7 @@ void bch_btree_verify(struct cache_set *c, struct btree *b)
 
 	ca = bch_btree_pick_ptr(c, b, &ptr);
 
-	bio = bch_bbio_alloc(c);
+	bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_bio);
 	bio->bi_bdev = ca->disk_sb.bdev;
 	bio->bi_iter.bi_size = btree_bytes(c);
 	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
@@ -72,12 +72,10 @@ void bch_btree_verify(struct cache_set *c, struct btree *b)
 	bio->bi_end_io = btree_verify_endio;
 	bch_bio_map(bio, n_sorted);
 
-	bio_get(bio);
 	bch_submit_bbio(to_bbio(bio), ca, &b->key, ptr, true);
 
 	closure_sync(&cl);
-
-	bch_bbio_free(bio, c);
+	bio_put(bio);
 
 	memcpy(n_ondisk, n_sorted, btree_bytes(c));
 
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index dcf41ff10979..4d176caf097a 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -56,24 +56,6 @@ void bch_bio_submit_work(struct work_struct *work)
 
 /* Bios with headers */
 
-void bch_bbio_free(struct bio *bio, struct cache_set *c)
-{
-	struct bbio *b = container_of(bio, struct bbio, bio);
-	mempool_free(b, &c->bio_meta);
-}
-
-struct bio *bch_bbio_alloc(struct cache_set *c)
-{
-	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
-	struct bio *bio = &b->bio;
-
-	bio_init(bio);
-	bio->bi_max_vecs = c->btree_pages;
-	bio->bi_io_vec = bio->bi_inline_vecs;
-
-	return bio;
-}
-
 void bch_bbio_prep(struct bbio *b, struct cache *ca)
 {
 	struct bvec_iter *iter = &b->bio.bi_iter;
diff --git a/drivers/md/bcache/io.h b/drivers/md/bcache/io.h
index 28fbbaa3c485..ef7122a591da 100644
--- a/drivers/md/bcache/io.h
+++ b/drivers/md/bcache/io.h
@@ -80,8 +80,6 @@ void bch_cache_io_error_work(struct work_struct *);
 void bch_count_io_errors(struct cache *, int, const char *);
 void bch_bbio_count_io_errors(struct bbio *, int, const char *);
 void bch_bbio_endio(struct bbio *, int, const char *);
-void bch_bbio_free(struct bio *, struct cache_set *);
-struct bio *bch_bbio_alloc(struct cache_set *);
 void bch_generic_make_request(struct bio *, struct cache_set *);
 void bch_bio_submit_work(struct work_struct *);
 
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 84ae4466d972..4299f76e35e7 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -770,10 +770,10 @@ static void cache_set_free(struct closure *cl)
 	percpu_ref_exit(&c->writes);
 	bch_io_clock_exit(&c->io_clock[WRITE]);
 	bch_io_clock_exit(&c->io_clock[READ]);
+	bioset_exit(&c->btree_bio);
 	bioset_exit(&c->bio_split);
 	mempool_exit(&c->btree_reserve_pool);
 	mempool_exit(&c->fill_iter);
-	mempool_exit(&c->bio_meta);
 	mempool_exit(&c->search);
 
 	if (c->wq)
@@ -924,7 +924,6 @@ static const char *bch_cache_set_alloc(struct cache_sb *sb,
 		goto err;
 
 	c->block_bits = ilog2(c->sb.block_size);
-	c->btree_pages = CACHE_BTREE_NODE_SIZE(&c->sb) / PAGE_SECTORS;
 
 	sema_init(&c->sb_write_mutex, 1);
 	INIT_RADIX_TREE(&c->devices, GFP_KERNEL);
@@ -991,18 +990,16 @@ static const char *bch_cache_set_alloc(struct cache_sb *sb,
 
 	if (!(c->wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
 	    mempool_init_slab_pool(&c->search, 1, bch_search_cache) ||
-	    mempool_init_kmalloc_pool(&c->bio_meta, 1,
-				      sizeof(struct bbio) + sizeof(struct bio_vec) *
-				      c->btree_pages) ||
 	    mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1,
 				      BTREE_RESERVE_SIZE) ||
 	    mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
 	    bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio)) ||
+	    bioset_init(&c->btree_bio, 1, offsetof(struct bbio, bio)) ||
 	    bch_io_clock_init(&c->io_clock[READ]) ||
 	    bch_io_clock_init(&c->io_clock[WRITE]) ||
 	    bch_journal_alloc(&c->journal) ||
 	    bch_btree_cache_alloc(c) ||
-	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
+	    bch_bset_sort_state_init(&c->sort, ilog2(btree_pages(c))))
 		goto err;
 
 	err = "error creating kobject";
@@ -1238,7 +1235,7 @@ static const char *can_add_cache(struct cache_sb *sb,
 		return "mismatched block size";
 
 	if (sb->members[le16_to_cpu(sb->nr_this_dev)].bucket_size <
-	    c->btree_pages * PAGE_SECTORS)
+	    CACHE_BTREE_NODE_SIZE(&c->sb))
		return "new cache bucket_size is too small";
 
 	return NULL;
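Why to_bbio() keeps working on bios that now come from bio_alloc_bioset(): the new bioset is created with offsetof(struct bbio, bio) as its front pad, so each bio handed out is embedded at the tail of a struct bbio and container_of() recovers the wrapper. A standalone userspace model of that layout trick, not the real kernel code (bio_model, bbio_model and their fields are illustrative stand-ins):

```c
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* container_of(), redeclared here for a userspace demo */
#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

struct bio_model  { int sector; };		/* stand-in for struct bio  */

struct bbio_model {				/* stand-in for struct bbio */
	int			ptr_index;	/* illustrative extra state */
	struct bio_model	bio;		/* embedded "bio", last member */
};

/*
 * Model of allocating from a bioset whose front_pad is
 * offsetof(struct bbio, bio): the caller is handed a bio, but a full
 * bbio sits immediately in front of it in the same allocation.
 */
static struct bio_model *bio_alloc_model(void)
{
	struct bbio_model *b = calloc(1, sizeof(*b));

	return b ? &b->bio : NULL;
}

int main(void)
{
	struct bio_model *bio = bio_alloc_model();
	struct bbio_model *b;

	if (!bio)
		return 1;

	b = container_of(bio, struct bbio_model, bio);	/* what to_bbio() does */
	b->ptr_index = 1;
	bio->sector = 42;

	printf("bbio %p wraps bio %p: ptr_index=%d sector=%d\n",
	       (void *) b, (void *) bio, b->ptr_index, bio->sector);

	free(b);
	return 0;
}
```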