author     Kent Overstreet <kent.overstreet@gmail.com>    2019-07-09 11:16:33 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>    2019-07-09 12:06:36 -0400
commit     9d5510ae5f8ea881bf8f379c4396e899004e9a84 (patch)
tree       a6e96bee751f01de64d2f87e064acc27e0ec8054
parent     701163bf66e6a161f080b1ef32e395d0831bab0f (diff)
bcachefs: Fix moving compressed data
bio_uncompress_inplace() used to potentially need to extend the bio to be big
enough for the uncompressed data, which has become problematic with multipage
bvecs - but the move extent path already allocates the bios to be big enough
for the uncompressed data. The promote path needed to be fixed, though.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--   fs/bcachefs/compress.c   15
-rw-r--r--   fs/bcachefs/io.c         59
2 files changed, 34 insertions, 40 deletions
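In short, the patch moves responsibility for sizing the destination bio onto the callers: the promote path now allocates its bounce bio for the larger of the compressed and uncompressed (live) sizes, and bch2_bio_uncompress_inplace() just sets bi_iter.bi_size instead of reallocating pages. Below is a minimal, standalone sketch of that sizing rule; promote_bio_sectors() is a hypothetical helper written here only to illustrate the idea (sizes in 512-byte sectors, as in struct bch_extent_crc_unpacked), not code from the patch:

#include <stdbool.h>

/*
 * Illustrative sketch, not part of the patch: how many sectors the promote
 * path must allocate so the write path can later decompress in place.
 */
static unsigned promote_bio_sectors(unsigned compressed_size,	/* sectors on disk */
				    unsigned live_size,		/* sectors after decompression */
				    unsigned read_sectors,	/* sectors this read covers */
				    bool promote_full)
{
	if (promote_full) {
		/* data might have to be decompressed in the write path: */
		return compressed_size > live_size ? compressed_size : live_size;
	}

	/* partial promote: only bounce the sectors actually being read */
	return read_sectors;
}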
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index c9139166df51..3787390da47f 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -241,19 +241,10 @@ int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
 	}
 
 	/*
-	 * might have to free existing pages and retry allocation from mempool -
-	 * do this _after_ decompressing:
+	 * XXX: don't have a good way to assert that the bio was allocated with
+	 * enough space, we depend on bch2_move_extent doing the right thing
 	 */
-	if (bio->bi_iter.bi_size < crc->live_size << 9) {
-		if (bch2_bio_alloc_pages(bio, (crc->live_size << 9) -
-					 bio->bi_iter.bi_size,
-					 GFP_NOFS)) {
-			bch2_bio_free_pages_pool(c, bio);
-			bio->bi_iter.bi_size = 0;
-			bio->bi_vcnt = 0;
-			bch2_bio_alloc_pages_pool(c, bio, crc->live_size << 9);
-		}
-	}
+	bio->bi_iter.bi_size = crc->live_size << 9;
 
 	memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index fdc92e16d287..4d81b6e6e54f 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1031,22 +1031,18 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
 				       struct bpos pos,
 				       struct extent_ptr_decoded *pick,
 				       struct bch_io_opts opts,
-				       unsigned rbio_sectors,
+				       unsigned sectors,
 				       struct bch_read_bio **rbio)
 {
 	struct promote_op *op = NULL;
 	struct bio *bio;
-	unsigned rbio_pages = DIV_ROUND_UP(rbio_sectors, PAGE_SECTORS);
-	/* data might have to be decompressed in the write path: */
-	unsigned wbio_pages = DIV_ROUND_UP(pick->crc.uncompressed_size,
-					   PAGE_SECTORS);
+	unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
 	int ret;
 
 	if (!percpu_ref_tryget(&c->writes))
 		return NULL;
 
-	op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * wbio_pages,
-		     GFP_NOIO);
+	op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
 	if (!op)
 		goto err;
@@ -1054,35 +1050,32 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
 	op->pos = pos;
 
 	/*
-	 * promotes require bouncing, but if the extent isn't
-	 * checksummed/compressed it might be too big for the mempool:
+	 * We don't use the mempool here because extents that aren't
+	 * checksummed or compressed can be too big for the mempool:
 	 */
-	if (rbio_sectors > c->sb.encoded_extent_max) {
-		*rbio = kzalloc(sizeof(struct bch_read_bio) +
-				sizeof(struct bio_vec) * rbio_pages,
-				GFP_NOIO);
-		if (!*rbio)
-			goto err;
+	*rbio = kzalloc(sizeof(struct bch_read_bio) +
+			sizeof(struct bio_vec) * pages,
+			GFP_NOIO);
+	if (!*rbio)
+		goto err;
 
-		rbio_init(&(*rbio)->bio, opts);
-		bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs,
-			 rbio_pages);
+	rbio_init(&(*rbio)->bio, opts);
+	bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs, pages);
 
-		if (bch2_bio_alloc_pages(&(*rbio)->bio, rbio_sectors << 9,
-					 GFP_NOIO))
-			goto err;
+	if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
+				 GFP_NOIO))
+		goto err;
 
-		(*rbio)->bounce  = true;
-		(*rbio)->split   = true;
-		(*rbio)->kmalloc = true;
-	}
+	(*rbio)->bounce  = true;
+	(*rbio)->split   = true;
+	(*rbio)->kmalloc = true;
 
 	if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
 					  bch_promote_params))
 		goto err;
 
 	bio = &op->write.op.wbio.bio;
-	bio_init(bio, bio->bi_inline_vecs, wbio_pages);
+	bio_init(bio, bio->bi_inline_vecs, pages);
 
 	ret = bch2_migrate_write_init(c, &op->write,
 			writepoint_hashed((unsigned long) current),
@@ -1116,8 +1109,9 @@ static inline struct promote_op *promote_alloc(struct bch_fs *c,
 				  bool *read_full)
 {
 	bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
+	/* data might have to be decompressed in the write path: */
 	unsigned sectors = promote_full
-		? pick->crc.compressed_size
+		? max(pick->crc.compressed_size, pick->crc.live_size)
 		: bvec_iter_sectors(iter);
 	struct bpos pos = promote_full
 		? bkey_start_pos(k.k)
@@ -1652,7 +1646,16 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 	}
 
 	if (rbio) {
-		/* promote already allocated bounce rbio */
+		/*
+		 * promote already allocated bounce rbio:
+		 * promote needs to allocate a bio big enough for uncompressing
+		 * data in the write path, but we're not going to use it all
+		 * here:
+		 */
+		BUG_ON(rbio->bio.bi_iter.bi_size <
+		       pick.crc.compressed_size << 9);
+		rbio->bio.bi_iter.bi_size =
+			pick.crc.compressed_size << 9;
 	} else if (bounce) {
 		unsigned sectors = pick.crc.compressed_size;