diff options
author:    Kent Overstreet <kent.overstreet@gmail.com>  2019-01-21 15:32:13 -0500
committer: Kent Overstreet <kent.overstreet@gmail.com>  2019-04-03 12:44:03 -0400
commit:    3d3968df6f1d79d8a10314c23b3482e91c1d76a2 (patch)
tree:      4aa72c74850ddf5cffdc6fc51db24ee372cf5b37
parent:    b2bf4f43a12c05c2130f241255d8ea9b2420cd6e (diff)
bcachefs: fix check for if extent update is allocating
 -rw-r--r--  fs/bcachefs/buckets.c | 46
 -rw-r--r--  fs/bcachefs/buckets.h |  4
 -rw-r--r--  fs/bcachefs/extents.c | 32
 -rw-r--r--  fs/bcachefs/extents.h |  3
 -rw-r--r--  fs/bcachefs/fs-io.c   | 19
 5 files changed, 83 insertions(+), 21 deletions(-)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index d7db4ce16f9f..abc920b856cc 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -207,13 +207,14 @@ static bool bucket_became_unavailable(struct bucket_mark old, !is_available_bucket(new); } -void bch2_fs_usage_apply(struct bch_fs *c, - struct bch_fs_usage *fs_usage, - struct disk_reservation *disk_res, - struct gc_pos gc_pos) +int bch2_fs_usage_apply(struct bch_fs *c, + struct bch_fs_usage *fs_usage, + struct disk_reservation *disk_res, + struct gc_pos gc_pos) { s64 added = fs_usage->s.data + fs_usage->s.reserved; s64 should_not_have_added; + int ret = 0; percpu_rwsem_assert_held(&c->mark_lock); @@ -226,6 +227,7 @@ void bch2_fs_usage_apply(struct bch_fs *c, "disk usage increased without a reservation")) { atomic64_sub(should_not_have_added, &c->sectors_available); added -= should_not_have_added; + ret = -1; } if (added > 0) { @@ -245,6 +247,8 @@ void bch2_fs_usage_apply(struct bch_fs *c, } memset(fs_usage, 0, sizeof(*fs_usage)); + + return ret; } static inline void account_bucket(struct bch_fs_usage *fs_usage, @@ -833,6 +837,8 @@ void bch2_mark_update(struct btree_insert *trans, struct bch_fs_usage fs_usage = { 0 }; struct gc_pos pos = gc_pos_btree_node(b); struct bkey_packed *_k; + u64 disk_res_sectors = trans->disk_res ? 
trans->disk_res->sectors : 0; + static int warned_disk_usage = 0; if (!btree_node_type_needs_gc(iter->btree_id)) return; @@ -892,7 +898,37 @@ void bch2_mark_update(struct btree_insert *trans, bch2_btree_node_iter_advance(&node_iter, b); } - bch2_fs_usage_apply(c, &fs_usage, trans->disk_res, pos); + if (bch2_fs_usage_apply(c, &fs_usage, trans->disk_res, pos) && + !warned_disk_usage && + !xchg(&warned_disk_usage, 1)) { + char buf[200]; + + pr_err("disk usage increased more than %llu sectors reserved", disk_res_sectors); + + pr_err("while inserting"); + bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(insert->k)); + pr_err("%s", buf); + pr_err("overlapping with"); + + node_iter = iter->l[0].iter; + while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b, + KEY_TYPE_discard))) { + struct bkey unpacked; + struct bkey_s_c k; + + k = bkey_disassemble(b, _k, &unpacked); + + if (btree_node_is_extents(b) + ? bkey_cmp(insert->k->k.p, bkey_start_pos(k.k)) <= 0 + : bkey_cmp(insert->k->k.p, k.k->p)) + break; + + bch2_bkey_val_to_text(&PBUF(buf), c, k); + pr_err("%s", buf); + + bch2_btree_node_iter_advance(&node_iter, b); + } + } percpu_up_read_preempt_enable(&c->mark_lock); } diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h index bc9180ffd5f5..10cacf36d238 100644 --- a/fs/bcachefs/buckets.h +++ b/fs/bcachefs/buckets.h @@ -253,8 +253,8 @@ int bch2_mark_key(struct bch_fs *, struct bkey_s_c, bool, s64, struct gc_pos, struct bch_fs_usage *, u64, unsigned); void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *); -void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *, - struct disk_reservation *, struct gc_pos); +int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *, + struct disk_reservation *, struct gc_pos); /* disk reservations: */ diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index 2980416871d8..0f075fa1d360 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -1664,12 +1664,13 @@ static bool 
bch2_extent_merge_inline(struct bch_fs *c, return ret == BCH_MERGE_MERGE; } -int bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size) +bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size, + unsigned nr_replicas) { struct btree_iter iter; struct bpos end = pos; struct bkey_s_c k; - int ret = 0; + bool ret = true; end.offset += size; @@ -1678,8 +1679,8 @@ int bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size) if (bkey_cmp(bkey_start_pos(k.k), end) >= 0) break; - if (!bch2_extent_is_fully_allocated(k)) { - ret = -ENOSPC; + if (nr_replicas > bch2_bkey_nr_ptrs_allocated(k)) { + ret = false; break; } } @@ -1688,6 +1689,29 @@ int bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size) return ret; } +unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k) +{ + unsigned ret = 0; + + switch (k.k->type) { + case KEY_TYPE_extent: { + struct bkey_s_c_extent e = bkey_s_c_to_extent(k); + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + + extent_for_each_ptr_decode(e, p, entry) + ret += !p.ptr.cached && + p.crc.compression_type == BCH_COMPRESSION_NONE; + break; + } + case KEY_TYPE_reservation: + ret = bkey_s_c_to_reservation(k).v->nr_replicas; + break; + } + + return ret; +} + /* KEY_TYPE_reservation: */ const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k) diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h index 0e6f4a0bbcab..698b25818afb 100644 --- a/fs/bcachefs/extents.h +++ b/fs/bcachefs/extents.h @@ -571,6 +571,7 @@ static inline void extent_save(struct btree *b, struct bkey_packed *dst, BUG_ON(!bch2_bkey_pack_key(dst, src, f)); } -int bch2_check_range_allocated(struct bch_fs *, struct bpos, u64); +bool bch2_check_range_allocated(struct bch_fs *, struct bpos, u64, unsigned); +unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c); #endif /* _BCACHEFS_EXTENTS_H */ diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 
64e36b1dc510..f1fb17bff877 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -253,7 +253,9 @@ static s64 sum_sector_overwrites(struct bkey_i *new, struct btree_iter *_iter, BUG_ON(btree_iter_err(old)); if (allocating && - !bch2_extent_is_fully_allocated(old)) + !*allocating && + bch2_bkey_nr_ptrs_allocated(old) < + bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new))) *allocating = true; delta += (min(new->k.p.offset, @@ -858,9 +860,7 @@ static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k) { struct bvec_iter iter; struct bio_vec bv; - unsigned nr_ptrs = !bch2_extent_is_compressed(k) - ? bch2_bkey_nr_dirty_ptrs(k) - : 0; + unsigned nr_ptrs = bch2_bkey_nr_ptrs_allocated(k); bio_for_each_segment(bv, bio, iter) { /* brand new pages, don't need to be locked: */ @@ -1918,19 +1918,20 @@ static int bch2_direct_IO_write(struct kiocb *req, if (unlikely(ret)) goto err; + dio->iop.op.nr_replicas = dio->iop.op.opts.data_replicas; + ret = bch2_disk_reservation_get(c, &dio->iop.op.res, iter->count >> 9, dio->iop.op.opts.data_replicas, 0); if (unlikely(ret)) { - if (bch2_check_range_allocated(c, POS(inode->v.i_ino, - req->ki_pos >> 9), - iter->count >> 9)) + if (!bch2_check_range_allocated(c, POS(inode->v.i_ino, + req->ki_pos >> 9), + iter->count >> 9, + dio->iop.op.opts.data_replicas)) goto err; dio->iop.unalloc = true; } - dio->iop.op.nr_replicas = dio->iop.op.res.nr_replicas; - return bch2_dio_write_loop(dio); err: bch2_disk_reservation_put(c, &dio->iop.op.res); |