author	Kent Overstreet <kent.overstreet@linux.dev>	2022-11-02 17:12:00 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2022-11-14 01:58:27 -0500
commit	df958ca647ae73c548cf07a7bce010b32eabd8d7 (patch)
tree	3210c8b89b8f021974c7e562a6f6a68f50ee4da0
parent	bbbd4ce9e06e6046b3c1f8d11e84c0c66234b049 (diff)
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when possible.

Patch components:

- New boolean filesystem and inode option, nocow: note that when nocow is enabled, data checksumming and compression are implicitly disabled.

- To prevent in-place writes from racing with data moves (data_update.c) or bucket reuse (i.e. a bucket being reused and re-allocated while a nocow write is in flight), we have a new locking mechanism: buckets can be locked for either data update or data move, using a fixed size hash table of two_state_shared locks (a short sketch follows the patch description below). We don't have any chaining, meaning updates and moves to different buckets that hash to the same lock will wait unnecessarily - we'll want to watch for this becoming an issue.

- The allocator path also needs to check for in-place writes in flight to a given bucket before giving it out: thus we add another counter to bucket_alloc_state so we can track this.

- Fsync may now need to issue cache flushes to block devices instead of flushing the journal. We add a device bitmask to bch_inode_info, ei_devs_need_flush, which tracks devices that need to have flushes issued. Note that this will lead to unnecessary flushes when other codepaths have already issued flushes; we may want to replace this with a sequence number.

- New nocow write path: look up extents, and if they're writable, write to them - otherwise fall back to the normal COW write path.

XXX: switch to sequence numbers instead of bitmask for devs needing journal flush

XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to run in process context - see if we can improve this

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
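For reference, here is a condensed sketch of the locking scheme described above, mirroring the definitions this patch adds in fs/bcachefs/nocow_locking.h; two_state_lock_t is assumed to come from bcachefs' two_state_shared_lock.h and the siphash helpers from <linux/siphash.h>:

#define BUCKET_NOCOW_LOCKS	(1U << 10)

struct bucket_nocow_lock_table {
	siphash_key_t		key;
	two_state_lock_t	l[BUCKET_NOCOW_LOCKS];	/* fixed size, no chaining */
};

/* Map a (device, bucket) position to its lock slot: */
static inline two_state_lock_t *
bucket_nocow_lock(struct bucket_nocow_lock_table *t, struct bpos bucket)
{
	/* device index in the high bits, bucket offset in the low bits: */
	u64 dev_bucket = bucket.inode << 56 | bucket.offset;
	unsigned h = siphash_1u64(dev_bucket, &t->key);

	/* buckets that hash to the same slot share a lock: */
	return t->l + (h & (BUCKET_NOCOW_LOCKS - 1));
}

The nocow write path takes these locks with BUCKET_NOCOW_LOCK_UPDATE, while data moves in data_update.c take them with flags == 0, so an in-flight in-place write and a move of the same bucket exclude each other.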
-rw-r--r--	fs/bcachefs/Makefile	1
-rw-r--r--	fs/bcachefs/alloc_foreground.c	5
-rw-r--r--	fs/bcachefs/alloc_types.h	1
-rw-r--r--	fs/bcachefs/bcachefs.h	7
-rw-r--r--	fs/bcachefs/bcachefs_format.h	7
-rw-r--r--	fs/bcachefs/btree_io.c	3
-rw-r--r--	fs/bcachefs/buckets_waiting_for_journal.c	3
-rw-r--r--	fs/bcachefs/data_update.c	10
-rw-r--r--	fs/bcachefs/extents.c	13
-rw-r--r--	fs/bcachefs/extents.h	1
-rw-r--r--	fs/bcachefs/fs-io.c	103
-rw-r--r--	fs/bcachefs/fs.h	11
-rw-r--r--	fs/bcachefs/inode.h	2
-rw-r--r--	fs/bcachefs/io.c	233
-rw-r--r--	fs/bcachefs/io.h	3
-rw-r--r--	fs/bcachefs/io_types.h	2
-rw-r--r--	fs/bcachefs/move.c	9
-rw-r--r--	fs/bcachefs/nocow_locking.c	16
-rw-r--r--	fs/bcachefs/nocow_locking.h	55
-rw-r--r--	fs/bcachefs/opts.h	7
-rw-r--r--	fs/bcachefs/super.h	7
21 files changed, 469 insertions, 30 deletions
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
index c4db30e13379..4085f1ee7bb9 100644
--- a/fs/bcachefs/Makefile
+++ b/fs/bcachefs/Makefile
@@ -50,6 +50,7 @@ bcachefs-y := \
migrate.o \
move.o \
movinggc.o \
+ nocow_locking.o \
opts.o \
quota.o \
rebalance.o \
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 949c068ebd0f..c4f971c12a51 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -216,6 +216,11 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
return NULL;
}
+ if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
+ s->skipped_nocow++;
+ return NULL;
+ }
+
spin_lock(&c->freelist_lock);
if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
index e66a85f7a9a3..271b4bf2b95e 100644
--- a/fs/bcachefs/alloc_types.h
+++ b/fs/bcachefs/alloc_types.h
@@ -13,6 +13,7 @@ struct bucket_alloc_state {
u64 buckets_seen;
u64 skipped_open;
u64 skipped_need_journal_commit;
+ u64 skipped_nocow;
u64 skipped_nouse;
};
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 8a43fcfa0a8c..7230ac788cd5 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -206,6 +206,7 @@
#include "bcachefs_format.h"
#include "errcode.h"
#include "fifo.h"
+#include "nocow_locking.h"
#include "opts.h"
#include "util.h"
@@ -349,7 +350,8 @@ BCH_DEBUG_PARAMS_DEBUG()
x(journal_flush_seq) \
x(blocked_journal) \
x(blocked_allocate) \
- x(blocked_allocate_open_bucket)
+ x(blocked_allocate_open_bucket) \
+ x(nocow_lock_contended)
enum bch_time_stats {
#define x(name) BCH_TIME_##name,
@@ -847,6 +849,8 @@ struct bch_fs {
struct bio_set bio_write;
struct mutex bio_bounce_pages_lock;
mempool_t bio_bounce_pages;
+ struct bucket_nocow_lock_table
+ nocow_locks;
struct rhashtable promote_table;
mempool_t compression_bounce[2];
@@ -908,6 +912,7 @@ struct bch_fs {
struct bio_set writepage_bioset;
struct bio_set dio_write_bioset;
struct bio_set dio_read_bioset;
+ struct bio_set nocow_flush_bioset;
/* ERRORS */
struct list_head fsck_errors;
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 174b0fa10923..6669a8e6c0bb 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -795,7 +795,8 @@ struct bch_inode_generation {
x(bi_dir_offset, 64) \
x(bi_subvol, 32) \
x(bi_parent_subvol, 32) \
- x(bi_tmpdir, 8)
+ x(bi_tmpdir, 8) \
+ x(bi_nocow, 8)
/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS() \
@@ -808,7 +809,8 @@ struct bch_inode_generation {
x(foreground_target, 16) \
x(background_target, 16) \
x(erasure_code, 16) \
- x(tmpdir, 8)
+ x(tmpdir, 8) \
+ x(nocow, 8)
enum inode_opt_id {
#define x(name, ...) \
@@ -1694,6 +1696,7 @@ LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
/* Obsolete, always enabled: */
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
+LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34);
/*
* Features:
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index cee3b500d45b..d53b7f2f2818 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1798,7 +1798,8 @@ static void btree_write_submit(struct work_struct *work)
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
ptr->offset += wbio->sector_offset;
- bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k);
+ bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
+ &tmp.k, false);
}
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
diff --git a/fs/bcachefs/buckets_waiting_for_journal.c b/fs/bcachefs/buckets_waiting_for_journal.c
index 2e5b955080de..0f4ef9e5a431 100644
--- a/fs/bcachefs/buckets_waiting_for_journal.c
+++ b/fs/bcachefs/buckets_waiting_for_journal.c
@@ -3,6 +3,7 @@
#include "bcachefs.h"
#include "buckets_waiting_for_journal.h"
#include <linux/random.h>
+#include <linux/siphash.h>
static inline struct bucket_hashed *
bucket_hash(struct buckets_waiting_for_journal_table *t,
@@ -10,7 +11,7 @@ bucket_hash(struct buckets_waiting_for_journal_table *t,
{
unsigned h = siphash_1u64(dev_bucket, &t->hash_seeds[hash_seed_idx]);
- BUG_ON(!is_power_of_2(t->size));
+ EBUG_ON(!is_power_of_2(t->size));
return t->d + (h & (t->size - 1));
}
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 6caeae8f60c1..8eaf93ff2716 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -294,6 +294,13 @@ void bch2_data_update_read_done(struct data_update *m,
void bch2_data_update_exit(struct data_update *update)
{
struct bch_fs *c = update->op.c;
+ struct bkey_ptrs_c ptrs =
+ bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr), 0);
bch2_bkey_buf_exit(&update->k, c);
bch2_disk_reservation_put(c, &update->op.res);
@@ -425,6 +432,9 @@ int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
m->op.incompressible = true;
i++;
+
+ bch2_bucket_nocow_lock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, &p.ptr), 0);
}
if (reserve_sectors) {
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index cec674de90c8..e5e700d201c3 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -665,22 +665,21 @@ unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
return replicas;
}
-static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
- struct extent_ptr_decoded p)
+unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
unsigned durability = 0;
struct bch_dev *ca;
- if (p.ptr.cached)
+ if (p->ptr.cached)
return 0;
- ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ ca = bch_dev_bkey_exists(c, p->ptr.dev);
if (ca->mi.state != BCH_MEMBER_STATE_failed)
durability = max_t(unsigned, durability, ca->mi.durability);
- if (p.has_ec)
- durability += p.ec.redundancy;
+ if (p->has_ec)
+ durability += p->ec.redundancy;
return durability;
}
@@ -693,7 +692,7 @@ unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
unsigned durability = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- durability += bch2_extent_ptr_durability(c, p);
+ durability += bch2_extent_ptr_durability(c, &p);
return durability;
}
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 6a4b7f33b7dc..3e6b6a863ace 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -590,6 +590,7 @@ bool bch2_bkey_is_incompressible(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
+unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 1794f172e4c1..ed60ac403332 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -35,6 +35,80 @@
#include <trace/events/bcachefs.h>
#include <trace/events/writeback.h>
+static inline void bch2_inode_mark_nocow_writes(struct bch_inode_info *inode, struct bch_write_op *op)
+{
+ unsigned i;
+
+ for (i = 0; i < op->nocow_devs.nr; i++)
+ set_bit(op->nocow_devs.devs[i], inode->ei_devs_need_flush.d);
+}
+
+struct nocow_flush {
+ struct closure *cl;
+ struct bch_dev *ca;
+ struct bio bio;
+};
+
+static void nocow_flush_endio(struct bio *_bio)
+{
+
+ struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
+
+ closure_put(bio->cl);
+ percpu_ref_put(&bio->ca->io_ref);
+ bio_put(&bio->bio);
+}
+
+static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct closure *cl)
+{
+ struct nocow_flush *bio;
+ struct bch_dev *ca;
+ struct bch_devs_mask devs;
+ unsigned dev;
+
+ dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
+ if (dev == BCH_SB_MEMBERS_MAX)
+ return;
+
+ devs = inode->ei_devs_need_flush;
+ memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
+
+ for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
+ rcu_read_lock();
+ ca = rcu_dereference(c->devs[dev]);
+ if (ca && !percpu_ref_tryget(&ca->io_ref))
+ ca = NULL;
+ rcu_read_unlock();
+
+ if (!ca)
+ continue;
+
+ bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
+ REQ_OP_FLUSH,
+ GFP_KERNEL,
+ &c->nocow_flush_bioset),
+ struct nocow_flush, bio);
+ bio->cl = cl;
+ bio->ca = ca;
+ bio->bio.bi_end_io = nocow_flush_endio;
+ closure_bio_submit(&bio->bio, cl);
+ }
+}
+
+static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
+ struct bch_inode_info *inode)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ bch2_inode_flush_nocow_writes_async(c, inode, &cl);
+ closure_sync(&cl);
+
+ return 0;
+}
+
static inline bool bio_full(struct bio *bio, unsigned len)
{
if (bio->bi_vcnt >= bio->bi_max_vecs)
@@ -1253,6 +1327,8 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
}
}
+ bch2_inode_mark_nocow_writes(io->inode, &io->op);
+
/*
* racing with fallocate can cause us to add fewer sectors than
* expected - but we shouldn't add more sectors than expected:
@@ -2114,10 +2190,12 @@ static noinline void bch2_dio_write_flush(struct dio_write *dio)
if (!dio->op.error) {
ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
- if (ret)
+ if (ret) {
dio->op.error = ret;
- else
+ } else {
bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
+ bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
+ }
}
if (dio->sync) {
@@ -2171,6 +2249,8 @@ static __always_inline void bch2_dio_write_end(struct dio_write *dio)
struct bvec_iter_all iter;
struct bio_vec *bv;
+ bch2_inode_mark_nocow_writes(inode, &dio->op);
+
req->ki_pos += (u64) dio->op.written << 9;
dio->written += dio->op.written;
@@ -2469,19 +2549,21 @@ out:
* inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
* insert trigger: look up the btree inode instead
*/
-static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
+static int bch2_flush_inode(struct bch_fs *c,
+ struct bch_inode_info *inode)
{
- struct bch_inode_unpacked inode;
+ struct bch_inode_unpacked u;
int ret;
if (c->opts.journal_flush_disabled)
return 0;
- ret = bch2_inode_find_by_inum(c, inum, &inode);
+ ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
if (ret)
return ret;
- return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
+ return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+ bch2_inode_flush_nocow_writes(c, inode);
}
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
@@ -2495,7 +2577,7 @@ int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
ret = file_write_and_wait_range(file, start, end);
ret2 = sync_inode_metadata(&inode->v, 1);
- ret3 = bch2_flush_inode(c, inode_inum(inode));
+ ret3 = bch2_flush_inode(c, inode);
return bch2_err_class(ret ?: ret2 ?: ret3);
}
@@ -3349,7 +3431,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
IS_SYNC(file_inode(file_dst)))
- ret = bch2_flush_inode(c, inode_inum(dst));
+ ret = bch2_flush_inode(c, dst);
err:
bch2_quota_reservation_put(c, dst, &quota_res);
bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
@@ -3605,6 +3687,7 @@ loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
void bch2_fs_fsio_exit(struct bch_fs *c)
{
+ bioset_exit(&c->nocow_flush_bioset);
bioset_exit(&c->dio_write_bioset);
bioset_exit(&c->dio_read_bioset);
bioset_exit(&c->writepage_bioset);
@@ -3624,7 +3707,9 @@ int bch2_fs_fsio_init(struct bch_fs *c)
BIOSET_NEED_BVECS) ||
bioset_init(&c->dio_write_bioset,
4, offsetof(struct dio_write, op.wbio.bio),
- BIOSET_NEED_BVECS))
+ BIOSET_NEED_BVECS) ||
+ bioset_init(&c->nocow_flush_bioset,
+ 1, offsetof(struct nocow_flush, bio), 0))
ret = -ENOMEM;
pr_verbose_init(c->opts, "ret %i", ret);
diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h
index b11a1508b6b3..6b91bbe91116 100644
--- a/fs/bcachefs/fs.h
+++ b/fs/bcachefs/fs.h
@@ -26,6 +26,17 @@ struct bch_inode_info {
u32 ei_subvol;
+ /*
+ * When we've been doing nocow writes we'll need to issue flushes to the
+ * underlying block devices
+ *
+ * XXX: a device may have had a flush issued by some other codepath. It
+ * would be better to keep for each device a sequence number that's
+ * incremented when we issue a cache flush, and track here the sequence
+ * number that needs flushing.
+ */
+ struct bch_devs_mask ei_devs_need_flush;
+
/* copy of inode in btree: */
struct bch_inode_unpacked ei_inode;
};
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index 3739e9f34f4f..8cb9a3023992 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -157,6 +157,8 @@ io_opts(struct bch_fs *c, struct bch_inode_unpacked *inode)
struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
bch2_io_opts_apply(&opts, bch2_inode_opts_get(inode));
+ if (opts.nocow)
+ opts.compression = opts.background_compression = opts.data_checksum = opts.erasure_code = 0;
return opts;
}
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 3d7bb5097765..87aaf05e5efd 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -513,7 +513,8 @@ static int bch2_write_index_default(struct bch_write_op *op)
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
enum bch_data_type type,
- const struct bkey_i *k)
+ const struct bkey_i *k,
+ bool have_ioref)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
const struct bch_extent_ptr *ptr;
@@ -547,7 +548,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
n->c = c;
n->dev = ptr->dev;
- n->have_ioref = bch2_dev_get_ioref(ca,
+ n->have_ioref = have_ioref || bch2_dev_get_ioref(ca,
type == BCH_DATA_btree ? READ : WRITE);
n->submit_time = local_clock();
n->bio.bi_iter.bi_sector = ptr->offset;
@@ -1150,6 +1151,219 @@ err:
return ret;
}
+static bool bch2_extent_is_writeable(struct bch_write_op *op,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = op->c;
+ struct bkey_s_c_extent e;
+ struct extent_ptr_decoded p;
+ const union bch_extent_entry *entry;
+ unsigned replicas = 0;
+
+ if (k.k->type != KEY_TYPE_extent)
+ return false;
+
+ e = bkey_s_c_to_extent(k);
+ extent_for_each_ptr_decode(e, p, entry) {
+ if (p.crc.csum_type ||
+ crc_is_compressed(p.crc) ||
+ p.has_ec)
+ return false;
+
+ replicas += bch2_extent_ptr_durability(c, &p);
+ }
+
+ return replicas >= op->opts.data_replicas;
+}
+
+static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ const struct bch_extent_ptr *ptr;
+ struct bkey_i *k;
+
+ for_each_keylist_key(&op->insert_keys, k) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
+
+ bkey_for_each_ptr(ptrs, ptr)
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr),
+ BUCKET_NOCOW_LOCK_UPDATE);
+ }
+}
+
+static void bch2_nocow_write_done(struct closure *cl)
+{
+ struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+
+ bch2_nocow_write_unlock(op);
+
+ if (unlikely(op->flags & BCH_WRITE_IO_ERROR))
+ op->error = -EIO;
+ bch2_write_done(cl);
+}
+
+static void bch2_nocow_write(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct bio *bio = &op->wbio.bio;
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_ptrs_c ptrs;
+ const struct bch_extent_ptr *ptr, *ptr2;
+ u32 snapshot;
+ int ret;
+
+ if (op->flags & BCH_WRITE_MOVE)
+ return;
+
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans, op->subvol, &snapshot);
+ if (unlikely(ret))
+ goto err;
+
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+ SPOS(op->pos.inode, op->pos.offset, snapshot),
+ BTREE_ITER_SLOTS);
+ while (1) {
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ break;
+
+ /* fall back to normal cow write path? */
+ if (unlikely(k.k->p.snapshot != snapshot ||
+ !bch2_extent_is_writeable(op, k)))
+ break;
+
+ if (bch2_keylist_realloc(&op->insert_keys,
+ op->inline_keys,
+ ARRAY_SIZE(op->inline_keys),
+ k.k->u64s))
+ break;
+
+ /* Get iorefs before dropping btree locks: */
+ ptrs = bch2_bkey_ptrs_c(k);
+ bkey_for_each_ptr(ptrs, ptr)
+ if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
+ goto err_get_ioref;
+
+ /* Unlock before taking nocow locks, doing IO: */
+ bkey_reassemble(op->insert_keys.top, k);
+ bch2_trans_unlock(&trans);
+
+ if (bkey_cmp(op->pos, op->insert_keys.top->k.p))
+ bch2_cut_front(op->pos, op->insert_keys.top);
+
+ ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(op->insert_keys.top));
+ bkey_for_each_ptr(ptrs, ptr) {
+ /*
+ * XXX: since we'll be reading from multiple extents
+ * that could live on multiple devices, this could
+ * overflow bch_devs_list
+ *
+ * perhaps we should stash a pointer to
+ * bch_inode_info->ei_devs_need_flush?
+ *
+ * it would need to be updated after bio completion,
+ * before completing bch_write_op
+ */
+ bch2_dev_list_add_dev(&op->nocow_devs, ptr->dev);
+ bch2_bucket_nocow_lock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr),
+ BUCKET_NOCOW_LOCK_UPDATE);
+ if (unlikely(ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
+ goto err_bucket_stale;
+ }
+
+ bio = &op->wbio.bio;
+ if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
+ bio = bio_split(bio, k.k->p.offset - op->pos.offset,
+ GFP_KERNEL, &c->bio_write);
+ wbio_init(bio)->put_bio = true;
+ bio->bi_opf = op->wbio.bio.bi_opf;
+ } else {
+ op->flags |= BCH_WRITE_DONE;
+ }
+
+ op->pos.offset += bio_sectors(bio);
+ op->written += bio_sectors(bio);
+
+ bio->bi_end_io = bch2_write_endio;
+ bio->bi_private = &op->cl;
+ bio->bi_opf |= REQ_OP_WRITE;
+ closure_get(&op->cl);
+ bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
+ op->insert_keys.top, true);
+
+ bch2_keylist_push(&op->insert_keys);
+ if (op->flags & BCH_WRITE_DONE)
+ break;
+ bch2_btree_iter_advance(&iter);
+ }
+out:
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ if (ret) {
+ bch_err_inum_ratelimited(c, op->pos.inode,
+ "%s: error %s", __func__, bch2_err_str(ret));
+ op->error = ret;
+ op->flags |= BCH_WRITE_DONE;
+ }
+
+ bch2_trans_exit(&trans);
+
+ /* fallback to cow write path? */
+ if (!(op->flags & BCH_WRITE_DONE)) {
+ closure_sync(&op->cl);
+ bch2_nocow_write_unlock(op);
+ op->insert_keys.top = op->insert_keys.keys;
+ } else if (op->flags & BCH_WRITE_SYNC) {
+ closure_sync(&op->cl);
+ bch2_nocow_write_done(&op->cl);
+ } else {
+ /*
+ * XXX
+ * needs to run out of process context because ei_quota_lock is
+ * a mutex
+ */
+ continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
+ }
+ return;
+err_get_ioref:
+ bkey_for_each_ptr(ptrs, ptr2) {
+ if (ptr2 == ptr)
+ break;
+
+ percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref);
+ }
+
+ /* Fall back to COW path: */
+ goto out;
+err_bucket_stale:
+ bkey_for_each_ptr(ptrs, ptr2) {
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr2),
+ BUCKET_NOCOW_LOCK_UPDATE);
+ if (ptr2 == ptr)
+ break;
+ }
+
+ bkey_for_each_ptr(ptrs, ptr2)
+ percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref);
+
+ /* We can retry this: */
+ ret = BCH_ERR_transaction_restart;
+ goto out;
+}
+
static void __bch2_write(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
@@ -1159,6 +1373,12 @@ static void __bch2_write(struct bch_write_op *op)
int ret;
nofs_flags = memalloc_nofs_save();
+
+ if (unlikely(op->opts.nocow)) {
+ bch2_nocow_write(op);
+ if (op->flags & BCH_WRITE_DONE)
+ goto out_nofs_restore;
+ }
again:
memset(&op->failed, 0, sizeof(op->failed));
op->btree_update_ready = false;
@@ -1230,7 +1450,7 @@ again:
key_to_write_offset);
bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
- key_to_write);
+ key_to_write, false);
} while (ret);
/*
@@ -1255,7 +1475,7 @@ again:
continue_at(&op->cl, bch2_write_index, NULL);
}
-
+out_nofs_restore:
memalloc_nofs_restore(nofs_flags);
}
@@ -2462,6 +2682,11 @@ void bch2_fs_io_exit(struct bch_fs *c)
int bch2_fs_io_init(struct bch_fs *c)
{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(c->nocow_locks.l); i++)
+ two_state_lock_init(&c->nocow_locks.l[i]);
+
if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
BIOSET_NEED_BVECS) ||
bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
diff --git a/fs/bcachefs/io.h b/fs/bcachefs/io.h
index ce0a6ca5c3e2..617804ec6105 100644
--- a/fs/bcachefs/io.h
+++ b/fs/bcachefs/io.h
@@ -18,7 +18,7 @@ void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
void bch2_latency_acct(struct bch_dev *, u64, int);
void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
- enum bch_data_type, const struct bkey_i *);
+ enum bch_data_type, const struct bkey_i *, bool);
#define BLK_STS_REMOVED ((__force blk_status_t)128)
@@ -87,6 +87,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
op->res = (struct disk_reservation) { 0 };
op->new_i_size = U64_MAX;
op->i_sectors_delta = 0;
+ op->nocow_devs.nr = 0;
}
void bch2_write(struct closure *);
diff --git a/fs/bcachefs/io_types.h b/fs/bcachefs/io_types.h
index a91635d1e70b..62355913d360 100644
--- a/fs/bcachefs/io_types.h
+++ b/fs/bcachefs/io_types.h
@@ -148,6 +148,8 @@ struct bch_write_op {
struct keylist insert_keys;
u64 inline_keys[BKEY_EXTENT_U64s_MAX * 2];
+ struct bch_devs_list nocow_devs;
+
/* Must be last: */
struct bch_write_bio wbio;
};
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index e039aa54782a..a138bf1d4082 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -261,6 +261,12 @@ static int bch2_move_extent(struct btree_trans *trans,
if (!percpu_ref_tryget_live(&c->writes))
return -EROFS;
+ /*
+ * Before memory allocations & taking nocow locks in
+ * bch2_data_update_init():
+ */
+ bch2_trans_unlock(trans);
+
/* write path might have to decompress data: */
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
@@ -508,6 +514,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
*/
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
+ bch2_trans_unlock(&trans);
ret2 = bch2_move_extent(&trans, &iter, ctxt, io_opts,
btree_id, k, data_opts);
@@ -606,7 +613,7 @@ again:
prt_str(&buf, "failed to evacuate bucket ");
bch2_bkey_val_to_text(&buf, c, k);
- bch_err(c, "%s", buf.buf);
+ bch_err_ratelimited(c, "%s", buf.buf);
printbuf_exit(&buf);
}
}
diff --git a/fs/bcachefs/nocow_locking.c b/fs/bcachefs/nocow_locking.c
new file mode 100644
index 000000000000..54e86697bee4
--- /dev/null
+++ b/fs/bcachefs/nocow_locking.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "nocow_locking.h"
+#include "util.h"
+
+void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+ struct bpos bucket, int flags)
+{
+ struct bch_fs *c = container_of(t, struct bch_fs, nocow_locks);
+ two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+ u64 start_time = local_clock();
+
+ bch2_two_state_lock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
+ bch2_time_stats_update(&c->times[BCH_TIME_nocow_lock_contended], start_time);
+}
diff --git a/fs/bcachefs/nocow_locking.h b/fs/bcachefs/nocow_locking.h
new file mode 100644
index 000000000000..09ab85ac0f9f
--- /dev/null
+++ b/fs/bcachefs/nocow_locking.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_NOCOW_LOCKING_H
+#define _BCACHEFS_NOCOW_LOCKING_H
+
+#include "bcachefs_format.h"
+#include "two_state_shared_lock.h"
+
+#include <linux/siphash.h>
+
+#define BUCKET_NOCOW_LOCKS (1U << 10)
+
+struct bucket_nocow_lock_table {
+ siphash_key_t key;
+ two_state_lock_t l[BUCKET_NOCOW_LOCKS];
+};
+
+#define BUCKET_NOCOW_LOCK_UPDATE (1 << 0)
+
+static inline two_state_lock_t *bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+ struct bpos bucket)
+{
+ u64 dev_bucket = bucket.inode << 56 | bucket.offset;
+ unsigned h = siphash_1u64(dev_bucket, &t->key);
+
+ return t->l + (h & (BUCKET_NOCOW_LOCKS - 1));
+}
+
+static inline bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t,
+ struct bpos bucket)
+{
+ two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+
+ return atomic_long_read(&l->v) != 0;
+}
+
+static inline void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t,
+ struct bpos bucket, int flags)
+{
+ two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+
+ bch2_two_state_unlock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
+}
+
+void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *, struct bpos, int);
+
+static inline void bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+ struct bpos bucket, int flags)
+{
+ two_state_lock_t *l = bucket_nocow_lock(t, bucket);
+
+ if (!bch2_two_state_trylock(l, flags & BUCKET_NOCOW_LOCK_UPDATE))
+ __bch2_bucket_nocow_lock(t, bucket, flags);
+}
+
+#endif /* _BCACHEFS_NOCOW_LOCKING_H */
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index 52621446f77a..dcb71c4fe772 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -387,6 +387,13 @@ enum opt_type {
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
NULL, NULL) \
+ x(nocow, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
+ OPT_BOOL(), \
+ BCH_SB_NOCOW, false, \
+ NULL, "Nocow mode: Writes will be done in place when possible.\n"\
+ "Snapshots and reflink will still caused writes to be COW\n"\
+ "Implicitly disables data checksumming and compression")\
x(fs_size, u64, \
OPT_DEVICE, \
OPT_UINT(0, S64_MAX), \
diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h
index 8501adaff4c2..3c83e9b9cb7b 100644
--- a/fs/bcachefs/super.h
+++ b/fs/bcachefs/super.h
@@ -88,9 +88,10 @@ static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
unsigned dev)
{
- BUG_ON(bch2_dev_list_has_dev(*devs, dev));
- BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
- devs->devs[devs->nr++] = dev;
+ if (!bch2_dev_list_has_dev(*devs, dev)) {
+ BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
+ devs->devs[devs->nr++] = dev;
+ }
}
static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)