summary refs log tree commit diff
path: root/fs/bcachefs/io_write.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/bcachefs/io_write.c')
-rw-r--r-- fs/bcachefs/io_write.c | 38
1 file changed, 17 insertions(+), 21 deletions(-)
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index fa077341d2ef..d7620138e038 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -55,14 +55,9 @@ static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
s64 latency_over = io_latency - latency_threshold;
if (latency_threshold && latency_over > 0) {
- /*
- * bump up congested by approximately latency_over * 4 /
- * latency_threshold - we don't need much accuracy here so don't
- * bother with the divide:
- */
if (atomic_read(&ca->congested) < CONGESTED_MAX)
- atomic_add(latency_over >>
- max_t(int, ilog2(latency_threshold) - 2, 0),
+ atomic_add((u32) min(U32_MAX, io_latency * 2) /
+ (u32) min(U32_MAX, latency_threshold),
&ca->congested);
ca->congested_last = now;
@@ -261,7 +256,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
s64 bi_sectors = le64_to_cpu(inode->v.bi_sectors);
if (unlikely(bi_sectors + i_sectors_delta < 0)) {
struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
+ CLASS(printbuf, buf)();
bch2_log_msg_start(c, &buf);
prt_printf(&buf, "inode %llu i_sectors underflow: %lli + %lli < 0",
extent_iter->pos.inode, bi_sectors, i_sectors_delta);
@@ -269,7 +264,6 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
bool print = bch2_count_fsck_err(c, inode_i_sectors_underflow, &buf);
if (print)
bch2_print_str(c, KERN_ERR, buf.buf);
- printbuf_exit(&buf);
if (i_sectors_delta < 0)
i_sectors_delta = -bi_sectors;
@@ -375,7 +369,6 @@ static int bch2_write_index_default(struct bch_write_op *op)
struct bkey_buf sk;
struct keylist *keys = &op->insert_keys;
struct bkey_i *k = bch2_keylist_front(keys);
- struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
subvol_inum inum = {
.subvol = op->subvol,
@@ -385,6 +378,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
BUG_ON(!inum.subvol);
+ CLASS(btree_trans, trans)(c);
bch2_bkey_buf_init(&sk);
do {
@@ -421,7 +415,6 @@ static int bch2_write_index_default(struct bch_write_op *op)
bch2_cut_front(iter.pos, k);
} while (!bch2_keylist_empty(keys));
- bch2_trans_put(trans);
bch2_bkey_buf_exit(&sk, c);
return ret;
@@ -431,7 +424,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
void bch2_write_op_error(struct bch_write_op *op, u64 offset, const char *fmt, ...)
{
- struct printbuf buf = PRINTBUF;
+ CLASS(printbuf, buf)();
if (op->subvol) {
bch2_inum_offset_err_msg(op->c, &buf,
@@ -458,7 +451,6 @@ void bch2_write_op_error(struct bch_write_op *op, u64 offset, const char *fmt, .
}
bch_err_ratelimited(op->c, "%s", buf.buf);
- printbuf_exit(&buf);
}
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
@@ -470,8 +462,8 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
struct bch_write_bio *n;
unsigned ref_rw = type == BCH_DATA_btree ? READ : WRITE;
unsigned ref_idx = type == BCH_DATA_btree
- ? BCH_DEV_READ_REF_btree_node_write
- : BCH_DEV_WRITE_REF_io_write;
+ ? (unsigned) BCH_DEV_READ_REF_btree_node_write
+ : (unsigned) BCH_DEV_WRITE_REF_io_write;
BUG_ON(c->opts.nochanges);
@@ -1223,6 +1215,7 @@ static bool bch2_extent_is_writeable(struct bch_write_op *op,
static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
struct btree_iter *iter,
+ struct bch_write_op *op,
struct bkey_i *orig,
struct bkey_s_c k,
u64 new_i_size)
@@ -1232,11 +1225,13 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
return 0;
}
- struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
+ struct bkey_i *new = bch2_trans_kmalloc_nomemzero(trans,
+ bkey_bytes(k.k) + sizeof(struct bch_extent_rebalance));
int ret = PTR_ERR_OR_ZERO(new);
if (ret)
return ret;
+ bkey_reassemble(new, k);
bch2_cut_front(bkey_start_pos(&orig->k), new);
bch2_cut_back(orig->k.p, new);
@@ -1244,6 +1239,8 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
bkey_for_each_ptr(ptrs, ptr)
ptr->unwritten = 0;
+ bch2_bkey_set_needs_rebalance(op->c, &op->opts, new);
+
/*
* Note that we're not calling bch2_subvol_get_snapshot() in this path -
* that was done when we kicked off the write, and here it's important
@@ -1268,7 +1265,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
bkey_start_pos(&orig->k), orig->k.p,
BTREE_ITER_intent, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
- bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
+ bch2_nocow_write_convert_one_unwritten(trans, &iter, op, orig, k, op->new_i_size);
}));
if (ret)
break;
@@ -1477,7 +1474,7 @@ err_bucket_stale:
break;
}
- struct printbuf buf = PRINTBUF;
+ CLASS(printbuf, buf)();
if (bch2_fs_inconsistent_on(stale < 0, c,
"pointer to invalid bucket in nocow path on device %llu\n %s",
stale_at->b.inode,
@@ -1487,7 +1484,6 @@ err_bucket_stale:
/* We can retry this: */
ret = bch_err_throw(c, transaction_restart);
}
- printbuf_exit(&buf);
goto err_get_ioref;
}
@@ -1531,7 +1527,7 @@ again:
* freeing up space on specific disks, which means that
* allocations for specific disks may hang arbitrarily long:
*/
- ret = bch2_trans_run(c, lockrestart_do(trans,
+ ret = bch2_trans_do(c,
bch2_alloc_sectors_start_trans(trans,
op->target,
op->opts.erasure_code && !(op->flags & BCH_WRITE_cached),
@@ -1541,7 +1537,7 @@ again:
op->nr_replicas_required,
op->watermark,
op->flags,
- &op->cl, &wp)));
+ &op->cl, &wp));
if (unlikely(ret)) {
if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
break;