author	Kent Overstreet <kent.overstreet@gmail.com>	2021-03-31 21:44:55 -0400
committer	Kent Overstreet <kent.overstreet@gmail.com>	2022-04-17 15:42:53 -0400
commit	31b1559e2e2a44c3f35257c6c5fdd5205ffac6c9
tree	c11d4deaa9d66f057f88f2f921d70e25e15f1b74 /fs/bcachefs/journal.c
parent	2fd076d1798eff1cb6e5c2a84443f264781ada83
bcachefs: Don't flush btree writes more aggressively because of btree key cache
We need to flush the btree key cache when it's too dirty, because otherwise
the shrinker won't be able to reclaim memory - this is done by journal
reclaim. But journal reclaim also kicks btree node writes: this meant that
btree node writes were getting kicked much too often just because we needed
to flush btree key cache keys.

This patch splits journal pins into two different lists, and teaches journal
reclaim to not flush btree node writes when it only needs to flush key cache
keys.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
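For context, the fields initialized by the new journal_pin_list_init() in the
diff below imply a per-journal-entry pin structure roughly like the following
sketch; the field names come from this patch, but the exact types and layout
in journal_types.h are an assumption, not shown by this diff:

	/*
	 * Sketch of the pin list implied by this patch: field names are
	 * taken from journal_pin_list_init() below; types are assumed.
	 */
	struct journal_entry_pin_list {
		struct list_head	list;		/* btree node write pins et al. */
		struct list_head	key_cache_list;	/* new: key cache pins only */
		struct list_head	flushed;
		atomic_t		count;
		struct bch_devs_list	devs;		/* p->devs.nr below */
	};

Keeping key cache pins on their own list is what lets reclaim flush them
without touching the pins that would kick btree node writes.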
Diffstat (limited to 'fs/bcachefs/journal.c')
-rw-r--r--	fs/bcachefs/journal.c	30
1 file changed, 14 insertions(+), 16 deletions(-)
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index da3c598fab69..14fa3be5626a 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -60,21 +60,23 @@ journal_seq_to_buf(struct journal *j, u64 seq)
 	return buf;
 }
 
-static void journal_pin_new_entry(struct journal *j, int count)
+static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
 {
-	struct journal_entry_pin_list *p;
+	INIT_LIST_HEAD(&p->list);
+	INIT_LIST_HEAD(&p->key_cache_list);
+	INIT_LIST_HEAD(&p->flushed);
+	atomic_set(&p->count, count);
+	p->devs.nr = 0;
+}
 
+static void journal_pin_new_entry(struct journal *j)
+{
 	/*
 	 * The fifo_push() needs to happen at the same time as j->seq is
 	 * incremented for journal_last_seq() to be calculated correctly
 	 */
 	atomic64_inc(&j->seq);
-	p = fifo_push_ref(&j->pin);
-
-	INIT_LIST_HEAD(&p->list);
-	INIT_LIST_HEAD(&p->flushed);
-	atomic_set(&p->count, count);
-	p->devs.nr = 0;
+	journal_pin_list_init(fifo_push_ref(&j->pin), 1);
 }
 
 static void bch2_journal_buf_init(struct journal *j)
@@ -193,7 +195,7 @@ static bool __journal_entry_close(struct journal *j)
 	__bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));
 
 	/* Initialize new buffer: */
-	journal_pin_new_entry(j, 1);
+	journal_pin_new_entry(j);
 
 	bch2_journal_buf_init(j);
 
@@ -1031,12 +1033,8 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
 	j->pin.back = cur_seq;
 	atomic64_set(&j->seq, cur_seq - 1);
 
-	fifo_for_each_entry_ptr(p, &j->pin, seq) {
-		INIT_LIST_HEAD(&p->list);
-		INIT_LIST_HEAD(&p->flushed);
-		atomic_set(&p->count, 1);
-		p->devs.nr = 0;
-	}
+	fifo_for_each_entry_ptr(p, &j->pin, seq)
+		journal_pin_list_init(p, 1);
 
 	list_for_each_entry(i, journal_entries, list) {
 		unsigned ptr;
@@ -1059,7 +1057,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
 	set_bit(JOURNAL_STARTED, &j->flags);
 	j->last_flush_write = jiffies;
 
-	journal_pin_new_entry(j, 1);
+	journal_pin_new_entry(j);
 
 	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
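The reclaim-side half of the change described in the commit message lives in
journal_reclaim.c, which is outside this diff. A minimal sketch of the idea,
assuming a hypothetical helper (the name, signature, and flag are
illustrative, not the actual bcachefs API):

	/*
	 * Hypothetical sketch of the selection this commit describes: when
	 * reclaim only needs to shrink the btree key cache, walk the new
	 * key_cache_list and leave the pins that would kick btree node
	 * writes (on ->list) alone.
	 */
	static struct journal_entry_pin *
	journal_get_next_pin(struct journal_entry_pin_list *pin_list,
			     bool key_cache_only)
	{
		struct list_head *head = key_cache_only
			? &pin_list->key_cache_list
			: &pin_list->list;

		return list_first_entry_or_null(head,
				struct journal_entry_pin, list);
	}

With the two lists split this way, a reclaim pass driven purely by key cache
dirtiness never visits the btree node write pins, which is the behavior the
commit message calls out as the fix.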