Diffstat (limited to 'fs/bcachefs/journal_reclaim.c')
-rw-r--r--  fs/bcachefs/journal_reclaim.c  593
1 file changed, 404 insertions(+), 189 deletions(-)
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 0e3e5b6abb39..695b2c8ba03b 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -1,49 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "journal.h"
+#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"
-/*
- * Journal entry pinning - machinery for holding a reference on a given journal
- * entry, holding it open to ensure it gets replayed during recovery:
- */
+/* Free space calculations: */
+
+static unsigned journal_space_from(struct journal_device *ja,
+ enum journal_space_from from)
+{
+ switch (from) {
+ case journal_space_discarded:
+ return ja->discard_idx;
+ case journal_space_clean_ondisk:
+ return ja->dirty_idx_ondisk;
+ case journal_space_clean:
+ return ja->dirty_idx;
+ default:
+ BUG();
+ }
+}
-static inline u64 journal_pin_seq(struct journal *j,
- struct journal_entry_pin_list *pin_list)
+unsigned bch2_journal_dev_buckets_available(struct journal *j,
+ struct journal_device *ja,
+ enum journal_space_from from)
{
- return fifo_entry_idx_abs(&j->pin, pin_list);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ unsigned available = (journal_space_from(ja, from) -
+ ja->cur_idx - 1 + ja->nr) % ja->nr;
+
+ /*
+ * Allocator startup needs some journal space before we can do journal
+ * replay:
+ */
+ if (available && test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags))
+ --available;
+
+ /*
+ * Don't use the last bucket unless writing the new last_seq
+ * will make another bucket available:
+ */
+ if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
+ --available;
+
+ return available;
+}
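
The modular arithmetic above is easiest to see with concrete numbers. Below is a toy model (illustrative values only, not from the patch) showing why counting free buckets up to the discard watermark yields less space than counting up to the clean watermark:

#include <stdio.h>

/* Toy model of the journal bucket ring. Field names mirror struct
 * journal_device, but the values are made up for illustration. */
struct toy_ja {
	unsigned nr;               /* total journal buckets */
	unsigned cur_idx;          /* bucket currently being written */
	unsigned dirty_idx;        /* oldest bucket still pinned in memory */
	unsigned dirty_idx_ondisk; /* oldest bucket dirty as of last write */
	unsigned discard_idx;      /* oldest bucket not yet discarded */
};

/* Buckets strictly between cur_idx and the given watermark, going
 * around the ring - the same (from - cur_idx - 1 + nr) % nr as above: */
static unsigned toy_buckets_available(const struct toy_ja *ja, unsigned from)
{
	return (from - ja->cur_idx - 1 + ja->nr) % ja->nr;
}

int main(void)
{
	struct toy_ja ja = {
		.nr = 8, .cur_idx = 5, .dirty_idx = 2,
		.dirty_idx_ondisk = 1, .discard_idx = 0,
	};

	/* Counting only fully discarded buckets gives the least space;
	 * counting every clean bucket gives the most: */
	printf("discarded:    %u\n", toy_buckets_available(&ja, ja.discard_idx));      /* 2 */
	printf("clean_ondisk: %u\n", toy_buckets_available(&ja, ja.dirty_idx_ondisk)); /* 3 */
	printf("clean:        %u\n", toy_buckets_available(&ja, ja.dirty_idx));        /* 4 */
	return 0;
}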
+
+static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
+{
+ union journal_preres_state old, new;
+ u64 v = atomic64_read(&j->prereserved.counter);
+
+ do {
+ old.v = new.v = v;
+ new.remaining = u64s_remaining;
+ } while ((v = atomic64_cmpxchg(&j->prereserved.counter,
+ old.v, new.v)) != old.v);
+}
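
journal_set_remaining() uses the standard lock-free read/modify/compare-exchange retry loop: snapshot the whole 64-bit word, rewrite one field, and retry if another CPU changed the word in between. A minimal sketch of the same idiom in C11 atomics - the union layout here is hypothetical, not bcachefs's actual journal_preres_state:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical packed state word: two fields that must stay consistent. */
union toy_preres {
	struct {
		uint32_t reserved;
		uint32_t remaining;
	};
	uint64_t v;
};

static void toy_set_remaining(_Atomic uint64_t *counter, uint32_t remaining)
{
	union toy_preres old, new;

	old.v = atomic_load(counter);
	do {
		new.v = old.v;             /* snapshot the whole word */
		new.remaining = remaining; /* rewrite just one field */
		/* on failure, compare_exchange writes the current value
		 * back into old.v, so the loop retries against it: */
	} while (!atomic_compare_exchange_weak(counter, &old.v, new.v));
}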
+
+static struct journal_space {
+ unsigned next_entry;
+ unsigned remaining;
+} __journal_space_available(struct journal *j, unsigned nr_devs_want,
+ enum journal_space_from from)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ unsigned sectors_next_entry = UINT_MAX;
+ unsigned sectors_total = UINT_MAX;
+ unsigned i, nr_devs = 0;
+ unsigned unwritten_sectors = j->reservations.prev_buf_unwritten
+ ? journal_prev_buf(j)->sectors
+ : 0;
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i,
+ &c->rw_devs[BCH_DATA_JOURNAL]) {
+ struct journal_device *ja = &ca->journal;
+ unsigned buckets_this_device, sectors_this_device;
+
+ if (!ja->nr)
+ continue;
+
+ buckets_this_device = bch2_journal_dev_buckets_available(j, ja, from);
+ sectors_this_device = ja->sectors_free;
+
+ /*
+ * Note that we don't allocate the space for a journal entry
+ * until we write it out - thus, account for it here:
+ */
+ if (unwritten_sectors >= sectors_this_device) {
+ if (!buckets_this_device)
+ continue;
+
+ buckets_this_device--;
+ sectors_this_device = ca->mi.bucket_size;
+ }
+
+ sectors_this_device -= unwritten_sectors;
+
+ if (sectors_this_device < ca->mi.bucket_size &&
+ buckets_this_device) {
+ buckets_this_device--;
+ sectors_this_device = ca->mi.bucket_size;
+ }
+
+ if (!sectors_this_device)
+ continue;
+
+ sectors_next_entry = min(sectors_next_entry,
+ sectors_this_device);
+
+ sectors_total = min(sectors_total,
+ buckets_this_device * ca->mi.bucket_size +
+ sectors_this_device);
+
+ nr_devs++;
+ }
+ rcu_read_unlock();
+
+ if (nr_devs < nr_devs_want)
+ return (struct journal_space) { 0, 0 };
+
+ return (struct journal_space) {
+ .next_entry = sectors_next_entry,
+ .remaining = max_t(int, 0, sectors_total - sectors_next_entry),
+ };
}
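
Concretely: per device, the available space is what's left in the current bucket plus whole free buckets, but since an entry can't span buckets, the next entry is bounded by the smallest contiguous chunk across the devices we want to write to. A worked example with made-up numbers (not from the patch):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Device A: 3 free buckets of 256 sectors + 100 sectors left in
	 * its current bucket. Device B: 1 free bucket of 512 sectors +
	 * 200 sectors left in its current bucket. */
	unsigned next_entry = MIN(100, 200);               /* 100 sectors */
	unsigned total      = MIN(3 * 256 + 100, 512 + 200); /* 712 sectors */

	printf("next_entry %u, remaining %u\n",
	       next_entry, total - next_entry);	/* next_entry 100, remaining 612 */
	return 0;
}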
-u64 bch2_journal_pin_seq(struct journal *j, struct journal_entry_pin *pin)
+void bch2_journal_space_available(struct journal *j)
{
- u64 ret = 0;
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ struct journal_space discarded, clean_ondisk, clean;
+ unsigned overhead, u64s_remaining = 0;
+ unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
+ j->buf[1].buf_size >> 9);
+ unsigned i, nr_online = 0, nr_devs_want;
+ bool can_discard = false;
+ int ret = 0;
+
+ lockdep_assert_held(&j->lock);
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i,
+ &c->rw_devs[BCH_DATA_JOURNAL]) {
+ struct journal_device *ja = &ca->journal;
+
+ if (!ja->nr)
+ continue;
+
+ while (ja->dirty_idx != ja->cur_idx &&
+ ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
+ ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
+
+ while (ja->dirty_idx_ondisk != ja->dirty_idx &&
+ ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
+ ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
+
+ if (ja->discard_idx != ja->dirty_idx_ondisk)
+ can_discard = true;
+
+ max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
+ nr_online++;
+ }
+ rcu_read_unlock();
+
+ j->can_discard = can_discard;
+
+ if (nr_online < c->opts.metadata_replicas_required) {
+ ret = -EROFS;
+ goto out;
+ }
+
+ if (!fifo_free(&j->pin)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);
+
+ discarded = __journal_space_available(j, nr_devs_want, journal_space_discarded);
+ clean_ondisk = __journal_space_available(j, nr_devs_want, journal_space_clean_ondisk);
+ clean = __journal_space_available(j, nr_devs_want, journal_space_clean);
+
+ if (!discarded.next_entry)
+ ret = -ENOSPC;
+
+ overhead = DIV_ROUND_UP(clean.remaining, max_entry_size) *
+ journal_entry_overhead(j);
+ u64s_remaining = clean.remaining << 6;
+ u64s_remaining = max_t(int, 0, u64s_remaining - overhead);
+ u64s_remaining /= 4;
+out:
+ j->cur_entry_sectors = !ret ? discarded.next_entry : 0;
+ j->cur_entry_error = ret;
+ journal_set_remaining(j, u64s_remaining);
+ journal_check_may_get_unreserved(j);
+
+ if (!ret)
+ journal_wake(j);
+}
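
The unit conversion above is easy to miss: a 512-byte sector holds 64 u64s, hence the << 6 from sectors to u64s; header overhead is charged once per max-size entry, and the result is quartered as a safety margin for prereservations. Worked arithmetic with illustrative values (the overhead of 8 u64s is an assumption, not the value journal_entry_overhead() returns):

#include <stdio.h>

int main(void)
{
	unsigned remaining_sectors = 2048; /* clean space, 512-byte sectors */
	unsigned max_entry_size    = 512;  /* sectors */
	unsigned entry_overhead    = 8;    /* u64s per entry header, assumed */

	/* One 512-byte sector holds 512/8 = 64 u64s, hence << 6: */
	unsigned u64s     = remaining_sectors << 6;
	unsigned overhead = ((remaining_sectors + max_entry_size - 1) /
			     max_entry_size) * entry_overhead;

	printf("%u u64s usable\n", (u64s - overhead) / 4); /* 32760 */
	return 0;
}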
+
+/* Discards - last part of journal reclaim: */
+
+static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
+{
+ bool ret;
spin_lock(&j->lock);
- if (journal_pin_active(pin))
- ret = journal_pin_seq(j, pin->pin_list);
+ ret = ja->discard_idx != ja->dirty_idx_ondisk;
spin_unlock(&j->lock);
return ret;
}
+/*
+ * Advance ja->discard_idx as long as it points to buckets that are no longer
+ * dirty, issuing discards if necessary:
+ */
+void bch2_journal_do_discards(struct journal *j)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ unsigned iter;
+
+ mutex_lock(&j->discard_lock);
+
+ for_each_rw_member(ca, c, iter) {
+ struct journal_device *ja = &ca->journal;
+
+ while (should_discard_bucket(j, ja)) {
+ if (ca->mi.discard &&
+ blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
+ blkdev_issue_discard(ca->disk_sb.bdev,
+ bucket_to_sector(ca,
+ ja->buckets[ja->discard_idx]),
+ ca->mi.bucket_size, GFP_NOIO, 0);
+
+ spin_lock(&j->lock);
+ ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
+
+ bch2_journal_space_available(j);
+ spin_unlock(&j->lock);
+ }
+ }
+
+ mutex_unlock(&j->discard_lock);
+}
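
Note the locking shape in bch2_journal_do_discards(): the dirty check happens under j->lock, the potentially slow discard I/O is issued with the lock dropped, and the index advance plus space recalculation retake it. A minimal sketch of that check/io/advance pattern, with a printf standing in for blkdev_issue_discard():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned discard_idx, dirty_idx_ondisk = 3, nr = 8;

static int bucket_needs_discard(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	ret = discard_idx != dirty_idx_ondisk;	/* check under lock */
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	while (bucket_needs_discard()) {
		/* slow I/O happens with the lock dropped: */
		printf("discarding bucket %u\n", discard_idx);

		pthread_mutex_lock(&lock);
		discard_idx = (discard_idx + 1) % nr; /* advance under lock */
		pthread_mutex_unlock(&lock);
	}
	return 0;
}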
+
+/*
+ * Journal entry pinning - machinery for holding a reference on a given journal
+ * entry, holding it open to ensure it gets replayed during recovery:
+ */
+
+static void bch2_journal_reclaim_fast(struct journal *j)
+{
+ struct journal_entry_pin_list temp;
+ bool popped = false;
+
+ lockdep_assert_held(&j->lock);
+
+ /*
+ * Unpin journal entries whose reference counts reached zero, meaning
+ * all btree nodes got written out
+ */
+ while (!fifo_empty(&j->pin) &&
+ !atomic_read(&fifo_peek_front(&j->pin).count)) {
+ BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
+ BUG_ON(!fifo_pop(&j->pin, temp));
+ popped = true;
+ }
+
+ if (popped)
+ bch2_journal_space_available(j);
+}
+
+void bch2_journal_pin_put(struct journal *j, u64 seq)
+{
+ struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+
+ if (atomic_dec_and_test(&pin_list->count)) {
+ spin_lock(&j->lock);
+ bch2_journal_reclaim_fast(j);
+ spin_unlock(&j->lock);
+ }
+}
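
The pinning machinery boils down to one refcount per open journal entry in a FIFO: reclaim may only advance past the front entry once its count drops to zero, no matter how many newer entries have already drained. A toy model of bch2_journal_pin_put() plus the fast-reclaim pop, with illustrative values:

#include <assert.h>
#include <stdio.h>

#define NR 4

static unsigned front, back = 3;	/* entries [front, back) are open */
static int count[NR] = { 2, 1, 1 };	/* refcount per entry */

static void toy_pin_put(unsigned seq)
{
	assert(count[seq] > 0);
	if (--count[seq] == 0)
		/* fast reclaim: pop finished entries off the front */
		while (front < back && !count[front])
			front++;
}

int main(void)
{
	toy_pin_put(1);	/* entry 1 done, but entry 0 still pins the front */
	printf("front %u\n", front);	/* 0 */
	toy_pin_put(0);
	toy_pin_put(0);	/* entries 0 and 1 both drop off */
	printf("front %u\n", front);	/* 2 */
	return 0;
}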
+
static inline void __journal_pin_add(struct journal *j,
- struct journal_entry_pin_list *pin_list,
+ u64 seq,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
{
+ struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+
BUG_ON(journal_pin_active(pin));
BUG_ON(!atomic_read(&pin_list->count));
atomic_inc(&pin_list->count);
- pin->pin_list = pin_list;
+ pin->seq = seq;
pin->flush = flush_fn;
- if (flush_fn)
- list_add(&pin->list, &pin_list->list);
- else
- INIT_LIST_HEAD(&pin->list);
+ list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
/*
* If the journal is currently full, we might want to call flush_fn
@@ -57,19 +318,20 @@ void bch2_journal_pin_add(struct journal *j, u64 seq,
journal_pin_flush_fn flush_fn)
{
spin_lock(&j->lock);
- __journal_pin_add(j, journal_seq_pin(j, seq), pin, flush_fn);
+ __journal_pin_add(j, seq, pin, flush_fn);
spin_unlock(&j->lock);
}
static inline void __journal_pin_drop(struct journal *j,
struct journal_entry_pin *pin)
{
- struct journal_entry_pin_list *pin_list = pin->pin_list;
+ struct journal_entry_pin_list *pin_list;
if (!journal_pin_active(pin))
return;
- pin->pin_list = NULL;
+ pin_list = journal_seq_pin(j, pin->seq);
+ pin->seq = 0;
list_del_init(&pin->list);
/*
@@ -79,16 +341,38 @@ static inline void __journal_pin_drop(struct journal *j,
if (atomic_dec_and_test(&pin_list->count) &&
pin_list == &fifo_peek_front(&j->pin))
bch2_journal_reclaim_fast(j);
+ else if (fifo_used(&j->pin) == 1 &&
+ atomic_read(&pin_list->count) == 1)
+ journal_wake(j);
}
void bch2_journal_pin_drop(struct journal *j,
- struct journal_entry_pin *pin)
+ struct journal_entry_pin *pin)
{
spin_lock(&j->lock);
__journal_pin_drop(j, pin);
spin_unlock(&j->lock);
}
+void bch2_journal_pin_update(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn)
+{
+ spin_lock(&j->lock);
+
+ if (pin->seq != seq) {
+ __journal_pin_drop(j, pin);
+ __journal_pin_add(j, seq, pin, flush_fn);
+ } else {
+ struct journal_entry_pin_list *pin_list =
+ journal_seq_pin(j, seq);
+
+ list_move(&pin->list, &pin_list->list);
+ }
+
+ spin_unlock(&j->lock);
+}
+
void bch2_journal_pin_add_if_older(struct journal *j,
struct journal_entry_pin *src_pin,
struct journal_entry_pin *pin,
@@ -98,15 +382,21 @@ void bch2_journal_pin_add_if_older(struct journal *j,
if (journal_pin_active(src_pin) &&
(!journal_pin_active(pin) ||
- journal_pin_seq(j, src_pin->pin_list) <
- journal_pin_seq(j, pin->pin_list))) {
+ src_pin->seq < pin->seq)) {
__journal_pin_drop(j, pin);
- __journal_pin_add(j, src_pin->pin_list, pin, flush_fn);
+ __journal_pin_add(j, src_pin->seq, pin, flush_fn);
}
spin_unlock(&j->lock);
}
+void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
+{
+ BUG_ON(journal_pin_active(pin));
+
+ wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
+}
+
/*
* Journal reclaim: flush references to open journal entries to reclaim space in
* the journal
@@ -116,88 +406,55 @@ void bch2_journal_pin_add_if_older(struct journal *j,
* data off of a specific device:
*/
-/**
- * bch2_journal_reclaim_fast - do the fast part of journal reclaim
- *
- * Called from IO submission context, does not block. Cleans up after btree
- * write completions by advancing the journal pin and each cache's last_idx,
- * kicking off discards and background reclaim as necessary.
- */
-void bch2_journal_reclaim_fast(struct journal *j)
-{
- struct journal_entry_pin_list temp;
- bool popped = false;
-
- lockdep_assert_held(&j->lock);
-
- /*
- * Unpin journal entries whose reference counts reached zero, meaning
- * all btree nodes got written out
- */
- while (!atomic_read(&fifo_peek_front(&j->pin).count)) {
- BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
- BUG_ON(!fifo_pop(&j->pin, temp));
- popped = true;
- }
-
- if (popped)
- journal_wake(j);
-}
-
static struct journal_entry_pin *
-__journal_get_next_pin(struct journal *j, u64 seq_to_flush, u64 *seq)
+journal_get_next_pin(struct journal *j, u64 max_seq, u64 *seq)
{
struct journal_entry_pin_list *pin_list;
- struct journal_entry_pin *ret;
- u64 iter;
+ struct journal_entry_pin *ret = NULL;
- /* no need to iterate over empty fifo entries: */
- bch2_journal_reclaim_fast(j);
+ spin_lock(&j->lock);
- fifo_for_each_entry_ptr(pin_list, &j->pin, iter) {
- if (iter > seq_to_flush)
+ fifo_for_each_entry_ptr(pin_list, &j->pin, *seq)
+ if (*seq > max_seq ||
+ (ret = list_first_entry_or_null(&pin_list->list,
+ struct journal_entry_pin, list)))
break;
- ret = list_first_entry_or_null(&pin_list->list,
- struct journal_entry_pin, list);
- if (ret) {
- /* must be list_del_init(), see bch2_journal_pin_drop() */
- list_move(&ret->list, &pin_list->flushed);
- *seq = iter;
- return ret;
- }
+ if (ret) {
+ list_move(&ret->list, &pin_list->flushed);
+ BUG_ON(j->flush_in_progress);
+ j->flush_in_progress = ret;
+ j->last_flushed = jiffies;
}
- return NULL;
-}
-
-static struct journal_entry_pin *
-journal_get_next_pin(struct journal *j, u64 seq_to_flush, u64 *seq)
-{
- struct journal_entry_pin *ret;
-
- spin_lock(&j->lock);
- ret = __journal_get_next_pin(j, seq_to_flush, seq);
spin_unlock(&j->lock);
return ret;
}
-static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
+static void journal_flush_pins(struct journal *j, u64 seq_to_flush,
+ unsigned min_nr)
{
- bool ret;
+ struct journal_entry_pin *pin;
+ u64 seq;
- spin_lock(&j->lock);
- ret = ja->nr &&
- (ja->last_idx != ja->cur_idx &&
- ja->bucket_seq[ja->last_idx] < j->last_seq_ondisk);
- spin_unlock(&j->lock);
+ lockdep_assert_held(&j->reclaim_lock);
- return ret;
+ while ((pin = journal_get_next_pin(j, min_nr
+ ? U64_MAX : seq_to_flush, &seq))) {
+ if (min_nr)
+ min_nr--;
+
+ pin->flush(j, pin, seq);
+
+ BUG_ON(j->flush_in_progress != pin);
+ j->flush_in_progress = NULL;
+ wake_up(&j->pin_flush_wait);
+ }
}
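
The flush_in_progress / pin_flush_wait pair is a small handshake: journal_get_next_pin() publishes which pin is being flushed before dropping j->lock, journal_flush_pins() clears it and wakes waiters once the flush callback returns, and bch2_journal_pin_flush() sleeps until its pin is no longer in flight, so a pin's owner can safely free its memory. A userspace model of the same handshake (illustrative, using a condvar in place of the kernel waitqueue):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flush_done = PTHREAD_COND_INITIALIZER;
static const void *flush_in_progress;

/* Flusher side: publish the pin, flush unlocked, clear and wake. */
static void flush_one(const void *pin, void (*flush_fn)(const void *))
{
	pthread_mutex_lock(&lock);
	flush_in_progress = pin;
	pthread_mutex_unlock(&lock);

	flush_fn(pin);			/* may block; lock not held */

	pthread_mutex_lock(&lock);
	flush_in_progress = NULL;
	pthread_cond_broadcast(&flush_done);
	pthread_mutex_unlock(&lock);
}

/* Owner side: don't free the pin while a flush of it is in flight. */
static void pin_flush_wait(const void *pin)
{
	pthread_mutex_lock(&lock);
	while (flush_in_progress == pin)
		pthread_cond_wait(&flush_done, &lock);
	pthread_mutex_unlock(&lock);
}

static void my_flush(const void *pin)
{
	printf("flushing %p\n", pin);
}

int main(void)
{
	int pin;

	flush_one(&pin, my_flush);
	pin_flush_wait(&pin);	/* returns immediately: flush completed */
	return 0;
}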
/**
- * bch2_journal_reclaim_work - free up journal buckets
+ * bch2_journal_reclaim - free up journal buckets
*
* Background journal reclaim writes out btree nodes. It should be run
* early enough so that we never completely run out of journal buckets.
@@ -214,75 +471,42 @@ static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
* 512 journal entries or 25% of all journal buckets, then
* journal_next_bucket() should not stall.
*/
-void bch2_journal_reclaim_work(struct work_struct *work)
+void bch2_journal_reclaim(struct journal *j)
{
- struct bch_fs *c = container_of(to_delayed_work(work),
- struct bch_fs, journal.reclaim_work);
- struct journal *j = &c->journal;
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
- struct journal_entry_pin *pin;
- u64 seq, seq_to_flush = 0;
- unsigned iter, bucket_to_flush;
- unsigned long next_flush;
- bool reclaim_lock_held = false, need_flush;
+ unsigned iter, min_nr = 0;
+ u64 seq_to_flush = 0;
+
+ lockdep_assert_held(&j->reclaim_lock);
+
+ bch2_journal_do_discards(j);
+
+ spin_lock(&j->lock);
- /*
- * Advance last_idx to point to the oldest journal entry containing
- * btree node updates that have not yet been written out
- */
for_each_rw_member(ca, c, iter) {
struct journal_device *ja = &ca->journal;
+ unsigned nr_buckets, bucket_to_flush;
if (!ja->nr)
continue;
- while (should_discard_bucket(j, ja)) {
- if (!reclaim_lock_held) {
- /*
- * ugh:
- * might be called from __journal_res_get()
- * under wait_event() - have to go back to
- * TASK_RUNNING before doing something that
- * would block, but only if we're doing work:
- */
- __set_current_state(TASK_RUNNING);
-
- mutex_lock(&j->reclaim_lock);
- reclaim_lock_held = true;
- /* recheck under reclaim_lock: */
- continue;
- }
+ /* Try to keep the journal at most half full: */
+ nr_buckets = ja->nr / 2;
- if (ca->mi.discard &&
- blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
- blkdev_issue_discard(ca->disk_sb.bdev,
- bucket_to_sector(ca,
- ja->buckets[ja->last_idx]),
- ca->mi.bucket_size, GFP_NOIO, 0);
+ /* And include pre-reservations: */
+ nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
+ (ca->mi.bucket_size << 6) -
+ journal_entry_overhead(j));
- spin_lock(&j->lock);
- ja->last_idx = (ja->last_idx + 1) % ja->nr;
- spin_unlock(&j->lock);
-
- journal_wake(j);
- }
+ nr_buckets = min(nr_buckets, ja->nr);
- /*
- * Write out enough btree nodes to free up 50% journal
- * buckets
- */
- spin_lock(&j->lock);
- bucket_to_flush = (ja->cur_idx + (ja->nr >> 1)) % ja->nr;
- seq_to_flush = max_t(u64, seq_to_flush,
- ja->bucket_seq[bucket_to_flush]);
- spin_unlock(&j->lock);
+ bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
+ seq_to_flush = max(seq_to_flush,
+ ja->bucket_seq[bucket_to_flush]);
}
- if (reclaim_lock_held)
- mutex_unlock(&j->reclaim_lock);
-
/* Also flush if the pin fifo is more than half full */
- spin_lock(&j->lock);
seq_to_flush = max_t(s64, seq_to_flush,
(s64) journal_cur_seq(j) -
(j->pin.size >> 1));
@@ -292,86 +516,72 @@ void bch2_journal_reclaim_work(struct work_struct *work)
* If it's been longer than j->reclaim_delay_ms since we last flushed,
* make sure to flush at least one journal pin:
*/
- next_flush = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
- need_flush = time_after(jiffies, next_flush);
-
- while ((pin = journal_get_next_pin(j, need_flush
- ? U64_MAX
- : seq_to_flush, &seq))) {
- __set_current_state(TASK_RUNNING);
- pin->flush(j, pin, seq);
- need_flush = false;
+ if (time_after(jiffies, j->last_flushed +
+ msecs_to_jiffies(j->reclaim_delay_ms)))
+ min_nr = 1;
- j->last_flushed = jiffies;
+ if (j->prereserved.reserved * 2 > j->prereserved.remaining) {
+ seq_to_flush = max(seq_to_flush, journal_last_seq(j));
+ min_nr = 1;
}
- if (!test_bit(BCH_FS_RO, &c->flags))
- queue_delayed_work(system_freezable_wq, &j->reclaim_work,
+ journal_flush_pins(j, seq_to_flush, min_nr);
+
+ if (!bch2_journal_error(j))
+ queue_delayed_work(c->journal_reclaim_wq, &j->reclaim_work,
msecs_to_jiffies(j->reclaim_delay_ms));
}
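
The reclaim target above works out to: flush enough old entries that at most half of each device's buckets stay dirty, widened by however many buckets the outstanding prereservations could consume (a bucket of bucket_size sectors holds bucket_size << 6 u64s, minus one entry header). Illustrative arithmetic, with the entry overhead assumed rather than taken from journal_entry_overhead():

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned nr = 16;		/* journal buckets on this device */
	unsigned bucket_size = 256;	/* sectors */
	unsigned entry_overhead = 8;	/* u64s, assumed */
	unsigned prereserved = 40000;	/* outstanding prereservations, u64s */

	/* Keep at most half the buckets dirty... */
	unsigned nr_buckets = nr / 2;

	/* ...plus room for prereserved entries: */
	nr_buckets += DIV_ROUND_UP(prereserved,
				   (bucket_size << 6) - entry_overhead);
	if (nr_buckets > nr)
		nr_buckets = nr;

	printf("flush up to %u buckets ahead of cur_idx\n", nr_buckets); /* 11 */
	return 0;
}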
-static int journal_flush_done(struct journal *j, u64 seq_to_flush,
- struct journal_entry_pin **pin,
- u64 *pin_seq)
+void bch2_journal_reclaim_work(struct work_struct *work)
{
- int ret;
+ struct journal *j = container_of(to_delayed_work(work),
+ struct journal, reclaim_work);
- *pin = NULL;
+ mutex_lock(&j->reclaim_lock);
+ bch2_journal_reclaim(j);
+ mutex_unlock(&j->reclaim_lock);
+}
+
+static int journal_flush_done(struct journal *j, u64 seq_to_flush)
+{
+ int ret;
ret = bch2_journal_error(j);
if (ret)
return ret;
+ mutex_lock(&j->reclaim_lock);
+
+ journal_flush_pins(j, seq_to_flush, 0);
+
spin_lock(&j->lock);
/*
* If journal replay hasn't completed, the unreplayed journal entries
* hold refs on their corresponding sequence numbers
*/
- ret = (*pin = __journal_get_next_pin(j, seq_to_flush, pin_seq)) != NULL ||
- !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
+ ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
journal_last_seq(j) > seq_to_flush ||
(fifo_used(&j->pin) == 1 &&
atomic_read(&fifo_peek_front(&j->pin).count) == 1);
+
spin_unlock(&j->lock);
+ mutex_unlock(&j->reclaim_lock);
return ret;
}
-int bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
+void bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct journal_entry_pin *pin;
- u64 pin_seq;
- bool flush;
-
if (!test_bit(JOURNAL_STARTED, &j->flags))
- return 0;
-again:
- wait_event(j->wait, journal_flush_done(j, seq_to_flush, &pin, &pin_seq));
- if (pin) {
- /* flushing a journal pin might cause a new one to be added: */
- pin->flush(j, pin, pin_seq);
- goto again;
- }
-
- spin_lock(&j->lock);
- flush = journal_last_seq(j) != j->last_seq_ondisk ||
- (seq_to_flush == U64_MAX && c->btree_roots_dirty);
- spin_unlock(&j->lock);
-
- return flush ? bch2_journal_meta(j) : 0;
-}
+ return;
-int bch2_journal_flush_all_pins(struct journal *j)
-{
- return bch2_journal_flush_pins(j, U64_MAX);
+ closure_wait_event(&j->async_wait, journal_flush_done(j, seq_to_flush));
}
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct journal_entry_pin_list *p;
- struct bch_devs_list devs;
u64 iter, seq = 0;
int ret = 0;
@@ -383,7 +593,9 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
seq = iter;
spin_unlock(&j->lock);
- ret = bch2_journal_flush_pins(j, seq);
+ bch2_journal_flush_pins(j, seq);
+
+ ret = bch2_journal_error(j);
if (ret)
return ret;
@@ -394,17 +606,20 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
spin_lock(&j->lock);
while (!ret && seq < j->pin.back) {
+ struct bch_replicas_padded replicas;
+
seq = max(seq, journal_last_seq(j));
- devs = journal_seq_pin(j, seq)->devs;
+ bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL,
+ journal_seq_pin(j, seq)->devs);
seq++;
spin_unlock(&j->lock);
- ret = bch2_mark_replicas(c, BCH_DATA_JOURNAL, devs);
+ ret = bch2_mark_replicas(c, &replicas.e);
spin_lock(&j->lock);
}
spin_unlock(&j->lock);
- bch2_replicas_gc_end(c, ret);
+ ret = bch2_replicas_gc_end(c, ret);
mutex_unlock(&c->replicas_gc_lock);
return ret;