summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2022-09-12 02:22:47 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2022-10-03 23:54:05 -0400
commit218d9eef5cecfb600720cbcc8beebc76a10b8210 (patch)
treeb7934cee15fc0029c3a11a7c007c99d9cd49f33a
parentf97a1938a3a1b25241489c0a4a751d1a8e033261 (diff)
bcachefs: Kill journal_keys->journal_seq_base
This removes an optimization that didn't actually save us any memory, due to alignment, but did make the code more complicated than it needed to be. We were also seeing a bug where journal_seq_base wasn't getting correctly initialized, so hopefully it'll fix that too. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--fs/bcachefs/bcachefs.h5
-rw-r--r--fs/bcachefs/recovery.c14
2 files changed, 4 insertions, 15 deletions
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 53e7b5a0bea9..fc451e46300f 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -557,13 +557,13 @@ struct journal_seq_blacklist_table {
struct journal_keys {
struct journal_key {
+ u64 journal_seq;
+ u32 journal_offset;
enum btree_id btree_id:8;
unsigned level:8;
bool allocated;
bool overwritten;
struct bkey_i *k;
- u32 journal_seq;
- u32 journal_offset;
} *d;
/*
* Gap buffer: instead of all the empty space in the array being at the
@@ -573,7 +573,6 @@ struct journal_keys {
size_t gap;
size_t nr;
size_t size;
- u64 journal_seq_base;
};
struct btree_path_buf {
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index b070bdf01500..18f6ec5cc7d0 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -223,7 +223,6 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
struct journal_keys new_keys = {
.nr = keys->nr,
.size = max_t(size_t, keys->size, 8) * 2,
- .journal_seq_base = keys->journal_seq_base,
};
new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
@@ -494,9 +493,6 @@ static int journal_keys_sort(struct bch_fs *c)
if (!i || i->ignore)
continue;
- if (!keys->journal_seq_base)
- keys->journal_seq_base = le64_to_cpu(i->j.seq);
-
for_each_jset_key(k, _n, entry, &i->j)
nr_keys++;
}
@@ -516,15 +512,12 @@ static int journal_keys_sort(struct bch_fs *c)
if (!i || i->ignore)
continue;
- BUG_ON(le64_to_cpu(i->j.seq) - keys->journal_seq_base > U32_MAX);
-
for_each_jset_key(k, _n, entry, &i->j)
keys->d[keys->nr++] = (struct journal_key) {
.btree_id = entry->btree_id,
.level = entry->level,
.k = k,
- .journal_seq = le64_to_cpu(i->j.seq) -
- keys->journal_seq_base,
+ .journal_seq = le64_to_cpu(i->j.seq),
.journal_offset = k->_data - i->j._data,
};
}
@@ -618,15 +611,12 @@ static int bch2_journal_replay(struct bch_fs *c)
sizeof(keys_sorted[0]),
journal_sort_seq_cmp, NULL);
- if (keys->nr)
- replay_now_at(j, keys->journal_seq_base);
-
for (i = 0; i < keys->nr; i++) {
k = keys_sorted[i];
cond_resched();
- replay_now_at(j, keys->journal_seq_base + k->journal_seq);
+ replay_now_at(j, k->journal_seq);
ret = bch2_trans_do(c, NULL, NULL,
BTREE_INSERT_LAZY_RW|