author    Kent Overstreet <kent.overstreet@gmail.com>  2021-01-18 23:38:05 -0500
committer Kent Overstreet <kent.overstreet@gmail.com>  2021-01-25 19:05:19 -0500
commit    9fce394ca6d0082ced3612a627cd16e06d84244a
tree      409ffa637938cdfd0e0f2e12391cdd640a575278
parent    4a4a7e01d720eb41ba5572355b379368dde47f72
Update bcachefs sources to 313b24b652 bcachefs: Fix an assertion
Diffstat (limited to 'libbcachefs/alloc_background.c')
-rw-r--r--  libbcachefs/alloc_background.c  115
1 file changed, 52 insertions(+), 63 deletions(-)
diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c
index 60c2c38b..896ec023 100644
--- a/libbcachefs/alloc_background.c
+++ b/libbcachefs/alloc_background.c
@@ -54,10 +54,10 @@ static void pd_controllers_update(struct work_struct *work)
* reclaimed by copy GC
*/
fragmented += max_t(s64, 0, (bucket_to_sector(ca,
- stats.buckets[BCH_DATA_user] +
- stats.buckets[BCH_DATA_cached]) -
- (stats.sectors[BCH_DATA_user] +
- stats.sectors[BCH_DATA_cached])) << 9);
+ stats.d[BCH_DATA_user].buckets +
+ stats.d[BCH_DATA_cached].buckets) -
+ (stats.d[BCH_DATA_user].sectors +
+ stats.d[BCH_DATA_cached].sectors)) << 9);
}
bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
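Aside on this hunk: it is mechanical fallout from a layout change in the per-device usage stats, where the parallel buckets[]/sectors[] arrays indexed by data type become a single per-type array .d[]. A minimal sketch of the implied layout follows; the struct names and anything beyond the .buckets/.sectors fields visible above are assumptions, not taken from this diff.

	struct bch_dev_usage_type {	/* hypothetical name */
		u64	buckets;
		u64	sectors;
	};

	struct bch_dev_usage {
		/* one slot per BCH_DATA_* type (user, cached, ...) */
		struct bch_dev_usage_type d[BCH_DATA_NR];
	};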
@@ -217,7 +217,7 @@ static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
return 0;
ca = bch_dev_bkey_exists(c, k.k->p.inode);
- g = __bucket(ca, k.k->p.offset, 0);
+ g = bucket(ca, k.k->p.offset);
u = bch2_alloc_unpack(k);
g->_mark.gen = u.gen;
@@ -278,7 +278,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct bkey_s_c k;
struct bch_dev *ca;
- struct bucket_array *ba;
struct bucket *g;
struct bucket_mark m;
struct bkey_alloc_unpacked old_u, new_u;
@@ -302,9 +301,7 @@ retry:
percpu_down_read(&c->mark_lock);
ca = bch_dev_bkey_exists(c, iter->pos.inode);
- ba = bucket_array(ca);
-
- g = &ba->b[iter->pos.offset];
+ g = bucket(ca, iter->pos.offset);
m = READ_ONCE(g->mark);
new_u = alloc_mem_to_key(g, m);
percpu_up_read(&c->mark_lock);
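This hunk and the bch2_alloc_read_fn hunk above both switch to the bucket() accessor rather than open-coding the lookup via __bucket() or bucket_array(). A sketch of the accessor's assumed shape; only the name and call signature are visible in this diff, so the bounds check here is an assumption:

	static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
	{
		struct bucket_array *buckets = bucket_array(ca);

		/* assumed: reject out-of-range bucket indices */
		BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
		return buckets->b + b;
	}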
@@ -326,54 +323,36 @@ err:
return ret;
}
-int bch2_dev_alloc_write(struct bch_fs *c, struct bch_dev *ca, unsigned flags)
+int bch2_alloc_write(struct bch_fs *c, unsigned flags)
{
struct btree_trans trans;
struct btree_iter *iter;
- u64 first_bucket, nbuckets;
+ struct bch_dev *ca;
+ unsigned i;
int ret = 0;
- percpu_down_read(&c->mark_lock);
- first_bucket = bucket_array(ca)->first_bucket;
- nbuckets = bucket_array(ca)->nbuckets;
- percpu_up_read(&c->mark_lock);
-
- BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
-
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
- POS(ca->dev_idx, first_bucket),
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- while (iter->pos.offset < nbuckets) {
- bch2_trans_cond_resched(&trans);
-
- ret = bch2_alloc_write_key(&trans, iter, flags);
- if (ret)
- break;
- bch2_btree_iter_next_slot(iter);
- }
-
- bch2_trans_exit(&trans);
-
- return ret;
-}
+ for_each_member_device(ca, c, i) {
+ bch2_btree_iter_set_pos(iter,
+ POS(ca->dev_idx, ca->mi.first_bucket));
-int bch2_alloc_write(struct bch_fs *c, unsigned flags)
-{
- struct bch_dev *ca;
- unsigned i;
- int ret = 0;
+ while (iter->pos.offset < ca->mi.nbuckets) {
+ bch2_trans_cond_resched(&trans);
- for_each_member_device(ca, c, i) {
- bch2_dev_alloc_write(c, ca, flags);
- if (ret) {
- percpu_ref_put(&ca->io_ref);
- break;
+ ret = bch2_alloc_write_key(&trans, iter, flags);
+ if (ret) {
+ percpu_ref_put(&ca->io_ref);
+ goto err;
+ }
+ bch2_btree_iter_next_slot(iter);
}
}
-
+err:
+ bch2_trans_exit(&trans);
return ret;
}
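Reassembling just the added and unchanged lines of this hunk, the consolidated function reads roughly as follows: a single btree_trans/iterator pair now walks every member device, where previously bch2_dev_alloc_write() built a fresh transaction and iterator per device.

	int bch2_alloc_write(struct bch_fs *c, unsigned flags)
	{
		struct btree_trans trans;
		struct btree_iter *iter;
		struct bch_dev *ca;
		unsigned i;
		int ret = 0;

		bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
		iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
					   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		for_each_member_device(ca, c, i) {
			/* reposition the shared iterator at this device's first bucket */
			bch2_btree_iter_set_pos(iter,
				POS(ca->dev_idx, ca->mi.first_bucket));

			while (iter->pos.offset < ca->mi.nbuckets) {
				bch2_trans_cond_resched(&trans);

				ret = bch2_alloc_write_key(&trans, iter, flags);
				if (ret) {
					percpu_ref_put(&ca->io_ref);
					goto err;
				}
				bch2_btree_iter_next_slot(iter);
			}
		}
	err:
		bch2_trans_exit(&trans);
		return ret;
	}

This also quietly fixes a bug visible in the removed lines: the old bch2_alloc_write() tested ret without ever assigning the result of bch2_dev_alloc_write() to it, so per-device write errors were silently dropped.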
@@ -552,7 +531,8 @@ out:
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
unsigned long gc_count = c->gc_count;
- u64 available;
+ s64 available;
+ unsigned i;
int ret = 0;
ca->allocator_state = ALLOCATOR_BLOCKED;
@@ -568,8 +548,15 @@ static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
if (gc_count != c->gc_count)
ca->inc_gen_really_needs_gc = 0;
- available = max_t(s64, 0, dev_buckets_available(ca) -
- ca->inc_gen_really_needs_gc);
+ available = dev_buckets_available(ca);
+ available -= ca->inc_gen_really_needs_gc;
+
+ spin_lock(&c->freelist_lock);
+ for (i = 0; i < RESERVE_NR; i++)
+ available -= fifo_used(&ca->free[i]);
+ spin_unlock(&c->freelist_lock);
+
+ available = max(available, 0LL);
if (available > fifo_free(&ca->free_inc) ||
(available &&
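Worked example for the new accounting, with illustrative numbers only: once the free[] fifo contents are subtracted, available can legitimately go negative. Say dev_buckets_available() returns 10, inc_gen_really_needs_gc is 4, and 8 buckets already sit on the reserve fifos: 10 - 4 - 8 = -2. That is why the variable's type changes from u64 to s64 and the old max_t(s64, 0, ...) becomes an explicit clamp at the end; the intermediate subtractions must be allowed to underflow before the result is clamped to 0.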
@@ -598,6 +585,9 @@ static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
if (!is_available_bucket(mark))
return false;
+ if (mark.owned_by_allocator)
+ return false;
+
if (ca->buckets_nouse &&
test_bit(bucket, ca->buckets_nouse))
return false;
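The new owned_by_allocator check here pairs with the reordering in the final hunk below: since a bucket is now marked allocator-owned before (and independently of) being pushed onto free_inc, the invalidate scan presumably needs this explicit test to skip buckets the allocator has already claimed.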
@@ -894,34 +884,33 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
/* first, put on free_inc and mark as owned by allocator: */
percpu_down_read(&c->mark_lock);
- spin_lock(&c->freelist_lock);
-
- verify_not_on_freelist(c, ca, b);
-
- BUG_ON(!fifo_push(&ca->free_inc, b));
-
g = bucket(ca, b);
m = READ_ONCE(g->mark);
- invalidating_cached_data = m.cached_sectors != 0;
+ BUG_ON(m.dirty_sectors);
+
+ bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
+
+ spin_lock(&c->freelist_lock);
+ verify_not_on_freelist(c, ca, b);
+ BUG_ON(!fifo_push(&ca->free_inc, b));
+ spin_unlock(&c->freelist_lock);
/*
* If we're not invalidating cached data, we only increment the bucket
* gen in memory here, the incremented gen will be updated in the btree
* by bch2_trans_mark_pointer():
*/
+ if (!m.cached_sectors &&
+ !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
+ BUG_ON(m.data_type);
+ bucket_cmpxchg(g, m, m.gen++);
+ percpu_up_read(&c->mark_lock);
+ goto out;
+ }
- if (!invalidating_cached_data)
- bch2_invalidate_bucket(c, ca, b, &m);
- else
- bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
-
- spin_unlock(&c->freelist_lock);
percpu_up_read(&c->mark_lock);
- if (!invalidating_cached_data)
- goto out;
-
/*
* If the read-only path is trying to shut down, we can't be generating
* new btree updates:
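The final hunk is cut off mid-comment above, but reassembling its added and unchanged lines gives the new ordering of the single-bucket invalidate path: mark the bucket allocator-owned first, then push it onto free_inc under freelist_lock, and take an in-memory-only gen bump as the fast path when there is no cached data and nothing waiting on a journal commit.

	g = bucket(ca, b);
	m = READ_ONCE(g->mark);

	BUG_ON(m.dirty_sectors);

	/* claim the bucket for the allocator before exposing it on free_inc */
	bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);

	spin_lock(&c->freelist_lock);
	verify_not_on_freelist(c, ca, b);
	BUG_ON(!fifo_push(&ca->free_inc, b));
	spin_unlock(&c->freelist_lock);

	/*
	 * If we're not invalidating cached data, we only increment the bucket
	 * gen in memory here; the incremented gen will be updated in the btree
	 * by bch2_trans_mark_pointer():
	 */
	if (!m.cached_sectors &&
	    !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
		BUG_ON(m.data_type);
		bucket_cmpxchg(g, m, m.gen++);
		percpu_up_read(&c->mark_lock);
		goto out;
	}

	percpu_up_read(&c->mark_lock);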