author     Kent Overstreet <kent.overstreet@gmail.com>   2018-11-26 00:13:33 -0500
committer  Kent Overstreet <kent.overstreet@gmail.com>   2018-12-27 11:38:35 -0500
commit     f0ba7fad504c4129b855ebf164334729a1693120
tree       14665c428922a735240d24ecb32c150a7f6a49b3
parent     8f519f2751e8d075f100baa9e545a7dbaeb1af86
bcachefs: s/usage_lock/mark_lock
better describes what it's for, and we're going to call a new lock
usage_lock
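
For orientation, here is a minimal sketch of what the rename touches, abridged from the hunks below: the per-filesystem percpu_rw_semaphore formerly called usage_lock becomes mark_lock, and every call site keeps the same take-lock / mark / release pattern. Only the fields shown in this patch are included; surrounding struct members are elided, and the helper function name is invented purely for illustration.

/* Abridged from the fs/bcachefs/bcachefs.h hunk below: */
struct bch_fs {
        /* ... */
        struct bch_fs_usage __percpu    *usage[2];
        struct percpu_rw_semaphore      mark_lock;      /* was: usage_lock */
        /* ... */
};

/*
 * Read-side pattern repeated throughout the patch; the function name is a
 * placeholder, not something added by this commit.
 */
static void mark_lock_usage_sketch(struct bch_fs *c)
{
        percpu_down_read_preempt_disable(&c->mark_lock);
        /* ... bch2_mark_key_locked(), bucket / usage updates ... */
        percpu_up_read_preempt_enable(&c->mark_lock);
}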
-rw-r--r--   fs/bcachefs/alloc_background.c        16
-rw-r--r--   fs/bcachefs/alloc_foreground.c        14
-rw-r--r--   fs/bcachefs/bcachefs.h                 4
-rw-r--r--   fs/bcachefs/btree_gc.c                16
-rw-r--r--   fs/bcachefs/btree_update_interior.c   12
-rw-r--r--   fs/bcachefs/buckets.c                 40
-rw-r--r--   fs/bcachefs/buckets.h                  2
-rw-r--r--   fs/bcachefs/io.c                       4
-rw-r--r--   fs/bcachefs/journal.c                  4
-rw-r--r--   fs/bcachefs/super.c                    4
10 files changed, 58 insertions, 58 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index ff8623e4c2c7..955caa217f69 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -217,9 +217,9 @@ static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
         if (a.k->p.offset >= ca->mi.nbuckets)
                 return;
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         __alloc_read_key(bucket(ca, a.k->p.offset), a.v);
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 }
 
 int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
@@ -280,12 +280,12 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
 
         a->k.p = POS(ca->dev_idx, b);
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         g = bucket(ca, b);
         m = bucket_cmpxchg(g, m, m.dirty = false);
 
         __alloc_write_key(a, g, m);
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 
         bch2_btree_iter_cond_resched(iter);
 
@@ -796,7 +796,7 @@ static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
 {
         struct bucket_mark m;
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         spin_lock(&c->freelist_lock);
 
         bch2_invalidate_bucket(c, ca, bucket, &m);
@@ -809,7 +809,7 @@ static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
         bucket_io_clock_reset(c, ca, bucket, READ);
         bucket_io_clock_reset(c, ca, bucket, WRITE);
 
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 
         if (m.journal_seq_valid) {
                 u64 journal_seq = atomic64_read(&c->journal.seq);
@@ -1337,7 +1337,7 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
                 struct bucket_mark m;
 
                 down_read(&ca->bucket_lock);
-                percpu_down_read_preempt_disable(&c->usage_lock);
+                percpu_down_read_preempt_disable(&c->mark_lock);
 
                 buckets = bucket_array(ca);
 
@@ -1361,7 +1361,7 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
                         if (fifo_full(&ca->free[RESERVE_BTREE]))
                                 break;
                 }
-                percpu_up_read_preempt_enable(&c->usage_lock);
+                percpu_up_read_preempt_enable(&c->mark_lock);
                 up_read(&ca->bucket_lock);
         }
 
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 5024e560079b..ecd07dfdd57f 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -100,7 +100,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
                 return;
         }
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         spin_lock(&ob->lock);
 
         bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
@@ -108,7 +108,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
         ob->valid = false;
 
         spin_unlock(&ob->lock);
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 
         spin_lock(&c->freelist_lock);
         ob->freelist = c->open_buckets_freelist;
@@ -440,7 +440,7 @@ static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
         open_bucket_for_each(c, &h->blocks, ob, i)
                 __clear_bit(ob->ptr.dev, devs.d);
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         rcu_read_lock();
 
         if (h->parity.nr < h->redundancy) {
@@ -476,12 +476,12 @@ static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
         }
 
         rcu_read_unlock();
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 
         return bch2_ec_stripe_new_alloc(c, h);
 err:
         rcu_read_unlock();
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
         return -1;
 }
 
@@ -637,7 +637,7 @@ static int open_bucket_add_buckets(struct bch_fs *c,
         if (*nr_effective >= nr_replicas)
                 return 0;
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         rcu_read_lock();
 
 retry_blocking:
@@ -654,7 +654,7 @@ retry_blocking:
         }
 
         rcu_read_unlock();
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 
         return ret;
 }
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 7eb89e008331..4a7a58f0eeeb 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -387,7 +387,7 @@ struct bch_dev {
 
         /*
          * Buckets:
-         * Per-bucket arrays are protected by c->usage_lock, bucket_lock and
+         * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
          * gc_lock, for device resize - holding any is sufficient for access:
          * Or rcu_read_lock(), but only for ptr_stale():
          */
@@ -614,7 +614,7 @@ struct bch_fs {
 
         struct bch_fs_usage __percpu *usage[2];
 
-        struct percpu_rw_semaphore usage_lock;
+        struct percpu_rw_semaphore mark_lock;
 
         /*
          * When we invalidate buckets, we use both the priority and the amount
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 361e60e490f1..3d2ff1d80fa8 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -348,7 +348,7 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
          */
         if (c) {
                 lockdep_assert_held(&c->sb_lock);
-                percpu_down_read_preempt_disable(&c->usage_lock);
+                percpu_down_read_preempt_disable(&c->mark_lock);
         } else {
                 preempt_disable();
         }
@@ -373,7 +373,7 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
         }
 
         if (c) {
-                percpu_up_read_preempt_enable(&c->usage_lock);
+                percpu_up_read_preempt_enable(&c->mark_lock);
         } else {
                 preempt_enable();
         }
@@ -419,7 +419,7 @@ static void bch2_mark_allocator_buckets(struct bch_fs *c)
         size_t i, j, iter;
         unsigned ci;
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
 
         spin_lock(&c->freelist_lock);
         gc_pos_set(c, gc_pos_alloc(c, NULL));
@@ -455,7 +455,7 @@ static void bch2_mark_allocator_buckets(struct bch_fs *c)
                 spin_unlock(&ob->lock);
         }
 
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 }
 
 static void bch2_gc_free(struct bch_fs *c)
@@ -575,7 +575,7 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 #define copy_fs_field(_f, _msg, ...)                                    \
         copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
 
-        percpu_down_write(&c->usage_lock);
+        percpu_down_write(&c->mark_lock);
 
         if (initial) {
                 bch2_gc_done_nocheck(c);
@@ -695,7 +695,7 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
                 preempt_enable();
         }
 out:
-        percpu_up_write(&c->usage_lock);
+        percpu_up_write(&c->mark_lock);
 
 #undef copy_fs_field
 #undef copy_dev_field
@@ -740,7 +740,7 @@ static int bch2_gc_start(struct bch_fs *c)
                 }
         }
 
-        percpu_down_write(&c->usage_lock);
+        percpu_down_write(&c->mark_lock);
 
         for_each_member_device(ca, c, i) {
                 struct bucket_array *dst = __bucket_array(ca, 1);
@@ -754,7 +754,7 @@ static int bch2_gc_start(struct bch_fs *c)
                         dst->b[b]._mark.gen = src->b[b].mark.gen;
         };
 
-        percpu_up_write(&c->usage_lock);
+        percpu_up_write(&c->mark_lock);
 
         return bch2_ec_mem_alloc(c, true);
 }
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 6fc4b9b5b575..e18655e444ce 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1061,7 +1061,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
         __bch2_btree_set_root_inmem(c, b);
 
         mutex_lock(&c->btree_interior_update_lock);
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
 
         bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
                              true, 0,
@@ -1075,7 +1075,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
         bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
                             gc_pos_btree_root(b->btree_id));
 
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
         mutex_unlock(&c->btree_interior_update_lock);
 }
 
@@ -1154,7 +1154,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
         BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, b));
 
         mutex_lock(&c->btree_interior_update_lock);
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
 
         bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
                              true, 0,
@@ -1176,7 +1176,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
         bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
                             gc_pos_btree_node(b));
 
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
         mutex_unlock(&c->btree_interior_update_lock);
 
         bch2_btree_bset_insert_key(iter, b, node_iter, insert);
@@ -1964,7 +1964,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
         bch2_btree_node_lock_write(b, iter);
 
         mutex_lock(&c->btree_interior_update_lock);
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
 
         bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
                              true, 0,
@@ -1976,7 +1976,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
         bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
                             gc_pos_btree_root(b->btree_id));
 
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
         mutex_unlock(&c->btree_interior_update_lock);
 
         if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 66e122edb2e0..073848b9fea6 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -322,7 +322,7 @@ void bch2_fs_usage_apply(struct bch_fs *c,
         s64 added = sum.data + sum.reserved;
         s64 should_not_have_added;
 
-        percpu_rwsem_assert_held(&c->usage_lock);
+        percpu_rwsem_assert_held(&c->mark_lock);
 
         /*
          * Not allowed to reduce sectors_available except by getting a
@@ -362,7 +362,7 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 {
         struct bch_dev_usage *dev_usage;
 
-        percpu_rwsem_assert_held(&c->usage_lock);
+        percpu_rwsem_assert_held(&c->mark_lock);
 
         bch2_fs_inconsistent_on(old.data_type && new.data_type &&
                                 old.data_type != new.data_type, c,
@@ -409,14 +409,14 @@ void bch2_dev_usage_from_buckets(struct bch_fs *c, struct bch_dev *ca)
         struct bucket_array *buckets;
         struct bucket *g;
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         fs_usage = this_cpu_ptr(c->usage[0]);
         buckets = bucket_array(ca);
 
         for_each_bucket(g, buckets)
                 if (g->mark.data_type)
                         bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 }
 
 #define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr)              \
@@ -451,7 +451,7 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
 void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
                             size_t b, struct bucket_mark *old)
 {
-        percpu_rwsem_assert_held(&c->usage_lock);
+        percpu_rwsem_assert_held(&c->mark_lock);
 
         __bch2_invalidate_bucket(c, ca, b, old, false);
 
@@ -480,7 +480,7 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
                             size_t b, bool owned_by_allocator,
                             struct gc_pos pos, unsigned flags)
 {
-        percpu_rwsem_assert_held(&c->usage_lock);
+        percpu_rwsem_assert_held(&c->mark_lock);
 
         if (!(flags & BCH_BUCKET_MARK_GC))
                 __bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, false);
@@ -525,7 +525,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
                type != BCH_DATA_JOURNAL);
 
         if (likely(c)) {
-                percpu_rwsem_assert_held(&c->usage_lock);
+                percpu_rwsem_assert_held(&c->mark_lock);
 
                 if (!(flags & BCH_BUCKET_MARK_GC))
                         __bch2_mark_metadata_bucket(c, ca, b, type, sectors,
@@ -916,10 +916,10 @@ int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 {
         int ret;
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         ret = bch2_mark_key_locked(c, k, inserting, sectors,
                                    pos, stats, journal_seq, flags);
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 
         return ret;
 }
@@ -938,7 +938,7 @@ void bch2_mark_update(struct btree_insert *trans,
         if (!btree_node_type_needs_gc(iter->btree_id))
                 return;
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
 
         if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
                 bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
@@ -995,7 +995,7 @@ void bch2_mark_update(struct btree_insert *trans,
 
         bch2_fs_usage_apply(c, &stats, trans->disk_res, pos);
 
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 }
 
 /* Disk reservations: */
@@ -1012,12 +1012,12 @@ static u64 bch2_recalc_sectors_available(struct bch_fs *c)
 
 void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
 {
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         this_cpu_sub(c->usage[0]->online_reserved, res->sectors);
 
         bch2_fs_stats_verify(c);
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 
         res->sectors = 0;
 }
 
@@ -1032,7 +1032,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
         s64 sectors_available;
         int ret;
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         stats = this_cpu_ptr(c->usage[0]);
 
         if (sectors <= stats->available_cache)
@@ -1044,7 +1044,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
                 get = min((u64) sectors + SECTORS_CACHE, old);
 
                 if (get < sectors) {
-                        percpu_up_read_preempt_enable(&c->usage_lock);
+                        percpu_up_read_preempt_enable(&c->mark_lock);
                         goto recalculate;
                 }
         } while ((v = atomic64_cmpxchg(&c->sectors_available,
@@ -1059,7 +1059,7 @@ out:
         bch2_disk_reservations_verify(c, flags);
         bch2_fs_stats_verify(c);
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 
         return 0;
 
 recalculate:
@@ -1080,7 +1080,7 @@ recalculate:
                 return -EINTR;
         }
 
-        percpu_down_write(&c->usage_lock);
+        percpu_down_write(&c->mark_lock);
         sectors_available = bch2_recalc_sectors_available(c);
 
         if (sectors <= sectors_available ||
@@ -1098,7 +1098,7 @@ recalculate:
         }
 
         bch2_fs_stats_verify(c);
-        percpu_up_write(&c->usage_lock);
+        percpu_up_write(&c->mark_lock);
 
         if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
                 up_read(&c->gc_lock);
@@ -1174,7 +1174,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
         if (resize) {
                 down_write(&c->gc_lock);
                 down_write(&ca->bucket_lock);
-                percpu_down_write(&c->usage_lock);
+                percpu_down_write(&c->mark_lock);
         }
 
         old_buckets = bucket_array(ca);
@@ -1204,7 +1204,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
         swap(ca->buckets_written, buckets_written);
 
         if (resize)
-                percpu_up_write(&c->usage_lock);
+                percpu_up_write(&c->mark_lock);
 
         spin_lock(&c->freelist_lock);
         for (i = 0; i < RESERVE_NR; i++) {
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index f2cd70810274..75cb798eaf5d 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -33,7 +33,7 @@ static inline struct bucket_array *__bucket_array(struct bch_dev *ca,
 {
         return rcu_dereference_check(ca->buckets[gc],
                                      !ca->fs ||
-                                     percpu_rwsem_is_held(&ca->fs->usage_lock) ||
+                                     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
                                      lockdep_is_held(&ca->fs->gc_lock) ||
                                      lockdep_is_held(&ca->bucket_lock));
 }
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 98eca9a0cbac..93a3f1dec422 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1720,9 +1720,9 @@ noclone:
 
         bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
 
-        percpu_down_read_preempt_disable(&c->usage_lock);
+        percpu_down_read_preempt_disable(&c->mark_lock);
         bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
-        percpu_up_read_preempt_enable(&c->usage_lock);
+        percpu_up_read_preempt_enable(&c->mark_lock);
 
         if (likely(!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT)))) {
                 bio_inc_remaining(&orig->bio);
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 47cfd50d444e..379b502afbec 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -755,7 +755,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
         }
 
         if (c) {
-                percpu_down_read_preempt_disable(&c->usage_lock);
+                percpu_down_read_preempt_disable(&c->mark_lock);
                 spin_lock(&c->journal.lock);
         } else {
                 preempt_disable();
@@ -783,7 +783,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 
         if (c) {
                 spin_unlock(&c->journal.lock);
-                percpu_up_read_preempt_enable(&c->usage_lock);
+                percpu_up_read_preempt_enable(&c->mark_lock);
         } else {
                 preempt_enable();
         }
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index b33117dd0e95..c151147f8945 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -405,7 +405,7 @@ static void bch2_fs_free(struct bch_fs *c)
         bch2_io_clock_exit(&c->io_clock[WRITE]);
         bch2_io_clock_exit(&c->io_clock[READ]);
         bch2_fs_compress_exit(c);
-        percpu_free_rwsem(&c->usage_lock);
+        percpu_free_rwsem(&c->mark_lock);
         free_percpu(c->usage[0]);
         mempool_exit(&c->btree_iters_pool);
         mempool_exit(&c->btree_bounce_pool);
@@ -640,7 +640,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
                             offsetof(struct btree_write_bio, wbio.bio)),
                         BIOSET_NEED_BVECS) ||
             !(c->usage[0] = alloc_percpu(struct bch_fs_usage)) ||
-            percpu_init_rwsem(&c->usage_lock) ||
+            percpu_init_rwsem(&c->mark_lock) ||
             mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
                                         btree_bytes(c)) ||
             mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
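
The call sites above also show how mark_lock nests relative to the neighbouring locks. The sketch below only restates the acquisition order visible in the bch2_dev_buckets_resize() and allocator/invalidate hunks of this diff; it is not a complete statement of bcachefs's lock ordering, and the helper function name is invented for illustration.

/*
 * Acquisition order as seen in the hunks above (outermost first):
 *   c->gc_lock  ->  ca->bucket_lock  ->  c->mark_lock  ->  c->freelist_lock
 */
static void bucket_resize_locking_sketch(struct bch_fs *c, struct bch_dev *ca)
{
        down_write(&c->gc_lock);
        down_write(&ca->bucket_lock);
        percpu_down_write(&c->mark_lock);

        /* ... swap in the resized bucket array, cf. bch2_dev_buckets_resize() ... */

        percpu_up_write(&c->mark_lock);
        up_write(&ca->bucket_lock);
        up_write(&c->gc_lock);
}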