author | Kent Overstreet <kent.overstreet@gmail.com> | 2019-06-30 16:28:01 -0400
---|---|---
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2020-05-06 17:14:16 -0400
commit | ea5715a73506eb929e43b66eb3b87c94e2b44ab4 (patch) |
tree | a145b47f47c831f20c6ee694995a5f9b7e2e6e31 /fs/bcachefs/buckets.c |
parent | 5f6131b81dfa624673447c41cfb69c151086b802 (diff) |
Merge with 1f431b384d bcachefs: Refactor trans_(get|update)_key
Diffstat (limited to 'fs/bcachefs/buckets.c')
-rw-r--r-- | fs/bcachefs/buckets.c | 1831 |
1 file changed, 1386 insertions(+), 445 deletions(-)
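Much of this merge replaces the old `bch2_usage_read_raw()`/`bch2_usage_read_cached()` macros with a base-plus-deltas scheme: writers accumulate into one of two buffers chosen by journal-sequence parity (`fs_usage_ptr()` returns `c->usage[journal_seq & 1]`), `bch2_fs_usage_acc_to_base()` folds a buffer back into `c->usage_base`, and readers sum the base plus both delta buffers under a seqcount. A minimal single-threaded sketch of that bookkeeping follows (percpu counters, locking, and the seqcount retry loop omitted; all names in the sketch are illustrative, not the kernel API):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define USAGE_U64S 4 /* stand-in for fs_usage_u64s(c) */

struct fs_usage_sketch {
	uint64_t base[USAGE_U64S];     /* c->usage_base */
	uint64_t delta[2][USAGE_U64S]; /* c->usage[journal_seq & 1] */
};

/* writers pick a delta buffer by journal sequence parity */
static void usage_add(struct fs_usage_sketch *u, unsigned journal_seq,
		      unsigned idx, uint64_t v)
{
	u->delta[journal_seq & 1][idx] += v;
}

/* analogue of bch2_fs_usage_acc_to_base(): fold one delta buffer into base */
static void usage_acc_to_base(struct fs_usage_sketch *u, unsigned idx)
{
	for (unsigned i = 0; i < USAGE_U64S; i++) {
		u->base[i] += u->delta[idx][i];
		u->delta[idx][i] = 0;
	}
}

/* analogue of bch2_fs_usage_read_one(): base plus both pending deltas */
static uint64_t usage_read_one(struct fs_usage_sketch *u, unsigned idx)
{
	return u->base[idx] + u->delta[0][idx] + u->delta[1][idx];
}

int main(void)
{
	struct fs_usage_sketch u;
	memset(&u, 0, sizeof(u));

	usage_add(&u, 7, 0, 100); /* journal seq 7 -> delta[1] */
	usage_add(&u, 8, 0, 50);  /* journal seq 8 -> delta[0] */
	printf("before fold: %llu\n", (unsigned long long) usage_read_one(&u, 0));
	usage_acc_to_base(&u, 1);
	printf("after fold:  %llu\n", (unsigned long long) usage_read_one(&u, 0));
	return 0;
}
```

Reads stay consistent because a fold and a read are serialized by the seqcount in the real code; the sum base + delta[0] + delta[1] is invariant across folds.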
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index b17189ee2e4f..b6b3ac5111ca 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Code for manipulating bucket marks for garbage collection. * @@ -63,89 +64,26 @@ */ #include "bcachefs.h" -#include "alloc.h" +#include "alloc_background.h" +#include "bset.h" #include "btree_gc.h" +#include "btree_update.h" #include "buckets.h" +#include "ec.h" #include "error.h" #include "movinggc.h" +#include "replicas.h" #include <linux/preempt.h> #include <trace/events/bcachefs.h> -#ifdef DEBUG_BUCKETS - -#define lg_local_lock lg_global_lock -#define lg_local_unlock lg_global_unlock - -static void bch2_fs_stats_verify(struct bch_fs *c) -{ - struct bch_fs_usage stats = - __bch2_fs_usage_read(c); - unsigned i; - - for (i = 0; i < ARRAY_SIZE(stats.s); i++) { - if ((s64) stats.s[i].data[S_META] < 0) - panic("replicas %u meta underflow: %lli\n", - i + 1, stats.s[i].data[S_META]); - - if ((s64) stats.s[i].data[S_DIRTY] < 0) - panic("replicas %u dirty underflow: %lli\n", - i + 1, stats.s[i].data[S_DIRTY]); - - if ((s64) stats.s[i].persistent_reserved < 0) - panic("replicas %u reserved underflow: %lli\n", - i + 1, stats.s[i].persistent_reserved); - } - - if ((s64) stats.online_reserved < 0) - panic("sectors_online_reserved underflow: %lli\n", - stats.online_reserved); -} - -static void bch2_dev_stats_verify(struct bch_dev *ca) -{ - struct bch_dev_usage stats = - __bch2_dev_usage_read(ca); - u64 n = ca->mi.nbuckets - ca->mi.first_bucket; - unsigned i; - - for (i = 0; i < ARRAY_SIZE(stats.buckets); i++) - BUG_ON(stats.buckets[i] > n); - BUG_ON(stats.buckets_alloc > n); - BUG_ON(stats.buckets_unavailable > n); -} - -static void bch2_disk_reservations_verify(struct bch_fs *c, int flags) -{ - if (!(flags & BCH_DISK_RESERVATION_NOFAIL)) { - u64 used = __bch2_fs_sectors_used(c); - u64 cached = 0; - u64 avail = atomic64_read(&c->sectors_available); - int cpu; - - for_each_possible_cpu(cpu) - cached += per_cpu_ptr(c->usage_percpu, cpu)->available_cache; - - if (used + avail + cached > c->capacity) - panic("used %llu avail %llu cached %llu capacity %llu\n", - used, avail, cached, c->capacity); - } -} - -#else - -static void bch2_fs_stats_verify(struct bch_fs *c) {} -static void bch2_dev_stats_verify(struct bch_dev *ca) {} -static void bch2_disk_reservations_verify(struct bch_fs *c, int flags) {} - -#endif - /* * Clear journal_seq_valid for buckets for which it's not needed, to prevent * wraparound: */ void bch2_bucket_seq_cleanup(struct bch_fs *c) { + u64 journal_seq = atomic64_read(&c->journal.seq); u16 last_seq_ondisk = c->journal.last_seq_ondisk; struct bch_dev *ca; struct bucket_array *buckets; @@ -153,6 +91,12 @@ void bch2_bucket_seq_cleanup(struct bch_fs *c) struct bucket_mark m; unsigned i; + if (journal_seq - c->last_bucket_seq_cleanup < + (1U << (BUCKET_JOURNAL_SEQ_BITS - 2))) + return; + + c->last_bucket_seq_cleanup = journal_seq; + for_each_member_device(ca, c, i) { down_read(&ca->bucket_lock); buckets = bucket_array(ca); @@ -170,85 +114,192 @@ void bch2_bucket_seq_cleanup(struct bch_fs *c) } } -#define bch2_usage_add(_acc, _stats) \ -do { \ - typeof(_acc) _a = (_acc), _s = (_stats); \ - unsigned i; \ - \ - for (i = 0; i < sizeof(*_a) / sizeof(u64); i++) \ - ((u64 *) (_a))[i] += ((u64 *) (_s))[i]; \ -} while (0) +void bch2_fs_usage_initialize(struct bch_fs *c) +{ + struct bch_fs_usage *usage; + unsigned i; -#define bch2_usage_read_raw(_stats) \ -({ \ - typeof(*this_cpu_ptr(_stats)) 
_acc; \ - int cpu; \ - \ - memset(&_acc, 0, sizeof(_acc)); \ - \ - for_each_possible_cpu(cpu) \ - bch2_usage_add(&_acc, per_cpu_ptr((_stats), cpu)); \ - \ - _acc; \ -}) + percpu_down_write(&c->mark_lock); + usage = c->usage_base; -#define bch2_usage_read_cached(_c, _cached, _uncached) \ -({ \ - typeof(_cached) _ret; \ - unsigned _seq; \ - \ - do { \ - _seq = read_seqcount_begin(&(_c)->gc_pos_lock); \ - _ret = (_c)->gc_pos.phase == GC_PHASE_DONE \ - ? bch2_usage_read_raw(_uncached) \ - : (_cached); \ - } while (read_seqcount_retry(&(_c)->gc_pos_lock, _seq)); \ - \ - _ret; \ -}) + bch2_fs_usage_acc_to_base(c, 0); + bch2_fs_usage_acc_to_base(c, 1); + + for (i = 0; i < BCH_REPLICAS_MAX; i++) + usage->reserved += usage->persistent_reserved[i]; + + for (i = 0; i < c->replicas.nr; i++) { + struct bch_replicas_entry *e = + cpu_replicas_entry(&c->replicas, i); -struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *ca) + switch (e->data_type) { + case BCH_DATA_BTREE: + usage->btree += usage->replicas[i]; + break; + case BCH_DATA_USER: + usage->data += usage->replicas[i]; + break; + case BCH_DATA_CACHED: + usage->cached += usage->replicas[i]; + break; + } + } + + percpu_up_write(&c->mark_lock); +} + +void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage) { - return bch2_usage_read_raw(ca->usage_percpu); + if (fs_usage == c->usage_scratch) + mutex_unlock(&c->usage_scratch_lock); + else + kfree(fs_usage); +} + +struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c) +{ + struct bch_fs_usage *ret; + unsigned bytes = fs_usage_u64s(c) * sizeof(u64); + + ret = kzalloc(bytes, GFP_NOWAIT); + if (ret) + return ret; + + if (mutex_trylock(&c->usage_scratch_lock)) + goto out_pool; + + ret = kzalloc(bytes, GFP_NOFS); + if (ret) + return ret; + + mutex_lock(&c->usage_scratch_lock); +out_pool: + ret = c->usage_scratch; + memset(ret, 0, bytes); + return ret; } struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca) { - return bch2_usage_read_cached(c, ca->usage_cached, ca->usage_percpu); + struct bch_dev_usage ret; + + memset(&ret, 0, sizeof(ret)); + acc_u64s_percpu((u64 *) &ret, + (u64 __percpu *) ca->usage[0], + sizeof(ret) / sizeof(u64)); + + return ret; } -struct bch_fs_usage -__bch2_fs_usage_read(struct bch_fs *c) +static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c, + unsigned journal_seq, + bool gc) { - return bch2_usage_read_raw(c->usage_percpu); + return this_cpu_ptr(gc + ? 
c->usage_gc + : c->usage[journal_seq & 1]); } -struct bch_fs_usage -bch2_fs_usage_read(struct bch_fs *c) +u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v) { - return bch2_usage_read_cached(c, - c->usage_cached, - c->usage_percpu); + ssize_t offset = v - (u64 *) c->usage_base; + unsigned seq; + u64 ret; + + BUG_ON(offset < 0 || offset >= fs_usage_u64s(c)); + percpu_rwsem_assert_held(&c->mark_lock); + + do { + seq = read_seqcount_begin(&c->usage_lock); + ret = *v + + percpu_u64_get((u64 __percpu *) c->usage[0] + offset) + + percpu_u64_get((u64 __percpu *) c->usage[1] + offset); + } while (read_seqcount_retry(&c->usage_lock, seq)); + + return ret; } -struct fs_usage_sum { - u64 data; - u64 reserved; -}; +struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c) +{ + struct bch_fs_usage *ret; + unsigned seq, v, u64s = fs_usage_u64s(c); +retry: + ret = kmalloc(u64s * sizeof(u64), GFP_NOFS); + if (unlikely(!ret)) + return NULL; + + percpu_down_read(&c->mark_lock); + + v = fs_usage_u64s(c); + if (unlikely(u64s != v)) { + u64s = v; + percpu_up_read(&c->mark_lock); + kfree(ret); + goto retry; + } + + do { + seq = read_seqcount_begin(&c->usage_lock); + memcpy(ret, c->usage_base, u64s * sizeof(u64)); + acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s); + acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[1], u64s); + } while (read_seqcount_retry(&c->usage_lock, seq)); + + return ret; +} -static inline struct fs_usage_sum __fs_usage_sum(struct bch_fs_usage stats) +void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx) +{ + unsigned u64s = fs_usage_u64s(c); + + BUG_ON(idx >= 2); + + write_seqcount_begin(&c->usage_lock); + + acc_u64s_percpu((u64 *) c->usage_base, + (u64 __percpu *) c->usage[idx], u64s); + percpu_memset(c->usage[idx], 0, u64s * sizeof(u64)); + + write_seqcount_end(&c->usage_lock); +} + +void bch2_fs_usage_to_text(struct printbuf *out, + struct bch_fs *c, + struct bch_fs_usage *fs_usage) { - struct fs_usage_sum sum = { 0 }; unsigned i; - for (i = 0; i < ARRAY_SIZE(stats.s); i++) { - sum.data += (stats.s[i].data[S_META] + - stats.s[i].data[S_DIRTY]) * (i + 1); - sum.reserved += stats.s[i].persistent_reserved * (i + 1); + pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity); + + pr_buf(out, "hidden:\t\t\t\t%llu\n", + fs_usage->hidden); + pr_buf(out, "data:\t\t\t\t%llu\n", + fs_usage->data); + pr_buf(out, "cached:\t\t\t\t%llu\n", + fs_usage->cached); + pr_buf(out, "reserved:\t\t\t%llu\n", + fs_usage->reserved); + pr_buf(out, "nr_inodes:\t\t\t%llu\n", + fs_usage->nr_inodes); + pr_buf(out, "online reserved:\t\t%llu\n", + fs_usage->online_reserved); + + for (i = 0; + i < ARRAY_SIZE(fs_usage->persistent_reserved); + i++) { + pr_buf(out, "%u replicas:\n", i + 1); + pr_buf(out, "\treserved:\t\t%llu\n", + fs_usage->persistent_reserved[i]); } - sum.reserved += stats.online_reserved; - return sum; + for (i = 0; i < c->replicas.nr; i++) { + struct bch_replicas_entry *e = + cpu_replicas_entry(&c->replicas, i); + + pr_buf(out, "\t"); + bch2_replicas_entry_to_text(out, e); + pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]); + } } #define RESERVE_FACTOR 6 @@ -260,24 +311,51 @@ static u64 reserve_factor(u64 r) static u64 avail_factor(u64 r) { - return (r << RESERVE_FACTOR) / (1 << RESERVE_FACTOR) + 1; + return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1); } -u64 __bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats) +u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage) { - struct fs_usage_sum sum = __fs_usage_sum(stats); - - return 
sum.data + reserve_factor(sum.reserved); + return min(fs_usage->hidden + + fs_usage->btree + + fs_usage->data + + reserve_factor(fs_usage->reserved + + fs_usage->online_reserved), + c->capacity); } -u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats) +static struct bch_fs_usage_short +__bch2_fs_usage_read_short(struct bch_fs *c) { - return min(c->capacity, __bch2_fs_sectors_used(c, stats)); + struct bch_fs_usage_short ret; + u64 data, reserved; + + ret.capacity = c->capacity - + bch2_fs_usage_read_one(c, &c->usage_base->hidden); + + data = bch2_fs_usage_read_one(c, &c->usage_base->data) + + bch2_fs_usage_read_one(c, &c->usage_base->btree); + reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) + + bch2_fs_usage_read_one(c, &c->usage_base->online_reserved); + + ret.used = min(ret.capacity, data + reserve_factor(reserved)); + ret.free = ret.capacity - ret.used; + + ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes); + + return ret; } -u64 bch2_fs_sectors_free(struct bch_fs *c, struct bch_fs_usage stats) +struct bch_fs_usage_short +bch2_fs_usage_read_short(struct bch_fs *c) { - return avail_factor(c->capacity - bch2_fs_sectors_used(c, stats)); + struct bch_fs_usage_short ret; + + percpu_down_read(&c->mark_lock); + ret = __bch2_fs_usage_read_short(c); + percpu_up_read(&c->mark_lock); + + return ret; } static inline int is_unavailable_bucket(struct bucket_mark m) @@ -299,76 +377,94 @@ static inline int is_fragmented_bucket(struct bucket_mark m, static inline enum bch_data_type bucket_type(struct bucket_mark m) { return m.cached_sectors && !m.dirty_sectors - ? BCH_DATA_CACHED + ? BCH_DATA_CACHED : m.data_type; } -static bool bucket_became_unavailable(struct bch_fs *c, - struct bucket_mark old, +static bool bucket_became_unavailable(struct bucket_mark old, struct bucket_mark new) { return is_available_bucket(old) && - !is_available_bucket(new) && - (!c || c->gc_pos.phase == GC_PHASE_DONE); + !is_available_bucket(new); } -void bch2_fs_usage_apply(struct bch_fs *c, - struct bch_fs_usage *stats, +int bch2_fs_usage_apply(struct bch_fs *c, + struct bch_fs_usage *fs_usage, struct disk_reservation *disk_res, - struct gc_pos gc_pos) + unsigned journal_seq) { - struct fs_usage_sum sum = __fs_usage_sum(*stats); - s64 added = sum.data + sum.reserved; + s64 added = fs_usage->data + fs_usage->reserved; + s64 should_not_have_added; + int ret = 0; + + percpu_rwsem_assert_held(&c->mark_lock); /* * Not allowed to reduce sectors_available except by getting a * reservation: */ - BUG_ON(added > (s64) (disk_res ? disk_res->sectors : 0)); + should_not_have_added = added - (s64) (disk_res ? 
disk_res->sectors : 0); + if (WARN_ONCE(should_not_have_added > 0, + "disk usage increased without a reservation")) { + atomic64_sub(should_not_have_added, &c->sectors_available); + added -= should_not_have_added; + ret = -1; + } if (added > 0) { - disk_res->sectors -= added; - stats->online_reserved -= added; + disk_res->sectors -= added; + fs_usage->online_reserved -= added; } - percpu_down_read_preempt_disable(&c->usage_lock); - /* online_reserved not subject to gc: */ - this_cpu_ptr(c->usage_percpu)->online_reserved += - stats->online_reserved; - stats->online_reserved = 0; + preempt_disable(); + acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false), + (u64 *) fs_usage, fs_usage_u64s(c)); + preempt_enable(); - if (!gc_will_visit(c, gc_pos)) - bch2_usage_add(this_cpu_ptr(c->usage_percpu), stats); + return ret; +} - bch2_fs_stats_verify(c); - percpu_up_read_preempt_enable(&c->usage_lock); +static inline void account_bucket(struct bch_fs_usage *fs_usage, + struct bch_dev_usage *dev_usage, + enum bch_data_type type, + int nr, s64 size) +{ + if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL) + fs_usage->hidden += size; - memset(stats, 0, sizeof(*stats)); + dev_usage->buckets[type] += nr; } static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca, - struct bucket_mark old, struct bucket_mark new) + struct bch_fs_usage *fs_usage, + struct bucket_mark old, struct bucket_mark new, + bool gc) { struct bch_dev_usage *dev_usage; - if (c) - percpu_rwsem_assert_held(&c->usage_lock); + percpu_rwsem_assert_held(&c->mark_lock); - if (old.data_type && new.data_type && - old.data_type != new.data_type) { - BUG_ON(!c); - bch2_fs_inconsistent(c, - "different types of data in same bucket: %u, %u", - old.data_type, new.data_type); - } + bch2_fs_inconsistent_on(old.data_type && new.data_type && + old.data_type != new.data_type, c, + "different types of data in same bucket: %s, %s", + bch2_data_types[old.data_type], + bch2_data_types[new.data_type]); + + preempt_disable(); + dev_usage = this_cpu_ptr(ca->usage[gc]); - dev_usage = this_cpu_ptr(ca->usage_percpu); + if (bucket_type(old)) + account_bucket(fs_usage, dev_usage, bucket_type(old), + -1, -ca->mi.bucket_size); - dev_usage->buckets[bucket_type(old)]--; - dev_usage->buckets[bucket_type(new)]++; + if (bucket_type(new)) + account_bucket(fs_usage, dev_usage, bucket_type(new), + 1, ca->mi.bucket_size); dev_usage->buckets_alloc += (int) new.owned_by_allocator - (int) old.owned_by_allocator; + dev_usage->buckets_ec += + (int) new.stripe - (int) old.stripe; dev_usage->buckets_unavailable += is_unavailable_bucket(new) - is_unavailable_bucket(old); @@ -378,203 +474,426 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca, (int) new.cached_sectors - (int) old.cached_sectors; dev_usage->sectors_fragmented += is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca); + preempt_enable(); if (!is_available_bucket(old) && is_available_bucket(new)) bch2_wake_allocator(ca); +} + +void bch2_dev_usage_from_buckets(struct bch_fs *c) +{ + struct bch_dev *ca; + struct bucket_mark old = { .v.counter = 0 }; + struct bucket_array *buckets; + struct bucket *g; + unsigned i; + int cpu; - bch2_dev_stats_verify(ca); + c->usage_base->hidden = 0; + + for_each_member_device(ca, c, i) { + for_each_possible_cpu(cpu) + memset(per_cpu_ptr(ca->usage[0], cpu), 0, + sizeof(*ca->usage[0])); + + buckets = bucket_array(ca); + + for_each_bucket(g, buckets) + bch2_dev_usage_update(c, ca, c->usage_base, + old, g->mark, false); + } } -#define 
bucket_data_cmpxchg(c, ca, g, new, expr) \ +#define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr) \ ({ \ struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \ \ - bch2_dev_usage_update(c, ca, _old, new); \ + bch2_dev_usage_update(c, ca, fs_usage, _old, new, gc); \ _old; \ }) -bool bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca, - size_t b, struct bucket_mark *old) +static inline void update_replicas(struct bch_fs *c, + struct bch_fs_usage *fs_usage, + struct bch_replicas_entry *r, + s64 sectors) { - struct bucket *g; - struct bucket_mark new; + int idx = bch2_replicas_entry_idx(c, r); - percpu_rwsem_assert_held(&c->usage_lock); + BUG_ON(idx < 0); + BUG_ON(!sectors); - g = bucket(ca, b); + switch (r->data_type) { + case BCH_DATA_BTREE: + fs_usage->btree += sectors; + break; + case BCH_DATA_USER: + fs_usage->data += sectors; + break; + case BCH_DATA_CACHED: + fs_usage->cached += sectors; + break; + } + fs_usage->replicas[idx] += sectors; +} - *old = bucket_data_cmpxchg(c, ca, g, new, ({ - if (!is_available_bucket(new)) { - percpu_up_read_preempt_enable(&c->usage_lock); - return false; - } +static inline void update_cached_sectors(struct bch_fs *c, + struct bch_fs_usage *fs_usage, + unsigned dev, s64 sectors) +{ + struct bch_replicas_padded r; + + bch2_replicas_entry_cached(&r.e, dev); + + update_replicas(c, fs_usage, &r.e, sectors); +} + +static struct replicas_delta_list * +replicas_deltas_realloc(struct btree_trans *trans, unsigned more) +{ + struct replicas_delta_list *d = trans->fs_usage_deltas; + unsigned new_size = d ? (d->size + more) * 2 : 128; + + if (!d || d->used + more > d->size) { + d = krealloc(d, sizeof(*d) + new_size, GFP_NOIO|__GFP_ZERO); + BUG_ON(!d); + + d->size = new_size; + trans->fs_usage_deltas = d; + } + return d; +} + +static inline void update_replicas_list(struct btree_trans *trans, + struct bch_replicas_entry *r, + s64 sectors) +{ + struct replicas_delta_list *d; + struct replicas_delta *n; + unsigned b = replicas_entry_bytes(r) + 8; + + d = replicas_deltas_realloc(trans, b); + + n = (void *) d->d + d->used; + n->delta = sectors; + memcpy(&n->r, r, replicas_entry_bytes(r)); + d->used += b; +} + +static inline void update_cached_sectors_list(struct btree_trans *trans, + unsigned dev, s64 sectors) +{ + struct bch_replicas_padded r; + + bch2_replicas_entry_cached(&r.e, dev); + + update_replicas_list(trans, &r.e, sectors); +} + +void bch2_replicas_delta_list_apply(struct bch_fs *c, + struct bch_fs_usage *fs_usage, + struct replicas_delta_list *r) +{ + struct replicas_delta *d = r->d; + struct replicas_delta *top = (void *) r->d + r->used; + + acc_u64s((u64 *) fs_usage, + (u64 *) &r->fs_usage, sizeof(*fs_usage) / sizeof(u64)); + + while (d != top) { + BUG_ON((void *) d > (void *) top); + + update_replicas(c, fs_usage, &d->r, d->delta); + + d = (void *) d + replicas_entry_bytes(&d->r) + 8; + } +} + +#define do_mark_fn(fn, c, pos, flags, ...) 
\ +({ \ + int gc, ret = 0; \ + \ + percpu_rwsem_assert_held(&c->mark_lock); \ + \ + for (gc = 0; gc < 2 && !ret; gc++) \ + if (!gc == !(flags & BCH_BUCKET_MARK_GC) || \ + (gc && gc_visited(c, pos))) \ + ret = fn(c, __VA_ARGS__, gc); \ + ret; \ +}) + +static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca, + size_t b, struct bucket_mark *ret, + bool gc) +{ + struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc); + struct bucket *g = __bucket(ca, b, gc); + struct bucket_mark old, new; + + old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({ + BUG_ON(!is_available_bucket(new)); - new.owned_by_allocator = 1; + new.owned_by_allocator = true; + new.dirty = true; new.data_type = 0; new.cached_sectors = 0; new.dirty_sectors = 0; new.gen++; })); + if (old.cached_sectors) + update_cached_sectors(c, fs_usage, ca->dev_idx, + -((s64) old.cached_sectors)); + + if (!gc) + *ret = old; + return 0; +} + +void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca, + size_t b, struct bucket_mark *old) +{ + do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0, + ca, b, old); + if (!old->owned_by_allocator && old->cached_sectors) trace_invalidate(ca, bucket_to_sector(ca, b), old->cached_sectors); - return true; +} + +static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, + size_t b, bool owned_by_allocator, + bool gc) +{ + struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc); + struct bucket *g = __bucket(ca, b, gc); + struct bucket_mark old, new; + + old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({ + new.owned_by_allocator = owned_by_allocator; + })); + + BUG_ON(!gc && + !owned_by_allocator && !old.owned_by_allocator); + + return 0; } void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, size_t b, bool owned_by_allocator, struct gc_pos pos, unsigned flags) { + preempt_disable(); + + do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags, + ca, b, owned_by_allocator); + + preempt_enable(); +} + +static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k, + struct bch_fs_usage *fs_usage, + u64 journal_seq, unsigned flags) +{ + bool gc = flags & BCH_BUCKET_MARK_GC; + struct bkey_alloc_unpacked u; + struct bch_dev *ca; struct bucket *g; - struct bucket_mark old, new; + struct bucket_mark old, m; + + /* + * alloc btree is read in by bch2_alloc_read, not gc: + */ + if ((flags & BCH_BUCKET_MARK_GC) && + !(flags & BCH_BUCKET_MARK_BUCKET_INVALIDATE)) + return 0; - percpu_rwsem_assert_held(&c->usage_lock); - g = bucket(ca, b); + ca = bch_dev_bkey_exists(c, k.k->p.inode); - if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) && - gc_will_visit(c, pos)) - return; + if (k.k->p.offset >= ca->mi.nbuckets) + return 0; - old = bucket_data_cmpxchg(c, ca, g, new, ({ - new.owned_by_allocator = owned_by_allocator; + g = __bucket(ca, k.k->p.offset, gc); + u = bch2_alloc_unpack(k); + + old = bucket_cmpxchg(g, m, ({ + m.gen = u.gen; + m.data_type = u.data_type; + m.dirty_sectors = u.dirty_sectors; + m.cached_sectors = u.cached_sectors; + + if (journal_seq) { + m.journal_seq_valid = 1; + m.journal_seq = journal_seq; + } })); - BUG_ON(!owned_by_allocator && !old.owned_by_allocator && - c->gc_pos.phase == GC_PHASE_DONE); + if (!(flags & BCH_BUCKET_MARK_ALLOC_READ)) + bch2_dev_usage_update(c, ca, fs_usage, old, m, gc); + + g->io_time[READ] = u.read_time; + g->io_time[WRITE] = u.write_time; + g->oldest_gen = u.oldest_gen; + g->gen_valid = 1; + + /* + * need to know if we're getting called from the invalidate path or + * not: + */ + + if ((flags & 
BCH_BUCKET_MARK_BUCKET_INVALIDATE) && + old.cached_sectors) { + update_cached_sectors(c, fs_usage, ca->dev_idx, + -old.cached_sectors); + trace_invalidate(ca, bucket_to_sector(ca, k.k->p.offset), + old.cached_sectors); + } + + return 0; } -#define saturated_add(ca, dst, src, max) \ -do { \ - BUG_ON((int) (dst) + (src) < 0); \ - if ((dst) == (max)) \ - ; \ - else if ((dst) + (src) <= (max)) \ - dst += (src); \ - else { \ - dst = (max); \ - trace_sectors_saturated(ca); \ - } \ -} while (0) +#define checked_add(a, b) \ +({ \ + unsigned _res = (unsigned) (a) + (b); \ + bool overflow = _res > U16_MAX; \ + if (overflow) \ + _res = U16_MAX; \ + (a) = _res; \ + overflow; \ +}) + +static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, + size_t b, enum bch_data_type type, + unsigned sectors, bool gc) +{ + struct bucket *g = __bucket(ca, b, gc); + struct bucket_mark old, new; + bool overflow; + + BUG_ON(type != BCH_DATA_SB && + type != BCH_DATA_JOURNAL); + + old = bucket_cmpxchg(g, new, ({ + new.dirty = true; + new.data_type = type; + overflow = checked_add(new.dirty_sectors, sectors); + })); + + bch2_fs_inconsistent_on(overflow, c, + "bucket sector count overflow: %u + %u > U16_MAX", + old.dirty_sectors, sectors); + + if (c) + bch2_dev_usage_update(c, ca, fs_usage_ptr(c, 0, gc), + old, new, gc); + + return 0; +} void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, size_t b, enum bch_data_type type, unsigned sectors, struct gc_pos pos, unsigned flags) { - struct bucket *g; - struct bucket_mark old, new; + BUG_ON(type != BCH_DATA_SB && + type != BCH_DATA_JOURNAL); - BUG_ON(!type); + preempt_disable(); if (likely(c)) { - percpu_rwsem_assert_held(&c->usage_lock); - - if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) && - gc_will_visit(c, pos)) - return; + do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags, + ca, b, type, sectors); + } else { + __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0); } - rcu_read_lock(); - - g = bucket(ca, b); - old = bucket_data_cmpxchg(c, ca, g, new, ({ - saturated_add(ca, new.dirty_sectors, sectors, - GC_MAX_SECTORS_USED); - new.data_type = type; - })); - - rcu_read_unlock(); - - BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) && - bucket_became_unavailable(c, old, new)); + preempt_enable(); } -/* Reverting this until the copygc + compression issue is fixed: */ - -static int __disk_sectors(struct bch_extent_crc_unpacked crc, unsigned sectors) +static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p, + s64 delta) { - if (!sectors) - return 0; + if (delta > 0) { + /* + * marking a new extent, which _will have size_ @delta + * + * in the bch2_mark_update -> BCH_EXTENT_OVERLAP_MIDDLE + * case, we haven't actually created the key we'll be inserting + * yet (for the split) - so we don't want to be using + * k->size/crc.live_size here: + */ + return __ptr_disk_sectors(p, delta); + } else { + BUG_ON(-delta > p.crc.live_size); - return max(1U, DIV_ROUND_UP(sectors * crc.compressed_size, - crc.uncompressed_size)); + return (s64) __ptr_disk_sectors(p, p.crc.live_size + delta) - + (s64) ptr_disk_sectors(p); + } } -/* - * Checking against gc's position has to be done here, inside the cmpxchg() - * loop, to avoid racing with the start of gc clearing all the marks - GC does - * that with the gc pos seqlock held. 
- */ -static void bch2_mark_pointer(struct bch_fs *c, - struct bkey_s_c_extent e, - const struct bch_extent_ptr *ptr, - struct bch_extent_crc_unpacked crc, - s64 sectors, enum s_alloc type, - struct bch_fs_usage *stats, - u64 journal_seq, unsigned flags) +static void bucket_set_stripe(struct bch_fs *c, + const struct bch_stripe *v, + struct bch_fs_usage *fs_usage, + u64 journal_seq, + unsigned flags) { - struct bucket_mark old, new; - unsigned saturated; - struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); - struct bucket *g = PTR_BUCKET(ca, ptr); - enum bch_data_type data_type = type == S_META - ? BCH_DATA_BTREE : BCH_DATA_USER; - u64 v; - - if (crc.compression_type) { - unsigned old_sectors, new_sectors; + bool enabled = !(flags & BCH_BUCKET_MARK_OVERWRITE); + bool gc = flags & BCH_BUCKET_MARK_GC; + unsigned i; - if (sectors > 0) { - old_sectors = 0; - new_sectors = sectors; - } else { - old_sectors = e.k->size; - new_sectors = e.k->size + sectors; - } + for (i = 0; i < v->nr_blocks; i++) { + const struct bch_extent_ptr *ptr = v->ptrs + i; + struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); + struct bucket *g = PTR_BUCKET(ca, ptr, gc); + struct bucket_mark new, old; - sectors = -__disk_sectors(crc, old_sectors) - +__disk_sectors(crc, new_sectors); - } + BUG_ON(ptr_stale(ca, ptr)); - if (flags & BCH_BUCKET_MARK_GC_WILL_VISIT) { - if (journal_seq) - bucket_cmpxchg(g, new, ({ + old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({ + new.dirty = true; + new.stripe = enabled; + if (journal_seq) { new.journal_seq_valid = 1; new.journal_seq = journal_seq; - })); - - return; + } + })); } +} + +static bool bch2_mark_pointer(struct bch_fs *c, + struct extent_ptr_decoded p, + s64 sectors, enum bch_data_type data_type, + struct bch_fs_usage *fs_usage, + u64 journal_seq, unsigned flags) +{ + bool gc = flags & BCH_BUCKET_MARK_GC; + struct bucket_mark old, new; + struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev); + struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc); + bool overflow; + u64 v; v = atomic64_read(&g->_mark.v); do { new.v.counter = old.v.counter = v; - saturated = 0; + + new.dirty = true; /* * Check this after reading bucket mark to guard against * the allocator invalidating a bucket after we've already * checked the gen */ - if (gen_after(new.gen, ptr->gen)) { + if (gen_after(new.gen, p.ptr.gen)) { BUG_ON(!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags)); - EBUG_ON(!ptr->cached && + EBUG_ON(!p.ptr.cached && test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)); - return; + return true; } - if (!ptr->cached && - new.dirty_sectors == GC_MAX_SECTORS_USED && - sectors < 0) - saturated = -sectors; - - if (ptr->cached) - saturated_add(ca, new.cached_sectors, sectors, - GC_MAX_SECTORS_USED); + if (!p.ptr.cached) + overflow = checked_add(new.dirty_sectors, sectors); else - saturated_add(ca, new.dirty_sectors, sectors, - GC_MAX_SECTORS_USED); + overflow = checked_add(new.cached_sectors, sectors); if (!new.dirty_sectors && !new.cached_sectors) { @@ -596,129 +915,769 @@ static void bch2_mark_pointer(struct bch_fs *c, old.v.counter, new.v.counter)) != old.v.counter); - bch2_dev_usage_update(c, ca, old, new); + bch2_fs_inconsistent_on(overflow, c, + "bucket sector count overflow: %u + %lli > U16_MAX", + !p.ptr.cached + ? 
old.dirty_sectors + : old.cached_sectors, sectors); + + bch2_dev_usage_update(c, ca, fs_usage, old, new, gc); + + BUG_ON(!gc && bucket_became_unavailable(old, new)); + + return false; +} + +static int bch2_mark_stripe_ptr(struct bch_fs *c, + struct bch_extent_stripe_ptr p, + enum bch_data_type data_type, + struct bch_fs_usage *fs_usage, + s64 sectors, unsigned flags) +{ + bool gc = flags & BCH_BUCKET_MARK_GC; + struct stripe *m; + unsigned old, new, nr_data; + int blocks_nonempty_delta; + s64 parity_sectors; + + BUG_ON(!sectors); + + m = genradix_ptr(&c->stripes[gc], p.idx); + + spin_lock(&c->ec_stripes_heap_lock); + + if (!m || !m->alive) { + spin_unlock(&c->ec_stripes_heap_lock); + bch_err_ratelimited(c, "pointer to nonexistent stripe %llu", + (u64) p.idx); + return -1; + } + + BUG_ON(m->r.e.data_type != data_type); + + nr_data = m->nr_blocks - m->nr_redundant; + + parity_sectors = DIV_ROUND_UP(abs(sectors) * m->nr_redundant, nr_data); + + if (sectors < 0) + parity_sectors = -parity_sectors; + sectors += parity_sectors; + + old = m->block_sectors[p.block]; + m->block_sectors[p.block] += sectors; + new = m->block_sectors[p.block]; + + blocks_nonempty_delta = (int) !!new - (int) !!old; + if (blocks_nonempty_delta) { + m->blocks_nonempty += blocks_nonempty_delta; + + if (!gc) + bch2_stripes_heap_update(c, m, p.idx); + } + + m->dirty = true; + + spin_unlock(&c->ec_stripes_heap_lock); + + update_replicas(c, fs_usage, &m->r.e, sectors); - BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) && - bucket_became_unavailable(c, old, new)); + return 0; +} - if (saturated && - atomic_long_add_return(saturated, - &ca->saturated_count) >= - bucket_to_sector(ca, ca->free_inc.size)) { - if (c->gc_thread) { - trace_gc_sectors_saturated(c); - wake_up_process(c->gc_thread); +static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k, + s64 sectors, enum bch_data_type data_type, + struct bch_fs_usage *fs_usage, + unsigned journal_seq, unsigned flags) +{ + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + struct bch_replicas_padded r; + s64 dirty_sectors = 0; + unsigned i; + int ret; + + r.e.data_type = data_type; + r.e.nr_devs = 0; + r.e.nr_required = 1; + + BUG_ON(!sectors); + + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { + s64 disk_sectors = data_type == BCH_DATA_BTREE + ? 
sectors + : ptr_disk_sectors_delta(p, sectors); + bool stale = bch2_mark_pointer(c, p, disk_sectors, data_type, + fs_usage, journal_seq, flags); + + if (p.ptr.cached) { + if (disk_sectors && !stale) + update_cached_sectors(c, fs_usage, p.ptr.dev, + disk_sectors); + } else if (!p.ec_nr) { + dirty_sectors += disk_sectors; + r.e.devs[r.e.nr_devs++] = p.ptr.dev; + } else { + for (i = 0; i < p.ec_nr; i++) { + ret = bch2_mark_stripe_ptr(c, p.ec[i], + data_type, fs_usage, + disk_sectors, flags); + if (ret) + return ret; + } + + r.e.nr_required = 0; } } + + if (dirty_sectors) + update_replicas(c, fs_usage, &r.e, dirty_sectors); + + return 0; } -void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k, - s64 sectors, bool metadata, - struct gc_pos pos, - struct bch_fs_usage *stats, +static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k, + struct bch_fs_usage *fs_usage, + u64 journal_seq, unsigned flags) +{ + bool gc = flags & BCH_BUCKET_MARK_GC; + struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k); + size_t idx = s.k->p.offset; + struct stripe *m = genradix_ptr(&c->stripes[gc], idx); + unsigned i; + + spin_lock(&c->ec_stripes_heap_lock); + + if (!m || ((flags & BCH_BUCKET_MARK_OVERWRITE) && !m->alive)) { + spin_unlock(&c->ec_stripes_heap_lock); + bch_err_ratelimited(c, "error marking nonexistent stripe %zu", + idx); + return -1; + } + + if (!(flags & BCH_BUCKET_MARK_OVERWRITE)) { + m->sectors = le16_to_cpu(s.v->sectors); + m->algorithm = s.v->algorithm; + m->nr_blocks = s.v->nr_blocks; + m->nr_redundant = s.v->nr_redundant; + + bch2_bkey_to_replicas(&m->r.e, k); + + /* + * XXX: account for stripes somehow here + */ +#if 0 + update_replicas(c, fs_usage, &m->r.e, stripe_sectors); +#endif + + /* gc recalculates these fields: */ + if (!(flags & BCH_BUCKET_MARK_GC)) { + for (i = 0; i < s.v->nr_blocks; i++) { + m->block_sectors[i] = + stripe_blockcount_get(s.v, i); + m->blocks_nonempty += !!m->block_sectors[i]; + } + } + + if (!gc) + bch2_stripes_heap_update(c, m, idx); + m->alive = true; + } else { + if (!gc) + bch2_stripes_heap_del(c, m, idx); + memset(m, 0, sizeof(*m)); + } + + spin_unlock(&c->ec_stripes_heap_lock); + + bucket_set_stripe(c, s.v, fs_usage, 0, flags); + return 0; +} + +int bch2_mark_key_locked(struct bch_fs *c, + struct bkey_s_c k, s64 sectors, + struct bch_fs_usage *fs_usage, u64 journal_seq, unsigned flags) { - /* - * synchronization w.r.t. GC: - * - * Normally, bucket sector counts/marks are updated on the fly, as - * references are added/removed from the btree, the lists of buckets the - * allocator owns, other metadata buckets, etc. - * - * When GC is in progress and going to mark this reference, we do _not_ - * mark this reference here, to avoid double counting - GC will count it - * when it gets to it. - * - * To know whether we should mark a given reference (GC either isn't - * running, or has already marked references at this position) we - * construct a total order for everything GC walks. Then, we can simply - * compare the position of the reference we're marking - @pos - with - * GC's current position. If GC is going to mark this reference, GC's - * current position will be less than @pos; if GC's current position is - * greater than @pos GC has either already walked this position, or - * isn't running. 
- * - * To avoid racing with GC's position changing, we have to deal with - * - GC's position being set to GC_POS_MIN when GC starts: - * usage_lock guards against this - * - GC's position overtaking @pos: we guard against this with - * whatever lock protects the data structure the reference lives in - * (e.g. the btree node lock, or the relevant allocator lock). - */ + int ret = 0; - percpu_down_read_preempt_disable(&c->usage_lock); - if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) && - gc_will_visit(c, pos)) - flags |= BCH_BUCKET_MARK_GC_WILL_VISIT; + preempt_disable(); - if (!stats) - stats = this_cpu_ptr(c->usage_percpu); + if (!fs_usage || (flags & BCH_BUCKET_MARK_GC)) + fs_usage = fs_usage_ptr(c, journal_seq, + flags & BCH_BUCKET_MARK_GC); switch (k.k->type) { - case BCH_EXTENT: - case BCH_EXTENT_CACHED: { - struct bkey_s_c_extent e = bkey_s_c_to_extent(k); - const struct bch_extent_ptr *ptr; - struct bch_extent_crc_unpacked crc; - enum s_alloc type = metadata ? S_META : S_DIRTY; - unsigned replicas = 0; - - BUG_ON(metadata && bkey_extent_is_cached(e.k)); - BUG_ON(!sectors); - - extent_for_each_ptr_crc(e, ptr, crc) { - bch2_mark_pointer(c, e, ptr, crc, sectors, type, - stats, journal_seq, flags); - replicas += !ptr->cached; + case KEY_TYPE_alloc: + ret = bch2_mark_alloc(c, k, fs_usage, journal_seq, flags); + break; + case KEY_TYPE_btree_ptr: + sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE) + ? c->opts.btree_node_size + : -c->opts.btree_node_size; + + ret = bch2_mark_extent(c, k, sectors, BCH_DATA_BTREE, + fs_usage, journal_seq, flags); + break; + case KEY_TYPE_extent: + ret = bch2_mark_extent(c, k, sectors, BCH_DATA_USER, + fs_usage, journal_seq, flags); + break; + case KEY_TYPE_stripe: + ret = bch2_mark_stripe(c, k, fs_usage, journal_seq, flags); + break; + case KEY_TYPE_inode: + if (!(flags & BCH_BUCKET_MARK_OVERWRITE)) + fs_usage->nr_inodes++; + else + fs_usage->nr_inodes--; + break; + case KEY_TYPE_reservation: { + unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas; + + sectors *= replicas; + replicas = clamp_t(unsigned, replicas, 1, + ARRAY_SIZE(fs_usage->persistent_reserved)); + + fs_usage->reserved += sectors; + fs_usage->persistent_reserved[replicas - 1] += sectors; + break; + } + } + + preempt_enable(); + + return ret; +} + +int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k, + s64 sectors, + struct bch_fs_usage *fs_usage, + u64 journal_seq, unsigned flags) +{ + int ret; + + percpu_down_read(&c->mark_lock); + ret = bch2_mark_key_locked(c, k, sectors, + fs_usage, journal_seq, flags); + percpu_up_read(&c->mark_lock); + + return ret; +} + +inline int bch2_mark_overwrite(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c old, + struct bkey_i *new, + struct bch_fs_usage *fs_usage, + unsigned flags) +{ + struct bch_fs *c = trans->c; + struct btree *b = iter->l[0].b; + s64 sectors = 0; + + if (btree_node_is_extents(b) + ? 
bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0 + : bkey_cmp(new->k.p, old.k->p)) + return 0; + + if (btree_node_is_extents(b)) { + switch (bch2_extent_overlap(&new->k, old.k)) { + case BCH_EXTENT_OVERLAP_ALL: + sectors = -((s64) old.k->size); + break; + case BCH_EXTENT_OVERLAP_BACK: + sectors = bkey_start_offset(&new->k) - + old.k->p.offset; + break; + case BCH_EXTENT_OVERLAP_FRONT: + sectors = bkey_start_offset(old.k) - + new->k.p.offset; + break; + case BCH_EXTENT_OVERLAP_MIDDLE: + sectors = old.k->p.offset - new->k.p.offset; + BUG_ON(sectors <= 0); + + bch2_mark_key_locked(c, old, sectors, + fs_usage, trans->journal_res.seq, + BCH_BUCKET_MARK_INSERT|flags); + + sectors = bkey_start_offset(&new->k) - + old.k->p.offset; + break; } - if (replicas) { - BUG_ON(replicas - 1 > ARRAY_SIZE(stats->s)); - stats->s[replicas - 1].data[type] += sectors; + BUG_ON(sectors >= 0); + } + + return bch2_mark_key_locked(c, old, sectors, fs_usage, + trans->journal_res.seq, + BCH_BUCKET_MARK_OVERWRITE|flags) ?: 1; +} + +int bch2_mark_update(struct btree_trans *trans, + struct btree_insert_entry *insert, + struct bch_fs_usage *fs_usage, + unsigned flags) +{ + struct bch_fs *c = trans->c; + struct btree_iter *iter = insert->iter; + struct btree *b = iter->l[0].b; + struct btree_node_iter node_iter = iter->l[0].iter; + struct bkey_packed *_k; + int ret = 0; + + if (!btree_node_type_needs_gc(iter->btree_id)) + return 0; + + if (!(trans->flags & BTREE_INSERT_NOMARK_INSERT)) + bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), + bpos_min(insert->k->k.p, b->key.k.p).offset - + bkey_start_offset(&insert->k->k), + fs_usage, trans->journal_res.seq, + BCH_BUCKET_MARK_INSERT|flags); + + if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES)) + return 0; + + /* + * For non extents, we only mark the new key, not the key being + * overwritten - unless we're actually deleting: + */ + if ((iter->btree_id == BTREE_ID_ALLOC || + iter->btree_id == BTREE_ID_EC) && + !bkey_deleted(&insert->k->k)) + return 0; + + while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b, + KEY_TYPE_discard))) { + struct bkey unpacked; + struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked); + + ret = bch2_mark_overwrite(trans, iter, k, insert->k, + fs_usage, flags); + if (ret <= 0) + break; + + bch2_btree_node_iter_advance(&node_iter, b); + } + + return ret; +} + +void bch2_trans_fs_usage_apply(struct btree_trans *trans, + struct bch_fs_usage *fs_usage) +{ + struct bch_fs *c = trans->c; + struct btree_insert_entry *i; + static int warned_disk_usage = 0; + u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0; + char buf[200]; + + if (!bch2_fs_usage_apply(c, fs_usage, trans->disk_res, + trans->journal_res.seq) || + warned_disk_usage || + xchg(&warned_disk_usage, 1)) + return; + + pr_err("disk usage increased more than %llu sectors reserved", disk_res_sectors); + + trans_for_each_update_iter(trans, i) { + struct btree_iter *iter = i->iter; + struct btree *b = iter->l[0].b; + struct btree_node_iter node_iter = iter->l[0].iter; + struct bkey_packed *_k; + + pr_err("while inserting"); + bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k)); + pr_err("%s", buf); + pr_err("overlapping with"); + + node_iter = iter->l[0].iter; + while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b, + KEY_TYPE_discard))) { + struct bkey unpacked; + struct bkey_s_c k; + + k = bkey_disassemble(b, _k, &unpacked); + + if (btree_node_is_extents(b) + ? 
bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0 + : bkey_cmp(i->k->k.p, k.k->p)) + break; + + bch2_bkey_val_to_text(&PBUF(buf), c, k); + pr_err("%s", buf); + + bch2_btree_node_iter_advance(&node_iter, b); } - break; } - case BCH_RESERVATION: { - struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k); +} - if (r.v->nr_replicas) { - BUG_ON(r.v->nr_replicas - 1 > ARRAY_SIZE(stats->s)); - stats->s[r.v->nr_replicas - 1].persistent_reserved += sectors; +/* trans_mark: */ + +static int trans_get_key(struct btree_trans *trans, + enum btree_id btree_id, struct bpos pos, + struct btree_iter **iter, + struct bkey_s_c *k) +{ + unsigned i; + int ret; + + for (i = 0; i < trans->nr_updates; i++) + if (!trans->updates[i].deferred && + trans->updates[i].iter->btree_id == btree_id && + !bkey_cmp(pos, trans->updates[i].iter->pos)) { + *iter = trans->updates[i].iter; + *k = bkey_i_to_s_c(trans->updates[i].k); + return 0; } - break; + + *iter = __bch2_trans_get_iter(trans, btree_id, pos, + BTREE_ITER_SLOTS|BTREE_ITER_INTENT, 0); + if (IS_ERR(*iter)) + return PTR_ERR(*iter); + + *k = bch2_btree_iter_peek_slot(*iter); + ret = bkey_err(*k); + if (ret) + bch2_trans_iter_put(trans, *iter); + return ret; +} + +static void *trans_update_key(struct btree_trans *trans, + struct btree_iter *iter, + unsigned u64s) +{ + struct bkey_i *new_k; + unsigned i; + + new_k = bch2_trans_kmalloc(trans, u64s * sizeof(u64)); + if (IS_ERR(new_k)) + return new_k; + + bkey_init(&new_k->k); + new_k->k.p = iter->pos; + + for (i = 0; i < trans->nr_updates; i++) + if (!trans->updates[i].deferred && + trans->updates[i].iter == iter) { + trans->updates[i].k = new_k; + return new_k; + } + + bch2_trans_update(trans, ((struct btree_insert_entry) { + .iter = iter, + .k = new_k, + .triggered = true, + })); + + return new_k; +} + +static int bch2_trans_mark_pointer(struct btree_trans *trans, + struct extent_ptr_decoded p, + s64 sectors, enum bch_data_type data_type) +{ + struct bch_fs *c = trans->c; + struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev); + struct btree_iter *iter; + struct bkey_s_c k; + struct bkey_alloc_unpacked u; + struct bkey_i_alloc *a; + bool overflow; + int ret; + + ret = trans_get_key(trans, BTREE_ID_ALLOC, + POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr)), + &iter, &k); + if (ret) + return ret; + + if (k.k->type != KEY_TYPE_alloc) { + bch_err_ratelimited(c, "pointer to nonexistent bucket %u:%zu", + p.ptr.dev, + PTR_BUCKET_NR(ca, &p.ptr)); + ret = -1; + goto out; } + + u = bch2_alloc_unpack(k); + + if (gen_after(u.gen, p.ptr.gen)) { + ret = 1; + goto out; } - percpu_up_read_preempt_enable(&c->usage_lock); + + if (!p.ptr.cached) + overflow = checked_add(u.dirty_sectors, sectors); + else + overflow = checked_add(u.cached_sectors, sectors); + + u.data_type = u.dirty_sectors || u.cached_sectors + ? data_type : 0; + + bch2_fs_inconsistent_on(overflow, c, + "bucket sector count overflow: %u + %lli > U16_MAX", + !p.ptr.cached + ? 
u.dirty_sectors + : u.cached_sectors, sectors); + + a = trans_update_key(trans, iter, BKEY_ALLOC_U64s_MAX); + ret = PTR_ERR_OR_ZERO(a); + if (ret) + goto out; + + bkey_alloc_init(&a->k_i); + a->k.p = iter->pos; + bch2_alloc_pack(a, u); +out: + bch2_trans_iter_put(trans, iter); + return ret; } -/* Disk reservations: */ +static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans, + struct bch_extent_stripe_ptr p, + s64 sectors, enum bch_data_type data_type) +{ + struct bch_replicas_padded r; + struct btree_iter *iter; + struct bkey_i *new_k; + struct bkey_s_c k; + struct bkey_s_stripe s; + unsigned nr_data; + s64 parity_sectors; + int ret = 0; + + BUG_ON(!sectors); + + ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k); + if (ret) + return ret; + + if (k.k->type != KEY_TYPE_stripe) { + bch_err_ratelimited(trans->c, + "pointer to nonexistent stripe %llu", + (u64) p.idx); + ret = -1; + goto out; + } + + new_k = trans_update_key(trans, iter, k.k->u64s); + ret = PTR_ERR_OR_ZERO(new_k); + if (ret) + goto out; + + bkey_reassemble(new_k, k); + s = bkey_i_to_s_stripe(new_k); + + nr_data = s.v->nr_blocks - s.v->nr_redundant; + + parity_sectors = DIV_ROUND_UP(abs(sectors) * s.v->nr_redundant, nr_data); + + if (sectors < 0) + parity_sectors = -parity_sectors; -static u64 __recalc_sectors_available(struct bch_fs *c) + stripe_blockcount_set(s.v, p.block, + stripe_blockcount_get(s.v, p.block) + + sectors + parity_sectors); + + bch2_bkey_to_replicas(&r.e, s.s_c); + + update_replicas_list(trans, &r.e, sectors); +out: + bch2_trans_iter_put(trans, iter); + return ret; +} + +static int bch2_trans_mark_extent(struct btree_trans *trans, + struct bkey_s_c k, + s64 sectors, enum bch_data_type data_type) { - int cpu; + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + struct bch_replicas_padded r; + s64 dirty_sectors = 0; + bool stale; + unsigned i; + int ret; + + r.e.data_type = data_type; + r.e.nr_devs = 0; + r.e.nr_required = 1; + + BUG_ON(!sectors); + + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { + s64 disk_sectors = data_type == BCH_DATA_BTREE + ? sectors + : ptr_disk_sectors_delta(p, sectors); + + ret = bch2_trans_mark_pointer(trans, p, disk_sectors, + data_type); + if (ret < 0) + return ret; + + stale = ret > 0; + + if (p.ptr.cached) { + if (disk_sectors && !stale) + update_cached_sectors_list(trans, p.ptr.dev, + disk_sectors); + } else if (!p.ec_nr) { + dirty_sectors += disk_sectors; + r.e.devs[r.e.nr_devs++] = p.ptr.dev; + } else { + for (i = 0; i < p.ec_nr; i++) { + ret = bch2_trans_mark_stripe_ptr(trans, p.ec[i], + disk_sectors, data_type); + if (ret) + return ret; + } + + r.e.nr_required = 0; + } + } + + if (dirty_sectors) + update_replicas_list(trans, &r.e, dirty_sectors); + + return 0; +} + +int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k, + s64 sectors, unsigned flags) +{ + struct replicas_delta_list *d; + struct bch_fs *c = trans->c; + + switch (k.k->type) { + case KEY_TYPE_btree_ptr: + sectors = !(flags & BCH_BUCKET_MARK_OVERWRITE) + ? 
c->opts.btree_node_size + : -c->opts.btree_node_size; + + return bch2_trans_mark_extent(trans, k, sectors, + BCH_DATA_BTREE); + case KEY_TYPE_extent: + return bch2_trans_mark_extent(trans, k, sectors, + BCH_DATA_USER); + case KEY_TYPE_inode: + d = replicas_deltas_realloc(trans, 0); + + if (!(flags & BCH_BUCKET_MARK_OVERWRITE)) + d->fs_usage.nr_inodes++; + else + d->fs_usage.nr_inodes--; + return 0; + case KEY_TYPE_reservation: { + unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas; + + d = replicas_deltas_realloc(trans, 0); + + sectors *= replicas; + replicas = clamp_t(unsigned, replicas, 1, + ARRAY_SIZE(d->fs_usage.persistent_reserved)); + + d->fs_usage.reserved += sectors; + d->fs_usage.persistent_reserved[replicas - 1] += sectors; + return 0; + } + default: + return 0; + } +} - for_each_possible_cpu(cpu) - per_cpu_ptr(c->usage_percpu, cpu)->available_cache = 0; +int bch2_trans_mark_update(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_i *insert) +{ + struct btree *b = iter->l[0].b; + struct btree_node_iter node_iter = iter->l[0].iter; + struct bkey_packed *_k; + int ret; + + if (!btree_node_type_needs_gc(iter->btree_id)) + return 0; + + ret = bch2_trans_mark_key(trans, + bkey_i_to_s_c(insert), + bpos_min(insert->k.p, b->key.k.p).offset - + bkey_start_offset(&insert->k), + BCH_BUCKET_MARK_INSERT); + if (ret) + return ret; + + while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b, + KEY_TYPE_discard))) { + struct bkey unpacked; + struct bkey_s_c k; + s64 sectors = 0; + + k = bkey_disassemble(b, _k, &unpacked); + + if (btree_node_is_extents(b) + ? bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0 + : bkey_cmp(insert->k.p, k.k->p)) + break; + + if (btree_node_is_extents(b)) { + switch (bch2_extent_overlap(&insert->k, k.k)) { + case BCH_EXTENT_OVERLAP_ALL: + sectors = -((s64) k.k->size); + break; + case BCH_EXTENT_OVERLAP_BACK: + sectors = bkey_start_offset(&insert->k) - + k.k->p.offset; + break; + case BCH_EXTENT_OVERLAP_FRONT: + sectors = bkey_start_offset(k.k) - + insert->k.p.offset; + break; + case BCH_EXTENT_OVERLAP_MIDDLE: + sectors = k.k->p.offset - insert->k.p.offset; + BUG_ON(sectors <= 0); + + ret = bch2_trans_mark_key(trans, k, sectors, + BCH_BUCKET_MARK_INSERT); + if (ret) + return ret; + + sectors = bkey_start_offset(&insert->k) - + k.k->p.offset; + break; + } - return bch2_fs_sectors_free(c, bch2_fs_usage_read(c)); + BUG_ON(sectors >= 0); + } + + ret = bch2_trans_mark_key(trans, k, sectors, + BCH_BUCKET_MARK_OVERWRITE); + if (ret) + return ret; + + bch2_btree_node_iter_advance(&node_iter, b); + } + + return 0; } -/* Used by gc when it's starting: */ -void bch2_recalc_sectors_available(struct bch_fs *c) +/* Disk reservations: */ + +static u64 bch2_recalc_sectors_available(struct bch_fs *c) { - percpu_down_write(&c->usage_lock); - atomic64_set(&c->sectors_available, __recalc_sectors_available(c)); - percpu_up_write(&c->usage_lock); + percpu_u64_set(&c->pcpu->sectors_available, 0); + + return avail_factor(__bch2_fs_usage_read_short(c).free); } void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res) { - percpu_down_read_preempt_disable(&c->usage_lock); - this_cpu_sub(c->usage_percpu->online_reserved, + percpu_down_read(&c->mark_lock); + this_cpu_sub(c->usage[0]->online_reserved, res->sectors); - - bch2_fs_stats_verify(c); - percpu_up_read_preempt_enable(&c->usage_lock); + percpu_up_read(&c->mark_lock); res->sectors = 0; } @@ -728,15 +1687,16 @@ void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation 
*res) int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res, unsigned sectors, int flags) { - struct bch_fs_usage *stats; + struct bch_fs_pcpu *pcpu; u64 old, v, get; s64 sectors_available; int ret; - percpu_down_read_preempt_disable(&c->usage_lock); - stats = this_cpu_ptr(c->usage_percpu); + percpu_down_read(&c->mark_lock); + preempt_disable(); + pcpu = this_cpu_ptr(c->pcpu); - if (sectors <= stats->available_cache) + if (sectors <= pcpu->sectors_available) goto out; v = atomic64_read(&c->sectors_available); @@ -745,64 +1705,42 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res, get = min((u64) sectors + SECTORS_CACHE, old); if (get < sectors) { - percpu_up_read_preempt_enable(&c->usage_lock); + preempt_enable(); + percpu_up_read(&c->mark_lock); goto recalculate; } } while ((v = atomic64_cmpxchg(&c->sectors_available, old, old - get)) != old); - stats->available_cache += get; + pcpu->sectors_available += get; out: - stats->available_cache -= sectors; - stats->online_reserved += sectors; - res->sectors += sectors; + pcpu->sectors_available -= sectors; + this_cpu_add(c->usage[0]->online_reserved, sectors); + res->sectors += sectors; - bch2_disk_reservations_verify(c, flags); - bch2_fs_stats_verify(c); - percpu_up_read_preempt_enable(&c->usage_lock); + preempt_enable(); + percpu_up_read(&c->mark_lock); return 0; recalculate: - /* - * GC recalculates sectors_available when it starts, so that hopefully - * we don't normally end up blocking here: - */ + percpu_down_write(&c->mark_lock); - /* - * Piss fuck, we can be called from extent_insert_fixup() with btree - * locks held: - */ - - if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD)) { - if (!(flags & BCH_DISK_RESERVATION_BTREE_LOCKS_HELD)) - down_read(&c->gc_lock); - else if (!down_read_trylock(&c->gc_lock)) - return -EINTR; - } - - percpu_down_write(&c->usage_lock); - sectors_available = __recalc_sectors_available(c); + sectors_available = bch2_recalc_sectors_available(c); if (sectors <= sectors_available || (flags & BCH_DISK_RESERVATION_NOFAIL)) { atomic64_set(&c->sectors_available, max_t(s64, 0, sectors_available - sectors)); - stats->online_reserved += sectors; - res->sectors += sectors; + this_cpu_add(c->usage[0]->online_reserved, sectors); + res->sectors += sectors; ret = 0; - - bch2_disk_reservations_verify(c, flags); } else { atomic64_set(&c->sectors_available, sectors_available); ret = -ENOSPC; } - bch2_fs_stats_verify(c); - percpu_up_write(&c->usage_lock); - - if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD)) - up_read(&c->gc_lock); + percpu_up_write(&c->mark_lock); return ret; } @@ -822,8 +1760,8 @@ static void buckets_free_rcu(struct rcu_head *rcu) int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) { struct bucket_array *buckets = NULL, *old_buckets = NULL; - unsigned long *buckets_dirty = NULL; - u8 *oldest_gens = NULL; + unsigned long *buckets_nouse = NULL; + unsigned long *buckets_written = NULL; alloc_fifo free[RESERVE_NR]; alloc_fifo free_inc; alloc_heap alloc_heap; @@ -832,10 +1770,11 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE, ca->mi.bucket_size / c->opts.btree_node_size); /* XXX: these should be tunable */ - size_t reserve_none = max_t(size_t, 4, ca->mi.nbuckets >> 9); - size_t copygc_reserve = max_t(size_t, 16, ca->mi.nbuckets >> 7); - size_t free_inc_reserve = copygc_reserve / 2; - bool resize = ca->buckets != NULL, + size_t reserve_none = 
max_t(size_t, 1, nbuckets >> 9); + size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 7); + size_t free_inc_nr = max(max_t(size_t, 1, nbuckets >> 12), + btree_reserve * 2); + bool resize = ca->buckets[0] != NULL, start_copygc = ca->copygc_thread != NULL; int ret = -ENOMEM; unsigned i; @@ -848,17 +1787,18 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) if (!(buckets = kvpmalloc(sizeof(struct bucket_array) + nbuckets * sizeof(struct bucket), GFP_KERNEL|__GFP_ZERO)) || - !(oldest_gens = kvpmalloc(nbuckets * sizeof(u8), + !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) * + sizeof(unsigned long), GFP_KERNEL|__GFP_ZERO)) || - !(buckets_dirty = kvpmalloc(BITS_TO_LONGS(nbuckets) * + !(buckets_written = kvpmalloc(BITS_TO_LONGS(nbuckets) * sizeof(unsigned long), GFP_KERNEL|__GFP_ZERO)) || !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) || !init_fifo(&free[RESERVE_MOVINGGC], copygc_reserve, GFP_KERNEL) || !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) || - !init_fifo(&free_inc, free_inc_reserve, GFP_KERNEL) || - !init_heap(&alloc_heap, free_inc_reserve, GFP_KERNEL) || + !init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) || + !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) || !init_heap(©gc_heap, copygc_reserve, GFP_KERNEL)) goto err; @@ -870,7 +1810,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) if (resize) { down_write(&c->gc_lock); down_write(&ca->bucket_lock); - percpu_down_write(&c->usage_lock); + percpu_down_write(&c->mark_lock); } old_buckets = bucket_array(ca); @@ -881,22 +1821,22 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) memcpy(buckets->b, old_buckets->b, n * sizeof(struct bucket)); - memcpy(oldest_gens, - ca->oldest_gens, - n * sizeof(u8)); - memcpy(buckets_dirty, - ca->buckets_dirty, + memcpy(buckets_nouse, + ca->buckets_nouse, + BITS_TO_LONGS(n) * sizeof(unsigned long)); + memcpy(buckets_written, + ca->buckets_written, BITS_TO_LONGS(n) * sizeof(unsigned long)); } - rcu_assign_pointer(ca->buckets, buckets); + rcu_assign_pointer(ca->buckets[0], buckets); buckets = old_buckets; - swap(ca->oldest_gens, oldest_gens); - swap(ca->buckets_dirty, buckets_dirty); + swap(ca->buckets_nouse, buckets_nouse); + swap(ca->buckets_written, buckets_written); if (resize) - percpu_up_write(&c->usage_lock); + percpu_up_write(&c->mark_lock); spin_lock(&c->freelist_lock); for (i = 0; i < RESERVE_NR; i++) { @@ -931,10 +1871,10 @@ err: free_fifo(&free_inc); for (i = 0; i < RESERVE_NR; i++) free_fifo(&free[i]); - kvpfree(buckets_dirty, + kvpfree(buckets_nouse, + BITS_TO_LONGS(nbuckets) * sizeof(unsigned long)); + kvpfree(buckets_written, BITS_TO_LONGS(nbuckets) * sizeof(unsigned long)); - kvpfree(oldest_gens, - nbuckets * sizeof(u8)); if (buckets) call_rcu(&old_buckets->rcu, buckets_free_rcu); @@ -950,19 +1890,20 @@ void bch2_dev_buckets_free(struct bch_dev *ca) free_fifo(&ca->free_inc); for (i = 0; i < RESERVE_NR; i++) free_fifo(&ca->free[i]); - kvpfree(ca->buckets_dirty, + kvpfree(ca->buckets_written, + BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long)); + kvpfree(ca->buckets_nouse, BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long)); - kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8)); - kvpfree(rcu_dereference_protected(ca->buckets, 1), + kvpfree(rcu_dereference_protected(ca->buckets[0], 1), sizeof(struct bucket_array) + ca->mi.nbuckets * sizeof(struct bucket)); - free_percpu(ca->usage_percpu); + free_percpu(ca->usage[0]); } int 
bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
 {
-	if (!(ca->usage_percpu = alloc_percpu(struct bch_dev_usage)))
+	if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
 		return -ENOMEM;

 	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);;
 }
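A few of the patterns this patch introduces are worth exercising standalone. First, the new `checked_add()` helper, which replaces the old `saturated_add()`/`GC_MAX_SECTORS_USED` scheme: a saturating u16 add that reports overflow instead of silently wrapping. This reproduction only adds a `U16_MAX` definition and a demo `main()`; like the kernel version it relies on the GCC/clang statement-expression extension:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define U16_MAX 0xffffU

/* as in the patch: clamp the sum to U16_MAX, return whether it overflowed */
#define checked_add(a, b)					\
({								\
	unsigned _res = (unsigned) (a) + (b);			\
	bool overflow = _res > U16_MAX;				\
	if (overflow)						\
		_res = U16_MAX;					\
	(a) = _res;						\
	overflow;						\
})

int main(void)
{
	uint16_t dirty_sectors = 65000;
	bool ov = checked_add(dirty_sectors, 1000);

	/* overflow: clamps to U16_MAX (65535) and returns true */
	printf("overflow=%d sectors=%u\n", ov, dirty_sectors);
	return 0;
}
```

In the callers, the returned flag feeds `bch2_fs_inconsistent_on()`, so an overflow is reported as filesystem inconsistency rather than corrupting the counters.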
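The `avail_factor()` change in the hunk above fixes an operator-precedence bug: the old expression `(r << RESERVE_FACTOR) / (1 << RESERVE_FACTOR) + 1` divides first and then adds 1, so it reduces to `r + 1` and reports *more* sectors available than exist. The new expression `(r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1)` scales by 64/65, leaving headroom. Compiling both side by side shows the difference:

```c
#include <stdint.h>
#include <stdio.h>

#define RESERVE_FACTOR 6

static uint64_t avail_factor_old(uint64_t r)
{
	/* precedence bug: equals r + 1 */
	return (r << RESERVE_FACTOR) / (1 << RESERVE_FACTOR) + 1;
}

static uint64_t avail_factor_new(uint64_t r)
{
	/* intended scaling: r * 64 / 65 */
	return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
}

int main(void)
{
	uint64_t r = 1000000;

	printf("old: %llu new: %llu\n",
	       (unsigned long long) avail_factor_old(r),
	       (unsigned long long) avail_factor_new(r));
	return 0;
}
```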
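Throughout the file, bucket marks are updated with the `bucket_cmpxchg()` pattern: the mark is a packed 64-bit value, and updates are built from a snapshot and published with a compare-exchange retry loop, so no per-bucket lock is needed. A sketch of the loop using C11 atomics in place of the kernel's `atomic64_cmpxchg()` (the field layout is illustrative):

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

union bucket_mark {
	uint64_t v;
	struct {	/* illustrative packing, not the kernel's layout */
		uint16_t gen;
		uint16_t data_type;
		uint16_t dirty_sectors;
		uint16_t cached_sectors;
	};
};

static _Atomic uint64_t bucket;

static union bucket_mark mark_update(uint16_t sectors)
{
	union bucket_mark old, new;
	uint64_t v = atomic_load(&bucket);

	do {
		/* rebuild 'new' from the latest snapshot each iteration */
		old.v = new.v = v;
		new.dirty_sectors += sectors;
	} while (!atomic_compare_exchange_weak(&bucket, &v, new.v));
	/* on failure, v is reloaded with the current value and we retry */

	return old;
}

int main(void)
{
	union bucket_mark old = mark_update(8);
	union bucket_mark cur = { .v = atomic_load(&bucket) };

	printf("dirty_sectors: %u -> %u\n",
	       old.dirty_sectors, cur.dirty_sectors);
	return 0;
}
```

Returning the old mark is what lets callers like `bch2_dev_usage_update()` compute exact deltas between the pre- and post-update states.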
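The new `do_mark_fn()` macro dispatches every mark to up to two copies of the accounting: the live copy, and GC's shadow copy when GC is running and has already walked past the position being marked. A sketch of that dispatch, where `gc_visited()` and the position type are simplified stand-ins (the assumption, consistent with the old comment removed by this patch, is that GC walks positions in increasing order):

```c
#include <stdbool.h>
#include <stdio.h>

#define BCH_BUCKET_MARK_GC (1U << 0)

struct pos { unsigned phase; };

static bool gc_visited(struct pos gc_pos, struct pos pos)
{
	/* assumption: GC has visited everything before its current position */
	return gc_pos.phase > pos.phase;
}

static int mark_fn(int gc)
{
	/* gc == 0: live counters; gc == 1: GC's shadow counters */
	printf("marking %s copy\n", gc ? "gc" : "live");
	return 0;
}

static int do_mark(struct pos gc_pos, struct pos pos, unsigned flags)
{
	int ret = 0;

	/* mirrors do_mark_fn(): run against the copy the caller asked for,
	 * plus GC's copy if GC already walked this position */
	for (int gc = 0; gc < 2 && !ret; gc++)
		if (!gc == !(flags & BCH_BUCKET_MARK_GC) ||
		    (gc && gc_visited(gc_pos, pos)))
			ret = mark_fn(gc);
	return ret;
}

int main(void)
{
	struct pos gc_pos = { .phase = 2 }, pos = { .phase = 1 };

	/* live update at a position GC has passed: both copies get marked */
	return do_mark(gc_pos, pos, 0);
}
```

This replaces the old scheme of comparing against `gc_will_visit()` and skipping the mark entirely, which is why `BCH_BUCKET_MARK_GC_WILL_VISIT` disappears in this patch.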
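The new `trans_mark` path does not touch the counters directly; it appends (replicas entry, sector delta) records to a growable list on the transaction (`replicas_deltas_realloc()` / `update_replicas_list()`) and folds them in at commit via `bch2_replicas_delta_list_apply()`. A simplified sketch with fixed-size records, where the kernel's entries are variable-size and the doubling growth policy mirrors `replicas_deltas_realloc()`:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct replicas_delta {
	int64_t delta;
	unsigned idx;	/* simplification: replicas index, not a full entry */
};

struct replicas_delta_list {
	unsigned used, size;
	struct replicas_delta d[];
};

static struct replicas_delta_list *
deltas_realloc(struct replicas_delta_list *l, unsigned more)
{
	if (!l || l->used + more > l->size) {
		unsigned new_size = l ? (l->size + more) * 2 : 8;
		unsigned used = l ? l->used : 0;

		l = realloc(l, sizeof(*l) + new_size * sizeof(l->d[0]));
		if (!l)
			abort();	/* kernel version BUG_ON()s here */
		l->used = used;
		l->size = new_size;
	}
	return l;
}

static struct replicas_delta_list *
update_replicas_list(struct replicas_delta_list *l, unsigned idx, int64_t sectors)
{
	l = deltas_realloc(l, 1);
	l->d[l->used++] = (struct replicas_delta) { .delta = sectors, .idx = idx };
	return l;
}

/* commit time: fold the accumulated deltas into the usage counters */
static void deltas_apply(uint64_t *replicas, struct replicas_delta_list *l)
{
	for (unsigned i = 0; i < l->used; i++)
		replicas[l->d[i].idx] += l->d[i].delta;
}

int main(void)
{
	struct replicas_delta_list *l = NULL;
	uint64_t replicas[4] = { 0 };

	l = update_replicas_list(l, 1, 128);
	l = update_replicas_list(l, 1, -16);
	deltas_apply(replicas, l);
	printf("replicas[1] = %llu\n", (unsigned long long) replicas[1]);
	free(l);
	return 0;
}
```

Deferring the counter updates this way is what lets the transactional path back out cleanly if the btree update fails before commit.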
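Finally, `bch2_disk_reservation_add()` keeps its lock-free fast path through the rework: each CPU holds a cached grab of sectors (`pcpu->sectors_available`) taken from the global `c->sectors_available` atomic, so most reservations never touch shared state. A single-threaded stand-in for the grab logic, with `SECTORS_CACHE` as an illustrative value:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SECTORS_CACHE 1024

static atomic_ullong sectors_available = 1u << 20;
static unsigned long long pcpu_sectors_available; /* this CPU's cache */

static int reserve(unsigned sectors)
{
	unsigned long long old, get;

	/* fast path: the per-CPU cache covers the request */
	if (sectors <= pcpu_sectors_available)
		goto out;

	/* refill: grab the request plus SECTORS_CACHE from the global pool */
	old = atomic_load(&sectors_available);
	do {
		get = old < sectors + SECTORS_CACHE
			? old : sectors + SECTORS_CACHE;
		if (get < sectors)
			return -1; /* kernel: recalculate under mark_lock */
	} while (!atomic_compare_exchange_weak(&sectors_available,
					       &old, old - get));

	pcpu_sectors_available += get;
out:
	pcpu_sectors_available -= sectors;
	return 0;
}

int main(void)
{
	printf("reserve 100: %d, cached %llu\n",
	       reserve(100), pcpu_sectors_available);
	return 0;
}
```

One visible simplification in the patch itself: the slow path no longer takes `gc_lock` at all (the `BCH_DISK_RESERVATION_GC_LOCK_HELD`/`BTREE_LOCKS_HELD` dance is gone), since GC now maintains its own shadow counters instead of recalculating `sectors_available` at start.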