author    Kent Overstreet <kent.overstreet@gmail.com>  2016-02-22 11:32:38 -0900
committer Kent Overstreet <kent.overstreet@gmail.com>  2017-01-18 21:38:32 -0900
commit    5e649b72f33716a57fa28dff5dd26b688f72a9c9 (patch)
tree      4dfffce06f536b7a0377ba24cbad742823f53ad8
parent    35227dffd49dd4c2a84d74bbf6c4632c79b2acfa (diff)

bcache: Better reservations, precise -ENOSPC
-rw-r--r--  drivers/md/bcache/bcache.h         13
-rw-r--r--  drivers/md/bcache/btree_gc.c        9
-rw-r--r--  drivers/md/bcache/btree_update.c   19
-rw-r--r--  drivers/md/bcache/btree_update.h    3
-rw-r--r--  drivers/md/bcache/buckets.c       138
-rw-r--r--  drivers/md/bcache/buckets.h        39
-rw-r--r--  drivers/md/bcache/buckets_types.h   2
-rw-r--r--  drivers/md/bcache/extents.c         2
-rw-r--r--  drivers/md/bcache/fs-io.c          35
-rw-r--r--  drivers/md/bcache/fs-io.h           4
-rw-r--r--  drivers/md/bcache/migrate.c        10
-rw-r--r--  drivers/md/bcache/move.c            3
-rw-r--r--  drivers/md/bcache/move.h            6
-rw-r--r--  drivers/md/bcache/super.c           3
-rw-r--r--  drivers/md/bcache/sysfs.c           2
15 files changed, 224 insertions, 64 deletions
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 01a3ff1eca06..be5e0d943729 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -181,6 +181,7 @@
#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
+#include <linux/lglock.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu-refcount.h>
@@ -625,11 +626,19 @@ struct cache_set {
struct cache_group cache_tiers[CACHE_TIERS];
u64 capacity; /* sectors */
- atomic64_t sectors_reserved;
- atomic64_t sectors_reserved_cache;
+
+ /*
+ * When capacity _decreases_ (due to a disk being removed), we
+ * increment capacity_gen - this invalidates outstanding reservations
+ * and forces them to be revalidated
+ */
+ u32 capacity_gen;
+
+ atomic64_t sectors_available;
struct bucket_stats_cache_set __percpu *bucket_stats_percpu;
struct bucket_stats_cache_set bucket_stats_cached;
+ struct lglock bucket_stats_lock;
struct mutex bucket_lock;
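
Within this patch, capacity_gen is only stored into each reservation (res->gen; see the disk_reservation struct in buckets.h below); the revalidation it enables is left to later code. An illustrative standalone sketch of the check it makes possible, where everything beyond struct disk_reservation is assumed rather than taken from the patch:

#include <stdbool.h>
#include <stdint.h>

struct disk_reservation {
	uint32_t sectors;
	uint32_t gen;	/* c->capacity_gen at the time the reservation was taken */
};

/*
 * Hypothetical helper: after a device removal bumps capacity_gen, any
 * reservation taken against the old, larger capacity is suspect and
 * would need to be re-checked against the new capacity before use.
 */
static bool disk_reservation_valid(uint32_t current_capacity_gen,
				   const struct disk_reservation *res)
{
	return res->gen == current_capacity_gen;
}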
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index bce666327c9f..a62d7778f4df 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -161,7 +161,7 @@ u8 __bch_btree_mark_key_initial(struct cache_set *c, enum bkey_type type,
case BKEY_TYPE_BTREE:
case BKEY_TYPE_EXTENTS:
if (k.k->type == BCH_RESERVATION)
- atomic64_add(k.k->size, &c->sectors_reserved);
+ bch_mark_reservation(c, k.k->size);
return __bch_btree_mark_key(c, type, k);
default:
@@ -380,6 +380,13 @@ void bch_gc(struct cache_set *c)
return;
trace_bcache_gc_start(c);
+
+ /*
+ * Do this before taking gc_lock - bch_disk_reservation_get() blocks on
+ * gc_lock if sectors_available goes to 0:
+ */
+ bch_recalc_sectors_available(c);
+
down_write(&c->gc_lock);
/* Save a copy of the existing bucket stats while we recompute them: */
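
Why the recalculation must precede down_write(): the slow path added in buckets.c takes gc_lock for read while it recounts free space, so a writer that found sectors_available at 0 sits on gc_lock until someone refills it. Refilling before taking gc_lock for write keeps gc from queueing behind, and further stalling, those writers. A condensed view of the two sides, paraphrased from this patch's own code:

/* reservation slow path (see __bch_disk_reservation_get() in buckets.c) */
	down_read(&c->gc_lock);
	lg_global_lock(&c->bucket_stats_lock);
	/* recount: sectors_available = capacity - sectors used */
	lg_global_unlock(&c->bucket_stats_lock);
	up_read(&c->gc_lock);

/* gc: refill the pool with no locks held, only then exclude writers */
	bch_recalc_sectors_available(c);
	down_write(&c->gc_lock);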
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index d3322ce47297..d5da485d0cb7 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -445,7 +445,7 @@ static struct btree *__btree_root_alloc(struct cache_set *c, unsigned level,
void bch_btree_reserve_put(struct cache_set *c, struct btree_reserve *reserve)
{
- atomic64_sub_bug(reserve->sectors_reserved, &c->sectors_reserved);
+ bch_disk_reservation_put(c, &reserve->disk_res);
mutex_lock(&c->btree_reserve_cache_lock);
@@ -484,15 +484,13 @@ static struct btree_reserve *__bch_btree_reserve_get(struct cache_set *c,
{
struct btree_reserve *reserve;
struct btree *b;
- unsigned sectors_reserved = 0;
+ struct disk_reservation disk_res = { 0, 0 };
+ unsigned sectors = nr_nodes * c->sb.btree_node_size;
int ret;
- if (check_enospc) {
- sectors_reserved = nr_nodes * c->sb.btree_node_size;
-
- if (bch_reserve_sectors(c, sectors_reserved))
- return ERR_PTR(-ENOSPC);
- }
+ if (__bch_disk_reservation_get(c, &disk_res, sectors,
+ check_enospc, true))
+ return ERR_PTR(-ENOSPC);
BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
@@ -506,7 +504,7 @@ static struct btree_reserve *__bch_btree_reserve_get(struct cache_set *c,
reserve = mempool_alloc(&c->btree_reserve_pool, GFP_NOIO);
- reserve->sectors_reserved = sectors_reserved;
+ reserve->disk_res = disk_res;
reserve->nr = 0;
while (reserve->nr < nr_nodes) {
@@ -552,7 +550,8 @@ int bch_btree_root_alloc(struct cache_set *c, enum btree_id id,
closure_init_stack(&cl);
while (1) {
- reserve = __bch_btree_reserve_get(c, true, 1, &cl);
+ /* XXX haven't calculated capacity yet :/ */
+ reserve = __bch_btree_reserve_get(c, false, 1, &cl);
if (!IS_ERR(reserve))
break;
diff --git a/drivers/md/bcache/btree_update.h b/drivers/md/bcache/btree_update.h
index e4ccd1ddb7e7..eea17208c12f 100644
--- a/drivers/md/bcache/btree_update.h
+++ b/drivers/md/bcache/btree_update.h
@@ -3,6 +3,7 @@
#include "btree_cache.h"
#include "btree_iter.h"
+#include "buckets.h"
struct cache_set;
struct bkey_format_state;
@@ -10,7 +11,7 @@ struct bkey_format;
struct btree;
struct btree_reserve {
- unsigned sectors_reserved;
+ struct disk_reservation disk_res;
unsigned nr;
struct btree *b[];
};
diff --git a/drivers/md/bcache/buckets.c b/drivers/md/bcache/buckets.c
index a78d883ee656..6c4ac8b91a60 100644
--- a/drivers/md/bcache/buckets.c
+++ b/drivers/md/bcache/buckets.c
@@ -157,7 +157,7 @@ static void bucket_stats_update(struct cache *ca,
!is_available_bucket(new) &&
c->gc_pos.phase == GC_PHASE_DONE);
- preempt_disable();
+ lg_local_lock(&c->bucket_stats_lock);
cache_stats = this_cpu_ptr(ca->bucket_stats_percpu);
cache_set_stats = this_cpu_ptr(c->bucket_stats_percpu);
@@ -188,7 +188,7 @@ static void bucket_stats_update(struct cache *ca,
cache_stats->buckets_meta += is_meta_bucket(new) - is_meta_bucket(old);
cache_stats->buckets_cached += is_cached_bucket(new) - is_cached_bucket(old);
cache_stats->buckets_dirty += is_dirty_bucket(new) - is_dirty_bucket(old);
- preempt_enable();
+ lg_local_unlock(&c->bucket_stats_lock);
if (!is_available_bucket(old) && is_available_bucket(new))
bch_wake_allocator(ca);
@@ -428,6 +428,16 @@ stale:
return -1;
}
+void bch_mark_reservation(struct cache_set *c, int sectors)
+{
+ struct bucket_stats_cache_set *stats;
+
+ lg_local_lock(&c->bucket_stats_lock);
+ stats = this_cpu_ptr(c->bucket_stats_percpu);
+ stats->sectors_reserved += sectors;
+ lg_local_unlock(&c->bucket_stats_lock);
+}
+
void bch_unmark_open_bucket(struct cache *ca, struct bucket *g)
{
struct bucket_mark old, new;
@@ -437,24 +447,124 @@ void bch_unmark_open_bucket(struct cache *ca, struct bucket *g)
}));
}
+static u64 __recalc_sectors_available(struct cache_set *c)
+{
+ return c->capacity - cache_set_sectors_used(c);
+}
+
+/* Used by gc when it's starting: */
+void bch_recalc_sectors_available(struct cache_set *c)
+{
+ int cpu;
+
+ lg_global_lock(&c->bucket_stats_lock);
+
+ for_each_possible_cpu(cpu)
+		per_cpu_ptr(c->bucket_stats_percpu, cpu)->sectors_available_cache = 0;
+
+ atomic64_set(&c->sectors_available,
+ __recalc_sectors_available(c));
+
+ lg_global_unlock(&c->bucket_stats_lock);
+}
+
+void bch_disk_reservation_put(struct cache_set *c,
+ struct disk_reservation *res)
+{
+ if (res->sectors) {
+ struct bucket_stats_cache_set *stats;
+
+ lg_local_lock(&c->bucket_stats_lock);
+ stats = this_cpu_ptr(c->bucket_stats_percpu);
+ stats->sectors_reserved -= res->sectors;
+ lg_local_unlock(&c->bucket_stats_lock);
+
+ res->sectors = 0;
+ }
+}
+
#define SECTORS_CACHE 1024
-int bch_reserve_sectors(struct cache_set *c, unsigned sectors)
+/*
+ * XXX
+ *
+ * For the trick we're using here to work, we have to ensure that everything
+ * that decreases the amount of space available goes through here and decreases
+ * sectors_available:
+ *
+ * Need to figure out a way of asserting that that's happening (e.g. btree node
+ * allocations?)
+ */
+int __bch_disk_reservation_get(struct cache_set *c,
+ struct disk_reservation *res,
+ unsigned sectors,
+ bool check_enospc, bool gc_lock_held)
{
- u64 sectors_to_get = SECTORS_CACHE + sectors;
+ struct bucket_stats_cache_set *stats;
+ u64 old, new, v;
+ s64 sectors_available;
+ int ret;
+
+ res->sectors = sectors;
+ res->gen = c->capacity_gen;
+
+ lg_local_lock(&c->bucket_stats_lock);
+ stats = this_cpu_ptr(c->bucket_stats_percpu);
+
+	if (sectors < stats->sectors_available_cache)
+ goto out;
+
+ v = atomic64_read(&c->sectors_available);
+ do {
+ old = v;
+ if (old < sectors) {
+ lg_local_unlock(&c->bucket_stats_lock);
+ goto recalculate;
+ }
- if (likely(atomic64_sub_return(sectors,
- &c->sectors_reserved_cache) >= 0))
- return 0;
+ new = max_t(s64, 0, old - sectors - SECTORS_CACHE);
+ } while ((v = atomic64_cmpxchg(&c->sectors_available,
+ old, new)) != old);
- atomic64_add(sectors_to_get, &c->sectors_reserved);
+ stats->sectors_available_cache += old - new;
+out:
+ stats->sectors_available_cache -= sectors;
+ stats->sectors_reserved += sectors;
+ lg_local_unlock(&c->bucket_stats_lock);
+ return 0;
- if (likely(!cache_set_full(c))) {
- atomic64_add(sectors_to_get, &c->sectors_reserved_cache);
- return 0;
+recalculate:
+ /*
+ * GC recalculates sectors_available when it starts, so that hopefully
+ * we don't normally end up blocking here:
+ */
+ if (!gc_lock_held)
+ down_read(&c->gc_lock);
+ lg_global_lock(&c->bucket_stats_lock);
+
+ sectors_available = __recalc_sectors_available(c);
+
+ if (!check_enospc || sectors <= sectors_available) {
+ atomic64_set(&c->sectors_available,
+ max_t(s64, 0, sectors_available - sectors));
+ stats->sectors_reserved += sectors;
+ ret = 0;
+ } else {
+ atomic64_set(&c->sectors_available, sectors_available);
+ res->sectors = 0;
+ ret = -ENOSPC;
}
- atomic64_sub_bug(sectors_to_get, &c->sectors_reserved);
- atomic64_add(sectors, &c->sectors_reserved_cache);
- return -ENOSPC;
+ lg_global_unlock(&c->bucket_stats_lock);
+ if (!gc_lock_held)
+ up_read(&c->gc_lock);
+
+ return ret;
+}
+
+int bch_disk_reservation_get(struct cache_set *c,
+ struct disk_reservation *res,
+ unsigned sectors)
+{
+ return __bch_disk_reservation_get(c, res, sectors, true, false);
}
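
The heart of the patch is the fast path above: c->sectors_available is a global atomic pool, and each CPU drains it in chunks (the requested sectors plus up to SECTORS_CACHE extra) into its percpu sectors_available_cache, so most reservations touch only percpu state under lg_local_lock(). Only when the pool looks empty does a caller fall back to lg_global_lock() plus gc_lock and recount precisely, which is what makes -ENOSPC exact rather than approximate. A minimal runnable userspace model of just the fast path; C11 atomics stand in for atomic64_cmpxchg(), a thread-local for the percpu counter, and the names, the lglock, and the slow path are all mine or omitted:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SECTORS_CACHE 1024

/* models c->sectors_available: the shared pool */
static _Atomic uint64_t sectors_available = 10000;

/* models the percpu stats->sectors_available_cache */
static _Thread_local uint64_t available_cache;

/*
 * Fast path only: returns 0 on success, -1 where the real code would
 * fall back to the slow path (recount under lg_global_lock + gc_lock).
 */
static int reserve_fast_path(uint64_t sectors)
{
	uint64_t old, new;

	if (sectors < available_cache)
		goto out;

	old = atomic_load(&sectors_available);
	do {
		if (old < sectors)
			return -1;
		/* take what we need plus up to SECTORS_CACHE extra */
		new = old - sectors;
		new = new > SECTORS_CACHE ? new - SECTORS_CACHE : 0;
	} while (!atomic_compare_exchange_weak(&sectors_available, &old, new));

	available_cache += old - new;	/* pocket the extra for next time */
out:
	available_cache -= sectors;
	return 0;
}

int main(void)
{
	printf("reserve 8: %d\n", reserve_fast_path(8));	/* hits the pool */
	printf("reserve 8: %d\n", reserve_fast_path(8));	/* served from cache */
	printf("cached %llu, pool %llu\n",
	       (unsigned long long)available_cache,
	       (unsigned long long)atomic_load(&sectors_available));
	return 0;
}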
diff --git a/drivers/md/bcache/buckets.h b/drivers/md/bcache/buckets.h
index 0c68ad57a23b..561e97cb7d2b 100644
--- a/drivers/md/bcache/buckets.h
+++ b/drivers/md/bcache/buckets.h
@@ -195,23 +195,14 @@ static inline size_t buckets_free_cache(struct cache *ca,
struct bucket_stats_cache_set __bch_bucket_stats_read_cache_set(struct cache_set *);
struct bucket_stats_cache_set bch_bucket_stats_read_cache_set(struct cache_set *);
-static inline u64 __cache_set_sectors_used(struct cache_set *c)
-{
- struct bucket_stats_cache_set stats = bch_bucket_stats_read_cache_set(c);
-
- return stats.sectors_meta +
- stats.sectors_dirty +
- atomic64_read(&c->sectors_reserved);
-}
-
static inline u64 cache_set_sectors_used(struct cache_set *c)
{
- return min(c->capacity, __cache_set_sectors_used(c));
-}
+ struct bucket_stats_cache_set stats = bch_bucket_stats_read_cache_set(c);
-static inline bool cache_set_full(struct cache_set *c)
-{
- return __cache_set_sectors_used(c) >= c->capacity;
+ return min(c->capacity,
+ stats.sectors_meta +
+ stats.sectors_dirty +
+ stats.sectors_reserved);
}
/* XXX: kill? */
@@ -243,7 +234,25 @@ void bch_unmark_open_bucket(struct cache *, struct bucket *);
int bch_mark_pointers(struct cache_set *, struct bkey_s_c_extent,
int, bool, bool, bool, struct gc_pos);
+void bch_mark_reservation(struct cache_set *, int);
+
+void bch_recalc_sectors_available(struct cache_set *);
-int bch_reserve_sectors(struct cache_set *, unsigned);
+/*
+ * A reservation for space on disk:
+ */
+struct disk_reservation {
+ u32 sectors;
+ u32 gen;
+};
+
+void bch_disk_reservation_put(struct cache_set *,
+ struct disk_reservation *);
+int __bch_disk_reservation_get(struct cache_set *,
+ struct disk_reservation *,
+ unsigned, bool, bool);
+int bch_disk_reservation_get(struct cache_set *,
+ struct disk_reservation *,
+ unsigned);
#endif /* _BUCKETS_H */
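
Every converted caller in this patch (btree reserves, DIO, fallocate, migration) follows the same get/use/put shape against this interface. A condensed sketch of the pattern, using only the API declared above, with error handling trimmed and the write itself elided:

/* Sketch: hold a precise space reservation across a write. */
static int write_with_reservation(struct cache_set *c, unsigned sectors)
{
	struct disk_reservation res;
	int ret;

	ret = bch_disk_reservation_get(c, &res, sectors);
	if (ret)
		return ret;	/* precise -ENOSPC, nothing to unwind */

	/* ... perform the write; once the keys are marked, on-disk
	 * accounting supersedes this in-memory reservation ... */

	bch_disk_reservation_put(c, &res);	/* also zeroes res.sectors */
	return 0;
}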
diff --git a/drivers/md/bcache/buckets_types.h b/drivers/md/bcache/buckets_types.h
index 2a2c4ef4dfce..1b7df1662144 100644
--- a/drivers/md/bcache/buckets_types.h
+++ b/drivers/md/bcache/buckets_types.h
@@ -47,6 +47,8 @@ struct bucket_stats_cache_set {
u64 sectors_dirty;
u64 sectors_cached;
u64 sectors_meta;
+ u64 sectors_reserved;
+ u64 sectors_available_cache;
};
struct bucket_heap_entry {
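
Both new fields are percpu, so any total (e.g. for cache_set_sectors_used() in buckets.h) has to be summed across CPUs; taking lg_global_lock() for the walk excludes every fast path holding lg_local_lock() and yields a consistent snapshot. The actual reader, bch_bucket_stats_read_cache_set(), is not part of this diff, so the following is an assumption about its shape:

static u64 total_sectors_reserved(struct cache_set *c)
{
	u64 sum = 0;
	int cpu;

	lg_global_lock(&c->bucket_stats_lock);
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(c->bucket_stats_percpu, cpu)->sectors_reserved;
	lg_global_unlock(&c->bucket_stats_lock);

	return sum;
}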
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 6cbdad6a8019..06127a117a16 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -822,7 +822,7 @@ static int bch_add_sectors(struct btree_iter *iter, struct bkey_s_c k,
bcache_dev_sectors_dirty_add(c, e.k->p.inode,
offset, sectors);
} else if (k.k->type == BCH_RESERVATION) {
- atomic64_add_bug(sectors, &c->sectors_reserved);
+ bch_mark_reservation(c, sectors);
}
return 0;
diff --git a/drivers/md/bcache/fs-io.c b/drivers/md/bcache/fs-io.c
index 4df3564ad6cc..127f608aabf0 100644
--- a/drivers/md/bcache/fs-io.c
+++ b/drivers/md/bcache/fs-io.c
@@ -432,19 +432,24 @@ static void bch_put_page_reservation(struct cache_set *c, struct page *page)
s.alloc_state = BCH_PAGE_UNALLOCATED;
});
- if (s.alloc_state == BCH_PAGE_RESERVED)
- atomic64_sub_bug(PAGE_SECTORS, &c->sectors_reserved);
+ if (s.alloc_state == BCH_PAGE_RESERVED) {
+ struct disk_reservation res = { .sectors = PAGE_SECTORS };
+
+ /* hack */
+ bch_disk_reservation_put(c, &res);
+ }
}
static int bch_get_page_reservation(struct cache_set *c, struct page *page)
{
struct bch_page_state *s = page_state(page), old, new;
+ struct disk_reservation res;
int ret = 0;
if (s->alloc_state != BCH_PAGE_UNALLOCATED)
return 0;
- ret = bch_reserve_sectors(c, PAGE_SECTORS);
+ ret = bch_disk_reservation_get(c, &res, PAGE_SECTORS);
if (ret)
return ret;
@@ -474,8 +479,12 @@ static void bch_clear_page_bits(struct page *page)
spin_unlock(&inode->i_lock);
}
- if (s.alloc_state == BCH_PAGE_RESERVED)
- atomic64_sub_bug(PAGE_SECTORS, &c->sectors_reserved);
+ if (s.alloc_state == BCH_PAGE_RESERVED) {
+ struct disk_reservation res = { .sectors = PAGE_SECTORS };
+
+ /* hack */
+ bch_disk_reservation_put(c, &res);
+ }
if (s.append)
i_size_update_put(c, ei, s.append_idx, 1);
@@ -792,7 +801,11 @@ static void bch_writepage_io_done(struct closure *cl)
struct bio_vec *bvec;
unsigned i;
- atomic64_sub_bug(io->sectors_reserved, &c->sectors_reserved);
+ if (io->sectors_reserved) {
+ struct disk_reservation res = { .sectors = io->sectors_reserved };
+
+ bch_disk_reservation_put(c, &res);
+ }
for (i = 0; i < ARRAY_SIZE(io->i_size_update_count); i++)
i_size_update_put(c, ei, i, io->i_size_update_count[i]);
@@ -1312,7 +1325,7 @@ static void __bch_dio_write_complete(struct dio_write *dio)
struct bch_inode_info *ei = to_bch_ei(inode);
struct cache_set *c = inode->i_sb->s_fs_info;
- atomic64_sub_bug(dio->nr_sectors, &c->sectors_reserved);
+ bch_disk_reservation_put(c, &dio->res);
i_sectors_dirty_put(ei, &dio->i_sectors_hook);
@@ -1437,7 +1450,6 @@ static int bch_direct_IO_write(struct cache_set *c, struct kiocb *req,
dio->written = 0;
dio->error = 0;
dio->offset = offset;
- dio->nr_sectors = iter->count >> 9;
dio->append = false;
dio->iovec = NULL;
dio->iter = *iter;
@@ -1469,7 +1481,7 @@ static int bch_direct_IO_write(struct cache_set *c, struct kiocb *req,
* Have to then guard against racing with truncate (deleting data that
* we would have been overwriting)
*/
- ret = bch_reserve_sectors(c, dio->nr_sectors);
+ ret = bch_disk_reservation_get(c, &dio->res, iter->count >> 9);
if (ret)
goto err_put_sectors_dirty;
@@ -2269,6 +2281,7 @@ static long bch_fallocate(struct inode *inode, int mode,
goto err;
while (bkey_cmp(iter.pos, end) < 0) {
+ struct disk_reservation disk_res;
unsigned flags = 0;
k = bch_btree_iter_peek_with_holes(&iter);
@@ -2303,7 +2316,7 @@ static long bch_fallocate(struct inode *inode, int mode,
sectors = reservation.k.size;
- ret = bch_reserve_sectors(c, sectors);
+ ret = bch_disk_reservation_get(c, &disk_res, sectors);
if (ret)
goto err_put_sectors_dirty;
@@ -2313,7 +2326,7 @@ static long bch_fallocate(struct inode *inode, int mode,
&ei->journal_seq,
BTREE_INSERT_ATOMIC|flags);
- atomic64_sub_bug(sectors, &c->sectors_reserved);
+ bch_disk_reservation_put(c, &disk_res);
if (ret < 0 && ret != -EINTR)
goto err_put_sectors_dirty;
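
Both "/* hack */" sites above build a throwaway disk_reservation on the stack purely so that bch_disk_reservation_put() performs the percpu sectors_reserved accounting for a bare sector count. A hypothetical helper, not in the patch, that names the idiom:

/* hypothetical wrapper for the two "hack" call sites above */
static void bch_put_sector_count(struct cache_set *c, unsigned sectors)
{
	struct disk_reservation res = { .sectors = sectors };

	bch_disk_reservation_put(c, &res);
}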
diff --git a/drivers/md/bcache/fs-io.h b/drivers/md/bcache/fs-io.h
index f701058434de..23d443222c08 100644
--- a/drivers/md/bcache/fs-io.h
+++ b/drivers/md/bcache/fs-io.h
@@ -1,6 +1,7 @@
#ifndef _BCACHE_FS_IO_H
#define _BCACHE_FS_IO_H
+#include "buckets.h"
#include <linux/uio.h>
int bch_set_page_dirty(struct page *);
@@ -64,9 +65,10 @@ struct dio_write {
long written;
long error;
loff_t offset;
- unsigned nr_sectors;
bool append;
+ struct disk_reservation res;
+
struct iovec *iovec;
struct iovec inline_vecs[UIO_FASTIOV];
struct iov_iter iter;
diff --git a/drivers/md/bcache/migrate.c b/drivers/md/bcache/migrate.c
index 18c97ba8ec7a..31dfb5f4dcde 100644
--- a/drivers/md/bcache/migrate.c
+++ b/drivers/md/bcache/migrate.c
@@ -6,6 +6,7 @@
#include "btree_update.h"
#include "buckets.h"
#include "extents.h"
+#include "io.h"
#include "journal.h"
#include "keylist.h"
#include "migrate.h"
@@ -38,15 +39,18 @@ static int issue_migration_move(struct cache *ca,
struct moving_queue *q = &ca->moving_gc_queue;
struct cache_set *c = ca->set;
struct moving_io *io;
+ struct disk_reservation res;
- if (bch_reserve_sectors(c, k.k->size))
+ if (bch_disk_reservation_get(c, &res, k.k->size))
return -ENOSPC;
io = moving_io_alloc(k);
- if (io == NULL)
+ if (!io) {
+ bch_disk_reservation_put(c, &res);
return -ENOMEM;
+ }
- io->has_reservation = true;
+ io->disk_res = res;
/* This also copies k into the write op's replace_key and insert_key */
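
The reservation either transfers into io->disk_res, to be released later by moving_io_free() in move.c, or is put on the spot when moving_io_alloc() fails. Because bch_disk_reservation_put() zeroes res->sectors and skips empty reservations, the final put can be unconditional, assuming moving_io_alloc() zero-initializes the struct when no reservation is ever assigned. A runnable toy of the zero-on-put idiom, with a plain counter standing in for the percpu stats:

#include <stdint.h>
#include <stdio.h>

struct disk_reservation { uint32_t sectors; uint32_t gen; };

static uint64_t sectors_reserved;	/* stand-in for percpu accounting */

static void reservation_put(struct disk_reservation *res)
{
	if (res->sectors) {
		sectors_reserved -= res->sectors;
		res->sectors = 0;	/* a second put becomes a no-op */
	}
}

int main(void)
{
	struct disk_reservation res = { .sectors = 8 };

	sectors_reserved = 8;
	reservation_put(&res);
	reservation_put(&res);	/* harmless: already released */
	printf("sectors_reserved = %llu\n",
	       (unsigned long long)sectors_reserved);	/* prints 0 */
	return 0;
}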
diff --git a/drivers/md/bcache/move.c b/drivers/md/bcache/move.c
index 0fe6edab0176..f7dfcbd5935b 100644
--- a/drivers/md/bcache/move.c
+++ b/drivers/md/bcache/move.c
@@ -115,8 +115,7 @@ void moving_io_free(struct moving_io *io)
struct bio_vec *bv;
int i;
- if (io->has_reservation)
- atomic64_sub_bug(io->key.k.size, &io->op.c->sectors_reserved);
+ bch_disk_reservation_put(io->op.c, &io->disk_res);
bio_for_each_segment_all(bv, &io->bio.bio.bio, i)
if (bv->bv_page)
diff --git a/drivers/md/bcache/move.h b/drivers/md/bcache/move.h
index 6d5a96b06b07..fe545a6aec9b 100644
--- a/drivers/md/bcache/move.h
+++ b/drivers/md/bcache/move.h
@@ -1,7 +1,8 @@
#ifndef _BCACHE_MOVE_H
#define _BCACHE_MOVE_H
-#include "io.h"
+#include "buckets.h"
+#include "io_types.h"
enum moving_purpose {
MOVING_PURPOSE_UNKNOWN, /* Un-init */
@@ -91,7 +92,8 @@ struct moving_io {
unsigned read_issued:1;
unsigned read_completed:1;
unsigned write_issued:1;
- unsigned has_reservation:1;
+ struct disk_reservation disk_res;
+
/* Must be last since it is variable size */
struct bch_write_bio bio;
};
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c414ac48a70b..724ca30c9d0a 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -873,6 +873,7 @@ static void cache_set_free(struct cache_set *c)
bch_io_clock_exit(&c->io_clock[WRITE]);
bch_io_clock_exit(&c->io_clock[READ]);
bdi_destroy(&c->bdi);
+ free_percpu(c->bucket_stats_lock.lock);
free_percpu(c->bucket_stats_percpu);
free_percpu(c->bio_decompress_worker);
mempool_exit(&c->compression_workspace_pool);
@@ -1040,6 +1041,7 @@ static struct cache_set *bch_cache_set_alloc(struct cache_sb *sb,
sema_init(&c->sb_write_mutex, 1);
INIT_RADIX_TREE(&c->devices, GFP_KERNEL);
mutex_init(&c->btree_cache_lock);
+ lg_lock_init(&c->bucket_stats_lock);
mutex_init(&c->bucket_lock);
spin_lock_init(&c->btree_root_lock);
INIT_WORK(&c->read_only_work, bch_cache_set_read_only_work);
@@ -1135,6 +1137,7 @@ static struct cache_set *bch_cache_set_alloc(struct cache_sb *sb,
get_order(COMPRESSION_WORKSPACE_SIZE)) ||
!(c->bio_decompress_worker = alloc_percpu(*c->bio_decompress_worker)) ||
!(c->bucket_stats_percpu = alloc_percpu(struct bucket_stats_cache_set)) ||
+ !(c->bucket_stats_lock.lock = alloc_percpu(*c->bucket_stats_lock.lock)) ||
bdi_setup_and_register(&c->bdi, "bcache") ||
bch_io_clock_init(&c->io_clock[READ]) ||
bch_io_clock_init(&c->io_clock[WRITE]) ||
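
A wrinkle in the setup and teardown hunks: this tree embeds the lglock in cache_set and allocates its percpu spinlock array by hand (mainline lglocks are typically static via DEFINE_LGLOCK, and this tree's lg_lock_init() evidently takes only the lock), so the allocation must be paired with the free in cache_set_free(). The two call sites from this patch, gathered into one sketch:

	/* bch_cache_set_alloc() */
	lg_lock_init(&c->bucket_stats_lock);
	if (!(c->bucket_stats_lock.lock = alloc_percpu(*c->bucket_stats_lock.lock)))
		goto err;

	/* cache_set_free() */
	free_percpu(c->bucket_stats_lock.lock);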
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index a684a04cc95c..b8153af524dc 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -582,7 +582,7 @@ static ssize_t show_cache_set_alloc_debug(struct cache_set *c, char *buf)
c->capacity,
stats.sectors_meta,
stats.sectors_dirty,
- (u64) atomic64_read(&c->sectors_reserved));
+ stats.sectors_reserved);
}
static ssize_t bch_compression_stats(struct cache_set *c, char *buf)