author	Kent Overstreet <kent.overstreet@gmail.com>	2021-11-23 18:21:09 -0500
committer	Kent Overstreet <kent.overstreet@gmail.com>	2022-03-12 20:13:33 -0500
commit	d2ec1789c46f6cf12afd8222bc5508fe8b87a00d (patch)
tree	83836e1d87da2f130fefd7362f55d859fe388864
parent	531077fcf309698b1bf6880e0469115fbd2b9c99 (diff)
bcachefs: Fix page state after fallocate
This tweaks the fallocate code to also update the page cache to reflect the
new on-disk reservations, giving us better i_sectors consistency.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--	fs/bcachefs/fs-io.c	155
1 file changed, 113 insertions, 42 deletions
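For context before the diff: the patch teaches fallocate to update bcachefs's per-sector page state alongside the on-disk reservation. The stand-alone sketch below (illustrative only, not kernel code; the harness, mark_sector_reserved(), and the SECTOR_ALLOCATED entry standing in for "any other state" are invented here) mirrors the transition the new mark_pagecache_reserved() helper in the first hunk applies to each sector: unallocated becomes reserved, and dirty becomes dirty-reserved while the pending i_sectors adjustment is decremented.

/* Illustration only; not bcachefs code. */
#include <stdio.h>

enum sector_state {
	SECTOR_UNALLOCATED,	/* value 0, so zero-init means unallocated */
	SECTOR_RESERVED,
	SECTOR_DIRTY,
	SECTOR_DIRTY_RESERVED,
	SECTOR_ALLOCATED,	/* example of a state the default case leaves alone */
};

/* Apply the fallocate transition to one sector; return the i_sectors adjustment. */
static int mark_sector_reserved(enum sector_state *s)
{
	switch (*s) {
	case SECTOR_UNALLOCATED:
		*s = SECTOR_RESERVED;
		return 0;
	case SECTOR_DIRTY:
		*s = SECTOR_DIRTY_RESERVED;
		return -1;	/* matches i_sectors_delta-- in the patch */
	default:
		return 0;
	}
}

int main(void)
{
	enum sector_state page[8] = {
		SECTOR_UNALLOCATED, SECTOR_DIRTY, SECTOR_ALLOCATED, SECTOR_UNALLOCATED,
	};
	long i_sectors_delta = 0;

	for (int j = 0; j < 8; j++)
		i_sectors_delta += mark_sector_reserved(&page[j]);

	printf("i_sectors_delta = %ld\n", i_sectors_delta);	/* -1: one dirty sector */
	return 0;
}

The negative delta for dirty sectors presumably offsets double-counting: a dirty sector was already charged to i_sectors when the page was dirtied, and the fallocate path has just charged it again on disk, so i_sectors_acct() applies the net adjustment once, after the page locks are dropped.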
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 4e298c740b96..e29f160a6da0 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -426,6 +426,110 @@ static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
bv.bv_len >> 9, nr_ptrs, state);
}
+static void mark_pagecache_unallocated(struct bch_inode_info *inode,
+				       u64 start, u64 end)
+{
+	pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+	pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+	struct pagevec pvec;
+
+	if (end <= start)
+		return;
+
+	pagevec_init(&pvec);
+
+	do {
+		unsigned nr_pages, i, j;
+
+		nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
+						&index, end_index);
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+			u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
+			u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+			unsigned pg_offset = max(start, pg_start) - pg_start;
+			unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
+			struct bch_page_state *s;
+
+			BUG_ON(end <= pg_start);
+			BUG_ON(pg_offset >= PAGE_SECTORS);
+			BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+
+			lock_page(page);
+			s = bch2_page_state(page);
+
+			if (s) {
+				spin_lock(&s->lock);
+				for (j = pg_offset; j < pg_offset + pg_len; j++)
+					s->s[j].nr_replicas = 0;
+				spin_unlock(&s->lock);
+			}
+
+			unlock_page(page);
+		}
+		pagevec_release(&pvec);
+	} while (index <= end_index);
+}
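Both new helpers clamp the global sector range [start, end) to each page via pg_offset and pg_len. A quick stand-alone check of that arithmetic (illustrative only; assumes 4 KiB pages with 512-byte sectors, i.e. PAGE_SECTORS = 8):

/* Illustration only; not bcachefs code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SECTORS_SHIFT 3	/* 4 KiB page / 512-byte sectors */
#define PAGE_SECTORS (1U << PAGE_SECTORS_SHIFT)

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t start = 5, end = 19;	/* global range, in sectors */

	for (uint64_t index = start >> PAGE_SECTORS_SHIFT;
	     index <= (end - 1) >> PAGE_SECTORS_SHIFT;
	     index++) {
		uint64_t pg_start = index << PAGE_SECTORS_SHIFT;
		uint64_t pg_end = (index + 1) << PAGE_SECTORS_SHIFT;
		unsigned pg_offset = max_u64(start, pg_start) - pg_start;
		unsigned pg_len = min_u64(end, pg_end) - pg_offset - pg_start;

		assert(pg_offset < PAGE_SECTORS);
		assert(pg_offset + pg_len <= PAGE_SECTORS);
		printf("page %llu: sectors [%u, %u)\n",
		       (unsigned long long)index, pg_offset, pg_offset + pg_len);
	}
	return 0;
}

For start = 5 and end = 19 this prints [5, 8), [0, 8) and [0, 3) across the three pages touched, which is exactly the invariant the BUG_ON()s in the helpers assert.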
+
+static void mark_pagecache_reserved(struct bch_inode_info *inode,
+				    u64 start, u64 end)
+{
+	struct bch_fs *c = inode->v.i_sb->s_fs_info;
+	pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+	pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+	struct pagevec pvec;
+	s64 i_sectors_delta = 0;
+
+	if (end <= start)
+		return;
+
+	pagevec_init(&pvec);
+
+	do {
+		unsigned nr_pages, i, j;
+
+		nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
+						&index, end_index);
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+			u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
+			u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+			unsigned pg_offset = max(start, pg_start) - pg_start;
+			unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
+			struct bch_page_state *s;
+
+			BUG_ON(end <= pg_start);
+			BUG_ON(pg_offset >= PAGE_SECTORS);
+			BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+
+			lock_page(page);
+			s = bch2_page_state(page);
+
+			if (s) {
+				spin_lock(&s->lock);
+				for (j = pg_offset; j < pg_offset + pg_len; j++)
+					switch (s->s[j].state) {
+					case SECTOR_UNALLOCATED:
+						s->s[j].state = SECTOR_RESERVED;
+						break;
+					case SECTOR_DIRTY:
+						s->s[j].state = SECTOR_DIRTY_RESERVED;
+						i_sectors_delta--;
+						break;
+					default:
+						break;
+					}
+				spin_unlock(&s->lock);
+			}
+
+			unlock_page(page);
+		}
+		pagevec_release(&pvec);
+	} while (index <= end_index);
+
+	i_sectors_acct(c, inode, NULL, i_sectors_delta);
+}
+
static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
/* XXX: this should not be open coded */
@@ -581,8 +685,7 @@ static void bch2_clear_page_bits(struct page *page)
bch2_disk_reservation_put(c, &disk_res);
-	if (dirty_sectors)
-		i_sectors_acct(c, inode, NULL, dirty_sectors);
+	i_sectors_acct(c, inode, NULL, dirty_sectors);
bch2_page_state_release(page);
}
@@ -630,8 +733,7 @@ static void bch2_set_page_dirty(struct bch_fs *c,
spin_unlock(&s->lock);
-	if (dirty_sectors)
-		i_sectors_acct(c, inode, &res->quota, dirty_sectors);
+	i_sectors_acct(c, inode, &res->quota, dirty_sectors);
if (!PageDirty(page))
__set_page_dirty_nobuffers(page);
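These two hunks drop the if (dirty_sectors) guards around i_sectors_acct(). That is only safe if the helper is harmless for a zero delta; its body is not part of this diff, but the shape is presumably along these lines (hypothetical sketch, not the real bcachefs function):

/* Hypothetical sketch; i_sectors_acct()'s real body is not shown in this diff. */
#include <stdio.h>

struct inode_counters {
	long long i_sectors;
};

/* Accounting that is naturally a no-op for delta == 0, so callers need no guard. */
static void sectors_acct(struct inode_counters *in, long long delta)
{
	in->i_sectors += delta;	/* adding 0 changes nothing */
}

int main(void)
{
	struct inode_counters in = { .i_sectors = 100 };

	sectors_acct(&in, 0);	/* safe without an if (delta) guard */
	sectors_acct(&in, -3);
	printf("i_sectors = %lld\n", in.i_sectors);	/* 97 */
	return 0;
}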
@@ -2612,6 +2714,8 @@ int bch2_truncate(struct user_namespace *mnt_userns,
U64_MAX, &i_sectors_delta);
i_sectors_acct(c, inode, NULL, i_sectors_delta);
+	BUG_ON(!inode->v.i_size && inode->v.i_blocks);
+
if (unlikely(ret))
goto err;
@@ -2952,6 +3056,9 @@ bkey_err:
ret = 0;
}
+	bch2_trans_unlock(&trans); /* lock ordering, before taking pagecache locks: */
+	mark_pagecache_reserved(inode, start_sector, iter.pos.offset);
+
if (ret == -ENOSPC && (mode & FALLOC_FL_ZERO_RANGE)) {
struct quota_res quota_res = { 0 };
s64 i_sectors_delta = 0;
@@ -3057,43 +3164,6 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
return ret;
}
-static void mark_range_unallocated(struct bch_inode_info *inode,
-				   loff_t start, loff_t end)
-{
-	pgoff_t index = start >> PAGE_SHIFT;
-	pgoff_t end_index = (end - 1) >> PAGE_SHIFT;
-	struct pagevec pvec;
-
-	pagevec_init(&pvec);
-
-	do {
-		unsigned nr_pages, i, j;
-
-		nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
-						&index, end_index);
-		if (nr_pages == 0)
-			break;
-
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-			struct bch_page_state *s;
-
-			lock_page(page);
-			s = bch2_page_state(page);
-
-			if (s) {
-				spin_lock(&s->lock);
-				for (j = 0; j < PAGE_SECTORS; j++)
-					s->s[j].nr_replicas = 0;
-				spin_unlock(&s->lock);
-			}
-
-			unlock_page(page);
-		}
-		pagevec_release(&pvec);
-	} while (index <= end_index);
-}
-
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
struct file *file_dst, loff_t pos_dst,
loff_t len, unsigned remap_flags)
@@ -3139,7 +3209,8 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
if (ret)
goto err;
-	mark_range_unallocated(src, pos_src, pos_src + aligned_len);
+	mark_pagecache_unallocated(src, pos_src >> 9,
+				   (pos_src + aligned_len) >> 9);
ret = bch2_remap_range(c,
inode_inum(dst), pos_dst >> 9,
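Note the interface change visible in the last hunk: the old mark_range_unallocated() took byte offsets, while mark_pagecache_unallocated() takes 512-byte sector indexes, hence the >> 9 shifts at the call site (one sector is 1 << 9 = 512 bytes). A trivial stand-alone check of that conversion (illustrative values only, not from the patch):

/* Illustration only; not bcachefs code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pos_src = 8192;	/* byte offset */
	uint64_t aligned_len = 12288;	/* byte length */

	printf("start sector: %llu\n",
	       (unsigned long long)(pos_src >> 9));			/* 16 */
	printf("end sector:   %llu\n",
	       (unsigned long long)((pos_src + aligned_len) >> 9));	/* 40 */
	return 0;
}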