author    Kent Overstreet <kent.overstreet@linux.dev>    2022-10-12 11:06:50 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>    2022-10-12 11:06:50 -0400
commit    3165f53b28b87b3c46c95763bae5e40a29166e2e (patch)
tree      0b9f3262721bf95732c511518d714439347112ee
parent    ab392d3c15be2ad7fb008956ec589cba3bb613e6 (diff)

Update bcachefs sources to 6ee8a33cee bcachefs: Call bch2_btree_update_add_new_node() before dropping write lock
-rw-r--r--    .bcachefs_revision                      2
-rw-r--r--    libbcachefs/btree_iter.c                8
-rw-r--r--    libbcachefs/btree_update_interior.c    20
-rw-r--r--    libbcachefs/fs-io.c                    68
4 files changed, 75 insertions, 23 deletions
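
The substantive change in btree_update_interior.c is one of ordering: a newly allocated btree node is now registered with its btree_update via bch2_btree_update_add_new_node() while the node's write lock is still held, instead of after six_unlock_write(). A minimal sketch of the ordering the patch enforces, using names taken from the hunks below (an illustration only, not the full split/rewrite path):

	n = bch2_btree_node_alloc_replacement(as, trans, b);
	bch2_btree_build_aux_trees(n);
	bch2_btree_update_add_new_node(as, n);	/* register the new node while still write-locked */
	six_unlock_write(&n->c.lock);		/* only then drop the write lock */
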
diff --git a/.bcachefs_revision b/.bcachefs_revision
index e7924381..2c908e6b 100644
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-83edfdeb29c92e0617c2bb0971184944eac09085
+6ee8a33cee5dfb74a1fb6ff348578fd43aae3a14
diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c
index 63197e4f..af658390 100644
--- a/libbcachefs/btree_iter.c
+++ b/libbcachefs/btree_iter.c
@@ -1540,15 +1540,17 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
{
+ struct btree_path_level *l = path_l(path);
+ struct bkey_packed *_k;
struct bkey_s_c k;
+ if (unlikely(!l->b))
+ return bkey_s_c_null;
+
EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
EBUG_ON(!btree_node_locked(path, path->level));
if (!path->cached) {
- struct btree_path_level *l = path_l(path);
- struct bkey_packed *_k;
-
_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c
index b9661407..03c4fd09 100644
--- a/libbcachefs/btree_update_interior.c
+++ b/libbcachefs/btree_update_interior.c
@@ -429,7 +429,6 @@ static struct btree *__btree_root_alloc(struct btree_update *as,
btree_node_set_format(b, b->data->format);
bch2_btree_build_aux_trees(b);
- six_unlock_write(&b->c.lock);
return b;
}
@@ -1527,6 +1526,9 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
bch2_btree_build_aux_trees(n2);
bch2_btree_build_aux_trees(n1);
+
+ bch2_btree_update_add_new_node(as, n1);
+ bch2_btree_update_add_new_node(as, n2);
six_unlock_write(&n2->c.lock);
six_unlock_write(&n1->c.lock);
@@ -1540,9 +1542,6 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
mark_btree_node_locked(trans, path2, n2->c.level, SIX_LOCK_intent);
bch2_btree_path_level_init(trans, path2, n2);
- bch2_btree_update_add_new_node(as, n1);
- bch2_btree_update_add_new_node(as, n2);
-
/*
* Note that on recursive parent_keys == keys, so we
* can't start adding new keys to parent_keys before emptying it
@@ -1555,6 +1554,9 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
/* Depth increases, make a new root */
n3 = __btree_root_alloc(as, trans, b->c.level + 1);
+ bch2_btree_update_add_new_node(as, n3);
+ six_unlock_write(&n3->c.lock);
+
path2->locks_want++;
BUG_ON(btree_node_locked(path2, n3->c.level));
six_lock_increment(&n3->c.lock, SIX_LOCK_intent);
@@ -1564,14 +1566,13 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
n3->sib_u64s[0] = U16_MAX;
n3->sib_u64s[1] = U16_MAX;
- bch2_btree_update_add_new_node(as, n3);
-
btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
}
} else {
trace_and_count(c, btree_node_compact, c, b);
bch2_btree_build_aux_trees(n1);
+ bch2_btree_update_add_new_node(as, n1);
six_unlock_write(&n1->c.lock);
path1 = get_unlocked_mut_path(trans, path->btree_id, n1->c.level, n1->key.k.p);
@@ -1579,8 +1580,6 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
mark_btree_node_locked(trans, path1, n1->c.level, SIX_LOCK_intent);
bch2_btree_path_level_init(trans, path1, n1);
- bch2_btree_update_add_new_node(as, n1);
-
if (parent)
bch2_keylist_add(&as->parent_keys, &n1->key);
}
@@ -1903,9 +1902,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
bch2_btree_sort_into(c, n, next);
bch2_btree_build_aux_trees(n);
- six_unlock_write(&n->c.lock);
-
bch2_btree_update_add_new_node(as, n);
+ six_unlock_write(&n->c.lock);
new_path = get_unlocked_mut_path(trans, path->btree_id, n->c.level, n->key.k.p);
six_lock_increment(&n->c.lock, SIX_LOCK_intent);
@@ -1979,9 +1977,9 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
bch2_btree_interior_update_will_free_node(as, b);
n = bch2_btree_node_alloc_replacement(as, trans, b);
- bch2_btree_update_add_new_node(as, n);
bch2_btree_build_aux_trees(n);
+ bch2_btree_update_add_new_node(as, n);
six_unlock_write(&n->c.lock);
new_path = get_unlocked_mut_path(trans, iter->btree_id, n->c.level, n->key.k.p);
diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c
index 8e025768..2ea6e79f 100644
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c
@@ -151,7 +151,7 @@ static void bch2_quota_reservation_put(struct bch_fs *c,
static int bch2_quota_reservation_add(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res,
- unsigned sectors,
+ u64 sectors,
bool check_enospc)
{
int ret;
@@ -3256,6 +3256,62 @@ err:
/* fseek: */
+static int page_data_offset(struct page *page, unsigned offset)
+{
+ struct bch_page_state *s = bch2_page_state(page);
+ unsigned i;
+
+ if (s)
+ for (i = offset >> 9; i < PAGE_SECTORS; i++)
+ if (s->s[i].state >= SECTOR_DIRTY)
+ return i << 9;
+
+ return -1;
+}
+
+static loff_t bch2_seek_pagecache_data(struct inode *vinode,
+ loff_t start_offset,
+ loff_t end_offset)
+{
+ struct folio_batch fbatch;
+ pgoff_t start_index = start_offset >> PAGE_SHIFT;
+ pgoff_t end_index = end_offset >> PAGE_SHIFT;
+ pgoff_t index = start_index;
+ unsigned i;
+ loff_t ret;
+ int offset;
+
+ folio_batch_init(&fbatch);
+
+ while (filemap_get_folios(vinode->i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ folio_lock(folio);
+
+ offset = page_data_offset(&folio->page,
+ folio->index == start_index
+ ? start_offset & (PAGE_SIZE - 1)
+ : 0);
+ if (offset >= 0) {
+ ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
+ offset,
+ start_offset, end_offset);
+ folio_unlock(folio);
+ folio_batch_release(&fbatch);
+ return ret;
+ }
+
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+
+ return end_offset;
+}
+
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
struct bch_inode_info *inode = file_bch_inode(file);
@@ -3299,13 +3355,9 @@ err:
if (ret)
return ret;
- if (next_data > offset) {
- loff_t pagecache_next_data =
- mapping_seek_hole_data(inode->v.i_mapping, offset,
- next_data, SEEK_DATA);
- if (pagecache_next_data >= 0)
- next_data = min_t(u64, next_data, pagecache_next_data);
- }
+ if (next_data > offset)
+ next_data = bch2_seek_pagecache_data(&inode->v,
+ offset, next_data);
if (next_data >= isize)
return -ENXIO;
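
On the fseek side, the new page_data_offset()/bch2_seek_pagecache_data() helpers take over from the generic mapping_seek_hole_data() call: each locked folio is scanned sector by sector, starting at the sector containing the search offset, for state >= SECTOR_DIRTY. A minimal standalone sketch of that per-page scan, with a plain byte array standing in for struct bch_page_state (an assumption for illustration only):

	#define SECTOR_SHIFT	9
	#define PAGE_SECTORS	(4096 >> SECTOR_SHIFT)

	/*
	 * Byte offset within the page of the first sector at or after 'offset'
	 * that holds data, or -1 if the rest of the page is a hole.
	 */
	static int data_offset_in_page(const unsigned char *sector_has_data, unsigned offset)
	{
		unsigned i;

		for (i = offset >> SECTOR_SHIFT; i < PAGE_SECTORS; i++)
			if (sector_has_data[i])	/* stands in for s->s[i].state >= SECTOR_DIRTY */
				return i << SECTOR_SHIFT;
		return -1;
	}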