author	Filipe Manana <fdmanana@suse.com>	2025-03-31 14:23:42 +0100
committer	David Sterba <dsterba@suse.com>	2025-05-15 14:30:43 +0200
commit	242570e80bfdf3f98ca13031c723edd72c69bdc2 (patch)
tree	9754587d3ece5c88a1f298e72a43272a69240b49
parent	41708a4c23500ebd024d66adaafe0e6022df29a9 (diff)
btrfs: add btrfs prefix to main lock, try lock and unlock extent functions
These functions are exported, so by convention they should have a 'btrfs_'
prefix, to make it clear they are btrfs-specific and to avoid collisions with
functions from elsewhere in the kernel. So add a prefix to their names.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
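For readers outside the btrfs tree, the following is a minimal userspace sketch of the pattern this patch applies; the struct layout, the EXTENT_LOCKED bit value, and the printf body are illustrative stand-ins, not the kernel's definitions. The exported static inline wrappers gain the btrfs_ prefix, while the double-underscore core helpers they delegate to keep their names in this patch, and every call site changes mechanically.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Hypothetical stand-ins for the kernel types. */
struct extent_state;
struct extent_io_tree { int locked; };

#define EXTENT_LOCKED (1U << 3)	/* illustrative bit value only */

/* The double-underscore core helper keeps its name in this patch. */
static int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
			 u32 bits, struct extent_state **cached)
{
	(void)bits; (void)cached;
	printf("lock [%llu, %llu]\n", (unsigned long long)start,
	       (unsigned long long)end);
	tree->locked = 1;
	return 0;
}

/* Before: exported wrapper with a generic, collision-prone name. */
static inline int lock_extent(struct extent_io_tree *tree, u64 start,
			      u64 end, struct extent_state **cached)
{
	return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}

/* After: the same wrapper, now carrying the subsystem prefix. */
static inline int btrfs_lock_extent(struct extent_io_tree *tree, u64 start,
				    u64 end, struct extent_state **cached)
{
	return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}

int main(void)
{
	struct extent_io_tree tree = { 0 };

	/* Every call site in the diff below changes exactly like this: */
	lock_extent(&tree, 0, 4095, NULL);		/* old name */
	btrfs_lock_extent(&tree, 0, 4095, NULL);	/* new name */
	return 0;
}
```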
 fs/btrfs/compression.c           |  6
 fs/btrfs/defrag.c                | 22
 fs/btrfs/direct-io.c             |  8
 fs/btrfs/extent-io-tree.h        | 12
 fs/btrfs/extent_io.c             | 20
 fs/btrfs/extent_map.c            |  4
 fs/btrfs/fiemap.c                |  4
 fs/btrfs/file.c                  | 61
 fs/btrfs/free-space-cache.c      | 16
 fs/btrfs/inode.c                 | 84
 fs/btrfs/ioctl.c                 |  8
 fs/btrfs/ordered-data.c          |  8
 fs/btrfs/reflink.c               |  8
 fs/btrfs/relocation.c            | 28
 fs/btrfs/tests/extent-io-tests.c |  6
 fs/btrfs/tree-log.c              |  8
 16 files changed, 152 insertions(+), 151 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 74edd2dd92ff..8f3771451faa 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -499,7 +499,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
}
page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
- lock_extent(tree, cur, page_end, NULL);
+ btrfs_lock_extent(tree, cur, page_end, NULL);
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
read_unlock(&em_tree->lock);
@@ -514,14 +514,14 @@ static noinline int add_ra_bio_pages(struct inode *inode,
(extent_map_block_start(em) >> SECTOR_SHIFT) !=
orig_bio->bi_iter.bi_sector) {
free_extent_map(em);
- unlock_extent(tree, cur, page_end, NULL);
+ btrfs_unlock_extent(tree, cur, page_end, NULL);
folio_unlock(folio);
folio_put(folio);
break;
}
add_size = min(em->start + em->len, page_end + 1) - cur;
free_extent_map(em);
- unlock_extent(tree, cur, page_end, NULL);
+ btrfs_unlock_extent(tree, cur, page_end, NULL);
if (folio_contains(folio, end_index)) {
size_t zero_offset = offset_in_folio(folio, isize);
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index d302a67efced..3e2e462365d6 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -776,10 +776,10 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
/* Get the big lock and read metadata off disk. */
if (!locked)
- lock_extent(io_tree, start, end, &cached);
+ btrfs_lock_extent(io_tree, start, end, &cached);
em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
if (!locked)
- unlock_extent(io_tree, start, end, &cached);
+ btrfs_unlock_extent(io_tree, start, end, &cached);
if (IS_ERR(em))
return NULL;
@@ -891,10 +891,10 @@ again:
while (1) {
struct btrfs_ordered_extent *ordered;
- lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
- unlock_extent(&inode->io_tree, page_start, page_end,
- &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
+ &cached_state);
if (!ordered)
break;
@@ -1223,9 +1223,9 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
folio_wait_writeback(folios[i]);
/* Lock the pages range */
- lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
- (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
- &cached_state);
+ btrfs_lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+ (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+ &cached_state);
/*
* Now we have a consistent view about the extent map, re-check
* which range really needs to be defragged.
@@ -1251,9 +1251,9 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
kfree(entry);
}
unlock_extent:
- unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
- (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
- &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+ (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+ &cached_state);
free_folios:
for (i = 0; i < nr_pages; i++) {
folio_unlock(folios[i]);
diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
index a374ce7a1813..eef8f0a93e80 100644
--- a/fs/btrfs/direct-io.c
+++ b/fs/btrfs/direct-io.c
@@ -50,13 +50,13 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
while (1) {
if (nowait) {
- if (!try_lock_extent(io_tree, lockstart, lockend,
- cached_state)) {
+ if (!btrfs_try_lock_extent(io_tree, lockstart, lockend,
+ cached_state)) {
ret = -EAGAIN;
break;
}
} else {
- lock_extent(io_tree, lockstart, lockend, cached_state);
+ btrfs_lock_extent(io_tree, lockstart, lockend, cached_state);
}
/*
* We're concerned with the entire range that we're going to be
@@ -78,7 +78,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
lockstart, lockend)))
break;
- unlock_extent(io_tree, lockstart, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, lockstart, lockend, cached_state);
if (ordered) {
if (nowait) {
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index bdcb18324516..86436ed37ad6 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -145,14 +145,14 @@ int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
struct extent_state **cached);
-static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached)
+static inline int btrfs_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached)
{
return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}
-static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached)
+static inline bool btrfs_try_lock_extent(struct extent_io_tree *tree, u64 start,
+ u64 end, struct extent_state **cached)
{
return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}
@@ -184,8 +184,8 @@ static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
return __clear_extent_bit(tree, start, end, bits, cached, NULL);
}
-static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- struct extent_state **cached)
+static inline int btrfs_unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached)
{
return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL);
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ba067d3461e6..31cf2189a224 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -371,13 +371,13 @@ again:
}
/* step three, lock the state bits for the whole range */
- lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
+ btrfs_lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
/* then test to make sure it is all still delalloc */
ret = test_range_bit(tree, delalloc_start, delalloc_end,
EXTENT_DELALLOC, cached_state);
- unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
+ btrfs_unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
if (!ret) {
unlock_delalloc_folio(inode, locked_folio, delalloc_start,
delalloc_end);
@@ -1201,7 +1201,7 @@ static void lock_extents_for_read(struct btrfs_inode *inode, u64 start, u64 end,
ASSERT(IS_ALIGNED(end + 1, PAGE_SIZE));
again:
- lock_extent(&inode->io_tree, start, end, cached_state);
+ btrfs_lock_extent(&inode->io_tree, start, end, cached_state);
cur_pos = start;
while (cur_pos < end) {
struct btrfs_ordered_extent *ordered;
@@ -1224,7 +1224,7 @@ again:
}
/* Now wait for the OE to finish. */
- unlock_extent(&inode->io_tree, start, end, cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
btrfs_start_ordered_extent_nowriteback(ordered, start, end + 1 - start);
btrfs_put_ordered_extent(ordered);
/* We have unlocked the whole range, restart from the beginning. */
@@ -1244,7 +1244,7 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
lock_extents_for_read(inode, start, end, &cached_state);
ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
- unlock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
free_extent_map(em_cached);
@@ -1432,8 +1432,8 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
* We've hit an error during previous delalloc range,
* have to cleanup the remaining locked ranges.
*/
- unlock_extent(&inode->io_tree, found_start,
- found_start + found_len - 1, NULL);
+ btrfs_unlock_extent(&inode->io_tree, found_start,
+ found_start + found_len - 1, NULL);
unlock_delalloc_folio(&inode->vfs_inode, folio,
found_start,
found_start + found_len - 1);
@@ -2563,7 +2563,7 @@ void btrfs_readahead(struct readahead_control *rac)
while ((folio = readahead_folio(rac)) != NULL)
btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
- unlock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
if (em_cached)
free_extent_map(em_cached);
@@ -2590,7 +2590,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
if (start > end)
return 0;
- lock_extent(tree, start, end, &cached_state);
+ btrfs_lock_extent(tree, start, end, &cached_state);
folio_wait_writeback(folio);
/*
@@ -2598,7 +2598,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
* so here we only need to unlock the extent range to free any
* existing extent state.
*/
- unlock_extent(tree, start, end, &cached_state);
+ btrfs_unlock_extent(tree, start, end, &cached_state);
return 0;
}
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index d62c36a0b7ba..67c724a576ee 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1055,7 +1055,7 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
goto out_free_pre;
}
- lock_extent(&inode->io_tree, start, start + len - 1, NULL);
+ btrfs_lock_extent(&inode->io_tree, start, start + len - 1, NULL);
write_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (!em) {
@@ -1108,7 +1108,7 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
out_unlock:
write_unlock(&em_tree->lock);
- unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
+ btrfs_unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
free_extent_map(split_mid);
out_free_pre:
free_extent_map(split_pre);
diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c
index 7715e30508c5..ba65a4821c44 100644
--- a/fs/btrfs/fiemap.c
+++ b/fs/btrfs/fiemap.c
@@ -661,7 +661,7 @@ restart:
range_end = round_up(start + len, sectorsize);
prev_extent_end = range_start;
- lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, range_start, range_end, &cached_state);
ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
if (ret < 0)
@@ -841,7 +841,7 @@ check_eof_delalloc:
}
out_unlock:
- unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, range_start, range_end, &cached_state);
if (ret == BTRFS_FIEMAP_FLUSH_CACHE) {
btrfs_release_path(path);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index cd2e34e11afd..0baee2bb110b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -921,14 +921,15 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
struct btrfs_ordered_extent *ordered;
if (nowait) {
- if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
- cached_state)) {
+ if (!btrfs_try_lock_extent(&inode->io_tree, start_pos,
+ last_pos, cached_state)) {
folio_unlock(folio);
folio_put(folio);
return -EAGAIN;
}
} else {
- lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
+ btrfs_lock_extent(&inode->io_tree, start_pos, last_pos,
+ cached_state);
}
ordered = btrfs_lookup_ordered_range(inode, start_pos,
@@ -936,8 +937,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
if (ordered &&
ordered->file_offset + ordered->num_bytes > start_pos &&
ordered->file_offset <= last_pos) {
- unlock_extent(&inode->io_tree, start_pos, last_pos,
- cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start_pos, last_pos,
+ cached_state);
folio_unlock(folio);
folio_put(folio);
btrfs_start_ordered_extent(ordered);
@@ -1017,7 +1018,7 @@ int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
else
*write_bytes = min_t(size_t, *write_bytes ,
num_bytes - pos + lockstart);
- unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
return ret;
}
@@ -1284,8 +1285,8 @@ again:
/* No copied bytes, unlock, release reserved space and exit. */
if (copied == 0) {
if (extents_locked)
- unlock_extent(&inode->io_tree, lockstart, lockend,
- &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend,
+ &cached_state);
else
free_extent_state(cached_state);
btrfs_delalloc_release_extents(inode, reserved_len);
@@ -1314,7 +1315,7 @@ again:
* to avoid a memory leak.
*/
if (extents_locked)
- unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
else
free_extent_state(cached_state);
@@ -1895,11 +1896,11 @@ again:
}
folio_wait_writeback(folio);
- lock_extent(io_tree, page_start, page_end, &cached_state);
+ btrfs_lock_extent(io_tree, page_start, page_end, &cached_state);
ret2 = set_folio_extent_mapped(folio);
if (ret2 < 0) {
ret = vmf_error(ret2);
- unlock_extent(io_tree, page_start, page_end, &cached_state);
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
goto out_unlock;
}
@@ -1909,7 +1910,7 @@ again:
*/
ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, fsize);
if (ordered) {
- unlock_extent(io_tree, page_start, page_end, &cached_state);
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
folio_unlock(folio);
up_read(&BTRFS_I(inode)->i_mmap_lock);
btrfs_start_ordered_extent(ordered);
@@ -1941,7 +1942,7 @@ again:
ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
&cached_state);
if (ret2) {
- unlock_extent(io_tree, page_start, page_end, &cached_state);
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -1961,7 +1962,7 @@ again:
btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
- unlock_extent(io_tree, page_start, page_end, &cached_state);
+ btrfs_unlock_extent(io_tree, page_start, page_end, &cached_state);
up_read(&BTRFS_I(inode)->i_mmap_lock);
btrfs_delalloc_release_extents(BTRFS_I(inode), fsize);
@@ -2222,8 +2223,8 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
while (1) {
truncate_pagecache_range(inode, lockstart, lockend);
- lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state);
/*
* We can't have ordered extents in the range, nor dirty/writeback
* pages, because we have locked the inode's VFS lock in exclusive
@@ -2237,8 +2238,8 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
if (!check_range_has_page(inode, lockstart, lockend))
break;
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state);
}
btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
@@ -2736,8 +2737,8 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out:
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state);
out_only_mutex:
if (!updated_inode && truncated_block && !ret) {
/*
@@ -3007,16 +3008,16 @@ reserve_space:
ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
alloc_start, bytes_to_reserve);
if (ret) {
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state);
goto out;
}
ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
alloc_end - alloc_start,
fs_info->sectorsize,
offset + len, &alloc_hint);
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state);
/* btrfs_prealloc_file_range releases reserved space on error */
if (ret) {
space_reserved = false;
@@ -3127,8 +3128,8 @@ static long btrfs_fallocate(struct file *file, int mode,
}
locked_end = alloc_end - 1;
- lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
- &cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+ &cached_state);
btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
@@ -3216,8 +3217,8 @@ static long btrfs_fallocate(struct file *file, int mode,
*/
ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
out_unlock:
- unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
+ &cached_state);
out:
btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
extent_changeset_free(data_reserved);
@@ -3563,7 +3564,7 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
last_extent_end = lockstart;
- lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
@@ -3709,7 +3710,7 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
}
out:
- unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
btrfs_free_path(path);
if (ret < 0)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 633dfe4ee93e..30263f15bdca 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -336,7 +336,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
btrfs_i_size_write(inode, 0);
truncate_pagecache(vfs_inode, 0);
- lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
/*
@@ -348,7 +348,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
- unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
if (ret)
goto fail;
@@ -1288,8 +1288,8 @@ cleanup_write_cache_enospc(struct inode *inode,
struct extent_state **cached_state)
{
io_ctl_drop_pages(io_ctl);
- unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
- cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ cached_state);
}
static int __btrfs_wait_cache_io(struct btrfs_root *root,
@@ -1414,8 +1414,8 @@ static int __btrfs_write_out_cache(struct inode *inode,
if (ret)
goto out_unlock;
- lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
- &cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ &cached_state);
io_ctl_set_generation(io_ctl, trans->transid);
@@ -1475,8 +1475,8 @@ static int __btrfs_write_out_cache(struct inode *inode,
io_ctl_drop_pages(io_ctl);
io_ctl_free(io_ctl);
- unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ &cached_state);
/*
* at this point the pages are under IO and we're happy,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b2b454e447ca..6b173fa2c325 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -686,12 +686,12 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode,
if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
return 1;
- lock_extent(&inode->io_tree, offset, end, &cached);
+ btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
ret = __cow_file_range_inline(inode, size, compressed_size,
compress_type, compressed_folio,
update_i_size);
if (ret > 0) {
- unlock_extent(&inode->io_tree, offset, end, &cached);
+ btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
return ret;
}
@@ -1138,7 +1138,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
goto done;
}
- lock_extent(io_tree, start, end, &cached);
+ btrfs_lock_extent(io_tree, start, end, &cached);
/* Here we're doing allocation and writeback of the compressed pages */
file_extent.disk_bytenr = ins.objectid;
@@ -1389,14 +1389,14 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
* Locked range will be released either during error clean up or
* after the whole range is finished.
*/
- lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
- &cached);
+ btrfs_lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
+ &cached);
em = btrfs_create_io_em(inode, start, &file_extent,
BTRFS_ORDERED_REGULAR);
if (IS_ERR(em)) {
- unlock_extent(&inode->io_tree, start,
- start + cur_alloc_size - 1, &cached);
+ btrfs_unlock_extent(&inode->io_tree, start,
+ start + cur_alloc_size - 1, &cached);
ret = PTR_ERR(em);
goto out_reserve;
}
@@ -1405,8 +1405,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
1 << BTRFS_ORDERED_REGULAR);
if (IS_ERR(ordered)) {
- unlock_extent(&inode->io_tree, start,
- start + cur_alloc_size - 1, &cached);
+ btrfs_unlock_extent(&inode->io_tree, start,
+ start + cur_alloc_size - 1, &cached);
ret = PTR_ERR(ordered);
goto out_drop_extent_cache;
}
@@ -1741,7 +1741,7 @@ static int fallback_to_cow(struct btrfs_inode *inode,
* group that contains that extent to RO mode and therefore force COW
* when starting writeback.
*/
- lock_extent(io_tree, start, end, &cached_state);
+ btrfs_lock_extent(io_tree, start, end, &cached_state);
count = count_range_bits(io_tree, &range_start, end, range_bytes,
EXTENT_NORESERVE, 0, NULL);
if (count > 0 || is_space_ino || is_reloc_ino) {
@@ -1759,7 +1759,7 @@ static int fallback_to_cow(struct btrfs_inode *inode,
if (count > 0)
clear_extent_bits(io_tree, start, end, EXTENT_NORESERVE);
}
- unlock_extent(io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
/*
* Don't try to create inline extents, as a mix of inline extent that
@@ -1967,7 +1967,7 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio
u64 end = file_pos + len - 1;
int ret = 0;
- lock_extent(&inode->io_tree, file_pos, end, cached);
+ btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);
if (is_prealloc) {
struct extent_map *em;
@@ -1975,7 +1975,7 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio
em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
BTRFS_ORDERED_PREALLOC);
if (IS_ERR(em)) {
- unlock_extent(&inode->io_tree, file_pos, end, cached);
+ btrfs_unlock_extent(&inode->io_tree, file_pos, end, cached);
return PTR_ERR(em);
}
free_extent_map(em);
@@ -1988,7 +1988,7 @@ static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio
if (IS_ERR(ordered)) {
if (is_prealloc)
btrfs_drop_extent_map_range(inode, file_pos, end, false);
- unlock_extent(&inode->io_tree, file_pos, end, cached);
+ btrfs_unlock_extent(&inode->io_tree, file_pos, end, cached);
return PTR_ERR(ordered);
}
@@ -2287,7 +2287,7 @@ error:
if (cur_offset < end) {
struct extent_state *cached = NULL;
- lock_extent(&inode->io_tree, cur_offset, end, &cached);
+ btrfs_lock_extent(&inode->io_tree, cur_offset, end, &cached);
extent_clear_unlock_delalloc(inode, cur_offset, end,
locked_folio, &cached,
EXTENT_LOCKED | EXTENT_DELALLOC |
@@ -2793,7 +2793,7 @@ again:
if (ret)
goto out_page;
- lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
/* already ordered? We're done */
if (folio_test_ordered(folio))
@@ -2801,8 +2801,8 @@ again:
ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
if (ordered) {
- unlock_extent(&inode->io_tree, page_start, page_end,
- &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
+ &cached_state);
folio_unlock(folio);
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
@@ -2828,7 +2828,7 @@ out_reserved:
if (free_delalloc_space)
btrfs_delalloc_release_space(inode, data_reserved, page_start,
PAGE_SIZE, true);
- unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
out_page:
if (ret) {
/*
@@ -4860,11 +4860,11 @@ again:
folio_wait_writeback(folio);
- lock_extent(io_tree, block_start, block_end, &cached_state);
+ btrfs_lock_extent(io_tree, block_start, block_end, &cached_state);
ordered = btrfs_lookup_ordered_extent(inode, block_start);
if (ordered) {
- unlock_extent(io_tree, block_start, block_end, &cached_state);
+ btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
folio_unlock(folio);
folio_put(folio);
btrfs_start_ordered_extent(ordered);
@@ -4879,7 +4879,7 @@ again:
ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
&cached_state);
if (ret) {
- unlock_extent(io_tree, block_start, block_end, &cached_state);
+ btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
goto out_unlock;
}
@@ -4898,7 +4898,7 @@ again:
block_end + 1 - block_start);
btrfs_folio_set_dirty(fs_info, folio, block_start,
block_end + 1 - block_start);
- unlock_extent(io_tree, block_start, block_end, &cached_state);
+ btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
if (only_release_metadata)
set_extent_bit(&inode->io_tree, block_start, block_end,
@@ -5060,7 +5060,7 @@ next:
break;
}
free_extent_map(em);
- unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
+ btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
return ret;
}
@@ -5244,7 +5244,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
state_flags = state->state;
spin_unlock(&io_tree->lock);
- lock_extent(io_tree, start, end, &cached_state);
+ btrfs_lock_extent(io_tree, start, end, &cached_state);
/*
* If still has DELALLOC flag, the extent didn't reach disk,
@@ -7360,7 +7360,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
}
if (!inode_evicting)
- lock_extent(tree, page_start, page_end, &cached_state);
+ btrfs_lock_extent(tree, page_start, page_end, &cached_state);
cur = page_start;
while (cur < page_end) {
@@ -7568,7 +7568,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
control.new_size = new_size;
- lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
/*
* We want to drop from the next block forward in case this new
* size is not block aligned since we will be keeping the last
@@ -7583,7 +7583,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
- unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
trans->block_rsv = &fs_info->trans_block_rsv;
if (ret != -ENOSPC && ret != -EAGAIN)
@@ -9138,7 +9138,7 @@ static ssize_t btrfs_encoded_read_inline(
read_extent_buffer(leaf, tmp, ptr, count);
btrfs_release_path(path);
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
*unlocked = true;
@@ -9286,7 +9286,7 @@ ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
if (ret)
goto out;
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
*unlocked = true;
@@ -9363,7 +9363,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
goto out_unlock_inode;
}
- if (!try_lock_extent(io_tree, start, lockend, cached_state)) {
+ if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) {
ret = -EAGAIN;
goto out_unlock_inode;
}
@@ -9372,7 +9372,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
lockend - start + 1);
if (ordered) {
btrfs_put_ordered_extent(ordered);
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
ret = -EAGAIN;
goto out_unlock_inode;
}
@@ -9385,13 +9385,13 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
if (ret)
goto out_unlock_inode;
- lock_extent(io_tree, start, lockend, cached_state);
+ btrfs_lock_extent(io_tree, start, lockend, cached_state);
ordered = btrfs_lookup_ordered_range(inode, start,
lockend - start + 1);
if (!ordered)
break;
btrfs_put_ordered_extent(ordered);
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
cond_resched();
}
}
@@ -9466,7 +9466,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
em = NULL;
if (*disk_bytenr == EXTENT_MAP_HOLE) {
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
unlocked = true;
ret = iov_iter_zero(count, iter);
@@ -9482,7 +9482,7 @@ out_em:
out_unlock_extent:
/* Leave inode and extent locked if we need to do a read. */
if (!unlocked && ret != -EIOCBQUEUED)
- unlock_extent(io_tree, start, lockend, cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, cached_state);
out_unlock_inode:
if (!unlocked && ret != -EIOCBQUEUED)
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
@@ -9629,14 +9629,14 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
end >> PAGE_SHIFT);
if (ret)
goto out_folios;
- lock_extent(io_tree, start, end, &cached_state);
+ btrfs_lock_extent(io_tree, start, end, &cached_state);
ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
if (!ordered &&
!filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
break;
if (ordered)
btrfs_put_ordered_extent(ordered);
- unlock_extent(io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
cond_resched();
}
@@ -9701,7 +9701,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
if (start + encoded->len > inode->vfs_inode.i_size)
i_size_write(&inode->vfs_inode, start + encoded->len);
- unlock_extent(io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
btrfs_delalloc_release_extents(inode, num_bytes);
@@ -9726,7 +9726,7 @@ out_free_data_space:
if (!extent_reserved)
btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
out_unlock:
- unlock_extent(io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(io_tree, start, end, &cached_state);
out_folios:
for (i = 0; i < nr_folios; i++) {
if (folios[i])
@@ -9991,7 +9991,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
- lock_extent(io_tree, 0, isize - 1, &cached_state);
+ btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
while (prev_extent_end < isize) {
struct btrfs_key key;
struct extent_buffer *leaf;
@@ -10169,7 +10169,7 @@ out:
if (!IS_ERR_OR_NULL(map))
btrfs_free_chunk_map(map);
- unlock_extent(io_tree, 0, isize - 1, &cached_state);
+ btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state);
if (ret)
btrfs_swap_deactivate(file);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 63aeacc54945..3341344a9fd2 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4510,7 +4510,7 @@ static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp,
args.compression, &unlocked);
if (!unlocked) {
- unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
}
}
@@ -4699,7 +4699,7 @@ static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int iss
ret = priv->count;
out:
- unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
+ btrfs_unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
io_uring_cmd_done(cmd, ret, 0, issue_flags);
@@ -4788,7 +4788,7 @@ static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
return -EIOCBQUEUED;
out_fail:
- unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
kfree(priv);
return ret;
@@ -4913,7 +4913,7 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
(const char *)&data->args + copy_end_kernel,
sizeof(data->args) - copy_end_kernel)) {
if (ret == -EIOCBQUEUED) {
- unlock_extent(io_tree, start, lockend, &cached_state);
+ btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
}
ret = -EFAULT;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 03c945711003..b5b544712e93 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -1173,7 +1173,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
cachedp = cached_state;
while (1) {
- lock_extent(&inode->io_tree, start, end, cachedp);
+ btrfs_lock_extent(&inode->io_tree, start, end, cachedp);
ordered = btrfs_lookup_ordered_range(inode, start,
end - start + 1);
if (!ordered) {
@@ -1186,7 +1186,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
refcount_dec(&cache->refs);
break;
}
- unlock_extent(&inode->io_tree, start, end, cachedp);
+ btrfs_unlock_extent(&inode->io_tree, start, end, cachedp);
btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
}
@@ -1204,7 +1204,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
{
struct btrfs_ordered_extent *ordered;
- if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
+ if (!btrfs_try_lock_extent(&inode->io_tree, start, end, cached_state))
return false;
ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
@@ -1212,7 +1212,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
return true;
btrfs_put_ordered_extent(ordered);
- unlock_extent(&inode->io_tree, start, end, cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start, end, cached_state);
return false;
}
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index 09e40a2fdcf0..e6c0a10c4c67 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -645,10 +645,10 @@ static int btrfs_extent_same_range(struct btrfs_inode *src, u64 loff, u64 len,
* because we have already locked the inode's i_mmap_lock in exclusive
* mode.
*/
- lock_extent(&dst->io_tree, dst_loff, end, &cached_state);
+ btrfs_lock_extent(&dst->io_tree, dst_loff, end, &cached_state);
ret = btrfs_clone(&src->vfs_inode, &dst->vfs_inode, loff, len,
ALIGN(len, bs), dst_loff, 1);
- unlock_extent(&dst->io_tree, dst_loff, end, &cached_state);
+ btrfs_unlock_extent(&dst->io_tree, dst_loff, end, &cached_state);
btrfs_btree_balance_dirty(fs_info);
@@ -748,9 +748,9 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
* mode.
*/
end = destoff + len - 1;
- lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
- unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
/*
* We may have copied an inline extent into a page of the destination
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b7f7563d3c14..4ac2e63cbe55 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -910,16 +910,16 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
/* Take mmap lock to serialize with reflinks. */
if (!down_read_trylock(&inode->i_mmap_lock))
continue;
- ret = try_lock_extent(&inode->io_tree, key.offset,
- end, &cached_state);
+ ret = btrfs_try_lock_extent(&inode->io_tree, key.offset,
+ end, &cached_state);
if (!ret) {
up_read(&inode->i_mmap_lock);
continue;
}
btrfs_drop_extent_map_range(inode, key.offset, end, true);
- unlock_extent(&inode->io_tree, key.offset, end,
- &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, key.offset, end,
+ &cached_state);
up_read(&inode->i_mmap_lock);
}
}
@@ -1378,9 +1378,9 @@ static int invalidate_extent_cache(struct btrfs_root *root,
}
/* the lock_extent waits for read_folio to complete */
- lock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
btrfs_drop_extent_map_range(inode, start, end, true);
- unlock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
}
return 0;
}
@@ -2735,13 +2735,13 @@ static noinline_for_stack int prealloc_file_extent_cluster(struct reloc_control
else
end = cluster->end - offset;
- lock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
num_bytes = end + 1 - start;
ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
num_bytes, num_bytes,
end + 1, &alloc_hint);
cur_offset = end + 1;
- unlock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
if (ret)
break;
}
@@ -2774,9 +2774,9 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct reloc_contr
em->ram_bytes = em->len;
em->flags |= EXTENT_FLAG_PINNED;
- lock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_lock_extent(&inode->io_tree, start, end, &cached_state);
ret = btrfs_replace_extent_map_range(inode, em, false);
- unlock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
free_extent_map(em);
return ret;
@@ -2899,8 +2899,8 @@ again:
goto release_folio;
/* Mark the range delalloc and dirty for later writeback */
- lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
- &cached_state);
+ btrfs_lock_extent(&BTRFS_I(inode)->io_tree, clamped_start,
+ clamped_end, &cached_state);
ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
clamped_end, 0, &cached_state);
if (ret) {
@@ -2933,8 +2933,8 @@ again:
boundary_start, boundary_end,
EXTENT_BOUNDARY, NULL);
}
- unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
- &cached_state);
+ btrfs_unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
+ &cached_state);
btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
cur += clamped_len;
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 8773758a8cc7..98c6e237d648 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -190,7 +190,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
sectorsize - 1, start, end);
goto out_bits;
}
- unlock_extent(tmp, start, end, NULL);
+ btrfs_unlock_extent(tmp, start, end, NULL);
unlock_page(locked_page);
put_page(locked_page);
@@ -226,7 +226,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
test_err("there were unlocked pages in the range");
goto out_bits;
}
- unlock_extent(tmp, start, end, NULL);
+ btrfs_unlock_extent(tmp, start, end, NULL);
/* locked_page was unlocked above */
put_page(locked_page);
@@ -281,7 +281,7 @@ static int test_find_delalloc(u32 sectorsize, u32 nodesize)
test_err("pages in range were not all locked");
goto out_bits;
}
- unlock_extent(tmp, start, end, NULL);
+ btrfs_unlock_extent(tmp, start, end, NULL);
/*
* Now to test where we run into a page that is no longer dirty in the
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f5af11565b87..31087b69d68f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4300,8 +4300,8 @@ static int log_csums(struct btrfs_trans_handle *trans,
* file which happens to refer to the same extent as well. Such races
* can leave checksum items in the log with overlapping ranges.
*/
- ret = lock_extent(&log_root->log_csum_range, sums->logical, lock_end,
- &cached_state);
+ ret = btrfs_lock_extent(&log_root->log_csum_range, sums->logical, lock_end,
+ &cached_state);
if (ret)
return ret;
/*
@@ -4317,8 +4317,8 @@ static int log_csums(struct btrfs_trans_handle *trans,
if (!ret)
ret = btrfs_csum_file_blocks(trans, log_root, sums);
- unlock_extent(&log_root->log_csum_range, sums->logical, lock_end,
- &cached_state);
+ btrfs_unlock_extent(&log_root->log_csum_range, sums->logical, lock_end,
+ &cached_state);
return ret;
}