summaryrefslogtreecommitdiff
path: root/security/landlock/fs.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2024-09-28 21:31:10 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2024-10-01 19:40:47 -0400
commit9ec9b917b3f6cf676226f074fdcdeceebe1a0b29 (patch)
tree7c71ca81faa20a1e44624f7489fae1264db8e269 /security/landlock/fs.c
parent32cb8103ecfacdd5ed8e1eb390221c3f8339de6f (diff)
vfs: use fast_list for superblock's inode listfast_list
Use the new fast_list for super_block.s_inodes. This gives similar performance to Dave's dlock list approach [1]; lock contention is now moved to the lru_list locks. Iteration is now fully lockless - instead we iterate using rcu_read_lock(), which means we must take care for racing with removal. Generally this is already handled - code that iterates over s_inodes takes i_lock and checks i_state, skipping inodes that are I_WILL_FREE|I_FREEING. However, code may also check for nonzero i_sb_list_idx if it wishes to iterate over precisely the inodes that are on the s_inodes list. [1]: https://lore.kernel.org/linux-fsdevel/20231206060629.2827226-4-david@fromorbit.com/ Cc: Christian Brauner <brauner@kernel.org> Cc: Dave Chinner <dchinner@redhat.com> Cc: Waiman Long <longman@redhat.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'security/landlock/fs.c')
-rw-r--r--security/landlock/fs.c41
1 file changed, 13 insertions, 28 deletions
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index 7d79fc8abe21..9e4d3bd56e3d 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -1228,13 +1228,18 @@ static void hook_inode_free_security_rcu(void *inode_security)
*/
static void hook_sb_delete(struct super_block *const sb)
{
- struct inode *inode, *prev_inode = NULL;
+ struct genradix_iter iter;
+ void **i;
if (!landlock_initialized)
return;
- spin_lock(&sb->s_inode_list_lock);
- list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ rcu_read_lock();
+ genradix_for_each(&sb->s_inodes.items, iter, i) {
+ struct inode *inode = *((struct inode **) i);
+ if (!inode)
+ continue;
+
struct landlock_object *object;
/* Only handles referenced inodes. */
@@ -1258,10 +1263,8 @@ static void hook_sb_delete(struct super_block *const sb)
continue;
}
- rcu_read_lock();
object = rcu_dereference(landlock_inode(inode)->object);
if (!object) {
- rcu_read_unlock();
spin_unlock(&inode->i_lock);
continue;
}
@@ -1278,7 +1281,6 @@ static void hook_sb_delete(struct super_block *const sb)
if (object->underobj == inode) {
object->underobj = NULL;
spin_unlock(&object->lock);
- rcu_read_unlock();
/*
* Because object->underobj was not NULL,
@@ -1299,32 +1301,15 @@ static void hook_sb_delete(struct super_block *const sb)
iput(inode);
} else {
spin_unlock(&object->lock);
- rcu_read_unlock();
}
- if (prev_inode) {
- /*
- * At this point, we still own the __iget() reference
- * that we just set in this loop walk. Therefore we
- * can drop the list lock and know that the inode won't
- * disappear from under us until the next loop walk.
- */
- spin_unlock(&sb->s_inode_list_lock);
- /*
- * We can now actually put the inode reference from the
- * previous loop walk, which is not needed anymore.
- */
- iput(prev_inode);
- cond_resched();
- spin_lock(&sb->s_inode_list_lock);
- }
- prev_inode = inode;
+ rcu_read_unlock();
+ iput(inode);
+ cond_resched();
+ rcu_read_lock();
}
- spin_unlock(&sb->s_inode_list_lock);
+ rcu_read_unlock();
- /* Puts the inode reference from the last loop walk, if any. */
- if (prev_inode)
- iput(prev_inode);
/* Waits for pending iput() in release_inode(). */
wait_var_event(&landlock_superblock(sb)->inode_refs,
!atomic_long_read(&landlock_superblock(sb)->inode_refs));