summary refs log tree commit diff
path: root/fs/xfs/xfs_super.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/xfs/xfs_super.c')
-rw-r--r-- fs/xfs/xfs_super.c | 25
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 02d1844b4ea4..bd634c94b5bb 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -479,7 +479,8 @@ xfs_init_mount_workqueues(
goto out_destroy_cil;
mp->m_blockgc_workqueue = alloc_workqueue("xfs-blockgc/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE, 0,
+ mp->m_super->s_id);
if (!mp->m_blockgc_workqueue)
goto out_destroy_reclaim;
@@ -1341,6 +1342,25 @@ xfs_fc_validate_params(
return 0;
}
+/*
+ * Constrain the number of threads that we start for background work. This
+ * is the estimated parallelism of the filesystem capped to the unbound work
+ * queue maximum.
+ *
+ * We can't set this when we allocate the workqueues because the thread count
+ * derives from AG count, and we can't know that until we're far enough through
+ * setup to read the superblock, which requires functioning workqueues.
+ */
+static inline void
+xfs_configure_background_workqueues(
+ struct xfs_mount *mp)
+{
+ unsigned int max_active = xfs_guess_metadata_threads(mp);
+
+ max_active = min_t(unsigned int, max_active, WQ_UNBOUND_MAX_ACTIVE);
+ workqueue_set_max_active(mp->m_blockgc_workqueue, max_active);
+}
+
static int
xfs_fc_fill_super(
struct super_block *sb,
@@ -1406,6 +1426,8 @@ xfs_fc_fill_super(
if (error)
goto out_free_sb;
+ xfs_configure_background_workqueues(mp);
+
error = xfs_setup_devices(mp);
if (error)
goto out_free_sb;
@@ -1757,7 +1779,6 @@ static int xfs_init_fs_context(
atomic_set(&mp->m_active_trans, 0);
INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
- INIT_DELAYED_WORK(&mp->m_blockgc_work, xfs_blockgc_worker);
mp->m_kobj.kobject.kset = xfs_kset;
/*
* We don't create the finobt per-ag space reservation until after log