author     Leon Romanovsky <leon@kernel.org>     2025-05-12 17:17:27 +0200
committer  Christoph Hellwig <hch@lst.de>        2025-05-20 05:34:27 +0200
commit     f01e389e88b27a55674bc11d5d44dc75f0d83745
tree       ffec1251f8e1bd0a6891b1a66309d11346d907c7
parent     a43d304f3abea73883f99287396a6e1eb57c3637
nvme-pci: add a symbolic name for the small pool size
Open coding magic numbers in multiple places is never a good idea.
Signed-off-by: Leon Romanovsky <leon@kernel.org>
[hch: split from a larger patch]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
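
To illustrate the point about open-coded magic numbers, here is a minimal standalone C sketch (not driver code; the names are hypothetical) showing how deriving every limit from one named constant keeps the allocation size and the capacity checks from drifting apart:

```c
#include <stdio.h>
#include <stdint.h>

/* One named definition instead of the bare literal 256 repeated at each use. */
#define SMALL_POOL_BYTES 256

int main(void)
{
	/* Every derived limit references the constant, so resizing the pool
	 * cannot leave a stale 256 behind in one of the checks. */
	size_t pool_bytes  = SMALL_POOL_BYTES;
	size_t max_entries = SMALL_POOL_BYTES / sizeof(uint64_t); /* 8-byte entries */

	printf("%zu-byte pool holds up to %zu 8-byte entries\n",
	       pool_bytes, max_entries);
	return 0;
}
```

The patch below applies the same idea: the dma_pool allocation size and the PRP/SGL entry-count checks all reference NVME_SMALL_POOL_SIZE.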
-rw-r--r-- | drivers/nvme/host/pci.c | 14 |
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5017d6c56519..2dbe6757e1a3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -37,6 +37,9 @@
 #define SGES_PER_PAGE   (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
+/* Optimisation for I/Os between 4k and 128k */
+#define NVME_SMALL_POOL_SIZE    256
+
 /*
  * These can be higher, but we need to ensure that any command doesn't
  * require an sg allocation that needs more than a page of data.
  */
@@ -407,7 +410,7 @@ static struct nvme_descriptor_pools *
 nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node)
 {
         struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node];
-        size_t small_align = 256;
+        size_t small_align = NVME_SMALL_POOL_SIZE;
 
         if (pools->small)
                 return pools; /* already initialized */
@@ -420,9 +423,8 @@ nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node)
         if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
                 small_align = 512;
 
-        /* Optimisation for I/Os between 4k and 128k */
-        pools->small = dma_pool_create_node("nvme descriptor 256", dev->dev,
-                        256, small_align, 0, numa_node);
+        pools->small = dma_pool_create_node("nvme descriptor small", dev->dev,
+                        NVME_SMALL_POOL_SIZE, small_align, 0, numa_node);
         if (!pools->small) {
                 dma_pool_destroy(pools->large);
                 pools->large = NULL;
@@ -689,7 +691,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_queue *nvmeq,
         }
 
         if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <=
-                        256 / sizeof(__le64))
+                        NVME_SMALL_POOL_SIZE / sizeof(__le64))
                 iod->flags |= IOD_SMALL_DESCRIPTOR;
 
         prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
@@ -774,7 +776,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_queue *nvmeq,
                 return BLK_STS_OK;
         }
 
-        if (entries <= 256 / sizeof(*sg_list))
+        if (entries <= NVME_SMALL_POOL_SIZE / sizeof(*sg_list))
                 iod->flags |= IOD_SMALL_DESCRIPTOR;
 
         sg_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
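
As a rough check of the "4k to 128k" comment, the following standalone sketch works through the size arithmetic. It assumes a controller page size of 4096 bytes, an 8-byte PRP list entry (__le64) and a 16-byte SGL data descriptor; these match the common mainline configuration but are stated here as assumptions rather than taken from the kernel headers:

```c
#include <stdio.h>
#include <stdint.h>

#define NVME_SMALL_POOL_SIZE 256
#define CTRL_PAGE_SIZE       4096   /* assumed NVME_CTRL_PAGE_SIZE */
#define SGL_DESC_SIZE        16     /* assumed sizeof(struct nvme_sgl_desc) */

int main(void)
{
	/* Entries that fit in one descriptor taken from the small pool. */
	size_t prp_entries = NVME_SMALL_POOL_SIZE / sizeof(uint64_t); /* 32 */
	size_t sgl_descs   = NVME_SMALL_POOL_SIZE / SGL_DESC_SIZE;    /* 16 */

	/* 32 PRP entries, one 4 KiB page each, covers on the order of 128 KiB,
	 * which is the upper bound the code comment refers to. */
	printf("PRP entries per small descriptor: %zu (~%zu KiB of data)\n",
	       prp_entries, prp_entries * (size_t)CTRL_PAGE_SIZE / 1024);
	printf("SGL descriptors per small descriptor: %zu\n", sgl_descs);
	return 0;
}
```

Under those assumptions the numbers line up with the two checks in the hunks above: DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <= NVME_SMALL_POOL_SIZE / sizeof(__le64) for PRP lists and entries <= NVME_SMALL_POOL_SIZE / sizeof(*sg_list) for SGLs.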