author | Jason Gunthorpe <jgg@nvidia.com> | 2025-04-08 13:54:11 -0300
committer | Joerg Roedel <jroedel@suse.de> | 2025-04-17 16:22:54 +0200
commit | 249d3327f0236302a92d9eccb2b32f64c8daaf86
tree | 58057e4566e16576b12c25fec34e81dd721cc726
parent | c3b42b6ffaed8e0b042224920085c3ae8db89d2a
iommu/vtd: Remove iommu_alloc_pages_node()
The Intel driver is the only remaining user of iommu_alloc_pages_node(). Convert
its call sites to the size-based versions, avoiding PAGE_SHIFT where possible.
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/23-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
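
[Editor's note, not part of the commit: the old helper took a buddy-allocator
page order, while the _sz helpers take a byte count. A minimal userspace sketch
of the equivalence the conversion relies on, assuming a 4K PAGE_SHIFT of 12
(in the kernel PAGE_SHIFT is per-architecture):

/*
 * Sketch only: maps a page order to the size argument passed to the
 * _sz variants. PAGE_SHIFT of 12 (4K pages) is an assumption here.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4K pages */

int main(void)
{
	for (unsigned int order = 0; order <= 8; order++) {
		/* byte count equivalent to a given page order */
		unsigned long size = 1UL << (order + PAGE_SHIFT);
		printf("order %u -> %lu bytes\n", order, size);
	}
	/* the removed INTR_REMAP_PAGE_ORDER of 8 corresponds to 1MB */
	assert((1UL << (8 + PAGE_SHIFT)) == (1UL << 20));
	return 0;
}

This is why irq_remapping.c below can pass SZ_1M directly instead of order 8.]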
-rw-r--r-- | drivers/iommu/intel/iommu.h         |  7
-rw-r--r-- | drivers/iommu/intel/irq_remapping.c |  8
-rw-r--r-- | drivers/iommu/intel/pasid.c         |  3
-rw-r--r-- | drivers/iommu/intel/prq.c           |  3
-rw-r--r-- | drivers/iommu/iommu-pages.h         | 16
5 files changed, 11 insertions, 26 deletions
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index c4916886da5a..8d5d85bf0080 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -493,14 +493,13 @@ struct q_inval {
 
 /* Page Request Queue depth */
 #define PRQ_ORDER	4
-#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)
-#define PRQ_DEPTH	((0x1000 << PRQ_ORDER) >> 5)
+#define PRQ_SIZE	(SZ_4K << PRQ_ORDER)
+#define PRQ_RING_MASK	(PRQ_SIZE - 0x20)
+#define PRQ_DEPTH	(PRQ_SIZE >> 5)
 
 struct dmar_pci_notify_info;
 
 #ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER	8
 
 #define INTR_REMAP_TABLE_REG_SIZE	0xf
 #define INTR_REMAP_TABLE_REG_SIZE_MASK	0xf
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 9de83798f580..cf7b6882ec75 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -530,11 +530,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 	if (!ir_table)
 		return -ENOMEM;
 
-	ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
-					       INTR_REMAP_PAGE_ORDER);
+	/* 1MB - maximum possible interrupt remapping table size */
+	ir_table_base =
+		iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M);
 	if (!ir_table_base) {
-		pr_err("IR%d: failed to allocate pages of order %d\n",
-		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
+		pr_err("IR%d: failed to allocate 1M of pages\n", iommu->seq_id);
 		goto out_free_table;
 	}
 
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 728da85a9100..ac67a056b6c8 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -60,7 +60,8 @@ int intel_pasid_alloc_table(struct device *dev)
 
 	size = max_pasid >> (PASID_PDE_SHIFT - 3);
 	order = size ? get_order(size) : 0;
-	dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
+	dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,
+					1 << (order + PAGE_SHIFT));
 	if (!dir) {
 		kfree(pasid_table);
 		return -ENOMEM;
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 9859bcbd425e..52570e42a14c 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -290,7 +290,8 @@ int intel_iommu_enable_prq(struct intel_iommu *iommu)
 	struct iopf_queue *iopfq;
 	int irq, ret;
 
-	iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
+	iommu->prq =
+		iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, PRQ_SIZE);
 	if (!iommu->prq) {
 		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
 			iommu->name);
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index 7ece83bb0f54..b3af2813ed0c 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -85,22 +85,6 @@ static inline bool iommu_pages_list_empty(struct iommu_pages_list *list)
 }
 
 /**
- * iommu_alloc_pages_node - Allocate a zeroed page of a given order from
- * specific NUMA node
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * Returns the virtual address of the allocated page.
- * Prefer to use iommu_alloc_pages_node_lg2()
- */
-static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp,
-					   unsigned int order)
-{
-	return iommu_alloc_pages_node_sz(nid, gfp, 1 << (order + PAGE_SHIFT));
-}
-
-/**
  * iommu_alloc_pages_node_sz - Allocate a zeroed page of a given size from
  * specific NUMA node
  * @nid: memory NUMA node id
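
[Editor's note, not part of the commit: the new PRQ macros are value-for-value
identical to the old open-coded 0x1000 forms. A minimal userspace check; SZ_4K
is defined locally here only because linux/sizes.h is kernel-only:

/*
 * Sketch only: verify the new PRQ macros against the old definitions.
 * With PRQ_ORDER 4, the ring is 64KB and holds 2048 32-byte descriptors.
 */
#include <assert.h>

#define SZ_4K		0x1000UL
#define PRQ_ORDER	4
#define PRQ_SIZE	(SZ_4K << PRQ_ORDER)
#define PRQ_RING_MASK	(PRQ_SIZE - 0x20)
#define PRQ_DEPTH	(PRQ_SIZE >> 5)

int main(void)
{
	assert(PRQ_SIZE == (0x1000UL << PRQ_ORDER));		/* 64KB ring */
	assert(PRQ_RING_MASK == ((0x1000UL << PRQ_ORDER) - 0x20));
	assert(PRQ_DEPTH == 2048);	/* 64KB / 32-byte descriptors */
	return 0;
}

Expressing the ring size in bytes up front is what lets prq.c pass PRQ_SIZE
straight to iommu_alloc_pages_node_sz() with no PAGE_SHIFT arithmetic.]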