author		Jason Gunthorpe <jgg@nvidia.com>	2025-04-08 13:53:54 -0300
committer	Joerg Roedel <jroedel@suse.de>	2025-04-17 16:22:36 +0200
commit		3e8e986ce8a0d5fcf5479212d0c1ece4626c4a27 (patch)
tree		bdf8dfd9d80026a7ef42b3edc5c4d01beeff1503
parent		4316ba4a50331a963a8c76c7716cd7a4646aa385 (diff)
iommu/pages: Remove iommu_free_page()
Use iommu_free_pages() instead.
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/6-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--	drivers/iommu/amd/init.c		2
-rw-r--r--	drivers/iommu/amd/io_pgtable.c		4
-rw-r--r--	drivers/iommu/amd/io_pgtable_v2.c	8
-rw-r--r--	drivers/iommu/amd/iommu.c		4
-rw-r--r--	drivers/iommu/intel/dmar.c		4
-rw-r--r--	drivers/iommu/intel/iommu.c		12
-rw-r--r--	drivers/iommu/intel/pasid.c		4
-rw-r--r--	drivers/iommu/iommu-pages.h		9
-rw-r--r--	drivers/iommu/riscv/iommu.c		6
-rw-r--r--	drivers/iommu/rockchip-iommu.c		8
-rw-r--r--	drivers/iommu/tegra-smmu.c		12
11 files changed, 32 insertions, 41 deletions
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 69aad383f97e..b5415dd19426 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -946,7 +946,7 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 static void __init free_cwwb_sem(struct amd_iommu *iommu)
 {
 	if (iommu->cmd_sem)
-		iommu_free_page((void *)iommu->cmd_sem);
+		iommu_free_pages((void *)iommu->cmd_sem);
 }
 
 static void iommu_enable_xt(struct amd_iommu *iommu)
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 26cf562dde11..42d9e6746dee 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -146,7 +146,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
 
-	iommu_free_page(pte);
+	iommu_free_pages(pte);
 
 	return ret;
 }
@@ -222,7 +222,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
 
 		/* pte could have been changed somewhere. */
 		if (!try_cmpxchg64(pte, &__pte, __npte))
-			iommu_free_page(page);
+			iommu_free_pages(page);
 		else if (IOMMU_PTE_PRESENT(__pte))
 			*updated = true;
 
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index a56a27396305..f5f2f5327378 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -121,10 +121,10 @@ static void free_pgtable(u64 *pt, int level)
 		if (level > 2)
 			free_pgtable(p, level - 1);
 		else
-			iommu_free_page(p);
+			iommu_free_pages(p);
 	}
 
-	iommu_free_page(pt);
+	iommu_free_pages(pt);
 }
 
 /* Allocate page table */
@@ -159,7 +159,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
 		__npte = set_pgtable_attr(page);
 		/* pte could have been changed somewhere. */
 		if (!try_cmpxchg64(pte, &__pte, __npte))
-			iommu_free_page(page);
+			iommu_free_pages(page);
 		else if (IOMMU_PTE_PRESENT(__pte))
 			*updated = true;
 
@@ -181,7 +181,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
 		if (pg_size == IOMMU_PAGE_SIZE_1G)
 			free_pgtable(__pte, end_level - 1);
 		else if (pg_size == IOMMU_PAGE_SIZE_2M)
-			iommu_free_page(__pte);
+			iommu_free_pages(__pte);
 	}
 	return pte;
 }
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index b503d01e3b80..fe31166ced94 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1812,7 +1812,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
 
 		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
 
-		iommu_free_page(ptr);
+		iommu_free_pages(ptr);
 	}
 }
 
@@ -1845,7 +1845,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
 	/* Free per device domain ID */
 	pdom_id_free(gcr3_info->domid);
 
-	iommu_free_page(gcr3_info->gcr3_tbl);
+	iommu_free_pages(gcr3_info->gcr3_tbl);
 	gcr3_info->gcr3_tbl = NULL;
 }
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index e540092d664d..14bc39b2cdd5 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1187,7 +1187,7 @@ static void free_iommu(struct intel_iommu *iommu)
 	}
 
 	if (iommu->qi) {
-		iommu_free_page(iommu->qi->desc);
+		iommu_free_pages(iommu->qi->desc);
 		kfree(iommu->qi->desc_status);
 		kfree(iommu->qi);
 	}
@@ -1714,7 +1714,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 
 	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
 	if (!qi->desc_status) {
-		iommu_free_page(qi->desc);
+		iommu_free_pages(qi->desc);
 		kfree(qi);
 		iommu->qi = NULL;
 		return -ENOMEM;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index b29da2d96d0b..e66fd919501f 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -571,17 +571,17 @@ static void free_context_table(struct intel_iommu *iommu)
 	for (i = 0; i < ROOT_ENTRY_NR; i++) {
 		context = iommu_context_addr(iommu, i, 0, 0);
 		if (context)
-			iommu_free_page(context);
+			iommu_free_pages(context);
 
 		if (!sm_supported(iommu))
 			continue;
 
 		context = iommu_context_addr(iommu, i, 0x80, 0);
 		if (context)
-			iommu_free_page(context);
+			iommu_free_pages(context);
 	}
 
-	iommu_free_page(iommu->root_entry);
+	iommu_free_pages(iommu->root_entry);
 	iommu->root_entry = NULL;
 }
 
@@ -745,7 +745,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			tmp = 0ULL;
 			if (!try_cmpxchg64(&pte->val, &tmp, pteval))
 				/* Someone else set it while we were thinking; use theirs. */
-				iommu_free_page(tmp_page);
+				iommu_free_pages(tmp_page);
 			else
 				domain_flush_cache(domain, pte, sizeof(*pte));
 		}
@@ -858,7 +858,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
 		      last_pfn < level_pfn + level_size(level) - 1)) {
 			dma_clear_pte(pte);
 			domain_flush_cache(domain, pte, sizeof(*pte));
-			iommu_free_page(level_pte);
+			iommu_free_pages(level_pte);
 		}
 next:
 		pfn += level_size(level);
@@ -882,7 +882,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 
 	/* free pgd */
 	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
-		iommu_free_page(domain->pgd);
+		iommu_free_pages(domain->pgd);
 		domain->pgd = NULL;
 	}
 }
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 58a13366b41c..b616aaad03ba 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -96,7 +96,7 @@ void intel_pasid_free_table(struct device *dev)
 	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
 	for (i = 0; i < max_pde; i++) {
 		table = get_pasid_table_from_pde(&dir[i]);
-		iommu_free_page(table);
+		iommu_free_pages(table);
 	}
 
 	iommu_free_pages(pasid_table->table);
@@ -160,7 +160,7 @@ retry:
 		tmp = 0ULL;
 		if (!try_cmpxchg64(&dir[dir_index].val, &tmp,
 				   (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
-			iommu_free_page(entries);
+			iommu_free_pages(entries);
 			goto retry;
 		}
 		if (!ecap_coherent(info->iommu->ecap)) {
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index 88587da1782b..fcd17b94f7b8 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -123,15 +123,6 @@ static inline void iommu_free_pages(void *virt)
 }
 
 /**
- * iommu_free_page - free page
- * @virt: virtual address of the page to be freed.
- */
-static inline void iommu_free_page(void *virt)
-{
-	iommu_free_pages(virt);
-}
-
-/**
  * iommu_put_pages_list - free a list of pages.
  * @page: the head of the lru list to be freed.
  *
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 1868468d018a..4fe07343d84e 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -1105,7 +1105,7 @@ static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
 	if (freelist)
 		list_add_tail(&virt_to_page(ptr)->lru, freelist);
 	else
-		iommu_free_page(ptr);
+		iommu_free_pages(ptr);
 }
 
 static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain,
@@ -1148,7 +1148,7 @@ pte_retry:
 		old = pte;
 		pte = _io_pte_entry(virt_to_pfn(addr), _PAGE_TABLE);
 		if (cmpxchg_relaxed(ptr, old, pte) != old) {
-			iommu_free_page(addr);
+			iommu_free_pages(addr);
 			goto pte_retry;
 		}
 	}
@@ -1393,7 +1393,7 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
 	domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1,
 					RISCV_IOMMU_MAX_PSCID, GFP_KERNEL);
 	if (domain->pscid < 0) {
-		iommu_free_page(domain->pgd_root);
+		iommu_free_pages(domain->pgd_root);
 		kfree(domain);
 		return ERR_PTR(-ENOMEM);
 	}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index af4cc91b2bbf..f1b49370dfa3 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -737,7 +737,7 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
 	pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
 	if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) {
 		dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n");
-		iommu_free_page(page_table);
+		iommu_free_pages(page_table);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1086,7 +1086,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
 	return &rk_domain->domain;
 
 err_free_dt:
-	iommu_free_page(rk_domain->dt);
+	iommu_free_pages(rk_domain->dt);
 err_free_domain:
 	kfree(rk_domain);
 
@@ -1107,13 +1107,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
 			u32 *page_table = phys_to_virt(pt_phys);
 			dma_unmap_single(rk_domain->dma_dev, pt_phys,
 					 SPAGE_SIZE, DMA_TO_DEVICE);
-			iommu_free_page(page_table);
+			iommu_free_pages(page_table);
 		}
 	}
 
 	dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma,
 			 SPAGE_SIZE, DMA_TO_DEVICE);
-	iommu_free_page(rk_domain->dt);
+	iommu_free_pages(rk_domain->dt);
 
 	kfree(rk_domain);
 }
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 95e97d1e4004..7ad1b3c7c7fc 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -303,7 +303,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
 
 	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
 	if (!as->count) {
-		iommu_free_page(as->pd);
+		iommu_free_pages(as->pd);
 		kfree(as);
 		return NULL;
 	}
@@ -311,7 +311,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
 	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
 	if (!as->pts) {
 		kfree(as->count);
-		iommu_free_page(as->pd);
+		iommu_free_pages(as->pd);
 		kfree(as);
 		return NULL;
 	}
@@ -608,14 +608,14 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 		dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT,
 				     DMA_TO_DEVICE);
 		if (dma_mapping_error(smmu->dev, dma)) {
-			iommu_free_page(pt);
+			iommu_free_pages(pt);
 			return NULL;
 		}
 
 		if (!smmu_dma_addr_valid(smmu, dma)) {
 			dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT,
 					 DMA_TO_DEVICE);
-			iommu_free_page(pt);
+			iommu_free_pages(pt);
 			return NULL;
 		}
 
@@ -656,7 +656,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 
 		dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT,
 				 DMA_TO_DEVICE);
-		iommu_free_page(pt);
+		iommu_free_pages(pt);
 		as->pts[pde] = NULL;
 	}
 }
@@ -707,7 +707,7 @@ static struct tegra_pt *as_get_pde_page(struct tegra_smmu_as *as,
 	 */
 	if (as->pts[pde]) {
 		if (pt)
-			iommu_free_page(pt);
+			iommu_free_pages(pt);
 
 		pt = as->pts[pde];
 	}
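
As the iommu-pages.h hunk shows, iommu_free_page() was only a static inline wrapper that forwarded its argument to iommu_free_pages(), so every call-site change in this patch is a mechanical one-for-one rename. A minimal sketch of the conversion pattern, mirroring the riscv/iommu.c hunk (illustrative only, not additional patch content):

	/* before: free a single page-table page through the wrapper */
	iommu_free_page(domain->pgd_root);

	/* after: call iommu_free_pages() directly; it takes the same
	 * single virtual-address argument as the removed wrapper
	 */
	iommu_free_pages(domain->pgd_root);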