From 972bf252f86062e50f9c9ea81f84f5df0e9f1302 Mon Sep 17 00:00:00 2001
From: John Garry via iommu
Date: Tue, 7 Dec 2021 19:17:26 +0800
Subject: iommu/iova: Move fast alloc size roundup into alloc_iova_fast()

It really is a property of the IOVA rcache code that we need to allocate
a power-of-2 size, so relocate the roundup into alloc_iova_fast() rather
than the callsites.

Signed-off-by: John Garry
Acked-by: Will Deacon
Reviewed-by: Xie Yongji
Acked-by: Jason Wang
Acked-by: Michael S. Tsirkin
Acked-by: Robin Murphy
Link: https://lore.kernel.org/r/1638875846-23993-1-git-send-email-john.garry@huawei.com
Signed-off-by: Joerg Roedel
---
 drivers/vdpa/vdpa_user/iova_domain.c | 8 --------
 1 file changed, 8 deletions(-)

(limited to 'drivers/vdpa/vdpa_user')

diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 1daae2608860..2b1143f11d8f 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -292,14 +292,6 @@ vduse_domain_alloc_iova(struct iova_domain *iovad,
 	unsigned long iova_len = iova_align(iovad, size) >> shift;
 	unsigned long iova_pfn;
 
-	/*
-	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
-	 * will come back to bite us badly, so we have to waste a bit of space
-	 * rounding up anything cacheable to make sure that can't happen. The
-	 * order of the unadjusted size will still match upon freeing.
-	 */
-	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
-		iova_len = roundup_pow_of_two(iova_len);
 	iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
 
 	return iova_pfn << shift;
-- 
cgit v1.2.3
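
For reference, the reason the caller had to round up in the first place is the
one given in the comment deleted above: freeing a non-power-of-two-sized range
back into the IOVA rcaches can come back to bite us, so any length small enough
to be cacheable is padded to the next power of two before allocation, while the
order of the unadjusted length still matches on free. The snippet below is a
small standalone C sketch of that adjustment only; the constant value, helper
names and main() driver are assumptions for illustration, not the kernel's
alloc_iova_fast() implementation.

#include <stdio.h>

/* Assumed to mirror the constant in drivers/iommu/iova.c: log2 of the
 * largest IOVA range size (in pages) that the rcaches will hold, plus one. */
#define IOVA_RANGE_CACHE_MAX_SIZE 6

/* Naive stand-in for the kernel's roundup_pow_of_two() helper. */
static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* The adjustment this patch relocates: only lengths small enough to be
 * served from the rcaches are rounded up; larger ones pass through. */
static unsigned long rcache_adjust_len(unsigned long iova_len)
{
	if (iova_len < (1UL << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);
	return iova_len;
}

int main(void)
{
	unsigned long sizes[] = { 1, 3, 5, 17, 31, 33, 100 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("iova_len %3lu -> %3lu PFNs\n",
		       sizes[i], rcache_adjust_len(sizes[i]));
	return 0;
}

With the check relocated into alloc_iova_fast(), every fast-path caller gets
this guarantee automatically, which is why the hunk above can simply delete it;
the matching addition lives in drivers/iommu/iova.c and falls outside this
vdpa_user-limited view of the commit.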