author     Vasant Hegde <vasant.hegde@amd.com>  2023-11-22 12:02:09 +0300
committer  Joerg Roedel <jroedel@suse.de>       2023-12-11 17:25:34 +0300
commit     a976da66e8e547cd07a9792f5854c2f73c456a21 (patch)
tree       15b608922610ec4fb05b1e7a8fcbfdb2e4a5ceee /drivers/iommu
parent     3f2571fed2fa9e9ea61fd990a9a4fc20ca83b62e (diff)
iommu/amd: Remove redundant passing of PDE bit
Current code always sets the PDE bit in the INVALIDATE_IOMMU_PAGES
command. Hence get rid of the 'pde' variable across functions. We can
re-introduce this bit whenever it's needed.

Suggested-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20231122090215.6191-4-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
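Illustrative sketch (editorial, not part of the commit): the resulting
helper after this patch, reconstructed from the hunks below. The
memset() and domid assignment are assumed from the surrounding function
body, which the diff context does not show in full.

static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid)
{
	u64 inv_address = build_inv_address(address, size);

	memset(cmd, 0, sizeof(*cmd));	/* assumed context, not in the hunks */
	cmd->data[1] |= domid;		/* assumed context, not in the hunks */
	cmd->data[2] = lower_32_bits(inv_address);
	cmd->data[3] = upper_32_bits(inv_address);
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	/* PDE bit - we want to flush everything, not only the PTEs */
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}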
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd/iommu.c  27
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 233076686379..b1551330758b 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1124,7 +1124,7 @@ static inline u64 build_inv_address(u64 address, size_t size)
}
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
- size_t size, u16 domid, int pde)
+ size_t size, u16 domid)
{
u64 inv_address = build_inv_address(address, size);
@@ -1133,8 +1133,8 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
cmd->data[2] = lower_32_bits(inv_address);
cmd->data[3] = upper_32_bits(inv_address);
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
- if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+ /* PDE bit - we want to flush everything, not only the PTEs */
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}
static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
@@ -1341,7 +1341,7 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- dom_id, 1);
+ dom_id);
iommu_queue_command(iommu, &cmd);
}
@@ -1352,8 +1352,7 @@ static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
{
struct iommu_cmd cmd;
- build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- dom_id, 1);
+ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, dom_id);
iommu_queue_command(iommu, &cmd);
iommu_completion_wait(iommu);
@@ -1476,13 +1475,13 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
* page. Otherwise it flushes the whole TLB of the IOMMU.
*/
static void __domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size, int pde)
+ u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
int ret = 0, i;
- build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
+ build_inv_iommu_pages(&cmd, address, size, domain->id);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
if (!domain->dev_iommu[i])
@@ -1507,10 +1506,10 @@ static void __domain_flush_pages(struct protection_domain *domain,
}
static void domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size, int pde)
+ u64 address, size_t size)
{
if (likely(!amd_iommu_np_cache)) {
- __domain_flush_pages(domain, address, size, pde);
+ __domain_flush_pages(domain, address, size);
return;
}
@@ -1543,7 +1542,7 @@ static void domain_flush_pages(struct protection_domain *domain,
flush_size = 1ul << min_alignment;
- __domain_flush_pages(domain, address, flush_size, pde);
+ __domain_flush_pages(domain, address, flush_size);
address += flush_size;
size -= flush_size;
}
@@ -1552,7 +1551,7 @@ static void domain_flush_pages(struct protection_domain *domain,
/* Flush the whole IO/TLB for a given protection domain - including PDE */
void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
{
- domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+ domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
void amd_iommu_domain_flush_complete(struct protection_domain *domain)
@@ -1579,7 +1578,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
- domain_flush_pages(domain, iova, size, 1);
+ domain_flush_pages(domain, iova, size);
amd_iommu_domain_flush_complete(domain);
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -2591,7 +2590,7 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
+ domain_flush_pages(dom, gather->start, gather->end - gather->start + 1);
amd_iommu_domain_flush_complete(dom);
spin_unlock_irqrestore(&dom->lock, flags);
}
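Usage sketch (illustrative, based on the amd_iommu_iotlb_sync() hunk
above): with the 'pde' parameter gone, callers drop the trailing 1 and
the PDE bit is applied unconditionally inside build_inv_iommu_pages():

	spin_lock_irqsave(&dom->lock, flags);
	/* Flushes PTEs and page-directory entries for the gathered range */
	domain_flush_pages(dom, gather->start, gather->end - gather->start + 1);
	amd_iommu_domain_flush_complete(dom);
	spin_unlock_irqrestore(&dom->lock, flags);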