[v2,5/5] iommu/amd: Improving Interrupt Remapping Table Invalidation
Commit Message
Invalidating an Interrupt Remapping Table (IRT) requires the AMD IOMMU driver
to issue INVALIDATE_INTERRUPT_TABLE and COMPLETION_WAIT commands.
Currently, the driver issues the two commands separately, which requires
calling raw_spin_lock_irqsave() twice. In addition, the COMPLETION_WAIT
could potentially be interleaved with other commands causing delay of
the COMPLETION_WAIT command.
Therefore, combine issuing of the two commands in one spin-lock, and
change struct amd_iommu.cmd_sem_val to use atomic64 to minimize
locking.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
drivers/iommu/amd/amd_iommu_types.h | 2 +-
drivers/iommu/amd/init.c | 2 +-
drivers/iommu/amd/iommu.c | 27 ++++++++++++++++++++++-----
3 files changed, 24 insertions(+), 7 deletions(-)
Comments
On Thu, May 18, 2023 at 08:55:29PM -0400, Suravee Suthikulpanit wrote:
> Invalidating Interrupt Remapping Table (IRT) requires, the AMD IOMMU driver
> to issue INVALIDATE_INTERRUPT_TABLE and COMPLETION_WAIT commands.
> Currently, the driver issues the two commands separately, which requires
> calling raw_spin_lock_irqsave() twice. In addition, the COMPLETION_WAIT
> could potentially be interleaved with other commands causing delay of
> the COMPLETION_WAIT command.
>
> Therefore, combine issuing of the two commands in one spin-lock, and
> changing struct amd_iommu.cmd_sem_val to use atomic64 to minimize
> locking.
>
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
> ---
> drivers/iommu/amd/amd_iommu_types.h | 2 +-
> drivers/iommu/amd/init.c | 2 +-
> drivers/iommu/amd/iommu.c | 27 ++++++++++++++++++++++-----
> 3 files changed, 24 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
> index 486a052e37ca..2fa65da2a9a5 100644
> --- a/drivers/iommu/amd/amd_iommu_types.h
> +++ b/drivers/iommu/amd/amd_iommu_types.h
> @@ -744,7 +744,7 @@ struct amd_iommu {
>
> u32 flags;
> volatile u64 *cmd_sem;
> - u64 cmd_sem_val;
> + atomic64_t cmd_sem_val;
>
> #ifdef CONFIG_AMD_IOMMU_DEBUGFS
> /* DebugFS Info */
> diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
> index fc0392d706db..16737819f79a 100644
> --- a/drivers/iommu/amd/init.c
> +++ b/drivers/iommu/amd/init.c
> @@ -1750,7 +1750,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
> iommu->pci_seg = pci_seg;
>
> raw_spin_lock_init(&iommu->lock);
> - iommu->cmd_sem_val = 0;
> + atomic64_set(&iommu->cmd_sem_val, 0);
>
> /* Add IOMMU to internal data structures */
> list_add_tail(&iommu->list, &amd_iommu_list);
> diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
> index 51c2b018433d..57ae4a8072d3 100644
> --- a/drivers/iommu/amd/iommu.c
> +++ b/drivers/iommu/amd/iommu.c
> @@ -1182,11 +1182,11 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
> if (!iommu->need_sync)
> return 0;
>
> - raw_spin_lock_irqsave(&iommu->lock, flags);
> -
> - data = ++iommu->cmd_sem_val;
> + data = atomic64_add_return(1, &iommu->cmd_sem_val);
> build_completion_wait(&cmd, iommu, data);
>
> + raw_spin_lock_irqsave(&iommu->lock, flags);
> +
> ret = __iommu_queue_command_sync(iommu, &cmd, false);
> if (ret)
> goto out_unlock;
> @@ -1284,11 +1284,28 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
>
> static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
> {
> + int ret;
> + u64 data;
> + unsigned long flags;
> + struct iommu_cmd cmd, cmd2;
> +
> if (iommu->irtcachedis_enabled)
> return;
>
> - iommu_flush_irt(iommu, devid);
> - iommu_completion_wait(iommu);
> + build_inv_irt(&cmd, devid);
> + data = atomic64_add_return(1, &iommu->cmd_sem_val);
> + build_completion_wait(&cmd2, iommu, data);
> +
> + raw_spin_lock_irqsave(&iommu->lock, flags);
> + ret = __iommu_queue_command_sync(iommu, &cmd, true);
> + if (ret)
> + goto out;
> + ret = __iommu_queue_command_sync(iommu, &cmd2, false);
> + if (ret)
> + goto out;
> + wait_on_sem(iommu, data);
> +out:
> + raw_spin_unlock_irqrestore(&iommu->lock, flags);
> }
>
> void iommu_flush_all_caches(struct amd_iommu *iommu)
> --
> 2.31.1
>
@@ -744,7 +744,7 @@ struct amd_iommu {
u32 flags;
volatile u64 *cmd_sem;
- u64 cmd_sem_val;
+ atomic64_t cmd_sem_val;
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
/* DebugFS Info */
@@ -1750,7 +1750,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
iommu->pci_seg = pci_seg;
raw_spin_lock_init(&iommu->lock);
- iommu->cmd_sem_val = 0;
+ atomic64_set(&iommu->cmd_sem_val, 0);
/* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list);
@@ -1182,11 +1182,11 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
if (!iommu->need_sync)
return 0;
- raw_spin_lock_irqsave(&iommu->lock, flags);
-
- data = ++iommu->cmd_sem_val;
+ data = atomic64_add_return(1, &iommu->cmd_sem_val);
build_completion_wait(&cmd, iommu, data);
+ raw_spin_lock_irqsave(&iommu->lock, flags);
+
ret = __iommu_queue_command_sync(iommu, &cmd, false);
if (ret)
goto out_unlock;
@@ -1284,11 +1284,28 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
{
+ int ret;
+ u64 data;
+ unsigned long flags;
+ struct iommu_cmd cmd, cmd2;
+
if (iommu->irtcachedis_enabled)
return;
- iommu_flush_irt(iommu, devid);
- iommu_completion_wait(iommu);
+ build_inv_irt(&cmd, devid);
+ data = atomic64_add_return(1, &iommu->cmd_sem_val);
+ build_completion_wait(&cmd2, iommu, data);
+
+ raw_spin_lock_irqsave(&iommu->lock, flags);
+ ret = __iommu_queue_command_sync(iommu, &cmd, true);
+ if (ret)
+ goto out;
+ ret = __iommu_queue_command_sync(iommu, &cmd2, false);
+ if (ret)
+ goto out;
+ wait_on_sem(iommu, data);
+out:
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
}
void iommu_flush_all_caches(struct amd_iommu *iommu)