[v2,1/5] iommu/amd: Switch amd_iommu_update_ga() to use modify_irte_ga()

Message ID 20230519005529.28171-2-suravee.suthikulpanit@amd.com
State New
Series iommu/amd: AVIC Interrupt Remapping Improvements

Commit Message

Suravee Suthikulpanit May 19, 2023, 12:55 a.m. UTC
  From: Joao Martins <joao.m.martins@oracle.com>

modify_irte_ga() uses cmpxchg_double() to update the IRTE in one shot,
which is necessary when adding IRTE cache disabling support, since the
driver will no longer need to flush the IRT for updates to take effect
in hardware.

Note that there is a functional change: the IsRun and Destination bits
of the IRTE are now cached in struct amd_ir_data.entry.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 drivers/iommu/amd/iommu.c | 38 ++++++++++----------------------------
 1 file changed, 10 insertions(+), 28 deletions(-)
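
For illustration, a minimal sketch of the one-shot update that
modify_irte_ga() performs on the 128-bit IRTE (not the driver's actual
code; the helper name is hypothetical, and in the real driver the slot
is looked up by devid/index and updated under the remap table's lock):

/*
 * Replace both 64-bit halves of an IRTE in a single compare-and-exchange
 * with cmpxchg_double(), so the IOMMU never observes a half-written
 * entry and no IRT flush is required once IRTE caching is disabled.
 */
static int example_update_irte_one_shot(struct irte_ga *slot,
					struct irte_ga *new)
{
	/* Read the current contents and swap in the new 128-bit value. */
	if (!cmpxchg_double(&slot->lo.val, &slot->hi.val,
			    slot->lo.val, slot->hi.val,
			    new->lo.val, new->hi.val))
		return -EBUSY;

	return 0;
}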
  

Comments

Jerry Snitselaar May 23, 2023, 12:02 a.m. UTC | #1
On Thu, May 18, 2023 at 08:55:25PM -0400, Suravee Suthikulpanit wrote:
> From: Joao Martins <joao.m.martins@oracle.com>
> 
> modify_irte_ga() uses cmpxchg_double() to update the IRTE in one shot,
> which is necessary when adding IRTE cache disabling support, since the
> driver will no longer need to flush the IRT for updates to take effect
> in hardware.
> 
> Note that there is a functional change: the IsRun and Destination bits
> of the IRTE are now cached in struct amd_ir_data.entry.
> 
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>

Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>

> ---
>  drivers/iommu/amd/iommu.c | 38 ++++++++++----------------------------
>  1 file changed, 10 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
> index ebb155bfef15..4a3a7346ab21 100644
> --- a/drivers/iommu/amd/iommu.c
> +++ b/drivers/iommu/amd/iommu.c
> @@ -3700,44 +3700,26 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
>  
>  int amd_iommu_update_ga(int cpu, bool is_run, void *data)
>  {
> -	unsigned long flags;
> -	struct amd_iommu *iommu;
> -	struct irq_remap_table *table;
>  	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
> -	int devid = ir_data->irq_2_irte.devid;
>  	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
> -	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
>  
>  	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
> -	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
> +	    !entry || !entry->lo.fields_vapic.guest_mode)
>  		return 0;
>  
> -	iommu = ir_data->iommu;
> -	if (!iommu)
> -		return -ENODEV;
> -
> -	table = get_irq_table(iommu, devid);
> -	if (!table)
> +	if (!ir_data->iommu)
>  		return -ENODEV;
>  
> -	raw_spin_lock_irqsave(&table->lock, flags);
> -
> -	if (ref->lo.fields_vapic.guest_mode) {
> -		if (cpu >= 0) {
> -			ref->lo.fields_vapic.destination =
> -						APICID_TO_IRTE_DEST_LO(cpu);
> -			ref->hi.fields.destination =
> -						APICID_TO_IRTE_DEST_HI(cpu);
> -		}
> -		ref->lo.fields_vapic.is_run = is_run;
> -		barrier();
> +	if (cpu >= 0) {
> +		entry->lo.fields_vapic.destination =
> +					APICID_TO_IRTE_DEST_LO(cpu);
> +		entry->hi.fields.destination =
> +					APICID_TO_IRTE_DEST_HI(cpu);
>  	}
> +	entry->lo.fields_vapic.is_run = is_run;
>  
> -	raw_spin_unlock_irqrestore(&table->lock, flags);
> -
> -	iommu_flush_irt(iommu, devid);
> -	iommu_completion_wait(iommu);
> -	return 0;
> +	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
> +			      ir_data->irq_2_irte.index, entry, ir_data);
>  }
>  EXPORT_SYMBOL(amd_iommu_update_ga);
>  #endif
> -- 
> 2.31.1
>
  

Patch

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index ebb155bfef15..4a3a7346ab21 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3700,44 +3700,26 @@  int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 
 int amd_iommu_update_ga(int cpu, bool is_run, void *data)
 {
-	unsigned long flags;
-	struct amd_iommu *iommu;
-	struct irq_remap_table *table;
 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
-	int devid = ir_data->irq_2_irte.devid;
 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
-	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;
 
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
-	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
+	    !entry || !entry->lo.fields_vapic.guest_mode)
 		return 0;
 
-	iommu = ir_data->iommu;
-	if (!iommu)
-		return -ENODEV;
-
-	table = get_irq_table(iommu, devid);
-	if (!table)
+	if (!ir_data->iommu)
 		return -ENODEV;
 
-	raw_spin_lock_irqsave(&table->lock, flags);
-
-	if (ref->lo.fields_vapic.guest_mode) {
-		if (cpu >= 0) {
-			ref->lo.fields_vapic.destination =
-						APICID_TO_IRTE_DEST_LO(cpu);
-			ref->hi.fields.destination =
-						APICID_TO_IRTE_DEST_HI(cpu);
-		}
-		ref->lo.fields_vapic.is_run = is_run;
-		barrier();
+	if (cpu >= 0) {
+		entry->lo.fields_vapic.destination =
+					APICID_TO_IRTE_DEST_LO(cpu);
+		entry->hi.fields.destination =
+					APICID_TO_IRTE_DEST_HI(cpu);
 	}
+	entry->lo.fields_vapic.is_run = is_run;
 
-	raw_spin_unlock_irqrestore(&table->lock, flags);
-
-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
-	return 0;
+	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+			      ir_data->irq_2_irte.index, entry, ir_data);
 }
 EXPORT_SYMBOL(amd_iommu_update_ga);
 #endif
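
For context, amd_iommu_update_ga() is called from KVM's AVIC code
(arch/x86/kvm/svm/avic.c) when a vCPU is scheduled in or out, once per
IRTE associated with that vCPU. A hypothetical caller sketch (the
function and bookkeeping struct names are illustrative, not the actual
KVM code), showing how each call now funnels through modify_irte_ga():

struct example_irte_entry {		/* hypothetical per-IRTE bookkeeping */
	struct list_head node;
	void *ir_data;			/* struct amd_ir_data * owned by the IOMMU driver */
};

/*
 * Update the Destination and IsRun bits of every IRTE tracked for a
 * vCPU.  With this patch, each amd_iommu_update_ga() call performs a
 * single one-shot IRTE update via modify_irte_ga(); a negative cpu
 * leaves the Destination fields untouched.
 */
static int example_update_ga_for_vcpu(struct list_head *irte_list,
				      int cpu, bool is_run)
{
	struct example_irte_entry *e;
	int ret;

	list_for_each_entry(e, irte_list, node) {
		ret = amd_iommu_update_ga(cpu, is_run, e->ir_data);
		if (ret)
			return ret;
	}

	return 0;
}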