[2/5] iommu/s390: Add I/O TLB ops

Message ID 20221018145132.998866-3-schnelle@linux.ibm.com
State New
Series iommu/s390: Further improvements

Commit Message

Niklas Schnelle Oct. 18, 2022, 2:51 p.m. UTC
Currently s390-iommu explicitly does an I/O TLB flush (RPCIT) for every
update of the I/O translation table. For one, this is wasteful since
RPCIT can be skipped after a mapping operation if zdev->tlb_refresh is
unset. Moreover, we can do a single RPCIT for a whole range of pages,
including when doing lazy unmapping.

Thankfully both of these optimizations can be achieved by implementing
the IOMMU operations that common code provides for the different types
of I/O TLB flushes (a sketch of how common code drives them follows the
list below):

 * flush_iotlb_all: Flushes the I/O TLB for the entire IOVA space
 * iotlb_sync:  Flushes the I/O TLB for a range of pages that can be
   gathered up, for example to implement lazy unmapping.
 * iotlb_sync_map: Flushes the I/O TLB after a mapping operation
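
As an illustration (not part of this patch), this is roughly how the
common IOMMU code drives these callbacks on the unmap path: unmapped
ranges are gathered first and a single iotlb_sync flushes them all at
the end.

	struct iommu_iotlb_gather gather;

	/* initial state: start = ULONG_MAX, end = 0 */
	iommu_iotlb_gather_init(&gather);
	/* calls ->unmap_pages, which adds the unmapped range to the gather */
	iommu_unmap_fast(domain, iova, size, &gather);
	/* one ->iotlb_sync call covering everything gathered */
	iommu_iotlb_sync(domain, &gather);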

Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
---
 drivers/iommu/s390-iommu.c | 76 ++++++++++++++++++++++++++++++++------
 1 file changed, 65 insertions(+), 11 deletions(-)
  

Comments

Matthew Rosato Oct. 28, 2022, 4:03 p.m. UTC | #1
On 10/18/22 10:51 AM, Niklas Schnelle wrote:
> Currently s390-iommu explicitly does an I/O TLB flush (RPCIT) for every
> update of the I/O translation table. For one, this is wasteful since
> RPCIT can be skipped after a mapping operation if zdev->tlb_refresh is
> unset. Moreover, we can do a single RPCIT for a whole range of pages,
> including when doing lazy unmapping.
> 
> Thankfully both of these optimizations can be achieved by implementing
> the IOMMU operations that common code provides for the different types
> of I/O TLB flushes:
> 
>  * flush_iotlb_all: Flushes the I/O TLB for the entire IOVA space
>  * iotlb_sync:  Flushes the I/O TLB for a range of pages that can be
>    gathered up, for example to implement lazy unmapping.
>  * iotlb_sync_map: Flushes the I/O TLB after a mapping operation
> 
> Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
> ---
>  drivers/iommu/s390-iommu.c | 76 ++++++++++++++++++++++++++++++++------
>  1 file changed, 65 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
> index ee88e717254b..a4c2e9bc6d83 100644
> --- a/drivers/iommu/s390-iommu.c
> +++ b/drivers/iommu/s390-iommu.c
> @@ -199,14 +199,72 @@ static void s390_iommu_release_device(struct device *dev)
>  		__s390_iommu_detach_device(zdev);
>  }
>  
> +static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
> +{
> +	struct s390_domain *s390_domain = to_s390_domain(domain);
> +	struct zpci_dev *zdev;
> +	unsigned long flags;
> +	int rc;
> +
> +	spin_lock_irqsave(&s390_domain->list_lock, flags);
> +	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> +		rc = zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
> +					zdev->end_dma - zdev->start_dma + 1);
> +		if (rc)
> +			break;
> +	}
> +	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
> +}
> +
> +static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
> +				  struct iommu_iotlb_gather *gather)
> +{
> +	struct s390_domain *s390_domain = to_s390_domain(domain);
> +	size_t size = gather->end - gather->start + 1;
> +	struct zpci_dev *zdev;
> +	unsigned long flags;
> +	int rc;
> +
> +	/* If gather was never added to, there is nothing to flush */
> +	if (gather->start == ULONG_MAX)
> +		return;

Hmm, this seems a little awkward in that it depends on the init value set in iommu_iotlb_gather_init() never changing. I don't see any other iommu drivers doing this -- is there no other way to tell there's nothing to flush?

If we really need to do this, maybe add a shared #define to iommu.h that is used both in iommu_iotlb_gather_init() and here?
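
For reference, the initializer in question in include/linux/iommu.h
looks roughly like the following sketch; note that it leaves
gather->end zero-initialized:

	static inline void
	iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
	{
		*gather = (struct iommu_iotlb_gather) {
			.start	= ULONG_MAX,
			.freelist = LIST_HEAD_INIT(gather->freelist),
		};
	}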

> +
> +	spin_lock_irqsave(&s390_domain->list_lock, flags);
> +	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> +		rc = zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
> +					size);
> +		if (rc)
> +			break;
> +	}
> +	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
> +}
> +
> +static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
> +				      unsigned long iova, size_t size)
> +{
> +	struct s390_domain *s390_domain = to_s390_domain(domain);
> +	struct zpci_dev *zdev;
> +	unsigned long flags;
> +	int rc;
> +
> +	spin_lock_irqsave(&s390_domain->list_lock, flags);
> +	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> +		if (!zdev->tlb_refresh)
> +			continue;
> +		rc = zpci_refresh_trans((u64)zdev->fh << 32,
> +					iova, size);
> +		if (rc)
> +			break;
> +	}
> +	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
> +}
> +
>  static int s390_iommu_update_trans(struct s390_domain *s390_domain,
>  				   phys_addr_t pa, dma_addr_t dma_addr,
>  				   unsigned long nr_pages, int flags)
>  {
>  	phys_addr_t page_addr = pa & PAGE_MASK;
> -	dma_addr_t start_dma_addr = dma_addr;
>  	unsigned long irq_flags, i;
> -	struct zpci_dev *zdev;
>  	unsigned long *entry;
>  	int rc = 0;
>  
> @@ -225,15 +283,6 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
>  		dma_addr += PAGE_SIZE;
>  	}
>  
> -	spin_lock(&s390_domain->list_lock);
> -	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> -		rc = zpci_refresh_trans((u64)zdev->fh << 32,
> -					start_dma_addr, nr_pages * PAGE_SIZE);
> -		if (rc)
> -			break;
> -	}
> -	spin_unlock(&s390_domain->list_lock);
> -
>  undo_cpu_trans:
>  	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
>  		flags = ZPCI_PTE_INVALID;
> @@ -340,6 +389,8 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
>  	if (rc)
>  		return 0;
>  
> +	iommu_iotlb_gather_add_range(gather, iova, size);
> +
>  	return size;
>  }
>  
> @@ -384,6 +435,9 @@ static const struct iommu_ops s390_iommu_ops = {
>  		.detach_dev	= s390_iommu_detach_device,
>  		.map_pages	= s390_iommu_map_pages,
>  		.unmap_pages	= s390_iommu_unmap_pages,
> +		.flush_iotlb_all = s390_iommu_flush_iotlb_all,
> +		.iotlb_sync      = s390_iommu_iotlb_sync,
> +		.iotlb_sync_map  = s390_iommu_iotlb_sync_map,
>  		.iova_to_phys	= s390_iommu_iova_to_phys,
>  		.free		= s390_domain_free,
>  	}
  
Robin Murphy Oct. 31, 2022, 4:11 p.m. UTC | #2
On 2022-10-28 17:03, Matthew Rosato wrote:
> On 10/18/22 10:51 AM, Niklas Schnelle wrote:
>> Currently s390-iommu explicitly does an I/O TLB flush (RPCIT) for every
>> update of the I/O translation table. For one, this is wasteful since
>> RPCIT can be skipped after a mapping operation if zdev->tlb_refresh is
>> unset. Moreover, we can do a single RPCIT for a whole range of pages,
>> including when doing lazy unmapping.
>>
>> Thankfully both of these optimizations can be achieved by implementing
>> the IOMMU operations that common code provides for the different types
>> of I/O TLB flushes:
>>
>>   * flush_iotlb_all: Flushes the I/O TLB for the entire IOVA space
>>   * iotlb_sync:  Flushes the I/O TLB for a range of pages that can be
>>     gathered up, for example to implement lazy unmapping.
>>   * iotlb_sync_map: Flushes the I/O TLB after a mapping operation
>>
>> Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
>> ---
>>   drivers/iommu/s390-iommu.c | 76 ++++++++++++++++++++++++++++++++------
>>   1 file changed, 65 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
>> index ee88e717254b..a4c2e9bc6d83 100644
>> --- a/drivers/iommu/s390-iommu.c
>> +++ b/drivers/iommu/s390-iommu.c
>> @@ -199,14 +199,72 @@ static void s390_iommu_release_device(struct device *dev)
>>   		__s390_iommu_detach_device(zdev);
>>   }
>>   
>> +static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
>> +{
>> +	struct s390_domain *s390_domain = to_s390_domain(domain);
>> +	struct zpci_dev *zdev;
>> +	unsigned long flags;
>> +	int rc;
>> +
>> +	spin_lock_irqsave(&s390_domain->list_lock, flags);
>> +	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
>> +		rc = zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
>> +					zdev->end_dma - zdev->start_dma + 1);
>> +		if (rc)
>> +			break;
>> +	}
>> +	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
>> +}
>> +
>> +static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
>> +				  struct iommu_iotlb_gather *gather)
>> +{
>> +	struct s390_domain *s390_domain = to_s390_domain(domain);
>> +	size_t size = gather->end - gather->start + 1;
>> +	struct zpci_dev *zdev;
>> +	unsigned long flags;
>> +	int rc;
>> +
>> +	/* If gather was never added to, there is nothing to flush */
>> +	if (gather->start == ULONG_MAX)
>> +		return;
> 
> Hmm, this seems a little awkward in that it depends on the init value set in iommu_iotlb_gather_init() never changing. I don't see any other iommu drivers doing this -- is there no other way to tell there's nothing to flush?
> 
> If we really need to do this, maybe add a shared #define to iommu.h that is used both in iommu_iotlb_gather_init() and here?

If you can trust yourselves to never gather a single byte (which by 
construction should be impossible), "!gather->end" is perhaps a tiny bit 
more robust (and consistent with iommu_iotlb_gather_is_disjoint()), 
although given the way that iommu_iotlb_gather_add_*() work I don't 
think either initial value has much chance of changing in practice, 
short of some larger refactoring that would likely have to touch all the 
users anyway. If you still want to be as foolproof as possible, using 
"gather->start > gather->end" would represent the most general form of 
the initial conditions.
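
To spell out the alternatives as code (a sketch only; the helper and
its name are made up for illustration), given the initial state
start = ULONG_MAX, end = 0 from iommu_iotlb_gather_init():

	/* Hypothetical helper: detect a gather that was never added to */
	static bool gather_is_empty(struct iommu_iotlb_gather *gather)
	{
		/*
		 * For the current init values these are all equivalent:
		 *   gather->start == ULONG_MAX   - the exact init value
		 *   !gather->end                 - matches iommu_iotlb_gather_is_disjoint()
		 *   gather->start > gather->end  - most general form
		 */
		return gather->start > gather->end;
	}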

FWIW, SMMUv3 does also check for an empty range, but using 
gather->pgsize, which is only relevant with add_page(). The other gather 
users seem happy to go ahead and just issue whatever wacky invalidation 
command those initial values end up looking like. I think an empty sync 
should really only happen in unexpected conditions, like an unmap 
failing, so it shouldn't be a case that deserves a great deal of 
optimisation effort.

Thanks,
Robin.

>> +
>> +	spin_lock_irqsave(&s390_domain->list_lock, flags);
>> +	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
>> +		rc = zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
>> +					size);
>> +		if (rc)
>> +			break;
>> +	}
>> +	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
>> +}
>> +
>> +static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
>> +				      unsigned long iova, size_t size)
>> +{
>> +	struct s390_domain *s390_domain = to_s390_domain(domain);
>> +	struct zpci_dev *zdev;
>> +	unsigned long flags;
>> +	int rc;
>> +
>> +	spin_lock_irqsave(&s390_domain->list_lock, flags);
>> +	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
>> +		if (!zdev->tlb_refresh)
>> +			continue;
>> +		rc = zpci_refresh_trans((u64)zdev->fh << 32,
>> +					iova, size);
>> +		if (rc)
>> +			break;
>> +	}
>> +	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
>> +}
>> +
>>   static int s390_iommu_update_trans(struct s390_domain *s390_domain,
>>   				   phys_addr_t pa, dma_addr_t dma_addr,
>>   				   unsigned long nr_pages, int flags)
>>   {
>>   	phys_addr_t page_addr = pa & PAGE_MASK;
>> -	dma_addr_t start_dma_addr = dma_addr;
>>   	unsigned long irq_flags, i;
>> -	struct zpci_dev *zdev;
>>   	unsigned long *entry;
>>   	int rc = 0;
>>   
>> @@ -225,15 +283,6 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
>>   		dma_addr += PAGE_SIZE;
>>   	}
>>   
>> -	spin_lock(&s390_domain->list_lock);
>> -	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
>> -		rc = zpci_refresh_trans((u64)zdev->fh << 32,
>> -					start_dma_addr, nr_pages * PAGE_SIZE);
>> -		if (rc)
>> -			break;
>> -	}
>> -	spin_unlock(&s390_domain->list_lock);
>> -
>>   undo_cpu_trans:
>>   	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
>>   		flags = ZPCI_PTE_INVALID;
>> @@ -340,6 +389,8 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
>>   	if (rc)
>>   		return 0;
>>   
>> +	iommu_iotlb_gather_add_range(gather, iova, size);
>> +
>>   	return size;
>>   }
>>   
>> @@ -384,6 +435,9 @@ static const struct iommu_ops s390_iommu_ops = {
>>   		.detach_dev	= s390_iommu_detach_device,
>>   		.map_pages	= s390_iommu_map_pages,
>>   		.unmap_pages	= s390_iommu_unmap_pages,
>> +		.flush_iotlb_all = s390_iommu_flush_iotlb_all,
>> +		.iotlb_sync      = s390_iommu_iotlb_sync,
>> +		.iotlb_sync_map  = s390_iommu_iotlb_sync_map,
>>   		.iova_to_phys	= s390_iommu_iova_to_phys,
>>   		.free		= s390_domain_free,
>>   	}
>
  
Niklas Schnelle Nov. 2, 2022, 10:51 a.m. UTC | #3
On Mon, 2022-10-31 at 16:11 +0000, Robin Murphy wrote:
> On 2022-10-28 17:03, Matthew Rosato wrote:
> > On 10/18/22 10:51 AM, Niklas Schnelle wrote:
> > > Currently s390-iommu explicitly does an I/O TLB flush (RPCIT) for every
> > > update of the I/O translation table. For one, this is wasteful since
> > > RPCIT can be skipped after a mapping operation if zdev->tlb_refresh is
> > > unset. Moreover, we can do a single RPCIT for a whole range of pages,
> > > including when doing lazy unmapping.
> > > 
> > > Thankfully both of these optimizations can be achieved by implementing
> > > the IOMMU operations that common code provides for the different types
> > > of I/O TLB flushes:
> > > 
> > >   * flush_iotlb_all: Flushes the I/O TLB for the entire IOVA space
> > >   * iotlb_sync:  Flushes the I/O TLB for a range of pages that can be
> > >     gathered up, for example to implement lazy unmapping.
> > >   * iotlb_sync_map: Flushes the I/O TLB after a mapping operation
> > > 
> > > Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
> > > ---
> > >   drivers/iommu/s390-iommu.c | 76 ++++++++++++++++++++++++++++++++------
> > >   1 file changed, 65 insertions(+), 11 deletions(-)
> > > 
> > > diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
> > > index ee88e717254b..a4c2e9bc6d83 100644
> > > --- a/drivers/iommu/s390-iommu.c
> > > +++ b/drivers/iommu/s390-iommu.c
> > > @@ -199,14 +199,72 @@ static void s390_iommu_release_device(struct device *dev)
> > >   		__s390_iommu_detach_device(zdev);
> > >   }
> > >   
> > > +static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
> > > +{
> > > +	struct s390_domain *s390_domain = to_s390_domain(domain);
> > > +	struct zpci_dev *zdev;
> > > +	unsigned long flags;
> > > +	int rc;
> > > +
> > > +	spin_lock_irqsave(&s390_domain->list_lock, flags);
> > > +	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> > > +		rc = zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
> > > +					zdev->end_dma - zdev->start_dma + 1);
> > > +		if (rc)
> > > +			break;
> > > +	}
> > > +	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
> > > +}
> > > +
> > > +static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
> > > +				  struct iommu_iotlb_gather *gather)
> > > +{
> > > +	struct s390_domain *s390_domain = to_s390_domain(domain);
> > > +	size_t size = gather->end - gather->start + 1;
> > > +	struct zpci_dev *zdev;
> > > +	unsigned long flags;
> > > +	int rc;
> > > +
> > > +	/* If gather was never added to, there is nothing to flush */
> > > +	if (gather->start == ULONG_MAX)
> > > +		return;
> > 
> > Hmm, this seems a little awkward in that it depends on the init value set in iommu_iotlb_gather_init() never changing. I don't see any other iommu drivers doing this -- is there no other way to tell there's nothing to flush?
> > 
> > If we really need to do this, maybe add a shared #define to iommu.h that is used both in iommu_iotlb_gather_init() and here?
> 
> If you can trust yourselves to never gather a single byte (which by 
> construction should be impossible), "!gather->end" is perhaps a tiny bit 
> more robust (and consistent with iommu_iotlb_gather_is_disjoint()), 
> although given the way that iommu_iotlb_gather_add_*() work I don't 
> think either initial value has much chance of changing in practice, 
> short of some larger refactoring that would likely have to touch all the 
> users anyway. If you still want to be as foolproof as possible, using 
> "gather->start > gather->end" would represent the most general form of 
> the initial conditions.
> 
> FWIW, SMMUv3 does also check for an empty range, but using 
> gather->pgsize, which is only relevant with add_page(). The other gather 
> users seem happy to go ahead and just issue whatever wacky invalidation 
> command those initial values end up looking like. I think an empty sync 
> should really only happen in unexpected conditions, like an unmap 
> failing, so it shouldn't be a case that deserves a great deal of 
> optimisation effort.
> 
> Thanks,
> Robin.
> 

Yeah, I agree this should only happen when an unmap failed. I think I
added this check while playing around with an intermediate flush
similar to what amd_iommu_iotlb_gather_add_page() does, only that in
some intermediate stages I could end up with nothing left to flush.
That whole optimization turned out not to help and I removed it again.
Even if it's now only relevant for the error case, I'd still like to
keep the check, as it makes sure we don't issue weirdly sized flushes
in that case. I'll use '!gather->end' to be consistent with
iommu_iotlb_gather_is_disjoint(), as you suggested.
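
A minimal sketch of that revised guard in s390_iommu_iotlb_sync(),
assuming the gather was set up by iommu_iotlb_gather_init() (start =
ULONG_MAX, end = 0):

	/* Nothing was gathered, e.g. because an unmap failed: skip the RPCIT */
	if (!gather->end)
		return;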

Speaking of that AMD optimization, I'm actually not sure it does the
right thing for AMD either. The way I read the code, it issues more,
but only contiguous, TLB flushes when running virtualized, and at
least for us that approach turned out to be detrimental. Also, the
comment, at least to me, makes it sound as if they were aiming for
fewer flushes, but it's worded a bit confusingly, so I'm not sure.

Thanks,
Niklas

> > > +
> > > +	spin_lock_irqsave(&s390_domain->list_lock, flags);
> > > +	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> > > +		rc = zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
> > > +					size);
> > > +		if (rc)
> > > +			break;
> > > +	}
> > > +	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
> > > +}
> > > +
> > > +static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
> > > +				      unsigned long iova, size_t size)
> > > +{
> > > +	struct s390_domain *s390_domain = to_s390_domain(domain);
> > > +	struct zpci_dev *zdev;
> > > +	unsigned long flags;
> > > +	int rc;
> > > +
> > > +	spin_lock_irqsave(&s390_domain->list_lock, flags);
> > > +	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> > > +		if (!zdev->tlb_refresh)
> > > +			continue;
> > > +		rc = zpci_refresh_trans((u64)zdev->fh << 32,
> > > +					iova, size);
> > > +		if (rc)
> > > +			break;
> > > +	}
> > > +	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
> > > +}
> > > +
> > >   static int s390_iommu_update_trans(struct s390_domain *s390_domain,
> > >   				   phys_addr_t pa, dma_addr_t dma_addr,
> > >   				   unsigned long nr_pages, int flags)
> > >   {
> > >   	phys_addr_t page_addr = pa & PAGE_MASK;
> > > -	dma_addr_t start_dma_addr = dma_addr;
> > >   	unsigned long irq_flags, i;
> > > -	struct zpci_dev *zdev;
> > >   	unsigned long *entry;
> > >   	int rc = 0;
> > >   
> > > @@ -225,15 +283,6 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
> > >   		dma_addr += PAGE_SIZE;
> > >   	}
> > >   
> > > -	spin_lock(&s390_domain->list_lock);
> > > -	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
> > > -		rc = zpci_refresh_trans((u64)zdev->fh << 32,
> > > -					start_dma_addr, nr_pages * PAGE_SIZE);
> > > -		if (rc)
> > > -			break;
> > > -	}
> > > -	spin_unlock(&s390_domain->list_lock);
> > > -
> > >   undo_cpu_trans:
> > >   	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
> > >   		flags = ZPCI_PTE_INVALID;
> > > @@ -340,6 +389,8 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
> > >   	if (rc)
> > >   		return 0;
> > >   
> > > +	iommu_iotlb_gather_add_range(gather, iova, size);
> > > +
> > >   	return size;
> > >   }
> > >   
> > > @@ -384,6 +435,9 @@ static const struct iommu_ops s390_iommu_ops = {
> > >   		.detach_dev	= s390_iommu_detach_device,
> > >   		.map_pages	= s390_iommu_map_pages,
> > >   		.unmap_pages	= s390_iommu_unmap_pages,
> > > +		.flush_iotlb_all = s390_iommu_flush_iotlb_all,
> > > +		.iotlb_sync      = s390_iommu_iotlb_sync,
> > > +		.iotlb_sync_map  = s390_iommu_iotlb_sync_map,
> > >   		.iova_to_phys	= s390_iommu_iova_to_phys,
> > >   		.free		= s390_domain_free,
> > >   	}
  

Patch

diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index ee88e717254b..a4c2e9bc6d83 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -199,14 +199,72 @@  static void s390_iommu_release_device(struct device *dev)
 		__s390_iommu_detach_device(zdev);
 }
 
+static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct s390_domain *s390_domain = to_s390_domain(domain);
+	struct zpci_dev *zdev;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&s390_domain->list_lock, flags);
+	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
+		rc = zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
+					zdev->end_dma - zdev->start_dma + 1);
+		if (rc)
+			break;
+	}
+	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+}
+
+static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
+				  struct iommu_iotlb_gather *gather)
+{
+	struct s390_domain *s390_domain = to_s390_domain(domain);
+	size_t size = gather->end - gather->start + 1;
+	struct zpci_dev *zdev;
+	unsigned long flags;
+	int rc;
+
+	/* If gather was never added to, there is nothing to flush */
+	if (gather->start == ULONG_MAX)
+		return;
+
+	spin_lock_irqsave(&s390_domain->list_lock, flags);
+	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
+		rc = zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
+					size);
+		if (rc)
+			break;
+	}
+	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+}
+
+static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
+				      unsigned long iova, size_t size)
+{
+	struct s390_domain *s390_domain = to_s390_domain(domain);
+	struct zpci_dev *zdev;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&s390_domain->list_lock, flags);
+	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
+		if (!zdev->tlb_refresh)
+			continue;
+		rc = zpci_refresh_trans((u64)zdev->fh << 32,
+					iova, size);
+		if (rc)
+			break;
+	}
+	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+}
+
 static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 				   phys_addr_t pa, dma_addr_t dma_addr,
 				   unsigned long nr_pages, int flags)
 {
 	phys_addr_t page_addr = pa & PAGE_MASK;
-	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags, i;
-	struct zpci_dev *zdev;
 	unsigned long *entry;
 	int rc = 0;
 
@@ -225,15 +283,6 @@  static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 		dma_addr += PAGE_SIZE;
 	}
 
-	spin_lock(&s390_domain->list_lock);
-	list_for_each_entry(zdev, &s390_domain->devices, iommu_list) {
-		rc = zpci_refresh_trans((u64)zdev->fh << 32,
-					start_dma_addr, nr_pages * PAGE_SIZE);
-		if (rc)
-			break;
-	}
-	spin_unlock(&s390_domain->list_lock);
-
 undo_cpu_trans:
 	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
 		flags = ZPCI_PTE_INVALID;
@@ -340,6 +389,8 @@  static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
 	if (rc)
 		return 0;
 
+	iommu_iotlb_gather_add_range(gather, iova, size);
+
 	return size;
 }
 
@@ -384,6 +435,9 @@  static const struct iommu_ops s390_iommu_ops = {
 		.detach_dev	= s390_iommu_detach_device,
 		.map_pages	= s390_iommu_map_pages,
 		.unmap_pages	= s390_iommu_unmap_pages,
+		.flush_iotlb_all = s390_iommu_flush_iotlb_all,
+		.iotlb_sync      = s390_iommu_iotlb_sync,
+		.iotlb_sync_map  = s390_iommu_iotlb_sync_map,
 		.iova_to_phys	= s390_iommu_iova_to_phys,
 		.free		= s390_domain_free,
 	}